From cb809d3984d86a5ef4adb0f6f452d7a4442bf10b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 23 Sep 2024 17:48:40 +0100 Subject: [PATCH 001/802] develop: bump to version 3.0.0-develop --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 16 ++++++++-------- packages/clock/Cargo.toml | 2 +- packages/configuration/Cargo.toml | 2 +- packages/test-helpers/Cargo.toml | 2 +- packages/torrent-repository/Cargo.toml | 6 +++--- 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 56978738f..d43356ca4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3774,7 +3774,7 @@ dependencies = [ [[package]] name = "torrust-tracker" -version = "3.0.0-rc.1" +version = "3.0.0-develop" dependencies = [ "anyhow", "aquatic_udp_protocol", @@ -3836,7 +3836,7 @@ dependencies = [ [[package]] name = "torrust-tracker-clock" -version = "3.0.0-rc.1" +version = "3.0.0-develop" dependencies = [ "chrono", "lazy_static", @@ -3845,7 +3845,7 @@ dependencies = [ [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-rc.1" +version = "3.0.0-develop" dependencies = [ "camino", "derive_more", @@ -3862,7 +3862,7 @@ dependencies = [ [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-rc.1" +version = "3.0.0-develop" dependencies = [ "criterion", "thiserror", @@ -3870,7 +3870,7 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-rc.1" +version = "3.0.0-develop" dependencies = [ "thiserror", "tracing", @@ -3878,7 +3878,7 @@ dependencies = [ [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-rc.1" +version = "3.0.0-develop" dependencies = [ "aquatic_udp_protocol", "binascii", @@ -3892,7 +3892,7 @@ dependencies = [ [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-rc.1" +version = "3.0.0-develop" dependencies = [ "rand", "torrust-tracker-configuration", @@ -3900,7 +3900,7 @@ dependencies = [ [[package]] name = "torrust-tracker-torrent-repository" -version = "3.0.0-rc.1" 
+version = "3.0.0-develop" dependencies = [ "aquatic_udp_protocol", "async-std", diff --git a/Cargo.toml b/Cargo.toml index 4aa87e6e3..47102a349 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,7 +27,7 @@ license = "AGPL-3.0-only" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version = "1.72" -version = "3.0.0-rc.1" +version = "3.0.0-develop" [dependencies] anyhow = "1" @@ -69,12 +69,12 @@ serde_repr = "0" serde_with = { version = "3", features = ["json"] } thiserror = "1" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-clock = { version = "3.0.0-rc.1", path = "packages/clock" } -torrust-tracker-configuration = { version = "3.0.0-rc.1", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-rc.1", path = "contrib/bencode" } -torrust-tracker-located-error = { version = "3.0.0-rc.1", path = "packages/located-error" } -torrust-tracker-primitives = { version = "3.0.0-rc.1", path = "packages/primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-rc.1", path = "packages/torrent-repository" } +torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } +torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0-develop", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0-develop", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "packages/primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "packages/torrent-repository" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } trace = "0" @@ -90,7 +90,7 @@ ignored = ["crossbeam-skiplist", "dashmap", "figment", "parking_lot", "serde_byt [dev-dependencies] 
local-ip-address = "0" mockall = "0" -torrust-tracker-test-helpers = { version = "3.0.0-rc.1", path = "packages/test-helpers" } +torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "packages/test-helpers" } [workspace] members = [ diff --git a/packages/clock/Cargo.toml b/packages/clock/Cargo.toml index f95c12a0c..2ede678d9 100644 --- a/packages/clock/Cargo.toml +++ b/packages/clock/Cargo.toml @@ -19,6 +19,6 @@ version.workspace = true chrono = { version = "0", default-features = false, features = ["clock"] } lazy_static = "1" -torrust-tracker-primitives = { version = "3.0.0-rc.1", path = "../primitives" } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } [dev-dependencies] diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 7b8b3c3bf..8706679f6 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -23,7 +23,7 @@ serde_json = { version = "1", features = ["preserve_order"] } serde_with = "3" thiserror = "1" toml = "0" -torrust-tracker-located-error = { version = "3.0.0-rc.1", path = "../located-error" } +torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } url = "2" [dev-dependencies] diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index ccf08b570..ad291d209 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -16,4 +16,4 @@ version.workspace = true [dependencies] rand = "0" -torrust-tracker-configuration = { version = "3.0.0-rc.1", path = "../configuration" } +torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 0650d608f..32c324538 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -22,9 +22,9 @@ dashmap = "6" futures = "0" parking_lot = "0" tokio = { version = 
"1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-clock = { version = "3.0.0-rc.1", path = "../clock" } -torrust-tracker-configuration = { version = "3.0.0-rc.1", path = "../configuration" } -torrust-tracker-primitives = { version = "3.0.0-rc.1", path = "../primitives" } +torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } +torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } zerocopy = "0" [dev-dependencies] From ae5ea1ea0dfe2c1a9a9d2277affee78912a8c815 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 2 Oct 2024 08:28:07 +0100 Subject: [PATCH 002/802] docs: fix link to containers docs --- src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index d242ac80e..5d7c92ae2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -155,7 +155,7 @@ //! ## Run with docker //! //! You can run the tracker with a pre-built docker image. Please refer to the -//! [tracker docker documentation](https://github.com/torrust/torrust-tracker/tree/develop/docker). +//! [tracker docker documentation](https://github.com/torrust/torrust-tracker/blob/develop/docs/containers.md). //! //! # Configuration //! @@ -214,7 +214,7 @@ //! of the `tracker.toml` file. //! //! The env var contains the same data as the `tracker.toml`. It's particularly -//! useful in you are [running the tracker with docker](https://github.com/torrust/torrust-tracker/tree/develop/docker). +//! useful in you are [running the tracker with docker](https://github.com/torrust/torrust-tracker/blob/develop/docs/containers.md). //! //! > NOTICE: The `TORRUST_TRACKER_CONFIG_TOML` env var has priority over the `tracker.toml` file. //! 
From 6b2d8e8372b002cf501f244f4d1b8ffeda8983d9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 2 Oct 2024 09:00:27 +0100 Subject: [PATCH 003/802] fix: clippy errors --- packages/clock/src/clock/stopped/mod.rs | 1 - packages/clock/src/conv/mod.rs | 1 - 2 files changed, 2 deletions(-) diff --git a/packages/clock/src/clock/stopped/mod.rs b/packages/clock/src/clock/stopped/mod.rs index 57655ab75..5d0b2ec4e 100644 --- a/packages/clock/src/clock/stopped/mod.rs +++ b/packages/clock/src/clock/stopped/mod.rs @@ -1,6 +1,5 @@ /// Trait for types that can be used as a timestamp clock stopped /// at a given time. - #[allow(clippy::module_name_repetitions)] pub struct StoppedClock {} diff --git a/packages/clock/src/conv/mod.rs b/packages/clock/src/conv/mod.rs index 894083061..0ac278171 100644 --- a/packages/clock/src/conv/mod.rs +++ b/packages/clock/src/conv/mod.rs @@ -48,7 +48,6 @@ pub fn convert_from_timestamp_to_datetime_utc(duration: DurationSinceUnixEpoch) } #[cfg(test)] - mod tests { use chrono::DateTime; use torrust_tracker_primitives::DurationSinceUnixEpoch; From 9341f2cd5db3fe555b79bddcf586066d0980cc74 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 3 Oct 2024 11:40:38 +0200 Subject: [PATCH 004/802] ci: temp allow clipply lint: needless_return --- .github/workflows/testing.yaml | 2 +- Cargo.toml | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index abe6f0a60..124b13b5a 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -69,7 +69,7 @@ jobs: - id: lint name: Run Lint Checks - run: cargo clippy --tests --benches --examples --workspace --all-targets --all-features -- -D clippy::correctness -D clippy::suspicious -D clippy::complexity -D clippy::perf -D clippy::style -D clippy::pedantic + run: cargo clippy --tests --benches --examples --workspace --all-targets --all-features - id: docs name: Lint Documentation diff --git a/Cargo.toml 
b/Cargo.toml index 47102a349..f6ac9aafd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -115,3 +115,14 @@ opt-level = 3 [profile.release-debug] debug = true inherits = "release" + +[lints.clippy] +complexity = { level = "deny", priority = -1 } +correctness = { level = "deny", priority = -1 } +pedantic = { level = "deny", priority = -1 } +perf = { level = "deny", priority = -1 } +style = { level = "deny", priority = -1 } +suspicious = { level = "deny", priority = -1 } + +# temp allow this lint +needless_return = "allow" From 8eacbe3481c6a5613874f383076d82e7d2fe1867 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 3 Oct 2024 11:42:21 +0200 Subject: [PATCH 005/802] ci: fix toolchain bug with release --- .github/workflows/deployment.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 6aa66e985..e30eccc71 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -35,6 +35,10 @@ jobs: needs: test runs-on: ubuntu-latest + strategy: + matrix: + toolchain: [stable] + steps: - id: checkout name: Checkout Repository From 7dda918c245700691d93613e874f0f20f48ac915 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 3 Oct 2024 11:59:54 +0200 Subject: [PATCH 006/802] vscode: update clippy to include lints from cargo manifest --- .vscode/settings.json | 30 ++++++++---------------------- 1 file changed, 8 insertions(+), 22 deletions(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index caa48dd01..d27d562e8 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -2,34 +2,20 @@ "[rust]": { "editor.formatOnSave": true }, - "[ignore]": { "rust-analyzer.cargo.extraEnv" : { - "RUSTFLAGS": "-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests", - "RUSTDOCFLAGS": "-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C 
panic=abort -Z panic_abort_tests", - "CARGO_INCREMENTAL": "0", - "RUST_BACKTRACE": "1" - }}, + "[ignore]": { + "rust-analyzer.cargo.extraEnv": { + "RUSTFLAGS": "-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests", + "RUSTDOCFLAGS": "-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests", + "CARGO_INCREMENTAL": "0", + "RUST_BACKTRACE": "1" + } + }, "rust-analyzer.checkOnSave": true, "rust-analyzer.check.command": "clippy", "rust-analyzer.check.allTargets": true, - "rust-analyzer.check.extraArgs": [ - "--", - "-D", - "clippy::correctness", - "-D", - "clippy::suspicious", - "-W", - "clippy::complexity", - "-W", - "clippy::perf", - "-W", - "clippy::style", - "-W", - "clippy::pedantic" - ], "evenBetterToml.formatter.allowedBlankLines": 1, "evenBetterToml.formatter.columnWidth": 130, "evenBetterToml.formatter.trailingNewline": true, "evenBetterToml.formatter.reorderKeys": true, "evenBetterToml.formatter.reorderArrays": true, - } \ No newline at end of file From f95aac2338a6d96ed319d474923682b8ce14d00b Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Thu, 3 Oct 2024 12:04:57 +0200 Subject: [PATCH 007/802] cargo: remove unused trace dependancy --- Cargo.lock | 12 ------------ Cargo.toml | 1 - 2 files changed, 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d43356ca4..3d0645ebf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3826,7 +3826,6 @@ dependencies = [ "torrust-tracker-torrent-repository", "tower 0.4.13", "tower-http", - "trace", "tracing", "tracing-subscriber", "url", @@ -3982,17 +3981,6 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" -[[package]] -name = "trace" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9ad0c048e114d19d1140662762bfdb10682f3bc806d8be18af846600214dd9af" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "tracing" version = "0.1.40" diff --git a/Cargo.toml b/Cargo.toml index f6ac9aafd..6fd61b6e6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -77,7 +77,6 @@ torrust-tracker-primitives = { version = "3.0.0-develop", path = "packages/primi torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "packages/torrent-repository" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } -trace = "0" tracing = "0" tracing-subscriber = { version = "0", features = ["json"] } url = { version = "2", features = ["serde"] } From c9f4dfdc3b0cba23ed508a2273beaf057d2433a5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 4 Oct 2024 17:41:57 +0100 Subject: [PATCH 008/802] docs: update release process --- docs/release_process.md | 41 ++++++++++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/docs/release_process.md b/docs/release_process.md index 73b0a8827..f9d1cce71 100644 --- a/docs/release_process.md +++ b/docs/release_process.md @@ -1,11 +1,12 @@ -# Torrust Tracker Release Process (v2.2.2) +# Torrust Tracker Release Process (v2.2.2) + +## Version -## Version: > **The `[semantic version]` is bumped according to releases, new features, and breaking changes.** > > *The `develop` branch uses the (semantic version) suffix `-develop`.* -## Process: +## Process **Note**: this guide assumes that the your git `torrust` remote is like this: @@ -20,18 +21,18 @@ git remote show torrust ... ``` +### 1. The `develop` branch is ready for a release -### 1. The `develop` branch is ready for a release. The `develop` branch should have the version `[semantic version]-develop` that is ready to be released. -### 2. Stage `develop` HEAD for merging into the `main` branch: +### 2. 
Stage `develop` HEAD for merging into the `main` branch ```sh git fetch --all git push --force torrust develop:staging/main ``` -### 3. Create Release Commit: +### 3. Create Release Commit ```sh git stash @@ -43,13 +44,13 @@ git commit -m "release: version [semantic version]" git push torrust ``` -### 4. Create and Merge Pull Request from `staging/main` into `main` branch. +### 4. Create and Merge Pull Request from `staging/main` into `main` branch Pull request title format: "Release Version `[semantic version]`". This pull request merges the new version into the `main` branch. -### 5. Push new version from `main` HEAD to `releases/v[semantic version]` branch: +### 5. Push new version from `main` HEAD to `releases/v[semantic version]` branch ```sh git fetch --all @@ -58,7 +59,7 @@ git push torrust main:releases/v[semantic version] > **Check that the deployment is successful!** -### 6. Create Release Tag: +### 6. Create Release Tag ```sh git switch releases/v[semantic version] @@ -66,17 +67,31 @@ git tag --sign v[semantic version] git push --tags torrust ``` -### 7. Create Release on Github from Tag. 
+Make sure the [deployment](https://github.com/torrust/torrust-tracker/actions/workflows/deployment.yaml) workflow was successfully executed and the new version for the following crates were published: + +- [torrust-tracker-contrib-bencode](https://crates.io/crates/torrust-tracker-contrib-bencode) +- [torrust-tracker-located-error](https://crates.io/crates/torrust-tracker-located-error) +- [torrust-tracker-primitives](https://crates.io/crates/torrust-tracker-primitives) +- [torrust-tracker-clock](https://crates.io/crates/torrust-tracker-clock) +- [torrust-tracker-configuration](https://crates.io/crates/torrust-tracker-configuration) +- [torrust-tracker-torrent-repository](https://crates.io/crates/torrust-tracker-torrent-repository) +- [torrust-tracker-test-helpers](https://crates.io/crates/torrust-tracker-test-helpers) +- [torrust-tracker](https://crates.io/crates/torrust-tracker) + +### 7. Create Release on Github from Tag + This is for those who wish to download the source code. -### 8. Stage `main` HEAD for merging into the `develop` branch: +### 8. Stage `main` HEAD for merging into the `develop` branch + Merge release back into the develop branch. ```sh git fetch --all git push --force torrust main:staging/develop ``` -### 9. Create Comment that bumps next development version: + +### 9. Create Comment that bumps next development version ```sh git stash @@ -88,7 +103,7 @@ git commit -m "develop: bump to version (next)[semantic version]-develop" git push torrust ``` -### 10. Create and Merge Pull Request from `staging/develop` into `develop` branch. +### 10. Create and Merge Pull Request from `staging/develop` into `develop` branch Pull request title format: "Version `[semantic version]` was Released". 
From a34f66e1cd422c0fdbcb01da7c6b14bee7b27055 Mon Sep 17 00:00:00 2001 From: abstralexis Date: Thu, 17 Oct 2024 16:48:57 +0100 Subject: [PATCH 009/802] Fix #1040: `continue` when finding errors --- src/console/clients/checker/checks/udp.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/console/clients/checker/checks/udp.rs b/src/console/clients/checker/checks/udp.rs index 4044b4c52..21bdcd1b7 100644 --- a/src/console/clients/checker/checks/udp.rs +++ b/src/console/clients/checker/checks/udp.rs @@ -50,7 +50,7 @@ pub async fn run(udp_trackers: Vec, timeout: Duration) -> Vec { checks.results.push((Check::Setup, Err(err))); results.push(Err(checks)); - break; + continue; } }; @@ -65,7 +65,7 @@ pub async fn run(udp_trackers: Vec, timeout: Duration) -> Vec { checks.results.push((Check::Connect, Err(err))); results.push(Err(checks)); - break; + continue; } }; From 7d7dba500c15ddf971aa1c564259068f7967c708 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 30 Oct 2024 16:27:30 +0000 Subject: [PATCH 010/802] feat: add dep bittorrent-primitives --- Cargo.lock | 31 ++++++++++++++++++++------ Cargo.toml | 1 + packages/primitives/Cargo.toml | 1 + packages/torrent-repository/Cargo.toml | 1 + 4 files changed, 27 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3d0645ebf..9dade94be 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -602,6 +602,20 @@ version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +[[package]] +name = "bittorrent-primitives" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc1bd0462f0af0b57abd5f5f8f32b904ba0a17cc8be1714db160a054552f242" +dependencies = [ + "aquatic_udp_protocol", + "binascii", + "serde", + "serde_json", + "thiserror", + "zerocopy", +] + [[package]] name = "bitvec" version = "1.0.1" @@ -3269,9 +3283,9 @@ dependencies = [ [[package]] name = 
"serde_json" -version = "1.0.128" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ "indexmap 2.5.0", "itoa", @@ -3592,18 +3606,18 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.64" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" +checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.64" +version = "1.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" +checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" dependencies = [ "proc-macro2", "quote", @@ -3782,6 +3796,7 @@ dependencies = [ "axum-client-ip", "axum-extra", "axum-server", + "bittorrent-primitives", "camino", "chrono", "clap", @@ -3824,7 +3839,7 @@ dependencies = [ "torrust-tracker-primitives", "torrust-tracker-test-helpers", "torrust-tracker-torrent-repository", - "tower 0.4.13", + "tower 0.5.1", "tower-http", "tracing", "tracing-subscriber", @@ -3881,6 +3896,7 @@ version = "3.0.0-develop" dependencies = [ "aquatic_udp_protocol", "binascii", + "bittorrent-primitives", "derive_more", "serde", "tdyne-peer-id", @@ -3903,6 +3919,7 @@ version = "3.0.0-develop" dependencies = [ "aquatic_udp_protocol", "async-std", + "bittorrent-primitives", "criterion", "crossbeam-skiplist", "dashmap", diff --git a/Cargo.toml b/Cargo.toml index 6fd61b6e6..d69fa3e5e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,6 +36,7 @@ axum = { version = "0", features = ["macros"] } axum-client-ip = "0" 
axum-extra = { version = "0", features = ["query"] } axum-server = { version = "0", features = ["tls-rustls"] } +bittorrent-primitives = "0.1.0" camino = { version = "1", features = ["serde", "serde1"] } chrono = { version = "0", default-features = false, features = ["clock"] } clap = { version = "4", features = ["derive", "env"] } diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index 02a53e3b7..4b5abc8f3 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -17,6 +17,7 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" binascii = "0" +bittorrent-primitives = "0.1.0" derive_more = { version = "1", features = ["constructor"] } serde = { version = "1", features = ["derive"] } tdyne-peer-id = "1" diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 32c324538..0933457d3 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -17,6 +17,7 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" +bittorrent-primitives = "0.1.0" crossbeam-skiplist = "0" dashmap = "6" futures = "0" From 7fe648c0dd7a3ae63be061ce998b7f22e7bd00b1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 30 Oct 2024 16:42:37 +0000 Subject: [PATCH 011/802] refactor: replace InfoHash with external extracted crate The `InfoHash` struct has been extracted into a new crate to be reused in other projects like the Torrust Index: https://github.com/torrust/bittorrent-primitives --- contrib/bencode/src/access/bencode.rs | 2 +- packages/located-error/src/lib.rs | 6 +- packages/primitives/src/info_hash.rs | 220 ------------- packages/primitives/src/lib.rs | 11 +- .../benches/helpers/asyn.rs | 2 +- .../benches/helpers/sync.rs | 2 +- .../benches/helpers/utils.rs | 2 +- .../src/repository/dash_map_mutex_std.rs | 2 +- .../torrent-repository/src/repository/mod.rs | 2 +- .../src/repository/rw_lock_std.rs | 4 +- 
.../src/repository/rw_lock_std_mutex_std.rs | 2 +- .../src/repository/rw_lock_std_mutex_tokio.rs | 2 +- .../src/repository/rw_lock_tokio.rs | 7 +- .../src/repository/rw_lock_tokio_mutex_std.rs | 2 +- .../repository/rw_lock_tokio_mutex_tokio.rs | 2 +- .../src/repository/skip_map_mutex_std.rs | 2 +- .../torrent-repository/tests/common/repo.rs | 2 +- .../tests/repository/mod.rs | 2 +- src/console/clients/checker/checks/http.rs | 2 +- src/console/clients/http/app.rs | 2 +- src/console/clients/udp/app.rs | 2 +- src/console/clients/udp/checker.rs | 2 +- src/core/databases/mod.rs | 2 +- src/core/databases/mysql.rs | 2 +- src/core/databases/sqlite.rs | 2 +- src/core/error.rs | 2 +- src/core/mod.rs | 16 +- src/core/services/torrent.rs | 6 +- .../apis/v1/context/torrent/handlers.rs | 2 +- .../v1/context/torrent/resources/torrent.rs | 2 +- .../apis/v1/context/whitelist/handlers.rs | 2 +- src/servers/http/percent_encoding.rs | 8 +- .../http/v1/extractors/announce_request.rs | 2 +- .../http/v1/extractors/scrape_request.rs | 2 +- src/servers/http/v1/handlers/announce.rs | 2 +- src/servers/http/v1/handlers/scrape.rs | 2 +- src/servers/http/v1/requests/announce.rs | 6 +- src/servers/http/v1/requests/scrape.rs | 4 +- src/servers/http/v1/responses/scrape.rs | 4 +- src/servers/http/v1/services/announce.rs | 4 +- src/servers/http/v1/services/scrape.rs | 4 +- src/servers/udp/handlers.rs | 2 +- src/servers/udp/logging.rs | 2 +- src/servers/udp/mod.rs | 2 +- src/shared/bit_torrent/info_hash.rs | 288 ------------------ src/shared/bit_torrent/mod.rs | 1 - .../tracker/http/client/requests/announce.rs | 2 +- .../tracker/http/client/requests/scrape.rs | 2 +- tests/servers/api/environment.rs | 2 +- .../servers/api/v1/contract/context/stats.rs | 2 +- .../api/v1/contract/context/torrent.rs | 2 +- .../api/v1/contract/context/whitelist.rs | 2 +- tests/servers/http/environment.rs | 2 +- tests/servers/http/requests/announce.rs | 2 +- tests/servers/http/requests/scrape.rs | 2 +- 
tests/servers/http/v1/contract.rs | 12 +- tests/servers/udp/environment.rs | 2 +- 57 files changed, 85 insertions(+), 598 deletions(-) delete mode 100644 packages/primitives/src/info_hash.rs delete mode 100644 src/shared/bit_torrent/info_hash.rs diff --git a/contrib/bencode/src/access/bencode.rs b/contrib/bencode/src/access/bencode.rs index ee90296e2..728535a98 100644 --- a/contrib/bencode/src/access/bencode.rs +++ b/contrib/bencode/src/access/bencode.rs @@ -50,7 +50,7 @@ pub trait BRefAccessExt<'a>: BRefAccess { fn bytes_ext(&self) -> Option<&'a [u8]>; } -impl<'a, T> BRefAccess for &'a T +impl BRefAccess for &T where T: BRefAccess, { diff --git a/packages/located-error/src/lib.rs b/packages/located-error/src/lib.rs index 3cba6042d..c30043cd3 100644 --- a/packages/located-error/src/lib.rs +++ b/packages/located-error/src/lib.rs @@ -50,7 +50,7 @@ where location: Box>, } -impl<'a, E> std::fmt::Display for LocatedError<'a, E> +impl std::fmt::Display for LocatedError<'_, E> where E: Error + ?Sized + Send + Sync, { @@ -59,7 +59,7 @@ where } } -impl<'a, E> Error for LocatedError<'a, E> +impl Error for LocatedError<'_, E> where E: Error + ?Sized + Send + Sync + 'static, { @@ -68,7 +68,7 @@ where } } -impl<'a, E> Clone for LocatedError<'a, E> +impl Clone for LocatedError<'_, E> where E: Error + ?Sized + Send + Sync, { diff --git a/packages/primitives/src/info_hash.rs b/packages/primitives/src/info_hash.rs deleted file mode 100644 index 61b40a746..000000000 --- a/packages/primitives/src/info_hash.rs +++ /dev/null @@ -1,220 +0,0 @@ -use std::hash::{DefaultHasher, Hash, Hasher}; -use std::ops::{Deref, DerefMut}; -use std::panic::Location; - -use thiserror::Error; -use zerocopy::FromBytes; - -/// `BitTorrent` Info Hash v1 -#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct InfoHash { - data: aquatic_udp_protocol::InfoHash, -} - -pub const INFO_HASH_BYTES_LEN: usize = 20; - -impl InfoHash { - /// Create a new `InfoHash` from a byte slice. 
- /// - /// # Panics - /// - /// Will panic if byte slice does not contains the exact amount of bytes need for the `InfoHash`. - #[must_use] - pub fn from_bytes(bytes: &[u8]) -> Self { - let data = aquatic_udp_protocol::InfoHash::read_from(bytes).expect("it should have the exact amount of bytes"); - - Self { data } - } - - /// Returns the `InfoHash` internal byte array. - #[must_use] - pub fn bytes(&self) -> [u8; 20] { - self.0 - } - - /// Returns the `InfoHash` as a hex string. - #[must_use] - pub fn to_hex_string(&self) -> String { - self.to_string() - } -} - -impl Default for InfoHash { - fn default() -> Self { - Self { - data: aquatic_udp_protocol::InfoHash(Default::default()), - } - } -} - -impl From for InfoHash { - fn from(data: aquatic_udp_protocol::InfoHash) -> Self { - Self { data } - } -} - -impl Deref for InfoHash { - type Target = aquatic_udp_protocol::InfoHash; - - fn deref(&self) -> &Self::Target { - &self.data - } -} - -impl DerefMut for InfoHash { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.data - } -} - -impl Ord for InfoHash { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.0.cmp(&other.0) - } -} - -impl PartialOrd for InfoHash { - fn partial_cmp(&self, other: &InfoHash) -> Option { - Some(self.cmp(other)) - } -} - -impl std::fmt::Display for InfoHash { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let mut chars = [0u8; 40]; - binascii::bin2hex(&self.0, &mut chars).expect("failed to hexlify"); - write!(f, "{}", std::str::from_utf8(&chars).unwrap()) - } -} - -impl std::str::FromStr for InfoHash { - type Err = binascii::ConvertError; - - fn from_str(s: &str) -> Result { - let mut i = Self::default(); - if s.len() != 40 { - return Err(binascii::ConvertError::InvalidInputLength); - } - binascii::hex2bin(s.as_bytes(), &mut i.0)?; - Ok(i) - } -} - -impl std::convert::From<&[u8]> for InfoHash { - fn from(data: &[u8]) -> InfoHash { - assert_eq!(data.len(), 20); - let mut ret = Self::default(); - 
ret.0.clone_from_slice(data); - ret - } -} - -/// for testing -impl std::convert::From<&DefaultHasher> for InfoHash { - fn from(data: &DefaultHasher) -> InfoHash { - let n = data.finish().to_le_bytes(); - let bytes = [ - n[0], n[1], n[2], n[3], n[4], n[5], n[6], n[7], n[0], n[1], n[2], n[3], n[4], n[5], n[6], n[7], n[0], n[1], n[2], - n[3], - ]; - let data = aquatic_udp_protocol::InfoHash(bytes); - Self { data } - } -} - -impl std::convert::From<&i32> for InfoHash { - fn from(n: &i32) -> InfoHash { - let n = n.to_le_bytes(); - let bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, n[0], n[1], n[2], n[3]]; - let data = aquatic_udp_protocol::InfoHash(bytes); - Self { data } - } -} - -impl std::convert::From<[u8; 20]> for InfoHash { - fn from(bytes: [u8; 20]) -> Self { - let data = aquatic_udp_protocol::InfoHash(bytes); - Self { data } - } -} - -/// Errors that can occur when converting from a `Vec` to an `InfoHash`. -#[derive(Error, Debug)] -pub enum ConversionError { - /// Not enough bytes for infohash. An infohash is 20 bytes. - #[error("not enough bytes for infohash: {message} {location}")] - NotEnoughBytes { - location: &'static Location<'static>, - message: String, - }, - /// Too many bytes for infohash. An infohash is 20 bytes. - #[error("too many bytes for infohash: {message} {location}")] - TooManyBytes { - location: &'static Location<'static>, - message: String, - }, -} - -impl TryFrom> for InfoHash { - type Error = ConversionError; - - fn try_from(bytes: Vec) -> Result { - if bytes.len() < INFO_HASH_BYTES_LEN { - return Err(ConversionError::NotEnoughBytes { - location: Location::caller(), - message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, - }); - } - if bytes.len() > INFO_HASH_BYTES_LEN { - return Err(ConversionError::TooManyBytes { - location: Location::caller(), - message: format! 
{"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, - }); - } - Ok(Self::from_bytes(&bytes)) - } -} - -impl serde::ser::Serialize for InfoHash { - fn serialize(&self, serializer: S) -> Result { - let mut buffer = [0u8; 40]; - let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); - let str_out = std::str::from_utf8(bytes_out).unwrap(); - serializer.serialize_str(str_out) - } -} - -impl<'de> serde::de::Deserialize<'de> for InfoHash { - fn deserialize>(des: D) -> Result { - des.deserialize_str(InfoHashVisitor) - } -} - -struct InfoHashVisitor; - -impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { - type Value = InfoHash; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "a 40 character long hash") - } - - fn visit_str(self, v: &str) -> Result { - if v.len() != 40 { - return Err(serde::de::Error::invalid_value( - serde::de::Unexpected::Str(v), - &"a 40 character long string", - )); - } - - let mut res = InfoHash::default(); - - if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { - return Err(serde::de::Error::invalid_value( - serde::de::Unexpected::Str(v), - &"a hexadecimal string", - )); - }; - Ok(res) - } -} diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index 08fc58976..d5c6fc525 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -4,17 +4,16 @@ //! which is a `BitTorrent` tracker server. These structures are used not only //! by the tracker server crate, but also by other crates in the Torrust //! ecosystem. -use std::collections::BTreeMap; -use std::time::Duration; - -use info_hash::InfoHash; - -pub mod info_hash; pub mod pagination; pub mod peer; pub mod swarm_metadata; pub mod torrent_metrics; +use std::collections::BTreeMap; +use std::time::Duration; + +use bittorrent_primitives::info_hash::InfoHash; + /// Duration since the Unix Epoch. 
pub type DurationSinceUnixEpoch = Duration; diff --git a/packages/torrent-repository/benches/helpers/asyn.rs b/packages/torrent-repository/benches/helpers/asyn.rs index 08862abc8..dec3984c6 100644 --- a/packages/torrent-repository/benches/helpers/asyn.rs +++ b/packages/torrent-repository/benches/helpers/asyn.rs @@ -1,8 +1,8 @@ use std::sync::Arc; use std::time::{Duration, Instant}; +use bittorrent_primitives::info_hash::InfoHash; use futures::stream::FuturesUnordered; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_torrent_repository::repository::RepositoryAsync; use super::utils::{generate_unique_info_hashes, DEFAULT_PEER}; diff --git a/packages/torrent-repository/benches/helpers/sync.rs b/packages/torrent-repository/benches/helpers/sync.rs index 77055911d..048e709bc 100644 --- a/packages/torrent-repository/benches/helpers/sync.rs +++ b/packages/torrent-repository/benches/helpers/sync.rs @@ -1,8 +1,8 @@ use std::sync::Arc; use std::time::{Duration, Instant}; +use bittorrent_primitives::info_hash::InfoHash; use futures::stream::FuturesUnordered; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_torrent_repository::repository::Repository; use super::utils::{generate_unique_info_hashes, DEFAULT_PEER}; diff --git a/packages/torrent-repository/benches/helpers/utils.rs b/packages/torrent-repository/benches/helpers/utils.rs index e21ac7332..51b09ec0f 100644 --- a/packages/torrent-repository/benches/helpers/utils.rs +++ b/packages/torrent-repository/benches/helpers/utils.rs @@ -2,7 +2,7 @@ use std::collections::HashSet; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; -use torrust_tracker_primitives::info_hash::InfoHash; +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; use zerocopy::I64; diff --git 
a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs index 4354c12ec..54a83aeb4 100644 --- a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs @@ -1,8 +1,8 @@ use std::sync::Arc; +use bittorrent_primitives::info_hash::InfoHash; use dashmap::DashMap; use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; diff --git a/packages/torrent-repository/src/repository/mod.rs b/packages/torrent-repository/src/repository/mod.rs index f198288f8..14f03ed9d 100644 --- a/packages/torrent-repository/src/repository/mod.rs +++ b/packages/torrent-repository/src/repository/mod.rs @@ -1,5 +1,5 @@ +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; diff --git a/packages/torrent-repository/src/repository/rw_lock_std.rs b/packages/torrent-repository/src/repository/rw_lock_std.rs index 5439fdd79..409a16498 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std.rs @@ -1,5 +1,5 @@ +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; @@ -21,7 +21,7 @@ impl 
RwLockStd { /// Panics if unable to get a lock. pub fn write( &self, - ) -> std::sync::RwLockWriteGuard<'_, std::collections::BTreeMap> { + ) -> std::sync::RwLockWriteGuard<'_, std::collections::BTreeMap> { self.torrents.write().expect("it should get lock") } } diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs index 7d58b0b10..8814f09ed 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs @@ -1,7 +1,7 @@ use std::sync::Arc; +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs index 90451ca9f..46f4a9567 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs @@ -2,10 +2,10 @@ use std::iter::zip; use std::pin::Pin; use std::sync::Arc; +use bittorrent_primitives::info_hash::InfoHash; use futures::future::join_all; use futures::{Future, FutureExt}; use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio.rs index baaa01232..ce6646e92 100644 --- 
a/packages/torrent-repository/src/repository/rw_lock_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio.rs @@ -1,5 +1,5 @@ +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; @@ -19,10 +19,7 @@ impl RwLockTokio { pub fn write( &self, ) -> impl std::future::Future< - Output = tokio::sync::RwLockWriteGuard< - '_, - std::collections::BTreeMap, - >, + Output = tokio::sync::RwLockWriteGuard<'_, std::collections::BTreeMap>, > { self.torrents.write() } diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs index 1887f70c7..7efb093e9 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs @@ -1,7 +1,7 @@ use std::sync::Arc; +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs index 6c9c08a73..e08a6af59 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -1,7 +1,7 @@ use std::sync::Arc; +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; -use 
torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs index dd0d9c1b1..47fe9620a 100644 --- a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs @@ -1,8 +1,8 @@ use std::sync::Arc; +use bittorrent_primitives::info_hash::InfoHash; use crossbeam_skiplist::SkipMap; use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs index f317d0d17..ebd829f3c 100644 --- a/packages/torrent-repository/tests/common/repo.rs +++ b/packages/torrent-repository/tests/common/repo.rs @@ -1,5 +1,5 @@ +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 05d538582..c5cf2059c 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -2,9 +2,9 @@ use std::collections::{BTreeMap, HashSet}; use std::hash::{DefaultHasher, Hash, Hasher}; use aquatic_udp_protocol::{AnnounceEvent, 
NumberOfBytes}; +use bittorrent_primitives::info_hash::InfoHash; use rstest::{fixture, rstest}; use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::PersistentTorrents; diff --git a/src/console/clients/checker/checks/http.rs b/src/console/clients/checker/checks/http.rs index 0904f4e6e..b64297bed 100644 --- a/src/console/clients/checker/checks/http.rs +++ b/src/console/clients/checker/checks/http.rs @@ -1,8 +1,8 @@ use std::str::FromStr as _; use std::time::Duration; +use bittorrent_primitives::info_hash::InfoHash; use serde::Serialize; -use torrust_tracker_primitives::info_hash::InfoHash; use url::Url; use crate::console::clients::http::Error; diff --git a/src/console/clients/http/app.rs b/src/console/clients/http/app.rs index a54db5f8b..6730c027d 100644 --- a/src/console/clients/http/app.rs +++ b/src/console/clients/http/app.rs @@ -17,10 +17,10 @@ use std::str::FromStr; use std::time::Duration; use anyhow::Context; +use bittorrent_primitives::info_hash::InfoHash; use clap::{Parser, Subcommand}; use reqwest::Url; use torrust_tracker_configuration::DEFAULT_TIMEOUT; -use torrust_tracker_primitives::info_hash::InfoHash; use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs index c2ba647b8..a2736c365 100644 --- a/src/console/clients/udp/app.rs +++ b/src/console/clients/udp/app.rs @@ -61,9 +61,9 @@ use std::str::FromStr; use anyhow::Context; use aquatic_udp_protocol::{Response, TransactionId}; +use bittorrent_primitives::info_hash::InfoHash as TorrustInfoHash; use clap::{Parser, Subcommand}; use torrust_tracker_configuration::DEFAULT_TIMEOUT; -use 
torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash; use tracing::level_filters::LevelFilter; use url::Url; diff --git a/src/console/clients/udp/checker.rs b/src/console/clients/udp/checker.rs index 437af33e0..14e94c132 100644 --- a/src/console/clients/udp/checker.rs +++ b/src/console/clients/udp/checker.rs @@ -7,7 +7,7 @@ use aquatic_udp_protocol::{ AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, Response, ScrapeRequest, TransactionId, }; -use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash; +use bittorrent_primitives::info_hash::InfoHash as TorrustInfoHash; use super::Error; use crate::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; diff --git a/src/core/databases/mod.rs b/src/core/databases/mod.rs index f559eb80e..e29ce22e8 100644 --- a/src/core/databases/mod.rs +++ b/src/core/databases/mod.rs @@ -50,7 +50,7 @@ pub mod sqlite; use std::marker::PhantomData; -use torrust_tracker_primitives::info_hash::InfoHash; +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::PersistentTorrents; use self::error::Error; diff --git a/src/core/databases/mysql.rs b/src/core/databases/mysql.rs index 28a5f363b..1b849421b 100644 --- a/src/core/databases/mysql.rs +++ b/src/core/databases/mysql.rs @@ -2,11 +2,11 @@ use std::str::FromStr; use std::time::Duration; +use bittorrent_primitives::info_hash::InfoHash; use r2d2::Pool; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MySqlConnectionManager; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::PersistentTorrents; use super::driver::Driver; diff --git a/src/core/databases/sqlite.rs b/src/core/databases/sqlite.rs index 69470ee04..5bb23bb3e 100644 --- a/src/core/databases/sqlite.rs +++ b/src/core/databases/sqlite.rs @@ -2,11 +2,11 @@ use std::panic::Location; use 
std::str::FromStr; +use bittorrent_primitives::info_hash::InfoHash; use r2d2::Pool; use r2d2_sqlite::rusqlite::params; use r2d2_sqlite::rusqlite::types::Null; use r2d2_sqlite::SqliteConnectionManager; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::{DurationSinceUnixEpoch, PersistentTorrents}; use super::driver::Driver; diff --git a/src/core/error.rs b/src/core/error.rs index d89b030c4..ba87c84c8 100644 --- a/src/core/error.rs +++ b/src/core/error.rs @@ -8,8 +8,8 @@ //! use std::panic::Location; +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_located_error::LocatedError; -use torrust_tracker_primitives::info_hash::InfoHash; use super::auth::ParseKeyError; use super::databases; diff --git a/src/core/mod.rs b/src/core/mod.rs index f12eb9a3d..a41ef2eba 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -63,7 +63,7 @@ //! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; //! use torrust_tracker_primitives::DurationSinceUnixEpoch; //! use torrust_tracker_primitives::peer; -//! use torrust_tracker_primitives::info_hash::InfoHash; +//! use bittorrent_primitives::info_hash::InfoHash; //! //! let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); //! @@ -136,7 +136,7 @@ //! The returned struct is: //! //! ```rust,no_run -//! use torrust_tracker_primitives::info_hash::InfoHash; +//! use bittorrent_primitives::info_hash::InfoHash; //! use std::collections::HashMap; //! //! pub struct ScrapeData { @@ -165,7 +165,7 @@ //! There are two data structures for infohashes: byte arrays and hex strings: //! //! ```rust,no_run -//! use torrust_tracker_primitives::info_hash::InfoHash; +//! use bittorrent_primitives::info_hash::InfoHash; //! use std::str::FromStr; //! //! 
let info_hash: InfoHash = [255u8; 20].into(); @@ -456,6 +456,7 @@ use std::sync::Arc; use std::time::Duration; use auth::PeerKey; +use bittorrent_primitives::info_hash::InfoHash; use databases::driver::Driver; use derive_more::Constructor; use error::PeerKeyError; @@ -464,7 +465,6 @@ use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::v2_0_0::database; use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT}; use torrust_tracker_located_error::Located; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; @@ -1253,15 +1253,15 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; + use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; + use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; - use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration; use crate::core::peer::Peer; use crate::core::services::tracker_factory; use crate::core::{TorrentsMetrics, Tracker}; - use crate::shared::bit_torrent::info_hash::fixture::gen_seeded_infohash; fn public_tracker() -> Tracker { tracker_factory(&configuration::ephemeral_public()) @@ -1716,7 +1716,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr}; - use torrust_tracker_primitives::info_hash::InfoHash; + use bittorrent_primitives::info_hash::InfoHash; use crate::core::tests::the_tracker::{complete_peer, incomplete_peer, public_tracker}; use crate::core::{PeersWanted, ScrapeData, SwarmMetadata}; @@ -1880,7 +1880,7 @@ mod tests { mod handling_an_scrape_request { - use torrust_tracker_primitives::info_hash::InfoHash; + use bittorrent_primitives::info_hash::InfoHash; use 
torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use crate::core::tests::the_tracker::{ diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 3b014982d..e63d2efa2 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -6,7 +6,7 @@ //! - [`get_torrents`]: it returns data about some torrent in bulk excluding the peer list. use std::sync::Arc; -use torrust_tracker_primitives::info_hash::InfoHash; +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::peer; use torrust_tracker_torrent_repository::entry::EntrySync; @@ -125,8 +125,8 @@ mod tests { use std::str::FromStr; use std::sync::Arc; + use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::core::services::torrent::tests::sample_peer; @@ -178,8 +178,8 @@ mod tests { use std::str::FromStr; use std::sync::Arc; + use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::core::services::torrent::tests::sample_peer; diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs index ebca504fd..0ba713f62 100644 --- a/src/servers/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -7,9 +7,9 @@ use std::sync::Arc; use axum::extract::{Path, State}; use axum::response::{IntoResponse, Response}; use axum_extra::extract::Query; +use bittorrent_primitives::info_hash::InfoHash; use serde::{de, Deserialize, Deserializer}; use thiserror::Error; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use super::responses::{torrent_info_response, 
torrent_list_response, torrent_not_known_response}; diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs index 657382c0c..8fbb89418 100644 --- a/src/servers/apis/v1/context/torrent/resources/torrent.rs +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -98,7 +98,7 @@ mod tests { use std::str::FromStr; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; - use torrust_tracker_primitives::info_hash::InfoHash; + use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use super::Torrent; diff --git a/src/servers/apis/v1/context/whitelist/handlers.rs b/src/servers/apis/v1/context/whitelist/handlers.rs index 32e434918..04085f8ab 100644 --- a/src/servers/apis/v1/context/whitelist/handlers.rs +++ b/src/servers/apis/v1/context/whitelist/handlers.rs @@ -5,7 +5,7 @@ use std::sync::Arc; use axum::extract::{Path, State}; use axum::response::Response; -use torrust_tracker_primitives::info_hash::InfoHash; +use bittorrent_primitives::info_hash::InfoHash; use super::responses::{ failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, diff --git a/src/servers/http/percent_encoding.rs b/src/servers/http/percent_encoding.rs index c3243d597..323444cc7 100644 --- a/src/servers/http/percent_encoding.rs +++ b/src/servers/http/percent_encoding.rs @@ -16,7 +16,7 @@ //! - //! - use aquatic_udp_protocol::PeerId; -use torrust_tracker_primitives::info_hash::{self, InfoHash}; +use bittorrent_primitives::info_hash::{self, InfoHash}; use torrust_tracker_primitives::peer; /// Percent decodes a percent encoded infohash. 
Internally an @@ -28,7 +28,7 @@ use torrust_tracker_primitives::peer; /// ```rust /// use std::str::FromStr; /// use torrust_tracker::servers::http::percent_encoding::percent_decode_info_hash; -/// use torrust_tracker_primitives::info_hash::InfoHash; +/// use bittorrent_primitives::info_hash::InfoHash; /// use torrust_tracker_primitives::peer; /// /// let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; @@ -61,7 +61,7 @@ pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result) -> Result) -> AnnounceEvent { mod tests { use aquatic_udp_protocol::PeerId; - use torrust_tracker_primitives::info_hash::InfoHash; + use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index ca4c85207..10f945d70 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -110,7 +110,7 @@ mod tests { use std::net::IpAddr; use std::str::FromStr; - use torrust_tracker_primitives::info_hash::InfoHash; + use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs index 029bdbc01..a9a9f8a76 100644 --- a/src/servers/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -6,9 +6,9 @@ use std::panic::Location; use std::str::FromStr; use aquatic_udp_protocol::{NumberOfBytes, PeerId}; +use bittorrent_primitives::info_hash::{self, InfoHash}; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; -use torrust_tracker_primitives::info_hash::{self, InfoHash}; use torrust_tracker_primitives::peer; use crate::servers::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; @@ -32,7 +32,7 @@ const NUMWANT: &str = 
"numwant"; /// ```rust /// use aquatic_udp_protocol::{NumberOfBytes, PeerId}; /// use torrust_tracker::servers::http::v1::requests::announce::{Announce, Compact, Event}; -/// use torrust_tracker_primitives::info_hash::InfoHash; +/// use bittorrent_primitives::info_hash::InfoHash; /// /// let request = Announce { /// // Mandatory params @@ -379,7 +379,7 @@ mod tests { mod announce_request { use aquatic_udp_protocol::{NumberOfBytes, PeerId}; - use torrust_tracker_primitives::info_hash::InfoHash; + use bittorrent_primitives::info_hash::InfoHash; use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::announce::{ diff --git a/src/servers/http/v1/requests/scrape.rs b/src/servers/http/v1/requests/scrape.rs index c61d3be1f..0a47a4fb4 100644 --- a/src/servers/http/v1/requests/scrape.rs +++ b/src/servers/http/v1/requests/scrape.rs @@ -3,9 +3,9 @@ //! Data structures and logic for parsing the `scrape` request. use std::panic::Location; +use bittorrent_primitives::info_hash::{self, InfoHash}; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; -use torrust_tracker_primitives::info_hash::{self, InfoHash}; use crate::servers::http::percent_encoding::percent_decode_info_hash; use crate::servers::http::v1::query::Query; @@ -84,7 +84,7 @@ mod tests { mod scrape_request { - use torrust_tracker_primitives::info_hash::InfoHash; + use bittorrent_primitives::info_hash::InfoHash; use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::scrape::{Scrape, INFO_HASH}; diff --git a/src/servers/http/v1/responses/scrape.rs b/src/servers/http/v1/responses/scrape.rs index 9690d4392..0aef70cb1 100644 --- a/src/servers/http/v1/responses/scrape.rs +++ b/src/servers/http/v1/responses/scrape.rs @@ -13,7 +13,7 @@ use crate::core::ScrapeData; /// /// ```rust /// use torrust_tracker::servers::http::v1::responses::scrape::Bencoded; -/// use torrust_tracker_primitives::info_hash::InfoHash; +/// use 
bittorrent_primitives::info_hash::InfoHash; /// use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; /// use torrust_tracker::core::ScrapeData; /// @@ -92,7 +92,7 @@ impl IntoResponse for Bencoded { mod tests { mod scrape_response { - use torrust_tracker_primitives::info_hash::InfoHash; + use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use crate::core::ScrapeData; diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 9c5dfdad2..51ec43d56 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -11,7 +11,7 @@ use std::net::IpAddr; use std::sync::Arc; -use torrust_tracker_primitives::info_hash::InfoHash; +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer; use crate::core::{statistics, AnnounceData, PeersWanted, Tracker}; @@ -54,7 +54,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; - use torrust_tracker_primitives::info_hash::InfoHash; + use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 0d561c7bc..f040e0430 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -11,7 +11,7 @@ use std::net::IpAddr; use std::sync::Arc; -use torrust_tracker_primitives::info_hash::InfoHash; +use bittorrent_primitives::info_hash::InfoHash; use crate::core::{statistics, ScrapeData, Tracker}; @@ -62,7 +62,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; - use torrust_tracker_primitives::info_hash::InfoHash; + use bittorrent_primitives::info_hash::InfoHash; use 
torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 69a427e0e..6af634c32 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -10,8 +10,8 @@ use aquatic_udp_protocol::{ ErrorResponse, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_located_error::DynError; -use torrust_tracker_primitives::info_hash::InfoHash; use tracing::{instrument, Level}; use uuid::Uuid; use zerocopy::network_endian::I32; diff --git a/src/servers/udp/logging.rs b/src/servers/udp/logging.rs index 3891278d7..a61668e83 100644 --- a/src/servers/udp/logging.rs +++ b/src/servers/udp/logging.rs @@ -4,7 +4,7 @@ use std::net::SocketAddr; use std::time::Duration; use aquatic_udp_protocol::{Request, Response, TransactionId}; -use torrust_tracker_primitives::info_hash::InfoHash; +use bittorrent_primitives::info_hash::InfoHash; use super::handlers::RequestId; use crate::servers::udp::UDP_TRACKER_LOG_TARGET; diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index 91b19a91d..d41bc8b3f 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -342,7 +342,7 @@ //! > packet. //! //! We are using a wrapper struct for the aquatic [`AnnounceRequest`](aquatic_udp_protocol::request::AnnounceRequest) -//! struct, because we have our internal [`InfoHash`](torrust_tracker_primitives::info_hash::InfoHash) +//! struct, because we have our internal [`InfoHash`](bittorrent_primitives::info_hash::InfoHash) //! struct. //! //! ```text diff --git a/src/shared/bit_torrent/info_hash.rs b/src/shared/bit_torrent/info_hash.rs deleted file mode 100644 index 506c37758..000000000 --- a/src/shared/bit_torrent/info_hash.rs +++ /dev/null @@ -1,288 +0,0 @@ -//! 
A `BitTorrent` `InfoHash`. It's a unique identifier for a `BitTorrent` torrent. -//! -//! "The 20-byte sha1 hash of the bencoded form of the info value -//! from the metainfo file." -//! -//! See [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) -//! for the official specification. -//! -//! This modules provides a type that can be used to represent infohashes. -//! -//! > **NOTICE**: It only supports Info Hash v1. -//! -//! Typically infohashes are represented as hex strings, but internally they are -//! a 20-byte array. -//! -//! # Calculating the info-hash of a torrent file -//! -//! A sample torrent: -//! -//! - Torrent file: `mandelbrot_2048x2048_infohash_v1.png.torrent` -//! - File: `mandelbrot_2048x2048.png` -//! - Info Hash v1: `5452869be36f9f3350ccee6b4544e7e76caaadab` -//! - Sha1 hash of the info dictionary: `5452869BE36F9F3350CCEE6B4544E7E76CAAADAB` -//! -//! A torrent file is a binary file encoded with [Bencode encoding](https://en.wikipedia.org/wiki/Bencode): -//! -//! ```text -//! 0000000: 6431 303a 6372 6561 7465 6420 6279 3138 d10:created by18 -//! 0000010: 3a71 4269 7474 6f72 7265 6e74 2076 342e :qBittorrent v4. -//! 0000020: 342e 3131 333a 6372 6561 7469 6f6e 2064 4.113:creation d -//! 0000030: 6174 6569 3136 3739 3637 3436 3238 6534 atei1679674628e4 -//! 0000040: 3a69 6e66 6f64 363a 6c65 6e67 7468 6931 :infod6:lengthi1 -//! 0000050: 3732 3230 3465 343a 6e61 6d65 3234 3a6d 72204e4:name24:m -//! 0000060: 616e 6465 6c62 726f 745f 3230 3438 7832 andelbrot_2048x2 -//! 0000070: 3034 382e 706e 6731 323a 7069 6563 6520 048.png12:piece -//! 0000080: 6c65 6e67 7468 6931 3633 3834 6536 3a70 lengthi16384e6:p -//! 0000090: 6965 6365 7332 3230 3a7d 9171 0d9d 4dba ieces220:}.q..M. -//! 00000a0: 889b 5420 54d5 2672 8d5a 863f e121 df77 ..T T.&r.Z.?.!.w -//! 00000b0: c7f7 bb6c 7796 2166 2538 c5d9 cdab 8b08 ...lw.!f%8...... -//! 00000c0: ef8c 249b b2f5 c4cd 2adf 0bc0 0cf0 addf ..$.....*....... -//! 
00000d0: 7290 e5b6 414c 236c 479b 8e9f 46aa 0c0d r...AL#lG...F... -//! 00000e0: 8ed1 97ff ee68 8b5f 34a3 87d7 71c5 a6f9 .....h._4...q... -//! 00000f0: 8e2e a631 7cbd f0f9 e223 f9cc 80af 5400 ...1|....#....T. -//! 0000100: 04f9 8569 1c77 89c1 764e d6aa bf61 a6c2 ...i.w..vN...a.. -//! 0000110: 8099 abb6 5f60 2f40 a825 be32 a33d 9d07 ...._`/@.%.2.=.. -//! 0000120: 0c79 6898 d49d 6349 af20 5866 266f 986b .yh...cI. Xf&o.k -//! 0000130: 6d32 34cd 7d08 155e 1ad0 0009 57ab 303b m24.}..^....W.0; -//! 0000140: 2060 c1dc 1287 d6f3 e745 4f70 6709 3631 `.......EOpg.61 -//! 0000150: 55f2 20f6 6ca5 156f 2c89 9569 1653 817d U. .l..o,..i.S.} -//! 0000160: 31f1 b6bd 3742 cc11 0bb2 fc2b 49a5 85b6 1...7B.....+I... -//! 0000170: fc76 7444 9365 65 .vtD.ee -//! ``` -//! -//! You can generate that output with the command: -//! -//! ```text -//! xxd mandelbrot_2048x2048_infohash_v1.png.torrent -//! ``` -//! -//! And you can show only the bytes (hexadecimal): -//! -//! ```text -//! 6431303a6372656174656420627931383a71426974746f7272656e742076 -//! 342e342e3131333a6372656174696f6e2064617465693136373936373436 -//! 323865343a696e666f64363a6c656e6774686931373232303465343a6e61 -//! 6d6532343a6d616e64656c62726f745f3230343878323034382e706e6731 -//! 323a7069656365206c656e67746869313633383465363a70696563657332 -//! 32303a7d91710d9d4dba889b542054d526728d5a863fe121df77c7f7bb6c -//! 779621662538c5d9cdab8b08ef8c249bb2f5c4cd2adf0bc00cf0addf7290 -//! e5b6414c236c479b8e9f46aa0c0d8ed197ffee688b5f34a387d771c5a6f9 -//! 8e2ea6317cbdf0f9e223f9cc80af540004f985691c7789c1764ed6aabf61 -//! a6c28099abb65f602f40a825be32a33d9d070c796898d49d6349af205866 -//! 266f986b6d3234cd7d08155e1ad0000957ab303b2060c1dc1287d6f3e745 -//! 4f706709363155f220f66ca5156f2c8995691653817d31f1b6bd3742cc11 -//! 0bb2fc2b49a585b6fc767444936565 -//! ``` -//! -//! You can generate that output with the command: -//! -//! ```text -//! `xxd -ps mandelbrot_2048x2048_infohash_v1.png.torrent`. -//! ``` -//! -//! 
The same data can be represented in a JSON format: -//! -//! ```json -//! { -//! "created by": "qBittorrent v4.4.1", -//! "creation date": 1679674628, -//! "info": { -//! "length": 172204, -//! "name": "mandelbrot_2048x2048.png", -//! "piece length": 16384, -//! "pieces": "7D 91 71 0D 9D 4D BA 88 9B 54 20 54 D5 26 72 8D 5A 86 3F E1 21 DF 77 C7 F7 BB 6C 77 96 21 66 25 38 C5 D9 CD AB 8B 08 EF 8C 24 9B B2 F5 C4 CD 2A DF 0B C0 0C F0 AD DF 72 90 E5 B6 41 4C 23 6C 47 9B 8E 9F 46 AA 0C 0D 8E D1 97 FF EE 68 8B 5F 34 A3 87 D7 71 C5 A6 F9 8E 2E A6 31 7C BD F0 F9 E2 23 F9 CC 80 AF 54 00 04 F9 85 69 1C 77 89 C1 76 4E D6 AA BF 61 A6 C2 80 99 AB B6 5F 60 2F 40 A8 25 BE 32 A3 3D 9D 07 0C 79 68 98 D4 9D 63 49 AF 20 58 66 26 6F 98 6B 6D 32 34 CD 7D 08 15 5E 1A D0 00 09 57 AB 30 3B 20 60 C1 DC 12 87 D6 F3 E7 45 4F 70 67 09 36 31 55 F2 20 F6 6C A5 15 6F 2C 89 95 69 16 53 81 7D 31 F1 B6 BD 37 42 CC 11 0B B2 FC 2B 49 A5 85 B6 FC 76 74 44 93" -//! } -//! } -//! ``` -//! -//! The JSON object was generated with: -//! -//! As you can see, there is a `info` attribute: -//! -//! ```json -//! { -//! "length": 172204, -//! "name": "mandelbrot_2048x2048.png", -//! "piece length": 16384, -//! "pieces": "7D 91 71 0D 9D 4D BA 88 9B 54 20 54 D5 26 72 8D 5A 86 3F E1 21 DF 77 C7 F7 BB 6C 77 96 21 66 25 38 C5 D9 CD AB 8B 08 EF 8C 24 9B B2 F5 C4 CD 2A DF 0B C0 0C F0 AD DF 72 90 E5 B6 41 4C 23 6C 47 9B 8E 9F 46 AA 0C 0D 8E D1 97 FF EE 68 8B 5F 34 A3 87 D7 71 C5 A6 F9 8E 2E A6 31 7C BD F0 F9 E2 23 F9 CC 80 AF 54 00 04 F9 85 69 1C 77 89 C1 76 4E D6 AA BF 61 A6 C2 80 99 AB B6 5F 60 2F 40 A8 25 BE 32 A3 3D 9D 07 0C 79 68 98 D4 9D 63 49 AF 20 58 66 26 6F 98 6B 6D 32 34 CD 7D 08 15 5E 1A D0 00 09 57 AB 30 3B 20 60 C1 DC 12 87 D6 F3 E7 45 4F 70 67 09 36 31 55 F2 20 F6 6C A5 15 6F 2C 89 95 69 16 53 81 7D 31 F1 B6 BD 37 42 CC 11 0B B2 FC 2B 49 A5 85 B6 FC 76 74 44 93" -//! } -//! ``` -//! -//! The infohash is the [SHA1](https://en.wikipedia.org/wiki/SHA-1) hash -//! of the `info` attribute. 
That is, the SHA1 hash of: -//! -//! ```text -//! 64363a6c656e6774686931373232303465343a6e61 -//! d6532343a6d616e64656c62726f745f3230343878323034382e706e6731 -//! 23a7069656365206c656e67746869313633383465363a70696563657332 -//! 2303a7d91710d9d4dba889b542054d526728d5a863fe121df77c7f7bb6c -//! 79621662538c5d9cdab8b08ef8c249bb2f5c4cd2adf0bc00cf0addf7290 -//! 5b6414c236c479b8e9f46aa0c0d8ed197ffee688b5f34a387d771c5a6f9 -//! e2ea6317cbdf0f9e223f9cc80af540004f985691c7789c1764ed6aabf61 -//! 6c28099abb65f602f40a825be32a33d9d070c796898d49d6349af205866 -//! 66f986b6d3234cd7d08155e1ad0000957ab303b2060c1dc1287d6f3e745 -//! f706709363155f220f66ca5156f2c8995691653817d31f1b6bd3742cc11 -//! bb2fc2b49a585b6fc7674449365 -//! ``` -//! -//! You can hash that byte string with -//! -//! The result is a 20-char string: `5452869BE36F9F3350CCEE6B4544E7E76CAAADAB` - -use torrust_tracker_primitives::info_hash::InfoHash; - -pub mod fixture { - use std::hash::{DefaultHasher, Hash, Hasher}; - - use super::InfoHash; - - /// Generate as semi-stable pseudo-random infohash - /// - /// Note: If the [`DefaultHasher`] implementation changes - /// so will the resulting info-hashes. - /// - /// The results should not be relied upon between versions. 
- #[must_use] - pub fn gen_seeded_infohash(seed: &u64) -> InfoHash { - let mut buf_a: [[u8; 8]; 4] = Default::default(); - let mut buf_b = InfoHash::default(); - - let mut hasher = DefaultHasher::new(); - seed.hash(&mut hasher); - - for u in &mut buf_a { - seed.hash(&mut hasher); - *u = hasher.finish().to_le_bytes(); - } - - for (a, b) in buf_a.iter().flat_map(|a| a.iter()).zip(buf_b.0.iter_mut()) { - *b = *a; - } - - buf_b - } -} - -#[cfg(test)] -mod tests { - - use std::str::FromStr; - - use serde::{Deserialize, Serialize}; - use serde_json::json; - - use super::InfoHash; - - #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] - struct ContainingInfoHash { - pub info_hash: InfoHash, - } - - #[test] - fn an_info_hash_can_be_created_from_a_valid_40_utf8_char_string_representing_an_hexadecimal_value() { - let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"); - assert!(info_hash.is_ok()); - } - - #[test] - fn an_info_hash_can_not_be_created_from_a_utf8_string_representing_a_not_valid_hexadecimal_value() { - let info_hash = InfoHash::from_str("GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG"); - assert!(info_hash.is_err()); - } - - #[test] - fn an_info_hash_can_only_be_created_from_a_40_utf8_char_string() { - let info_hash = InfoHash::from_str(&"F".repeat(39)); - assert!(info_hash.is_err()); - - let info_hash = InfoHash::from_str(&"F".repeat(41)); - assert!(info_hash.is_err()); - } - - #[test] - fn an_info_hash_should_by_displayed_like_a_40_utf8_lowercased_char_hex_string() { - let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); - - let output = format!("{info_hash}"); - - assert_eq!(output, "ffffffffffffffffffffffffffffffffffffffff"); - } - - #[test] - fn an_info_hash_should_return_its_a_40_utf8_lowercased_char_hex_representations_as_string() { - let info_hash = InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(); - - assert_eq!(info_hash.to_hex_string(), 
"ffffffffffffffffffffffffffffffffffffffff"); - } - - #[test] - fn an_info_hash_can_be_created_from_a_valid_20_byte_array_slice() { - let info_hash: InfoHash = [255u8; 20].as_slice().into(); - - assert_eq!( - info_hash, - InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() - ); - } - - #[test] - fn an_info_hash_can_be_created_from_a_valid_20_byte_array() { - let info_hash: InfoHash = [255u8; 20].into(); - - assert_eq!( - info_hash, - InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() - ); - } - - #[test] - fn an_info_hash_can_be_created_from_a_byte_vector() { - let info_hash: InfoHash = [255u8; 20].to_vec().try_into().unwrap(); - - assert_eq!( - info_hash, - InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() - ); - } - - #[test] - fn it_should_fail_trying_to_create_an_info_hash_from_a_byte_vector_with_less_than_20_bytes() { - assert!(InfoHash::try_from([255u8; 19].to_vec()).is_err()); - } - - #[test] - fn it_should_fail_trying_to_create_an_info_hash_from_a_byte_vector_with_more_than_20_bytes() { - assert!(InfoHash::try_from([255u8; 21].to_vec()).is_err()); - } - - #[test] - fn an_info_hash_can_be_serialized() { - let s = ContainingInfoHash { - info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap(), - }; - - let json_serialized_value = serde_json::to_string(&s).unwrap(); - - assert_eq!( - json_serialized_value, - r#"{"info_hash":"ffffffffffffffffffffffffffffffffffffffff"}"# - ); - } - - #[test] - fn an_info_hash_can_be_deserialized() { - let json = json!({ - "info_hash": "ffffffffffffffffffffffffffffffffffffffff", - }); - - let s: ContainingInfoHash = serde_json::from_value(json).unwrap(); - - assert_eq!( - s, - ContainingInfoHash { - info_hash: InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() - } - ); - } -} diff --git a/src/shared/bit_torrent/mod.rs b/src/shared/bit_torrent/mod.rs index 8074661be..7d6b12f09 100644 --- a/src/shared/bit_torrent/mod.rs +++ 
b/src/shared/bit_torrent/mod.rs @@ -68,5 +68,4 @@ //! Percent Encoding spec | //!Bencode & bdecode in your browser | pub mod common; -pub mod info_hash; pub mod tracker; diff --git a/src/shared/bit_torrent/tracker/http/client/requests/announce.rs b/src/shared/bit_torrent/tracker/http/client/requests/announce.rs index 3c6b14222..f3ce327ea 100644 --- a/src/shared/bit_torrent/tracker/http/client/requests/announce.rs +++ b/src/shared/bit_torrent/tracker/http/client/requests/announce.rs @@ -3,8 +3,8 @@ use std::net::{IpAddr, Ipv4Addr}; use std::str::FromStr; use aquatic_udp_protocol::PeerId; +use bittorrent_primitives::info_hash::InfoHash; use serde_repr::Serialize_repr; -use torrust_tracker_primitives::info_hash::InfoHash; use crate::shared::bit_torrent::tracker::http::{percent_encode_byte_array, ByteArray20}; diff --git a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs index 4d12fc2d2..58b9e0dc7 100644 --- a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs +++ b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs @@ -2,7 +2,7 @@ use std::error::Error; use std::fmt::{self}; use std::str::FromStr; -use torrust_tracker_primitives::info_hash::InfoHash; +use bittorrent_primitives::info_hash::InfoHash; use crate::shared::bit_torrent::tracker::http::{percent_encode_byte_array, ByteArray20}; diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 2f4606be7..bffe42603 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -1,6 +1,7 @@ use std::net::SocketAddr; use std::sync::Arc; +use bittorrent_primitives::info_hash::InfoHash; use futures::executor::block_on; use torrust_tracker::bootstrap::app::initialize_with_configuration; use torrust_tracker::bootstrap::jobs::make_rust_tls; @@ -8,7 +9,6 @@ use torrust_tracker::core::Tracker; use torrust_tracker::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; 
use torrust_tracker::servers::registar::Registar; use torrust_tracker_configuration::{Configuration, HttpApi}; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer; use super::connection_info::ConnectionInfo; diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index a034a7778..2c8e8d6a5 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -1,7 +1,7 @@ use std::str::FromStr; +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker::servers::apis::v1::context::stats::resources::Stats; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use tracing::level_filters::LevelFilter; diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index f5e930be3..e500ac63c 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -1,8 +1,8 @@ use std::str::FromStr; +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker::servers::apis::v1::context::torrent::resources::peer::Peer; use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{self, Torrent}; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use tracing::level_filters::LevelFilter; diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs index b30a7dbf8..49ce3e865 100644 --- a/tests/servers/api/v1/contract/context/whitelist.rs +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -1,6 +1,6 @@ use std::str::FromStr; -use torrust_tracker_primitives::info_hash::InfoHash; +use bittorrent_primitives::info_hash::InfoHash; use 
torrust_tracker_test_helpers::configuration; use tracing::level_filters::LevelFilter; diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index b6bb21c16..20b126c18 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use bittorrent_primitives::info_hash::InfoHash; use futures::executor::block_on; use torrust_tracker::bootstrap::app::initialize_with_configuration; use torrust_tracker::bootstrap::jobs::make_rust_tls; @@ -7,7 +8,6 @@ use torrust_tracker::core::Tracker; use torrust_tracker::servers::http::server::{HttpServer, Launcher, Running, Stopped}; use torrust_tracker::servers::registar::Registar; use torrust_tracker_configuration::{Configuration, HttpTracker}; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer; pub struct Environment { diff --git a/tests/servers/http/requests/announce.rs b/tests/servers/http/requests/announce.rs index fa20553d0..740c86d38 100644 --- a/tests/servers/http/requests/announce.rs +++ b/tests/servers/http/requests/announce.rs @@ -3,8 +3,8 @@ use std::net::{IpAddr, Ipv4Addr}; use std::str::FromStr; use aquatic_udp_protocol::PeerId; +use bittorrent_primitives::info_hash::InfoHash; use serde_repr::Serialize_repr; -use torrust_tracker_primitives::info_hash::InfoHash; use crate::servers::http::{percent_encode_byte_array, ByteArray20}; diff --git a/tests/servers/http/requests/scrape.rs b/tests/servers/http/requests/scrape.rs index f66605855..ecef541f1 100644 --- a/tests/servers/http/requests/scrape.rs +++ b/tests/servers/http/requests/scrape.rs @@ -1,7 +1,7 @@ use std::fmt; use std::str::FromStr; -use torrust_tracker_primitives::info_hash::InfoHash; +use bittorrent_primitives::info_hash::InfoHash; use crate::servers::http::{percent_encode_byte_array, ByteArray20}; diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 405a35dc5..554849aee 100644 --- 
a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -103,10 +103,10 @@ mod for_all_config_modes { use std::str::FromStr; use aquatic_udp_protocol::PeerId; + use bittorrent_primitives::info_hash::InfoHash; use local_ip_address::local_ip; use reqwest::{Response, StatusCode}; use tokio::net::TcpListener; - use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use tracing::level_filters::LevelFilter; @@ -1042,8 +1042,8 @@ mod for_all_config_modes { use std::str::FromStr; use aquatic_udp_protocol::PeerId; + use bittorrent_primitives::info_hash::InfoHash; use tokio::net::TcpListener; - use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use tracing::level_filters::LevelFilter; @@ -1300,7 +1300,7 @@ mod configured_as_whitelisted { mod and_receiving_an_announce_request { use std::str::FromStr; - use torrust_tracker_primitives::info_hash::InfoHash; + use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use tracing::level_filters::LevelFilter; @@ -1358,7 +1358,7 @@ mod configured_as_whitelisted { use std::str::FromStr; use aquatic_udp_protocol::PeerId; - use torrust_tracker_primitives::info_hash::InfoHash; + use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use tracing::level_filters::LevelFilter; @@ -1457,8 +1457,8 @@ mod configured_as_private { use std::str::FromStr; use std::time::Duration; + use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker::core::auth::Key; - use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use tracing::level_filters::LevelFilter; @@ -1552,8 +1552,8 @@ mod configured_as_private { use 
std::time::Duration; use aquatic_udp_protocol::PeerId; + use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker::core::auth::Key; - use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use tracing::level_filters::LevelFilter; diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index b7ac2336c..83dc076ce 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -1,6 +1,7 @@ use std::net::SocketAddr; use std::sync::Arc; +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker::bootstrap::app::initialize_with_configuration; use torrust_tracker::core::Tracker; use torrust_tracker::servers::registar::Registar; @@ -8,7 +9,6 @@ use torrust_tracker::servers::udp::server::spawner::Spawner; use torrust_tracker::servers::udp::server::states::{Running, Stopped}; use torrust_tracker::servers::udp::server::Server; use torrust_tracker_configuration::{Configuration, UdpTracker, DEFAULT_TIMEOUT}; -use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer; pub struct Environment From d5af5d3081f61b40edae0a3102f9ce1c97ea47eb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 1 Nov 2024 08:53:55 +0000 Subject: [PATCH 012/802] chore(deps): uddate deps ``` cargo update Updating crates.io index Locking 96 packages to latest compatible versions Updating addr2line v0.24.1 -> v0.24.2 Updating anstream v0.6.15 -> v0.6.17 Updating anstyle v1.0.8 -> v1.0.9 Updating anstyle-parse v0.2.5 -> v0.2.6 Updating anstyle-query v1.1.1 -> v1.1.2 Updating anstyle-wincon v3.0.4 -> v3.0.6 Updating anyhow v1.0.89 -> v1.0.92 Updating async-compression v0.4.12 -> v0.4.17 Updating async-trait v0.1.82 -> v0.1.83 Updating autocfg v1.3.0 -> v1.4.0 Updating aws-lc-rs v1.9.0 -> v1.10.0 Updating aws-lc-sys v0.21.2 -> v0.22.0 Updating axum v0.7.6 -> v0.7.7 Updating axum-client-ip v0.6.0 -> 
v0.6.1 Updating axum-core v0.4.4 -> v0.4.5 Updating bigdecimal v0.4.5 -> v0.4.6 Updating bindgen v0.69.4 -> v0.69.5 Updating brotli v6.0.0 -> v7.0.0 Updating bytemuck v1.18.0 -> v1.19.0 Updating bytes v1.7.2 -> v1.8.0 Updating cc v1.1.21 -> v1.1.31 Updating clap v4.5.18 -> v4.5.20 Updating clap_builder v4.5.18 -> v4.5.20 Updating colorchoice v1.0.2 -> v1.0.3 Updating encoding_rs v0.8.34 -> v0.8.35 Updating flate2 v1.0.33 -> v1.0.34 Adding foldhash v0.1.3 Updating futures v0.3.30 -> v0.3.31 Updating futures-channel v0.3.30 -> v0.3.31 Updating futures-core v0.3.30 -> v0.3.31 Updating futures-executor v0.3.30 -> v0.3.31 Updating futures-io v0.3.30 -> v0.3.31 Updating futures-lite v2.3.0 -> v2.4.0 Updating futures-macro v0.3.30 -> v0.3.31 Updating futures-sink v0.3.30 -> v0.3.31 Updating futures-task v0.3.30 -> v0.3.31 Updating futures-util v0.3.30 -> v0.3.31 Updating gimli v0.31.0 -> v0.31.1 Adding hashbrown v0.15.0 Updating httparse v1.9.4 -> v1.9.5 Updating hyper v1.4.1 -> v1.5.0 Updating hyper-util v0.1.8 -> v0.1.10 Updating indexmap v2.5.0 -> v2.6.0 Updating ipnet v2.10.0 -> v2.10.1 Updating js-sys v0.3.70 -> v0.3.72 Updating libc v0.2.158 -> v0.2.161 Updating libm v0.2.8 -> v0.2.11 Updating lru v0.12.4 -> v0.12.5 Updating object v0.36.4 -> v0.36.5 Updating once_cell v1.19.0 -> v1.20.2 Updating openssl v0.10.66 -> v0.10.68 Updating openssl-sys v0.9.103 -> v0.9.104 Updating pin-project v1.1.5 -> v1.1.7 Updating pin-project-internal v1.1.5 -> v1.1.7 Updating pin-project-lite v0.2.14 -> v0.2.15 Adding portable-atomic v1.9.0 Updating prettyplease v0.2.22 -> v0.2.25 Updating proc-macro2 v1.0.86 -> v1.0.89 Updating redox_syscall v0.5.4 -> v0.5.7 Updating regex v1.10.6 -> v1.11.1 Updating regex-automata v0.4.7 -> v0.4.8 Updating regex-syntax v0.8.4 -> v0.8.5 Updating reqwest v0.12.7 -> v0.12.9 Updating ringbuf v0.4.4 -> v0.4.7 Updating rstest v0.22.0 -> v0.23.0 Updating rstest_macros v0.22.0 -> v0.23.0 Updating rustix v0.38.37 -> v0.38.38 Updating rustls v0.23.13 -> 
v0.23.16 Updating rustls-pemfile v2.1.3 -> v2.2.0 Updating rustls-pki-types v1.8.0 -> v1.10.0 Updating rustversion v1.0.17 -> v1.0.18 Updating schannel v0.1.24 -> v0.1.26 Updating serde v1.0.210 -> v1.0.214 Updating serde_derive v1.0.210 -> v1.0.214 Updating serde_spanned v0.6.7 -> v0.6.8 Updating serde_with v3.9.0 -> v3.11.0 Updating serde_with_macros v3.9.0 -> v3.11.0 Updating syn v2.0.77 -> v2.0.86 Updating tempfile v3.12.0 -> v3.13.0 Updating thiserror v1.0.65 -> v1.0.66 Updating thiserror-impl v1.0.65 -> v1.0.66 Updating tokio v1.40.0 -> v1.41.0 Updating toml_edit v0.22.21 -> v0.22.22 Updating unicode-bidi v0.3.15 -> v0.3.17 Updating uuid v1.10.0 -> v1.11.0 Updating value-bag v1.9.0 -> v1.10.0 Updating wasm-bindgen v0.2.93 -> v0.2.95 Updating wasm-bindgen-backend v0.2.93 -> v0.2.95 Updating wasm-bindgen-futures v0.4.43 -> v0.4.45 Updating wasm-bindgen-macro v0.2.93 -> v0.2.95 Updating wasm-bindgen-macro-support v0.2.93 -> v0.2.95 Updating wasm-bindgen-shared v0.2.93 -> v0.2.95 Updating web-sys v0.3.70 -> v0.3.72 Updating winnow v0.6.18 -> v0.6.20 Adding zerocopy v0.8.8 Adding zerocopy-derive v0.8.8 ``` --- Cargo.lock | 512 +++++++++++++++++++++++++++++------------------------ 1 file changed, 276 insertions(+), 236 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9dade94be..9788725db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] @@ -37,7 +37,7 @@ dependencies = [ "cfg-if", "once_cell", "version_check", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -93,9 +93,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.15" +version = "0.6.17" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" +checksum = "23a1e53f0f5d86382dafe1cf314783b2044280f406e7e1506368220ad11b1338" dependencies = [ "anstyle", "anstyle-parse", @@ -108,43 +108,43 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" +checksum = "8365de52b16c035ff4fcafe0092ba9390540e3e352870ac09933bebcaa2c8c56" [[package]] name = "anstyle-parse" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.4" +version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" +checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" dependencies = [ "anstyle", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "anyhow" -version = "1.0.89" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" +checksum = "74f37166d7d48a0284b99dd824694c26119c700b53bf0d1540cdb147dbdaaf13" [[package]] name = "aquatic_peer_id" @@ 
-157,7 +157,7 @@ dependencies = [ "quickcheck", "regex", "serde", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -169,7 +169,7 @@ dependencies = [ "aquatic_peer_id", "byteorder", "either", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -219,9 +219,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.12" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fec134f64e2bc57411226dfc4e52dec859ddfc7e711fc5e07b612584f000e4aa" +checksum = "0cb8f1d480b0ea3783ab015936d2a55c87e219676f0c0b7dec61494043f21857" dependencies = [ "brotli", "flate2", @@ -327,13 +327,13 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.82" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] @@ -353,15 +353,15 @@ checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "autocfg" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "aws-lc-rs" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f95446d919226d587817a7d21379e6eb099b97b45110a7f272a444ca5c54070" +checksum = "cdd82dba44d209fddb11c190e0a94b78651f95299598e472215667417a03ff1d" dependencies = [ "aws-lc-sys", "mirai-annotations", @@ -371,11 +371,11 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.21.2" +version = "0.22.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3ddc4a5b231dd6958b140ff3151b6412b3f4321fab354f399eec8f14b06df62" +checksum = "df7a4168111d7eb622a31b214057b8509c0a7e1794f44c546d742330dc793972" dependencies = [ - "bindgen 0.69.4", + "bindgen 0.69.5", "cc", "cmake", "dunce", @@ -386,9 +386,9 @@ dependencies = [ [[package]] name = "axum" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f43644eed690f5374f1af436ecd6aea01cd201f6fbdf0178adaf6907afb2cec" +checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" dependencies = [ "async-trait", "axum-core", @@ -421,9 +421,9 @@ dependencies = [ [[package]] name = "axum-client-ip" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72188bed20deb981f3a4a9fe674e5980fd9e9c2bd880baa94715ad5d60d64c67" +checksum = "9eefda7e2b27e1bda4d6fa8a06b50803b8793769045918bc37ad062d48a6efac" dependencies = [ "axum", "forwarded-header-value", @@ -432,9 +432,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6b8ba012a258d63c9adfa28b9ddcf66149da6f986c5b5452e629d5ee64bf00" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" dependencies = [ "async-trait", "bytes", @@ -482,7 +482,7 @@ checksum = "57d123550fa8d071b7255cb0cc04dc302baa6c8c4a79f55701552684d8399bce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] @@ -538,9 +538,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "bigdecimal" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d712318a27c7150326677b321a5fa91b55f6d9034ffd67f20319e147d40cee" +checksum = "8f850665a0385e070b64c38d2354e6c104c8479c59868d1e48a0c13ee2c7a1c1" 
dependencies = [ "autocfg", "libm", @@ -557,9 +557,9 @@ checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] name = "bindgen" -version = "0.69.4" +version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" +checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ "bitflags", "cexpr", @@ -574,7 +574,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.77", + "syn 2.0.86", "which", ] @@ -593,7 +593,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] @@ -613,7 +613,7 @@ dependencies = [ "serde", "serde_json", "thiserror", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -670,15 +670,15 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", "syn_derive", ] [[package]] name = "brotli" -version = "6.0.0" +version = "7.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74f7971dbd9326d58187408ab83117d8ac1bb9c17b085fdacd1cf2f598719b6b" +checksum = "cc97b8f16f944bba54f0433f07e30be199b6dc2bd25937444bbad560bcea29bd" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -740,9 +740,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94bbb0ad554ad961ddc5da507a12a29b14e4ae5bda06b19f575a3e6079d2e2ae" +checksum = "8334215b81e418a0a7bdb8ef0849474f40bb10c8b71f1c4ed315cff49f32494d" [[package]] name = "byteorder" @@ -752,9 +752,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.2" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" +checksum = 
"9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" [[package]] name = "camino" @@ -782,9 +782,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.21" +version = "1.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" +checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f" dependencies = [ "jobserver", "libc", @@ -865,9 +865,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.18" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" +checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" dependencies = [ "clap_builder", "clap_derive", @@ -875,9 +875,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.18" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" +checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" dependencies = [ "anstream", "anstyle", @@ -894,7 +894,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] @@ -914,9 +914,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" [[package]] name = "compact_str" @@ -1115,7 +1115,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] @@ -1126,7 +1126,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] @@ 
-1170,7 +1170,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", "unicode-xid", ] @@ -1182,7 +1182,7 @@ checksum = "65f152f4b8559c4da5d574bafc7af85454d706b4c5fe8b530d508cacbb6807ea" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] @@ -1215,9 +1215,9 @@ checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "encoding_rs" -version = "0.8.34" +version = "0.8.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" +checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" dependencies = [ "cfg-if", ] @@ -1311,9 +1311,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.33" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" +checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" dependencies = [ "crc32fast", "libz-sys", @@ -1326,6 +1326,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" + [[package]] name = "foreign-types" version = "0.3.2" @@ -1395,7 +1401,7 @@ checksum = "e99b8b3c28ae0e84b604c75f721c21dc77afb3706076af5e8216d15fd1deaae3" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] @@ -1407,7 +1413,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] @@ -1419,7 +1425,7 @@ dependencies = [ "frunk_core", 
"frunk_proc_macro_helpers", "quote", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] @@ -1436,9 +1442,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -1451,9 +1457,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1461,15 +1467,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -1478,15 +1484,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" -version = "2.3.0" 
+version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +checksum = "3f1fa2f9765705486b33fd2acf1577f8ec449c2ba1f318ae5447697b7c08d210" dependencies = [ "fastrand", "futures-core", @@ -1497,26 +1503,26 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" @@ -1526,9 +1532,9 @@ checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -1565,9 +1571,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.31.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "glob" @@ -1599,7 +1605,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -1632,7 +1638,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash 0.8.11", +] + +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +dependencies = [ "allocator-api2", + "equivalent", + "foldhash", ] [[package]] @@ -1725,9 +1741,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -1737,9 +1753,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" dependencies = [ "bytes", "futures-channel", @@ -1791,9 +1807,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.8" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", @@ -1804,7 +1820,6 @@ 
dependencies = [ "pin-project-lite", "socket2", "tokio", - "tower 0.4.13", "tower-service", "tracing", ] @@ -1861,12 +1876,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.0", "serde", ] @@ -1887,9 +1902,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.10.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "is-terminal" @@ -1952,9 +1967,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.70" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" dependencies = [ "wasm-bindgen", ] @@ -1982,9 +1997,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.158" +version = "0.2.161" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" +checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" [[package]] name = "libloading" @@ -1998,9 +2013,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" 
[[package]] name = "libsqlite3-sys" @@ -2063,11 +2078,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ee39891760e7d94734f6f63fedc29a2e4a152f836120753a72503f09fcf904" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.15.0", ] [[package]] @@ -2144,7 +2159,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] @@ -2194,7 +2209,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", "termcolor", "thiserror", ] @@ -2351,18 +2366,18 @@ dependencies = [ [[package]] name = "object" -version = "0.36.4" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "oorandom" @@ -2372,9 +2387,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "openssl" -version = "0.10.66" +version = "0.10.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" +checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" dependencies = [ "bitflags", "cfg-if", @@ -2393,7 +2408,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", ] 
[[package]] @@ -2404,9 +2419,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.103" +version = "0.9.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" +checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" dependencies = [ "cc", "libc", @@ -2475,7 +2490,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] @@ -2534,29 +2549,29 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" @@ -2624,6 +2639,12 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "portable-atomic" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" + [[package]] name = "powerfmt" version = "0.2.0" @@ -2636,7 
+2657,7 @@ version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" dependencies = [ - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -2667,12 +2688,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.22" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" +checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] @@ -2710,9 +2731,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" dependencies = [ "unicode-ident", ] @@ -2725,7 +2746,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", "version_check", "yansi", ] @@ -2860,18 +2881,18 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.4" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ "bitflags", ] [[package]] name = "regex" -version = "1.10.6" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", @@ -2881,9 +2902,9 @@ dependencies = [ [[package]] name = 
"regex-automata" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", @@ -2892,9 +2913,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "relative-path" @@ -2913,9 +2934,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.7" +version = "0.12.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" +checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" dependencies = [ "base64 0.22.1", "bytes", @@ -2971,11 +2992,12 @@ dependencies = [ [[package]] name = "ringbuf" -version = "0.4.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46f7f1b88601a8ee13cabf203611ccdf64345dc1c5d24de8b11e1a678ee619b6" +checksum = "726bb493fe9cac765e8f96a144c3a8396bdf766dedad22e504b70b908dcbceb4" dependencies = [ "crossbeam-utils", + "portable-atomic", ] [[package]] @@ -3009,9 +3031,9 @@ dependencies = [ [[package]] name = "rstest" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b423f0e62bdd61734b67cd21ff50871dfaeb9cc74f869dcd6af974fbcb19936" +checksum = "0a2c585be59b6b5dd66a9d2084aa1d8bd52fbdb806eafdeffb52791147862035" dependencies = [ "futures", "futures-timer", @@ -3021,9 +3043,9 @@ dependencies = [ [[package]] name = "rstest_macros" -version = "0.22.0" +version = "0.23.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e1711e7d14f74b12a58411c542185ef7fb7f2e7f8ee6e2940a883628522b42" +checksum = "825ea780781b15345a146be27eaefb05085e337e869bff01b4306a4fd4a9ad5a" dependencies = [ "cfg-if", "glob", @@ -3033,7 +3055,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.77", + "syn 2.0.86", "unicode-ident", ] @@ -3090,9 +3112,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.37" +version = "0.38.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" +checksum = "aa260229e6538e52293eeb577aabd09945a09d6d9cc0fc550ed7529056c2e32a" dependencies = [ "bitflags", "errno", @@ -3103,9 +3125,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.13" +version = "0.23.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" +checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" dependencies = [ "aws-lc-rs", "once_cell", @@ -3117,19 +3139,18 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" +checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" [[package]] name = "rustls-webpki" @@ -3145,9 +3166,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.17" +version = "1.0.18" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" [[package]] name = "ryu" @@ -3172,9 +3193,9 @@ checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" [[package]] name = "schannel" -version = "0.1.24" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" +checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" dependencies = [ "windows-sys 0.59.0", ] @@ -3231,9 +3252,9 @@ checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" -version = "1.0.210" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" +checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" dependencies = [ "serde_derive", ] @@ -3259,13 +3280,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.210" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" +checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] @@ -3275,7 +3296,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de514ef58196f1fc96dcaef80fe6170a1ce6215df9687a93fe8300e773fefc5" dependencies = [ "form_urlencoded", - "indexmap 2.5.0", + "indexmap 2.6.0", "itoa", "ryu", "serde", @@ -3287,7 +3308,7 @@ version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ - 
"indexmap 2.5.0", + "indexmap 2.6.0", "itoa", "memchr", "ryu", @@ -3312,14 +3333,14 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] name = "serde_spanned" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ "serde", ] @@ -3338,15 +3359,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.9.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" +checksum = "8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.5.0", + "indexmap 2.6.0", "serde", "serde_derive", "serde_json", @@ -3356,14 +3377,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.9.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" +checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] @@ -3496,9 +3517,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.77" +version = "2.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" +checksum = "e89275301d38033efb81a6e60e3497e734dfcc62571f2854bf4b16690398824c" dependencies = [ "proc-macro2", "quote", @@ -3514,7 +3535,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] @@ -3578,9 
+3599,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if", "fastrand", @@ -3606,22 +3627,22 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.65" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d11abd9594d9b38965ef50805c5e469ca9cc6f197f883f717e0269a3057b3d5" +checksum = "5d171f59dbaa811dbbb1aee1e73db92ec2b122911a48e1390dfe327a821ddede" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.65" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" +checksum = "b08be0f17bd307950653ce45db00cd31200d82b624b36e181337d9c7d92765b5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] @@ -3692,9 +3713,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.40.0" +version = "1.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" +checksum = "145f3413504347a2be84393cc8a7d2fb4d863b375909ea59f2158261aa258bbb" dependencies = [ "backtrace", "bytes", @@ -3715,7 +3736,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] @@ -3775,11 +3796,11 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.21" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", @@ -3845,7 +3866,7 @@ dependencies = [ "tracing-subscriber", "url", "uuid", - "zerocopy", + "zerocopy 0.8.8", ] [[package]] @@ -3902,7 +3923,7 @@ dependencies = [ "tdyne-peer-id", "tdyne-peer-id-registry", "thiserror", - "zerocopy", + "zerocopy 0.8.8", ] [[package]] @@ -3930,7 +3951,7 @@ dependencies = [ "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-primitives", - "zerocopy", + "zerocopy 0.8.8", ] [[package]] @@ -3943,7 +3964,6 @@ dependencies = [ "futures-util", "pin-project", "pin-project-lite", - "tokio", "tower-layer", "tower-service", "tracing", @@ -4018,7 +4038,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", ] [[package]] @@ -4103,9 +4123,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" @@ -4154,9 +4174,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" +checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" dependencies = [ "getrandom", "rand", @@ -4170,9 +4190,9 @@ checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "value-bag" -version = "1.9.0" +version = "1.10.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a84c137d37ab0142f0f2ddfe332651fdbf252e7b7dbb4e67b6c1f1b2e925101" +checksum = "3ef4c4aa54d5d05a279399bfa921ec387b7aba77caf7a682ae8d86785b8fdad2" [[package]] name = "vcpkg" @@ -4213,9 +4233,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" dependencies = [ "cfg-if", "once_cell", @@ -4224,24 +4244,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.43" +version = "0.4.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" +checksum = "cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" dependencies = [ "cfg-if", "js-sys", @@ -4251,9 +4271,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4261,28 +4281,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = 
"0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.93" +version = "0.2.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" [[package]] name = "web-sys" -version = "0.3.70" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" +checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" dependencies = [ "js-sys", "wasm-bindgen", @@ -4454,9 +4474,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.18" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -4483,7 +4503,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", - "zerocopy-derive", + "zerocopy-derive 0.7.35", +] + +[[package]] +name = "zerocopy" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a4e33e6dce36f2adba29746927f8e848ba70989fdb61c772773bbdda8b5d6a7" +dependencies = [ + "zerocopy-derive 0.8.8", ] [[package]] @@ -4494,7 +4523,18 @@ checksum = 
"fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.77", + "syn 2.0.86", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cd137b4cc21bde6ecce3bbbb3350130872cda0be2c6888874279ea76e17d4c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.86", ] [[package]] From e58bdeb571878b3e1b68f1f5faacacca644d56ca Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 1 Nov 2024 09:07:34 +0000 Subject: [PATCH 013/802] chore(deps): force 0.7 version for zerocopy The aquatic_udp_protocol crate uses version 0.7: https://github.com/greatest-ape/aquatic/blob/master/crates/udp_protocol/Cargo.toml#L19 We were having problems with trait `read_from`. Example: ```rust let data = PeerId::read_from(&bytes).expect("it should have the correct amount of bytes"); ``` There have been changes in version 0.8: https://github.com/google/zerocopy/discussions/1680 --- Cargo.lock | 38 ++++++-------------------- Cargo.toml | 2 +- packages/primitives/Cargo.toml | 2 +- packages/torrent-repository/Cargo.toml | 2 +- 4 files changed, 12 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9788725db..bcb27fb43 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -37,7 +37,7 @@ dependencies = [ "cfg-if", "once_cell", "version_check", - "zerocopy 0.7.35", + "zerocopy", ] [[package]] @@ -157,7 +157,7 @@ dependencies = [ "quickcheck", "regex", "serde", - "zerocopy 0.7.35", + "zerocopy", ] [[package]] @@ -169,7 +169,7 @@ dependencies = [ "aquatic_peer_id", "byteorder", "either", - "zerocopy 0.7.35", + "zerocopy", ] [[package]] @@ -613,7 +613,7 @@ dependencies = [ "serde", "serde_json", "thiserror", - "zerocopy 0.7.35", + "zerocopy", ] [[package]] @@ -2657,7 +2657,7 @@ version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" 
dependencies = [ - "zerocopy 0.7.35", + "zerocopy", ] [[package]] @@ -3866,7 +3866,7 @@ dependencies = [ "tracing-subscriber", "url", "uuid", - "zerocopy 0.8.8", + "zerocopy", ] [[package]] @@ -3923,7 +3923,7 @@ dependencies = [ "tdyne-peer-id", "tdyne-peer-id-registry", "thiserror", - "zerocopy 0.8.8", + "zerocopy", ] [[package]] @@ -3951,7 +3951,7 @@ dependencies = [ "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-primitives", - "zerocopy 0.8.8", + "zerocopy", ] [[package]] @@ -4503,16 +4503,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", - "zerocopy-derive 0.7.35", -] - -[[package]] -name = "zerocopy" -version = "0.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a4e33e6dce36f2adba29746927f8e848ba70989fdb61c772773bbdda8b5d6a7" -dependencies = [ - "zerocopy-derive 0.8.8", + "zerocopy-derive", ] [[package]] @@ -4526,17 +4517,6 @@ dependencies = [ "syn 2.0.86", ] -[[package]] -name = "zerocopy-derive" -version = "0.8.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cd137b4cc21bde6ecce3bbbb3350130872cda0be2c6888874279ea76e17d4c1" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.86", -] - [[package]] name = "zeroize" version = "1.8.1" diff --git a/Cargo.toml b/Cargo.toml index d69fa3e5e..e42702d06 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -82,7 +82,7 @@ tracing = "0" tracing-subscriber = { version = "0", features = ["json"] } url = { version = "2", features = ["serde"] } uuid = { version = "1", features = ["v4"] } -zerocopy = "0" +zerocopy = "0.7" [package.metadata.cargo-machete] ignored = ["crossbeam-skiplist", "dashmap", "figment", "parking_lot", "serde_bytes"] diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index 4b5abc8f3..4d18bdca6 100644 --- a/packages/primitives/Cargo.toml +++ 
b/packages/primitives/Cargo.toml @@ -23,4 +23,4 @@ serde = { version = "1", features = ["derive"] } tdyne-peer-id = "1" tdyne-peer-id-registry = "0" thiserror = "1" -zerocopy = "0" +zerocopy = "0.7" diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 0933457d3..2097d57d2 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -26,7 +26,7 @@ tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -zerocopy = "0" +zerocopy = "0.7" [dev-dependencies] async-std = { version = "1", features = ["attributes", "tokio1"] } From 093e8c9bbb86bde793c8f8903f25b52aa2d7c80d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 1 Nov 2024 16:31:08 +0000 Subject: [PATCH 014/802] feat: extract new package bittorrent-tracker-client This will allow other projects to reuse the tracker lib clients and console clients. 
--- Cargo.lock | 32 ++ Cargo.toml | 2 + packages/tracker-client/Cargo.toml | 42 +++ packages/tracker-client/README.md | 25 ++ .../docs/licenses/LICENSE-MIT_0 | 14 + .../src/bin/http_tracker_client.rs | 7 + .../tracker-client/src/bin/tracker_checker.rs | 7 + .../src/bin/udp_tracker_client.rs | 7 + .../src/console/clients/checker/app.rs | 120 ++++++++ .../console/clients/checker/checks/health.rs | 77 +++++ .../console/clients/checker/checks/http.rs | 104 +++++++ .../src/console/clients/checker/checks/mod.rs | 4 + .../console/clients/checker/checks/structs.rs | 12 + .../src/console/clients/checker/checks/udp.rs | 134 +++++++++ .../src/console/clients/checker/config.rs | 282 ++++++++++++++++++ .../src/console/clients/checker/console.rs | 38 +++ .../src/console/clients/checker/logger.rs | 72 +++++ .../src/console/clients/checker/mod.rs | 7 + .../src/console/clients/checker/printer.rs | 9 + .../src/console/clients/checker/service.rs | 62 ++++ .../src/console/clients/http/app.rs | 102 +++++++ .../src/console/clients/http/mod.rs | 34 +++ .../tracker-client/src/console/clients/mod.rs | 4 + .../src/console/clients/udp/app.rs | 208 +++++++++++++ .../src/console/clients/udp/checker.rs | 177 +++++++++++ .../src/console/clients/udp/mod.rs | 51 ++++ .../src/console/clients/udp/responses/dto.rs | 128 ++++++++ .../src/console/clients/udp/responses/json.rs | 25 ++ .../src/console/clients/udp/responses/mod.rs | 2 + packages/tracker-client/src/console/mod.rs | 2 + .../tracker-client/src/http/client/mod.rs | 220 ++++++++++++++ .../src/http/client/requests/announce.rs | 275 +++++++++++++++++ .../src/http/client/requests/mod.rs | 2 + .../src/http/client/requests/scrape.rs | 172 +++++++++++ .../src/http/client/responses/announce.rs | 126 ++++++++ .../src/http/client/responses/error.rs | 7 + .../src/http/client/responses/mod.rs | 3 + .../src/http/client/responses/scrape.rs | 230 ++++++++++++++ packages/tracker-client/src/http/mod.rs | 27 ++ .../tracker-client/src/http/url_encoding.rs | 132 
++++++++ packages/tracker-client/src/lib.rs | 3 + packages/tracker-client/src/udp/client.rs | 270 +++++++++++++++++ packages/tracker-client/src/udp/mod.rs | 68 +++++ 43 files changed, 3325 insertions(+) create mode 100644 packages/tracker-client/Cargo.toml create mode 100644 packages/tracker-client/README.md create mode 100644 packages/tracker-client/docs/licenses/LICENSE-MIT_0 create mode 100644 packages/tracker-client/src/bin/http_tracker_client.rs create mode 100644 packages/tracker-client/src/bin/tracker_checker.rs create mode 100644 packages/tracker-client/src/bin/udp_tracker_client.rs create mode 100644 packages/tracker-client/src/console/clients/checker/app.rs create mode 100644 packages/tracker-client/src/console/clients/checker/checks/health.rs create mode 100644 packages/tracker-client/src/console/clients/checker/checks/http.rs create mode 100644 packages/tracker-client/src/console/clients/checker/checks/mod.rs create mode 100644 packages/tracker-client/src/console/clients/checker/checks/structs.rs create mode 100644 packages/tracker-client/src/console/clients/checker/checks/udp.rs create mode 100644 packages/tracker-client/src/console/clients/checker/config.rs create mode 100644 packages/tracker-client/src/console/clients/checker/console.rs create mode 100644 packages/tracker-client/src/console/clients/checker/logger.rs create mode 100644 packages/tracker-client/src/console/clients/checker/mod.rs create mode 100644 packages/tracker-client/src/console/clients/checker/printer.rs create mode 100644 packages/tracker-client/src/console/clients/checker/service.rs create mode 100644 packages/tracker-client/src/console/clients/http/app.rs create mode 100644 packages/tracker-client/src/console/clients/http/mod.rs create mode 100644 packages/tracker-client/src/console/clients/mod.rs create mode 100644 packages/tracker-client/src/console/clients/udp/app.rs create mode 100644 packages/tracker-client/src/console/clients/udp/checker.rs create mode 100644 
packages/tracker-client/src/console/clients/udp/mod.rs create mode 100644 packages/tracker-client/src/console/clients/udp/responses/dto.rs create mode 100644 packages/tracker-client/src/console/clients/udp/responses/json.rs create mode 100644 packages/tracker-client/src/console/clients/udp/responses/mod.rs create mode 100644 packages/tracker-client/src/console/mod.rs create mode 100644 packages/tracker-client/src/http/client/mod.rs create mode 100644 packages/tracker-client/src/http/client/requests/announce.rs create mode 100644 packages/tracker-client/src/http/client/requests/mod.rs create mode 100644 packages/tracker-client/src/http/client/requests/scrape.rs create mode 100644 packages/tracker-client/src/http/client/responses/announce.rs create mode 100644 packages/tracker-client/src/http/client/responses/error.rs create mode 100644 packages/tracker-client/src/http/client/responses/mod.rs create mode 100644 packages/tracker-client/src/http/client/responses/scrape.rs create mode 100644 packages/tracker-client/src/http/mod.rs create mode 100644 packages/tracker-client/src/http/url_encoding.rs create mode 100644 packages/tracker-client/src/lib.rs create mode 100644 packages/tracker-client/src/udp/client.rs create mode 100644 packages/tracker-client/src/udp/mod.rs diff --git a/Cargo.lock b/Cargo.lock index bcb27fb43..00d83fddb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -616,6 +616,37 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "bittorrent-tracker-client" +version = "3.0.0-develop" +dependencies = [ + "anyhow", + "aquatic_udp_protocol", + "bittorrent-primitives", + "clap", + "derive_more", + "futures", + "futures-util", + "hex-literal", + "hyper", + "percent-encoding", + "reqwest", + "serde", + "serde_bencode", + "serde_bytes", + "serde_json", + "serde_repr", + "thiserror", + "tokio", + "torrust-tracker-configuration", + "torrust-tracker-located-error", + "torrust-tracker-primitives", + "tracing", + "tracing-subscriber", + "url", + "zerocopy", +] + 
[[package]] name = "bitvec" version = "1.0.1" @@ -3818,6 +3849,7 @@ dependencies = [ "axum-extra", "axum-server", "bittorrent-primitives", + "bittorrent-tracker-client", "camino", "chrono", "clap", diff --git a/Cargo.toml b/Cargo.toml index e42702d06..574881a94 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,6 +37,7 @@ axum-client-ip = "0" axum-extra = { version = "0", features = ["query"] } axum-server = { version = "0", features = ["tls-rustls"] } bittorrent-primitives = "0.1.0" +bittorrent-tracker-client = { version = "3.0.0-develop", path = "packages/tracker-client" } camino = { version = "1", features = ["serde", "serde1"] } chrono = { version = "0", default-features = false, features = ["clock"] } clap = { version = "4", features = ["derive", "env"] } @@ -100,6 +101,7 @@ members = [ "packages/primitives", "packages/test-helpers", "packages/torrent-repository", + "packages/tracker-client", ] [profile.dev] diff --git a/packages/tracker-client/Cargo.toml b/packages/tracker-client/Cargo.toml new file mode 100644 index 000000000..85e10c03e --- /dev/null +++ b/packages/tracker-client/Cargo.toml @@ -0,0 +1,42 @@ +[package] +description = "A library and console applications to interact with BitTorrent trackers."
+keywords = ["bittorrent", "client", "tracker"] +license = "LGPL-3.0" +name = "bittorrent-tracker-client" +readme = "README.md" + +authors.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +anyhow = "1" +aquatic_udp_protocol = "0" +bittorrent-primitives = "0.1.0" +clap = { version = "4", features = ["derive", "env"] } +derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } +futures = "0" +futures-util = "0" +hex-literal = "0" +hyper = "1" +percent-encoding = "2" +reqwest = { version = "0", features = ["json"] } +serde = { version = "1", features = ["derive"] } +serde_bencode = "0" +serde_bytes = "0" +serde_json = { version = "1", features = ["preserve_order"] } +serde_repr = "0" +thiserror = "1" +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +tracing = "0" +tracing-subscriber = { version = "0", features = ["json"] } +url = { version = "2", features = ["serde"] } +zerocopy = "0.7" diff --git a/packages/tracker-client/README.md b/packages/tracker-client/README.md new file mode 100644 index 000000000..1d12f9c86 --- /dev/null +++ b/packages/tracker-client/README.md @@ -0,0 +1,25 @@ +# BitTorrent Tracker Client + +A library and console applications to interact with a BitTorrent tracker. + +> **Disclaimer**: This project is actively under development. We’re currently extracting and refining common types from the [Torrust Tracker](https://github.com/torrust/torrust-tracker) to make them available to the BitTorrent community in Rust. 
While these types are functional, they are not yet ready for use in production or third-party projects. + +## License + +**Copyright (c) 2024 The Torrust Developers.** + +This program is free software: you can redistribute it and/or modify it under the terms of the [GNU Lesser General Public License][LGPL_3_0] as published by the [Free Software Foundation][FSF], version 3. + +This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the [GNU Lesser General Public License][LGPL_3_0] for more details. + +You should have received a copy of the *GNU Lesser General Public License* along with this program. If not, see <https://www.gnu.org/licenses/>. + +Some files include explicit copyright notices and/or license notices. + +### Legacy Exception + +For posterity, versions of Torrust BitTorrent Tracker Client that are older than five years are automatically granted the [MIT-0][MIT_0] license in addition to the existing [LGPL-3.0-only][LGPL_3_0] license. + +[LGPL_3_0]: ./LICENSE +[MIT_0]: ./docs/licenses/LICENSE-MIT_0 +[FSF]: https://www.fsf.org/ diff --git a/packages/tracker-client/docs/licenses/LICENSE-MIT_0 b/packages/tracker-client/docs/licenses/LICENSE-MIT_0 new file mode 100644 index 000000000..fc06cc4fe --- /dev/null +++ b/packages/tracker-client/docs/licenses/LICENSE-MIT_0 @@ -0,0 +1,14 @@ +MIT No Attribution + +Permission is hereby granted, free of charge, to any person obtaining a copy of this +software and associated documentation files (the "Software"), to deal in the Software +without restriction, including without limitation the rights to use, copy, modify, +merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/packages/tracker-client/src/bin/http_tracker_client.rs b/packages/tracker-client/src/bin/http_tracker_client.rs new file mode 100644 index 000000000..8c2c0356d --- /dev/null +++ b/packages/tracker-client/src/bin/http_tracker_client.rs @@ -0,0 +1,7 @@ +//! Program to make request to HTTP trackers. +use bittorrent_tracker_client::console::clients::http::app; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + app::run().await +} diff --git a/packages/tracker-client/src/bin/tracker_checker.rs b/packages/tracker-client/src/bin/tracker_checker.rs new file mode 100644 index 000000000..eb2a7d82c --- /dev/null +++ b/packages/tracker-client/src/bin/tracker_checker.rs @@ -0,0 +1,7 @@ +//! Program to check running trackers. +use bittorrent_tracker_client::console::clients::checker::app; + +#[tokio::main] +async fn main() { + app::run().await.expect("Some checks fail"); +} diff --git a/packages/tracker-client/src/bin/udp_tracker_client.rs b/packages/tracker-client/src/bin/udp_tracker_client.rs new file mode 100644 index 000000000..5f6b4f50d --- /dev/null +++ b/packages/tracker-client/src/bin/udp_tracker_client.rs @@ -0,0 +1,7 @@ +//! Program to make request to UDP trackers. 
+use bittorrent_tracker_client::console::clients::udp::app; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + app::run().await +} diff --git a/packages/tracker-client/src/console/clients/checker/app.rs b/packages/tracker-client/src/console/clients/checker/app.rs new file mode 100644 index 000000000..395f65df9 --- /dev/null +++ b/packages/tracker-client/src/console/clients/checker/app.rs @@ -0,0 +1,120 @@ +//! Program to run checks against running trackers. +//! +//! Run providing a config file path: +//! +//! ```text +//! cargo run --bin tracker_checker -- --config-path "./share/default/config/tracker_checker.json" +//! TORRUST_CHECKER_CONFIG_PATH="./share/default/config/tracker_checker.json" cargo run --bin tracker_checker +//! ``` +//! +//! Run providing the configuration: +//! +//! ```text +//! TORRUST_CHECKER_CONFIG=$(cat "./share/default/config/tracker_checker.json") cargo run --bin tracker_checker +//! ``` +//! +//! Another real example to test the Torrust demo tracker: +//! +//! ```text +//! TORRUST_CHECKER_CONFIG='{ +//! "udp_trackers": ["144.126.245.19:6969"], +//! "http_trackers": ["https://tracker.torrust-demo.com"], +//! "health_checks": ["https://tracker.torrust-demo.com/api/health_check"] +//! }' cargo run --bin tracker_checker +//! ``` +//! +//! The output should be something like the following: +//! +//! ```json +//! { +//! "udp_trackers": [ +//! { +//! "url": "144.126.245.19:6969", +//! "status": { +//! "code": "ok", +//! "message": "" +//! } +//! } +//! ], +//! "http_trackers": [ +//! { +//! "url": "https://tracker.torrust-demo.com/", +//! "status": { +//! "code": "ok", +//! "message": "" +//! } +//! } +//! ], +//! "health_checks": [ +//! { +//! "url": "https://tracker.torrust-demo.com/api/health_check", +//! "status": { +//! "code": "ok", +//! "message": "" +//! } +//! } +//! ] +//! } +//! 
``` +use std::path::PathBuf; +use std::sync::Arc; + +use anyhow::{Context, Result}; +use clap::Parser; +use tracing::level_filters::LevelFilter; + +use super::config::Configuration; +use super::console::Console; +use super::service::{CheckResult, Service}; +use crate::console::clients::checker::config::parse_from_json; + +#[derive(Parser, Debug)] +#[clap(author, version, about, long_about = None)] +struct Args { + /// Path to the JSON configuration file. + #[clap(short, long, env = "TORRUST_CHECKER_CONFIG_PATH")] + config_path: Option, + + /// Direct configuration content in JSON. + #[clap(env = "TORRUST_CHECKER_CONFIG", hide_env_values = true)] + config_content: Option, +} + +/// # Errors +/// +/// Will return an error if the configuration was not provided. +pub async fn run() -> Result> { + tracing_stdout_init(LevelFilter::INFO); + + let args = Args::parse(); + + let config = setup_config(args)?; + + let console_printer = Console {}; + + let service = Service { + config: Arc::new(config), + console: console_printer, + }; + + service.run_checks().await.context("it should run the check tasks") +} + +fn tracing_stdout_init(filter: LevelFilter) { + tracing_subscriber::fmt().with_max_level(filter).init(); + tracing::debug!("Logging initialized"); +} + +fn setup_config(args: Args) -> Result { + match (args.config_path, args.config_content) { + (Some(config_path), _) => load_config_from_file(&config_path), + (_, Some(config_content)) => parse_from_json(&config_content).context("invalid config format"), + _ => Err(anyhow::anyhow!("no configuration provided")), + } +} + +fn load_config_from_file(path: &PathBuf) -> Result { + let file_content = std::fs::read_to_string(path).with_context(|| format!("can't read config file {path:?}"))?; + + parse_from_json(&file_content).context("invalid config format") +} diff --git a/packages/tracker-client/src/console/clients/checker/checks/health.rs b/packages/tracker-client/src/console/clients/checker/checks/health.rs new file mode 
100644 index 000000000..b1fb79148 --- /dev/null +++ b/packages/tracker-client/src/console/clients/checker/checks/health.rs @@ -0,0 +1,77 @@ +use std::sync::Arc; +use std::time::Duration; + +use anyhow::Result; +use hyper::StatusCode; +use reqwest::{Client as HttpClient, Response}; +use serde::Serialize; +use thiserror::Error; +use url::Url; + +#[derive(Debug, Clone, Error, Serialize)] +#[serde(into = "String")] +pub enum Error { + #[error("Failed to Build a Http Client: {err:?}")] + ClientBuildingError { err: Arc }, + #[error("Heath check failed to get a response: {err:?}")] + ResponseError { err: Arc }, + #[error("Http check returned a non-success code: \"{code}\" with the response: \"{response:?}\"")] + UnsuccessfulResponse { code: StatusCode, response: Arc }, +} + +impl From for String { + fn from(value: Error) -> Self { + value.to_string() + } +} + +#[derive(Debug, Clone, Serialize)] +pub struct Checks { + url: Url, + result: Result, +} + +pub async fn run(health_checks: Vec, timeout: Duration) -> Vec> { + let mut results = Vec::default(); + + tracing::debug!("Health checks ..."); + + for url in health_checks { + let result = match run_health_check(url.clone(), timeout).await { + Ok(response) => Ok(response.status().to_string()), + Err(err) => Err(err), + }; + + let check = Checks { url, result }; + + if check.result.is_err() { + results.push(Err(check)); + } else { + results.push(Ok(check)); + } + } + + results +} + +async fn run_health_check(url: Url, timeout: Duration) -> Result { + let client = HttpClient::builder() + .timeout(timeout) + .build() + .map_err(|e| Error::ClientBuildingError { err: e.into() })?; + + let response = client + .get(url.clone()) + .send() + .await + .map_err(|e| Error::ResponseError { err: e.into() })?; + + if response.status().is_success() { + Ok(response) + } else { + Err(Error::UnsuccessfulResponse { + code: response.status(), + response: response.into(), + }) + } +} diff --git 
a/packages/tracker-client/src/console/clients/checker/checks/http.rs b/packages/tracker-client/src/console/clients/checker/checks/http.rs new file mode 100644 index 000000000..48ce9678d --- /dev/null +++ b/packages/tracker-client/src/console/clients/checker/checks/http.rs @@ -0,0 +1,104 @@ +use std::str::FromStr as _; +use std::time::Duration; + +use bittorrent_primitives::info_hash::InfoHash; +use serde::Serialize; +use url::Url; + +use crate::console::clients::http::Error; +use crate::http::client::responses::announce::Announce; +use crate::http::client::responses::scrape; +use crate::http::client::{requests, Client}; + +#[derive(Debug, Clone, Serialize)] +pub struct Checks { + url: Url, + results: Vec<(Check, Result<(), Error>)>, +} + +#[derive(Debug, Clone, Serialize)] +pub enum Check { + Announce, + Scrape, +} + +pub async fn run(http_trackers: Vec, timeout: Duration) -> Vec> { + let mut results = Vec::default(); + + tracing::debug!("HTTP trackers ..."); + + for ref url in http_trackers { + let mut base_url = url.clone(); + base_url.set_path(""); + + let mut checks = Checks { + url: url.clone(), + results: Vec::default(), + }; + + // Announce + { + let check = check_http_announce(&base_url, timeout).await.map(|_| ()); + + checks.results.push((Check::Announce, check)); + } + + // Scrape + { + let check = check_http_scrape(&base_url, timeout).await.map(|_| ()); + + checks.results.push((Check::Scrape, check)); + } + + if checks.results.iter().any(|f| f.1.is_err()) { + results.push(Err(checks)); + } else { + results.push(Ok(checks)); + } + } + + results +} + +async fn check_http_announce(url: &Url, timeout: Duration) -> Result { + let info_hash_str = "9c38422213e30bff212b30c360d26f9a02136422".to_string(); // # DevSkim: ignore DS173237 + let info_hash = InfoHash::from_str(&info_hash_str).expect("a valid info-hash is required"); + + let client = Client::new(url.clone(), timeout).map_err(|err| Error::HttpClientError { err })?; + + let response = client + .announce( + 
&requests::announce::QueryBuilder::with_default_values() + .with_info_hash(&info_hash) + .query(), + ) + .await + .map_err(|err| Error::HttpClientError { err })?; + + let response = response.bytes().await.map_err(|e| Error::ResponseError { err: e.into() })?; + + let response = serde_bencode::from_bytes::(&response).map_err(|e| Error::ParseBencodeError { + data: response, + err: e.into(), + })?; + + Ok(response) +} + +async fn check_http_scrape(url: &Url, timeout: Duration) -> Result { + let info_hashes: Vec = vec!["9c38422213e30bff212b30c360d26f9a02136422".to_string()]; // # DevSkim: ignore DS173237 + let query = requests::scrape::Query::try_from(info_hashes).expect("a valid array of info-hashes is required"); + + let client = Client::new(url.clone(), timeout).map_err(|err| Error::HttpClientError { err })?; + + let response = client.scrape(&query).await.map_err(|err| Error::HttpClientError { err })?; + + let response = response.bytes().await.map_err(|e| Error::ResponseError { err: e.into() })?; + + let response = scrape::Response::try_from_bencoded(&response).map_err(|e| Error::BencodeParseError { + data: response, + err: e.into(), + })?; + + Ok(response) +} diff --git a/packages/tracker-client/src/console/clients/checker/checks/mod.rs b/packages/tracker-client/src/console/clients/checker/checks/mod.rs new file mode 100644 index 000000000..f8b03f749 --- /dev/null +++ b/packages/tracker-client/src/console/clients/checker/checks/mod.rs @@ -0,0 +1,4 @@ +pub mod health; +pub mod http; +pub mod structs; +pub mod udp; diff --git a/packages/tracker-client/src/console/clients/checker/checks/structs.rs b/packages/tracker-client/src/console/clients/checker/checks/structs.rs new file mode 100644 index 000000000..d28e20c04 --- /dev/null +++ b/packages/tracker-client/src/console/clients/checker/checks/structs.rs @@ -0,0 +1,12 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize)] +pub struct Status { + pub code: String, + pub message: String, +} 
+#[derive(Serialize, Deserialize)] +pub struct CheckerOutput { + pub url: String, + pub status: Status, +} diff --git a/packages/tracker-client/src/console/clients/checker/checks/udp.rs b/packages/tracker-client/src/console/clients/checker/checks/udp.rs new file mode 100644 index 000000000..21bdcd1b7 --- /dev/null +++ b/packages/tracker-client/src/console/clients/checker/checks/udp.rs @@ -0,0 +1,134 @@ +use std::net::SocketAddr; +use std::time::Duration; + +use aquatic_udp_protocol::TransactionId; +use hex_literal::hex; +use serde::Serialize; +use url::Url; + +use crate::console::clients::udp::checker::Client; +use crate::console::clients::udp::Error; + +#[derive(Debug, Clone, Serialize)] +pub struct Checks { + remote_addr: SocketAddr, + results: Vec<(Check, Result<(), Error>)>, +} + +#[derive(Debug, Clone, Serialize)] +pub enum Check { + Setup, + Connect, + Announce, + Scrape, +} + +#[allow(clippy::missing_panics_doc)] +pub async fn run(udp_trackers: Vec, timeout: Duration) -> Vec> { + let mut results = Vec::default(); + + tracing::debug!("UDP trackers ..."); + + let info_hash = aquatic_udp_protocol::InfoHash(hex!("9c38422213e30bff212b30c360d26f9a02136422")); // # DevSkim: ignore DS173237 + + for remote_url in udp_trackers { + let remote_addr = resolve_socket_addr(&remote_url); + + let mut checks = Checks { + remote_addr, + results: Vec::default(), + }; + + tracing::debug!("UDP tracker: {:?}", remote_url); + + // Setup + let client = match Client::new(remote_addr, timeout).await { + Ok(client) => { + checks.results.push((Check::Setup, Ok(()))); + client + } + Err(err) => { + checks.results.push((Check::Setup, Err(err))); + results.push(Err(checks)); + continue; + } + }; + + let transaction_id = TransactionId::new(1); + + // Connect Remote + let connection_id = match client.send_connection_request(transaction_id).await { + Ok(connection_id) => { + checks.results.push((Check::Connect, Ok(()))); + connection_id + } + Err(err) => { + 
checks.results.push((Check::Connect, Err(err))); + results.push(Err(checks)); + continue; + } + }; + + // Announce + { + let check = client + .send_announce_request(transaction_id, connection_id, info_hash.into()) + .await + .map(|_| ()); + + checks.results.push((Check::Announce, check)); + } + + // Scrape + { + let check = client + .send_scrape_request(connection_id, transaction_id, &[info_hash.into()]) + .await + .map(|_| ()); + + checks.results.push((Check::Scrape, check)); + } + + if checks.results.iter().any(|f| f.1.is_err()) { + results.push(Err(checks)); + } else { + results.push(Ok(checks)); + } + } + + results +} + +fn resolve_socket_addr(url: &Url) -> SocketAddr { + let socket_addr = url.socket_addrs(|| None).unwrap(); + *socket_addr.first().unwrap() +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + + use url::Url; + + use crate::console::clients::checker::checks::udp::resolve_socket_addr; + + #[test] + fn it_should_resolve_the_socket_address_for_udp_scheme_urls_containing_a_domain() { + let socket_addr = resolve_socket_addr(&Url::parse("udp://localhost:8080").unwrap()); + + assert!( + socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + ); + } + + #[test] + fn it_should_resolve_the_socket_address_for_udp_scheme_urls_containing_an_ip() { + let socket_addr = resolve_socket_addr(&Url::parse("udp://localhost:8080").unwrap()); + + assert!( + socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + ); + } +} diff --git a/packages/tracker-client/src/console/clients/checker/config.rs b/packages/tracker-client/src/console/clients/checker/config.rs new file mode 100644 index 000000000..154dcae85 --- /dev/null +++ b/packages/tracker-client/src/console/clients/checker/config.rs @@ -0,0 +1,282 
@@ +use std::error::Error; +use std::fmt; + +use reqwest::Url as ServiceUrl; +use serde::Deserialize; + +/// It parses the configuration from a JSON format. +/// +/// # Errors +/// +/// Will return an error if the configuration is not valid. +/// +/// # Panics +/// +/// Will panic if unable to read the configuration file. +pub fn parse_from_json(json: &str) -> Result { + let plain_config: PlainConfiguration = serde_json::from_str(json).map_err(ConfigurationError::JsonParseError)?; + Configuration::try_from(plain_config) +} + +/// DTO for the configuration to serialize/deserialize configuration. +/// +/// Configuration does not need to be valid. +#[derive(Deserialize)] +struct PlainConfiguration { + pub udp_trackers: Vec, + pub http_trackers: Vec, + pub health_checks: Vec, +} + +/// Validated configuration +pub struct Configuration { + pub udp_trackers: Vec, + pub http_trackers: Vec, + pub health_checks: Vec, +} + +#[derive(Debug)] +pub enum ConfigurationError { + JsonParseError(serde_json::Error), + InvalidUdpAddress(std::net::AddrParseError), + InvalidUrl(url::ParseError), +} + +impl Error for ConfigurationError {} + +impl fmt::Display for ConfigurationError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ConfigurationError::JsonParseError(e) => write!(f, "JSON parse error: {e}"), + ConfigurationError::InvalidUdpAddress(e) => write!(f, "Invalid UDP address: {e}"), + ConfigurationError::InvalidUrl(e) => write!(f, "Invalid URL: {e}"), + } + } +} + +impl TryFrom for Configuration { + type Error = ConfigurationError; + + fn try_from(plain_config: PlainConfiguration) -> Result { + let udp_trackers = plain_config + .udp_trackers + .into_iter() + .map(|s| if s.starts_with("udp://") { s } else { format!("udp://{s}") }) + .map(|s| s.parse::().map_err(ConfigurationError::InvalidUrl)) + .collect::, _>>()?; + + let http_trackers = plain_config + .http_trackers + .into_iter() + .map(|s| s.parse::().map_err(ConfigurationError::InvalidUrl)) + 
.collect::, _>>()?; + + let health_checks = plain_config + .health_checks + .into_iter() + .map(|s| s.parse::().map_err(ConfigurationError::InvalidUrl)) + .collect::, _>>()?; + + Ok(Configuration { + udp_trackers, + http_trackers, + health_checks, + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn configuration_should_be_build_from_plain_serializable_configuration() { + let dto = PlainConfiguration { + udp_trackers: vec!["udp://127.0.0.1:8080".to_string()], + http_trackers: vec!["http://127.0.0.1:8080".to_string()], + health_checks: vec!["http://127.0.0.1:8080/health".to_string()], + }; + + let config = Configuration::try_from(dto).expect("A valid configuration"); + + assert_eq!(config.udp_trackers, vec![ServiceUrl::parse("udp://127.0.0.1:8080").unwrap()]); + + assert_eq!( + config.http_trackers, + vec![ServiceUrl::parse("http://127.0.0.1:8080").unwrap()] + ); + + assert_eq!( + config.health_checks, + vec![ServiceUrl::parse("http://127.0.0.1:8080/health").unwrap()] + ); + } + + mod building_configuration_from_plain_configuration_for { + + mod udp_trackers { + use crate::console::clients::checker::config::{Configuration, PlainConfiguration, ServiceUrl}; + + /* The plain configuration should allow UDP URLs with: + + - IP or domain. + - With or without scheme. + - With or without `announce` suffix. + - With or without `/` at the end of the authority section (with empty path). 
+ + For example: + + 127.0.0.1:6969 + 127.0.0.1:6969/ + 127.0.0.1:6969/announce + + localhost:6969 + localhost:6969/ + localhost:6969/announce + + udp://127.0.0.1:6969 + udp://127.0.0.1:6969/ + udp://127.0.0.1:6969/announce + + udp://localhost:6969 + udp://localhost:6969/ + udp://localhost:6969/announce + + */ + + #[test] + fn it_should_fail_when_a_tracker_udp_url_is_invalid() { + let plain_config = PlainConfiguration { + udp_trackers: vec!["invalid URL".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + + assert!(Configuration::try_from(plain_config).is_err()); + } + + #[test] + fn it_should_add_the_udp_scheme_to_the_udp_url_when_it_is_missing() { + let plain_config = PlainConfiguration { + udp_trackers: vec!["127.0.0.1:6969".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!(config.udp_trackers[0], "udp://127.0.0.1:6969".parse::().unwrap()); + } + + #[test] + fn it_should_allow_using_domains() { + let plain_config = PlainConfiguration { + udp_trackers: vec!["udp://localhost:6969".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!(config.udp_trackers[0], "udp://localhost:6969".parse::().unwrap()); + } + + #[test] + fn it_should_allow_the_url_to_have_an_empty_path() { + let plain_config = PlainConfiguration { + udp_trackers: vec!["127.0.0.1:6969/".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!(config.udp_trackers[0], "udp://127.0.0.1:6969/".parse::().unwrap()); + } + + #[test] + fn it_should_allow_the_url_to_contain_a_path() { + // This is the common format for UDP tracker URLs: + // udp://domain.com:6969/announce + + let plain_config = PlainConfiguration { + 
udp_trackers: vec!["127.0.0.1:6969/announce".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!( + config.udp_trackers[0], + "udp://127.0.0.1:6969/announce".parse::().unwrap() + ); + } + } + + mod http_trackers { + use crate::console::clients::checker::config::{Configuration, PlainConfiguration, ServiceUrl}; + + #[test] + fn it_should_fail_when_a_tracker_http_url_is_invalid() { + let plain_config = PlainConfiguration { + udp_trackers: vec![], + http_trackers: vec!["invalid URL".to_string()], + health_checks: vec![], + }; + + assert!(Configuration::try_from(plain_config).is_err()); + } + + #[test] + fn it_should_allow_the_url_to_contain_a_path() { + // This is the common format for HTTP tracker URLs: + // http://domain.com:7070/announce + + let plain_config = PlainConfiguration { + udp_trackers: vec![], + http_trackers: vec!["http://127.0.0.1:7070/announce".to_string()], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!( + config.http_trackers[0], + "http://127.0.0.1:7070/announce".parse::().unwrap() + ); + } + + #[test] + fn it_should_allow_the_url_to_contain_an_empty_path() { + let plain_config = PlainConfiguration { + udp_trackers: vec![], + http_trackers: vec!["http://127.0.0.1:7070/".to_string()], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!( + config.http_trackers[0], + "http://127.0.0.1:7070/".parse::().unwrap() + ); + } + } + + mod health_checks { + use crate::console::clients::checker::config::{Configuration, PlainConfiguration}; + + #[test] + fn it_should_fail_when_a_health_check_http_url_is_invalid() { + let plain_config = PlainConfiguration { + udp_trackers: vec![], + http_trackers: vec![], + health_checks: vec!["invalid URL".to_string()], + }; + + 
assert!(Configuration::try_from(plain_config).is_err()); + } + } + } +} diff --git a/packages/tracker-client/src/console/clients/checker/console.rs b/packages/tracker-client/src/console/clients/checker/console.rs new file mode 100644 index 000000000..b55c559fc --- /dev/null +++ b/packages/tracker-client/src/console/clients/checker/console.rs @@ -0,0 +1,38 @@ +use super::printer::{Printer, CLEAR_SCREEN}; + +pub struct Console {} + +impl Default for Console { + fn default() -> Self { + Self::new() + } +} + +impl Console { + #[must_use] + pub fn new() -> Self { + Self {} + } +} + +impl Printer for Console { + fn clear(&self) { + self.print(CLEAR_SCREEN); + } + + fn print(&self, output: &str) { + print!("{}", &output); + } + + fn eprint(&self, output: &str) { + eprint!("{}", &output); + } + + fn println(&self, output: &str) { + println!("{}", &output); + } + + fn eprintln(&self, output: &str) { + eprintln!("{}", &output); + } +} diff --git a/packages/tracker-client/src/console/clients/checker/logger.rs b/packages/tracker-client/src/console/clients/checker/logger.rs new file mode 100644 index 000000000..50e97189f --- /dev/null +++ b/packages/tracker-client/src/console/clients/checker/logger.rs @@ -0,0 +1,72 @@ +use std::cell::RefCell; + +use super::printer::{Printer, CLEAR_SCREEN}; + +pub struct Logger { + output: RefCell, +} + +impl Default for Logger { + fn default() -> Self { + Self::new() + } +} + +impl Logger { + #[must_use] + pub fn new() -> Self { + Self { + output: RefCell::new(String::new()), + } + } + + pub fn log(&self) -> String { + self.output.borrow().clone() + } +} + +impl Printer for Logger { + fn clear(&self) { + self.print(CLEAR_SCREEN); + } + + fn print(&self, output: &str) { + *self.output.borrow_mut() = format!("{}{}", self.output.borrow(), &output); + } + + fn eprint(&self, output: &str) { + *self.output.borrow_mut() = format!("{}{}", self.output.borrow(), &output); + } + + fn println(&self, output: &str) { + self.print(&format!("{}\n", &output)); 
+ } + + fn eprintln(&self, output: &str) { + self.eprint(&format!("{}\n", &output)); + } +} + +#[cfg(test)] +mod tests { + use crate::console::clients::checker::logger::Logger; + use crate::console::clients::checker::printer::{Printer, CLEAR_SCREEN}; + + #[test] + fn should_capture_the_clear_screen_command() { + let console_logger = Logger::new(); + + console_logger.clear(); + + assert_eq!(CLEAR_SCREEN, console_logger.log()); + } + + #[test] + fn should_capture_the_print_command_output() { + let console_logger = Logger::new(); + + console_logger.print("OUTPUT"); + + assert_eq!("OUTPUT", console_logger.log()); + } +} diff --git a/packages/tracker-client/src/console/clients/checker/mod.rs b/packages/tracker-client/src/console/clients/checker/mod.rs new file mode 100644 index 000000000..d26a4a686 --- /dev/null +++ b/packages/tracker-client/src/console/clients/checker/mod.rs @@ -0,0 +1,7 @@ +pub mod app; +pub mod checks; +pub mod config; +pub mod console; +pub mod logger; +pub mod printer; +pub mod service; diff --git a/packages/tracker-client/src/console/clients/checker/printer.rs b/packages/tracker-client/src/console/clients/checker/printer.rs new file mode 100644 index 000000000..d590dfedb --- /dev/null +++ b/packages/tracker-client/src/console/clients/checker/printer.rs @@ -0,0 +1,9 @@ +pub const CLEAR_SCREEN: &str = "\x1B[2J\x1B[1;1H"; + +pub trait Printer { + fn clear(&self); + fn print(&self, output: &str); + fn eprint(&self, output: &str); + fn println(&self, output: &str); + fn eprintln(&self, output: &str); +} diff --git a/packages/tracker-client/src/console/clients/checker/service.rs b/packages/tracker-client/src/console/clients/checker/service.rs new file mode 100644 index 000000000..acd312d8c --- /dev/null +++ b/packages/tracker-client/src/console/clients/checker/service.rs @@ -0,0 +1,62 @@ +use std::sync::Arc; + +use futures::FutureExt as _; +use serde::Serialize; +use tokio::task::{JoinError, JoinSet}; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; 
+ +use super::checks::{health, http, udp}; +use super::config::Configuration; +use super::console::Console; +use crate::console::clients::checker::printer::Printer; + +pub struct Service { + pub(crate) config: Arc, + pub(crate) console: Console, +} + +#[derive(Debug, Clone, Serialize)] +pub enum CheckResult { + Udp(Result), + Http(Result), + Health(Result), +} + +impl Service { + /// # Errors + /// + /// It will return an error if some of the tests panic or otherwise fail to run. + /// On success it will return a vector of `Ok(())` of [`CheckResult`]. + /// + /// # Panics + /// + /// It would panic if `serde_json` produces invalid json for the `to_string_pretty` function. + pub async fn run_checks(self) -> Result, JoinError> { + tracing::info!("Running checks for trackers ..."); + + let mut check_results = Vec::default(); + + let mut checks = JoinSet::new(); + checks.spawn( + udp::run(self.config.udp_trackers.clone(), DEFAULT_TIMEOUT).map(|mut f| f.drain(..).map(CheckResult::Udp).collect()), + ); + checks.spawn( + http::run(self.config.http_trackers.clone(), DEFAULT_TIMEOUT) + .map(|mut f| f.drain(..).map(CheckResult::Http).collect()), + ); + checks.spawn( + health::run(self.config.health_checks.clone(), DEFAULT_TIMEOUT) + .map(|mut f| f.drain(..).map(CheckResult::Health).collect()), + ); + + while let Some(results) = checks.join_next().await { + check_results.append(&mut results?); + } + + let json_output = serde_json::json!(check_results); + self.console + .println(&serde_json::to_string_pretty(&json_output).expect("it should consume valid json")); + + Ok(check_results) + } +} diff --git a/packages/tracker-client/src/console/clients/http/app.rs b/packages/tracker-client/src/console/clients/http/app.rs new file mode 100644 index 000000000..8db6fe46d --- /dev/null +++ b/packages/tracker-client/src/console/clients/http/app.rs @@ -0,0 +1,102 @@ +//! HTTP Tracker client: +//! +//! Examples: +//! +//! `Announce` request: +//! +//! ```text +//! 
cargo run --bin http_tracker_client announce http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! `Scrape` request: +//! +//! ```text +//! cargo run --bin http_tracker_client scrape http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +use std::str::FromStr; +use std::time::Duration; + +use anyhow::Context; +use bittorrent_primitives::info_hash::InfoHash; +use clap::{Parser, Subcommand}; +use reqwest::Url; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; + +use crate::http::client::requests::announce::QueryBuilder; +use crate::http::client::responses::announce::Announce; +use crate::http::client::responses::scrape; +use crate::http::client::{requests, Client}; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + #[command(subcommand)] + command: Command, +} + +#[derive(Subcommand, Debug)] +enum Command { + Announce { tracker_url: String, info_hash: String }, + Scrape { tracker_url: String, info_hashes: Vec }, +} + +/// # Errors +/// +/// Will return an error if the command fails. +pub async fn run() -> anyhow::Result<()> { + let args = Args::parse(); + + match args.command { + Command::Announce { tracker_url, info_hash } => { + announce_command(tracker_url, info_hash, DEFAULT_TIMEOUT).await?; + } + Command::Scrape { + tracker_url, + info_hashes, + } => { + scrape_command(&tracker_url, &info_hashes, DEFAULT_TIMEOUT).await?; + } + } + + Ok(()) +} + +async fn announce_command(tracker_url: String, info_hash: String, timeout: Duration) -> anyhow::Result<()> { + let base_url = Url::parse(&tracker_url).context("failed to parse HTTP tracker base URL")?; + let info_hash = + InfoHash::from_str(&info_hash).expect("Invalid infohash. Example infohash: `9c38422213e30bff212b30c360d26f9a02136422`"); + + let response = Client::new(base_url, timeout)? 
+ .announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) + .await?; + + let body = response.bytes().await?; + + let announce_response: Announce = serde_bencode::from_bytes(&body) + .unwrap_or_else(|_| panic!("response body should be a valid announce response, got: \"{:#?}\"", &body)); + + let json = serde_json::to_string(&announce_response).context("failed to serialize scrape response into JSON")?; + + println!("{json}"); + + Ok(()) +} + +async fn scrape_command(tracker_url: &str, info_hashes: &[String], timeout: Duration) -> anyhow::Result<()> { + let base_url = Url::parse(tracker_url).context("failed to parse HTTP tracker base URL")?; + + let query = requests::scrape::Query::try_from(info_hashes).context("failed to parse infohashes")?; + + let response = Client::new(base_url, timeout)?.scrape(&query).await?; + + let body = response.bytes().await?; + + let scrape_response = scrape::Response::try_from_bencoded(&body) + .unwrap_or_else(|_| panic!("response body should be a valid scrape response, got: \"{:#?}\"", &body)); + + let json = serde_json::to_string(&scrape_response).context("failed to serialize scrape response into JSON")?; + + println!("{json}"); + + Ok(()) +} diff --git a/packages/tracker-client/src/console/clients/http/mod.rs b/packages/tracker-client/src/console/clients/http/mod.rs new file mode 100644 index 000000000..e4b6fbe57 --- /dev/null +++ b/packages/tracker-client/src/console/clients/http/mod.rs @@ -0,0 +1,34 @@ +use std::sync::Arc; + +use serde::Serialize; +use thiserror::Error; + +use crate::http::client::responses::scrape::BencodeParseError; + +pub mod app; + +#[derive(Debug, Clone, Error, Serialize)] +#[serde(into = "String")] +pub enum Error { + #[error("Http request did not receive a response within the timeout: {err:?}")] + HttpClientError { err: crate::http::client::Error }, + #[error("Http failed to get a response at all: {err:?}")] + ResponseError { err: Arc }, + #[error("Failed to deserialize the bencoded 
response data with the error: \"{err:?}\"")] + ParseBencodeError { + data: hyper::body::Bytes, + err: Arc, + }, + + #[error("Failed to deserialize the bencoded response data with the error: \"{err:?}\"")] + BencodeParseError { + data: hyper::body::Bytes, + err: Arc, + }, +} + +impl From for String { + fn from(value: Error) -> Self { + value.to_string() + } +} diff --git a/packages/tracker-client/src/console/clients/mod.rs b/packages/tracker-client/src/console/clients/mod.rs new file mode 100644 index 000000000..8492f8ba5 --- /dev/null +++ b/packages/tracker-client/src/console/clients/mod.rs @@ -0,0 +1,4 @@ +//! Console clients. +pub mod checker; +pub mod http; +pub mod udp; diff --git a/packages/tracker-client/src/console/clients/udp/app.rs b/packages/tracker-client/src/console/clients/udp/app.rs new file mode 100644 index 000000000..a2736c365 --- /dev/null +++ b/packages/tracker-client/src/console/clients/udp/app.rs @@ -0,0 +1,208 @@ +//! UDP Tracker client: +//! +//! Examples: +//! +//! Announce request: +//! +//! ```text +//! cargo run --bin udp_tracker_client announce 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! Announce response: +//! +//! ```json +//! { +//! "transaction_id": -888840697 +//! "announce_interval": 120, +//! "leechers": 0, +//! "seeders": 1, +//! "peers": [ +//! "123.123.123.123:51289" +//! ], +//! } +//! ``` +//! +//! Scrape request: +//! +//! ```text +//! cargo run --bin udp_tracker_client scrape 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! Scrape response: +//! +//! ```json +//! { +//! "transaction_id": -888840697, +//! "torrent_stats": [ +//! { +//! "completed": 0, +//! "leechers": 0, +//! "seeders": 0 +//! }, +//! { +//! "completed": 0, +//! "leechers": 0, +//! "seeders": 0 +//! } +//! ] +//! } +//! ``` +//! +//! You can use an URL with instead of the socket address. For example: +//! +//! ```text +//! 
cargo run --bin udp_tracker_client scrape udp://localhost:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! cargo run --bin udp_tracker_client scrape udp://localhost:6969/scrape 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! The protocol (`udp://`) in the URL is mandatory. The path (`/scrape`) is optional. It always uses `/scrape`. +use std::net::{SocketAddr, ToSocketAddrs}; +use std::str::FromStr; + +use anyhow::Context; +use aquatic_udp_protocol::{Response, TransactionId}; +use bittorrent_primitives::info_hash::InfoHash as TorrustInfoHash; +use clap::{Parser, Subcommand}; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; +use tracing::level_filters::LevelFilter; +use url::Url; + +use super::Error; +use crate::console::clients::udp::checker; +use crate::console::clients::udp::responses::dto::SerializableResponse; +use crate::console::clients::udp::responses::json::ToJson; + +const RANDOM_TRANSACTION_ID: i32 = -888_840_697; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + #[command(subcommand)] + command: Command, +} + +#[derive(Subcommand, Debug)] +enum Command { + Announce { + #[arg(value_parser = parse_socket_addr)] + tracker_socket_addr: SocketAddr, + #[arg(value_parser = parse_info_hash)] + info_hash: TorrustInfoHash, + }, + Scrape { + #[arg(value_parser = parse_socket_addr)] + tracker_socket_addr: SocketAddr, + #[arg(value_parser = parse_info_hash, num_args = 1..=74, value_delimiter = ' ')] + info_hashes: Vec, + }, +} + +/// # Errors +/// +/// Will return an error if the command fails. 
+/// +/// +pub async fn run() -> anyhow::Result<()> { + tracing_stdout_init(LevelFilter::INFO); + + let args = Args::parse(); + + let response = match args.command { + Command::Announce { + tracker_socket_addr: remote_addr, + info_hash, + } => handle_announce(remote_addr, &info_hash).await?, + Command::Scrape { + tracker_socket_addr: remote_addr, + info_hashes, + } => handle_scrape(remote_addr, &info_hashes).await?, + }; + + let response: SerializableResponse = response.into(); + let response_json = response.to_json_string()?; + + print!("{response_json}"); + + Ok(()) +} + +fn tracing_stdout_init(filter: LevelFilter) { + tracing_subscriber::fmt().with_max_level(filter).init(); + tracing::debug!("Logging initialized"); +} + +async fn handle_announce(remote_addr: SocketAddr, info_hash: &TorrustInfoHash) -> Result { + let transaction_id = TransactionId::new(RANDOM_TRANSACTION_ID); + + let client = checker::Client::new(remote_addr, DEFAULT_TIMEOUT).await?; + + let connection_id = client.send_connection_request(transaction_id).await?; + + client.send_announce_request(transaction_id, connection_id, *info_hash).await +} + +async fn handle_scrape(remote_addr: SocketAddr, info_hashes: &[TorrustInfoHash]) -> Result { + let transaction_id = TransactionId::new(RANDOM_TRANSACTION_ID); + + let client = checker::Client::new(remote_addr, DEFAULT_TIMEOUT).await?; + + let connection_id = client.send_connection_request(transaction_id).await?; + + client.send_scrape_request(connection_id, transaction_id, info_hashes).await +} + +fn parse_socket_addr(tracker_socket_addr_str: &str) -> anyhow::Result { + tracing::debug!("Tracker socket address: {tracker_socket_addr_str:#?}"); + + // Check if the address is a valid URL. If so, extract the host and port. 
+ let resolved_addr = if let Ok(url) = Url::parse(tracker_socket_addr_str) { + tracing::debug!("Tracker socket address URL: {url:?}"); + + let host = url + .host_str() + .with_context(|| format!("invalid host in URL: `{tracker_socket_addr_str}`"))? + .to_owned(); + + let port = url + .port() + .with_context(|| format!("port not found in URL: `{tracker_socket_addr_str}`"))? + .to_owned(); + + (host, port) + } else { + // If not a URL, assume it's a host:port pair. + + let parts: Vec<&str> = tracker_socket_addr_str.split(':').collect(); + + if parts.len() != 2 { + return Err(anyhow::anyhow!( + "invalid address format: `{}`. Expected format is host:port", + tracker_socket_addr_str + )); + } + + let host = parts[0].to_owned(); + + let port = parts[1] + .parse::() + .with_context(|| format!("invalid port: `{}`", parts[1]))? + .to_owned(); + + (host, port) + }; + + tracing::debug!("Resolved address: {resolved_addr:#?}"); + + // Perform DNS resolution. + let socket_addrs: Vec<_> = resolved_addr.to_socket_addrs()?.collect(); + if socket_addrs.is_empty() { + Err(anyhow::anyhow!("DNS resolution failed for `{}`", tracker_socket_addr_str)) + } else { + Ok(socket_addrs[0]) + } +} + +fn parse_info_hash(info_hash_str: &str) -> anyhow::Result { + TorrustInfoHash::from_str(info_hash_str) + .map_err(|e| anyhow::Error::msg(format!("failed to parse info-hash `{info_hash_str}`: {e:?}"))) +} diff --git a/packages/tracker-client/src/console/clients/udp/checker.rs b/packages/tracker-client/src/console/clients/udp/checker.rs new file mode 100644 index 000000000..b9fd3a729 --- /dev/null +++ b/packages/tracker-client/src/console/clients/udp/checker.rs @@ -0,0 +1,177 @@ +use std::net::{Ipv4Addr, SocketAddr}; +use std::num::NonZeroU16; +use std::time::Duration; + +use aquatic_udp_protocol::common::InfoHash; +use aquatic_udp_protocol::{ + AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, NumberOfBytes, NumberOfPeers, + PeerId, PeerKey, Port, Response, 
ScrapeRequest, TransactionId, +}; +use bittorrent_primitives::info_hash::InfoHash as TorrustInfoHash; + +use super::Error; +use crate::udp::client::UdpTrackerClient; + +/// A UDP Tracker client to make test requests (checks). +#[derive(Debug)] +pub struct Client { + client: UdpTrackerClient, +} + +impl Client { + /// Creates a new `[Client]` for checking a UDP Tracker Service + /// + /// # Errors + /// + /// It will error if unable to bind and connect to the udp remote address. + /// + pub async fn new(remote_addr: SocketAddr, timeout: Duration) -> Result { + let client = UdpTrackerClient::new(remote_addr, timeout) + .await + .map_err(|err| Error::UnableToBindAndConnect { remote_addr, err })?; + + Ok(Self { client }) + } + + /// Returns the local addr of this [`Client`]. + /// + /// # Errors + /// + /// This function will return an error if the socket is somehow not bound. + pub fn local_addr(&self) -> std::io::Result { + self.client.client.socket.local_addr() + } + + /// Sends a connection request to the UDP Tracker server. + /// + /// # Errors + /// + /// Will return and error if + /// + /// - It can't connect to the remote UDP socket. + /// - It can't make a connection request successfully to the remote UDP + /// server (after successfully connecting to the remote UDP socket). + /// + /// # Panics + /// + /// Will panic if it receives an unexpected response. 
+ pub async fn send_connection_request(&self, transaction_id: TransactionId) -> Result { + tracing::debug!("Sending connection request with transaction id: {transaction_id:#?}"); + + let connect_request = ConnectRequest { transaction_id }; + + let _ = self + .client + .send(connect_request.into()) + .await + .map_err(|err| Error::UnableToSendConnectionRequest { err })?; + + let response = self + .client + .receive() + .await + .map_err(|err| Error::UnableToReceiveConnectResponse { err })?; + + match response { + Response::Connect(connect_response) => Ok(connect_response.connection_id), + _ => Err(Error::UnexpectedConnectionResponse { response }), + } + } + + /// Sends an announce request to the UDP Tracker server. + /// + /// # Errors + /// + /// Will return and error if the client is not connected. You have to connect + /// before calling this function. + /// + /// # Panics + /// + /// It will panic if the `local_address` has a zero port. + pub async fn send_announce_request( + &self, + transaction_id: TransactionId, + connection_id: ConnectionId, + info_hash: TorrustInfoHash, + ) -> Result { + tracing::debug!("Sending announce request with transaction id: {transaction_id:#?}"); + + let port = NonZeroU16::new( + self.client + .client + .socket + .local_addr() + .expect("it should get the local address") + .port(), + ) + .expect("it should no be zero"); + + let announce_request = AnnounceRequest { + connection_id, + action_placeholder: AnnounceActionPlaceholder::default(), + transaction_id, + info_hash: InfoHash(info_hash.bytes()), + peer_id: PeerId(*b"-qB00000000000000001"), + bytes_downloaded: NumberOfBytes(0i64.into()), + bytes_uploaded: NumberOfBytes(0i64.into()), + bytes_left: NumberOfBytes(0i64.into()), + event: AnnounceEvent::Started.into(), + ip_address: Ipv4Addr::new(0, 0, 0, 0).into(), + key: PeerKey::new(0i32), + peers_wanted: NumberOfPeers(1i32.into()), + port: Port::new(port), + }; + + let _ = self + .client + .send(announce_request.into()) + .await + 
.map_err(|err| Error::UnableToSendAnnounceRequest { err })?; + + let response = self + .client + .receive() + .await + .map_err(|err| Error::UnableToReceiveAnnounceResponse { err })?; + + Ok(response) + } + + /// Sends a scrape request to the UDP Tracker server. + /// + /// # Errors + /// + /// Will return and error if the client is not connected. You have to connect + /// before calling this function. + pub async fn send_scrape_request( + &self, + connection_id: ConnectionId, + transaction_id: TransactionId, + info_hashes: &[TorrustInfoHash], + ) -> Result { + tracing::debug!("Sending scrape request with transaction id: {transaction_id:#?}"); + + let scrape_request = ScrapeRequest { + connection_id, + transaction_id, + info_hashes: info_hashes + .iter() + .map(|torrust_info_hash| InfoHash(torrust_info_hash.bytes())) + .collect(), + }; + + let _ = self + .client + .send(scrape_request.into()) + .await + .map_err(|err| Error::UnableToSendScrapeRequest { err })?; + + let response = self + .client + .receive() + .await + .map_err(|err| Error::UnableToReceiveScrapeResponse { err })?; + + Ok(response) + } +} diff --git a/packages/tracker-client/src/console/clients/udp/mod.rs b/packages/tracker-client/src/console/clients/udp/mod.rs new file mode 100644 index 000000000..ae6271a78 --- /dev/null +++ b/packages/tracker-client/src/console/clients/udp/mod.rs @@ -0,0 +1,51 @@ +use std::net::SocketAddr; + +use aquatic_udp_protocol::Response; +use serde::Serialize; +use thiserror::Error; + +use crate::udp; + +pub mod app; +pub mod checker; +pub mod responses; + +#[derive(Error, Debug, Clone, Serialize)] +#[serde(into = "String")] +pub enum Error { + #[error("Failed to Connect to: {remote_addr}, with error: {err}")] + UnableToBindAndConnect { remote_addr: SocketAddr, err: udp::Error }, + + #[error("Failed to send a connection request, with error: {err}")] + UnableToSendConnectionRequest { err: udp::Error }, + + #[error("Failed to receive a connect response, with error: {err}")] + 
UnableToReceiveConnectResponse { err: udp::Error }, + + #[error("Failed to send a announce request, with error: {err}")] + UnableToSendAnnounceRequest { err: udp::Error }, + + #[error("Failed to receive a announce response, with error: {err}")] + UnableToReceiveAnnounceResponse { err: udp::Error }, + + #[error("Failed to send a scrape request, with error: {err}")] + UnableToSendScrapeRequest { err: udp::Error }, + + #[error("Failed to receive a scrape response, with error: {err}")] + UnableToReceiveScrapeResponse { err: udp::Error }, + + #[error("Failed to receive a response, with error: {err}")] + UnableToReceiveResponse { err: udp::Error }, + + #[error("Failed to get local address for connection: {err}")] + UnableToGetLocalAddr { err: udp::Error }, + + #[error("Failed to get a connection response: {response:?}")] + UnexpectedConnectionResponse { response: Response }, +} + +impl From for String { + fn from(value: Error) -> Self { + value.to_string() + } +} diff --git a/packages/tracker-client/src/console/clients/udp/responses/dto.rs b/packages/tracker-client/src/console/clients/udp/responses/dto.rs new file mode 100644 index 000000000..93320b0f7 --- /dev/null +++ b/packages/tracker-client/src/console/clients/udp/responses/dto.rs @@ -0,0 +1,128 @@ +//! Aquatic responses are not serializable. These are the serializable wrappers. 
+use std::net::{Ipv4Addr, Ipv6Addr}; + +use aquatic_udp_protocol::Response::{self}; +use aquatic_udp_protocol::{AnnounceResponse, ConnectResponse, ErrorResponse, Ipv4AddrBytes, Ipv6AddrBytes, ScrapeResponse}; +use serde::Serialize; + +#[derive(Serialize)] +pub enum SerializableResponse { + Connect(ConnectSerializableResponse), + AnnounceIpv4(AnnounceSerializableResponse), + AnnounceIpv6(AnnounceSerializableResponse), + Scrape(ScrapeSerializableResponse), + Error(ErrorSerializableResponse), +} + +impl From for SerializableResponse { + fn from(response: Response) -> Self { + match response { + Response::Connect(response) => SerializableResponse::Connect(ConnectSerializableResponse::from(response)), + Response::AnnounceIpv4(response) => SerializableResponse::AnnounceIpv4(AnnounceSerializableResponse::from(response)), + Response::AnnounceIpv6(response) => SerializableResponse::AnnounceIpv6(AnnounceSerializableResponse::from(response)), + Response::Scrape(response) => SerializableResponse::Scrape(ScrapeSerializableResponse::from(response)), + Response::Error(response) => SerializableResponse::Error(ErrorSerializableResponse::from(response)), + } + } +} + +#[derive(Serialize)] +pub struct ConnectSerializableResponse { + transaction_id: i32, + connection_id: i64, +} + +impl From for ConnectSerializableResponse { + fn from(connect: ConnectResponse) -> Self { + Self { + transaction_id: connect.transaction_id.0.into(), + connection_id: connect.connection_id.0.into(), + } + } +} + +#[derive(Serialize)] +pub struct AnnounceSerializableResponse { + transaction_id: i32, + announce_interval: i32, + leechers: i32, + seeders: i32, + peers: Vec, +} + +impl From> for AnnounceSerializableResponse { + fn from(announce: AnnounceResponse) -> Self { + Self { + transaction_id: announce.fixed.transaction_id.0.into(), + announce_interval: announce.fixed.announce_interval.0.into(), + leechers: announce.fixed.leechers.0.into(), + seeders: announce.fixed.seeders.0.into(), + peers: announce + 
.peers + .iter() + .map(|peer| format!("{}:{}", Ipv4Addr::from(peer.ip_address), peer.port.0)) + .collect::>(), + } + } +} + +impl From> for AnnounceSerializableResponse { + fn from(announce: AnnounceResponse) -> Self { + Self { + transaction_id: announce.fixed.transaction_id.0.into(), + announce_interval: announce.fixed.announce_interval.0.into(), + leechers: announce.fixed.leechers.0.into(), + seeders: announce.fixed.seeders.0.into(), + peers: announce + .peers + .iter() + .map(|peer| format!("{}:{}", Ipv6Addr::from(peer.ip_address), peer.port.0)) + .collect::>(), + } + } +} + +#[derive(Serialize)] +pub struct ScrapeSerializableResponse { + transaction_id: i32, + torrent_stats: Vec, +} + +impl From for ScrapeSerializableResponse { + fn from(scrape: ScrapeResponse) -> Self { + Self { + transaction_id: scrape.transaction_id.0.into(), + torrent_stats: scrape + .torrent_stats + .iter() + .map(|torrent_scrape_statistics| TorrentStats { + seeders: torrent_scrape_statistics.seeders.0.into(), + completed: torrent_scrape_statistics.completed.0.into(), + leechers: torrent_scrape_statistics.leechers.0.into(), + }) + .collect::>(), + } + } +} + +#[derive(Serialize)] +pub struct ErrorSerializableResponse { + transaction_id: i32, + message: String, +} + +impl From for ErrorSerializableResponse { + fn from(error: ErrorResponse) -> Self { + Self { + transaction_id: error.transaction_id.0.into(), + message: error.message.to_string(), + } + } +} + +#[derive(Serialize)] +struct TorrentStats { + seeders: i32, + completed: i32, + leechers: i32, +} diff --git a/packages/tracker-client/src/console/clients/udp/responses/json.rs b/packages/tracker-client/src/console/clients/udp/responses/json.rs new file mode 100644 index 000000000..5d2bd6b89 --- /dev/null +++ b/packages/tracker-client/src/console/clients/udp/responses/json.rs @@ -0,0 +1,25 @@ +use anyhow::Context; +use serde::Serialize; + +use super::dto::SerializableResponse; + +#[allow(clippy::module_name_repetitions)] +pub trait 
ToJson { + /// + /// Returns a string with the JSON serialized version of the response + /// + /// # Errors + /// + /// Will return an error if serialization fails. + /// + fn to_json_string(&self) -> anyhow::Result + where + Self: Serialize, + { + let pretty_json = serde_json::to_string_pretty(self).context("response JSON serialization")?; + + Ok(pretty_json) + } +} + +impl ToJson for SerializableResponse {} diff --git a/packages/tracker-client/src/console/clients/udp/responses/mod.rs b/packages/tracker-client/src/console/clients/udp/responses/mod.rs new file mode 100644 index 000000000..e6d2e5e51 --- /dev/null +++ b/packages/tracker-client/src/console/clients/udp/responses/mod.rs @@ -0,0 +1,2 @@ +pub mod dto; +pub mod json; diff --git a/packages/tracker-client/src/console/mod.rs b/packages/tracker-client/src/console/mod.rs new file mode 100644 index 000000000..4b4cb9de4 --- /dev/null +++ b/packages/tracker-client/src/console/mod.rs @@ -0,0 +1,2 @@ +//! Console apps. +pub mod clients; diff --git a/packages/tracker-client/src/http/client/mod.rs b/packages/tracker-client/src/http/client/mod.rs new file mode 100644 index 000000000..3c904a7c9 --- /dev/null +++ b/packages/tracker-client/src/http/client/mod.rs @@ -0,0 +1,220 @@ +pub mod requests; +pub mod responses; + +use std::net::IpAddr; +use std::sync::Arc; +use std::time::Duration; + +use derive_more::Display; +use hyper::StatusCode; +use requests::{announce, scrape}; +use reqwest::{Response, Url}; +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +#[derive(Debug, Clone, Error)] +pub enum Error { + #[error("Failed to Build a Http Client: {err:?}")] + ClientBuildingError { err: Arc }, + #[error("Failed to get a response: {err:?}")] + ResponseError { err: Arc }, + #[error("Returned a non-success code: \"{code}\" with the response: \"{response:?}\"")] + UnsuccessfulResponse { code: StatusCode, response: Arc }, +} + +/// HTTP Tracker Client +pub struct Client { + client: reqwest::Client, + base_url: Url, + 
key: Option, +} + +/// URL components in this context: +/// +/// ```text +/// http://127.0.0.1:62304/announce/YZ....rJ?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// \_____________________/\_______________/ \__________________________________________________________/ +/// | | | +/// base url path query +/// ``` +impl Client { + /// # Errors + /// + /// This method fails if the client builder fails. + pub fn new(base_url: Url, timeout: Duration) -> Result { + let client = reqwest::Client::builder() + .timeout(timeout) + .build() + .map_err(|e| Error::ClientBuildingError { err: e.into() })?; + + Ok(Self { + base_url, + client, + key: None, + }) + } + + /// Creates the new client binding it to an specific local address. + /// + /// # Errors + /// + /// This method fails if the client builder fails. + pub fn bind(base_url: Url, timeout: Duration, local_address: IpAddr) -> Result { + let client = reqwest::Client::builder() + .timeout(timeout) + .local_address(local_address) + .build() + .map_err(|e| Error::ClientBuildingError { err: e.into() })?; + + Ok(Self { + base_url, + client, + key: None, + }) + } + + /// # Errors + /// + /// This method fails if the client builder fails. 
+ pub fn authenticated(base_url: Url, timeout: Duration, key: Key) -> Result { + let client = reqwest::Client::builder() + .timeout(timeout) + .build() + .map_err(|e| Error::ClientBuildingError { err: e.into() })?; + + Ok(Self { + base_url, + client, + key: Some(key), + }) + } + + /// # Errors + /// + /// This method fails if the returned response was not successful + pub async fn announce(&self, query: &announce::Query) -> Result { + let response = self.get(&self.build_announce_path_and_query(query)).await?; + + if response.status().is_success() { + Ok(response) + } else { + Err(Error::UnsuccessfulResponse { + code: response.status(), + response: response.into(), + }) + } + } + + /// # Errors + /// + /// This method fails if the returned response was not successful + pub async fn scrape(&self, query: &scrape::Query) -> Result { + let response = self.get(&self.build_scrape_path_and_query(query)).await?; + + if response.status().is_success() { + Ok(response) + } else { + Err(Error::UnsuccessfulResponse { + code: response.status(), + response: response.into(), + }) + } + } + + /// # Errors + /// + /// This method fails if the returned response was not successful + pub async fn announce_with_header(&self, query: &announce::Query, key: &str, value: &str) -> Result { + let response = self + .get_with_header(&self.build_announce_path_and_query(query), key, value) + .await?; + + if response.status().is_success() { + Ok(response) + } else { + Err(Error::UnsuccessfulResponse { + code: response.status(), + response: response.into(), + }) + } + } + + /// # Errors + /// + /// This method fails if the returned response was not successful + pub async fn health_check(&self) -> Result { + let response = self.get(&self.build_path("health_check")).await?; + + if response.status().is_success() { + Ok(response) + } else { + Err(Error::UnsuccessfulResponse { + code: response.status(), + response: response.into(), + }) + } + } + + /// # Errors + /// + /// This method fails if there was 
an error while sending request. + pub async fn get(&self, path: &str) -> Result { + self.client + .get(self.build_url(path)) + .send() + .await + .map_err(|e| Error::ResponseError { err: e.into() }) + } + + /// # Errors + /// + /// This method fails if there was an error while sending request. + pub async fn get_with_header(&self, path: &str, key: &str, value: &str) -> Result { + self.client + .get(self.build_url(path)) + .header(key, value) + .send() + .await + .map_err(|e| Error::ResponseError { err: e.into() }) + } + + fn build_announce_path_and_query(&self, query: &announce::Query) -> String { + format!("{}?{query}", self.build_path("announce")) + } + + fn build_scrape_path_and_query(&self, query: &scrape::Query) -> String { + format!("{}?{query}", self.build_path("scrape")) + } + + fn build_path(&self, path: &str) -> String { + match &self.key { + Some(key) => format!("{path}/{key}"), + None => path.to_string(), + } + } + + fn build_url(&self, path: &str) -> String { + let base_url = self.base_url(); + format!("{base_url}{path}") + } + + fn base_url(&self) -> String { + self.base_url.to_string() + } +} + +/// A token used for authentication. 
+#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, Display, Hash)] +pub struct Key(String); + +impl Key { + #[must_use] + pub fn new(value: &str) -> Self { + Self(value.to_owned()) + } + + #[must_use] + pub fn value(&self) -> &str { + &self.0 + } +} diff --git a/packages/tracker-client/src/http/client/requests/announce.rs b/packages/tracker-client/src/http/client/requests/announce.rs new file mode 100644 index 000000000..8f81cc80e --- /dev/null +++ b/packages/tracker-client/src/http/client/requests/announce.rs @@ -0,0 +1,275 @@ +use std::fmt; +use std::net::{IpAddr, Ipv4Addr}; +use std::str::FromStr; + +use aquatic_udp_protocol::PeerId; +use bittorrent_primitives::info_hash::InfoHash; +use serde_repr::Serialize_repr; + +use crate::http::{percent_encode_byte_array, ByteArray20}; + +pub struct Query { + pub info_hash: ByteArray20, + pub peer_addr: IpAddr, + pub downloaded: BaseTenASCII, + pub uploaded: BaseTenASCII, + pub peer_id: ByteArray20, + pub port: PortNumber, + pub left: BaseTenASCII, + pub event: Option, + pub compact: Option, +} + +impl fmt::Display for Query { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.build()) + } +} + +/// HTTP Tracker Announce Request: +/// +/// +/// +/// Some parameters in the specification are not implemented in this tracker yet. +impl Query { + /// It builds the URL query component for the announce request. + /// + /// This custom URL query params encoding is needed because `reqwest` does not allow + /// bytes arrays in query parameters. 
More info on this issue: + /// + /// + #[must_use] + pub fn build(&self) -> String { + self.params().to_string() + } + + #[must_use] + pub fn params(&self) -> QueryParams { + QueryParams::from(self) + } +} + +pub type BaseTenASCII = u64; +pub type PortNumber = u16; + +pub enum Event { + //Started, + //Stopped, + Completed, +} + +impl fmt::Display for Event { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + //Event::Started => write!(f, "started"), + //Event::Stopped => write!(f, "stopped"), + Event::Completed => write!(f, "completed"), + } + } +} + +#[derive(Serialize_repr, PartialEq, Debug)] +#[repr(u8)] +pub enum Compact { + Accepted = 1, + NotAccepted = 0, +} + +impl fmt::Display for Compact { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Compact::Accepted => write!(f, "1"), + Compact::NotAccepted => write!(f, "0"), + } + } +} + +pub struct QueryBuilder { + announce_query: Query, +} + +impl QueryBuilder { + /// # Panics + /// + /// Will panic if the default info-hash value is not a valid info-hash. 
+ #[must_use] + pub fn with_default_values() -> QueryBuilder { + let default_announce_query = Query { + info_hash: InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0, // # DevSkim: ignore DS173237 + peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), + downloaded: 0, + uploaded: 0, + peer_id: PeerId(*b"-qB00000000000000001").0, + port: 17548, + left: 0, + event: Some(Event::Completed), + compact: Some(Compact::NotAccepted), + }; + Self { + announce_query: default_announce_query, + } + } + + #[must_use] + pub fn with_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.announce_query.info_hash = info_hash.0; + self + } + + #[must_use] + pub fn with_peer_id(mut self, peer_id: &PeerId) -> Self { + self.announce_query.peer_id = peer_id.0; + self + } + + #[must_use] + pub fn with_compact(mut self, compact: Compact) -> Self { + self.announce_query.compact = Some(compact); + self + } + + #[must_use] + pub fn with_peer_addr(mut self, peer_addr: &IpAddr) -> Self { + self.announce_query.peer_addr = *peer_addr; + self + } + + #[must_use] + pub fn without_compact(mut self) -> Self { + self.announce_query.compact = None; + self + } + + #[must_use] + pub fn query(self) -> Query { + self.announce_query + } +} + +/// It contains all the GET parameters that can be used in a HTTP Announce request. +/// +/// Sample Announce URL with all the GET parameters (mandatory and optional): +/// +/// ```text +/// http://127.0.0.1:7070/announce? 
+/// info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 (mandatory) +/// peer_addr=192.168.1.88 +/// downloaded=0 +/// uploaded=0 +/// peer_id=%2DqB00000000000000000 (mandatory) +/// port=17548 (mandatory) +/// left=0 +/// event=completed +/// compact=0 +/// ``` +pub struct QueryParams { + pub info_hash: Option, + pub peer_addr: Option, + pub downloaded: Option, + pub uploaded: Option, + pub peer_id: Option, + pub port: Option, + pub left: Option, + pub event: Option, + pub compact: Option, +} + +impl std::fmt::Display for QueryParams { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut params = vec![]; + + if let Some(info_hash) = &self.info_hash { + params.push(("info_hash", info_hash)); + } + if let Some(peer_addr) = &self.peer_addr { + params.push(("peer_addr", peer_addr)); + } + if let Some(downloaded) = &self.downloaded { + params.push(("downloaded", downloaded)); + } + if let Some(uploaded) = &self.uploaded { + params.push(("uploaded", uploaded)); + } + if let Some(peer_id) = &self.peer_id { + params.push(("peer_id", peer_id)); + } + if let Some(port) = &self.port { + params.push(("port", port)); + } + if let Some(left) = &self.left { + params.push(("left", left)); + } + if let Some(event) = &self.event { + params.push(("event", event)); + } + if let Some(compact) = &self.compact { + params.push(("compact", compact)); + } + + let query = params + .iter() + .map(|param| format!("{}={}", param.0, param.1)) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +impl QueryParams { + pub fn from(announce_query: &Query) -> Self { + let event = announce_query.event.as_ref().map(std::string::ToString::to_string); + let compact = announce_query.compact.as_ref().map(std::string::ToString::to_string); + + Self { + info_hash: Some(percent_encode_byte_array(&announce_query.info_hash)), + peer_addr: Some(announce_query.peer_addr.to_string()), + downloaded: Some(announce_query.downloaded.to_string()), + uploaded: 
Some(announce_query.uploaded.to_string()), + peer_id: Some(percent_encode_byte_array(&announce_query.peer_id)), + port: Some(announce_query.port.to_string()), + left: Some(announce_query.left.to_string()), + event, + compact, + } + } + + pub fn remove_optional_params(&mut self) { + // todo: make them optional with the Option<...> in the AnnounceQuery struct + // if they are really optional. So that we can crete a minimal AnnounceQuery + // instead of removing the optional params afterwards. + // + // The original specification on: + // + // says only `ip` and `event` are optional. + // + // On + // says only `ip`, `numwant`, `key` and `trackerid` are optional. + // + // but the server is responding if all these params are not included. + self.peer_addr = None; + self.downloaded = None; + self.uploaded = None; + self.left = None; + self.event = None; + self.compact = None; + } + + /// # Panics + /// + /// Will panic if invalid param name is provided. + pub fn set(&mut self, param_name: &str, param_value: &str) { + match param_name { + "info_hash" => self.info_hash = Some(param_value.to_string()), + "peer_addr" => self.peer_addr = Some(param_value.to_string()), + "downloaded" => self.downloaded = Some(param_value.to_string()), + "uploaded" => self.uploaded = Some(param_value.to_string()), + "peer_id" => self.peer_id = Some(param_value.to_string()), + "port" => self.port = Some(param_value.to_string()), + "left" => self.left = Some(param_value.to_string()), + "event" => self.event = Some(param_value.to_string()), + "compact" => self.compact = Some(param_value.to_string()), + &_ => panic!("Invalid param name for announce query"), + } + } +} diff --git a/packages/tracker-client/src/http/client/requests/mod.rs b/packages/tracker-client/src/http/client/requests/mod.rs new file mode 100644 index 000000000..776d2dfbf --- /dev/null +++ b/packages/tracker-client/src/http/client/requests/mod.rs @@ -0,0 +1,2 @@ +pub mod announce; +pub mod scrape; diff --git 
a/packages/tracker-client/src/http/client/requests/scrape.rs b/packages/tracker-client/src/http/client/requests/scrape.rs new file mode 100644 index 000000000..1b423390b --- /dev/null +++ b/packages/tracker-client/src/http/client/requests/scrape.rs @@ -0,0 +1,172 @@ +use std::error::Error; +use std::fmt::{self}; +use std::str::FromStr; + +use bittorrent_primitives::info_hash::InfoHash; + +use crate::http::{percent_encode_byte_array, ByteArray20}; + +pub struct Query { + pub info_hash: Vec, +} + +impl fmt::Display for Query { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.build()) + } +} + +#[derive(Debug)] +#[allow(dead_code)] +pub struct ConversionError(String); + +impl fmt::Display for ConversionError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Invalid infohash: {}", self.0) + } +} + +impl Error for ConversionError {} + +impl TryFrom<&[String]> for Query { + type Error = ConversionError; + + fn try_from(info_hashes: &[String]) -> Result { + let mut validated_info_hashes: Vec = Vec::new(); + + for info_hash in info_hashes { + let validated_info_hash = InfoHash::from_str(info_hash).map_err(|_| ConversionError(info_hash.clone()))?; + validated_info_hashes.push(validated_info_hash.0); + } + + Ok(Self { + info_hash: validated_info_hashes, + }) + } +} + +impl TryFrom> for Query { + type Error = ConversionError; + + fn try_from(info_hashes: Vec) -> Result { + let mut validated_info_hashes: Vec = Vec::new(); + + for info_hash in info_hashes { + let validated_info_hash = InfoHash::from_str(&info_hash).map_err(|_| ConversionError(info_hash.clone()))?; + validated_info_hashes.push(validated_info_hash.0); + } + + Ok(Self { + info_hash: validated_info_hashes, + }) + } +} + +/// HTTP Tracker Scrape Request: +/// +/// +impl Query { + /// It builds the URL query component for the scrape request. 
+ /// + /// This custom URL query params encoding is needed because `reqwest` does not allow + /// bytes arrays in query parameters. More info on this issue: + /// + /// + #[must_use] + pub fn build(&self) -> String { + self.params().to_string() + } + + #[must_use] + pub fn params(&self) -> QueryParams { + QueryParams::from(self) + } +} + +pub struct QueryBuilder { + scrape_query: Query, +} + +impl Default for QueryBuilder { + fn default() -> Self { + let default_scrape_query = Query { + info_hash: [InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0].to_vec(), // # DevSkim: ignore DS173237 + }; + Self { + scrape_query: default_scrape_query, + } + } +} + +impl QueryBuilder { + #[must_use] + pub fn with_one_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.scrape_query.info_hash = [info_hash.0].to_vec(); + self + } + + #[must_use] + pub fn add_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.scrape_query.info_hash.push(info_hash.0); + self + } + + #[must_use] + pub fn query(self) -> Query { + self.scrape_query + } +} + +/// It contains all the GET parameters that can be used in a HTTP Scrape request. +/// +/// The `info_hash` param is the percent encoded of the the 20-byte array info hash. +/// +/// Sample Scrape URL with all the GET parameters: +/// +/// For `IpV4`: +/// +/// ```text +/// http://127.0.0.1:7070/scrape?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// ``` +/// +/// For `IpV6`: +/// +/// ```text +/// http://[::1]:7070/scrape?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// ``` +/// +/// You can add as many info hashes as you want, just adding the same param again. 
+pub struct QueryParams { + pub info_hash: Vec, +} + +impl QueryParams { + pub fn set_one_info_hash_param(&mut self, info_hash: &str) { + self.info_hash = vec![info_hash.to_string()]; + } +} + +impl std::fmt::Display for QueryParams { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let query = self + .info_hash + .iter() + .map(|info_hash| format!("info_hash={}", &info_hash)) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +impl QueryParams { + pub fn from(scrape_query: &Query) -> Self { + let info_hashes = scrape_query + .info_hash + .iter() + .map(percent_encode_byte_array) + .collect::>(); + + Self { info_hash: info_hashes } + } +} diff --git a/packages/tracker-client/src/http/client/responses/announce.rs b/packages/tracker-client/src/http/client/responses/announce.rs new file mode 100644 index 000000000..7f2d3611c --- /dev/null +++ b/packages/tracker-client/src/http/client/responses/announce.rs @@ -0,0 +1,126 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use serde::{Deserialize, Serialize}; +use torrust_tracker_primitives::peer; +use zerocopy::AsBytes as _; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Announce { + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + #[serde(rename = "min interval")] + pub min_interval: u32, + pub peers: Vec, // Peers using IPV4 and IPV6 +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct DictionaryPeer { + pub ip: String, + #[serde(rename = "peer id")] + #[serde(with = "serde_bytes")] + pub peer_id: Vec, + pub port: u16, +} + +impl From for DictionaryPeer { + fn from(peer: peer::Peer) -> Self { + DictionaryPeer { + peer_id: peer.peer_id.as_bytes().to_vec(), + ip: peer.peer_addr.ip().to_string(), + port: peer.peer_addr.port(), + } + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct DeserializedCompact { + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + #[serde(rename = "min interval")] + pub 
min_interval: u32, + #[serde(with = "serde_bytes")] + pub peers: Vec, +} + +impl DeserializedCompact { + /// # Errors + /// + /// Will return an error if bytes can't be deserialized. + pub fn from_bytes(bytes: &[u8]) -> Result { + serde_bencode::from_bytes::(bytes) + } +} + +#[derive(Debug, PartialEq)] +pub struct Compact { + // code-review: there could be a way to deserialize this struct directly + // by using serde instead of doing it manually. Or at least using a custom deserializer. + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + pub min_interval: u32, + pub peers: CompactPeerList, +} + +#[derive(Debug, PartialEq)] +pub struct CompactPeerList { + peers: Vec, +} + +impl CompactPeerList { + #[must_use] + pub fn new(peers: Vec) -> Self { + Self { peers } + } +} + +#[derive(Clone, Debug, PartialEq)] +pub struct CompactPeer { + ip: Ipv4Addr, + port: u16, +} + +impl CompactPeer { + /// # Panics + /// + /// Will panic if the provided socket address is a IPv6 IP address. + /// It's not supported for compact peers. 
+ #[must_use] + pub fn new(socket_addr: &SocketAddr) -> Self { + match socket_addr.ip() { + IpAddr::V4(ip) => Self { + ip, + port: socket_addr.port(), + }, + IpAddr::V6(_ip) => panic!("IPV6 is not supported for compact peer"), + } + } + + #[must_use] + pub fn new_from_bytes(bytes: &[u8]) -> Self { + Self { + ip: Ipv4Addr::new(bytes[0], bytes[1], bytes[2], bytes[3]), + port: u16::from_be_bytes([bytes[4], bytes[5]]), + } + } +} + +impl From for Compact { + fn from(compact_announce: DeserializedCompact) -> Self { + let mut peers = vec![]; + + for peer_bytes in compact_announce.peers.chunks_exact(6) { + peers.push(CompactPeer::new_from_bytes(peer_bytes)); + } + + Self { + complete: compact_announce.complete, + incomplete: compact_announce.incomplete, + interval: compact_announce.interval, + min_interval: compact_announce.min_interval, + peers: CompactPeerList::new(peers), + } + } +} diff --git a/packages/tracker-client/src/http/client/responses/error.rs b/packages/tracker-client/src/http/client/responses/error.rs new file mode 100644 index 000000000..00befdb54 --- /dev/null +++ b/packages/tracker-client/src/http/client/responses/error.rs @@ -0,0 +1,7 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Error { + #[serde(rename = "failure reason")] + pub failure_reason: String, +} diff --git a/packages/tracker-client/src/http/client/responses/mod.rs b/packages/tracker-client/src/http/client/responses/mod.rs new file mode 100644 index 000000000..bdc689056 --- /dev/null +++ b/packages/tracker-client/src/http/client/responses/mod.rs @@ -0,0 +1,3 @@ +pub mod announce; +pub mod error; +pub mod scrape; diff --git a/packages/tracker-client/src/http/client/responses/scrape.rs b/packages/tracker-client/src/http/client/responses/scrape.rs new file mode 100644 index 000000000..6c0e8800a --- /dev/null +++ b/packages/tracker-client/src/http/client/responses/scrape.rs @@ -0,0 +1,230 @@ +use std::collections::HashMap; +use 
std::fmt::Write; +use std::str; + +use serde::ser::SerializeMap; +use serde::{Deserialize, Serialize, Serializer}; +use serde_bencode::value::Value; + +use crate::http::{ByteArray20, InfoHash}; + +#[derive(Debug, PartialEq, Default, Deserialize)] +pub struct Response { + pub files: HashMap, +} + +impl Response { + #[must_use] + pub fn with_one_file(info_hash_bytes: ByteArray20, file: File) -> Self { + let mut files: HashMap = HashMap::new(); + files.insert(info_hash_bytes, file); + Self { files } + } + + /// # Errors + /// + /// Will return an error if the deserialized bencoded response can't not be converted into a valid response. + /// + /// # Panics + /// + /// Will panic if it can't deserialize the bencoded response. + pub fn try_from_bencoded(bytes: &[u8]) -> Result { + let scrape_response: DeserializedResponse = + serde_bencode::from_bytes(bytes).expect("provided bytes should be a valid bencoded response"); + Self::try_from(scrape_response) + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Default)] +pub struct File { + pub complete: i64, // The number of active peers that have completed downloading + pub downloaded: i64, // The number of peers that have ever completed downloading + pub incomplete: i64, // The number of active peers that have not completed downloading +} + +impl File { + #[must_use] + pub fn zeroed() -> Self { + Self::default() + } +} + +impl TryFrom for Response { + type Error = BencodeParseError; + + fn try_from(scrape_response: DeserializedResponse) -> Result { + parse_bencoded_response(&scrape_response.files) + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +struct DeserializedResponse { + pub files: Value, +} + +// Custom serialization for Response +impl Serialize for Response { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut map = serializer.serialize_map(Some(self.files.len()))?; + for (key, value) in &self.files { + // Convert ByteArray20 key to hex string + let hex_key = 
byte_array_to_hex_string(key); + map.serialize_entry(&hex_key, value)?; + } + map.end() + } +} + +// Helper function to convert ByteArray20 to hex string +fn byte_array_to_hex_string(byte_array: &ByteArray20) -> String { + let mut hex_string = String::with_capacity(byte_array.len() * 2); + for byte in byte_array { + write!(hex_string, "{byte:02x}").expect("Writing to string should never fail"); + } + hex_string +} + +#[derive(Default)] +pub struct ResponseBuilder { + response: Response, +} + +impl ResponseBuilder { + #[must_use] + pub fn add_file(mut self, info_hash_bytes: ByteArray20, file: File) -> Self { + self.response.files.insert(info_hash_bytes, file); + self + } + + #[must_use] + pub fn build(self) -> Response { + self.response + } +} + +#[derive(Debug)] +pub enum BencodeParseError { + InvalidValueExpectedDict { value: Value }, + InvalidValueExpectedInt { value: Value }, + InvalidFileField { value: Value }, + MissingFileField { field_name: String }, +} + +/// It parses a bencoded scrape response into a `Response` struct. 
+/// +/// For example: +/// +/// ```text +/// d5:filesd20:xxxxxxxxxxxxxxxxxxxxd8:completei11e10:downloadedi13772e10:incompletei19e +/// 20:yyyyyyyyyyyyyyyyyyyyd8:completei21e10:downloadedi206e10:incompletei20eee +/// ``` +/// +/// Response (JSON encoded for readability): +/// +/// ```text +/// { +/// 'files': { +/// 'xxxxxxxxxxxxxxxxxxxx': {'complete': 11, 'downloaded': 13772, 'incomplete': 19}, +/// 'yyyyyyyyyyyyyyyyyyyy': {'complete': 21, 'downloaded': 206, 'incomplete': 20} +/// } +/// } +fn parse_bencoded_response(value: &Value) -> Result { + let mut files: HashMap = HashMap::new(); + + match value { + Value::Dict(dict) => { + for file_element in dict { + let info_hash_byte_vec = file_element.0; + let file_value = file_element.1; + + let file = parse_bencoded_file(file_value).unwrap(); + + files.insert(InfoHash::new(info_hash_byte_vec).bytes(), file); + } + } + _ => return Err(BencodeParseError::InvalidValueExpectedDict { value: value.clone() }), + } + + Ok(Response { files }) +} + +/// It parses a bencoded dictionary into a `File` struct. 
+/// +/// For example: +/// +/// +/// ```text +/// d8:completei11e10:downloadedi13772e10:incompletei19ee +/// ``` +/// +/// into: +/// +/// ```text +/// File { +/// complete: 11, +/// downloaded: 13772, +/// incomplete: 19, +/// } +/// ``` +fn parse_bencoded_file(value: &Value) -> Result { + let file = match &value { + Value::Dict(dict) => { + let mut complete = None; + let mut downloaded = None; + let mut incomplete = None; + + for file_field in dict { + let field_name = file_field.0; + + let field_value = match file_field.1 { + Value::Int(number) => Ok(*number), + _ => Err(BencodeParseError::InvalidValueExpectedInt { + value: file_field.1.clone(), + }), + }?; + + if field_name == b"complete" { + complete = Some(field_value); + } else if field_name == b"downloaded" { + downloaded = Some(field_value); + } else if field_name == b"incomplete" { + incomplete = Some(field_value); + } else { + return Err(BencodeParseError::InvalidFileField { + value: file_field.1.clone(), + }); + } + } + + if complete.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "complete".to_string(), + }); + } + + if downloaded.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "downloaded".to_string(), + }); + } + + if incomplete.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "incomplete".to_string(), + }); + } + + File { + complete: complete.unwrap(), + downloaded: downloaded.unwrap(), + incomplete: incomplete.unwrap(), + } + } + _ => return Err(BencodeParseError::InvalidValueExpectedDict { value: value.clone() }), + }; + + Ok(file) +} diff --git a/packages/tracker-client/src/http/mod.rs b/packages/tracker-client/src/http/mod.rs new file mode 100644 index 000000000..dc144814d --- /dev/null +++ b/packages/tracker-client/src/http/mod.rs @@ -0,0 +1,27 @@ +pub mod client; +pub mod url_encoding; + +use percent_encoding::NON_ALPHANUMERIC; + +pub type ByteArray20 = [u8; 20]; + +#[must_use] +pub fn 
percent_encode_byte_array(bytes: &ByteArray20) -> String { + percent_encoding::percent_encode(bytes, NON_ALPHANUMERIC).to_string() +} + +pub struct InfoHash(ByteArray20); + +impl InfoHash { + #[must_use] + pub fn new(vec: &[u8]) -> Self { + let mut byte_array_20: ByteArray20 = Default::default(); + byte_array_20.clone_from_slice(vec); + Self(byte_array_20) + } + + #[must_use] + pub fn bytes(&self) -> ByteArray20 { + self.0 + } +} diff --git a/packages/tracker-client/src/http/url_encoding.rs b/packages/tracker-client/src/http/url_encoding.rs new file mode 100644 index 000000000..ee7ab166e --- /dev/null +++ b/packages/tracker-client/src/http/url_encoding.rs @@ -0,0 +1,132 @@ +//! This module contains functions for percent decoding infohashes and peer IDs. +//! +//! Percent encoding is an encoding format used to encode arbitrary data in a +//! format that is safe to use in URLs. It is used by the HTTP tracker protocol +//! to encode infohashes and peer ids in the URLs of requests. +//! +//! `BitTorrent` infohashes and peer ids are percent encoded like any other +//! arbitrary URL parameter. But they are encoded from binary data (byte arrays) +//! which may not be valid UTF-8. That makes hard to use the `percent_encoding` +//! crate to decode them because all of them expect a well-formed UTF-8 string. +//! However, percent encoding is not limited to UTF-8 strings. +//! +//! More information about "Percent Encoding" can be found here: +//! +//! - +//! - +//! - +use aquatic_udp_protocol::PeerId; +use bittorrent_primitives::info_hash::{self, InfoHash}; +use torrust_tracker_primitives::peer; + +/* code-review: this module is duplicated in torrust_tracker::servers::http::percent_encoding. + Should we move it to torrust_tracker_primitives? +*/ + +/// Percent decodes a percent encoded infohash. Internally an +/// [`InfoHash`] is a 20-byte array. 
+/// +/// For example, given the infohash `3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0`, +/// it's percent encoded representation is `%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0`. +/// +/// ```rust +/// use std::str::FromStr; +/// use torrust_tracker::servers::http::percent_encoding::percent_decode_info_hash; +/// use bittorrent_primitives::info_hash::InfoHash; +/// use torrust_tracker_primitives::peer; +/// +/// let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; +/// +/// let info_hash = percent_decode_info_hash(encoded_infohash).unwrap(); +/// +/// assert_eq!( +/// info_hash, +/// InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap() +/// ); +/// ``` +/// +/// # Errors +/// +/// Will return `Err` if the decoded bytes do not represent a valid +/// [`InfoHash`]. +pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result { + let bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); + InfoHash::try_from(bytes) +} + +/// Percent decodes a percent encoded peer id. Internally a peer [`Id`](PeerId) +/// is a 20-byte array. +/// +/// For example, given the peer id `*b"-qB00000000000000000"`, +/// it's percent encoded representation is `%2DqB00000000000000000`. +/// +/// ```rust +/// use std::str::FromStr; +/// +/// use aquatic_udp_protocol::PeerId; +/// use torrust_tracker::servers::http::percent_encoding::percent_decode_peer_id; +/// use bittorrent_primitives::info_hash::InfoHash; +/// +/// let encoded_peer_id = "%2DqB00000000000000000"; +/// +/// let peer_id = percent_decode_peer_id(encoded_peer_id).unwrap(); +/// +/// assert_eq!(peer_id, PeerId(*b"-qB00000000000000000")); +/// ``` +/// +/// # Errors +/// +/// Will return `Err` if if the decoded bytes do not represent a valid [`PeerId`]. +pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result { + let bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); + Ok(*peer::Id::try_from(bytes)?) 
+} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use aquatic_udp_protocol::PeerId; + use bittorrent_primitives::info_hash::InfoHash; + + use crate::http::url_encoding::{percent_decode_info_hash, percent_decode_peer_id}; + + #[test] + fn it_should_decode_a_percent_encoded_info_hash() { + let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; + + let info_hash = percent_decode_info_hash(encoded_infohash).unwrap(); + + assert_eq!( + info_hash, + InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap() + ); + } + + #[test] + fn it_should_fail_decoding_an_invalid_percent_encoded_info_hash() { + let invalid_encoded_infohash = "invalid percent-encoded infohash"; + + let info_hash = percent_decode_info_hash(invalid_encoded_infohash); + + assert!(info_hash.is_err()); + } + + #[test] + fn it_should_decode_a_percent_encoded_peer_id() { + let encoded_peer_id = "%2DqB00000000000000000"; + + let peer_id = percent_decode_peer_id(encoded_peer_id).unwrap(); + + assert_eq!(peer_id, PeerId(*b"-qB00000000000000000")); + } + + #[test] + fn it_should_fail_decoding_an_invalid_percent_encoded_peer_id() { + let invalid_encoded_peer_id = "invalid percent-encoded peer id"; + + let peer_id = percent_decode_peer_id(invalid_encoded_peer_id); + + assert!(peer_id.is_err()); + } +} diff --git a/packages/tracker-client/src/lib.rs b/packages/tracker-client/src/lib.rs new file mode 100644 index 000000000..344e1b577 --- /dev/null +++ b/packages/tracker-client/src/lib.rs @@ -0,0 +1,3 @@ +pub mod console; +pub mod http; +pub mod udp; diff --git a/packages/tracker-client/src/udp/client.rs b/packages/tracker-client/src/udp/client.rs new file mode 100644 index 000000000..facdfac38 --- /dev/null +++ b/packages/tracker-client/src/udp/client.rs @@ -0,0 +1,270 @@ +use core::result::Result::{Err, Ok}; +use std::io::Cursor; +use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; +use std::sync::Arc; +use std::time::Duration; + +use 
aquatic_udp_protocol::{ConnectRequest, Request, Response, TransactionId}; +use tokio::net::UdpSocket; +use tokio::time; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; +use zerocopy::network_endian::I32; + +use super::Error; +use crate::udp::MAX_PACKET_SIZE; + +pub const UDP_CLIENT_LOG_TARGET: &str = "UDP CLIENT"; + +#[allow(clippy::module_name_repetitions)] +#[derive(Debug)] +pub struct UdpClient { + /// The socket to connect to + pub socket: Arc, + + /// Timeout for sending and receiving packets + pub timeout: Duration, +} + +impl UdpClient { + /// Creates a new `UdpClient` bound to the default port and ipv6 address + /// + /// # Errors + /// + /// Will return error if unable to bind to any port or ip address. + /// + async fn bound_to_default_ipv4(timeout: Duration) -> Result { + let addr = SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0); + + Self::bound(addr, timeout).await + } + + /// Creates a new `UdpClient` bound to the default port and ipv6 address + /// + /// # Errors + /// + /// Will return error if unable to bind to any port or ip address. + /// + async fn bound_to_default_ipv6(timeout: Duration) -> Result { + let addr = SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 0); + + Self::bound(addr, timeout).await + } + + /// Creates a new `UdpClient` connected to a Udp server + /// + /// # Errors + /// + /// Will return any errors present in the call stack + /// + pub async fn connected(remote_addr: SocketAddr, timeout: Duration) -> Result { + let client = if remote_addr.is_ipv4() { + Self::bound_to_default_ipv4(timeout).await? + } else { + Self::bound_to_default_ipv6(timeout).await? + }; + + client.connect(remote_addr).await?; + Ok(client) + } + + /// Creates a `[UdpClient]` bound to a Socket. + /// + /// # Panics + /// + /// Panics if unable to get the `local_addr` of the bound socket. + /// + /// # Errors + /// + /// This function will return an error if the binding takes to long + /// or if there is an underlying OS error. 
+ pub async fn bound(addr: SocketAddr, timeout: Duration) -> Result { + tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "binding to socket: {addr:?} ..."); + + let socket = time::timeout(timeout, UdpSocket::bind(addr)) + .await + .map_err(|_| Error::TimeoutWhileBindingToSocket { addr })? + .map_err(|e| Error::UnableToBindToSocket { err: e.into(), addr })?; + + let addr = socket.local_addr().expect("it should get the local address"); + + tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "bound to socket: {addr:?}."); + + let udp_client = Self { + socket: Arc::new(socket), + timeout, + }; + + Ok(udp_client) + } + + /// # Errors + /// + /// Will return error if can't connect to the socket. + pub async fn connect(&self, remote_addr: SocketAddr) -> Result<(), Error> { + tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "connecting to remote: {remote_addr:?} ..."); + + let () = time::timeout(self.timeout, self.socket.connect(remote_addr)) + .await + .map_err(|_| Error::TimeoutWhileConnectingToRemote { remote_addr })? + .map_err(|e| Error::UnableToConnectToRemote { + err: e.into(), + remote_addr, + })?; + + tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "connected to remote: {remote_addr:?}."); + + Ok(()) + } + + /// # Errors + /// + /// Will return error if: + /// + /// - Can't write to the socket. + /// - Can't send data. + pub async fn send(&self, bytes: &[u8]) -> Result { + tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "sending {bytes:?} ..."); + + let () = time::timeout(self.timeout, self.socket.writable()) + .await + .map_err(|_| Error::TimeoutWaitForWriteableSocket)? + .map_err(|e| Error::UnableToGetWritableSocket { err: e.into() })?; + + let sent_bytes = time::timeout(self.timeout, self.socket.send(bytes)) + .await + .map_err(|_| Error::TimeoutWhileSendingData { data: bytes.to_vec() })? 
+ .map_err(|e| Error::UnableToSendData { + err: e.into(), + data: bytes.to_vec(), + })?; + + tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "sent {sent_bytes} bytes to remote."); + + Ok(sent_bytes) + } + + /// # Errors + /// + /// Will return error if: + /// + /// - Can't read from the socket. + /// - Can't receive data. + /// + /// # Panics + /// + pub async fn receive(&self) -> Result, Error> { + tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "receiving ..."); + + let mut buffer = [0u8; MAX_PACKET_SIZE]; + + let () = time::timeout(self.timeout, self.socket.readable()) + .await + .map_err(|_| Error::TimeoutWaitForReadableSocket)? + .map_err(|e| Error::UnableToGetReadableSocket { err: e.into() })?; + + let received_bytes = time::timeout(self.timeout, self.socket.recv(&mut buffer)) + .await + .map_err(|_| Error::TimeoutWhileReceivingData)? + .map_err(|e| Error::UnableToReceivingData { err: e.into() })?; + + let mut received: Vec = buffer.to_vec(); + Vec::truncate(&mut received, received_bytes); + + tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "received {received_bytes} bytes: {received:?}"); + + Ok(received) + } +} + +#[allow(clippy::module_name_repetitions)] +#[derive(Debug)] +pub struct UdpTrackerClient { + pub client: UdpClient, +} + +impl UdpTrackerClient { + /// Creates a new `UdpTrackerClient` connected to a Udp Tracker server + /// + /// # Errors + /// + /// If unable to connect to the remote address. + /// + pub async fn new(remote_addr: SocketAddr, timeout: Duration) -> Result { + let client = UdpClient::connected(remote_addr, timeout).await?; + Ok(UdpTrackerClient { client }) + } + + /// # Errors + /// + /// Will return error if can't write request to bytes. + pub async fn send(&self, request: Request) -> Result { + tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "sending request {request:?} ..."); + + // Write request into a buffer + // todo: optimize the pre-allocated amount based upon request type. 
+ let mut writer = Cursor::new(Vec::with_capacity(200)); + let () = request + .write_bytes(&mut writer) + .map_err(|e| Error::UnableToWriteDataFromRequest { err: e.into(), request })?; + + self.client.send(writer.get_ref()).await + } + + /// # Errors + /// + /// Will return error if can't create response from the received payload (bytes buffer). + pub async fn receive(&self) -> Result { + let response = self.client.receive().await?; + + tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "received {} bytes: {response:?}", response.len()); + + Response::parse_bytes(&response, true).map_err(|e| Error::UnableToParseResponse { err: e.into(), response }) + } +} + +/// Helper Function to Check if a UDP Service is Connectable +/// +/// # Panics +/// +/// It will return an error if unable to connect to the UDP service. +/// +/// # Errors +/// +pub async fn check(remote_addr: &SocketAddr) -> Result { + tracing::debug!("Checking Service (detail): {remote_addr:?}."); + + match UdpTrackerClient::new(*remote_addr, DEFAULT_TIMEOUT).await { + Ok(client) => { + let connect_request = ConnectRequest { + transaction_id: TransactionId(I32::new(123)), + }; + + // client.send() return usize, but doesn't use here + match client.send(connect_request.into()).await { + Ok(_) => (), + Err(e) => tracing::debug!("Error: {e:?}."), + }; + + let process = move |response| { + if matches!(response, Response::Connect(_connect_response)) { + Ok("Connected".to_string()) + } else { + Err("Did not Connect".to_string()) + } + }; + + let sleep = time::sleep(Duration::from_millis(2000)); + tokio::pin!(sleep); + + tokio::select! 
{ + () = &mut sleep => { + Err("Timed Out".to_string()) + } + response = client.receive() => { + process(response.unwrap()) + } + } + } + Err(e) => Err(format!("{e:?}")), + } +} diff --git a/packages/tracker-client/src/udp/mod.rs b/packages/tracker-client/src/udp/mod.rs new file mode 100644 index 000000000..b9d5f34f6 --- /dev/null +++ b/packages/tracker-client/src/udp/mod.rs @@ -0,0 +1,68 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use aquatic_udp_protocol::Request; +use thiserror::Error; +use torrust_tracker_located_error::DynError; + +pub mod client; + +/// The maximum number of bytes in a UDP packet. +pub const MAX_PACKET_SIZE: usize = 1496; +/// A magic 64-bit integer constant defined in the protocol that is used to +/// identify the protocol. +pub const PROTOCOL_ID: i64 = 0x0417_2710_1980; + +#[derive(Debug, Clone, Error)] +pub enum Error { + #[error("Timeout while waiting for socket to bind: {addr:?}")] + TimeoutWhileBindingToSocket { addr: SocketAddr }, + + #[error("Failed to bind to socket: {addr:?}, with error: {err:?}")] + UnableToBindToSocket { err: Arc, addr: SocketAddr }, + + #[error("Timeout while waiting for connection to remote: {remote_addr:?}")] + TimeoutWhileConnectingToRemote { remote_addr: SocketAddr }, + + #[error("Failed to connect to remote: {remote_addr:?}, with error: {err:?}")] + UnableToConnectToRemote { + err: Arc, + remote_addr: SocketAddr, + }, + + #[error("Timeout while waiting for the socket to become writable.")] + TimeoutWaitForWriteableSocket, + + #[error("Failed to get writable socket: {err:?}")] + UnableToGetWritableSocket { err: Arc }, + + #[error("Timeout while trying to send data: {data:?}")] + TimeoutWhileSendingData { data: Vec }, + + #[error("Failed to send data: {data:?}, with error: {err:?}")] + UnableToSendData { err: Arc, data: Vec }, + + #[error("Timeout while waiting for the socket to become readable.")] + TimeoutWaitForReadableSocket, + + #[error("Failed to get readable socket: {err:?}")] + 
UnableToGetReadableSocket { err: Arc }, + + #[error("Timeout while trying to receive data.")] + TimeoutWhileReceivingData, + + #[error("Failed to receive data: {err:?}")] + UnableToReceivingData { err: Arc }, + + #[error("Failed to get data from request: {request:?}, with error: {err:?}")] + UnableToWriteDataFromRequest { err: Arc, request: Request }, + + #[error("Failed to parse response: {response:?}, with error: {err:?}")] + UnableToParseResponse { err: Arc, response: Vec }, +} + +impl From for DynError { + fn from(e: Error) -> Self { + Arc::new(Box::new(e)) + } +} From 31ac6cf215f61b17d3593a0f6d97713aa287f8d6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 1 Nov 2024 16:43:41 +0000 Subject: [PATCH 015/802] refactor: use extracted bittorrent-tracker-client --- src/bin/http_tracker_client.rs | 7 - src/bin/tracker_checker.rs | 7 - src/bin/udp_tracker_client.rs | 7 - src/console/clients/checker/app.rs | 120 -------- src/console/clients/checker/checks/health.rs | 77 ----- src/console/clients/checker/checks/http.rs | 104 ------- src/console/clients/checker/checks/mod.rs | 4 - src/console/clients/checker/checks/structs.rs | 12 - src/console/clients/checker/checks/udp.rs | 134 --------- src/console/clients/checker/config.rs | 282 ------------------ src/console/clients/checker/console.rs | 38 --- src/console/clients/checker/logger.rs | 72 ----- src/console/clients/checker/mod.rs | 7 - src/console/clients/checker/printer.rs | 9 - src/console/clients/checker/service.rs | 62 ---- src/console/clients/http/app.rs | 102 ------- src/console/clients/http/mod.rs | 36 --- src/console/clients/mod.rs | 4 - src/console/clients/udp/app.rs | 208 ------------- src/console/clients/udp/checker.rs | 177 ----------- src/console/clients/udp/mod.rs | 51 ---- src/console/clients/udp/responses/dto.rs | 128 -------- src/console/clients/udp/responses/json.rs | 25 -- src/console/clients/udp/responses/mod.rs | 2 - src/console/mod.rs | 1 - src/servers/udp/server/launcher.rs | 2 +- 
.../bit_torrent/tracker/http/client/mod.rs | 204 ------------- .../tracker/http/client/requests/announce.rs | 275 ----------------- .../tracker/http/client/requests/mod.rs | 2 - .../tracker/http/client/requests/scrape.rs | 172 ----------- .../tracker/http/client/responses/announce.rs | 126 -------- .../tracker/http/client/responses/error.rs | 7 - .../tracker/http/client/responses/mod.rs | 3 - .../tracker/http/client/responses/scrape.rs | 230 -------------- src/shared/bit_torrent/tracker/http/mod.rs | 26 -- src/shared/bit_torrent/tracker/mod.rs | 1 - src/shared/bit_torrent/tracker/udp/client.rs | 270 ----------------- src/shared/bit_torrent/tracker/udp/mod.rs | 64 +--- tests/servers/udp/contract.rs | 8 +- 39 files changed, 6 insertions(+), 3060 deletions(-) delete mode 100644 src/bin/http_tracker_client.rs delete mode 100644 src/bin/tracker_checker.rs delete mode 100644 src/bin/udp_tracker_client.rs delete mode 100644 src/console/clients/checker/app.rs delete mode 100644 src/console/clients/checker/checks/health.rs delete mode 100644 src/console/clients/checker/checks/http.rs delete mode 100644 src/console/clients/checker/checks/mod.rs delete mode 100644 src/console/clients/checker/checks/structs.rs delete mode 100644 src/console/clients/checker/checks/udp.rs delete mode 100644 src/console/clients/checker/config.rs delete mode 100644 src/console/clients/checker/console.rs delete mode 100644 src/console/clients/checker/logger.rs delete mode 100644 src/console/clients/checker/mod.rs delete mode 100644 src/console/clients/checker/printer.rs delete mode 100644 src/console/clients/checker/service.rs delete mode 100644 src/console/clients/http/app.rs delete mode 100644 src/console/clients/http/mod.rs delete mode 100644 src/console/clients/mod.rs delete mode 100644 src/console/clients/udp/app.rs delete mode 100644 src/console/clients/udp/checker.rs delete mode 100644 src/console/clients/udp/mod.rs delete mode 100644 src/console/clients/udp/responses/dto.rs delete mode 
100644 src/console/clients/udp/responses/json.rs delete mode 100644 src/console/clients/udp/responses/mod.rs delete mode 100644 src/shared/bit_torrent/tracker/http/client/mod.rs delete mode 100644 src/shared/bit_torrent/tracker/http/client/requests/announce.rs delete mode 100644 src/shared/bit_torrent/tracker/http/client/requests/mod.rs delete mode 100644 src/shared/bit_torrent/tracker/http/client/requests/scrape.rs delete mode 100644 src/shared/bit_torrent/tracker/http/client/responses/announce.rs delete mode 100644 src/shared/bit_torrent/tracker/http/client/responses/error.rs delete mode 100644 src/shared/bit_torrent/tracker/http/client/responses/mod.rs delete mode 100644 src/shared/bit_torrent/tracker/http/client/responses/scrape.rs delete mode 100644 src/shared/bit_torrent/tracker/http/mod.rs delete mode 100644 src/shared/bit_torrent/tracker/udp/client.rs diff --git a/src/bin/http_tracker_client.rs b/src/bin/http_tracker_client.rs deleted file mode 100644 index 0de040549..000000000 --- a/src/bin/http_tracker_client.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! Program to make request to HTTP trackers. -use torrust_tracker::console::clients::http::app; - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - app::run().await -} diff --git a/src/bin/tracker_checker.rs b/src/bin/tracker_checker.rs deleted file mode 100644 index 87aeedeac..000000000 --- a/src/bin/tracker_checker.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! Program to check running trackers. -use torrust_tracker::console::clients::checker::app; - -#[tokio::main] -async fn main() { - app::run().await.expect("Some checks fail"); -} diff --git a/src/bin/udp_tracker_client.rs b/src/bin/udp_tracker_client.rs deleted file mode 100644 index 909b296ca..000000000 --- a/src/bin/udp_tracker_client.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! Program to make request to UDP trackers. 
-use torrust_tracker::console::clients::udp::app; - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - app::run().await -} diff --git a/src/console/clients/checker/app.rs b/src/console/clients/checker/app.rs deleted file mode 100644 index 395f65df9..000000000 --- a/src/console/clients/checker/app.rs +++ /dev/null @@ -1,120 +0,0 @@ -//! Program to run checks against running trackers. -//! -//! Run providing a config file path: -//! -//! ```text -//! cargo run --bin tracker_checker -- --config-path "./share/default/config/tracker_checker.json" -//! TORRUST_CHECKER_CONFIG_PATH="./share/default/config/tracker_checker.json" cargo run --bin tracker_checker -//! ``` -//! -//! Run providing the configuration: -//! -//! ```text -//! TORRUST_CHECKER_CONFIG=$(cat "./share/default/config/tracker_checker.json") cargo run --bin tracker_checker -//! ``` -//! -//! Another real example to test the Torrust demo tracker: -//! -//! ```text -//! TORRUST_CHECKER_CONFIG='{ -//! "udp_trackers": ["144.126.245.19:6969"], -//! "http_trackers": ["https://tracker.torrust-demo.com"], -//! "health_checks": ["https://tracker.torrust-demo.com/api/health_check"] -//! }' cargo run --bin tracker_checker -//! ``` -//! -//! The output should be something like the following: -//! -//! ```json -//! { -//! "udp_trackers": [ -//! { -//! "url": "144.126.245.19:6969", -//! "status": { -//! "code": "ok", -//! "message": "" -//! } -//! } -//! ], -//! "http_trackers": [ -//! { -//! "url": "https://tracker.torrust-demo.com/", -//! "status": { -//! "code": "ok", -//! "message": "" -//! } -//! } -//! ], -//! "health_checks": [ -//! { -//! "url": "https://tracker.torrust-demo.com/api/health_check", -//! "status": { -//! "code": "ok", -//! "message": "" -//! } -//! } -//! ] -//! } -//! 
``` -use std::path::PathBuf; -use std::sync::Arc; - -use anyhow::{Context, Result}; -use clap::Parser; -use tracing::level_filters::LevelFilter; - -use super::config::Configuration; -use super::console::Console; -use super::service::{CheckResult, Service}; -use crate::console::clients::checker::config::parse_from_json; - -#[derive(Parser, Debug)] -#[clap(author, version, about, long_about = None)] -struct Args { - /// Path to the JSON configuration file. - #[clap(short, long, env = "TORRUST_CHECKER_CONFIG_PATH")] - config_path: Option, - - /// Direct configuration content in JSON. - #[clap(env = "TORRUST_CHECKER_CONFIG", hide_env_values = true)] - config_content: Option, -} - -/// # Errors -/// -/// Will return an error if the configuration was not provided. -pub async fn run() -> Result> { - tracing_stdout_init(LevelFilter::INFO); - - let args = Args::parse(); - - let config = setup_config(args)?; - - let console_printer = Console {}; - - let service = Service { - config: Arc::new(config), - console: console_printer, - }; - - service.run_checks().await.context("it should run the check tasks") -} - -fn tracing_stdout_init(filter: LevelFilter) { - tracing_subscriber::fmt().with_max_level(filter).init(); - tracing::debug!("Logging initialized"); -} - -fn setup_config(args: Args) -> Result { - match (args.config_path, args.config_content) { - (Some(config_path), _) => load_config_from_file(&config_path), - (_, Some(config_content)) => parse_from_json(&config_content).context("invalid config format"), - _ => Err(anyhow::anyhow!("no configuration provided")), - } -} - -fn load_config_from_file(path: &PathBuf) -> Result { - let file_content = std::fs::read_to_string(path).with_context(|| format!("can't read config file {path:?}"))?; - - parse_from_json(&file_content).context("invalid config format") -} diff --git a/src/console/clients/checker/checks/health.rs b/src/console/clients/checker/checks/health.rs deleted file mode 100644 index b1fb79148..000000000 --- 
a/src/console/clients/checker/checks/health.rs +++ /dev/null @@ -1,77 +0,0 @@ -use std::sync::Arc; -use std::time::Duration; - -use anyhow::Result; -use hyper::StatusCode; -use reqwest::{Client as HttpClient, Response}; -use serde::Serialize; -use thiserror::Error; -use url::Url; - -#[derive(Debug, Clone, Error, Serialize)] -#[serde(into = "String")] -pub enum Error { - #[error("Failed to Build a Http Client: {err:?}")] - ClientBuildingError { err: Arc }, - #[error("Heath check failed to get a response: {err:?}")] - ResponseError { err: Arc }, - #[error("Http check returned a non-success code: \"{code}\" with the response: \"{response:?}\"")] - UnsuccessfulResponse { code: StatusCode, response: Arc }, -} - -impl From for String { - fn from(value: Error) -> Self { - value.to_string() - } -} - -#[derive(Debug, Clone, Serialize)] -pub struct Checks { - url: Url, - result: Result, -} - -pub async fn run(health_checks: Vec, timeout: Duration) -> Vec> { - let mut results = Vec::default(); - - tracing::debug!("Health checks ..."); - - for url in health_checks { - let result = match run_health_check(url.clone(), timeout).await { - Ok(response) => Ok(response.status().to_string()), - Err(err) => Err(err), - }; - - let check = Checks { url, result }; - - if check.result.is_err() { - results.push(Err(check)); - } else { - results.push(Ok(check)); - } - } - - results -} - -async fn run_health_check(url: Url, timeout: Duration) -> Result { - let client = HttpClient::builder() - .timeout(timeout) - .build() - .map_err(|e| Error::ClientBuildingError { err: e.into() })?; - - let response = client - .get(url.clone()) - .send() - .await - .map_err(|e| Error::ResponseError { err: e.into() })?; - - if response.status().is_success() { - Ok(response) - } else { - Err(Error::UnsuccessfulResponse { - code: response.status(), - response: response.into(), - }) - } -} diff --git a/src/console/clients/checker/checks/http.rs b/src/console/clients/checker/checks/http.rs deleted file mode 100644 
index b64297bed..000000000 --- a/src/console/clients/checker/checks/http.rs +++ /dev/null @@ -1,104 +0,0 @@ -use std::str::FromStr as _; -use std::time::Duration; - -use bittorrent_primitives::info_hash::InfoHash; -use serde::Serialize; -use url::Url; - -use crate::console::clients::http::Error; -use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; -use crate::shared::bit_torrent::tracker::http::client::responses::scrape; -use crate::shared::bit_torrent::tracker::http::client::{requests, Client}; - -#[derive(Debug, Clone, Serialize)] -pub struct Checks { - url: Url, - results: Vec<(Check, Result<(), Error>)>, -} - -#[derive(Debug, Clone, Serialize)] -pub enum Check { - Announce, - Scrape, -} - -pub async fn run(http_trackers: Vec, timeout: Duration) -> Vec> { - let mut results = Vec::default(); - - tracing::debug!("HTTP trackers ..."); - - for ref url in http_trackers { - let mut base_url = url.clone(); - base_url.set_path(""); - - let mut checks = Checks { - url: url.clone(), - results: Vec::default(), - }; - - // Announce - { - let check = check_http_announce(&base_url, timeout).await.map(|_| ()); - - checks.results.push((Check::Announce, check)); - } - - // Scrape - { - let check = check_http_scrape(&base_url, timeout).await.map(|_| ()); - - checks.results.push((Check::Scrape, check)); - } - - if checks.results.iter().any(|f| f.1.is_err()) { - results.push(Err(checks)); - } else { - results.push(Ok(checks)); - } - } - - results -} - -async fn check_http_announce(url: &Url, timeout: Duration) -> Result { - let info_hash_str = "9c38422213e30bff212b30c360d26f9a02136422".to_string(); // # DevSkim: ignore DS173237 - let info_hash = InfoHash::from_str(&info_hash_str).expect("a valid info-hash is required"); - - let client = Client::new(url.clone(), timeout).map_err(|err| Error::HttpClientError { err })?; - - let response = client - .announce( - &requests::announce::QueryBuilder::with_default_values() - .with_info_hash(&info_hash) - 
.query(), - ) - .await - .map_err(|err| Error::HttpClientError { err })?; - - let response = response.bytes().await.map_err(|e| Error::ResponseError { err: e.into() })?; - - let response = serde_bencode::from_bytes::(&response).map_err(|e| Error::ParseBencodeError { - data: response, - err: e.into(), - })?; - - Ok(response) -} - -async fn check_http_scrape(url: &Url, timeout: Duration) -> Result { - let info_hashes: Vec = vec!["9c38422213e30bff212b30c360d26f9a02136422".to_string()]; // # DevSkim: ignore DS173237 - let query = requests::scrape::Query::try_from(info_hashes).expect("a valid array of info-hashes is required"); - - let client = Client::new(url.clone(), timeout).map_err(|err| Error::HttpClientError { err })?; - - let response = client.scrape(&query).await.map_err(|err| Error::HttpClientError { err })?; - - let response = response.bytes().await.map_err(|e| Error::ResponseError { err: e.into() })?; - - let response = scrape::Response::try_from_bencoded(&response).map_err(|e| Error::BencodeParseError { - data: response, - err: e.into(), - })?; - - Ok(response) -} diff --git a/src/console/clients/checker/checks/mod.rs b/src/console/clients/checker/checks/mod.rs deleted file mode 100644 index f8b03f749..000000000 --- a/src/console/clients/checker/checks/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub mod health; -pub mod http; -pub mod structs; -pub mod udp; diff --git a/src/console/clients/checker/checks/structs.rs b/src/console/clients/checker/checks/structs.rs deleted file mode 100644 index d28e20c04..000000000 --- a/src/console/clients/checker/checks/structs.rs +++ /dev/null @@ -1,12 +0,0 @@ -use serde::{Deserialize, Serialize}; - -#[derive(Serialize, Deserialize)] -pub struct Status { - pub code: String, - pub message: String, -} -#[derive(Serialize, Deserialize)] -pub struct CheckerOutput { - pub url: String, - pub status: Status, -} diff --git a/src/console/clients/checker/checks/udp.rs b/src/console/clients/checker/checks/udp.rs deleted file mode 100644 
index 21bdcd1b7..000000000 --- a/src/console/clients/checker/checks/udp.rs +++ /dev/null @@ -1,134 +0,0 @@ -use std::net::SocketAddr; -use std::time::Duration; - -use aquatic_udp_protocol::TransactionId; -use hex_literal::hex; -use serde::Serialize; -use url::Url; - -use crate::console::clients::udp::checker::Client; -use crate::console::clients::udp::Error; - -#[derive(Debug, Clone, Serialize)] -pub struct Checks { - remote_addr: SocketAddr, - results: Vec<(Check, Result<(), Error>)>, -} - -#[derive(Debug, Clone, Serialize)] -pub enum Check { - Setup, - Connect, - Announce, - Scrape, -} - -#[allow(clippy::missing_panics_doc)] -pub async fn run(udp_trackers: Vec, timeout: Duration) -> Vec> { - let mut results = Vec::default(); - - tracing::debug!("UDP trackers ..."); - - let info_hash = aquatic_udp_protocol::InfoHash(hex!("9c38422213e30bff212b30c360d26f9a02136422")); // # DevSkim: ignore DS173237 - - for remote_url in udp_trackers { - let remote_addr = resolve_socket_addr(&remote_url); - - let mut checks = Checks { - remote_addr, - results: Vec::default(), - }; - - tracing::debug!("UDP tracker: {:?}", remote_url); - - // Setup - let client = match Client::new(remote_addr, timeout).await { - Ok(client) => { - checks.results.push((Check::Setup, Ok(()))); - client - } - Err(err) => { - checks.results.push((Check::Setup, Err(err))); - results.push(Err(checks)); - continue; - } - }; - - let transaction_id = TransactionId::new(1); - - // Connect Remote - let connection_id = match client.send_connection_request(transaction_id).await { - Ok(connection_id) => { - checks.results.push((Check::Connect, Ok(()))); - connection_id - } - Err(err) => { - checks.results.push((Check::Connect, Err(err))); - results.push(Err(checks)); - continue; - } - }; - - // Announce - { - let check = client - .send_announce_request(transaction_id, connection_id, info_hash.into()) - .await - .map(|_| ()); - - checks.results.push((Check::Announce, check)); - } - - // Scrape - { - let check = client 
- .send_scrape_request(connection_id, transaction_id, &[info_hash.into()]) - .await - .map(|_| ()); - - checks.results.push((Check::Scrape, check)); - } - - if checks.results.iter().any(|f| f.1.is_err()) { - results.push(Err(checks)); - } else { - results.push(Ok(checks)); - } - } - - results -} - -fn resolve_socket_addr(url: &Url) -> SocketAddr { - let socket_addr = url.socket_addrs(|| None).unwrap(); - *socket_addr.first().unwrap() -} - -#[cfg(test)] -mod tests { - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - - use url::Url; - - use crate::console::clients::checker::checks::udp::resolve_socket_addr; - - #[test] - fn it_should_resolve_the_socket_address_for_udp_scheme_urls_containing_a_domain() { - let socket_addr = resolve_socket_addr(&Url::parse("udp://localhost:8080").unwrap()); - - assert!( - socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) - || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) - ); - } - - #[test] - fn it_should_resolve_the_socket_address_for_udp_scheme_urls_containing_an_ip() { - let socket_addr = resolve_socket_addr(&Url::parse("udp://localhost:8080").unwrap()); - - assert!( - socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) - || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) - ); - } -} diff --git a/src/console/clients/checker/config.rs b/src/console/clients/checker/config.rs deleted file mode 100644 index 154dcae85..000000000 --- a/src/console/clients/checker/config.rs +++ /dev/null @@ -1,282 +0,0 @@ -use std::error::Error; -use std::fmt; - -use reqwest::Url as ServiceUrl; -use serde::Deserialize; - -/// It parses the configuration from a JSON format. -/// -/// # Errors -/// -/// Will return an error if the configuration is not valid. -/// -/// # Panics -/// -/// Will panic if unable to read the configuration file. 
-pub fn parse_from_json(json: &str) -> Result { - let plain_config: PlainConfiguration = serde_json::from_str(json).map_err(ConfigurationError::JsonParseError)?; - Configuration::try_from(plain_config) -} - -/// DTO for the configuration to serialize/deserialize configuration. -/// -/// Configuration does not need to be valid. -#[derive(Deserialize)] -struct PlainConfiguration { - pub udp_trackers: Vec, - pub http_trackers: Vec, - pub health_checks: Vec, -} - -/// Validated configuration -pub struct Configuration { - pub udp_trackers: Vec, - pub http_trackers: Vec, - pub health_checks: Vec, -} - -#[derive(Debug)] -pub enum ConfigurationError { - JsonParseError(serde_json::Error), - InvalidUdpAddress(std::net::AddrParseError), - InvalidUrl(url::ParseError), -} - -impl Error for ConfigurationError {} - -impl fmt::Display for ConfigurationError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - ConfigurationError::JsonParseError(e) => write!(f, "JSON parse error: {e}"), - ConfigurationError::InvalidUdpAddress(e) => write!(f, "Invalid UDP address: {e}"), - ConfigurationError::InvalidUrl(e) => write!(f, "Invalid URL: {e}"), - } - } -} - -impl TryFrom for Configuration { - type Error = ConfigurationError; - - fn try_from(plain_config: PlainConfiguration) -> Result { - let udp_trackers = plain_config - .udp_trackers - .into_iter() - .map(|s| if s.starts_with("udp://") { s } else { format!("udp://{s}") }) - .map(|s| s.parse::().map_err(ConfigurationError::InvalidUrl)) - .collect::, _>>()?; - - let http_trackers = plain_config - .http_trackers - .into_iter() - .map(|s| s.parse::().map_err(ConfigurationError::InvalidUrl)) - .collect::, _>>()?; - - let health_checks = plain_config - .health_checks - .into_iter() - .map(|s| s.parse::().map_err(ConfigurationError::InvalidUrl)) - .collect::, _>>()?; - - Ok(Configuration { - udp_trackers, - http_trackers, - health_checks, - }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn 
configuration_should_be_build_from_plain_serializable_configuration() { - let dto = PlainConfiguration { - udp_trackers: vec!["udp://127.0.0.1:8080".to_string()], - http_trackers: vec!["http://127.0.0.1:8080".to_string()], - health_checks: vec!["http://127.0.0.1:8080/health".to_string()], - }; - - let config = Configuration::try_from(dto).expect("A valid configuration"); - - assert_eq!(config.udp_trackers, vec![ServiceUrl::parse("udp://127.0.0.1:8080").unwrap()]); - - assert_eq!( - config.http_trackers, - vec![ServiceUrl::parse("http://127.0.0.1:8080").unwrap()] - ); - - assert_eq!( - config.health_checks, - vec![ServiceUrl::parse("http://127.0.0.1:8080/health").unwrap()] - ); - } - - mod building_configuration_from_plain_configuration_for { - - mod udp_trackers { - use crate::console::clients::checker::config::{Configuration, PlainConfiguration, ServiceUrl}; - - /* The plain configuration should allow UDP URLs with: - - - IP or domain. - - With or without scheme. - - With or without `announce` suffix. - - With or without `/` at the end of the authority section (with empty path). 
- - For example: - - 127.0.0.1:6969 - 127.0.0.1:6969/ - 127.0.0.1:6969/announce - - localhost:6969 - localhost:6969/ - localhost:6969/announce - - udp://127.0.0.1:6969 - udp://127.0.0.1:6969/ - udp://127.0.0.1:6969/announce - - udp://localhost:6969 - udp://localhost:6969/ - udp://localhost:6969/announce - - */ - - #[test] - fn it_should_fail_when_a_tracker_udp_url_is_invalid() { - let plain_config = PlainConfiguration { - udp_trackers: vec!["invalid URL".to_string()], - http_trackers: vec![], - health_checks: vec![], - }; - - assert!(Configuration::try_from(plain_config).is_err()); - } - - #[test] - fn it_should_add_the_udp_scheme_to_the_udp_url_when_it_is_missing() { - let plain_config = PlainConfiguration { - udp_trackers: vec!["127.0.0.1:6969".to_string()], - http_trackers: vec![], - health_checks: vec![], - }; - - let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); - - assert_eq!(config.udp_trackers[0], "udp://127.0.0.1:6969".parse::().unwrap()); - } - - #[test] - fn it_should_allow_using_domains() { - let plain_config = PlainConfiguration { - udp_trackers: vec!["udp://localhost:6969".to_string()], - http_trackers: vec![], - health_checks: vec![], - }; - - let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); - - assert_eq!(config.udp_trackers[0], "udp://localhost:6969".parse::().unwrap()); - } - - #[test] - fn it_should_allow_the_url_to_have_an_empty_path() { - let plain_config = PlainConfiguration { - udp_trackers: vec!["127.0.0.1:6969/".to_string()], - http_trackers: vec![], - health_checks: vec![], - }; - - let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); - - assert_eq!(config.udp_trackers[0], "udp://127.0.0.1:6969/".parse::().unwrap()); - } - - #[test] - fn it_should_allow_the_url_to_contain_a_path() { - // This is the common format for UDP tracker URLs: - // udp://domain.com:6969/announce - - let plain_config = PlainConfiguration { - 
udp_trackers: vec!["127.0.0.1:6969/announce".to_string()], - http_trackers: vec![], - health_checks: vec![], - }; - - let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); - - assert_eq!( - config.udp_trackers[0], - "udp://127.0.0.1:6969/announce".parse::().unwrap() - ); - } - } - - mod http_trackers { - use crate::console::clients::checker::config::{Configuration, PlainConfiguration, ServiceUrl}; - - #[test] - fn it_should_fail_when_a_tracker_http_url_is_invalid() { - let plain_config = PlainConfiguration { - udp_trackers: vec![], - http_trackers: vec!["invalid URL".to_string()], - health_checks: vec![], - }; - - assert!(Configuration::try_from(plain_config).is_err()); - } - - #[test] - fn it_should_allow_the_url_to_contain_a_path() { - // This is the common format for HTTP tracker URLs: - // http://domain.com:7070/announce - - let plain_config = PlainConfiguration { - udp_trackers: vec![], - http_trackers: vec!["http://127.0.0.1:7070/announce".to_string()], - health_checks: vec![], - }; - - let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); - - assert_eq!( - config.http_trackers[0], - "http://127.0.0.1:7070/announce".parse::().unwrap() - ); - } - - #[test] - fn it_should_allow_the_url_to_contain_an_empty_path() { - let plain_config = PlainConfiguration { - udp_trackers: vec![], - http_trackers: vec!["http://127.0.0.1:7070/".to_string()], - health_checks: vec![], - }; - - let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); - - assert_eq!( - config.http_trackers[0], - "http://127.0.0.1:7070/".parse::().unwrap() - ); - } - } - - mod health_checks { - use crate::console::clients::checker::config::{Configuration, PlainConfiguration}; - - #[test] - fn it_should_fail_when_a_health_check_http_url_is_invalid() { - let plain_config = PlainConfiguration { - udp_trackers: vec![], - http_trackers: vec![], - health_checks: vec!["invalid URL".to_string()], - }; - - 
assert!(Configuration::try_from(plain_config).is_err()); - } - } - } -} diff --git a/src/console/clients/checker/console.rs b/src/console/clients/checker/console.rs deleted file mode 100644 index b55c559fc..000000000 --- a/src/console/clients/checker/console.rs +++ /dev/null @@ -1,38 +0,0 @@ -use super::printer::{Printer, CLEAR_SCREEN}; - -pub struct Console {} - -impl Default for Console { - fn default() -> Self { - Self::new() - } -} - -impl Console { - #[must_use] - pub fn new() -> Self { - Self {} - } -} - -impl Printer for Console { - fn clear(&self) { - self.print(CLEAR_SCREEN); - } - - fn print(&self, output: &str) { - print!("{}", &output); - } - - fn eprint(&self, output: &str) { - eprint!("{}", &output); - } - - fn println(&self, output: &str) { - println!("{}", &output); - } - - fn eprintln(&self, output: &str) { - eprintln!("{}", &output); - } -} diff --git a/src/console/clients/checker/logger.rs b/src/console/clients/checker/logger.rs deleted file mode 100644 index 50e97189f..000000000 --- a/src/console/clients/checker/logger.rs +++ /dev/null @@ -1,72 +0,0 @@ -use std::cell::RefCell; - -use super::printer::{Printer, CLEAR_SCREEN}; - -pub struct Logger { - output: RefCell, -} - -impl Default for Logger { - fn default() -> Self { - Self::new() - } -} - -impl Logger { - #[must_use] - pub fn new() -> Self { - Self { - output: RefCell::new(String::new()), - } - } - - pub fn log(&self) -> String { - self.output.borrow().clone() - } -} - -impl Printer for Logger { - fn clear(&self) { - self.print(CLEAR_SCREEN); - } - - fn print(&self, output: &str) { - *self.output.borrow_mut() = format!("{}{}", self.output.borrow(), &output); - } - - fn eprint(&self, output: &str) { - *self.output.borrow_mut() = format!("{}{}", self.output.borrow(), &output); - } - - fn println(&self, output: &str) { - self.print(&format!("{}/n", &output)); - } - - fn eprintln(&self, output: &str) { - self.eprint(&format!("{}/n", &output)); - } -} - -#[cfg(test)] -mod tests { - use 
crate::console::clients::checker::logger::Logger; - use crate::console::clients::checker::printer::{Printer, CLEAR_SCREEN}; - - #[test] - fn should_capture_the_clear_screen_command() { - let console_logger = Logger::new(); - - console_logger.clear(); - - assert_eq!(CLEAR_SCREEN, console_logger.log()); - } - - #[test] - fn should_capture_the_print_command_output() { - let console_logger = Logger::new(); - - console_logger.print("OUTPUT"); - - assert_eq!("OUTPUT", console_logger.log()); - } -} diff --git a/src/console/clients/checker/mod.rs b/src/console/clients/checker/mod.rs deleted file mode 100644 index d26a4a686..000000000 --- a/src/console/clients/checker/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -pub mod app; -pub mod checks; -pub mod config; -pub mod console; -pub mod logger; -pub mod printer; -pub mod service; diff --git a/src/console/clients/checker/printer.rs b/src/console/clients/checker/printer.rs deleted file mode 100644 index d590dfedb..000000000 --- a/src/console/clients/checker/printer.rs +++ /dev/null @@ -1,9 +0,0 @@ -pub const CLEAR_SCREEN: &str = "\x1B[2J\x1B[1;1H"; - -pub trait Printer { - fn clear(&self); - fn print(&self, output: &str); - fn eprint(&self, output: &str); - fn println(&self, output: &str); - fn eprintln(&self, output: &str); -} diff --git a/src/console/clients/checker/service.rs b/src/console/clients/checker/service.rs deleted file mode 100644 index acd312d8c..000000000 --- a/src/console/clients/checker/service.rs +++ /dev/null @@ -1,62 +0,0 @@ -use std::sync::Arc; - -use futures::FutureExt as _; -use serde::Serialize; -use tokio::task::{JoinError, JoinSet}; -use torrust_tracker_configuration::DEFAULT_TIMEOUT; - -use super::checks::{health, http, udp}; -use super::config::Configuration; -use super::console::Console; -use crate::console::clients::checker::printer::Printer; - -pub struct Service { - pub(crate) config: Arc, - pub(crate) console: Console, -} - -#[derive(Debug, Clone, Serialize)] -pub enum CheckResult { - Udp(Result), - 
Http(Result), - Health(Result), -} - -impl Service { - /// # Errors - /// - /// It will return an error if some of the tests panic or otherwise fail to run. - /// On success it will return a vector of `Ok(())` of [`CheckResult`]. - /// - /// # Panics - /// - /// It would panic if `serde_json` produces invalid json for the `to_string_pretty` function. - pub async fn run_checks(self) -> Result, JoinError> { - tracing::info!("Running checks for trackers ..."); - - let mut check_results = Vec::default(); - - let mut checks = JoinSet::new(); - checks.spawn( - udp::run(self.config.udp_trackers.clone(), DEFAULT_TIMEOUT).map(|mut f| f.drain(..).map(CheckResult::Udp).collect()), - ); - checks.spawn( - http::run(self.config.http_trackers.clone(), DEFAULT_TIMEOUT) - .map(|mut f| f.drain(..).map(CheckResult::Http).collect()), - ); - checks.spawn( - health::run(self.config.health_checks.clone(), DEFAULT_TIMEOUT) - .map(|mut f| f.drain(..).map(CheckResult::Health).collect()), - ); - - while let Some(results) = checks.join_next().await { - check_results.append(&mut results?); - } - - let json_output = serde_json::json!(check_results); - self.console - .println(&serde_json::to_string_pretty(&json_output).expect("it should consume valid json")); - - Ok(check_results) - } -} diff --git a/src/console/clients/http/app.rs b/src/console/clients/http/app.rs deleted file mode 100644 index 6730c027d..000000000 --- a/src/console/clients/http/app.rs +++ /dev/null @@ -1,102 +0,0 @@ -//! HTTP Tracker client: -//! -//! Examples: -//! -//! `Announce` request: -//! -//! ```text -//! cargo run --bin http_tracker_client announce http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq -//! ``` -//! -//! `Scrape` request: -//! -//! ```text -//! cargo run --bin http_tracker_client scrape http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq -//! 
``` -use std::str::FromStr; -use std::time::Duration; - -use anyhow::Context; -use bittorrent_primitives::info_hash::InfoHash; -use clap::{Parser, Subcommand}; -use reqwest::Url; -use torrust_tracker_configuration::DEFAULT_TIMEOUT; - -use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; -use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; -use crate::shared::bit_torrent::tracker::http::client::responses::scrape; -use crate::shared::bit_torrent::tracker::http::client::{requests, Client}; - -#[derive(Parser, Debug)] -#[command(author, version, about, long_about = None)] -struct Args { - #[command(subcommand)] - command: Command, -} - -#[derive(Subcommand, Debug)] -enum Command { - Announce { tracker_url: String, info_hash: String }, - Scrape { tracker_url: String, info_hashes: Vec }, -} - -/// # Errors -/// -/// Will return an error if the command fails. -pub async fn run() -> anyhow::Result<()> { - let args = Args::parse(); - - match args.command { - Command::Announce { tracker_url, info_hash } => { - announce_command(tracker_url, info_hash, DEFAULT_TIMEOUT).await?; - } - Command::Scrape { - tracker_url, - info_hashes, - } => { - scrape_command(&tracker_url, &info_hashes, DEFAULT_TIMEOUT).await?; - } - } - - Ok(()) -} - -async fn announce_command(tracker_url: String, info_hash: String, timeout: Duration) -> anyhow::Result<()> { - let base_url = Url::parse(&tracker_url).context("failed to parse HTTP tracker base URL")?; - let info_hash = - InfoHash::from_str(&info_hash).expect("Invalid infohash. Example infohash: `9c38422213e30bff212b30c360d26f9a02136422`"); - - let response = Client::new(base_url, timeout)? 
- .announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) - .await?; - - let body = response.bytes().await?; - - let announce_response: Announce = serde_bencode::from_bytes(&body) - .unwrap_or_else(|_| panic!("response body should be a valid announce response, got: \"{:#?}\"", &body)); - - let json = serde_json::to_string(&announce_response).context("failed to serialize scrape response into JSON")?; - - println!("{json}"); - - Ok(()) -} - -async fn scrape_command(tracker_url: &str, info_hashes: &[String], timeout: Duration) -> anyhow::Result<()> { - let base_url = Url::parse(tracker_url).context("failed to parse HTTP tracker base URL")?; - - let query = requests::scrape::Query::try_from(info_hashes).context("failed to parse infohashes")?; - - let response = Client::new(base_url, timeout)?.scrape(&query).await?; - - let body = response.bytes().await?; - - let scrape_response = scrape::Response::try_from_bencoded(&body) - .unwrap_or_else(|_| panic!("response body should be a valid scrape response, got: \"{:#?}\"", &body)); - - let json = serde_json::to_string(&scrape_response).context("failed to serialize scrape response into JSON")?; - - println!("{json}"); - - Ok(()) -} diff --git a/src/console/clients/http/mod.rs b/src/console/clients/http/mod.rs deleted file mode 100644 index eaa71957f..000000000 --- a/src/console/clients/http/mod.rs +++ /dev/null @@ -1,36 +0,0 @@ -use std::sync::Arc; - -use serde::Serialize; -use thiserror::Error; - -use crate::shared::bit_torrent::tracker::http::client::responses::scrape::BencodeParseError; - -pub mod app; - -#[derive(Debug, Clone, Error, Serialize)] -#[serde(into = "String")] -pub enum Error { - #[error("Http request did not receive a response within the timeout: {err:?}")] - HttpClientError { - err: crate::shared::bit_torrent::tracker::http::client::Error, - }, - #[error("Http failed to get a response at all: {err:?}")] - ResponseError { err: Arc }, - #[error("Failed to deserialize the bencoded 
response data with the error: \"{err:?}\"")] - ParseBencodeError { - data: hyper::body::Bytes, - err: Arc, - }, - - #[error("Failed to deserialize the bencoded response data with the error: \"{err:?}\"")] - BencodeParseError { - data: hyper::body::Bytes, - err: Arc, - }, -} - -impl From for String { - fn from(value: Error) -> Self { - value.to_string() - } -} diff --git a/src/console/clients/mod.rs b/src/console/clients/mod.rs deleted file mode 100644 index 8492f8ba5..000000000 --- a/src/console/clients/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -//! Console clients. -pub mod checker; -pub mod http; -pub mod udp; diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs deleted file mode 100644 index a2736c365..000000000 --- a/src/console/clients/udp/app.rs +++ /dev/null @@ -1,208 +0,0 @@ -//! UDP Tracker client: -//! -//! Examples: -//! -//! Announce request: -//! -//! ```text -//! cargo run --bin udp_tracker_client announce 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq -//! ``` -//! -//! Announce response: -//! -//! ```json -//! { -//! "transaction_id": -888840697 -//! "announce_interval": 120, -//! "leechers": 0, -//! "seeders": 1, -//! "peers": [ -//! "123.123.123.123:51289" -//! ], -//! } -//! ``` -//! -//! Scrape request: -//! -//! ```text -//! cargo run --bin udp_tracker_client scrape 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq -//! ``` -//! -//! Scrape response: -//! -//! ```json -//! { -//! "transaction_id": -888840697, -//! "torrent_stats": [ -//! { -//! "completed": 0, -//! "leechers": 0, -//! "seeders": 0 -//! }, -//! { -//! "completed": 0, -//! "leechers": 0, -//! "seeders": 0 -//! } -//! ] -//! } -//! ``` -//! -//! You can use an URL with instead of the socket address. For example: -//! -//! ```text -//! cargo run --bin udp_tracker_client scrape udp://localhost:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq -//! 
cargo run --bin udp_tracker_client scrape udp://localhost:6969/scrape 9c38422213e30bff212b30c360d26f9a02136422 | jq -//! ``` -//! -//! The protocol (`udp://`) in the URL is mandatory. The path (`\scrape`) is optional. It always uses `\scrape`. -use std::net::{SocketAddr, ToSocketAddrs}; -use std::str::FromStr; - -use anyhow::Context; -use aquatic_udp_protocol::{Response, TransactionId}; -use bittorrent_primitives::info_hash::InfoHash as TorrustInfoHash; -use clap::{Parser, Subcommand}; -use torrust_tracker_configuration::DEFAULT_TIMEOUT; -use tracing::level_filters::LevelFilter; -use url::Url; - -use super::Error; -use crate::console::clients::udp::checker; -use crate::console::clients::udp::responses::dto::SerializableResponse; -use crate::console::clients::udp::responses::json::ToJson; - -const RANDOM_TRANSACTION_ID: i32 = -888_840_697; - -#[derive(Parser, Debug)] -#[command(author, version, about, long_about = None)] -struct Args { - #[command(subcommand)] - command: Command, -} - -#[derive(Subcommand, Debug)] -enum Command { - Announce { - #[arg(value_parser = parse_socket_addr)] - tracker_socket_addr: SocketAddr, - #[arg(value_parser = parse_info_hash)] - info_hash: TorrustInfoHash, - }, - Scrape { - #[arg(value_parser = parse_socket_addr)] - tracker_socket_addr: SocketAddr, - #[arg(value_parser = parse_info_hash, num_args = 1..=74, value_delimiter = ' ')] - info_hashes: Vec, - }, -} - -/// # Errors -/// -/// Will return an error if the command fails. 
-/// -/// -pub async fn run() -> anyhow::Result<()> { - tracing_stdout_init(LevelFilter::INFO); - - let args = Args::parse(); - - let response = match args.command { - Command::Announce { - tracker_socket_addr: remote_addr, - info_hash, - } => handle_announce(remote_addr, &info_hash).await?, - Command::Scrape { - tracker_socket_addr: remote_addr, - info_hashes, - } => handle_scrape(remote_addr, &info_hashes).await?, - }; - - let response: SerializableResponse = response.into(); - let response_json = response.to_json_string()?; - - print!("{response_json}"); - - Ok(()) -} - -fn tracing_stdout_init(filter: LevelFilter) { - tracing_subscriber::fmt().with_max_level(filter).init(); - tracing::debug!("Logging initialized"); -} - -async fn handle_announce(remote_addr: SocketAddr, info_hash: &TorrustInfoHash) -> Result { - let transaction_id = TransactionId::new(RANDOM_TRANSACTION_ID); - - let client = checker::Client::new(remote_addr, DEFAULT_TIMEOUT).await?; - - let connection_id = client.send_connection_request(transaction_id).await?; - - client.send_announce_request(transaction_id, connection_id, *info_hash).await -} - -async fn handle_scrape(remote_addr: SocketAddr, info_hashes: &[TorrustInfoHash]) -> Result { - let transaction_id = TransactionId::new(RANDOM_TRANSACTION_ID); - - let client = checker::Client::new(remote_addr, DEFAULT_TIMEOUT).await?; - - let connection_id = client.send_connection_request(transaction_id).await?; - - client.send_scrape_request(connection_id, transaction_id, info_hashes).await -} - -fn parse_socket_addr(tracker_socket_addr_str: &str) -> anyhow::Result { - tracing::debug!("Tracker socket address: {tracker_socket_addr_str:#?}"); - - // Check if the address is a valid URL. If so, extract the host and port. 
- let resolved_addr = if let Ok(url) = Url::parse(tracker_socket_addr_str) { - tracing::debug!("Tracker socket address URL: {url:?}"); - - let host = url - .host_str() - .with_context(|| format!("invalid host in URL: `{tracker_socket_addr_str}`"))? - .to_owned(); - - let port = url - .port() - .with_context(|| format!("port not found in URL: `{tracker_socket_addr_str}`"))? - .to_owned(); - - (host, port) - } else { - // If not a URL, assume it's a host:port pair. - - let parts: Vec<&str> = tracker_socket_addr_str.split(':').collect(); - - if parts.len() != 2 { - return Err(anyhow::anyhow!( - "invalid address format: `{}`. Expected format is host:port", - tracker_socket_addr_str - )); - } - - let host = parts[0].to_owned(); - - let port = parts[1] - .parse::() - .with_context(|| format!("invalid port: `{}`", parts[1]))? - .to_owned(); - - (host, port) - }; - - tracing::debug!("Resolved address: {resolved_addr:#?}"); - - // Perform DNS resolution. - let socket_addrs: Vec<_> = resolved_addr.to_socket_addrs()?.collect(); - if socket_addrs.is_empty() { - Err(anyhow::anyhow!("DNS resolution failed for `{}`", tracker_socket_addr_str)) - } else { - Ok(socket_addrs[0]) - } -} - -fn parse_info_hash(info_hash_str: &str) -> anyhow::Result { - TorrustInfoHash::from_str(info_hash_str) - .map_err(|e| anyhow::Error::msg(format!("failed to parse info-hash `{info_hash_str}`: {e:?}"))) -} diff --git a/src/console/clients/udp/checker.rs b/src/console/clients/udp/checker.rs deleted file mode 100644 index 14e94c132..000000000 --- a/src/console/clients/udp/checker.rs +++ /dev/null @@ -1,177 +0,0 @@ -use std::net::{Ipv4Addr, SocketAddr}; -use std::num::NonZeroU16; -use std::time::Duration; - -use aquatic_udp_protocol::common::InfoHash; -use aquatic_udp_protocol::{ - AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, NumberOfBytes, NumberOfPeers, - PeerId, PeerKey, Port, Response, ScrapeRequest, TransactionId, -}; -use 
bittorrent_primitives::info_hash::InfoHash as TorrustInfoHash; - -use super::Error; -use crate::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; - -/// A UDP Tracker client to make test requests (checks). -#[derive(Debug)] -pub struct Client { - client: UdpTrackerClient, -} - -impl Client { - /// Creates a new `[Client]` for checking a UDP Tracker Service - /// - /// # Errors - /// - /// It will error if unable to bind and connect to the udp remote address. - /// - pub async fn new(remote_addr: SocketAddr, timeout: Duration) -> Result { - let client = UdpTrackerClient::new(remote_addr, timeout) - .await - .map_err(|err| Error::UnableToBindAndConnect { remote_addr, err })?; - - Ok(Self { client }) - } - - /// Returns the local addr of this [`Client`]. - /// - /// # Errors - /// - /// This function will return an error if the socket is somehow not bound. - pub fn local_addr(&self) -> std::io::Result { - self.client.client.socket.local_addr() - } - - /// Sends a connection request to the UDP Tracker server. - /// - /// # Errors - /// - /// Will return and error if - /// - /// - It can't connect to the remote UDP socket. - /// - It can't make a connection request successfully to the remote UDP - /// server (after successfully connecting to the remote UDP socket). - /// - /// # Panics - /// - /// Will panic if it receives an unexpected response. 
- pub async fn send_connection_request(&self, transaction_id: TransactionId) -> Result { - tracing::debug!("Sending connection request with transaction id: {transaction_id:#?}"); - - let connect_request = ConnectRequest { transaction_id }; - - let _ = self - .client - .send(connect_request.into()) - .await - .map_err(|err| Error::UnableToSendConnectionRequest { err })?; - - let response = self - .client - .receive() - .await - .map_err(|err| Error::UnableToReceiveConnectResponse { err })?; - - match response { - Response::Connect(connect_response) => Ok(connect_response.connection_id), - _ => Err(Error::UnexpectedConnectionResponse { response }), - } - } - - /// Sends an announce request to the UDP Tracker server. - /// - /// # Errors - /// - /// Will return and error if the client is not connected. You have to connect - /// before calling this function. - /// - /// # Panics - /// - /// It will panic if the `local_address` has a zero port. - pub async fn send_announce_request( - &self, - transaction_id: TransactionId, - connection_id: ConnectionId, - info_hash: TorrustInfoHash, - ) -> Result { - tracing::debug!("Sending announce request with transaction id: {transaction_id:#?}"); - - let port = NonZeroU16::new( - self.client - .client - .socket - .local_addr() - .expect("it should get the local address") - .port(), - ) - .expect("it should no be zero"); - - let announce_request = AnnounceRequest { - connection_id, - action_placeholder: AnnounceActionPlaceholder::default(), - transaction_id, - info_hash: InfoHash(info_hash.bytes()), - peer_id: PeerId(*b"-qB00000000000000001"), - bytes_downloaded: NumberOfBytes(0i64.into()), - bytes_uploaded: NumberOfBytes(0i64.into()), - bytes_left: NumberOfBytes(0i64.into()), - event: AnnounceEvent::Started.into(), - ip_address: Ipv4Addr::new(0, 0, 0, 0).into(), - key: PeerKey::new(0i32), - peers_wanted: NumberOfPeers(1i32.into()), - port: Port::new(port), - }; - - let _ = self - .client - .send(announce_request.into()) - .await - 
.map_err(|err| Error::UnableToSendAnnounceRequest { err })?; - - let response = self - .client - .receive() - .await - .map_err(|err| Error::UnableToReceiveAnnounceResponse { err })?; - - Ok(response) - } - - /// Sends a scrape request to the UDP Tracker server. - /// - /// # Errors - /// - /// Will return and error if the client is not connected. You have to connect - /// before calling this function. - pub async fn send_scrape_request( - &self, - connection_id: ConnectionId, - transaction_id: TransactionId, - info_hashes: &[TorrustInfoHash], - ) -> Result { - tracing::debug!("Sending scrape request with transaction id: {transaction_id:#?}"); - - let scrape_request = ScrapeRequest { - connection_id, - transaction_id, - info_hashes: info_hashes - .iter() - .map(|torrust_info_hash| InfoHash(torrust_info_hash.bytes())) - .collect(), - }; - - let _ = self - .client - .send(scrape_request.into()) - .await - .map_err(|err| Error::UnableToSendScrapeRequest { err })?; - - let response = self - .client - .receive() - .await - .map_err(|err| Error::UnableToReceiveScrapeResponse { err })?; - - Ok(response) - } -} diff --git a/src/console/clients/udp/mod.rs b/src/console/clients/udp/mod.rs deleted file mode 100644 index b92bed096..000000000 --- a/src/console/clients/udp/mod.rs +++ /dev/null @@ -1,51 +0,0 @@ -use std::net::SocketAddr; - -use aquatic_udp_protocol::Response; -use serde::Serialize; -use thiserror::Error; - -use crate::shared::bit_torrent::tracker::udp; - -pub mod app; -pub mod checker; -pub mod responses; - -#[derive(Error, Debug, Clone, Serialize)] -#[serde(into = "String")] -pub enum Error { - #[error("Failed to Connect to: {remote_addr}, with error: {err}")] - UnableToBindAndConnect { remote_addr: SocketAddr, err: udp::Error }, - - #[error("Failed to send a connection request, with error: {err}")] - UnableToSendConnectionRequest { err: udp::Error }, - - #[error("Failed to receive a connect response, with error: {err}")] - UnableToReceiveConnectResponse { err: 
udp::Error }, - - #[error("Failed to send a announce request, with error: {err}")] - UnableToSendAnnounceRequest { err: udp::Error }, - - #[error("Failed to receive a announce response, with error: {err}")] - UnableToReceiveAnnounceResponse { err: udp::Error }, - - #[error("Failed to send a scrape request, with error: {err}")] - UnableToSendScrapeRequest { err: udp::Error }, - - #[error("Failed to receive a scrape response, with error: {err}")] - UnableToReceiveScrapeResponse { err: udp::Error }, - - #[error("Failed to receive a response, with error: {err}")] - UnableToReceiveResponse { err: udp::Error }, - - #[error("Failed to get local address for connection: {err}")] - UnableToGetLocalAddr { err: udp::Error }, - - #[error("Failed to get a connection response: {response:?}")] - UnexpectedConnectionResponse { response: Response }, -} - -impl From for String { - fn from(value: Error) -> Self { - value.to_string() - } -} diff --git a/src/console/clients/udp/responses/dto.rs b/src/console/clients/udp/responses/dto.rs deleted file mode 100644 index 93320b0f7..000000000 --- a/src/console/clients/udp/responses/dto.rs +++ /dev/null @@ -1,128 +0,0 @@ -//! Aquatic responses are not serializable. These are the serializable wrappers. 
-use std::net::{Ipv4Addr, Ipv6Addr}; - -use aquatic_udp_protocol::Response::{self}; -use aquatic_udp_protocol::{AnnounceResponse, ConnectResponse, ErrorResponse, Ipv4AddrBytes, Ipv6AddrBytes, ScrapeResponse}; -use serde::Serialize; - -#[derive(Serialize)] -pub enum SerializableResponse { - Connect(ConnectSerializableResponse), - AnnounceIpv4(AnnounceSerializableResponse), - AnnounceIpv6(AnnounceSerializableResponse), - Scrape(ScrapeSerializableResponse), - Error(ErrorSerializableResponse), -} - -impl From for SerializableResponse { - fn from(response: Response) -> Self { - match response { - Response::Connect(response) => SerializableResponse::Connect(ConnectSerializableResponse::from(response)), - Response::AnnounceIpv4(response) => SerializableResponse::AnnounceIpv4(AnnounceSerializableResponse::from(response)), - Response::AnnounceIpv6(response) => SerializableResponse::AnnounceIpv6(AnnounceSerializableResponse::from(response)), - Response::Scrape(response) => SerializableResponse::Scrape(ScrapeSerializableResponse::from(response)), - Response::Error(response) => SerializableResponse::Error(ErrorSerializableResponse::from(response)), - } - } -} - -#[derive(Serialize)] -pub struct ConnectSerializableResponse { - transaction_id: i32, - connection_id: i64, -} - -impl From for ConnectSerializableResponse { - fn from(connect: ConnectResponse) -> Self { - Self { - transaction_id: connect.transaction_id.0.into(), - connection_id: connect.connection_id.0.into(), - } - } -} - -#[derive(Serialize)] -pub struct AnnounceSerializableResponse { - transaction_id: i32, - announce_interval: i32, - leechers: i32, - seeders: i32, - peers: Vec, -} - -impl From> for AnnounceSerializableResponse { - fn from(announce: AnnounceResponse) -> Self { - Self { - transaction_id: announce.fixed.transaction_id.0.into(), - announce_interval: announce.fixed.announce_interval.0.into(), - leechers: announce.fixed.leechers.0.into(), - seeders: announce.fixed.seeders.0.into(), - peers: announce - 
.peers - .iter() - .map(|peer| format!("{}:{}", Ipv4Addr::from(peer.ip_address), peer.port.0)) - .collect::>(), - } - } -} - -impl From> for AnnounceSerializableResponse { - fn from(announce: AnnounceResponse) -> Self { - Self { - transaction_id: announce.fixed.transaction_id.0.into(), - announce_interval: announce.fixed.announce_interval.0.into(), - leechers: announce.fixed.leechers.0.into(), - seeders: announce.fixed.seeders.0.into(), - peers: announce - .peers - .iter() - .map(|peer| format!("{}:{}", Ipv6Addr::from(peer.ip_address), peer.port.0)) - .collect::>(), - } - } -} - -#[derive(Serialize)] -pub struct ScrapeSerializableResponse { - transaction_id: i32, - torrent_stats: Vec, -} - -impl From for ScrapeSerializableResponse { - fn from(scrape: ScrapeResponse) -> Self { - Self { - transaction_id: scrape.transaction_id.0.into(), - torrent_stats: scrape - .torrent_stats - .iter() - .map(|torrent_scrape_statistics| TorrentStats { - seeders: torrent_scrape_statistics.seeders.0.into(), - completed: torrent_scrape_statistics.completed.0.into(), - leechers: torrent_scrape_statistics.leechers.0.into(), - }) - .collect::>(), - } - } -} - -#[derive(Serialize)] -pub struct ErrorSerializableResponse { - transaction_id: i32, - message: String, -} - -impl From for ErrorSerializableResponse { - fn from(error: ErrorResponse) -> Self { - Self { - transaction_id: error.transaction_id.0.into(), - message: error.message.to_string(), - } - } -} - -#[derive(Serialize)] -struct TorrentStats { - seeders: i32, - completed: i32, - leechers: i32, -} diff --git a/src/console/clients/udp/responses/json.rs b/src/console/clients/udp/responses/json.rs deleted file mode 100644 index 5d2bd6b89..000000000 --- a/src/console/clients/udp/responses/json.rs +++ /dev/null @@ -1,25 +0,0 @@ -use anyhow::Context; -use serde::Serialize; - -use super::dto::SerializableResponse; - -#[allow(clippy::module_name_repetitions)] -pub trait ToJson { - /// - /// Returns a string with the JSON serialized version 
of the response - /// - /// # Errors - /// - /// Will return an error if serialization fails. - /// - fn to_json_string(&self) -> anyhow::Result - where - Self: Serialize, - { - let pretty_json = serde_json::to_string_pretty(self).context("response JSON serialization")?; - - Ok(pretty_json) - } -} - -impl ToJson for SerializableResponse {} diff --git a/src/console/clients/udp/responses/mod.rs b/src/console/clients/udp/responses/mod.rs deleted file mode 100644 index e6d2e5e51..000000000 --- a/src/console/clients/udp/responses/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod dto; -pub mod json; diff --git a/src/console/mod.rs b/src/console/mod.rs index dab338e4b..0e0da3fa2 100644 --- a/src/console/mod.rs +++ b/src/console/mod.rs @@ -1,4 +1,3 @@ //! Console apps. pub mod ci; -pub mod clients; pub mod profiling; diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index c9ad213f6..7f31d7739 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -2,6 +2,7 @@ use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; +use bittorrent_tracker_client::udp::client::check; use derive_more::Constructor; use futures_util::StreamExt; use tokio::select; @@ -18,7 +19,6 @@ use crate::servers::udp::server::bound_socket::BoundSocket; use crate::servers::udp::server::processor::Processor; use crate::servers::udp::server::receiver::Receiver; use crate::servers::udp::UDP_TRACKER_LOG_TARGET; -use crate::shared::bit_torrent::tracker::udp::client::check; /// A UDP server instance launcher. 
#[derive(Constructor)] diff --git a/src/shared/bit_torrent/tracker/http/client/mod.rs b/src/shared/bit_torrent/tracker/http/client/mod.rs deleted file mode 100644 index 4c70cd68b..000000000 --- a/src/shared/bit_torrent/tracker/http/client/mod.rs +++ /dev/null @@ -1,204 +0,0 @@ -pub mod requests; -pub mod responses; - -use std::net::IpAddr; -use std::sync::Arc; -use std::time::Duration; - -use hyper::StatusCode; -use requests::{announce, scrape}; -use reqwest::{Response, Url}; -use thiserror::Error; - -use crate::core::auth::Key; - -#[derive(Debug, Clone, Error)] -pub enum Error { - #[error("Failed to Build a Http Client: {err:?}")] - ClientBuildingError { err: Arc }, - #[error("Failed to get a response: {err:?}")] - ResponseError { err: Arc }, - #[error("Returned a non-success code: \"{code}\" with the response: \"{response:?}\"")] - UnsuccessfulResponse { code: StatusCode, response: Arc }, -} - -/// HTTP Tracker Client -pub struct Client { - client: reqwest::Client, - base_url: Url, - key: Option, -} - -/// URL components in this context: -/// -/// ```text -/// http://127.0.0.1:62304/announce/YZ....rJ?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 -/// \_____________________/\_______________/ \__________________________________________________________/ -/// | | | -/// base url path query -/// ``` -impl Client { - /// # Errors - /// - /// This method fails if the client builder fails. - pub fn new(base_url: Url, timeout: Duration) -> Result { - let client = reqwest::Client::builder() - .timeout(timeout) - .build() - .map_err(|e| Error::ClientBuildingError { err: e.into() })?; - - Ok(Self { - base_url, - client, - key: None, - }) - } - - /// Creates the new client binding it to an specific local address. - /// - /// # Errors - /// - /// This method fails if the client builder fails. 
- pub fn bind(base_url: Url, timeout: Duration, local_address: IpAddr) -> Result { - let client = reqwest::Client::builder() - .timeout(timeout) - .local_address(local_address) - .build() - .map_err(|e| Error::ClientBuildingError { err: e.into() })?; - - Ok(Self { - base_url, - client, - key: None, - }) - } - - /// # Errors - /// - /// This method fails if the client builder fails. - pub fn authenticated(base_url: Url, timeout: Duration, key: Key) -> Result { - let client = reqwest::Client::builder() - .timeout(timeout) - .build() - .map_err(|e| Error::ClientBuildingError { err: e.into() })?; - - Ok(Self { - base_url, - client, - key: Some(key), - }) - } - - /// # Errors - /// - /// This method fails if the returned response was not successful - pub async fn announce(&self, query: &announce::Query) -> Result { - let response = self.get(&self.build_announce_path_and_query(query)).await?; - - if response.status().is_success() { - Ok(response) - } else { - Err(Error::UnsuccessfulResponse { - code: response.status(), - response: response.into(), - }) - } - } - - /// # Errors - /// - /// This method fails if the returned response was not successful - pub async fn scrape(&self, query: &scrape::Query) -> Result { - let response = self.get(&self.build_scrape_path_and_query(query)).await?; - - if response.status().is_success() { - Ok(response) - } else { - Err(Error::UnsuccessfulResponse { - code: response.status(), - response: response.into(), - }) - } - } - - /// # Errors - /// - /// This method fails if the returned response was not successful - pub async fn announce_with_header(&self, query: &announce::Query, key: &str, value: &str) -> Result { - let response = self - .get_with_header(&self.build_announce_path_and_query(query), key, value) - .await?; - - if response.status().is_success() { - Ok(response) - } else { - Err(Error::UnsuccessfulResponse { - code: response.status(), - response: response.into(), - }) - } - } - - /// # Errors - /// - /// This method fails if 
the returned response was not successful - pub async fn health_check(&self) -> Result { - let response = self.get(&self.build_path("health_check")).await?; - - if response.status().is_success() { - Ok(response) - } else { - Err(Error::UnsuccessfulResponse { - code: response.status(), - response: response.into(), - }) - } - } - - /// # Errors - /// - /// This method fails if there was an error while sending request. - pub async fn get(&self, path: &str) -> Result { - self.client - .get(self.build_url(path)) - .send() - .await - .map_err(|e| Error::ResponseError { err: e.into() }) - } - - /// # Errors - /// - /// This method fails if there was an error while sending request. - pub async fn get_with_header(&self, path: &str, key: &str, value: &str) -> Result { - self.client - .get(self.build_url(path)) - .header(key, value) - .send() - .await - .map_err(|e| Error::ResponseError { err: e.into() }) - } - - fn build_announce_path_and_query(&self, query: &announce::Query) -> String { - format!("{}?{query}", self.build_path("announce")) - } - - fn build_scrape_path_and_query(&self, query: &scrape::Query) -> String { - format!("{}?{query}", self.build_path("scrape")) - } - - fn build_path(&self, path: &str) -> String { - match &self.key { - Some(key) => format!("{path}/{key}"), - None => path.to_string(), - } - } - - fn build_url(&self, path: &str) -> String { - let base_url = self.base_url(); - format!("{base_url}{path}") - } - - fn base_url(&self) -> String { - self.base_url.to_string() - } -} diff --git a/src/shared/bit_torrent/tracker/http/client/requests/announce.rs b/src/shared/bit_torrent/tracker/http/client/requests/announce.rs deleted file mode 100644 index f3ce327ea..000000000 --- a/src/shared/bit_torrent/tracker/http/client/requests/announce.rs +++ /dev/null @@ -1,275 +0,0 @@ -use std::fmt; -use std::net::{IpAddr, Ipv4Addr}; -use std::str::FromStr; - -use aquatic_udp_protocol::PeerId; -use bittorrent_primitives::info_hash::InfoHash; -use 
serde_repr::Serialize_repr; - -use crate::shared::bit_torrent::tracker::http::{percent_encode_byte_array, ByteArray20}; - -pub struct Query { - pub info_hash: ByteArray20, - pub peer_addr: IpAddr, - pub downloaded: BaseTenASCII, - pub uploaded: BaseTenASCII, - pub peer_id: ByteArray20, - pub port: PortNumber, - pub left: BaseTenASCII, - pub event: Option, - pub compact: Option, -} - -impl fmt::Display for Query { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.build()) - } -} - -/// HTTP Tracker Announce Request: -/// -/// -/// -/// Some parameters in the specification are not implemented in this tracker yet. -impl Query { - /// It builds the URL query component for the announce request. - /// - /// This custom URL query params encoding is needed because `reqwest` does not allow - /// bytes arrays in query parameters. More info on this issue: - /// - /// - #[must_use] - pub fn build(&self) -> String { - self.params().to_string() - } - - #[must_use] - pub fn params(&self) -> QueryParams { - QueryParams::from(self) - } -} - -pub type BaseTenASCII = u64; -pub type PortNumber = u16; - -pub enum Event { - //Started, - //Stopped, - Completed, -} - -impl fmt::Display for Event { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - //Event::Started => write!(f, "started"), - //Event::Stopped => write!(f, "stopped"), - Event::Completed => write!(f, "completed"), - } - } -} - -#[derive(Serialize_repr, PartialEq, Debug)] -#[repr(u8)] -pub enum Compact { - Accepted = 1, - NotAccepted = 0, -} - -impl fmt::Display for Compact { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Compact::Accepted => write!(f, "1"), - Compact::NotAccepted => write!(f, "0"), - } - } -} - -pub struct QueryBuilder { - announce_query: Query, -} - -impl QueryBuilder { - /// # Panics - /// - /// Will panic if the default info-hash value is not a valid info-hash. 
- #[must_use] - pub fn with_default_values() -> QueryBuilder { - let default_announce_query = Query { - info_hash: InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0, // # DevSkim: ignore DS173237 - peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), - downloaded: 0, - uploaded: 0, - peer_id: PeerId(*b"-qB00000000000000001").0, - port: 17548, - left: 0, - event: Some(Event::Completed), - compact: Some(Compact::NotAccepted), - }; - Self { - announce_query: default_announce_query, - } - } - - #[must_use] - pub fn with_info_hash(mut self, info_hash: &InfoHash) -> Self { - self.announce_query.info_hash = info_hash.0; - self - } - - #[must_use] - pub fn with_peer_id(mut self, peer_id: &PeerId) -> Self { - self.announce_query.peer_id = peer_id.0; - self - } - - #[must_use] - pub fn with_compact(mut self, compact: Compact) -> Self { - self.announce_query.compact = Some(compact); - self - } - - #[must_use] - pub fn with_peer_addr(mut self, peer_addr: &IpAddr) -> Self { - self.announce_query.peer_addr = *peer_addr; - self - } - - #[must_use] - pub fn without_compact(mut self) -> Self { - self.announce_query.compact = None; - self - } - - #[must_use] - pub fn query(self) -> Query { - self.announce_query - } -} - -/// It contains all the GET parameters that can be used in a HTTP Announce request. -/// -/// Sample Announce URL with all the GET parameters (mandatory and optional): -/// -/// ```text -/// http://127.0.0.1:7070/announce? 
-/// info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 (mandatory) -/// peer_addr=192.168.1.88 -/// downloaded=0 -/// uploaded=0 -/// peer_id=%2DqB00000000000000000 (mandatory) -/// port=17548 (mandatory) -/// left=0 -/// event=completed -/// compact=0 -/// ``` -pub struct QueryParams { - pub info_hash: Option, - pub peer_addr: Option, - pub downloaded: Option, - pub uploaded: Option, - pub peer_id: Option, - pub port: Option, - pub left: Option, - pub event: Option, - pub compact: Option, -} - -impl std::fmt::Display for QueryParams { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let mut params = vec![]; - - if let Some(info_hash) = &self.info_hash { - params.push(("info_hash", info_hash)); - } - if let Some(peer_addr) = &self.peer_addr { - params.push(("peer_addr", peer_addr)); - } - if let Some(downloaded) = &self.downloaded { - params.push(("downloaded", downloaded)); - } - if let Some(uploaded) = &self.uploaded { - params.push(("uploaded", uploaded)); - } - if let Some(peer_id) = &self.peer_id { - params.push(("peer_id", peer_id)); - } - if let Some(port) = &self.port { - params.push(("port", port)); - } - if let Some(left) = &self.left { - params.push(("left", left)); - } - if let Some(event) = &self.event { - params.push(("event", event)); - } - if let Some(compact) = &self.compact { - params.push(("compact", compact)); - } - - let query = params - .iter() - .map(|param| format!("{}={}", param.0, param.1)) - .collect::>() - .join("&"); - - write!(f, "{query}") - } -} - -impl QueryParams { - pub fn from(announce_query: &Query) -> Self { - let event = announce_query.event.as_ref().map(std::string::ToString::to_string); - let compact = announce_query.compact.as_ref().map(std::string::ToString::to_string); - - Self { - info_hash: Some(percent_encode_byte_array(&announce_query.info_hash)), - peer_addr: Some(announce_query.peer_addr.to_string()), - downloaded: Some(announce_query.downloaded.to_string()), - uploaded: 
Some(announce_query.uploaded.to_string()), - peer_id: Some(percent_encode_byte_array(&announce_query.peer_id)), - port: Some(announce_query.port.to_string()), - left: Some(announce_query.left.to_string()), - event, - compact, - } - } - - pub fn remove_optional_params(&mut self) { - // todo: make them optional with the Option<...> in the AnnounceQuery struct - // if they are really optional. So that we can crete a minimal AnnounceQuery - // instead of removing the optional params afterwards. - // - // The original specification on: - // - // says only `ip` and `event` are optional. - // - // On - // says only `ip`, `numwant`, `key` and `trackerid` are optional. - // - // but the server is responding if all these params are not included. - self.peer_addr = None; - self.downloaded = None; - self.uploaded = None; - self.left = None; - self.event = None; - self.compact = None; - } - - /// # Panics - /// - /// Will panic if invalid param name is provided. - pub fn set(&mut self, param_name: &str, param_value: &str) { - match param_name { - "info_hash" => self.info_hash = Some(param_value.to_string()), - "peer_addr" => self.peer_addr = Some(param_value.to_string()), - "downloaded" => self.downloaded = Some(param_value.to_string()), - "uploaded" => self.uploaded = Some(param_value.to_string()), - "peer_id" => self.peer_id = Some(param_value.to_string()), - "port" => self.port = Some(param_value.to_string()), - "left" => self.left = Some(param_value.to_string()), - "event" => self.event = Some(param_value.to_string()), - "compact" => self.compact = Some(param_value.to_string()), - &_ => panic!("Invalid param name for announce query"), - } - } -} diff --git a/src/shared/bit_torrent/tracker/http/client/requests/mod.rs b/src/shared/bit_torrent/tracker/http/client/requests/mod.rs deleted file mode 100644 index 776d2dfbf..000000000 --- a/src/shared/bit_torrent/tracker/http/client/requests/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod announce; -pub mod scrape; diff --git 
a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs deleted file mode 100644 index 58b9e0dc7..000000000 --- a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs +++ /dev/null @@ -1,172 +0,0 @@ -use std::error::Error; -use std::fmt::{self}; -use std::str::FromStr; - -use bittorrent_primitives::info_hash::InfoHash; - -use crate::shared::bit_torrent::tracker::http::{percent_encode_byte_array, ByteArray20}; - -pub struct Query { - pub info_hash: Vec, -} - -impl fmt::Display for Query { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.build()) - } -} - -#[derive(Debug)] -#[allow(dead_code)] -pub struct ConversionError(String); - -impl fmt::Display for ConversionError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Invalid infohash: {}", self.0) - } -} - -impl Error for ConversionError {} - -impl TryFrom<&[String]> for Query { - type Error = ConversionError; - - fn try_from(info_hashes: &[String]) -> Result { - let mut validated_info_hashes: Vec = Vec::new(); - - for info_hash in info_hashes { - let validated_info_hash = InfoHash::from_str(info_hash).map_err(|_| ConversionError(info_hash.clone()))?; - validated_info_hashes.push(validated_info_hash.0); - } - - Ok(Self { - info_hash: validated_info_hashes, - }) - } -} - -impl TryFrom> for Query { - type Error = ConversionError; - - fn try_from(info_hashes: Vec) -> Result { - let mut validated_info_hashes: Vec = Vec::new(); - - for info_hash in info_hashes { - let validated_info_hash = InfoHash::from_str(&info_hash).map_err(|_| ConversionError(info_hash.clone()))?; - validated_info_hashes.push(validated_info_hash.0); - } - - Ok(Self { - info_hash: validated_info_hashes, - }) - } -} - -/// HTTP Tracker Scrape Request: -/// -/// -impl Query { - /// It builds the URL query component for the scrape request. 
- /// - /// This custom URL query params encoding is needed because `reqwest` does not allow - /// bytes arrays in query parameters. More info on this issue: - /// - /// - #[must_use] - pub fn build(&self) -> String { - self.params().to_string() - } - - #[must_use] - pub fn params(&self) -> QueryParams { - QueryParams::from(self) - } -} - -pub struct QueryBuilder { - scrape_query: Query, -} - -impl Default for QueryBuilder { - fn default() -> Self { - let default_scrape_query = Query { - info_hash: [InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0].to_vec(), // # DevSkim: ignore DS173237 - }; - Self { - scrape_query: default_scrape_query, - } - } -} - -impl QueryBuilder { - #[must_use] - pub fn with_one_info_hash(mut self, info_hash: &InfoHash) -> Self { - self.scrape_query.info_hash = [info_hash.0].to_vec(); - self - } - - #[must_use] - pub fn add_info_hash(mut self, info_hash: &InfoHash) -> Self { - self.scrape_query.info_hash.push(info_hash.0); - self - } - - #[must_use] - pub fn query(self) -> Query { - self.scrape_query - } -} - -/// It contains all the GET parameters that can be used in a HTTP Scrape request. -/// -/// The `info_hash` param is the percent encoded of the the 20-byte array info hash. -/// -/// Sample Scrape URL with all the GET parameters: -/// -/// For `IpV4`: -/// -/// ```text -/// http://127.0.0.1:7070/scrape?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 -/// ``` -/// -/// For `IpV6`: -/// -/// ```text -/// http://[::1]:7070/scrape?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 -/// ``` -/// -/// You can add as many info hashes as you want, just adding the same param again. 
-pub struct QueryParams { - pub info_hash: Vec, -} - -impl QueryParams { - pub fn set_one_info_hash_param(&mut self, info_hash: &str) { - self.info_hash = vec![info_hash.to_string()]; - } -} - -impl std::fmt::Display for QueryParams { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let query = self - .info_hash - .iter() - .map(|info_hash| format!("info_hash={}", &info_hash)) - .collect::>() - .join("&"); - - write!(f, "{query}") - } -} - -impl QueryParams { - pub fn from(scrape_query: &Query) -> Self { - let info_hashes = scrape_query - .info_hash - .iter() - .map(percent_encode_byte_array) - .collect::>(); - - Self { info_hash: info_hashes } - } -} diff --git a/src/shared/bit_torrent/tracker/http/client/responses/announce.rs b/src/shared/bit_torrent/tracker/http/client/responses/announce.rs deleted file mode 100644 index 7f2d3611c..000000000 --- a/src/shared/bit_torrent/tracker/http/client/responses/announce.rs +++ /dev/null @@ -1,126 +0,0 @@ -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - -use serde::{Deserialize, Serialize}; -use torrust_tracker_primitives::peer; -use zerocopy::AsBytes as _; - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct Announce { - pub complete: u32, - pub incomplete: u32, - pub interval: u32, - #[serde(rename = "min interval")] - pub min_interval: u32, - pub peers: Vec, // Peers using IPV4 and IPV6 -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct DictionaryPeer { - pub ip: String, - #[serde(rename = "peer id")] - #[serde(with = "serde_bytes")] - pub peer_id: Vec, - pub port: u16, -} - -impl From for DictionaryPeer { - fn from(peer: peer::Peer) -> Self { - DictionaryPeer { - peer_id: peer.peer_id.as_bytes().to_vec(), - ip: peer.peer_addr.ip().to_string(), - port: peer.peer_addr.port(), - } - } -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct DeserializedCompact { - pub complete: u32, - pub incomplete: u32, - pub interval: u32, - #[serde(rename = "min 
interval")] - pub min_interval: u32, - #[serde(with = "serde_bytes")] - pub peers: Vec, -} - -impl DeserializedCompact { - /// # Errors - /// - /// Will return an error if bytes can't be deserialized. - pub fn from_bytes(bytes: &[u8]) -> Result { - serde_bencode::from_bytes::(bytes) - } -} - -#[derive(Debug, PartialEq)] -pub struct Compact { - // code-review: there could be a way to deserialize this struct directly - // by using serde instead of doing it manually. Or at least using a custom deserializer. - pub complete: u32, - pub incomplete: u32, - pub interval: u32, - pub min_interval: u32, - pub peers: CompactPeerList, -} - -#[derive(Debug, PartialEq)] -pub struct CompactPeerList { - peers: Vec, -} - -impl CompactPeerList { - #[must_use] - pub fn new(peers: Vec) -> Self { - Self { peers } - } -} - -#[derive(Clone, Debug, PartialEq)] -pub struct CompactPeer { - ip: Ipv4Addr, - port: u16, -} - -impl CompactPeer { - /// # Panics - /// - /// Will panic if the provided socket address is a IPv6 IP address. - /// It's not supported for compact peers. 
- #[must_use] - pub fn new(socket_addr: &SocketAddr) -> Self { - match socket_addr.ip() { - IpAddr::V4(ip) => Self { - ip, - port: socket_addr.port(), - }, - IpAddr::V6(_ip) => panic!("IPV6 is not supported for compact peer"), - } - } - - #[must_use] - pub fn new_from_bytes(bytes: &[u8]) -> Self { - Self { - ip: Ipv4Addr::new(bytes[0], bytes[1], bytes[2], bytes[3]), - port: u16::from_be_bytes([bytes[4], bytes[5]]), - } - } -} - -impl From for Compact { - fn from(compact_announce: DeserializedCompact) -> Self { - let mut peers = vec![]; - - for peer_bytes in compact_announce.peers.chunks_exact(6) { - peers.push(CompactPeer::new_from_bytes(peer_bytes)); - } - - Self { - complete: compact_announce.complete, - incomplete: compact_announce.incomplete, - interval: compact_announce.interval, - min_interval: compact_announce.min_interval, - peers: CompactPeerList::new(peers), - } - } -} diff --git a/src/shared/bit_torrent/tracker/http/client/responses/error.rs b/src/shared/bit_torrent/tracker/http/client/responses/error.rs deleted file mode 100644 index 00befdb54..000000000 --- a/src/shared/bit_torrent/tracker/http/client/responses/error.rs +++ /dev/null @@ -1,7 +0,0 @@ -use serde::{Deserialize, Serialize}; - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct Error { - #[serde(rename = "failure reason")] - pub failure_reason: String, -} diff --git a/src/shared/bit_torrent/tracker/http/client/responses/mod.rs b/src/shared/bit_torrent/tracker/http/client/responses/mod.rs deleted file mode 100644 index bdc689056..000000000 --- a/src/shared/bit_torrent/tracker/http/client/responses/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod announce; -pub mod error; -pub mod scrape; diff --git a/src/shared/bit_torrent/tracker/http/client/responses/scrape.rs b/src/shared/bit_torrent/tracker/http/client/responses/scrape.rs deleted file mode 100644 index 25a2f0a81..000000000 --- a/src/shared/bit_torrent/tracker/http/client/responses/scrape.rs +++ /dev/null @@ -1,230 +0,0 @@ -use 
std::collections::HashMap; -use std::fmt::Write; -use std::str; - -use serde::ser::SerializeMap; -use serde::{Deserialize, Serialize, Serializer}; -use serde_bencode::value::Value; - -use crate::shared::bit_torrent::tracker::http::{ByteArray20, InfoHash}; - -#[derive(Debug, PartialEq, Default, Deserialize)] -pub struct Response { - pub files: HashMap, -} - -impl Response { - #[must_use] - pub fn with_one_file(info_hash_bytes: ByteArray20, file: File) -> Self { - let mut files: HashMap = HashMap::new(); - files.insert(info_hash_bytes, file); - Self { files } - } - - /// # Errors - /// - /// Will return an error if the deserialized bencoded response can't not be converted into a valid response. - /// - /// # Panics - /// - /// Will panic if it can't deserialize the bencoded response. - pub fn try_from_bencoded(bytes: &[u8]) -> Result { - let scrape_response: DeserializedResponse = - serde_bencode::from_bytes(bytes).expect("provided bytes should be a valid bencoded response"); - Self::try_from(scrape_response) - } -} - -#[derive(Serialize, Deserialize, Debug, PartialEq, Default)] -pub struct File { - pub complete: i64, // The number of active peers that have completed downloading - pub downloaded: i64, // The number of peers that have ever completed downloading - pub incomplete: i64, // The number of active peers that have not completed downloading -} - -impl File { - #[must_use] - pub fn zeroed() -> Self { - Self::default() - } -} - -impl TryFrom for Response { - type Error = BencodeParseError; - - fn try_from(scrape_response: DeserializedResponse) -> Result { - parse_bencoded_response(&scrape_response.files) - } -} - -#[derive(Serialize, Deserialize, Debug, PartialEq)] -struct DeserializedResponse { - pub files: Value, -} - -// Custom serialization for Response -impl Serialize for Response { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - let mut map = serializer.serialize_map(Some(self.files.len()))?; - for (key, value) in &self.files 
{ - // Convert ByteArray20 key to hex string - let hex_key = byte_array_to_hex_string(key); - map.serialize_entry(&hex_key, value)?; - } - map.end() - } -} - -// Helper function to convert ByteArray20 to hex string -fn byte_array_to_hex_string(byte_array: &ByteArray20) -> String { - let mut hex_string = String::with_capacity(byte_array.len() * 2); - for byte in byte_array { - write!(hex_string, "{byte:02x}").expect("Writing to string should never fail"); - } - hex_string -} - -#[derive(Default)] -pub struct ResponseBuilder { - response: Response, -} - -impl ResponseBuilder { - #[must_use] - pub fn add_file(mut self, info_hash_bytes: ByteArray20, file: File) -> Self { - self.response.files.insert(info_hash_bytes, file); - self - } - - #[must_use] - pub fn build(self) -> Response { - self.response - } -} - -#[derive(Debug)] -pub enum BencodeParseError { - InvalidValueExpectedDict { value: Value }, - InvalidValueExpectedInt { value: Value }, - InvalidFileField { value: Value }, - MissingFileField { field_name: String }, -} - -/// It parses a bencoded scrape response into a `Response` struct. 
-/// -/// For example: -/// -/// ```text -/// d5:filesd20:xxxxxxxxxxxxxxxxxxxxd8:completei11e10:downloadedi13772e10:incompletei19e -/// 20:yyyyyyyyyyyyyyyyyyyyd8:completei21e10:downloadedi206e10:incompletei20eee -/// ``` -/// -/// Response (JSON encoded for readability): -/// -/// ```text -/// { -/// 'files': { -/// 'xxxxxxxxxxxxxxxxxxxx': {'complete': 11, 'downloaded': 13772, 'incomplete': 19}, -/// 'yyyyyyyyyyyyyyyyyyyy': {'complete': 21, 'downloaded': 206, 'incomplete': 20} -/// } -/// } -fn parse_bencoded_response(value: &Value) -> Result { - let mut files: HashMap = HashMap::new(); - - match value { - Value::Dict(dict) => { - for file_element in dict { - let info_hash_byte_vec = file_element.0; - let file_value = file_element.1; - - let file = parse_bencoded_file(file_value).unwrap(); - - files.insert(InfoHash::new(info_hash_byte_vec).bytes(), file); - } - } - _ => return Err(BencodeParseError::InvalidValueExpectedDict { value: value.clone() }), - } - - Ok(Response { files }) -} - -/// It parses a bencoded dictionary into a `File` struct. 
-/// -/// For example: -/// -/// -/// ```text -/// d8:completei11e10:downloadedi13772e10:incompletei19ee -/// ``` -/// -/// into: -/// -/// ```text -/// File { -/// complete: 11, -/// downloaded: 13772, -/// incomplete: 19, -/// } -/// ``` -fn parse_bencoded_file(value: &Value) -> Result { - let file = match &value { - Value::Dict(dict) => { - let mut complete = None; - let mut downloaded = None; - let mut incomplete = None; - - for file_field in dict { - let field_name = file_field.0; - - let field_value = match file_field.1 { - Value::Int(number) => Ok(*number), - _ => Err(BencodeParseError::InvalidValueExpectedInt { - value: file_field.1.clone(), - }), - }?; - - if field_name == b"complete" { - complete = Some(field_value); - } else if field_name == b"downloaded" { - downloaded = Some(field_value); - } else if field_name == b"incomplete" { - incomplete = Some(field_value); - } else { - return Err(BencodeParseError::InvalidFileField { - value: file_field.1.clone(), - }); - } - } - - if complete.is_none() { - return Err(BencodeParseError::MissingFileField { - field_name: "complete".to_string(), - }); - } - - if downloaded.is_none() { - return Err(BencodeParseError::MissingFileField { - field_name: "downloaded".to_string(), - }); - } - - if incomplete.is_none() { - return Err(BencodeParseError::MissingFileField { - field_name: "incomplete".to_string(), - }); - } - - File { - complete: complete.unwrap(), - downloaded: downloaded.unwrap(), - incomplete: incomplete.unwrap(), - } - } - _ => return Err(BencodeParseError::InvalidValueExpectedDict { value: value.clone() }), - }; - - Ok(file) -} diff --git a/src/shared/bit_torrent/tracker/http/mod.rs b/src/shared/bit_torrent/tracker/http/mod.rs deleted file mode 100644 index 15723c1b7..000000000 --- a/src/shared/bit_torrent/tracker/http/mod.rs +++ /dev/null @@ -1,26 +0,0 @@ -pub mod client; - -use percent_encoding::NON_ALPHANUMERIC; - -pub type ByteArray20 = [u8; 20]; - -#[must_use] -pub fn percent_encode_byte_array(bytes: 
&ByteArray20) -> String { - percent_encoding::percent_encode(bytes, NON_ALPHANUMERIC).to_string() -} - -pub struct InfoHash(ByteArray20); - -impl InfoHash { - #[must_use] - pub fn new(vec: &[u8]) -> Self { - let mut byte_array_20: ByteArray20 = Default::default(); - byte_array_20.clone_from_slice(vec); - Self(byte_array_20) - } - - #[must_use] - pub fn bytes(&self) -> ByteArray20 { - self.0 - } -} diff --git a/src/shared/bit_torrent/tracker/mod.rs b/src/shared/bit_torrent/tracker/mod.rs index b08eaa622..7e5aaa137 100644 --- a/src/shared/bit_torrent/tracker/mod.rs +++ b/src/shared/bit_torrent/tracker/mod.rs @@ -1,2 +1 @@ -pub mod http; pub mod udp; diff --git a/src/shared/bit_torrent/tracker/udp/client.rs b/src/shared/bit_torrent/tracker/udp/client.rs deleted file mode 100644 index edb8adc85..000000000 --- a/src/shared/bit_torrent/tracker/udp/client.rs +++ /dev/null @@ -1,270 +0,0 @@ -use core::result::Result::{Err, Ok}; -use std::io::Cursor; -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; -use std::sync::Arc; -use std::time::Duration; - -use aquatic_udp_protocol::{ConnectRequest, Request, Response, TransactionId}; -use tokio::net::UdpSocket; -use tokio::time; -use torrust_tracker_configuration::DEFAULT_TIMEOUT; -use zerocopy::network_endian::I32; - -use super::Error; -use crate::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; - -pub const UDP_CLIENT_LOG_TARGET: &str = "UDP CLIENT"; - -#[allow(clippy::module_name_repetitions)] -#[derive(Debug)] -pub struct UdpClient { - /// The socket to connect to - pub socket: Arc, - - /// Timeout for sending and receiving packets - pub timeout: Duration, -} - -impl UdpClient { - /// Creates a new `UdpClient` bound to the default port and ipv6 address - /// - /// # Errors - /// - /// Will return error if unable to bind to any port or ip address. 
- /// - async fn bound_to_default_ipv4(timeout: Duration) -> Result { - let addr = SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0); - - Self::bound(addr, timeout).await - } - - /// Creates a new `UdpClient` bound to the default port and ipv6 address - /// - /// # Errors - /// - /// Will return error if unable to bind to any port or ip address. - /// - async fn bound_to_default_ipv6(timeout: Duration) -> Result { - let addr = SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 0); - - Self::bound(addr, timeout).await - } - - /// Creates a new `UdpClient` connected to a Udp server - /// - /// # Errors - /// - /// Will return any errors present in the call stack - /// - pub async fn connected(remote_addr: SocketAddr, timeout: Duration) -> Result { - let client = if remote_addr.is_ipv4() { - Self::bound_to_default_ipv4(timeout).await? - } else { - Self::bound_to_default_ipv6(timeout).await? - }; - - client.connect(remote_addr).await?; - Ok(client) - } - - /// Creates a `[UdpClient]` bound to a Socket. - /// - /// # Panics - /// - /// Panics if unable to get the `local_addr` of the bound socket. - /// - /// # Errors - /// - /// This function will return an error if the binding takes to long - /// or if there is an underlying OS error. - pub async fn bound(addr: SocketAddr, timeout: Duration) -> Result { - tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "binding to socket: {addr:?} ..."); - - let socket = time::timeout(timeout, UdpSocket::bind(addr)) - .await - .map_err(|_| Error::TimeoutWhileBindingToSocket { addr })? - .map_err(|e| Error::UnableToBindToSocket { err: e.into(), addr })?; - - let addr = socket.local_addr().expect("it should get the local address"); - - tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "bound to socket: {addr:?}."); - - let udp_client = Self { - socket: Arc::new(socket), - timeout, - }; - - Ok(udp_client) - } - - /// # Errors - /// - /// Will return error if can't connect to the socket. 
- pub async fn connect(&self, remote_addr: SocketAddr) -> Result<(), Error> { - tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "connecting to remote: {remote_addr:?} ..."); - - let () = time::timeout(self.timeout, self.socket.connect(remote_addr)) - .await - .map_err(|_| Error::TimeoutWhileConnectingToRemote { remote_addr })? - .map_err(|e| Error::UnableToConnectToRemote { - err: e.into(), - remote_addr, - })?; - - tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "connected to remote: {remote_addr:?}."); - - Ok(()) - } - - /// # Errors - /// - /// Will return error if: - /// - /// - Can't write to the socket. - /// - Can't send data. - pub async fn send(&self, bytes: &[u8]) -> Result { - tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "sending {bytes:?} ..."); - - let () = time::timeout(self.timeout, self.socket.writable()) - .await - .map_err(|_| Error::TimeoutWaitForWriteableSocket)? - .map_err(|e| Error::UnableToGetWritableSocket { err: e.into() })?; - - let sent_bytes = time::timeout(self.timeout, self.socket.send(bytes)) - .await - .map_err(|_| Error::TimeoutWhileSendingData { data: bytes.to_vec() })? - .map_err(|e| Error::UnableToSendData { - err: e.into(), - data: bytes.to_vec(), - })?; - - tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "sent {sent_bytes} bytes to remote."); - - Ok(sent_bytes) - } - - /// # Errors - /// - /// Will return error if: - /// - /// - Can't read from the socket. - /// - Can't receive data. - /// - /// # Panics - /// - pub async fn receive(&self) -> Result, Error> { - tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "receiving ..."); - - let mut buffer = [0u8; MAX_PACKET_SIZE]; - - let () = time::timeout(self.timeout, self.socket.readable()) - .await - .map_err(|_| Error::TimeoutWaitForReadableSocket)? - .map_err(|e| Error::UnableToGetReadableSocket { err: e.into() })?; - - let received_bytes = time::timeout(self.timeout, self.socket.recv(&mut buffer)) - .await - .map_err(|_| Error::TimeoutWhileReceivingData)? 
- .map_err(|e| Error::UnableToReceivingData { err: e.into() })?; - - let mut received: Vec = buffer.to_vec(); - Vec::truncate(&mut received, received_bytes); - - tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "received {received_bytes} bytes: {received:?}"); - - Ok(received) - } -} - -#[allow(clippy::module_name_repetitions)] -#[derive(Debug)] -pub struct UdpTrackerClient { - pub client: UdpClient, -} - -impl UdpTrackerClient { - /// Creates a new `UdpTrackerClient` connected to a Udp Tracker server - /// - /// # Errors - /// - /// If unable to connect to the remote address. - /// - pub async fn new(remote_addr: SocketAddr, timeout: Duration) -> Result { - let client = UdpClient::connected(remote_addr, timeout).await?; - Ok(UdpTrackerClient { client }) - } - - /// # Errors - /// - /// Will return error if can't write request to bytes. - pub async fn send(&self, request: Request) -> Result { - tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "sending request {request:?} ..."); - - // Write request into a buffer - // todo: optimize the pre-allocated amount based upon request type. - let mut writer = Cursor::new(Vec::with_capacity(200)); - let () = request - .write_bytes(&mut writer) - .map_err(|e| Error::UnableToWriteDataFromRequest { err: e.into(), request })?; - - self.client.send(writer.get_ref()).await - } - - /// # Errors - /// - /// Will return error if can't create response from the received payload (bytes buffer). - pub async fn receive(&self) -> Result { - let response = self.client.receive().await?; - - tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "received {} bytes: {response:?}", response.len()); - - Response::parse_bytes(&response, true).map_err(|e| Error::UnableToParseResponse { err: e.into(), response }) - } -} - -/// Helper Function to Check if a UDP Service is Connectable -/// -/// # Panics -/// -/// It will return an error if unable to connect to the UDP service. 
-/// -/// # Errors -/// -pub async fn check(remote_addr: &SocketAddr) -> Result { - tracing::debug!("Checking Service (detail): {remote_addr:?}."); - - match UdpTrackerClient::new(*remote_addr, DEFAULT_TIMEOUT).await { - Ok(client) => { - let connect_request = ConnectRequest { - transaction_id: TransactionId(I32::new(123)), - }; - - // client.send() return usize, but doesn't use here - match client.send(connect_request.into()).await { - Ok(_) => (), - Err(e) => tracing::debug!("Error: {e:?}."), - }; - - let process = move |response| { - if matches!(response, Response::Connect(_connect_response)) { - Ok("Connected".to_string()) - } else { - Err("Did not Connect".to_string()) - } - }; - - let sleep = time::sleep(Duration::from_millis(2000)); - tokio::pin!(sleep); - - tokio::select! { - () = &mut sleep => { - Err("Timed Out".to_string()) - } - response = client.receive() => { - process(response.unwrap()) - } - } - } - Err(e) => Err(format!("{e:?}")), - } -} diff --git a/src/shared/bit_torrent/tracker/udp/mod.rs b/src/shared/bit_torrent/tracker/udp/mod.rs index b9d5f34f6..1ceb8a08b 100644 --- a/src/shared/bit_torrent/tracker/udp/mod.rs +++ b/src/shared/bit_torrent/tracker/udp/mod.rs @@ -1,68 +1,6 @@ -use std::net::SocketAddr; -use std::sync::Arc; - -use aquatic_udp_protocol::Request; -use thiserror::Error; -use torrust_tracker_located_error::DynError; - -pub mod client; - /// The maximum number of bytes in a UDP packet. pub const MAX_PACKET_SIZE: usize = 1496; + /// A magic 64-bit integer constant defined in the protocol that is used to /// identify the protocol. 
pub const PROTOCOL_ID: i64 = 0x0417_2710_1980; - -#[derive(Debug, Clone, Error)] -pub enum Error { - #[error("Timeout while waiting for socket to bind: {addr:?}")] - TimeoutWhileBindingToSocket { addr: SocketAddr }, - - #[error("Failed to bind to socket: {addr:?}, with error: {err:?}")] - UnableToBindToSocket { err: Arc, addr: SocketAddr }, - - #[error("Timeout while waiting for connection to remote: {remote_addr:?}")] - TimeoutWhileConnectingToRemote { remote_addr: SocketAddr }, - - #[error("Failed to connect to remote: {remote_addr:?}, with error: {err:?}")] - UnableToConnectToRemote { - err: Arc, - remote_addr: SocketAddr, - }, - - #[error("Timeout while waiting for the socket to become writable.")] - TimeoutWaitForWriteableSocket, - - #[error("Failed to get writable socket: {err:?}")] - UnableToGetWritableSocket { err: Arc }, - - #[error("Timeout while trying to send data: {data:?}")] - TimeoutWhileSendingData { data: Vec }, - - #[error("Failed to send data: {data:?}, with error: {err:?}")] - UnableToSendData { err: Arc, data: Vec }, - - #[error("Timeout while waiting for the socket to become readable.")] - TimeoutWaitForReadableSocket, - - #[error("Failed to get readable socket: {err:?}")] - UnableToGetReadableSocket { err: Arc }, - - #[error("Timeout while trying to receive data.")] - TimeoutWhileReceivingData, - - #[error("Failed to receive data: {err:?}")] - UnableToReceivingData { err: Arc }, - - #[error("Failed to get data from request: {request:?}, with error: {err:?}")] - UnableToWriteDataFromRequest { err: Arc, request: Request }, - - #[error("Failed to parse response: {response:?}, with error: {err:?}")] - UnableToParseResponse { err: Arc, response: Vec }, -} - -impl From for DynError { - fn from(e: Error) -> Self { - Arc::new(Box::new(e)) - } -} diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index 1f9b71b62..73f7ce368 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -6,7 +6,7 @@ use 
core::panic; use aquatic_udp_protocol::{ConnectRequest, ConnectionId, Response, TransactionId}; -use torrust_tracker::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; +use bittorrent_tracker_client::udp::client::UdpTrackerClient; use torrust_tracker::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; @@ -71,7 +71,7 @@ async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_req mod receiving_a_connection_request { use aquatic_udp_protocol::{ConnectRequest, TransactionId}; - use torrust_tracker::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; + use bittorrent_tracker_client::udp::client::UdpTrackerClient; use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; use tracing::level_filters::LevelFilter; @@ -120,7 +120,7 @@ mod receiving_an_announce_request { AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, TransactionId, }; - use torrust_tracker::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; + use bittorrent_tracker_client::udp::client::UdpTrackerClient; use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; use tracing::level_filters::LevelFilter; @@ -214,7 +214,7 @@ mod receiving_an_announce_request { mod receiving_an_scrape_request { use aquatic_udp_protocol::{ConnectionId, InfoHash, ScrapeRequest, TransactionId}; - use torrust_tracker::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; + use bittorrent_tracker_client::udp::client::UdpTrackerClient; use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; use tracing::level_filters::LevelFilter; From a5822cd63124a6617ee92676d176310918d822f4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 1 Nov 2024 16:58:56 +0000 Subject: 
[PATCH 016/802] fix: cargo machete errors --- Cargo.lock | 2 -- Cargo.toml | 1 - packages/tracker-client/Cargo.toml | 4 +++- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 00d83fddb..0bf1ad572 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -626,7 +626,6 @@ dependencies = [ "clap", "derive_more", "futures", - "futures-util", "hex-literal", "hyper", "percent-encoding", @@ -3859,7 +3858,6 @@ dependencies = [ "figment", "futures", "futures-util", - "hex-literal", "http-body", "hyper", "hyper-util", diff --git a/Cargo.toml b/Cargo.toml index 574881a94..a3d88be92 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,7 +47,6 @@ derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } figment = "0" futures = "0" futures-util = "0" -hex-literal = "0" http-body = "1" hyper = "1" hyper-util = { version = "0", features = ["http1", "http2", "tokio"] } diff --git a/packages/tracker-client/Cargo.toml b/packages/tracker-client/Cargo.toml index 85e10c03e..3334e7b47 100644 --- a/packages/tracker-client/Cargo.toml +++ b/packages/tracker-client/Cargo.toml @@ -21,7 +21,6 @@ bittorrent-primitives = "0.1.0" clap = { version = "4", features = ["derive", "env"] } derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } futures = "0" -futures-util = "0" hex-literal = "0" hyper = "1" percent-encoding = "2" @@ -40,3 +39,6 @@ tracing = "0" tracing-subscriber = { version = "0", features = ["json"] } url = { version = "2", features = ["serde"] } zerocopy = "0.7" + +[package.metadata.cargo-machete] +ignored = ["serde_bytes"] From e01995cf1bc9c08a99f9e3b59b6aa7d2a6620908 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 1 Nov 2024 17:34:50 +0000 Subject: [PATCH 017/802] fix: tracker checker execution in CI It was extractted in toa new package. 
--- src/console/ci/e2e/tracker_checker.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/console/ci/e2e/tracker_checker.rs b/src/console/ci/e2e/tracker_checker.rs index 192795e61..b4c2544ee 100644 --- a/src/console/ci/e2e/tracker_checker.rs +++ b/src/console/ci/e2e/tracker_checker.rs @@ -7,12 +7,14 @@ use std::process::Command; /// /// Will return an error if the Tracker Checker fails. pub fn run(config_content: &str) -> io::Result<()> { - tracing::info!("Running Tracker Checker: TORRUST_CHECKER_CONFIG=[config] cargo run --bin tracker_checker"); + tracing::info!( + "Running Tracker Checker: TORRUST_CHECKER_CONFIG=[config] cargo run -p bittorrent-tracker-client --bin tracker_checker" + ); tracing::info!("Tracker Checker config:\n{config_content}"); let status = Command::new("cargo") .env("TORRUST_CHECKER_CONFIG", config_content) - .args(["run", "--bin", "tracker_checker"]) + .args(["run", "-p", "bittorrent-tracker-client", "--bin", "tracker_checker"]) .status()?; if status.success() { From 9d8162488a6dade21e0a67cf8371a9745433f023 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 13 Nov 2024 10:23:45 +0000 Subject: [PATCH 018/802] feat: extract console clients into a new package --- .github/workflows/deployment.yaml | 9 +- Cargo.lock | 25 ++ Cargo.toml | 1 + console/tracker-client/Cargo.toml | 39 +++ console/tracker-client/README.md | 199 ++++++++++++ .../docs/licenses/LICENSE-MIT_0 | 14 + .../src/bin/http_tracker_client.rs | 7 + .../tracker-client/src/bin/tracker_checker.rs | 7 + .../src/bin/udp_tracker_client.rs | 7 + .../src/console/clients/checker/app.rs | 120 ++++++++ .../console/clients/checker/checks/health.rs | 77 +++++ .../console/clients/checker/checks/http.rs | 104 +++++++ .../src/console/clients/checker/checks/mod.rs | 4 + .../console/clients/checker/checks/structs.rs | 12 + .../src/console/clients/checker/checks/udp.rs | 134 +++++++++ .../src/console/clients/checker/config.rs | 282 ++++++++++++++++++ 
.../src/console/clients/checker/console.rs | 38 +++ .../src/console/clients/checker/logger.rs | 72 +++++ .../src/console/clients/checker/mod.rs | 7 + .../src/console/clients/checker/printer.rs | 9 + .../src/console/clients/checker/service.rs | 62 ++++ .../src/console/clients/http/app.rs | 101 +++++++ .../src/console/clients/http/mod.rs | 35 +++ .../tracker-client/src/console/clients/mod.rs | 4 + .../src/console/clients/udp/app.rs | 208 +++++++++++++ .../src/console/clients/udp/checker.rs | 177 +++++++++++ .../src/console/clients/udp/mod.rs | 50 ++++ .../src/console/clients/udp/responses/dto.rs | 128 ++++++++ .../src/console/clients/udp/responses/json.rs | 25 ++ .../src/console/clients/udp/responses/mod.rs | 2 + console/tracker-client/src/console/mod.rs | 2 + console/tracker-client/src/lib.rs | 1 + packages/tracker-client/README.md | 2 +- 33 files changed, 1959 insertions(+), 5 deletions(-) create mode 100644 console/tracker-client/Cargo.toml create mode 100644 console/tracker-client/README.md create mode 100644 console/tracker-client/docs/licenses/LICENSE-MIT_0 create mode 100644 console/tracker-client/src/bin/http_tracker_client.rs create mode 100644 console/tracker-client/src/bin/tracker_checker.rs create mode 100644 console/tracker-client/src/bin/udp_tracker_client.rs create mode 100644 console/tracker-client/src/console/clients/checker/app.rs create mode 100644 console/tracker-client/src/console/clients/checker/checks/health.rs create mode 100644 console/tracker-client/src/console/clients/checker/checks/http.rs create mode 100644 console/tracker-client/src/console/clients/checker/checks/mod.rs create mode 100644 console/tracker-client/src/console/clients/checker/checks/structs.rs create mode 100644 console/tracker-client/src/console/clients/checker/checks/udp.rs create mode 100644 console/tracker-client/src/console/clients/checker/config.rs create mode 100644 console/tracker-client/src/console/clients/checker/console.rs create mode 100644 
console/tracker-client/src/console/clients/checker/logger.rs create mode 100644 console/tracker-client/src/console/clients/checker/mod.rs create mode 100644 console/tracker-client/src/console/clients/checker/printer.rs create mode 100644 console/tracker-client/src/console/clients/checker/service.rs create mode 100644 console/tracker-client/src/console/clients/http/app.rs create mode 100644 console/tracker-client/src/console/clients/http/mod.rs create mode 100644 console/tracker-client/src/console/clients/mod.rs create mode 100644 console/tracker-client/src/console/clients/udp/app.rs create mode 100644 console/tracker-client/src/console/clients/udp/checker.rs create mode 100644 console/tracker-client/src/console/clients/udp/mod.rs create mode 100644 console/tracker-client/src/console/clients/udp/responses/dto.rs create mode 100644 console/tracker-client/src/console/clients/udp/responses/json.rs create mode 100644 console/tracker-client/src/console/clients/udp/responses/mod.rs create mode 100644 console/tracker-client/src/console/mod.rs create mode 100644 console/tracker-client/src/lib.rs diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index e30eccc71..7f458cda2 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -55,11 +55,12 @@ jobs: env: CARGO_REGISTRY_TOKEN: "${{ secrets.TORRUST_UPDATE_CARGO_REGISTRY_TOKEN }}" run: | + cargo publish -p torrust-tracker + cargo publish -p torrust-tracker-client + cargo publish -p torrust-tracker-clock + cargo publish -p torrust-tracker-configuration cargo publish -p torrust-tracker-contrib-bencode cargo publish -p torrust-tracker-located-error cargo publish -p torrust-tracker-primitives - cargo publish -p torrust-tracker-clock - cargo publish -p torrust-tracker-configuration - cargo publish -p torrust-tracker-torrent-repository cargo publish -p torrust-tracker-test-helpers - cargo publish -p torrust-tracker + cargo publish -p torrust-tracker-torrent-repository 
diff --git a/Cargo.lock b/Cargo.lock index 0bf1ad572..ec723efff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3899,6 +3899,31 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "torrust-tracker-client" +version = "3.0.0-develop" +dependencies = [ + "anyhow", + "aquatic_udp_protocol", + "bittorrent-primitives", + "bittorrent-tracker-client", + "clap", + "futures", + "hex-literal", + "hyper", + "reqwest", + "serde", + "serde_bencode", + "serde_bytes", + "serde_json", + "thiserror", + "tokio", + "torrust-tracker-configuration", + "tracing", + "tracing-subscriber", + "url", +] + [[package]] name = "torrust-tracker-clock" version = "3.0.0-develop" diff --git a/Cargo.toml b/Cargo.toml index a3d88be92..bc772d08a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -94,6 +94,7 @@ torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "packages/tes [workspace] members = [ + "console/tracker-client", "contrib/bencode", "packages/configuration", "packages/located-error", diff --git a/console/tracker-client/Cargo.toml b/console/tracker-client/Cargo.toml new file mode 100644 index 000000000..c9e951003 --- /dev/null +++ b/console/tracker-client/Cargo.toml @@ -0,0 +1,39 @@ +[package] +description = "A collection of console clients to make requests to BitTorrent trackers." 
+keywords = ["bittorrent", "client", "tracker"] +license = "LGPL-3.0" +name = "torrust-tracker-client" +readme = "README.md" + +authors.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +anyhow = "1" +aquatic_udp_protocol = "0" +bittorrent-primitives = "0.1.0" +bittorrent-tracker-client = { version = "3.0.0-develop", path = "../../packages/tracker-client" } +clap = { version = "4", features = ["derive", "env"] } +futures = "0" +hex-literal = "0" +hyper = "1" +reqwest = { version = "0", features = ["json"] } +serde = { version = "1", features = ["derive"] } +serde_bencode = "0" +serde_bytes = "0" +serde_json = { version = "1", features = ["preserve_order"] } +thiserror = "1" +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-tracker-configuration = { version = "3.0.0-develop", path = "../../packages/configuration" } +tracing = "0" +tracing-subscriber = { version = "0", features = ["json"] } +url = { version = "2", features = ["serde"] } + +[package.metadata.cargo-machete] +ignored = ["serde_bytes"] diff --git a/console/tracker-client/README.md b/console/tracker-client/README.md new file mode 100644 index 000000000..87722657f --- /dev/null +++ b/console/tracker-client/README.md @@ -0,0 +1,199 @@ +# Torrust Tracker Client + +A collection of console clients to make requests to BitTorrent trackers. + +> **Disclaimer**: This project is actively under development. We’re currently extracting and refining common functionality from the[Torrust Tracker](https://github.com/torrust/torrust-tracker) to make it available to the BitTorrent community in Rust. While these tools are functional, they are not yet ready for use in production or third-party projects. 
+ +There are currently three console clients available: + +- UDP Client +- HTTP Client +- Tracker Checker + +> **Notice**: [Console apps are planned to be merge into a single tracker client in the short-term](https://github.com/torrust/torrust-tracker/discussions/660). + +## UDP Client + +`Announce` request: + +```text +cargo run --bin udp_tracker_client announce udp://127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +``` + +`Announce` response: + +```json +{ + "AnnounceIpv4": { + "transaction_id": -888840697, + "announce_interval": 120, + "leechers": 0, + "seeders": 1, + "peers": [] + } +} +``` + +`Scrape` request: + +```text +cargo run --bin udp_tracker_client scrape udp://127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +``` + +`Scrape` response: + +```json +{ + "Scrape": { + "transaction_id": -888840697, + "torrent_stats": [ + { + "seeders": 1, + "completed": 0, + "leechers": 0 + } + ] + } +} +``` + +## HTTP Client + +`Announce` request: + +```text +cargo run --bin http_tracker_client announce http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq +``` + +`Announce` response: + +```json +{ + "complete": 1, + "incomplete": 0, + "interval": 120, + "min interval": 120, + "peers": [] +} +``` + +`Scrape` request: + +```text + cargo run --bin http_tracker_client scrape http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq +``` + +`Scrape` response: + +```json +{ + "9c38422213e30bff212b30c360d26f9a02136422": { + "complete": 1, + "downloaded": 1, + "incomplete": 0 + } +} +``` + +## Tracker Checker + +The Tracker Checker is a tool to check the health of a list of trackers. 
+ +```console +TORRUST_CHECKER_CONFIG='{ + "udp_trackers": ["127.0.0.1:6969"], + "http_trackers": ["http://127.0.0.1:7070"], + "health_checks": ["http://127.0.0.1:1212/api/health_check"] + }' cargo run --bin tracker_checker +``` + +Output: + +```json +[ + { + "Udp": { + "Ok": { + "remote_addr": "127.0.0.1:6969", + "results": [ + [ + "Setup", + { + "Ok": null + } + ], + [ + "Connect", + { + "Ok": null + } + ], + [ + "Announce", + { + "Ok": null + } + ], + [ + "Scrape", + { + "Ok": null + } + ] + ] + } + } + }, + { + "Health": { + "Ok": { + "url": "http://127.0.0.1:1212/api/health_check", + "result": { + "Ok": "200 OK" + } + } + } + }, + { + "Http": { + "Ok": { + "url": "http://127.0.0.1:7070/", + "results": [ + [ + "Announce", + { + "Ok": null + } + ], + [ + "Scrape", + { + "Ok": null + } + ] + ] + } + } + } +] +``` + +## License + +**Copyright (c) 2024 The Torrust Developers.** + +This program is free software: you can redistribute it and/or modify it under the terms of the [GNU Lesser General Public License][LGPL_3_0] as published by the [Free Software Foundation][FSF], version 3. + +This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the [GNU Lesser General Public License][LGPL_3_0] for more details. + +You should have received a copy of the *GNU Lesser General Public License* along with this program. If not, see . + +Some files include explicit copyright notices and/or license notices. + +### Legacy Exception + +For prosperity, versions of Torrust BitTorrent Tracker Client that are older than five years are automatically granted the [MIT-0][MIT_0] license in addition to the existing [LGPL-3.0-only][LGPL_3_0] license. 
+ +[LGPL_3_0]: ./LICENSE +[MIT_0]: ./docs/licenses/LICENSE-MIT_0 +[FSF]: https://www.fsf.org/ diff --git a/console/tracker-client/docs/licenses/LICENSE-MIT_0 b/console/tracker-client/docs/licenses/LICENSE-MIT_0 new file mode 100644 index 000000000..fc06cc4fe --- /dev/null +++ b/console/tracker-client/docs/licenses/LICENSE-MIT_0 @@ -0,0 +1,14 @@ +MIT No Attribution + +Permission is hereby granted, free of charge, to any person obtaining a copy of this +software and associated documentation files (the "Software"), to deal in the Software +without restriction, including without limitation the rights to use, copy, modify, +merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/console/tracker-client/src/bin/http_tracker_client.rs b/console/tracker-client/src/bin/http_tracker_client.rs new file mode 100644 index 000000000..be1b4821d --- /dev/null +++ b/console/tracker-client/src/bin/http_tracker_client.rs @@ -0,0 +1,7 @@ +//! Program to make request to HTTP trackers. +use torrust_tracker_client::console::clients::http::app; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + app::run().await +} diff --git a/console/tracker-client/src/bin/tracker_checker.rs b/console/tracker-client/src/bin/tracker_checker.rs new file mode 100644 index 000000000..3ff78eec1 --- /dev/null +++ b/console/tracker-client/src/bin/tracker_checker.rs @@ -0,0 +1,7 @@ +//! Program to check running trackers. 
+use torrust_tracker_client::console::clients::checker::app; + +#[tokio::main] +async fn main() { + app::run().await.expect("Some checks fail"); +} diff --git a/console/tracker-client/src/bin/udp_tracker_client.rs b/console/tracker-client/src/bin/udp_tracker_client.rs new file mode 100644 index 000000000..caf5ab0dc --- /dev/null +++ b/console/tracker-client/src/bin/udp_tracker_client.rs @@ -0,0 +1,7 @@ +//! Program to make request to UDP trackers. +use torrust_tracker_client::console::clients::udp::app; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + app::run().await +} diff --git a/console/tracker-client/src/console/clients/checker/app.rs b/console/tracker-client/src/console/clients/checker/app.rs new file mode 100644 index 000000000..395f65df9 --- /dev/null +++ b/console/tracker-client/src/console/clients/checker/app.rs @@ -0,0 +1,120 @@ +//! Program to run checks against running trackers. +//! +//! Run providing a config file path: +//! +//! ```text +//! cargo run --bin tracker_checker -- --config-path "./share/default/config/tracker_checker.json" +//! TORRUST_CHECKER_CONFIG_PATH="./share/default/config/tracker_checker.json" cargo run --bin tracker_checker +//! ``` +//! +//! Run providing the configuration: +//! +//! ```text +//! TORRUST_CHECKER_CONFIG=$(cat "./share/default/config/tracker_checker.json") cargo run --bin tracker_checker +//! ``` +//! +//! Another real example to test the Torrust demo tracker: +//! +//! ```text +//! TORRUST_CHECKER_CONFIG='{ +//! "udp_trackers": ["144.126.245.19:6969"], +//! "http_trackers": ["https://tracker.torrust-demo.com"], +//! "health_checks": ["https://tracker.torrust-demo.com/api/health_check"] +//! }' cargo run --bin tracker_checker +//! ``` +//! +//! The output should be something like the following: +//! +//! ```json +//! { +//! "udp_trackers": [ +//! { +//! "url": "144.126.245.19:6969", +//! "status": { +//! "code": "ok", +//! "message": "" +//! } +//! } +//! ], +//! "http_trackers": [ +//! { +//! 
"url": "https://tracker.torrust-demo.com/", +//! "status": { +//! "code": "ok", +//! "message": "" +//! } +//! } +//! ], +//! "health_checks": [ +//! { +//! "url": "https://tracker.torrust-demo.com/api/health_check", +//! "status": { +//! "code": "ok", +//! "message": "" +//! } +//! } +//! ] +//! } +//! ``` +use std::path::PathBuf; +use std::sync::Arc; + +use anyhow::{Context, Result}; +use clap::Parser; +use tracing::level_filters::LevelFilter; + +use super::config::Configuration; +use super::console::Console; +use super::service::{CheckResult, Service}; +use crate::console::clients::checker::config::parse_from_json; + +#[derive(Parser, Debug)] +#[clap(author, version, about, long_about = None)] +struct Args { + /// Path to the JSON configuration file. + #[clap(short, long, env = "TORRUST_CHECKER_CONFIG_PATH")] + config_path: Option, + + /// Direct configuration content in JSON. + #[clap(env = "TORRUST_CHECKER_CONFIG", hide_env_values = true)] + config_content: Option, +} + +/// # Errors +/// +/// Will return an error if the configuration was not provided. 
+pub async fn run() -> Result> { + tracing_stdout_init(LevelFilter::INFO); + + let args = Args::parse(); + + let config = setup_config(args)?; + + let console_printer = Console {}; + + let service = Service { + config: Arc::new(config), + console: console_printer, + }; + + service.run_checks().await.context("it should run the check tasks") +} + +fn tracing_stdout_init(filter: LevelFilter) { + tracing_subscriber::fmt().with_max_level(filter).init(); + tracing::debug!("Logging initialized"); +} + +fn setup_config(args: Args) -> Result { + match (args.config_path, args.config_content) { + (Some(config_path), _) => load_config_from_file(&config_path), + (_, Some(config_content)) => parse_from_json(&config_content).context("invalid config format"), + _ => Err(anyhow::anyhow!("no configuration provided")), + } +} + +fn load_config_from_file(path: &PathBuf) -> Result { + let file_content = std::fs::read_to_string(path).with_context(|| format!("can't read config file {path:?}"))?; + + parse_from_json(&file_content).context("invalid config format") +} diff --git a/console/tracker-client/src/console/clients/checker/checks/health.rs b/console/tracker-client/src/console/clients/checker/checks/health.rs new file mode 100644 index 000000000..b1fb79148 --- /dev/null +++ b/console/tracker-client/src/console/clients/checker/checks/health.rs @@ -0,0 +1,77 @@ +use std::sync::Arc; +use std::time::Duration; + +use anyhow::Result; +use hyper::StatusCode; +use reqwest::{Client as HttpClient, Response}; +use serde::Serialize; +use thiserror::Error; +use url::Url; + +#[derive(Debug, Clone, Error, Serialize)] +#[serde(into = "String")] +pub enum Error { + #[error("Failed to Build a Http Client: {err:?}")] + ClientBuildingError { err: Arc }, + #[error("Heath check failed to get a response: {err:?}")] + ResponseError { err: Arc }, + #[error("Http check returned a non-success code: \"{code}\" with the response: \"{response:?}\"")] + UnsuccessfulResponse { code: StatusCode, response: Arc }, +} 
+ +impl From for String { + fn from(value: Error) -> Self { + value.to_string() + } +} + +#[derive(Debug, Clone, Serialize)] +pub struct Checks { + url: Url, + result: Result, +} + +pub async fn run(health_checks: Vec, timeout: Duration) -> Vec> { + let mut results = Vec::default(); + + tracing::debug!("Health checks ..."); + + for url in health_checks { + let result = match run_health_check(url.clone(), timeout).await { + Ok(response) => Ok(response.status().to_string()), + Err(err) => Err(err), + }; + + let check = Checks { url, result }; + + if check.result.is_err() { + results.push(Err(check)); + } else { + results.push(Ok(check)); + } + } + + results +} + +async fn run_health_check(url: Url, timeout: Duration) -> Result { + let client = HttpClient::builder() + .timeout(timeout) + .build() + .map_err(|e| Error::ClientBuildingError { err: e.into() })?; + + let response = client + .get(url.clone()) + .send() + .await + .map_err(|e| Error::ResponseError { err: e.into() })?; + + if response.status().is_success() { + Ok(response) + } else { + Err(Error::UnsuccessfulResponse { + code: response.status(), + response: response.into(), + }) + } +} diff --git a/console/tracker-client/src/console/clients/checker/checks/http.rs b/console/tracker-client/src/console/clients/checker/checks/http.rs new file mode 100644 index 000000000..0fd37ca48 --- /dev/null +++ b/console/tracker-client/src/console/clients/checker/checks/http.rs @@ -0,0 +1,104 @@ +use std::str::FromStr as _; +use std::time::Duration; + +use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_client::http::client::responses::announce::Announce; +use bittorrent_tracker_client::http::client::responses::scrape; +use bittorrent_tracker_client::http::client::{requests, Client}; +use serde::Serialize; +use url::Url; + +use crate::console::clients::http::Error; + +#[derive(Debug, Clone, Serialize)] +pub struct Checks { + url: Url, + results: Vec<(Check, Result<(), Error>)>, +} + +#[derive(Debug, Clone, 
Serialize)] +pub enum Check { + Announce, + Scrape, +} + +pub async fn run(http_trackers: Vec, timeout: Duration) -> Vec> { + let mut results = Vec::default(); + + tracing::debug!("HTTP trackers ..."); + + for ref url in http_trackers { + let mut base_url = url.clone(); + base_url.set_path(""); + + let mut checks = Checks { + url: url.clone(), + results: Vec::default(), + }; + + // Announce + { + let check = check_http_announce(&base_url, timeout).await.map(|_| ()); + + checks.results.push((Check::Announce, check)); + } + + // Scrape + { + let check = check_http_scrape(&base_url, timeout).await.map(|_| ()); + + checks.results.push((Check::Scrape, check)); + } + + if checks.results.iter().any(|f| f.1.is_err()) { + results.push(Err(checks)); + } else { + results.push(Ok(checks)); + } + } + + results +} + +async fn check_http_announce(url: &Url, timeout: Duration) -> Result { + let info_hash_str = "9c38422213e30bff212b30c360d26f9a02136422".to_string(); // # DevSkim: ignore DS173237 + let info_hash = InfoHash::from_str(&info_hash_str).expect("a valid info-hash is required"); + + let client = Client::new(url.clone(), timeout).map_err(|err| Error::HttpClientError { err })?; + + let response = client + .announce( + &requests::announce::QueryBuilder::with_default_values() + .with_info_hash(&info_hash) + .query(), + ) + .await + .map_err(|err| Error::HttpClientError { err })?; + + let response = response.bytes().await.map_err(|e| Error::ResponseError { err: e.into() })?; + + let response = serde_bencode::from_bytes::(&response).map_err(|e| Error::ParseBencodeError { + data: response, + err: e.into(), + })?; + + Ok(response) +} + +async fn check_http_scrape(url: &Url, timeout: Duration) -> Result { + let info_hashes: Vec = vec!["9c38422213e30bff212b30c360d26f9a02136422".to_string()]; // # DevSkim: ignore DS173237 + let query = requests::scrape::Query::try_from(info_hashes).expect("a valid array of info-hashes is required"); + + let client = Client::new(url.clone(), 
timeout).map_err(|err| Error::HttpClientError { err })?; + + let response = client.scrape(&query).await.map_err(|err| Error::HttpClientError { err })?; + + let response = response.bytes().await.map_err(|e| Error::ResponseError { err: e.into() })?; + + let response = scrape::Response::try_from_bencoded(&response).map_err(|e| Error::BencodeParseError { + data: response, + err: e.into(), + })?; + + Ok(response) +} diff --git a/console/tracker-client/src/console/clients/checker/checks/mod.rs b/console/tracker-client/src/console/clients/checker/checks/mod.rs new file mode 100644 index 000000000..f8b03f749 --- /dev/null +++ b/console/tracker-client/src/console/clients/checker/checks/mod.rs @@ -0,0 +1,4 @@ +pub mod health; +pub mod http; +pub mod structs; +pub mod udp; diff --git a/console/tracker-client/src/console/clients/checker/checks/structs.rs b/console/tracker-client/src/console/clients/checker/checks/structs.rs new file mode 100644 index 000000000..d28e20c04 --- /dev/null +++ b/console/tracker-client/src/console/clients/checker/checks/structs.rs @@ -0,0 +1,12 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize)] +pub struct Status { + pub code: String, + pub message: String, +} +#[derive(Serialize, Deserialize)] +pub struct CheckerOutput { + pub url: String, + pub status: Status, +} diff --git a/console/tracker-client/src/console/clients/checker/checks/udp.rs b/console/tracker-client/src/console/clients/checker/checks/udp.rs new file mode 100644 index 000000000..21bdcd1b7 --- /dev/null +++ b/console/tracker-client/src/console/clients/checker/checks/udp.rs @@ -0,0 +1,134 @@ +use std::net::SocketAddr; +use std::time::Duration; + +use aquatic_udp_protocol::TransactionId; +use hex_literal::hex; +use serde::Serialize; +use url::Url; + +use crate::console::clients::udp::checker::Client; +use crate::console::clients::udp::Error; + +#[derive(Debug, Clone, Serialize)] +pub struct Checks { + remote_addr: SocketAddr, + results: Vec<(Check, Result<(), 
Error>)>, +} + +#[derive(Debug, Clone, Serialize)] +pub enum Check { + Setup, + Connect, + Announce, + Scrape, +} + +#[allow(clippy::missing_panics_doc)] +pub async fn run(udp_trackers: Vec, timeout: Duration) -> Vec> { + let mut results = Vec::default(); + + tracing::debug!("UDP trackers ..."); + + let info_hash = aquatic_udp_protocol::InfoHash(hex!("9c38422213e30bff212b30c360d26f9a02136422")); // # DevSkim: ignore DS173237 + + for remote_url in udp_trackers { + let remote_addr = resolve_socket_addr(&remote_url); + + let mut checks = Checks { + remote_addr, + results: Vec::default(), + }; + + tracing::debug!("UDP tracker: {:?}", remote_url); + + // Setup + let client = match Client::new(remote_addr, timeout).await { + Ok(client) => { + checks.results.push((Check::Setup, Ok(()))); + client + } + Err(err) => { + checks.results.push((Check::Setup, Err(err))); + results.push(Err(checks)); + continue; + } + }; + + let transaction_id = TransactionId::new(1); + + // Connect Remote + let connection_id = match client.send_connection_request(transaction_id).await { + Ok(connection_id) => { + checks.results.push((Check::Connect, Ok(()))); + connection_id + } + Err(err) => { + checks.results.push((Check::Connect, Err(err))); + results.push(Err(checks)); + continue; + } + }; + + // Announce + { + let check = client + .send_announce_request(transaction_id, connection_id, info_hash.into()) + .await + .map(|_| ()); + + checks.results.push((Check::Announce, check)); + } + + // Scrape + { + let check = client + .send_scrape_request(connection_id, transaction_id, &[info_hash.into()]) + .await + .map(|_| ()); + + checks.results.push((Check::Scrape, check)); + } + + if checks.results.iter().any(|f| f.1.is_err()) { + results.push(Err(checks)); + } else { + results.push(Ok(checks)); + } + } + + results +} + +fn resolve_socket_addr(url: &Url) -> SocketAddr { + let socket_addr = url.socket_addrs(|| None).unwrap(); + *socket_addr.first().unwrap() +} + +#[cfg(test)] +mod tests { + use 
std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + + use url::Url; + + use crate::console::clients::checker::checks::udp::resolve_socket_addr; + + #[test] + fn it_should_resolve_the_socket_address_for_udp_scheme_urls_containing_a_domain() { + let socket_addr = resolve_socket_addr(&Url::parse("udp://localhost:8080").unwrap()); + + assert!( + socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + ); + } + + #[test] + fn it_should_resolve_the_socket_address_for_udp_scheme_urls_containing_an_ip() { + let socket_addr = resolve_socket_addr(&Url::parse("udp://localhost:8080").unwrap()); + + assert!( + socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + ); + } +} diff --git a/console/tracker-client/src/console/clients/checker/config.rs b/console/tracker-client/src/console/clients/checker/config.rs new file mode 100644 index 000000000..154dcae85 --- /dev/null +++ b/console/tracker-client/src/console/clients/checker/config.rs @@ -0,0 +1,282 @@ +use std::error::Error; +use std::fmt; + +use reqwest::Url as ServiceUrl; +use serde::Deserialize; + +/// It parses the configuration from a JSON format. +/// +/// # Errors +/// +/// Will return an error if the configuration is not valid. +/// +/// # Panics +/// +/// Will panic if unable to read the configuration file. +pub fn parse_from_json(json: &str) -> Result { + let plain_config: PlainConfiguration = serde_json::from_str(json).map_err(ConfigurationError::JsonParseError)?; + Configuration::try_from(plain_config) +} + +/// DTO for the configuration to serialize/deserialize configuration. +/// +/// Configuration does not need to be valid. 
+#[derive(Deserialize)] +struct PlainConfiguration { + pub udp_trackers: Vec, + pub http_trackers: Vec, + pub health_checks: Vec, +} + +/// Validated configuration +pub struct Configuration { + pub udp_trackers: Vec, + pub http_trackers: Vec, + pub health_checks: Vec, +} + +#[derive(Debug)] +pub enum ConfigurationError { + JsonParseError(serde_json::Error), + InvalidUdpAddress(std::net::AddrParseError), + InvalidUrl(url::ParseError), +} + +impl Error for ConfigurationError {} + +impl fmt::Display for ConfigurationError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ConfigurationError::JsonParseError(e) => write!(f, "JSON parse error: {e}"), + ConfigurationError::InvalidUdpAddress(e) => write!(f, "Invalid UDP address: {e}"), + ConfigurationError::InvalidUrl(e) => write!(f, "Invalid URL: {e}"), + } + } +} + +impl TryFrom for Configuration { + type Error = ConfigurationError; + + fn try_from(plain_config: PlainConfiguration) -> Result { + let udp_trackers = plain_config + .udp_trackers + .into_iter() + .map(|s| if s.starts_with("udp://") { s } else { format!("udp://{s}") }) + .map(|s| s.parse::().map_err(ConfigurationError::InvalidUrl)) + .collect::, _>>()?; + + let http_trackers = plain_config + .http_trackers + .into_iter() + .map(|s| s.parse::().map_err(ConfigurationError::InvalidUrl)) + .collect::, _>>()?; + + let health_checks = plain_config + .health_checks + .into_iter() + .map(|s| s.parse::().map_err(ConfigurationError::InvalidUrl)) + .collect::, _>>()?; + + Ok(Configuration { + udp_trackers, + http_trackers, + health_checks, + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn configuration_should_be_build_from_plain_serializable_configuration() { + let dto = PlainConfiguration { + udp_trackers: vec!["udp://127.0.0.1:8080".to_string()], + http_trackers: vec!["http://127.0.0.1:8080".to_string()], + health_checks: vec!["http://127.0.0.1:8080/health".to_string()], + }; + + let config = 
Configuration::try_from(dto).expect("A valid configuration"); + + assert_eq!(config.udp_trackers, vec![ServiceUrl::parse("udp://127.0.0.1:8080").unwrap()]); + + assert_eq!( + config.http_trackers, + vec![ServiceUrl::parse("http://127.0.0.1:8080").unwrap()] + ); + + assert_eq!( + config.health_checks, + vec![ServiceUrl::parse("http://127.0.0.1:8080/health").unwrap()] + ); + } + + mod building_configuration_from_plain_configuration_for { + + mod udp_trackers { + use crate::console::clients::checker::config::{Configuration, PlainConfiguration, ServiceUrl}; + + /* The plain configuration should allow UDP URLs with: + + - IP or domain. + - With or without scheme. + - With or without `announce` suffix. + - With or without `/` at the end of the authority section (with empty path). + + For example: + + 127.0.0.1:6969 + 127.0.0.1:6969/ + 127.0.0.1:6969/announce + + localhost:6969 + localhost:6969/ + localhost:6969/announce + + udp://127.0.0.1:6969 + udp://127.0.0.1:6969/ + udp://127.0.0.1:6969/announce + + udp://localhost:6969 + udp://localhost:6969/ + udp://localhost:6969/announce + + */ + + #[test] + fn it_should_fail_when_a_tracker_udp_url_is_invalid() { + let plain_config = PlainConfiguration { + udp_trackers: vec!["invalid URL".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + + assert!(Configuration::try_from(plain_config).is_err()); + } + + #[test] + fn it_should_add_the_udp_scheme_to_the_udp_url_when_it_is_missing() { + let plain_config = PlainConfiguration { + udp_trackers: vec!["127.0.0.1:6969".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!(config.udp_trackers[0], "udp://127.0.0.1:6969".parse::().unwrap()); + } + + #[test] + fn it_should_allow_using_domains() { + let plain_config = PlainConfiguration { + udp_trackers: vec!["udp://localhost:6969".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + 
+ let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!(config.udp_trackers[0], "udp://localhost:6969".parse::().unwrap()); + } + + #[test] + fn it_should_allow_the_url_to_have_an_empty_path() { + let plain_config = PlainConfiguration { + udp_trackers: vec!["127.0.0.1:6969/".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!(config.udp_trackers[0], "udp://127.0.0.1:6969/".parse::().unwrap()); + } + + #[test] + fn it_should_allow_the_url_to_contain_a_path() { + // This is the common format for UDP tracker URLs: + // udp://domain.com:6969/announce + + let plain_config = PlainConfiguration { + udp_trackers: vec!["127.0.0.1:6969/announce".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!( + config.udp_trackers[0], + "udp://127.0.0.1:6969/announce".parse::().unwrap() + ); + } + } + + mod http_trackers { + use crate::console::clients::checker::config::{Configuration, PlainConfiguration, ServiceUrl}; + + #[test] + fn it_should_fail_when_a_tracker_http_url_is_invalid() { + let plain_config = PlainConfiguration { + udp_trackers: vec![], + http_trackers: vec!["invalid URL".to_string()], + health_checks: vec![], + }; + + assert!(Configuration::try_from(plain_config).is_err()); + } + + #[test] + fn it_should_allow_the_url_to_contain_a_path() { + // This is the common format for HTTP tracker URLs: + // http://domain.com:7070/announce + + let plain_config = PlainConfiguration { + udp_trackers: vec![], + http_trackers: vec!["http://127.0.0.1:7070/announce".to_string()], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!( + config.http_trackers[0], + 
"http://127.0.0.1:7070/announce".parse::().unwrap() + ); + } + + #[test] + fn it_should_allow_the_url_to_contain_an_empty_path() { + let plain_config = PlainConfiguration { + udp_trackers: vec![], + http_trackers: vec!["http://127.0.0.1:7070/".to_string()], + health_checks: vec![], + }; + + let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); + + assert_eq!( + config.http_trackers[0], + "http://127.0.0.1:7070/".parse::().unwrap() + ); + } + } + + mod health_checks { + use crate::console::clients::checker::config::{Configuration, PlainConfiguration}; + + #[test] + fn it_should_fail_when_a_health_check_http_url_is_invalid() { + let plain_config = PlainConfiguration { + udp_trackers: vec![], + http_trackers: vec![], + health_checks: vec!["invalid URL".to_string()], + }; + + assert!(Configuration::try_from(plain_config).is_err()); + } + } + } +} diff --git a/console/tracker-client/src/console/clients/checker/console.rs b/console/tracker-client/src/console/clients/checker/console.rs new file mode 100644 index 000000000..b55c559fc --- /dev/null +++ b/console/tracker-client/src/console/clients/checker/console.rs @@ -0,0 +1,38 @@ +use super::printer::{Printer, CLEAR_SCREEN}; + +pub struct Console {} + +impl Default for Console { + fn default() -> Self { + Self::new() + } +} + +impl Console { + #[must_use] + pub fn new() -> Self { + Self {} + } +} + +impl Printer for Console { + fn clear(&self) { + self.print(CLEAR_SCREEN); + } + + fn print(&self, output: &str) { + print!("{}", &output); + } + + fn eprint(&self, output: &str) { + eprint!("{}", &output); + } + + fn println(&self, output: &str) { + println!("{}", &output); + } + + fn eprintln(&self, output: &str) { + eprintln!("{}", &output); + } +} diff --git a/console/tracker-client/src/console/clients/checker/logger.rs b/console/tracker-client/src/console/clients/checker/logger.rs new file mode 100644 index 000000000..50e97189f --- /dev/null +++ 
b/console/tracker-client/src/console/clients/checker/logger.rs @@ -0,0 +1,72 @@ +use std::cell::RefCell; + +use super::printer::{Printer, CLEAR_SCREEN}; + +pub struct Logger { + output: RefCell, +} + +impl Default for Logger { + fn default() -> Self { + Self::new() + } +} + +impl Logger { + #[must_use] + pub fn new() -> Self { + Self { + output: RefCell::new(String::new()), + } + } + + pub fn log(&self) -> String { + self.output.borrow().clone() + } +} + +impl Printer for Logger { + fn clear(&self) { + self.print(CLEAR_SCREEN); + } + + fn print(&self, output: &str) { + *self.output.borrow_mut() = format!("{}{}", self.output.borrow(), &output); + } + + fn eprint(&self, output: &str) { + *self.output.borrow_mut() = format!("{}{}", self.output.borrow(), &output); + } + + fn println(&self, output: &str) { + self.print(&format!("{}/n", &output)); + } + + fn eprintln(&self, output: &str) { + self.eprint(&format!("{}/n", &output)); + } +} + +#[cfg(test)] +mod tests { + use crate::console::clients::checker::logger::Logger; + use crate::console::clients::checker::printer::{Printer, CLEAR_SCREEN}; + + #[test] + fn should_capture_the_clear_screen_command() { + let console_logger = Logger::new(); + + console_logger.clear(); + + assert_eq!(CLEAR_SCREEN, console_logger.log()); + } + + #[test] + fn should_capture_the_print_command_output() { + let console_logger = Logger::new(); + + console_logger.print("OUTPUT"); + + assert_eq!("OUTPUT", console_logger.log()); + } +} diff --git a/console/tracker-client/src/console/clients/checker/mod.rs b/console/tracker-client/src/console/clients/checker/mod.rs new file mode 100644 index 000000000..d26a4a686 --- /dev/null +++ b/console/tracker-client/src/console/clients/checker/mod.rs @@ -0,0 +1,7 @@ +pub mod app; +pub mod checks; +pub mod config; +pub mod console; +pub mod logger; +pub mod printer; +pub mod service; diff --git a/console/tracker-client/src/console/clients/checker/printer.rs 
b/console/tracker-client/src/console/clients/checker/printer.rs new file mode 100644 index 000000000..d590dfedb --- /dev/null +++ b/console/tracker-client/src/console/clients/checker/printer.rs @@ -0,0 +1,9 @@ +pub const CLEAR_SCREEN: &str = "\x1B[2J\x1B[1;1H"; + +pub trait Printer { + fn clear(&self); + fn print(&self, output: &str); + fn eprint(&self, output: &str); + fn println(&self, output: &str); + fn eprintln(&self, output: &str); +} diff --git a/console/tracker-client/src/console/clients/checker/service.rs b/console/tracker-client/src/console/clients/checker/service.rs new file mode 100644 index 000000000..acd312d8c --- /dev/null +++ b/console/tracker-client/src/console/clients/checker/service.rs @@ -0,0 +1,62 @@ +use std::sync::Arc; + +use futures::FutureExt as _; +use serde::Serialize; +use tokio::task::{JoinError, JoinSet}; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; + +use super::checks::{health, http, udp}; +use super::config::Configuration; +use super::console::Console; +use crate::console::clients::checker::printer::Printer; + +pub struct Service { + pub(crate) config: Arc, + pub(crate) console: Console, +} + +#[derive(Debug, Clone, Serialize)] +pub enum CheckResult { + Udp(Result), + Http(Result), + Health(Result), +} + +impl Service { + /// # Errors + /// + /// It will return an error if some of the tests panic or otherwise fail to run. + /// On success it will return a vector of `Ok(())` of [`CheckResult`]. + /// + /// # Panics + /// + /// It would panic if `serde_json` produces invalid json for the `to_string_pretty` function. 
+ pub async fn run_checks(self) -> Result, JoinError> { + tracing::info!("Running checks for trackers ..."); + + let mut check_results = Vec::default(); + + let mut checks = JoinSet::new(); + checks.spawn( + udp::run(self.config.udp_trackers.clone(), DEFAULT_TIMEOUT).map(|mut f| f.drain(..).map(CheckResult::Udp).collect()), + ); + checks.spawn( + http::run(self.config.http_trackers.clone(), DEFAULT_TIMEOUT) + .map(|mut f| f.drain(..).map(CheckResult::Http).collect()), + ); + checks.spawn( + health::run(self.config.health_checks.clone(), DEFAULT_TIMEOUT) + .map(|mut f| f.drain(..).map(CheckResult::Health).collect()), + ); + + while let Some(results) = checks.join_next().await { + check_results.append(&mut results?); + } + + let json_output = serde_json::json!(check_results); + self.console + .println(&serde_json::to_string_pretty(&json_output).expect("it should consume valid json")); + + Ok(check_results) + } +} diff --git a/console/tracker-client/src/console/clients/http/app.rs b/console/tracker-client/src/console/clients/http/app.rs new file mode 100644 index 000000000..105b18bff --- /dev/null +++ b/console/tracker-client/src/console/clients/http/app.rs @@ -0,0 +1,101 @@ +//! HTTP Tracker client: +//! +//! Examples: +//! +//! `Announce` request: +//! +//! ```text +//! cargo run --bin http_tracker_client announce http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! `Scrape` request: +//! +//! ```text +//! cargo run --bin http_tracker_client scrape http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! 
``` +use std::str::FromStr; +use std::time::Duration; + +use anyhow::Context; +use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_client::http::client::requests::announce::QueryBuilder; +use bittorrent_tracker_client::http::client::responses::announce::Announce; +use bittorrent_tracker_client::http::client::responses::scrape; +use bittorrent_tracker_client::http::client::{requests, Client}; +use clap::{Parser, Subcommand}; +use reqwest::Url; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + #[command(subcommand)] + command: Command, +} + +#[derive(Subcommand, Debug)] +enum Command { + Announce { tracker_url: String, info_hash: String }, + Scrape { tracker_url: String, info_hashes: Vec }, +} + +/// # Errors +/// +/// Will return an error if the command fails. +pub async fn run() -> anyhow::Result<()> { + let args = Args::parse(); + + match args.command { + Command::Announce { tracker_url, info_hash } => { + announce_command(tracker_url, info_hash, DEFAULT_TIMEOUT).await?; + } + Command::Scrape { + tracker_url, + info_hashes, + } => { + scrape_command(&tracker_url, &info_hashes, DEFAULT_TIMEOUT).await?; + } + } + + Ok(()) +} + +async fn announce_command(tracker_url: String, info_hash: String, timeout: Duration) -> anyhow::Result<()> { + let base_url = Url::parse(&tracker_url).context("failed to parse HTTP tracker base URL")?; + let info_hash = + InfoHash::from_str(&info_hash).expect("Invalid infohash. Example infohash: `9c38422213e30bff212b30c360d26f9a02136422`"); + + let response = Client::new(base_url, timeout)? 
+ .announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) + .await?; + + let body = response.bytes().await?; + + let announce_response: Announce = serde_bencode::from_bytes(&body) + .unwrap_or_else(|_| panic!("response body should be a valid announce response, got: \"{:#?}\"", &body)); + + let json = serde_json::to_string(&announce_response).context("failed to serialize scrape response into JSON")?; + + println!("{json}"); + + Ok(()) +} + +async fn scrape_command(tracker_url: &str, info_hashes: &[String], timeout: Duration) -> anyhow::Result<()> { + let base_url = Url::parse(tracker_url).context("failed to parse HTTP tracker base URL")?; + + let query = requests::scrape::Query::try_from(info_hashes).context("failed to parse infohashes")?; + + let response = Client::new(base_url, timeout)?.scrape(&query).await?; + + let body = response.bytes().await?; + + let scrape_response = scrape::Response::try_from_bencoded(&body) + .unwrap_or_else(|_| panic!("response body should be a valid scrape response, got: \"{:#?}\"", &body)); + + let json = serde_json::to_string(&scrape_response).context("failed to serialize scrape response into JSON")?; + + println!("{json}"); + + Ok(()) +} diff --git a/console/tracker-client/src/console/clients/http/mod.rs b/console/tracker-client/src/console/clients/http/mod.rs new file mode 100644 index 000000000..917c94fa8 --- /dev/null +++ b/console/tracker-client/src/console/clients/http/mod.rs @@ -0,0 +1,35 @@ +use std::sync::Arc; + +use bittorrent_tracker_client::http::client::responses::scrape::BencodeParseError; +use serde::Serialize; +use thiserror::Error; + +pub mod app; + +#[derive(Debug, Clone, Error, Serialize)] +#[serde(into = "String")] +pub enum Error { + #[error("Http request did not receive a response within the timeout: {err:?}")] + HttpClientError { + err: bittorrent_tracker_client::http::client::Error, + }, + #[error("Http failed to get a response at all: {err:?}")] + ResponseError { err: Arc }, + 
#[error("Failed to deserialize the bencoded response data with the error: \"{err:?}\"")] + ParseBencodeError { + data: hyper::body::Bytes, + err: Arc, + }, + + #[error("Failed to deserialize the bencoded response data with the error: \"{err:?}\"")] + BencodeParseError { + data: hyper::body::Bytes, + err: Arc, + }, +} + +impl From for String { + fn from(value: Error) -> Self { + value.to_string() + } +} diff --git a/console/tracker-client/src/console/clients/mod.rs b/console/tracker-client/src/console/clients/mod.rs new file mode 100644 index 000000000..8492f8ba5 --- /dev/null +++ b/console/tracker-client/src/console/clients/mod.rs @@ -0,0 +1,4 @@ +//! Console clients. +pub mod checker; +pub mod http; +pub mod udp; diff --git a/console/tracker-client/src/console/clients/udp/app.rs b/console/tracker-client/src/console/clients/udp/app.rs new file mode 100644 index 000000000..a2736c365 --- /dev/null +++ b/console/tracker-client/src/console/clients/udp/app.rs @@ -0,0 +1,208 @@ +//! UDP Tracker client: +//! +//! Examples: +//! +//! Announce request: +//! +//! ```text +//! cargo run --bin udp_tracker_client announce 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! Announce response: +//! +//! ```json +//! { +//! "transaction_id": -888840697 +//! "announce_interval": 120, +//! "leechers": 0, +//! "seeders": 1, +//! "peers": [ +//! "123.123.123.123:51289" +//! ], +//! } +//! ``` +//! +//! Scrape request: +//! +//! ```text +//! cargo run --bin udp_tracker_client scrape 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! Scrape response: +//! +//! ```json +//! { +//! "transaction_id": -888840697, +//! "torrent_stats": [ +//! { +//! "completed": 0, +//! "leechers": 0, +//! "seeders": 0 +//! }, +//! { +//! "completed": 0, +//! "leechers": 0, +//! "seeders": 0 +//! } +//! ] +//! } +//! ``` +//! +//! You can use an URL with instead of the socket address. For example: +//! +//! ```text +//! 
cargo run --bin udp_tracker_client scrape udp://localhost:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! cargo run --bin udp_tracker_client scrape udp://localhost:6969/scrape 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! The protocol (`udp://`) in the URL is mandatory. The path (`\scrape`) is optional. It always uses `\scrape`. +use std::net::{SocketAddr, ToSocketAddrs}; +use std::str::FromStr; + +use anyhow::Context; +use aquatic_udp_protocol::{Response, TransactionId}; +use bittorrent_primitives::info_hash::InfoHash as TorrustInfoHash; +use clap::{Parser, Subcommand}; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; +use tracing::level_filters::LevelFilter; +use url::Url; + +use super::Error; +use crate::console::clients::udp::checker; +use crate::console::clients::udp::responses::dto::SerializableResponse; +use crate::console::clients::udp::responses::json::ToJson; + +const RANDOM_TRANSACTION_ID: i32 = -888_840_697; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + #[command(subcommand)] + command: Command, +} + +#[derive(Subcommand, Debug)] +enum Command { + Announce { + #[arg(value_parser = parse_socket_addr)] + tracker_socket_addr: SocketAddr, + #[arg(value_parser = parse_info_hash)] + info_hash: TorrustInfoHash, + }, + Scrape { + #[arg(value_parser = parse_socket_addr)] + tracker_socket_addr: SocketAddr, + #[arg(value_parser = parse_info_hash, num_args = 1..=74, value_delimiter = ' ')] + info_hashes: Vec, + }, +} + +/// # Errors +/// +/// Will return an error if the command fails. 
+/// +/// +pub async fn run() -> anyhow::Result<()> { + tracing_stdout_init(LevelFilter::INFO); + + let args = Args::parse(); + + let response = match args.command { + Command::Announce { + tracker_socket_addr: remote_addr, + info_hash, + } => handle_announce(remote_addr, &info_hash).await?, + Command::Scrape { + tracker_socket_addr: remote_addr, + info_hashes, + } => handle_scrape(remote_addr, &info_hashes).await?, + }; + + let response: SerializableResponse = response.into(); + let response_json = response.to_json_string()?; + + print!("{response_json}"); + + Ok(()) +} + +fn tracing_stdout_init(filter: LevelFilter) { + tracing_subscriber::fmt().with_max_level(filter).init(); + tracing::debug!("Logging initialized"); +} + +async fn handle_announce(remote_addr: SocketAddr, info_hash: &TorrustInfoHash) -> Result { + let transaction_id = TransactionId::new(RANDOM_TRANSACTION_ID); + + let client = checker::Client::new(remote_addr, DEFAULT_TIMEOUT).await?; + + let connection_id = client.send_connection_request(transaction_id).await?; + + client.send_announce_request(transaction_id, connection_id, *info_hash).await +} + +async fn handle_scrape(remote_addr: SocketAddr, info_hashes: &[TorrustInfoHash]) -> Result { + let transaction_id = TransactionId::new(RANDOM_TRANSACTION_ID); + + let client = checker::Client::new(remote_addr, DEFAULT_TIMEOUT).await?; + + let connection_id = client.send_connection_request(transaction_id).await?; + + client.send_scrape_request(connection_id, transaction_id, info_hashes).await +} + +fn parse_socket_addr(tracker_socket_addr_str: &str) -> anyhow::Result { + tracing::debug!("Tracker socket address: {tracker_socket_addr_str:#?}"); + + // Check if the address is a valid URL. If so, extract the host and port. 
+ let resolved_addr = if let Ok(url) = Url::parse(tracker_socket_addr_str) { + tracing::debug!("Tracker socket address URL: {url:?}"); + + let host = url + .host_str() + .with_context(|| format!("invalid host in URL: `{tracker_socket_addr_str}`"))? + .to_owned(); + + let port = url + .port() + .with_context(|| format!("port not found in URL: `{tracker_socket_addr_str}`"))? + .to_owned(); + + (host, port) + } else { + // If not a URL, assume it's a host:port pair. + + let parts: Vec<&str> = tracker_socket_addr_str.split(':').collect(); + + if parts.len() != 2 { + return Err(anyhow::anyhow!( + "invalid address format: `{}`. Expected format is host:port", + tracker_socket_addr_str + )); + } + + let host = parts[0].to_owned(); + + let port = parts[1] + .parse::() + .with_context(|| format!("invalid port: `{}`", parts[1]))? + .to_owned(); + + (host, port) + }; + + tracing::debug!("Resolved address: {resolved_addr:#?}"); + + // Perform DNS resolution. + let socket_addrs: Vec<_> = resolved_addr.to_socket_addrs()?.collect(); + if socket_addrs.is_empty() { + Err(anyhow::anyhow!("DNS resolution failed for `{}`", tracker_socket_addr_str)) + } else { + Ok(socket_addrs[0]) + } +} + +fn parse_info_hash(info_hash_str: &str) -> anyhow::Result { + TorrustInfoHash::from_str(info_hash_str) + .map_err(|e| anyhow::Error::msg(format!("failed to parse info-hash `{info_hash_str}`: {e:?}"))) +} diff --git a/console/tracker-client/src/console/clients/udp/checker.rs b/console/tracker-client/src/console/clients/udp/checker.rs new file mode 100644 index 000000000..bf6b49782 --- /dev/null +++ b/console/tracker-client/src/console/clients/udp/checker.rs @@ -0,0 +1,177 @@ +use std::net::{Ipv4Addr, SocketAddr}; +use std::num::NonZeroU16; +use std::time::Duration; + +use aquatic_udp_protocol::common::InfoHash; +use aquatic_udp_protocol::{ + AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, NumberOfBytes, NumberOfPeers, + PeerId, PeerKey, Port, Response, 
ScrapeRequest, TransactionId, +}; +use bittorrent_primitives::info_hash::InfoHash as TorrustInfoHash; +use bittorrent_tracker_client::udp::client::UdpTrackerClient; + +use super::Error; + +/// A UDP Tracker client to make test requests (checks). +#[derive(Debug)] +pub struct Client { + client: UdpTrackerClient, +} + +impl Client { + /// Creates a new `[Client]` for checking a UDP Tracker Service + /// + /// # Errors + /// + /// It will error if unable to bind and connect to the udp remote address. + /// + pub async fn new(remote_addr: SocketAddr, timeout: Duration) -> Result { + let client = UdpTrackerClient::new(remote_addr, timeout) + .await + .map_err(|err| Error::UnableToBindAndConnect { remote_addr, err })?; + + Ok(Self { client }) + } + + /// Returns the local addr of this [`Client`]. + /// + /// # Errors + /// + /// This function will return an error if the socket is somehow not bound. + pub fn local_addr(&self) -> std::io::Result { + self.client.client.socket.local_addr() + } + + /// Sends a connection request to the UDP Tracker server. + /// + /// # Errors + /// + /// Will return and error if + /// + /// - It can't connect to the remote UDP socket. + /// - It can't make a connection request successfully to the remote UDP + /// server (after successfully connecting to the remote UDP socket). + /// + /// # Panics + /// + /// Will panic if it receives an unexpected response. 
+ pub async fn send_connection_request(&self, transaction_id: TransactionId) -> Result { + tracing::debug!("Sending connection request with transaction id: {transaction_id:#?}"); + + let connect_request = ConnectRequest { transaction_id }; + + let _ = self + .client + .send(connect_request.into()) + .await + .map_err(|err| Error::UnableToSendConnectionRequest { err })?; + + let response = self + .client + .receive() + .await + .map_err(|err| Error::UnableToReceiveConnectResponse { err })?; + + match response { + Response::Connect(connect_response) => Ok(connect_response.connection_id), + _ => Err(Error::UnexpectedConnectionResponse { response }), + } + } + + /// Sends an announce request to the UDP Tracker server. + /// + /// # Errors + /// + /// Will return and error if the client is not connected. You have to connect + /// before calling this function. + /// + /// # Panics + /// + /// It will panic if the `local_address` has a zero port. + pub async fn send_announce_request( + &self, + transaction_id: TransactionId, + connection_id: ConnectionId, + info_hash: TorrustInfoHash, + ) -> Result { + tracing::debug!("Sending announce request with transaction id: {transaction_id:#?}"); + + let port = NonZeroU16::new( + self.client + .client + .socket + .local_addr() + .expect("it should get the local address") + .port(), + ) + .expect("it should no be zero"); + + let announce_request = AnnounceRequest { + connection_id, + action_placeholder: AnnounceActionPlaceholder::default(), + transaction_id, + info_hash: InfoHash(info_hash.bytes()), + peer_id: PeerId(*b"-qB00000000000000001"), + bytes_downloaded: NumberOfBytes(0i64.into()), + bytes_uploaded: NumberOfBytes(0i64.into()), + bytes_left: NumberOfBytes(0i64.into()), + event: AnnounceEvent::Started.into(), + ip_address: Ipv4Addr::new(0, 0, 0, 0).into(), + key: PeerKey::new(0i32), + peers_wanted: NumberOfPeers(1i32.into()), + port: Port::new(port), + }; + + let _ = self + .client + .send(announce_request.into()) + .await + 
.map_err(|err| Error::UnableToSendAnnounceRequest { err })?; + + let response = self + .client + .receive() + .await + .map_err(|err| Error::UnableToReceiveAnnounceResponse { err })?; + + Ok(response) + } + + /// Sends a scrape request to the UDP Tracker server. + /// + /// # Errors + /// + /// Will return and error if the client is not connected. You have to connect + /// before calling this function. + pub async fn send_scrape_request( + &self, + connection_id: ConnectionId, + transaction_id: TransactionId, + info_hashes: &[TorrustInfoHash], + ) -> Result { + tracing::debug!("Sending scrape request with transaction id: {transaction_id:#?}"); + + let scrape_request = ScrapeRequest { + connection_id, + transaction_id, + info_hashes: info_hashes + .iter() + .map(|torrust_info_hash| InfoHash(torrust_info_hash.bytes())) + .collect(), + }; + + let _ = self + .client + .send(scrape_request.into()) + .await + .map_err(|err| Error::UnableToSendScrapeRequest { err })?; + + let response = self + .client + .receive() + .await + .map_err(|err| Error::UnableToReceiveScrapeResponse { err })?; + + Ok(response) + } +} diff --git a/console/tracker-client/src/console/clients/udp/mod.rs b/console/tracker-client/src/console/clients/udp/mod.rs new file mode 100644 index 000000000..fbfd53770 --- /dev/null +++ b/console/tracker-client/src/console/clients/udp/mod.rs @@ -0,0 +1,50 @@ +use std::net::SocketAddr; + +use aquatic_udp_protocol::Response; +use bittorrent_tracker_client::udp; +use serde::Serialize; +use thiserror::Error; + +pub mod app; +pub mod checker; +pub mod responses; + +#[derive(Error, Debug, Clone, Serialize)] +#[serde(into = "String")] +pub enum Error { + #[error("Failed to Connect to: {remote_addr}, with error: {err}")] + UnableToBindAndConnect { remote_addr: SocketAddr, err: udp::Error }, + + #[error("Failed to send a connection request, with error: {err}")] + UnableToSendConnectionRequest { err: udp::Error }, + + #[error("Failed to receive a connect response, with 
error: {err}")] + UnableToReceiveConnectResponse { err: udp::Error }, + + #[error("Failed to send a announce request, with error: {err}")] + UnableToSendAnnounceRequest { err: udp::Error }, + + #[error("Failed to receive a announce response, with error: {err}")] + UnableToReceiveAnnounceResponse { err: udp::Error }, + + #[error("Failed to send a scrape request, with error: {err}")] + UnableToSendScrapeRequest { err: udp::Error }, + + #[error("Failed to receive a scrape response, with error: {err}")] + UnableToReceiveScrapeResponse { err: udp::Error }, + + #[error("Failed to receive a response, with error: {err}")] + UnableToReceiveResponse { err: udp::Error }, + + #[error("Failed to get local address for connection: {err}")] + UnableToGetLocalAddr { err: udp::Error }, + + #[error("Failed to get a connection response: {response:?}")] + UnexpectedConnectionResponse { response: Response }, +} + +impl From for String { + fn from(value: Error) -> Self { + value.to_string() + } +} diff --git a/console/tracker-client/src/console/clients/udp/responses/dto.rs b/console/tracker-client/src/console/clients/udp/responses/dto.rs new file mode 100644 index 000000000..93320b0f7 --- /dev/null +++ b/console/tracker-client/src/console/clients/udp/responses/dto.rs @@ -0,0 +1,128 @@ +//! Aquatic responses are not serializable. These are the serializable wrappers. 
+use std::net::{Ipv4Addr, Ipv6Addr}; + +use aquatic_udp_protocol::Response::{self}; +use aquatic_udp_protocol::{AnnounceResponse, ConnectResponse, ErrorResponse, Ipv4AddrBytes, Ipv6AddrBytes, ScrapeResponse}; +use serde::Serialize; + +#[derive(Serialize)] +pub enum SerializableResponse { + Connect(ConnectSerializableResponse), + AnnounceIpv4(AnnounceSerializableResponse), + AnnounceIpv6(AnnounceSerializableResponse), + Scrape(ScrapeSerializableResponse), + Error(ErrorSerializableResponse), +} + +impl From for SerializableResponse { + fn from(response: Response) -> Self { + match response { + Response::Connect(response) => SerializableResponse::Connect(ConnectSerializableResponse::from(response)), + Response::AnnounceIpv4(response) => SerializableResponse::AnnounceIpv4(AnnounceSerializableResponse::from(response)), + Response::AnnounceIpv6(response) => SerializableResponse::AnnounceIpv6(AnnounceSerializableResponse::from(response)), + Response::Scrape(response) => SerializableResponse::Scrape(ScrapeSerializableResponse::from(response)), + Response::Error(response) => SerializableResponse::Error(ErrorSerializableResponse::from(response)), + } + } +} + +#[derive(Serialize)] +pub struct ConnectSerializableResponse { + transaction_id: i32, + connection_id: i64, +} + +impl From for ConnectSerializableResponse { + fn from(connect: ConnectResponse) -> Self { + Self { + transaction_id: connect.transaction_id.0.into(), + connection_id: connect.connection_id.0.into(), + } + } +} + +#[derive(Serialize)] +pub struct AnnounceSerializableResponse { + transaction_id: i32, + announce_interval: i32, + leechers: i32, + seeders: i32, + peers: Vec, +} + +impl From> for AnnounceSerializableResponse { + fn from(announce: AnnounceResponse) -> Self { + Self { + transaction_id: announce.fixed.transaction_id.0.into(), + announce_interval: announce.fixed.announce_interval.0.into(), + leechers: announce.fixed.leechers.0.into(), + seeders: announce.fixed.seeders.0.into(), + peers: announce + 
.peers + .iter() + .map(|peer| format!("{}:{}", Ipv4Addr::from(peer.ip_address), peer.port.0)) + .collect::>(), + } + } +} + +impl From> for AnnounceSerializableResponse { + fn from(announce: AnnounceResponse) -> Self { + Self { + transaction_id: announce.fixed.transaction_id.0.into(), + announce_interval: announce.fixed.announce_interval.0.into(), + leechers: announce.fixed.leechers.0.into(), + seeders: announce.fixed.seeders.0.into(), + peers: announce + .peers + .iter() + .map(|peer| format!("{}:{}", Ipv6Addr::from(peer.ip_address), peer.port.0)) + .collect::>(), + } + } +} + +#[derive(Serialize)] +pub struct ScrapeSerializableResponse { + transaction_id: i32, + torrent_stats: Vec, +} + +impl From for ScrapeSerializableResponse { + fn from(scrape: ScrapeResponse) -> Self { + Self { + transaction_id: scrape.transaction_id.0.into(), + torrent_stats: scrape + .torrent_stats + .iter() + .map(|torrent_scrape_statistics| TorrentStats { + seeders: torrent_scrape_statistics.seeders.0.into(), + completed: torrent_scrape_statistics.completed.0.into(), + leechers: torrent_scrape_statistics.leechers.0.into(), + }) + .collect::>(), + } + } +} + +#[derive(Serialize)] +pub struct ErrorSerializableResponse { + transaction_id: i32, + message: String, +} + +impl From for ErrorSerializableResponse { + fn from(error: ErrorResponse) -> Self { + Self { + transaction_id: error.transaction_id.0.into(), + message: error.message.to_string(), + } + } +} + +#[derive(Serialize)] +struct TorrentStats { + seeders: i32, + completed: i32, + leechers: i32, +} diff --git a/console/tracker-client/src/console/clients/udp/responses/json.rs b/console/tracker-client/src/console/clients/udp/responses/json.rs new file mode 100644 index 000000000..5d2bd6b89 --- /dev/null +++ b/console/tracker-client/src/console/clients/udp/responses/json.rs @@ -0,0 +1,25 @@ +use anyhow::Context; +use serde::Serialize; + +use super::dto::SerializableResponse; + +#[allow(clippy::module_name_repetitions)] +pub trait ToJson 
{ + /// + /// Returns a string with the JSON serialized version of the response + /// + /// # Errors + /// + /// Will return an error if serialization fails. + /// + fn to_json_string(&self) -> anyhow::Result + where + Self: Serialize, + { + let pretty_json = serde_json::to_string_pretty(self).context("response JSON serialization")?; + + Ok(pretty_json) + } +} + +impl ToJson for SerializableResponse {} diff --git a/console/tracker-client/src/console/clients/udp/responses/mod.rs b/console/tracker-client/src/console/clients/udp/responses/mod.rs new file mode 100644 index 000000000..e6d2e5e51 --- /dev/null +++ b/console/tracker-client/src/console/clients/udp/responses/mod.rs @@ -0,0 +1,2 @@ +pub mod dto; +pub mod json; diff --git a/console/tracker-client/src/console/mod.rs b/console/tracker-client/src/console/mod.rs new file mode 100644 index 000000000..4b4cb9de4 --- /dev/null +++ b/console/tracker-client/src/console/mod.rs @@ -0,0 +1,2 @@ +//! Console apps. +pub mod clients; diff --git a/console/tracker-client/src/lib.rs b/console/tracker-client/src/lib.rs new file mode 100644 index 000000000..5b9849fdc --- /dev/null +++ b/console/tracker-client/src/lib.rs @@ -0,0 +1 @@ +pub mod console; diff --git a/packages/tracker-client/README.md b/packages/tracker-client/README.md index 1d12f9c86..56a61e154 100644 --- a/packages/tracker-client/README.md +++ b/packages/tracker-client/README.md @@ -2,7 +2,7 @@ A library an console applications to interact with a BitTorrent tracker. -> **Disclaimer**: This project is actively under development. We’re currently extracting and refining common types from the ][Torrust Tracker](https://github.com/torrust/torrust-tracker) to make them available to the BitTorrent community in Rust. While these types are functional, they are not yet ready for use in production or third-party projects. +> **Disclaimer**: This project is actively under development. 
We’re currently extracting and refining common types from the[Torrust Tracker](https://github.com/torrust/torrust-tracker) to make them available to the BitTorrent community in Rust. While these types are functional, they are not yet ready for use in production or third-party projects. ## License From e966ae47e342683d3c51fd29b4db646317827ad0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 13 Nov 2024 10:46:16 +0000 Subject: [PATCH 019/802] refactor!: remove tracker console client from tracker lib client --- Cargo.lock | 7 - packages/tracker-client/Cargo.toml | 9 +- packages/tracker-client/README.md | 2 +- .../src/bin/http_tracker_client.rs | 7 - .../tracker-client/src/bin/tracker_checker.rs | 7 - .../src/bin/udp_tracker_client.rs | 7 - .../src/console/clients/checker/app.rs | 120 -------- .../console/clients/checker/checks/health.rs | 77 ----- .../console/clients/checker/checks/http.rs | 104 ------- .../src/console/clients/checker/checks/mod.rs | 4 - .../console/clients/checker/checks/structs.rs | 12 - .../src/console/clients/checker/checks/udp.rs | 134 --------- .../src/console/clients/checker/config.rs | 282 ------------------ .../src/console/clients/checker/console.rs | 38 --- .../src/console/clients/checker/logger.rs | 72 ----- .../src/console/clients/checker/mod.rs | 7 - .../src/console/clients/checker/printer.rs | 9 - .../src/console/clients/checker/service.rs | 62 ---- .../src/console/clients/http/app.rs | 102 ------- .../src/console/clients/http/mod.rs | 34 --- .../tracker-client/src/console/clients/mod.rs | 4 - .../src/console/clients/udp/app.rs | 208 ------------- .../src/console/clients/udp/checker.rs | 177 ----------- .../src/console/clients/udp/mod.rs | 51 ---- .../src/console/clients/udp/responses/dto.rs | 128 -------- .../src/console/clients/udp/responses/json.rs | 25 -- .../src/console/clients/udp/responses/mod.rs | 2 - packages/tracker-client/src/console/mod.rs | 2 - .../tracker-client/src/http/url_encoding.rs | 4 +- 
packages/tracker-client/src/lib.rs | 1 - 30 files changed, 4 insertions(+), 1694 deletions(-) delete mode 100644 packages/tracker-client/src/bin/http_tracker_client.rs delete mode 100644 packages/tracker-client/src/bin/tracker_checker.rs delete mode 100644 packages/tracker-client/src/bin/udp_tracker_client.rs delete mode 100644 packages/tracker-client/src/console/clients/checker/app.rs delete mode 100644 packages/tracker-client/src/console/clients/checker/checks/health.rs delete mode 100644 packages/tracker-client/src/console/clients/checker/checks/http.rs delete mode 100644 packages/tracker-client/src/console/clients/checker/checks/mod.rs delete mode 100644 packages/tracker-client/src/console/clients/checker/checks/structs.rs delete mode 100644 packages/tracker-client/src/console/clients/checker/checks/udp.rs delete mode 100644 packages/tracker-client/src/console/clients/checker/config.rs delete mode 100644 packages/tracker-client/src/console/clients/checker/console.rs delete mode 100644 packages/tracker-client/src/console/clients/checker/logger.rs delete mode 100644 packages/tracker-client/src/console/clients/checker/mod.rs delete mode 100644 packages/tracker-client/src/console/clients/checker/printer.rs delete mode 100644 packages/tracker-client/src/console/clients/checker/service.rs delete mode 100644 packages/tracker-client/src/console/clients/http/app.rs delete mode 100644 packages/tracker-client/src/console/clients/http/mod.rs delete mode 100644 packages/tracker-client/src/console/clients/mod.rs delete mode 100644 packages/tracker-client/src/console/clients/udp/app.rs delete mode 100644 packages/tracker-client/src/console/clients/udp/checker.rs delete mode 100644 packages/tracker-client/src/console/clients/udp/mod.rs delete mode 100644 packages/tracker-client/src/console/clients/udp/responses/dto.rs delete mode 100644 packages/tracker-client/src/console/clients/udp/responses/json.rs delete mode 100644 
packages/tracker-client/src/console/clients/udp/responses/mod.rs delete mode 100644 packages/tracker-client/src/console/mod.rs diff --git a/Cargo.lock b/Cargo.lock index ec723efff..bbb012cea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -620,20 +620,15 @@ dependencies = [ name = "bittorrent-tracker-client" version = "3.0.0-develop" dependencies = [ - "anyhow", "aquatic_udp_protocol", "bittorrent-primitives", - "clap", "derive_more", - "futures", - "hex-literal", "hyper", "percent-encoding", "reqwest", "serde", "serde_bencode", "serde_bytes", - "serde_json", "serde_repr", "thiserror", "tokio", @@ -641,8 +636,6 @@ dependencies = [ "torrust-tracker-located-error", "torrust-tracker-primitives", "tracing", - "tracing-subscriber", - "url", "zerocopy", ] diff --git a/packages/tracker-client/Cargo.toml b/packages/tracker-client/Cargo.toml index 3334e7b47..52b0be639 100644 --- a/packages/tracker-client/Cargo.toml +++ b/packages/tracker-client/Cargo.toml @@ -1,5 +1,5 @@ [package] -description = "A library with the primitive types shared by the Torrust tracker packages." +description = "A library with the generic tracker clients." 
keywords = ["bittorrent", "client", "tracker"] license = "LGPL-3.0" name = "bittorrent-tracker-client" @@ -15,20 +15,15 @@ rust-version.workspace = true version.workspace = true [dependencies] -anyhow = "1" aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" -clap = { version = "4", features = ["derive", "env"] } derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } -futures = "0" -hex-literal = "0" hyper = "1" percent-encoding = "2" reqwest = { version = "0", features = ["json"] } serde = { version = "1", features = ["derive"] } serde_bencode = "0" serde_bytes = "0" -serde_json = { version = "1", features = ["preserve_order"] } serde_repr = "0" thiserror = "1" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } @@ -36,8 +31,6 @@ torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configur torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } tracing = "0" -tracing-subscriber = { version = "0", features = ["json"] } -url = { version = "2", features = ["serde"] } zerocopy = "0.7" [package.metadata.cargo-machete] diff --git a/packages/tracker-client/README.md b/packages/tracker-client/README.md index 56a61e154..ebd0c4bda 100644 --- a/packages/tracker-client/README.md +++ b/packages/tracker-client/README.md @@ -1,6 +1,6 @@ # BitTorrent Tracker Client -A library an console applications to interact with a BitTorrent tracker. +A library to interact with BitTorrent trackers. > **Disclaimer**: This project is actively under development. We’re currently extracting and refining common types from the[Torrust Tracker](https://github.com/torrust/torrust-tracker) to make them available to the BitTorrent community in Rust. While these types are functional, they are not yet ready for use in production or third-party projects. 
diff --git a/packages/tracker-client/src/bin/http_tracker_client.rs b/packages/tracker-client/src/bin/http_tracker_client.rs deleted file mode 100644 index 8c2c0356d..000000000 --- a/packages/tracker-client/src/bin/http_tracker_client.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! Program to make request to HTTP trackers. -use bittorrent_tracker_client::console::clients::http::app; - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - app::run().await -} diff --git a/packages/tracker-client/src/bin/tracker_checker.rs b/packages/tracker-client/src/bin/tracker_checker.rs deleted file mode 100644 index eb2a7d82c..000000000 --- a/packages/tracker-client/src/bin/tracker_checker.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! Program to check running trackers. -use bittorrent_tracker_client::console::clients::checker::app; - -#[tokio::main] -async fn main() { - app::run().await.expect("Some checks fail"); -} diff --git a/packages/tracker-client/src/bin/udp_tracker_client.rs b/packages/tracker-client/src/bin/udp_tracker_client.rs deleted file mode 100644 index 5f6b4f50d..000000000 --- a/packages/tracker-client/src/bin/udp_tracker_client.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! Program to make request to UDP trackers. -use bittorrent_tracker_client::console::clients::udp::app; - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - app::run().await -} diff --git a/packages/tracker-client/src/console/clients/checker/app.rs b/packages/tracker-client/src/console/clients/checker/app.rs deleted file mode 100644 index 395f65df9..000000000 --- a/packages/tracker-client/src/console/clients/checker/app.rs +++ /dev/null @@ -1,120 +0,0 @@ -//! Program to run checks against running trackers. -//! -//! Run providing a config file path: -//! -//! ```text -//! cargo run --bin tracker_checker -- --config-path "./share/default/config/tracker_checker.json" -//! TORRUST_CHECKER_CONFIG_PATH="./share/default/config/tracker_checker.json" cargo run --bin tracker_checker -//! ``` -//! -//! 
Run providing the configuration: -//! -//! ```text -//! TORRUST_CHECKER_CONFIG=$(cat "./share/default/config/tracker_checker.json") cargo run --bin tracker_checker -//! ``` -//! -//! Another real example to test the Torrust demo tracker: -//! -//! ```text -//! TORRUST_CHECKER_CONFIG='{ -//! "udp_trackers": ["144.126.245.19:6969"], -//! "http_trackers": ["https://tracker.torrust-demo.com"], -//! "health_checks": ["https://tracker.torrust-demo.com/api/health_check"] -//! }' cargo run --bin tracker_checker -//! ``` -//! -//! The output should be something like the following: -//! -//! ```json -//! { -//! "udp_trackers": [ -//! { -//! "url": "144.126.245.19:6969", -//! "status": { -//! "code": "ok", -//! "message": "" -//! } -//! } -//! ], -//! "http_trackers": [ -//! { -//! "url": "https://tracker.torrust-demo.com/", -//! "status": { -//! "code": "ok", -//! "message": "" -//! } -//! } -//! ], -//! "health_checks": [ -//! { -//! "url": "https://tracker.torrust-demo.com/api/health_check", -//! "status": { -//! "code": "ok", -//! "message": "" -//! } -//! } -//! ] -//! } -//! ``` -use std::path::PathBuf; -use std::sync::Arc; - -use anyhow::{Context, Result}; -use clap::Parser; -use tracing::level_filters::LevelFilter; - -use super::config::Configuration; -use super::console::Console; -use super::service::{CheckResult, Service}; -use crate::console::clients::checker::config::parse_from_json; - -#[derive(Parser, Debug)] -#[clap(author, version, about, long_about = None)] -struct Args { - /// Path to the JSON configuration file. - #[clap(short, long, env = "TORRUST_CHECKER_CONFIG_PATH")] - config_path: Option, - - /// Direct configuration content in JSON. - #[clap(env = "TORRUST_CHECKER_CONFIG", hide_env_values = true)] - config_content: Option, -} - -/// # Errors -/// -/// Will return an error if the configuration was not provided. 
-pub async fn run() -> Result> { - tracing_stdout_init(LevelFilter::INFO); - - let args = Args::parse(); - - let config = setup_config(args)?; - - let console_printer = Console {}; - - let service = Service { - config: Arc::new(config), - console: console_printer, - }; - - service.run_checks().await.context("it should run the check tasks") -} - -fn tracing_stdout_init(filter: LevelFilter) { - tracing_subscriber::fmt().with_max_level(filter).init(); - tracing::debug!("Logging initialized"); -} - -fn setup_config(args: Args) -> Result { - match (args.config_path, args.config_content) { - (Some(config_path), _) => load_config_from_file(&config_path), - (_, Some(config_content)) => parse_from_json(&config_content).context("invalid config format"), - _ => Err(anyhow::anyhow!("no configuration provided")), - } -} - -fn load_config_from_file(path: &PathBuf) -> Result { - let file_content = std::fs::read_to_string(path).with_context(|| format!("can't read config file {path:?}"))?; - - parse_from_json(&file_content).context("invalid config format") -} diff --git a/packages/tracker-client/src/console/clients/checker/checks/health.rs b/packages/tracker-client/src/console/clients/checker/checks/health.rs deleted file mode 100644 index b1fb79148..000000000 --- a/packages/tracker-client/src/console/clients/checker/checks/health.rs +++ /dev/null @@ -1,77 +0,0 @@ -use std::sync::Arc; -use std::time::Duration; - -use anyhow::Result; -use hyper::StatusCode; -use reqwest::{Client as HttpClient, Response}; -use serde::Serialize; -use thiserror::Error; -use url::Url; - -#[derive(Debug, Clone, Error, Serialize)] -#[serde(into = "String")] -pub enum Error { - #[error("Failed to Build a Http Client: {err:?}")] - ClientBuildingError { err: Arc }, - #[error("Heath check failed to get a response: {err:?}")] - ResponseError { err: Arc }, - #[error("Http check returned a non-success code: \"{code}\" with the response: \"{response:?}\"")] - UnsuccessfulResponse { code: StatusCode, response: Arc 
}, -} - -impl From for String { - fn from(value: Error) -> Self { - value.to_string() - } -} - -#[derive(Debug, Clone, Serialize)] -pub struct Checks { - url: Url, - result: Result, -} - -pub async fn run(health_checks: Vec, timeout: Duration) -> Vec> { - let mut results = Vec::default(); - - tracing::debug!("Health checks ..."); - - for url in health_checks { - let result = match run_health_check(url.clone(), timeout).await { - Ok(response) => Ok(response.status().to_string()), - Err(err) => Err(err), - }; - - let check = Checks { url, result }; - - if check.result.is_err() { - results.push(Err(check)); - } else { - results.push(Ok(check)); - } - } - - results -} - -async fn run_health_check(url: Url, timeout: Duration) -> Result { - let client = HttpClient::builder() - .timeout(timeout) - .build() - .map_err(|e| Error::ClientBuildingError { err: e.into() })?; - - let response = client - .get(url.clone()) - .send() - .await - .map_err(|e| Error::ResponseError { err: e.into() })?; - - if response.status().is_success() { - Ok(response) - } else { - Err(Error::UnsuccessfulResponse { - code: response.status(), - response: response.into(), - }) - } -} diff --git a/packages/tracker-client/src/console/clients/checker/checks/http.rs b/packages/tracker-client/src/console/clients/checker/checks/http.rs deleted file mode 100644 index 48ce9678d..000000000 --- a/packages/tracker-client/src/console/clients/checker/checks/http.rs +++ /dev/null @@ -1,104 +0,0 @@ -use std::str::FromStr as _; -use std::time::Duration; - -use bittorrent_primitives::info_hash::InfoHash; -use serde::Serialize; -use url::Url; - -use crate::console::clients::http::Error; -use crate::http::client::responses::announce::Announce; -use crate::http::client::responses::scrape; -use crate::http::client::{requests, Client}; - -#[derive(Debug, Clone, Serialize)] -pub struct Checks { - url: Url, - results: Vec<(Check, Result<(), Error>)>, -} - -#[derive(Debug, Clone, Serialize)] -pub enum Check { - Announce, - 
Scrape, -} - -pub async fn run(http_trackers: Vec, timeout: Duration) -> Vec> { - let mut results = Vec::default(); - - tracing::debug!("HTTP trackers ..."); - - for ref url in http_trackers { - let mut base_url = url.clone(); - base_url.set_path(""); - - let mut checks = Checks { - url: url.clone(), - results: Vec::default(), - }; - - // Announce - { - let check = check_http_announce(&base_url, timeout).await.map(|_| ()); - - checks.results.push((Check::Announce, check)); - } - - // Scrape - { - let check = check_http_scrape(&base_url, timeout).await.map(|_| ()); - - checks.results.push((Check::Scrape, check)); - } - - if checks.results.iter().any(|f| f.1.is_err()) { - results.push(Err(checks)); - } else { - results.push(Ok(checks)); - } - } - - results -} - -async fn check_http_announce(url: &Url, timeout: Duration) -> Result { - let info_hash_str = "9c38422213e30bff212b30c360d26f9a02136422".to_string(); // # DevSkim: ignore DS173237 - let info_hash = InfoHash::from_str(&info_hash_str).expect("a valid info-hash is required"); - - let client = Client::new(url.clone(), timeout).map_err(|err| Error::HttpClientError { err })?; - - let response = client - .announce( - &requests::announce::QueryBuilder::with_default_values() - .with_info_hash(&info_hash) - .query(), - ) - .await - .map_err(|err| Error::HttpClientError { err })?; - - let response = response.bytes().await.map_err(|e| Error::ResponseError { err: e.into() })?; - - let response = serde_bencode::from_bytes::(&response).map_err(|e| Error::ParseBencodeError { - data: response, - err: e.into(), - })?; - - Ok(response) -} - -async fn check_http_scrape(url: &Url, timeout: Duration) -> Result { - let info_hashes: Vec = vec!["9c38422213e30bff212b30c360d26f9a02136422".to_string()]; // # DevSkim: ignore DS173237 - let query = requests::scrape::Query::try_from(info_hashes).expect("a valid array of info-hashes is required"); - - let client = Client::new(url.clone(), timeout).map_err(|err| Error::HttpClientError { err 
})?; - - let response = client.scrape(&query).await.map_err(|err| Error::HttpClientError { err })?; - - let response = response.bytes().await.map_err(|e| Error::ResponseError { err: e.into() })?; - - let response = scrape::Response::try_from_bencoded(&response).map_err(|e| Error::BencodeParseError { - data: response, - err: e.into(), - })?; - - Ok(response) -} diff --git a/packages/tracker-client/src/console/clients/checker/checks/mod.rs b/packages/tracker-client/src/console/clients/checker/checks/mod.rs deleted file mode 100644 index f8b03f749..000000000 --- a/packages/tracker-client/src/console/clients/checker/checks/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub mod health; -pub mod http; -pub mod structs; -pub mod udp; diff --git a/packages/tracker-client/src/console/clients/checker/checks/structs.rs b/packages/tracker-client/src/console/clients/checker/checks/structs.rs deleted file mode 100644 index d28e20c04..000000000 --- a/packages/tracker-client/src/console/clients/checker/checks/structs.rs +++ /dev/null @@ -1,12 +0,0 @@ -use serde::{Deserialize, Serialize}; - -#[derive(Serialize, Deserialize)] -pub struct Status { - pub code: String, - pub message: String, -} -#[derive(Serialize, Deserialize)] -pub struct CheckerOutput { - pub url: String, - pub status: Status, -} diff --git a/packages/tracker-client/src/console/clients/checker/checks/udp.rs b/packages/tracker-client/src/console/clients/checker/checks/udp.rs deleted file mode 100644 index 21bdcd1b7..000000000 --- a/packages/tracker-client/src/console/clients/checker/checks/udp.rs +++ /dev/null @@ -1,134 +0,0 @@ -use std::net::SocketAddr; -use std::time::Duration; - -use aquatic_udp_protocol::TransactionId; -use hex_literal::hex; -use serde::Serialize; -use url::Url; - -use crate::console::clients::udp::checker::Client; -use crate::console::clients::udp::Error; - -#[derive(Debug, Clone, Serialize)] -pub struct Checks { - remote_addr: SocketAddr, - results: Vec<(Check, Result<(), Error>)>, -} - -#[derive(Debug, 
Clone, Serialize)] -pub enum Check { - Setup, - Connect, - Announce, - Scrape, -} - -#[allow(clippy::missing_panics_doc)] -pub async fn run(udp_trackers: Vec, timeout: Duration) -> Vec> { - let mut results = Vec::default(); - - tracing::debug!("UDP trackers ..."); - - let info_hash = aquatic_udp_protocol::InfoHash(hex!("9c38422213e30bff212b30c360d26f9a02136422")); // # DevSkim: ignore DS173237 - - for remote_url in udp_trackers { - let remote_addr = resolve_socket_addr(&remote_url); - - let mut checks = Checks { - remote_addr, - results: Vec::default(), - }; - - tracing::debug!("UDP tracker: {:?}", remote_url); - - // Setup - let client = match Client::new(remote_addr, timeout).await { - Ok(client) => { - checks.results.push((Check::Setup, Ok(()))); - client - } - Err(err) => { - checks.results.push((Check::Setup, Err(err))); - results.push(Err(checks)); - continue; - } - }; - - let transaction_id = TransactionId::new(1); - - // Connect Remote - let connection_id = match client.send_connection_request(transaction_id).await { - Ok(connection_id) => { - checks.results.push((Check::Connect, Ok(()))); - connection_id - } - Err(err) => { - checks.results.push((Check::Connect, Err(err))); - results.push(Err(checks)); - continue; - } - }; - - // Announce - { - let check = client - .send_announce_request(transaction_id, connection_id, info_hash.into()) - .await - .map(|_| ()); - - checks.results.push((Check::Announce, check)); - } - - // Scrape - { - let check = client - .send_scrape_request(connection_id, transaction_id, &[info_hash.into()]) - .await - .map(|_| ()); - - checks.results.push((Check::Scrape, check)); - } - - if checks.results.iter().any(|f| f.1.is_err()) { - results.push(Err(checks)); - } else { - results.push(Ok(checks)); - } - } - - results -} - -fn resolve_socket_addr(url: &Url) -> SocketAddr { - let socket_addr = url.socket_addrs(|| None).unwrap(); - *socket_addr.first().unwrap() -} - -#[cfg(test)] -mod tests { - use std::net::{IpAddr, Ipv4Addr, 
Ipv6Addr, SocketAddr}; - - use url::Url; - - use crate::console::clients::checker::checks::udp::resolve_socket_addr; - - #[test] - fn it_should_resolve_the_socket_address_for_udp_scheme_urls_containing_a_domain() { - let socket_addr = resolve_socket_addr(&Url::parse("udp://localhost:8080").unwrap()); - - assert!( - socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) - || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) - ); - } - - #[test] - fn it_should_resolve_the_socket_address_for_udp_scheme_urls_containing_an_ip() { - let socket_addr = resolve_socket_addr(&Url::parse("udp://localhost:8080").unwrap()); - - assert!( - socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) - || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) - ); - } -} diff --git a/packages/tracker-client/src/console/clients/checker/config.rs b/packages/tracker-client/src/console/clients/checker/config.rs deleted file mode 100644 index 154dcae85..000000000 --- a/packages/tracker-client/src/console/clients/checker/config.rs +++ /dev/null @@ -1,282 +0,0 @@ -use std::error::Error; -use std::fmt; - -use reqwest::Url as ServiceUrl; -use serde::Deserialize; - -/// It parses the configuration from a JSON format. -/// -/// # Errors -/// -/// Will return an error if the configuration is not valid. -/// -/// # Panics -/// -/// Will panic if unable to read the configuration file. -pub fn parse_from_json(json: &str) -> Result { - let plain_config: PlainConfiguration = serde_json::from_str(json).map_err(ConfigurationError::JsonParseError)?; - Configuration::try_from(plain_config) -} - -/// DTO for the configuration to serialize/deserialize configuration. -/// -/// Configuration does not need to be valid. 
-#[derive(Deserialize)] -struct PlainConfiguration { - pub udp_trackers: Vec, - pub http_trackers: Vec, - pub health_checks: Vec, -} - -/// Validated configuration -pub struct Configuration { - pub udp_trackers: Vec, - pub http_trackers: Vec, - pub health_checks: Vec, -} - -#[derive(Debug)] -pub enum ConfigurationError { - JsonParseError(serde_json::Error), - InvalidUdpAddress(std::net::AddrParseError), - InvalidUrl(url::ParseError), -} - -impl Error for ConfigurationError {} - -impl fmt::Display for ConfigurationError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - ConfigurationError::JsonParseError(e) => write!(f, "JSON parse error: {e}"), - ConfigurationError::InvalidUdpAddress(e) => write!(f, "Invalid UDP address: {e}"), - ConfigurationError::InvalidUrl(e) => write!(f, "Invalid URL: {e}"), - } - } -} - -impl TryFrom for Configuration { - type Error = ConfigurationError; - - fn try_from(plain_config: PlainConfiguration) -> Result { - let udp_trackers = plain_config - .udp_trackers - .into_iter() - .map(|s| if s.starts_with("udp://") { s } else { format!("udp://{s}") }) - .map(|s| s.parse::().map_err(ConfigurationError::InvalidUrl)) - .collect::, _>>()?; - - let http_trackers = plain_config - .http_trackers - .into_iter() - .map(|s| s.parse::().map_err(ConfigurationError::InvalidUrl)) - .collect::, _>>()?; - - let health_checks = plain_config - .health_checks - .into_iter() - .map(|s| s.parse::().map_err(ConfigurationError::InvalidUrl)) - .collect::, _>>()?; - - Ok(Configuration { - udp_trackers, - http_trackers, - health_checks, - }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn configuration_should_be_build_from_plain_serializable_configuration() { - let dto = PlainConfiguration { - udp_trackers: vec!["udp://127.0.0.1:8080".to_string()], - http_trackers: vec!["http://127.0.0.1:8080".to_string()], - health_checks: vec!["http://127.0.0.1:8080/health".to_string()], - }; - - let config = 
Configuration::try_from(dto).expect("A valid configuration"); - - assert_eq!(config.udp_trackers, vec![ServiceUrl::parse("udp://127.0.0.1:8080").unwrap()]); - - assert_eq!( - config.http_trackers, - vec![ServiceUrl::parse("http://127.0.0.1:8080").unwrap()] - ); - - assert_eq!( - config.health_checks, - vec![ServiceUrl::parse("http://127.0.0.1:8080/health").unwrap()] - ); - } - - mod building_configuration_from_plain_configuration_for { - - mod udp_trackers { - use crate::console::clients::checker::config::{Configuration, PlainConfiguration, ServiceUrl}; - - /* The plain configuration should allow UDP URLs with: - - - IP or domain. - - With or without scheme. - - With or without `announce` suffix. - - With or without `/` at the end of the authority section (with empty path). - - For example: - - 127.0.0.1:6969 - 127.0.0.1:6969/ - 127.0.0.1:6969/announce - - localhost:6969 - localhost:6969/ - localhost:6969/announce - - udp://127.0.0.1:6969 - udp://127.0.0.1:6969/ - udp://127.0.0.1:6969/announce - - udp://localhost:6969 - udp://localhost:6969/ - udp://localhost:6969/announce - - */ - - #[test] - fn it_should_fail_when_a_tracker_udp_url_is_invalid() { - let plain_config = PlainConfiguration { - udp_trackers: vec!["invalid URL".to_string()], - http_trackers: vec![], - health_checks: vec![], - }; - - assert!(Configuration::try_from(plain_config).is_err()); - } - - #[test] - fn it_should_add_the_udp_scheme_to_the_udp_url_when_it_is_missing() { - let plain_config = PlainConfiguration { - udp_trackers: vec!["127.0.0.1:6969".to_string()], - http_trackers: vec![], - health_checks: vec![], - }; - - let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); - - assert_eq!(config.udp_trackers[0], "udp://127.0.0.1:6969".parse::().unwrap()); - } - - #[test] - fn it_should_allow_using_domains() { - let plain_config = PlainConfiguration { - udp_trackers: vec!["udp://localhost:6969".to_string()], - http_trackers: vec![], - health_checks: vec![], - }; - 
- let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); - - assert_eq!(config.udp_trackers[0], "udp://localhost:6969".parse::().unwrap()); - } - - #[test] - fn it_should_allow_the_url_to_have_an_empty_path() { - let plain_config = PlainConfiguration { - udp_trackers: vec!["127.0.0.1:6969/".to_string()], - http_trackers: vec![], - health_checks: vec![], - }; - - let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); - - assert_eq!(config.udp_trackers[0], "udp://127.0.0.1:6969/".parse::().unwrap()); - } - - #[test] - fn it_should_allow_the_url_to_contain_a_path() { - // This is the common format for UDP tracker URLs: - // udp://domain.com:6969/announce - - let plain_config = PlainConfiguration { - udp_trackers: vec!["127.0.0.1:6969/announce".to_string()], - http_trackers: vec![], - health_checks: vec![], - }; - - let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); - - assert_eq!( - config.udp_trackers[0], - "udp://127.0.0.1:6969/announce".parse::().unwrap() - ); - } - } - - mod http_trackers { - use crate::console::clients::checker::config::{Configuration, PlainConfiguration, ServiceUrl}; - - #[test] - fn it_should_fail_when_a_tracker_http_url_is_invalid() { - let plain_config = PlainConfiguration { - udp_trackers: vec![], - http_trackers: vec!["invalid URL".to_string()], - health_checks: vec![], - }; - - assert!(Configuration::try_from(plain_config).is_err()); - } - - #[test] - fn it_should_allow_the_url_to_contain_a_path() { - // This is the common format for HTTP tracker URLs: - // http://domain.com:7070/announce - - let plain_config = PlainConfiguration { - udp_trackers: vec![], - http_trackers: vec!["http://127.0.0.1:7070/announce".to_string()], - health_checks: vec![], - }; - - let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); - - assert_eq!( - config.http_trackers[0], - 
"http://127.0.0.1:7070/announce".parse::().unwrap() - ); - } - - #[test] - fn it_should_allow_the_url_to_contain_an_empty_path() { - let plain_config = PlainConfiguration { - udp_trackers: vec![], - http_trackers: vec!["http://127.0.0.1:7070/".to_string()], - health_checks: vec![], - }; - - let config = Configuration::try_from(plain_config).expect("Invalid plain configuration"); - - assert_eq!( - config.http_trackers[0], - "http://127.0.0.1:7070/".parse::().unwrap() - ); - } - } - - mod health_checks { - use crate::console::clients::checker::config::{Configuration, PlainConfiguration}; - - #[test] - fn it_should_fail_when_a_health_check_http_url_is_invalid() { - let plain_config = PlainConfiguration { - udp_trackers: vec![], - http_trackers: vec![], - health_checks: vec!["invalid URL".to_string()], - }; - - assert!(Configuration::try_from(plain_config).is_err()); - } - } - } -} diff --git a/packages/tracker-client/src/console/clients/checker/console.rs b/packages/tracker-client/src/console/clients/checker/console.rs deleted file mode 100644 index b55c559fc..000000000 --- a/packages/tracker-client/src/console/clients/checker/console.rs +++ /dev/null @@ -1,38 +0,0 @@ -use super::printer::{Printer, CLEAR_SCREEN}; - -pub struct Console {} - -impl Default for Console { - fn default() -> Self { - Self::new() - } -} - -impl Console { - #[must_use] - pub fn new() -> Self { - Self {} - } -} - -impl Printer for Console { - fn clear(&self) { - self.print(CLEAR_SCREEN); - } - - fn print(&self, output: &str) { - print!("{}", &output); - } - - fn eprint(&self, output: &str) { - eprint!("{}", &output); - } - - fn println(&self, output: &str) { - println!("{}", &output); - } - - fn eprintln(&self, output: &str) { - eprintln!("{}", &output); - } -} diff --git a/packages/tracker-client/src/console/clients/checker/logger.rs b/packages/tracker-client/src/console/clients/checker/logger.rs deleted file mode 100644 index 50e97189f..000000000 --- 
a/packages/tracker-client/src/console/clients/checker/logger.rs +++ /dev/null @@ -1,72 +0,0 @@ -use std::cell::RefCell; - -use super::printer::{Printer, CLEAR_SCREEN}; - -pub struct Logger { - output: RefCell, -} - -impl Default for Logger { - fn default() -> Self { - Self::new() - } -} - -impl Logger { - #[must_use] - pub fn new() -> Self { - Self { - output: RefCell::new(String::new()), - } - } - - pub fn log(&self) -> String { - self.output.borrow().clone() - } -} - -impl Printer for Logger { - fn clear(&self) { - self.print(CLEAR_SCREEN); - } - - fn print(&self, output: &str) { - *self.output.borrow_mut() = format!("{}{}", self.output.borrow(), &output); - } - - fn eprint(&self, output: &str) { - *self.output.borrow_mut() = format!("{}{}", self.output.borrow(), &output); - } - - fn println(&self, output: &str) { - self.print(&format!("{}/n", &output)); - } - - fn eprintln(&self, output: &str) { - self.eprint(&format!("{}/n", &output)); - } -} - -#[cfg(test)] -mod tests { - use crate::console::clients::checker::logger::Logger; - use crate::console::clients::checker::printer::{Printer, CLEAR_SCREEN}; - - #[test] - fn should_capture_the_clear_screen_command() { - let console_logger = Logger::new(); - - console_logger.clear(); - - assert_eq!(CLEAR_SCREEN, console_logger.log()); - } - - #[test] - fn should_capture_the_print_command_output() { - let console_logger = Logger::new(); - - console_logger.print("OUTPUT"); - - assert_eq!("OUTPUT", console_logger.log()); - } -} diff --git a/packages/tracker-client/src/console/clients/checker/mod.rs b/packages/tracker-client/src/console/clients/checker/mod.rs deleted file mode 100644 index d26a4a686..000000000 --- a/packages/tracker-client/src/console/clients/checker/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -pub mod app; -pub mod checks; -pub mod config; -pub mod console; -pub mod logger; -pub mod printer; -pub mod service; diff --git a/packages/tracker-client/src/console/clients/checker/printer.rs 
b/packages/tracker-client/src/console/clients/checker/printer.rs deleted file mode 100644 index d590dfedb..000000000 --- a/packages/tracker-client/src/console/clients/checker/printer.rs +++ /dev/null @@ -1,9 +0,0 @@ -pub const CLEAR_SCREEN: &str = "\x1B[2J\x1B[1;1H"; - -pub trait Printer { - fn clear(&self); - fn print(&self, output: &str); - fn eprint(&self, output: &str); - fn println(&self, output: &str); - fn eprintln(&self, output: &str); -} diff --git a/packages/tracker-client/src/console/clients/checker/service.rs b/packages/tracker-client/src/console/clients/checker/service.rs deleted file mode 100644 index acd312d8c..000000000 --- a/packages/tracker-client/src/console/clients/checker/service.rs +++ /dev/null @@ -1,62 +0,0 @@ -use std::sync::Arc; - -use futures::FutureExt as _; -use serde::Serialize; -use tokio::task::{JoinError, JoinSet}; -use torrust_tracker_configuration::DEFAULT_TIMEOUT; - -use super::checks::{health, http, udp}; -use super::config::Configuration; -use super::console::Console; -use crate::console::clients::checker::printer::Printer; - -pub struct Service { - pub(crate) config: Arc, - pub(crate) console: Console, -} - -#[derive(Debug, Clone, Serialize)] -pub enum CheckResult { - Udp(Result), - Http(Result), - Health(Result), -} - -impl Service { - /// # Errors - /// - /// It will return an error if some of the tests panic or otherwise fail to run. - /// On success it will return a vector of `Ok(())` of [`CheckResult`]. - /// - /// # Panics - /// - /// It would panic if `serde_json` produces invalid json for the `to_string_pretty` function. 
- pub async fn run_checks(self) -> Result, JoinError> { - tracing::info!("Running checks for trackers ..."); - - let mut check_results = Vec::default(); - - let mut checks = JoinSet::new(); - checks.spawn( - udp::run(self.config.udp_trackers.clone(), DEFAULT_TIMEOUT).map(|mut f| f.drain(..).map(CheckResult::Udp).collect()), - ); - checks.spawn( - http::run(self.config.http_trackers.clone(), DEFAULT_TIMEOUT) - .map(|mut f| f.drain(..).map(CheckResult::Http).collect()), - ); - checks.spawn( - health::run(self.config.health_checks.clone(), DEFAULT_TIMEOUT) - .map(|mut f| f.drain(..).map(CheckResult::Health).collect()), - ); - - while let Some(results) = checks.join_next().await { - check_results.append(&mut results?); - } - - let json_output = serde_json::json!(check_results); - self.console - .println(&serde_json::to_string_pretty(&json_output).expect("it should consume valid json")); - - Ok(check_results) - } -} diff --git a/packages/tracker-client/src/console/clients/http/app.rs b/packages/tracker-client/src/console/clients/http/app.rs deleted file mode 100644 index 8db6fe46d..000000000 --- a/packages/tracker-client/src/console/clients/http/app.rs +++ /dev/null @@ -1,102 +0,0 @@ -//! HTTP Tracker client: -//! -//! Examples: -//! -//! `Announce` request: -//! -//! ```text -//! cargo run --bin http_tracker_client announce http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq -//! ``` -//! -//! `Scrape` request: -//! -//! ```text -//! cargo run --bin http_tracker_client scrape http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq -//! 
``` -use std::str::FromStr; -use std::time::Duration; - -use anyhow::Context; -use bittorrent_primitives::info_hash::InfoHash; -use clap::{Parser, Subcommand}; -use reqwest::Url; -use torrust_tracker_configuration::DEFAULT_TIMEOUT; - -use crate::http::client::requests::announce::QueryBuilder; -use crate::http::client::responses::announce::Announce; -use crate::http::client::responses::scrape; -use crate::http::client::{requests, Client}; - -#[derive(Parser, Debug)] -#[command(author, version, about, long_about = None)] -struct Args { - #[command(subcommand)] - command: Command, -} - -#[derive(Subcommand, Debug)] -enum Command { - Announce { tracker_url: String, info_hash: String }, - Scrape { tracker_url: String, info_hashes: Vec }, -} - -/// # Errors -/// -/// Will return an error if the command fails. -pub async fn run() -> anyhow::Result<()> { - let args = Args::parse(); - - match args.command { - Command::Announce { tracker_url, info_hash } => { - announce_command(tracker_url, info_hash, DEFAULT_TIMEOUT).await?; - } - Command::Scrape { - tracker_url, - info_hashes, - } => { - scrape_command(&tracker_url, &info_hashes, DEFAULT_TIMEOUT).await?; - } - } - - Ok(()) -} - -async fn announce_command(tracker_url: String, info_hash: String, timeout: Duration) -> anyhow::Result<()> { - let base_url = Url::parse(&tracker_url).context("failed to parse HTTP tracker base URL")?; - let info_hash = - InfoHash::from_str(&info_hash).expect("Invalid infohash. Example infohash: `9c38422213e30bff212b30c360d26f9a02136422`"); - - let response = Client::new(base_url, timeout)? 
- .announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) - .await?; - - let body = response.bytes().await?; - - let announce_response: Announce = serde_bencode::from_bytes(&body) - .unwrap_or_else(|_| panic!("response body should be a valid announce response, got: \"{:#?}\"", &body)); - - let json = serde_json::to_string(&announce_response).context("failed to serialize scrape response into JSON")?; - - println!("{json}"); - - Ok(()) -} - -async fn scrape_command(tracker_url: &str, info_hashes: &[String], timeout: Duration) -> anyhow::Result<()> { - let base_url = Url::parse(tracker_url).context("failed to parse HTTP tracker base URL")?; - - let query = requests::scrape::Query::try_from(info_hashes).context("failed to parse infohashes")?; - - let response = Client::new(base_url, timeout)?.scrape(&query).await?; - - let body = response.bytes().await?; - - let scrape_response = scrape::Response::try_from_bencoded(&body) - .unwrap_or_else(|_| panic!("response body should be a valid scrape response, got: \"{:#?}\"", &body)); - - let json = serde_json::to_string(&scrape_response).context("failed to serialize scrape response into JSON")?; - - println!("{json}"); - - Ok(()) -} diff --git a/packages/tracker-client/src/console/clients/http/mod.rs b/packages/tracker-client/src/console/clients/http/mod.rs deleted file mode 100644 index e4b6fbe57..000000000 --- a/packages/tracker-client/src/console/clients/http/mod.rs +++ /dev/null @@ -1,34 +0,0 @@ -use std::sync::Arc; - -use serde::Serialize; -use thiserror::Error; - -use crate::http::client::responses::scrape::BencodeParseError; - -pub mod app; - -#[derive(Debug, Clone, Error, Serialize)] -#[serde(into = "String")] -pub enum Error { - #[error("Http request did not receive a response within the timeout: {err:?}")] - HttpClientError { err: crate::http::client::Error }, - #[error("Http failed to get a response at all: {err:?}")] - ResponseError { err: Arc }, - #[error("Failed to deserialize the 
bencoded response data with the error: \"{err:?}\"")] - ParseBencodeError { - data: hyper::body::Bytes, - err: Arc, - }, - - #[error("Failed to deserialize the bencoded response data with the error: \"{err:?}\"")] - BencodeParseError { - data: hyper::body::Bytes, - err: Arc, - }, -} - -impl From for String { - fn from(value: Error) -> Self { - value.to_string() - } -} diff --git a/packages/tracker-client/src/console/clients/mod.rs b/packages/tracker-client/src/console/clients/mod.rs deleted file mode 100644 index 8492f8ba5..000000000 --- a/packages/tracker-client/src/console/clients/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -//! Console clients. -pub mod checker; -pub mod http; -pub mod udp; diff --git a/packages/tracker-client/src/console/clients/udp/app.rs b/packages/tracker-client/src/console/clients/udp/app.rs deleted file mode 100644 index a2736c365..000000000 --- a/packages/tracker-client/src/console/clients/udp/app.rs +++ /dev/null @@ -1,208 +0,0 @@ -//! UDP Tracker client: -//! -//! Examples: -//! -//! Announce request: -//! -//! ```text -//! cargo run --bin udp_tracker_client announce 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq -//! ``` -//! -//! Announce response: -//! -//! ```json -//! { -//! "transaction_id": -888840697 -//! "announce_interval": 120, -//! "leechers": 0, -//! "seeders": 1, -//! "peers": [ -//! "123.123.123.123:51289" -//! ], -//! } -//! ``` -//! -//! Scrape request: -//! -//! ```text -//! cargo run --bin udp_tracker_client scrape 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq -//! ``` -//! -//! Scrape response: -//! -//! ```json -//! { -//! "transaction_id": -888840697, -//! "torrent_stats": [ -//! { -//! "completed": 0, -//! "leechers": 0, -//! "seeders": 0 -//! }, -//! { -//! "completed": 0, -//! "leechers": 0, -//! "seeders": 0 -//! } -//! ] -//! } -//! ``` -//! -//! You can use an URL with instead of the socket address. For example: -//! -//! ```text -//! 
cargo run --bin udp_tracker_client scrape udp://localhost:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq -//! cargo run --bin udp_tracker_client scrape udp://localhost:6969/scrape 9c38422213e30bff212b30c360d26f9a02136422 | jq -//! ``` -//! -//! The protocol (`udp://`) in the URL is mandatory. The path (`\scrape`) is optional. It always uses `\scrape`. -use std::net::{SocketAddr, ToSocketAddrs}; -use std::str::FromStr; - -use anyhow::Context; -use aquatic_udp_protocol::{Response, TransactionId}; -use bittorrent_primitives::info_hash::InfoHash as TorrustInfoHash; -use clap::{Parser, Subcommand}; -use torrust_tracker_configuration::DEFAULT_TIMEOUT; -use tracing::level_filters::LevelFilter; -use url::Url; - -use super::Error; -use crate::console::clients::udp::checker; -use crate::console::clients::udp::responses::dto::SerializableResponse; -use crate::console::clients::udp::responses::json::ToJson; - -const RANDOM_TRANSACTION_ID: i32 = -888_840_697; - -#[derive(Parser, Debug)] -#[command(author, version, about, long_about = None)] -struct Args { - #[command(subcommand)] - command: Command, -} - -#[derive(Subcommand, Debug)] -enum Command { - Announce { - #[arg(value_parser = parse_socket_addr)] - tracker_socket_addr: SocketAddr, - #[arg(value_parser = parse_info_hash)] - info_hash: TorrustInfoHash, - }, - Scrape { - #[arg(value_parser = parse_socket_addr)] - tracker_socket_addr: SocketAddr, - #[arg(value_parser = parse_info_hash, num_args = 1..=74, value_delimiter = ' ')] - info_hashes: Vec, - }, -} - -/// # Errors -/// -/// Will return an error if the command fails. 
-/// -/// -pub async fn run() -> anyhow::Result<()> { - tracing_stdout_init(LevelFilter::INFO); - - let args = Args::parse(); - - let response = match args.command { - Command::Announce { - tracker_socket_addr: remote_addr, - info_hash, - } => handle_announce(remote_addr, &info_hash).await?, - Command::Scrape { - tracker_socket_addr: remote_addr, - info_hashes, - } => handle_scrape(remote_addr, &info_hashes).await?, - }; - - let response: SerializableResponse = response.into(); - let response_json = response.to_json_string()?; - - print!("{response_json}"); - - Ok(()) -} - -fn tracing_stdout_init(filter: LevelFilter) { - tracing_subscriber::fmt().with_max_level(filter).init(); - tracing::debug!("Logging initialized"); -} - -async fn handle_announce(remote_addr: SocketAddr, info_hash: &TorrustInfoHash) -> Result { - let transaction_id = TransactionId::new(RANDOM_TRANSACTION_ID); - - let client = checker::Client::new(remote_addr, DEFAULT_TIMEOUT).await?; - - let connection_id = client.send_connection_request(transaction_id).await?; - - client.send_announce_request(transaction_id, connection_id, *info_hash).await -} - -async fn handle_scrape(remote_addr: SocketAddr, info_hashes: &[TorrustInfoHash]) -> Result { - let transaction_id = TransactionId::new(RANDOM_TRANSACTION_ID); - - let client = checker::Client::new(remote_addr, DEFAULT_TIMEOUT).await?; - - let connection_id = client.send_connection_request(transaction_id).await?; - - client.send_scrape_request(connection_id, transaction_id, info_hashes).await -} - -fn parse_socket_addr(tracker_socket_addr_str: &str) -> anyhow::Result { - tracing::debug!("Tracker socket address: {tracker_socket_addr_str:#?}"); - - // Check if the address is a valid URL. If so, extract the host and port. 
- let resolved_addr = if let Ok(url) = Url::parse(tracker_socket_addr_str) { - tracing::debug!("Tracker socket address URL: {url:?}"); - - let host = url - .host_str() - .with_context(|| format!("invalid host in URL: `{tracker_socket_addr_str}`"))? - .to_owned(); - - let port = url - .port() - .with_context(|| format!("port not found in URL: `{tracker_socket_addr_str}`"))? - .to_owned(); - - (host, port) - } else { - // If not a URL, assume it's a host:port pair. - - let parts: Vec<&str> = tracker_socket_addr_str.split(':').collect(); - - if parts.len() != 2 { - return Err(anyhow::anyhow!( - "invalid address format: `{}`. Expected format is host:port", - tracker_socket_addr_str - )); - } - - let host = parts[0].to_owned(); - - let port = parts[1] - .parse::() - .with_context(|| format!("invalid port: `{}`", parts[1]))? - .to_owned(); - - (host, port) - }; - - tracing::debug!("Resolved address: {resolved_addr:#?}"); - - // Perform DNS resolution. - let socket_addrs: Vec<_> = resolved_addr.to_socket_addrs()?.collect(); - if socket_addrs.is_empty() { - Err(anyhow::anyhow!("DNS resolution failed for `{}`", tracker_socket_addr_str)) - } else { - Ok(socket_addrs[0]) - } -} - -fn parse_info_hash(info_hash_str: &str) -> anyhow::Result { - TorrustInfoHash::from_str(info_hash_str) - .map_err(|e| anyhow::Error::msg(format!("failed to parse info-hash `{info_hash_str}`: {e:?}"))) -} diff --git a/packages/tracker-client/src/console/clients/udp/checker.rs b/packages/tracker-client/src/console/clients/udp/checker.rs deleted file mode 100644 index b9fd3a729..000000000 --- a/packages/tracker-client/src/console/clients/udp/checker.rs +++ /dev/null @@ -1,177 +0,0 @@ -use std::net::{Ipv4Addr, SocketAddr}; -use std::num::NonZeroU16; -use std::time::Duration; - -use aquatic_udp_protocol::common::InfoHash; -use aquatic_udp_protocol::{ - AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, NumberOfBytes, NumberOfPeers, - PeerId, PeerKey, Port, Response, 
ScrapeRequest, TransactionId, -}; -use bittorrent_primitives::info_hash::InfoHash as TorrustInfoHash; - -use super::Error; -use crate::udp::client::UdpTrackerClient; - -/// A UDP Tracker client to make test requests (checks). -#[derive(Debug)] -pub struct Client { - client: UdpTrackerClient, -} - -impl Client { - /// Creates a new `[Client]` for checking a UDP Tracker Service - /// - /// # Errors - /// - /// It will error if unable to bind and connect to the udp remote address. - /// - pub async fn new(remote_addr: SocketAddr, timeout: Duration) -> Result { - let client = UdpTrackerClient::new(remote_addr, timeout) - .await - .map_err(|err| Error::UnableToBindAndConnect { remote_addr, err })?; - - Ok(Self { client }) - } - - /// Returns the local addr of this [`Client`]. - /// - /// # Errors - /// - /// This function will return an error if the socket is somehow not bound. - pub fn local_addr(&self) -> std::io::Result { - self.client.client.socket.local_addr() - } - - /// Sends a connection request to the UDP Tracker server. - /// - /// # Errors - /// - /// Will return and error if - /// - /// - It can't connect to the remote UDP socket. - /// - It can't make a connection request successfully to the remote UDP - /// server (after successfully connecting to the remote UDP socket). - /// - /// # Panics - /// - /// Will panic if it receives an unexpected response. 
- pub async fn send_connection_request(&self, transaction_id: TransactionId) -> Result { - tracing::debug!("Sending connection request with transaction id: {transaction_id:#?}"); - - let connect_request = ConnectRequest { transaction_id }; - - let _ = self - .client - .send(connect_request.into()) - .await - .map_err(|err| Error::UnableToSendConnectionRequest { err })?; - - let response = self - .client - .receive() - .await - .map_err(|err| Error::UnableToReceiveConnectResponse { err })?; - - match response { - Response::Connect(connect_response) => Ok(connect_response.connection_id), - _ => Err(Error::UnexpectedConnectionResponse { response }), - } - } - - /// Sends an announce request to the UDP Tracker server. - /// - /// # Errors - /// - /// Will return and error if the client is not connected. You have to connect - /// before calling this function. - /// - /// # Panics - /// - /// It will panic if the `local_address` has a zero port. - pub async fn send_announce_request( - &self, - transaction_id: TransactionId, - connection_id: ConnectionId, - info_hash: TorrustInfoHash, - ) -> Result { - tracing::debug!("Sending announce request with transaction id: {transaction_id:#?}"); - - let port = NonZeroU16::new( - self.client - .client - .socket - .local_addr() - .expect("it should get the local address") - .port(), - ) - .expect("it should no be zero"); - - let announce_request = AnnounceRequest { - connection_id, - action_placeholder: AnnounceActionPlaceholder::default(), - transaction_id, - info_hash: InfoHash(info_hash.bytes()), - peer_id: PeerId(*b"-qB00000000000000001"), - bytes_downloaded: NumberOfBytes(0i64.into()), - bytes_uploaded: NumberOfBytes(0i64.into()), - bytes_left: NumberOfBytes(0i64.into()), - event: AnnounceEvent::Started.into(), - ip_address: Ipv4Addr::new(0, 0, 0, 0).into(), - key: PeerKey::new(0i32), - peers_wanted: NumberOfPeers(1i32.into()), - port: Port::new(port), - }; - - let _ = self - .client - .send(announce_request.into()) - .await - 
.map_err(|err| Error::UnableToSendAnnounceRequest { err })?; - - let response = self - .client - .receive() - .await - .map_err(|err| Error::UnableToReceiveAnnounceResponse { err })?; - - Ok(response) - } - - /// Sends a scrape request to the UDP Tracker server. - /// - /// # Errors - /// - /// Will return and error if the client is not connected. You have to connect - /// before calling this function. - pub async fn send_scrape_request( - &self, - connection_id: ConnectionId, - transaction_id: TransactionId, - info_hashes: &[TorrustInfoHash], - ) -> Result { - tracing::debug!("Sending scrape request with transaction id: {transaction_id:#?}"); - - let scrape_request = ScrapeRequest { - connection_id, - transaction_id, - info_hashes: info_hashes - .iter() - .map(|torrust_info_hash| InfoHash(torrust_info_hash.bytes())) - .collect(), - }; - - let _ = self - .client - .send(scrape_request.into()) - .await - .map_err(|err| Error::UnableToSendScrapeRequest { err })?; - - let response = self - .client - .receive() - .await - .map_err(|err| Error::UnableToReceiveScrapeResponse { err })?; - - Ok(response) - } -} diff --git a/packages/tracker-client/src/console/clients/udp/mod.rs b/packages/tracker-client/src/console/clients/udp/mod.rs deleted file mode 100644 index ae6271a78..000000000 --- a/packages/tracker-client/src/console/clients/udp/mod.rs +++ /dev/null @@ -1,51 +0,0 @@ -use std::net::SocketAddr; - -use aquatic_udp_protocol::Response; -use serde::Serialize; -use thiserror::Error; - -use crate::udp; - -pub mod app; -pub mod checker; -pub mod responses; - -#[derive(Error, Debug, Clone, Serialize)] -#[serde(into = "String")] -pub enum Error { - #[error("Failed to Connect to: {remote_addr}, with error: {err}")] - UnableToBindAndConnect { remote_addr: SocketAddr, err: udp::Error }, - - #[error("Failed to send a connection request, with error: {err}")] - UnableToSendConnectionRequest { err: udp::Error }, - - #[error("Failed to receive a connect response, with error: 
{err}")] - UnableToReceiveConnectResponse { err: udp::Error }, - - #[error("Failed to send a announce request, with error: {err}")] - UnableToSendAnnounceRequest { err: udp::Error }, - - #[error("Failed to receive a announce response, with error: {err}")] - UnableToReceiveAnnounceResponse { err: udp::Error }, - - #[error("Failed to send a scrape request, with error: {err}")] - UnableToSendScrapeRequest { err: udp::Error }, - - #[error("Failed to receive a scrape response, with error: {err}")] - UnableToReceiveScrapeResponse { err: udp::Error }, - - #[error("Failed to receive a response, with error: {err}")] - UnableToReceiveResponse { err: udp::Error }, - - #[error("Failed to get local address for connection: {err}")] - UnableToGetLocalAddr { err: udp::Error }, - - #[error("Failed to get a connection response: {response:?}")] - UnexpectedConnectionResponse { response: Response }, -} - -impl From for String { - fn from(value: Error) -> Self { - value.to_string() - } -} diff --git a/packages/tracker-client/src/console/clients/udp/responses/dto.rs b/packages/tracker-client/src/console/clients/udp/responses/dto.rs deleted file mode 100644 index 93320b0f7..000000000 --- a/packages/tracker-client/src/console/clients/udp/responses/dto.rs +++ /dev/null @@ -1,128 +0,0 @@ -//! Aquatic responses are not serializable. These are the serializable wrappers. 
-use std::net::{Ipv4Addr, Ipv6Addr}; - -use aquatic_udp_protocol::Response::{self}; -use aquatic_udp_protocol::{AnnounceResponse, ConnectResponse, ErrorResponse, Ipv4AddrBytes, Ipv6AddrBytes, ScrapeResponse}; -use serde::Serialize; - -#[derive(Serialize)] -pub enum SerializableResponse { - Connect(ConnectSerializableResponse), - AnnounceIpv4(AnnounceSerializableResponse), - AnnounceIpv6(AnnounceSerializableResponse), - Scrape(ScrapeSerializableResponse), - Error(ErrorSerializableResponse), -} - -impl From for SerializableResponse { - fn from(response: Response) -> Self { - match response { - Response::Connect(response) => SerializableResponse::Connect(ConnectSerializableResponse::from(response)), - Response::AnnounceIpv4(response) => SerializableResponse::AnnounceIpv4(AnnounceSerializableResponse::from(response)), - Response::AnnounceIpv6(response) => SerializableResponse::AnnounceIpv6(AnnounceSerializableResponse::from(response)), - Response::Scrape(response) => SerializableResponse::Scrape(ScrapeSerializableResponse::from(response)), - Response::Error(response) => SerializableResponse::Error(ErrorSerializableResponse::from(response)), - } - } -} - -#[derive(Serialize)] -pub struct ConnectSerializableResponse { - transaction_id: i32, - connection_id: i64, -} - -impl From for ConnectSerializableResponse { - fn from(connect: ConnectResponse) -> Self { - Self { - transaction_id: connect.transaction_id.0.into(), - connection_id: connect.connection_id.0.into(), - } - } -} - -#[derive(Serialize)] -pub struct AnnounceSerializableResponse { - transaction_id: i32, - announce_interval: i32, - leechers: i32, - seeders: i32, - peers: Vec, -} - -impl From> for AnnounceSerializableResponse { - fn from(announce: AnnounceResponse) -> Self { - Self { - transaction_id: announce.fixed.transaction_id.0.into(), - announce_interval: announce.fixed.announce_interval.0.into(), - leechers: announce.fixed.leechers.0.into(), - seeders: announce.fixed.seeders.0.into(), - peers: announce - 
.peers - .iter() - .map(|peer| format!("{}:{}", Ipv4Addr::from(peer.ip_address), peer.port.0)) - .collect::>(), - } - } -} - -impl From> for AnnounceSerializableResponse { - fn from(announce: AnnounceResponse) -> Self { - Self { - transaction_id: announce.fixed.transaction_id.0.into(), - announce_interval: announce.fixed.announce_interval.0.into(), - leechers: announce.fixed.leechers.0.into(), - seeders: announce.fixed.seeders.0.into(), - peers: announce - .peers - .iter() - .map(|peer| format!("{}:{}", Ipv6Addr::from(peer.ip_address), peer.port.0)) - .collect::>(), - } - } -} - -#[derive(Serialize)] -pub struct ScrapeSerializableResponse { - transaction_id: i32, - torrent_stats: Vec, -} - -impl From for ScrapeSerializableResponse { - fn from(scrape: ScrapeResponse) -> Self { - Self { - transaction_id: scrape.transaction_id.0.into(), - torrent_stats: scrape - .torrent_stats - .iter() - .map(|torrent_scrape_statistics| TorrentStats { - seeders: torrent_scrape_statistics.seeders.0.into(), - completed: torrent_scrape_statistics.completed.0.into(), - leechers: torrent_scrape_statistics.leechers.0.into(), - }) - .collect::>(), - } - } -} - -#[derive(Serialize)] -pub struct ErrorSerializableResponse { - transaction_id: i32, - message: String, -} - -impl From for ErrorSerializableResponse { - fn from(error: ErrorResponse) -> Self { - Self { - transaction_id: error.transaction_id.0.into(), - message: error.message.to_string(), - } - } -} - -#[derive(Serialize)] -struct TorrentStats { - seeders: i32, - completed: i32, - leechers: i32, -} diff --git a/packages/tracker-client/src/console/clients/udp/responses/json.rs b/packages/tracker-client/src/console/clients/udp/responses/json.rs deleted file mode 100644 index 5d2bd6b89..000000000 --- a/packages/tracker-client/src/console/clients/udp/responses/json.rs +++ /dev/null @@ -1,25 +0,0 @@ -use anyhow::Context; -use serde::Serialize; - -use super::dto::SerializableResponse; - -#[allow(clippy::module_name_repetitions)] -pub trait 
ToJson { - /// - /// Returns a string with the JSON serialized version of the response - /// - /// # Errors - /// - /// Will return an error if serialization fails. - /// - fn to_json_string(&self) -> anyhow::Result - where - Self: Serialize, - { - let pretty_json = serde_json::to_string_pretty(self).context("response JSON serialization")?; - - Ok(pretty_json) - } -} - -impl ToJson for SerializableResponse {} diff --git a/packages/tracker-client/src/console/clients/udp/responses/mod.rs b/packages/tracker-client/src/console/clients/udp/responses/mod.rs deleted file mode 100644 index e6d2e5e51..000000000 --- a/packages/tracker-client/src/console/clients/udp/responses/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod dto; -pub mod json; diff --git a/packages/tracker-client/src/console/mod.rs b/packages/tracker-client/src/console/mod.rs deleted file mode 100644 index 4b4cb9de4..000000000 --- a/packages/tracker-client/src/console/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -//! Console apps. -pub mod clients; diff --git a/packages/tracker-client/src/http/url_encoding.rs b/packages/tracker-client/src/http/url_encoding.rs index ee7ab166e..6adb2e903 100644 --- a/packages/tracker-client/src/http/url_encoding.rs +++ b/packages/tracker-client/src/http/url_encoding.rs @@ -31,7 +31,7 @@ use torrust_tracker_primitives::peer; /// /// ```rust /// use std::str::FromStr; -/// use torrust_tracker::servers::http::percent_encoding::percent_decode_info_hash; +/// use bittorrent_tracker_client::http::url_encoding::percent_decode_info_hash; /// use bittorrent_primitives::info_hash::InfoHash; /// use torrust_tracker_primitives::peer; /// @@ -64,7 +64,7 @@ pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result Date: Wed, 13 Nov 2024 10:52:25 +0000 Subject: [PATCH 020/802] fix: add missing package in deployment workflow --- .github/workflows/deployment.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 
7f458cda2..59913d476 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -55,6 +55,7 @@ jobs: env: CARGO_REGISTRY_TOKEN: "${{ secrets.TORRUST_UPDATE_CARGO_REGISTRY_TOKEN }}" run: | + cargo publish -p bittorrent-tracker-client cargo publish -p torrust-tracker cargo publish -p torrust-tracker-client cargo publish -p torrust-tracker-clock From 33980246bdb74a625139fe9fe356cb4c3a914699 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 13 Nov 2024 11:22:02 +0000 Subject: [PATCH 021/802] fix: tracker checker execution in CI --- src/console/ci/e2e/tracker_checker.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/console/ci/e2e/tracker_checker.rs b/src/console/ci/e2e/tracker_checker.rs index b4c2544ee..a39e68c93 100644 --- a/src/console/ci/e2e/tracker_checker.rs +++ b/src/console/ci/e2e/tracker_checker.rs @@ -8,13 +8,13 @@ use std::process::Command; /// Will return an error if the Tracker Checker fails. pub fn run(config_content: &str) -> io::Result<()> { tracing::info!( - "Running Tracker Checker: TORRUST_CHECKER_CONFIG=[config] cargo run -p bittorrent-tracker-client --bin tracker_checker" + "Running Tracker Checker: TORRUST_CHECKER_CONFIG=[config] cargo run -p torrust-tracker-client --bin tracker_checker" ); tracing::info!("Tracker Checker config:\n{config_content}"); let status = Command::new("cargo") .env("TORRUST_CHECKER_CONFIG", config_content) - .args(["run", "-p", "bittorrent-tracker-client", "--bin", "tracker_checker"]) + .args(["run", "-p", "torrust-tracker-client", "--bin", "tracker_checker"]) .status()?; if status.success() { From 4007530a4c773c48c9b5cfa38fb29d53503ff388 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 15 Nov 2024 07:03:30 +0000 Subject: [PATCH 022/802] chore(deps): update depencencies ```console cargo update Updating crates.io index Locking 59 packages to latest compatible versions Updating allocator-api2 v0.2.18 -> v0.2.20 Updating anstream v0.6.17 -> v0.6.18 Updating 
anstyle v1.0.9 -> v1.0.10 Updating anyhow v1.0.92 -> v1.0.93 Updating async-io v2.3.4 -> v2.4.0 Updating borsh v1.5.1 -> v1.5.3 Updating borsh-derive v1.5.1 -> v1.5.3 Updating cc v1.1.31 -> v1.2.1 Updating clap v4.5.20 -> v4.5.21 Updating clap_builder v4.5.20 -> v4.5.21 Updating clap_lex v0.7.2 -> v0.7.3 Updating cpufeatures v0.2.14 -> v0.2.15 Adding displaydoc v0.2.5 Updating fastrand v2.1.1 -> v2.2.0 Updating flate2 v1.0.34 -> v1.0.35 Updating futures-lite v2.4.0 -> v2.5.0 Updating hashbrown v0.15.0 -> v0.15.1 Removing heck v0.4.1 Adding icu_collections v1.5.0 Adding icu_locid v1.5.0 Adding icu_locid_transform v1.5.0 Adding icu_locid_transform_data v1.5.0 Adding icu_normalizer v1.5.0 Adding icu_normalizer_data v1.5.0 Adding icu_properties v1.5.1 Adding icu_properties_data v1.5.0 Adding icu_provider v1.5.0 Adding icu_provider_macros v1.5.0 Updating idna v0.5.0 -> v1.0.3 Adding idna_adapter v1.2.0 Updating libc v0.2.161 -> v0.2.162 Adding litemap v0.7.3 Updating mysql-common-derive v0.31.1 -> v0.31.2 Updating polling v3.7.3 -> v3.7.4 Removing proc-macro-error v1.0.4 Removing proc-macro-error-attr v1.0.4 Adding proc-macro-error-attr2 v2.0.0 Adding proc-macro-error2 v2.0.1 Updating regex-automata v0.4.8 -> v0.4.9 Updating rustix v0.38.38 -> v0.38.40 Updating security-framework-sys v2.12.0 -> v2.12.1 Updating serde v1.0.214 -> v1.0.215 Updating serde_derive v1.0.214 -> v1.0.215 Adding stable_deref_trait v1.2.0 Updating syn v2.0.86 -> v2.0.87 Removing syn_derive v0.1.8 Adding synstructure v0.13.1 Updating tempfile v3.13.0 -> v3.14.0 Updating thiserror v1.0.66 -> v1.0.69 (available: v2.0.3) Updating thiserror-impl v1.0.66 -> v1.0.69 Adding tinystr v0.7.6 Updating tokio v1.41.0 -> v1.41.1 Removing unicode-bidi v0.3.17 Removing unicode-normalization v0.1.24 Updating url v2.5.2 -> v2.5.3 Adding utf16_iter v1.0.5 Adding utf8_iter v1.0.4 Adding write16 v1.0.0 Adding writeable v0.5.5 Adding yoke v0.7.4 Adding yoke-derive v0.7.4 Adding zerofrom v0.1.4 Adding zerofrom-derive 
v0.1.4 Adding zerovec v0.10.4 Adding zerovec-derive v0.10.3 ``` --- Cargo.lock | 516 ++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 372 insertions(+), 144 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bbb012cea..20de3d0dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -66,9 +66,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.18" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" +checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9" [[package]] name = "android-tzdata" @@ -93,9 +93,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.17" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23a1e53f0f5d86382dafe1cf314783b2044280f406e7e1506368220ad11b1338" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" dependencies = [ "anstyle", "anstyle-parse", @@ -108,9 +108,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8365de52b16c035ff4fcafe0092ba9390540e3e352870ac09933bebcaa2c8c56" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anstyle-parse" @@ -142,9 +142,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.92" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74f37166d7d48a0284b99dd824694c26119c700b53bf0d1540cdb147dbdaaf13" +checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" [[package]] name = "aquatic_peer_id" @@ -264,9 +264,9 @@ dependencies = [ [[package]] name = "async-io" -version = "2.3.4" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" +checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" dependencies = [ "async-lock", "cfg-if", @@ -333,7 +333,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -482,7 +482,7 @@ checksum = "57d123550fa8d071b7255cb0cc04dc302baa6c8c4a79f55701552684d8399bce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -574,7 +574,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.86", + "syn 2.0.87", "which", ] @@ -593,7 +593,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -675,9 +675,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.5.1" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" +checksum = "2506947f73ad44e344215ccd6403ac2ae18cd8e046e581a441bf8d199f257f03" dependencies = [ "borsh-derive", "cfg_aliases", @@ -685,16 +685,15 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.5.1" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" +checksum = "c2593a3b8b938bd68373196c9832f516be11fa487ef4ae745eb282e6a56a7244" dependencies = [ "once_cell", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.86", - "syn_derive", + "syn 2.0.87", ] [[package]] @@ -805,9 +804,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.31" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e7962b54006dcfcc61cb72735f4d89bb97061dd6a7ed882ec6b8ee53714c6f" +checksum = "fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47" dependencies = [ "jobserver", "libc", 
@@ -888,9 +887,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.20" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" +checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" dependencies = [ "clap_builder", "clap_derive", @@ -898,9 +897,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.20" +version = "4.5.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" +checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" dependencies = [ "anstream", "anstyle", @@ -914,17 +913,17 @@ version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] name = "clap_lex" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" +checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7" [[package]] name = "cmake" @@ -981,9 +980,9 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" +checksum = "0ca741a962e1b0bff6d724a1a0958b686406e853bb14061f218562e1896f95e6" dependencies = [ "libc", ] @@ -1138,7 +1137,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -1149,7 +1148,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" 
dependencies = [ "darling_core", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -1193,7 +1192,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", "unicode-xid", ] @@ -1205,7 +1204,7 @@ checksum = "65f152f4b8559c4da5d574bafc7af85454d706b4c5fe8b530d508cacbb6807ea" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -1218,6 +1217,17 @@ dependencies = [ "crypto-common", ] +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "downcast" version = "0.11.0" @@ -1312,9 +1322,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" +checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" [[package]] name = "figment" @@ -1334,9 +1344,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.34" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" +checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" dependencies = [ "crc32fast", "libz-sys", @@ -1424,7 +1434,7 @@ checksum = "e99b8b3c28ae0e84b604c75f721c21dc77afb3706076af5e8216d15fd1deaae3" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -1436,7 +1446,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -1448,7 +1458,7 @@ dependencies = [ 
"frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -1513,9 +1523,9 @@ checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f1fa2f9765705486b33fd2acf1577f8ec449c2ba1f318ae5447697b7c08d210" +checksum = "cef40d21ae2c515b51041df9ed313ed21e572df340ea58a922a0aefe7e8891a1" dependencies = [ "fastrand", "futures-core", @@ -1532,7 +1542,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -1665,9 +1675,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" dependencies = [ "allocator-api2", "equivalent", @@ -1683,12 +1693,6 @@ dependencies = [ "hashbrown 0.14.5", ] -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - [[package]] name = "heck" version = "0.5.0" @@ -1870,6 +1874,124 @@ dependencies = [ "cc", ] +[[package]] +name = "icu_collections" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locid" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + 
"writeable", + "zerovec", +] + +[[package]] +name = "icu_locid_transform" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +dependencies = [ + "displaydoc", + "icu_locid", + "icu_locid_transform_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_locid_transform_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" + +[[package]] +name = "icu_normalizer" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "utf16_iter", + "utf8_iter", + "write16", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" + +[[package]] +name = "icu_properties" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +dependencies = [ + "displaydoc", + "icu_collections", + "icu_locid_transform", + "icu_properties_data", + "icu_provider", + "tinystr", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" + +[[package]] +name = "icu_provider" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +dependencies = [ + "displaydoc", + "icu_locid", 
+ "icu_provider_macros", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_provider_macros" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -1878,12 +2000,23 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.5.0" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "icu_normalizer", + "icu_properties", ] [[package]] @@ -1904,7 +2037,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.15.0", + "hashbrown 0.15.1", "serde", ] @@ -2020,9 +2153,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.161" +version = "0.2.162" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1" +checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" [[package]] name = "libloading" @@ -2068,6 +2201,12 @@ version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +[[package]] +name = "litemap" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" + [[package]] name = "local-ip-address" version = "0.6.3" @@ -2105,7 +2244,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.0", + "hashbrown 0.15.1", ] [[package]] @@ -2182,7 +2321,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -2221,18 +2360,18 @@ dependencies = [ [[package]] name = "mysql-common-derive" -version = "0.31.1" +version = "0.31.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afe0450cc9344afff34915f8328600ab5ae19260802a334d0f72d2d5bdda3bfe" +checksum = "63c3512cf11487168e0e9db7157801bf5273be13055a9cc95356dc9e0035e49c" dependencies = [ "darling", - "heck 0.4.1", + "heck", "num-bigint", "proc-macro-crate", - "proc-macro-error", + "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", "termcolor", "thiserror", ] @@ -2431,7 +2570,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -2513,7 +2652,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -2587,7 +2726,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -2649,9 +2788,9 @@ dependencies = [ [[package]] name = "polling" -version = "3.7.3" +version = "3.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cc2790cd301dec6cd3b7a025e4815cf825724a51c98dccfe6a3e55f05ffb6511" +checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" dependencies = [ "cfg-if", "concurrent-queue", @@ -2716,7 +2855,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -2729,27 +2868,25 @@ dependencies = [ ] [[package]] -name = "proc-macro-error" -version = "1.0.4" +name = "proc-macro-error-attr2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" dependencies = [ - "proc-macro-error-attr", "proc-macro2", "quote", - "syn 1.0.109", - "version_check", ] [[package]] -name = "proc-macro-error-attr" -version = "1.0.4" +name = "proc-macro-error2" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" dependencies = [ + "proc-macro-error-attr2", "proc-macro2", "quote", - "version_check", + "syn 2.0.87", ] [[package]] @@ -2769,7 +2906,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", "version_check", "yansi", ] @@ -2925,9 +3062,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" dependencies = [ "aho-corasick", "memchr", @@ -3078,7 +3215,7 @@ dependencies = [ "regex", 
"relative-path", "rustc_version", - "syn 2.0.86", + "syn 2.0.87", "unicode-ident", ] @@ -3135,9 +3272,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.38" +version = "0.38.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa260229e6538e52293eeb577aabd09945a09d6d9cc0fc550ed7529056c2e32a" +checksum = "99e4ea3e1cdc4b559b8e5650f9c8e5998e3e5c1343b4eaf034565f32318d63c0" dependencies = [ "bitflags", "errno", @@ -3259,9 +3396,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.0" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" +checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" dependencies = [ "core-foundation-sys", "libc", @@ -3275,9 +3412,9 @@ checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" -version = "1.0.214" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] @@ -3303,13 +3440,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.214" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -3356,7 +3493,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -3407,7 +3544,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] 
[[package]] @@ -3499,6 +3636,12 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "static_assertions" version = "1.1.0" @@ -3540,27 +3683,15 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.86" +version = "2.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89275301d38033efb81a6e60e3497e734dfcc62571f2854bf4b16690398824c" +checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] -[[package]] -name = "syn_derive" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" -dependencies = [ - "proc-macro-error", - "proc-macro2", - "quote", - "syn 2.0.86", -] - [[package]] name = "sync_wrapper" version = "0.1.2" @@ -3576,6 +3707,17 @@ dependencies = [ "futures-core", ] +[[package]] +name = "synstructure" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "system-configuration" version = "0.6.1" @@ -3622,9 +3764,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.13.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ "cfg-if", "fastrand", @@ -3650,22 +3792,22 @@ checksum = 
"3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.66" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d171f59dbaa811dbbb1aee1e73db92ec2b122911a48e1390dfe327a821ddede" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.66" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b08be0f17bd307950653ce45db00cd31200d82b624b36e181337d9c7d92765b5" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -3709,6 +3851,16 @@ dependencies = [ "time-core", ] +[[package]] +name = "tinystr" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinytemplate" version = "1.2.1" @@ -3736,9 +3888,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.41.0" +version = "1.41.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "145f3413504347a2be84393cc8a7d2fb4d863b375909ea59f2158261aa258bbb" +checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" dependencies = [ "backtrace", "bytes", @@ -3759,7 +3911,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -4086,7 +4238,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", ] [[package]] @@ -4169,27 +4321,12 @@ dependencies = [ "version_check", 
] -[[package]] -name = "unicode-bidi" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" - [[package]] name = "unicode-ident" version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" -[[package]] -name = "unicode-normalization" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" -dependencies = [ - "tinyvec", -] - [[package]] name = "unicode-xid" version = "0.2.6" @@ -4204,9 +4341,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.2" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "8d157f1b96d14500ffdc1f10ba712e780825526c03d9a49b4d0324b0d9113ada" dependencies = [ "form_urlencoded", "idna", @@ -4214,6 +4351,18 @@ dependencies = [ "serde", ] +[[package]] +name = "utf16_iter" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "utf8parse" version = "0.2.2" @@ -4301,7 +4450,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", "wasm-bindgen-shared", ] @@ -4335,7 +4484,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4529,6 
+4678,18 @@ dependencies = [ "memchr", ] +[[package]] +name = "write16" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" + +[[package]] +name = "writeable" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" + [[package]] name = "wyz" version = "0.5.1" @@ -4544,6 +4705,30 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" +[[package]] +name = "yoke" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", + "synstructure", +] + [[package]] name = "zerocopy" version = "0.7.35" @@ -4562,7 +4747,28 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.86", + "syn 2.0.87", +] + +[[package]] +name = "zerofrom" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", + "synstructure", ] [[package]] @@ 
-4571,6 +4777,28 @@ version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +[[package]] +name = "zerovec" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "zstd" version = "0.13.2" From fcef4aadb5ee7300acfaddc1ed25f398dda27de2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 15 Nov 2024 07:42:09 +0000 Subject: [PATCH 023/802] chore(deps): bump thiserror from 1.0.66 to 2.0.3 --- Cargo.lock | 46 +++++++++++++++++++++++++++++++++------------- Cargo.toml | 2 +- 2 files changed, 34 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 20de3d0dc..be96e6580 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -612,7 +612,7 @@ dependencies = [ "binascii", "serde", "serde_json", - "thiserror", + "thiserror 1.0.69", "zerocopy", ] @@ -630,7 +630,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_repr", - "thiserror", + "thiserror 1.0.69", "tokio", "torrust-tracker-configuration", "torrust-tracker-located-error", @@ -1396,7 +1396,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8835f84f38484cc86f110a805655697908257fb9a7af005234060891557198e9" dependencies = [ "nonempty", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -2215,7 +2215,7 @@ checksum = "3669cf5561f8d27e8fc84cc15e58350e70f557d4d65f70e3154e54cd2f8e1782" dependencies = [ "libc", "neli", - "thiserror", + "thiserror 1.0.69", "windows-sys 0.59.0", ] @@ -2373,7 +2373,7 @@ dependencies = [ 
"quote", "syn 2.0.87", "termcolor", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -2409,7 +2409,7 @@ dependencies = [ "sha2", "smallvec", "subprocess", - "thiserror", + "thiserror 1.0.69", "time", "uuid", "zstd", @@ -3796,7 +3796,16 @@ version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +dependencies = [ + "thiserror-impl 2.0.3", ] [[package]] @@ -3810,6 +3819,17 @@ dependencies = [ "syn 2.0.87", ] +[[package]] +name = "thiserror-impl" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "thread_local" version = "1.1.8" @@ -4026,7 +4046,7 @@ dependencies = [ "serde_json", "serde_repr", "serde_with", - "thiserror", + "thiserror 2.0.3", "tokio", "torrust-tracker-clock", "torrust-tracker-configuration", @@ -4061,7 +4081,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "torrust-tracker-configuration", "tracing", @@ -4088,7 +4108,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror", + "thiserror 1.0.69", "toml", "torrust-tracker-located-error", "url", @@ -4100,14 +4120,14 @@ name = "torrust-tracker-contrib-bencode" version = "3.0.0-develop" dependencies = [ "criterion", - "thiserror", + "thiserror 1.0.69", ] [[package]] name = "torrust-tracker-located-error" version = "3.0.0-develop" dependencies = [ - "thiserror", + "thiserror 1.0.69", "tracing", ] @@ -4122,7 +4142,7 @@ dependencies = [ "serde", "tdyne-peer-id", 
"tdyne-peer-id-registry", - "thiserror", + "thiserror 1.0.69", "zerocopy", ] diff --git a/Cargo.toml b/Cargo.toml index bc772d08a..f9e7eff3b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -68,7 +68,7 @@ serde_bytes = "0" serde_json = { version = "1", features = ["preserve_order"] } serde_repr = "0" serde_with = { version = "3", features = ["json"] } -thiserror = "1" +thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/configuration" } From 4dd6659abc264f752997bff3bf8bd528bc517a6a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 15 Nov 2024 08:21:05 +0000 Subject: [PATCH 024/802] ci: [#1075] remove current coverage workflow --- .github/workflows/coverage.yaml | 85 --------------------------------- 1 file changed, 85 deletions(-) delete mode 100644 .github/workflows/coverage.yaml diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml deleted file mode 100644 index 28c1be6d0..000000000 --- a/.github/workflows/coverage.yaml +++ /dev/null @@ -1,85 +0,0 @@ -name: Coverage - -on: - push: - branches: - - develop - pull_request_target: - branches: - - develop - -env: - CARGO_TERM_COLOR: always - -jobs: - report: - name: Report - environment: coverage - runs-on: ubuntu-latest - env: - CARGO_INCREMENTAL: "0" - RUSTFLAGS: "-Z profile -C codegen-units=1 -C opt-level=0 -C link-dead-code -C overflow-checks=off -Z panic_abort_tests -C panic=abort" - RUSTDOCFLAGS: "-Z profile -C codegen-units=1 -C opt-level=0 -C link-dead-code -C overflow-checks=off -Z panic_abort_tests -C panic=abort" - - steps: - - id: checkout_push - if: github.event_name == 'push' - name: Checkout Repository (Push) - uses: actions/checkout@v4 - - - id: checkout_pull_request_target - if: github.event_name == 'pull_request_target' - name: Checkout Repository (Pull Request 
Target) - uses: actions/checkout@v4 - with: - ref: "refs/pull/${{ github.event.pull_request.number }}/head" - - - id: setup - name: Setup Toolchain - uses: dtolnay/rust-toolchain@nightly - with: - toolchain: nightly - components: llvm-tools-preview - - - id: cache - name: Enable Workflow Cache - uses: Swatinem/rust-cache@v2 - - - id: tools - name: Install Tools - uses: taiki-e/install-action@v2 - with: - tool: grcov - - - id: check - name: Run Build Checks - run: cargo check --tests --benches --examples --workspace --all-targets --all-features - - - id: clean - name: Clean Build Directory - run: cargo clean - - - id: build - name: Pre-build Main Project - run: cargo build --workspace --all-targets --all-features --jobs 2 - - - id: build_tests - name: Pre-build Tests - run: cargo build --workspace --all-targets --all-features --tests --jobs 2 - - - id: test - name: Run Unit Tests - run: cargo test --tests --workspace --all-targets --all-features - - - id: coverage - name: Generate Coverage Report - uses: alekitto/grcov@v0.2 - - - id: upload - name: Upload Coverage Report - uses: codecov/codecov-action@v3 - with: - token: ${{ secrets.CODECOV_TOKEN }} - files: ${{ steps.coverage.outputs.report }} - verbose: true - fail_ci_if_error: true From 9d8174df6f0913abd65a90538619f9036cb38a13 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 15 Nov 2024 08:22:22 +0000 Subject: [PATCH 025/802] ci: [#1075] fix coverage report --- .cargo/config.toml | 1 + .github/workflows/generate_coverage.yaml | 87 +++++++++++++++++ .github/workflows/upload_coverage.yaml | 119 +++++++++++++++++++++++ .gitignore | 6 +- 4 files changed, 211 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/generate_coverage.yaml create mode 100644 .github/workflows/upload_coverage.yaml diff --git a/.cargo/config.toml b/.cargo/config.toml index a88db5f38..28cde74ec 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,6 +1,7 @@ [alias] cov = "llvm-cov" cov-lcov = "llvm-cov --lcov 
--output-path=./.coverage/lcov.info" +cov-codecov = "llvm-cov --codecov --output-path=./.coverage/codecov.json" cov-html = "llvm-cov --html" time = "build --timings --all-targets" diff --git a/.github/workflows/generate_coverage.yaml b/.github/workflows/generate_coverage.yaml new file mode 100644 index 000000000..8de299c74 --- /dev/null +++ b/.github/workflows/generate_coverage.yaml @@ -0,0 +1,87 @@ +name: Generate Coverage Report + +on: + push: + branches: + - develop + pull_request: + branches: + - develop + +env: + CARGO_TERM_COLOR: always + +jobs: + coverage: + name: Generate Coverage Report + environment: coverage + runs-on: ubuntu-latest + env: + CARGO_INCREMENTAL: "0" + RUSTFLAGS: "-Cinstrument-coverage" + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Install LLVM tools + run: sudo apt-get update && sudo apt-get install -y llvm + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@nightly + with: + toolchain: nightly + components: llvm-tools-preview + + - id: cache + name: Enable Workflow Cache + uses: Swatinem/rust-cache@v2 + + - id: tools + name: Install Tools + uses: taiki-e/install-action@v2 + with: + tool: grcov,cargo-llvm-cov + + - id: coverage + name: Generate Coverage Report + run: | + cargo clean + cargo llvm-cov --all-features --workspace --codecov --output-path ./codecov.json + + - name: Store PR number and commit SHA + run: | + echo "Storing PR number ${{ github.event.number }}" + echo "${{ github.event.number }}" > pr_number.txt + + echo "Storing commit SHA ${{ github.event.pull_request.head.sha }}" + echo "${{ github.event.pull_request.head.sha }}" > commit_sha.txt + + # Workaround for https://github.com/orgs/community/discussions/25220 + # Triggered sub-workflow is not able to detect the original commit/PR which is available + # in this workflow. 
+ - name: Store PR number + uses: actions/upload-artifact@v4 + with: + name: pr_number + path: pr_number.txt + + - name: Store commit SHA + uses: actions/upload-artifact@v4 + with: + name: commit_sha + path: commit_sha.txt + + # This stores the coverage report in artifacts. The actual upload to Codecov + # is executed by a different workflow `upload_coverage.yml`. The reason for this + # split is because `on.pull_request` workflows don't have access to secrets. + - name: Store coverage report in artifacts + uses: actions/upload-artifact@v4 + with: + name: codecov_report + path: ./codecov.json + + - run: | + echo "The coverage report was stored in Github artifacts." + echo "It will be uploaded to Codecov using [upload_coverage.yml] workflow shortly." diff --git a/.github/workflows/upload_coverage.yaml b/.github/workflows/upload_coverage.yaml new file mode 100644 index 000000000..b9a65ae7c --- /dev/null +++ b/.github/workflows/upload_coverage.yaml @@ -0,0 +1,119 @@ +name: Upload Coverage Report + +on: + # This workflow is triggered after every successfull execution + # of `Generate Coverage Report` workflow. 
+ workflow_run: + workflows: ["Generate Coverage Report"] + types: + - completed + +permissions: + actions: write + contents: write + issues: write + pull-requests: write + +jobs: + coverage: + name: Upload Coverage Report + environment: coverage + runs-on: ubuntu-latest + steps: + - name: "Download existing coverage report" + id: prepare_report + uses: actions/github-script@v7 + with: + script: | + var fs = require('fs'); + + // List artifacts of the workflow run that triggered this workflow + var artifacts = await github.rest.actions.listWorkflowRunArtifacts({ + owner: context.repo.owner, + repo: context.repo.repo, + run_id: context.payload.workflow_run.id, + }); + + let codecovReport = artifacts.data.artifacts.filter((artifact) => { + return artifact.name == "codecov_report"; + }); + + if (codecovReport.length != 1) { + throw new Error("Unexpected number of {codecov_report} artifacts: " + codecovReport.length); + } + + var download = await github.rest.actions.downloadArtifact({ + owner: context.repo.owner, + repo: context.repo.repo, + artifact_id: codecovReport[0].id, + archive_format: 'zip', + }); + fs.writeFileSync('codecov_report.zip', Buffer.from(download.data)); + + let prNumber = artifacts.data.artifacts.filter((artifact) => { + return artifact.name == "pr_number"; + }); + + if (prNumber.length != 1) { + throw new Error("Unexpected number of {pr_number} artifacts: " + prNumber.length); + } + + var download = await github.rest.actions.downloadArtifact({ + owner: context.repo.owner, + repo: context.repo.repo, + artifact_id: prNumber[0].id, + archive_format: 'zip', + }); + fs.writeFileSync('pr_number.zip', Buffer.from(download.data)); + + let commitSha = artifacts.data.artifacts.filter((artifact) => { + return artifact.name == "commit_sha"; + }); + + if (commitSha.length != 1) { + throw new Error("Unexpected number of {commit_sha} artifacts: " + commitSha.length); + } + + var download = await github.rest.actions.downloadArtifact({ + owner: context.repo.owner, 
+ repo: context.repo.repo, + artifact_id: commitSha[0].id, + archive_format: 'zip', + }); + fs.writeFileSync('commit_sha.zip', Buffer.from(download.data)); + + - id: parse_previous_artifacts + run: | + unzip codecov_report.zip + unzip pr_number.zip + unzip commit_sha.zip + + echo "Detected PR is: $(> "$GITHUB_OUTPUT" + echo "override_commit=$(> "$GITHUB_OUTPUT" + + - name: Checkout repository + uses: actions/checkout@v4 + with: + ref: ${{ steps.parse_previous_artifacts.outputs.override_commit || '' }} + path: repo_root + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v5 + with: + verbose: true + token: ${{ secrets.CODECOV_TOKEN }} + files: ${{ github.workspace }}/codecov.json + fail_ci_if_error: true + # Manual overrides for these parameters are needed because automatic detection + # in codecov-action does not work for non-`pull_request` workflows. + # In `main` branch push, these default to empty strings since we want to run + # the analysis on HEAD. + override_commit: ${{ steps.parse_previous_artifacts.outputs.override_commit || '' }} + override_pr: ${{ steps.parse_previous_artifacts.outputs.override_pr || '' }} + working-directory: ${{ github.workspace }}/repo_root + # Location where coverage report files are searched for + directory: ${{ github.workspace }} diff --git a/.gitignore b/.gitignore index b60b28991..d9087bcff 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ .env +*.code-workspace **/*.rs.bk /.coverage/ /.idea/ @@ -12,5 +13,6 @@ /tracker.* /tracker.toml callgrind.out -perf.data* -*.code-workspace \ No newline at end of file +codecov.json +lcov.info +perf.data* \ No newline at end of file From 0950eb12b042f2e86a19b75e2a3fec1484a3e991 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 15 Nov 2024 11:06:12 +0000 Subject: [PATCH 026/802] ci: [1075] coverage report for push event The new coverage workflows (generate and upload) only work for PRs. We have to keep the old one for push events. 
--- .github/workflows/coverage.yaml | 57 +++++++++++++++++++ ...overage.yaml => generate_coverage_pr.yaml} | 5 +- ..._coverage.yaml => upload_coverage_pr.yaml} | 4 +- 3 files changed, 60 insertions(+), 6 deletions(-) create mode 100644 .github/workflows/coverage.yaml rename .github/workflows/{generate_coverage.yaml => generate_coverage_pr.yaml} (97%) rename .github/workflows/{upload_coverage.yaml => upload_coverage_pr.yaml} (98%) diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml new file mode 100644 index 000000000..e10c5ac66 --- /dev/null +++ b/.github/workflows/coverage.yaml @@ -0,0 +1,57 @@ +name: Coverage + +on: + push: + branches: + - develop + +env: + CARGO_TERM_COLOR: always + +jobs: + report: + name: Generate Coverage Report + environment: coverage + runs-on: ubuntu-latest + env: + CARGO_INCREMENTAL: "0" + RUSTFLAGS: "-Cinstrument-coverage" + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Install LLVM tools + run: sudo apt-get update && sudo apt-get install -y llvm + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@nightly + with: + toolchain: nightly + components: llvm-tools-preview + + - id: cache + name: Enable Workflow Cache + uses: Swatinem/rust-cache@v2 + + - id: tools + name: Install Tools + uses: taiki-e/install-action@v2 + with: + tool: grcov,cargo-llvm-cov + + - id: coverage + name: Generate Coverage Report + run: | + cargo clean + cargo llvm-cov --all-features --workspace --codecov --output-path ./codecov.json + + - id: upload + name: Upload Coverage Report + uses: codecov/codecov-action@v5 + with: + verbose: true + token: ${{ secrets.CODECOV_TOKEN }} + files: ${{ github.workspace }}/codecov.json + fail_ci_if_error: true \ No newline at end of file diff --git a/.github/workflows/generate_coverage.yaml b/.github/workflows/generate_coverage_pr.yaml similarity index 97% rename from .github/workflows/generate_coverage.yaml rename to 
.github/workflows/generate_coverage_pr.yaml index 8de299c74..d1b241b9d 100644 --- a/.github/workflows/generate_coverage.yaml +++ b/.github/workflows/generate_coverage_pr.yaml @@ -1,9 +1,6 @@ -name: Generate Coverage Report +name: Generate Coverage Report (PR) on: - push: - branches: - - develop pull_request: branches: - develop diff --git a/.github/workflows/upload_coverage.yaml b/.github/workflows/upload_coverage_pr.yaml similarity index 98% rename from .github/workflows/upload_coverage.yaml rename to .github/workflows/upload_coverage_pr.yaml index b9a65ae7c..1ed2f7bcc 100644 --- a/.github/workflows/upload_coverage.yaml +++ b/.github/workflows/upload_coverage_pr.yaml @@ -1,10 +1,10 @@ -name: Upload Coverage Report +name: Upload Coverage Report (PR) on: # This workflow is triggered after every successfull execution # of `Generate Coverage Report` workflow. workflow_run: - workflows: ["Generate Coverage Report"] + workflows: ["Generate Coverage Report (PR)"] types: - completed From e3562f0694b78eb3fe4941fea1ad4f85f359e854 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Mon, 18 Nov 2024 11:32:05 +0800 Subject: [PATCH 027/802] udp: symmetric encrypted cookie --- Cargo.lock | 31 + Cargo.toml | 2 + cSpell.json | 3 + packages/clock/src/lib.rs | 11 - packages/clock/src/time_extent/mod.rs | 665 ------------------ .../configuration/src/v2_0_0/udp_tracker.rs | 11 + packages/test-helpers/src/configuration.rs | 2 + src/bootstrap/app.rs | 22 + src/bootstrap/jobs/udp_tracker.rs | 3 +- src/lib.rs | 12 +- src/servers/udp/connection_cookie.rs | 468 ++++++------ src/servers/udp/error.rs | 22 +- src/servers/udp/handlers.rs | 286 ++++++-- src/servers/udp/server/launcher.rs | 9 +- src/servers/udp/server/mod.rs | 4 +- src/servers/udp/server/processor.rs | 25 +- src/servers/udp/server/spawner.rs | 4 +- src/servers/udp/server/states.rs | 10 +- src/shared/crypto/ephemeral_instance_keys.rs | 12 + src/shared/crypto/keys.rs | 192 +++-- tests/servers/udp/environment.rs | 7 +- 21 files 
changed, 711 insertions(+), 1090 deletions(-) delete mode 100644 packages/clock/src/time_extent/mod.rs diff --git a/Cargo.lock b/Cargo.lock index be96e6580..5d07ba62f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -673,6 +673,16 @@ dependencies = [ "piper", ] +[[package]] +name = "blowfish" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e412e2cd0f2b2d93e02543ceae7917b3c70331573df19ee046bcbc35e45e87d7" +dependencies = [ + "byteorder", + "cipher", +] + [[package]] name = "borsh" version = "1.5.3" @@ -874,6 +884,16 @@ dependencies = [ "half", ] +[[package]] +name = "cipher" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" +dependencies = [ + "crypto-common", + "inout", +] + [[package]] name = "clang-sys" version = "1.8.1" @@ -2047,6 +2067,15 @@ version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" +[[package]] +name = "inout" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +dependencies = [ + "generic-array", +] + [[package]] name = "io-enum" version = "1.1.3" @@ -4014,8 +4043,10 @@ dependencies = [ "axum-server", "bittorrent-primitives", "bittorrent-tracker-client", + "blowfish", "camino", "chrono", + "cipher", "clap", "crossbeam-skiplist", "dashmap", diff --git a/Cargo.toml b/Cargo.toml index f9e7eff3b..35b1ac9a7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,8 +38,10 @@ axum-extra = { version = "0", features = ["query"] } axum-server = { version = "0", features = ["tls-rustls"] } bittorrent-primitives = "0.1.0" bittorrent-tracker-client = { version = "3.0.0-develop", path = "packages/tracker-client" } +blowfish = "0" camino = { version = "1", features = ["serde", "serde1"] } chrono = 
{ version = "0", default-features = false, features = ["clock"] } +cipher = "0" clap = { version = "4", features = ["derive", "env"] } crossbeam-skiplist = "0" dashmap = "6" diff --git a/cSpell.json b/cSpell.json index 6a9da0324..e2ecd1bc3 100644 --- a/cSpell.json +++ b/cSpell.json @@ -30,6 +30,7 @@ "canonicalized", "certbot", "chrono", + "ciphertext", "clippy", "codecov", "codegen", @@ -52,6 +53,7 @@ "downloadedi", "dtolnay", "elif", + "endianness", "Eray", "filesd", "flamegraph", @@ -161,6 +163,7 @@ "Trackon", "typenum", "Unamed", + "underflows", "untuple", "uroot", "Vagaa", diff --git a/packages/clock/src/lib.rs b/packages/clock/src/lib.rs index 295d22c16..b7d20620c 100644 --- a/packages/clock/src/lib.rs +++ b/packages/clock/src/lib.rs @@ -26,7 +26,6 @@ pub mod clock; pub mod conv; pub mod static_time; -pub mod time_extent; #[macro_use] extern crate lazy_static; @@ -41,13 +40,3 @@ pub(crate) type CurrentClock = clock::Working; #[cfg(test)] #[allow(dead_code)] pub(crate) type CurrentClock = clock::Stopped; - -/// Working version, for production. -#[cfg(not(test))] -#[allow(dead_code)] -pub(crate) type DefaultTimeExtentMaker = time_extent::WorkingTimeExtentMaker; - -/// Stopped version, for testing. -#[cfg(test)] -#[allow(dead_code)] -pub(crate) type DefaultTimeExtentMaker = time_extent::StoppedTimeExtentMaker; diff --git a/packages/clock/src/time_extent/mod.rs b/packages/clock/src/time_extent/mod.rs deleted file mode 100644 index c51849f21..000000000 --- a/packages/clock/src/time_extent/mod.rs +++ /dev/null @@ -1,665 +0,0 @@ -//! It includes functionality to handle time extents. -//! -//! Time extents are used to represent a duration of time which contains -//! N times intervals of the same duration. -//! -//! Given a duration of: 60 seconds. -//! -//! ```text -//! |------------------------------------------------------------| -//! ``` -//! -//! If we define a **base** duration of `10` seconds, we would have `6` intervals. -//! -//! ```text -//! 
|----------|----------|----------|----------|----------|----------| -//! ^--- 10 seconds -//! ``` -//! -//! Then, You can represent half of the duration (`30` seconds) as: -//! -//! ```text -//! |----------|----------|----------|----------|----------|----------| -//! ^--- 30 seconds -//! ``` -//! -//! `3` times (**multiplier**) the **base** interval (3*10 = 30 seconds): -//! -//! ```text -//! |----------|----------|----------|----------|----------|----------| -//! ^--- 30 seconds (3 units of 10 seconds) -//! ``` -//! -//! Time extents are a way to measure time duration using only one unit of time -//! (**base** duration) repeated `N` times (**multiplier**). -//! -//! Time extents are not clocks in a sense that they do not have a start time. -//! They are not synchronized with the real time. In order to measure time, -//! you need to define a start time for the intervals. -//! -//! For example, we could measure time is "lustrums" (5 years) since the start -//! of the 21st century. The time extent would contains a base 5-year duration -//! and the multiplier. The current "lustrum" (2023) would be 5th one if we -//! start counting "lustrums" at 1. -//! -//! ```text -//! Lustrum 1: 2000-2004 -//! Lustrum 2: 2005-2009 -//! Lustrum 3: 2010-2014 -//! Lustrum 4: 2015-2019 -//! Lustrum 5: 2020-2024 -//! ``` -//! -//! More practically time extents are used to represent number of time intervals -//! since the Unix Epoch. Each interval is typically an amount of seconds. -//! It's specially useful to check expiring dates. For example, you can have an -//! authentication token that expires after 120 seconds. If you divide the -//! current timestamp by 120 you get the number of 2-minute intervals since the -//! Unix Epoch, you can hash that value with a secret key and send it to a -//! client. The client can authenticate by sending the hashed value back to the -//! server. The server can build the same hash and compare it with the one sent -//! by the client. 
The hash would be the same during the 2-minute interval, but -//! it would change after that. This method is one of the methods used by UDP -//! trackers to generate and verify a connection ID, which a a token sent to -//! the client to identify the connection. -use std::num::{IntErrorKind, TryFromIntError}; -use std::time::Duration; - -use crate::clock::{self, Stopped, Working}; - -/// This trait defines the operations that can be performed on a `TimeExtent`. -pub trait Extent: Sized + Default { - type Base; - type Multiplier; - type Product; - - /// It creates a new `TimeExtent`. - fn new(unit: &Self::Base, count: &Self::Multiplier) -> Self; - - /// It increases the `TimeExtent` by a multiplier. - /// - /// # Errors - /// - /// Will return `IntErrorKind` if `add` would overflow the internal `Duration`. - fn increase(&self, add: Self::Multiplier) -> Result; - - /// It decreases the `TimeExtent` by a multiplier. - /// - /// # Errors - /// - /// Will return `IntErrorKind` if `sub` would underflow the internal `Duration`. - fn decrease(&self, sub: Self::Multiplier) -> Result; - - /// It returns the total `Duration` of the `TimeExtent`. - fn total(&self) -> Option>; - - /// It returns the total `Duration` of the `TimeExtent` plus one increment. - fn total_next(&self) -> Option>; -} - -/// The `TimeExtent` base `Duration`, which is the duration of a single interval. -pub type Base = Duration; -/// The `TimeExtent` `Multiplier`, which is the number of `Base` duration intervals. -pub type Multiplier = u64; -/// The `TimeExtent` product, which is the total duration of the `TimeExtent`. -pub type Product = Base; - -/// A `TimeExtent` is a duration of time which contains N times intervals -/// of the same duration. -#[derive(Debug, Default, Hash, PartialEq, Eq)] -pub struct TimeExtent { - pub increment: Base, - pub amount: Multiplier, -} - -/// A zero time extent. It's the additive identity for a `TimeExtent`. 
-pub const ZERO: TimeExtent = TimeExtent { - increment: Base::ZERO, - amount: Multiplier::MIN, -}; - -/// The maximum value for a `TimeExtent`. -pub const MAX: TimeExtent = TimeExtent { - increment: Base::MAX, - amount: Multiplier::MAX, -}; - -impl TimeExtent { - #[must_use] - pub const fn from_sec(seconds: u64, amount: &Multiplier) -> Self { - Self { - increment: Base::from_secs(seconds), - amount: *amount, - } - } -} - -fn checked_duration_from_nanos(time: u128) -> Result { - const NANOS_PER_SEC: u32 = 1_000_000_000; - - let secs = time.div_euclid(u128::from(NANOS_PER_SEC)); - let nanos = time.rem_euclid(u128::from(NANOS_PER_SEC)); - - assert!(nanos < u128::from(NANOS_PER_SEC)); - - match u64::try_from(secs) { - Err(error) => Err(error), - Ok(secs) => Ok(Duration::new(secs, nanos.try_into().unwrap())), - } -} - -impl Extent for TimeExtent { - type Base = Base; - type Multiplier = Multiplier; - type Product = Product; - - fn new(increment: &Self::Base, amount: &Self::Multiplier) -> Self { - Self { - increment: *increment, - amount: *amount, - } - } - - fn increase(&self, add: Self::Multiplier) -> Result { - match self.amount.checked_add(add) { - None => Err(IntErrorKind::PosOverflow), - Some(amount) => Ok(Self { - increment: self.increment, - amount, - }), - } - } - - fn decrease(&self, sub: Self::Multiplier) -> Result { - match self.amount.checked_sub(sub) { - None => Err(IntErrorKind::NegOverflow), - Some(amount) => Ok(Self { - increment: self.increment, - amount, - }), - } - } - - fn total(&self) -> Option> { - self.increment - .as_nanos() - .checked_mul(u128::from(self.amount)) - .map(checked_duration_from_nanos) - } - - fn total_next(&self) -> Option> { - self.increment - .as_nanos() - .checked_mul(u128::from(self.amount) + 1) - .map(checked_duration_from_nanos) - } -} - -/// A `TimeExtent` maker. It's a clock base on time extents. -/// It gives you the time in time extents. 
-pub trait Make: Sized -where - Clock: clock::Time, -{ - /// It gives you the current time extent (with a certain increment) for - /// the current time. It gets the current timestamp front the `Clock`. - /// - /// For example: - /// - /// - If the base increment is `1` second, it will return a time extent - /// whose duration is `1 second` and whose multiplier is the the number - /// of seconds since the Unix Epoch (time extent). - /// - If the base increment is `1` minute, it will return a time extent - /// whose duration is `60 seconds` and whose multiplier is the number of - /// minutes since the Unix Epoch (time extent). - #[must_use] - fn now(increment: &Base) -> Option> { - Clock::now() - .as_nanos() - .checked_div((*increment).as_nanos()) - .map(|amount| match Multiplier::try_from(amount) { - Err(error) => Err(error), - Ok(amount) => Ok(TimeExtent::new(increment, &amount)), - }) - } - - /// Same as [`now`](crate::time_extent::Make::now), but it - /// will add an extra duration to the current time before calculating the - /// time extent. It gives you a time extent for a time in the future. - #[must_use] - fn now_after(increment: &Base, add_time: &Duration) -> Option> { - match Clock::now_add(add_time) { - None => None, - Some(time) => time - .as_nanos() - .checked_div(increment.as_nanos()) - .map(|amount| match Multiplier::try_from(amount) { - Err(error) => Err(error), - Ok(amount) => Ok(TimeExtent::new(increment, &amount)), - }), - } - } - - /// Same as [`now`](crate::time_extent::Make::now), but it - /// will subtract a duration to the current time before calculating the - /// time extent. It gives you a time extent for a time in the past. 
- #[must_use] - fn now_before(increment: &Base, sub_time: &Duration) -> Option> { - match Clock::now_sub(sub_time) { - None => None, - Some(time) => time - .as_nanos() - .checked_div(increment.as_nanos()) - .map(|amount| match Multiplier::try_from(amount) { - Err(error) => Err(error), - Ok(amount) => Ok(TimeExtent::new(increment, &amount)), - }), - } - } -} - -/// A `TimeExtent` maker which makes `TimeExtents`. -/// -/// It's a clock which measures time in `TimeExtents`. -#[derive(Debug)] -pub struct Maker { - clock: std::marker::PhantomData, -} - -/// A `TimeExtent` maker which makes `TimeExtents` from the `Working` clock. -pub type WorkingTimeExtentMaker = Maker; - -/// A `TimeExtent` maker which makes `TimeExtents` from the `Stopped` clock. -pub type StoppedTimeExtentMaker = Maker; - -impl Make for WorkingTimeExtentMaker {} -impl Make for StoppedTimeExtentMaker {} - -#[cfg(test)] -mod test { - use crate::time_extent::TimeExtent; - - const TIME_EXTENT_VAL: TimeExtent = TimeExtent::from_sec(2, &239_812_388_723); - - mod fn_checked_duration_from_nanos { - use std::time::Duration; - - use crate::time_extent::checked_duration_from_nanos; - use crate::time_extent::test::TIME_EXTENT_VAL; - - const NANOS_PER_SEC: u32 = 1_000_000_000; - - #[test] - fn it_should_give_zero_for_zero_input() { - assert_eq!(checked_duration_from_nanos(0).unwrap(), Duration::ZERO); - } - - #[test] - fn it_should_be_the_same_as_duration_implementation_for_u64_numbers() { - assert_eq!( - checked_duration_from_nanos(1_232_143_214_343_432).unwrap(), - Duration::from_nanos(1_232_143_214_343_432) - ); - assert_eq!( - checked_duration_from_nanos(u128::from(u64::MAX)).unwrap(), - Duration::from_nanos(u64::MAX) - ); - } - - #[test] - fn it_should_work_for_some_numbers_larger_than_u64() { - assert_eq!( - checked_duration_from_nanos(u128::from(TIME_EXTENT_VAL.amount) * u128::from(NANOS_PER_SEC)).unwrap(), - Duration::from_secs(TIME_EXTENT_VAL.amount) - ); - } - - #[test] - fn 
it_should_fail_for_numbers_that_are_too_large() { - assert_eq!( - checked_duration_from_nanos(u128::MAX).unwrap_err(), - u64::try_from(u128::MAX).unwrap_err() - ); - } - } - - mod time_extent { - - mod fn_default { - use crate::time_extent::{TimeExtent, ZERO}; - - #[test] - fn it_should_default_initialize_to_zero() { - assert_eq!(TimeExtent::default(), ZERO); - } - } - - mod fn_from_sec { - use crate::time_extent::test::TIME_EXTENT_VAL; - use crate::time_extent::{Multiplier, TimeExtent, ZERO}; - - #[test] - fn it_should_make_empty_for_zero() { - assert_eq!(TimeExtent::from_sec(u64::MIN, &Multiplier::MIN), ZERO); - } - #[test] - fn it_should_make_from_seconds() { - assert_eq!( - TimeExtent::from_sec(TIME_EXTENT_VAL.increment.as_secs(), &TIME_EXTENT_VAL.amount), - TIME_EXTENT_VAL - ); - } - } - - mod fn_new { - use crate::time_extent::test::TIME_EXTENT_VAL; - use crate::time_extent::{Base, Extent, Multiplier, TimeExtent, ZERO}; - - #[test] - fn it_should_make_empty_for_zero() { - assert_eq!(TimeExtent::new(&Base::ZERO, &Multiplier::MIN), ZERO); - } - - #[test] - fn it_should_make_new() { - assert_eq!( - TimeExtent::new(&Base::from_millis(2), &TIME_EXTENT_VAL.amount), - TimeExtent { - increment: Base::from_millis(2), - amount: TIME_EXTENT_VAL.amount - } - ); - } - } - - mod fn_increase { - use std::num::IntErrorKind; - - use crate::time_extent::test::TIME_EXTENT_VAL; - use crate::time_extent::{Extent, TimeExtent, ZERO}; - - #[test] - fn it_should_not_increase_for_zero() { - assert_eq!(ZERO.increase(0).unwrap(), ZERO); - } - - #[test] - fn it_should_increase() { - assert_eq!( - TIME_EXTENT_VAL.increase(50).unwrap(), - TimeExtent { - increment: TIME_EXTENT_VAL.increment, - amount: TIME_EXTENT_VAL.amount + 50, - } - ); - } - - #[test] - fn it_should_fail_when_attempting_to_increase_beyond_bounds() { - assert_eq!(TIME_EXTENT_VAL.increase(u64::MAX), Err(IntErrorKind::PosOverflow)); - } - } - - mod fn_decrease { - use std::num::IntErrorKind; - - use 
crate::time_extent::test::TIME_EXTENT_VAL; - use crate::time_extent::{Extent, TimeExtent, ZERO}; - - #[test] - fn it_should_not_decrease_for_zero() { - assert_eq!(ZERO.decrease(0).unwrap(), ZERO); - } - - #[test] - fn it_should_decrease() { - assert_eq!( - TIME_EXTENT_VAL.decrease(50).unwrap(), - TimeExtent { - increment: TIME_EXTENT_VAL.increment, - amount: TIME_EXTENT_VAL.amount - 50, - } - ); - } - - #[test] - fn it_should_fail_when_attempting_to_decrease_beyond_bounds() { - assert_eq!(TIME_EXTENT_VAL.decrease(u64::MAX), Err(IntErrorKind::NegOverflow)); - } - } - - mod fn_total { - use crate::time_extent::test::TIME_EXTENT_VAL; - use crate::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO}; - - #[test] - fn it_should_be_zero_for_zero() { - assert_eq!(ZERO.total().unwrap().unwrap(), Product::ZERO); - } - - #[test] - fn it_should_give_a_total() { - assert_eq!( - TIME_EXTENT_VAL.total().unwrap().unwrap(), - Product::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) - ); - - assert_eq!( - TimeExtent::new(&Base::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) - .total() - .unwrap() - .unwrap(), - Product::from_secs(TIME_EXTENT_VAL.increment.as_secs() * TIME_EXTENT_VAL.amount) - ); - - assert_eq!( - TimeExtent::new(&Base::from_secs(1), &(u64::MAX)).total().unwrap().unwrap(), - Product::from_secs(u64::MAX) - ); - } - - #[test] - fn it_should_fail_when_too_large() { - assert_eq!(MAX.total(), None); - } - - #[test] - fn it_should_fail_when_product_is_too_large() { - let time_extent = TimeExtent { - increment: MAX.increment, - amount: 2, - }; - assert_eq!( - time_extent.total().unwrap().unwrap_err(), - u64::try_from(u128::MAX).unwrap_err() - ); - } - } - - mod fn_total_next { - use crate::time_extent::test::TIME_EXTENT_VAL; - use crate::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO}; - - #[test] - fn it_should_be_zero_for_zero() { - assert_eq!(ZERO.total_next().unwrap().unwrap(), Product::ZERO); - } - - #[test] - fn 
it_should_give_a_total() { - assert_eq!( - TIME_EXTENT_VAL.total_next().unwrap().unwrap(), - Product::from_secs(TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount + 1)) - ); - - assert_eq!( - TimeExtent::new(&Base::from_millis(2), &(TIME_EXTENT_VAL.amount * 1000)) - .total_next() - .unwrap() - .unwrap(), - Product::new( - TIME_EXTENT_VAL.increment.as_secs() * (TIME_EXTENT_VAL.amount), - Base::from_millis(2).as_nanos().try_into().unwrap() - ) - ); - - assert_eq!( - TimeExtent::new(&Base::from_secs(1), &(u64::MAX - 1)) - .total_next() - .unwrap() - .unwrap(), - Product::from_secs(u64::MAX) - ); - } - - #[test] - fn it_should_fail_when_too_large() { - assert_eq!(MAX.total_next(), None); - } - - #[test] - fn it_should_fail_when_product_is_too_large() { - let time_extent = TimeExtent { - increment: MAX.increment, - amount: 2, - }; - assert_eq!( - time_extent.total_next().unwrap().unwrap_err(), - u64::try_from(u128::MAX).unwrap_err() - ); - } - } - } - - mod make_time_extent { - - mod fn_now { - use torrust_tracker_primitives::DurationSinceUnixEpoch; - - use crate::clock::stopped::Stopped as _; - use crate::time_extent::test::TIME_EXTENT_VAL; - use crate::time_extent::{Base, Make, TimeExtent}; - use crate::{CurrentClock, DefaultTimeExtentMaker}; - - #[test] - fn it_should_give_a_time_extent() { - assert_eq!( - DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), - TimeExtent { - increment: TIME_EXTENT_VAL.increment, - amount: 0 - } - ); - - CurrentClock::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); - - assert_eq!( - DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), - TIME_EXTENT_VAL - ); - } - - #[test] - fn it_should_fail_for_zero() { - assert_eq!(DefaultTimeExtentMaker::now(&Base::ZERO), None); - } - - #[test] - fn it_should_fail_if_amount_exceeds_bounds() { - CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); - assert_eq!( - 
DefaultTimeExtentMaker::now(&Base::from_millis(1)).unwrap().unwrap_err(), - u64::try_from(u128::MAX).unwrap_err() - ); - } - } - - mod fn_now_after { - use std::time::Duration; - - use torrust_tracker_primitives::DurationSinceUnixEpoch; - - use crate::clock::stopped::Stopped as _; - use crate::time_extent::test::TIME_EXTENT_VAL; - use crate::time_extent::{Base, Make}; - use crate::{CurrentClock, DefaultTimeExtentMaker}; - - #[test] - fn it_should_give_a_time_extent() { - assert_eq!( - DefaultTimeExtentMaker::now_after( - &TIME_EXTENT_VAL.increment, - &Duration::from_secs(TIME_EXTENT_VAL.amount * 2) - ) - .unwrap() - .unwrap(), - TIME_EXTENT_VAL - ); - } - - #[test] - fn it_should_fail_for_zero() { - assert_eq!(DefaultTimeExtentMaker::now_after(&Base::ZERO, &Duration::ZERO), None); - - CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); - assert_eq!(DefaultTimeExtentMaker::now_after(&Base::ZERO, &Duration::MAX), None); - } - - #[test] - fn it_should_fail_if_amount_exceeds_bounds() { - CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); - assert_eq!( - DefaultTimeExtentMaker::now_after(&Base::from_millis(1), &Duration::ZERO) - .unwrap() - .unwrap_err(), - u64::try_from(u128::MAX).unwrap_err() - ); - } - } - mod fn_now_before { - use std::time::Duration; - - use torrust_tracker_primitives::DurationSinceUnixEpoch; - - use crate::clock::stopped::Stopped as _; - use crate::time_extent::{Base, Make, TimeExtent}; - use crate::{CurrentClock, DefaultTimeExtentMaker}; - - #[test] - fn it_should_give_a_time_extent() { - CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); - - assert_eq!( - DefaultTimeExtentMaker::now_before( - &Base::from_secs(u64::from(u32::MAX)), - &Duration::from_secs(u64::from(u32::MAX)) - ) - .unwrap() - .unwrap(), - TimeExtent { - increment: Base::from_secs(u64::from(u32::MAX)), - amount: 4_294_967_296 - } - ); - } - - #[test] - fn it_should_fail_for_zero() { - assert_eq!(DefaultTimeExtentMaker::now_before(&Base::ZERO, &Duration::ZERO), None); - 
- assert_eq!(DefaultTimeExtentMaker::now_before(&Base::ZERO, &Duration::MAX), None); - } - - #[test] - fn it_should_fail_if_amount_exceeds_bounds() { - CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); - assert_eq!( - DefaultTimeExtentMaker::now_before(&Base::from_millis(1), &Duration::ZERO) - .unwrap() - .unwrap_err(), - u64::try_from(u128::MAX).unwrap_err() - ); - } - } - } -} diff --git a/packages/configuration/src/v2_0_0/udp_tracker.rs b/packages/configuration/src/v2_0_0/udp_tracker.rs index b3d420d72..0eee87700 100644 --- a/packages/configuration/src/v2_0_0/udp_tracker.rs +++ b/packages/configuration/src/v2_0_0/udp_tracker.rs @@ -1,4 +1,5 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::time::Duration; use serde::{Deserialize, Serialize}; @@ -10,11 +11,17 @@ pub struct UdpTracker { /// system to choose a random port, use port `0`. #[serde(default = "UdpTracker::default_bind_address")] pub bind_address: SocketAddr, + + /// The lifetime of the server-generated connection cookie, that is passed + /// the client as the `ConnectionId`. + #[serde(default = "UdpTracker::default_cookie_lifetime")] + pub cookie_lifetime: Duration, } impl Default for UdpTracker { fn default() -> Self { Self { bind_address: Self::default_bind_address(), + cookie_lifetime: Self::default_cookie_lifetime(), } } } @@ -23,4 +30,8 @@ impl UdpTracker { fn default_bind_address() -> SocketAddr { SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 6969) } + + fn default_cookie_lifetime() -> Duration { + Duration::from_secs(120) + } } diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index dbd8eef9e..acedbc672 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -1,6 +1,7 @@ //! Tracker configuration factories for testing. 
use std::env; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; +use std::time::Duration; use torrust_tracker_configuration::{Configuration, HttpApi, HttpTracker, Threshold, UdpTracker}; @@ -47,6 +48,7 @@ pub fn ephemeral() -> Configuration { let udp_port = 0u16; config.udp_trackers = Some(vec![UdpTracker { bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), udp_port), + cookie_lifetime: Duration::from_secs(120), }]); // Ephemeral socket address for HTTP tracker diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 7c0cf45ac..e106f73cc 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -23,6 +23,7 @@ use crate::bootstrap; use crate::core::services::tracker_factory; use crate::core::Tracker; use crate::shared::crypto::ephemeral_instance_keys; +use crate::shared::crypto::keys::{self, Keeper as _}; /// It loads the configuration from the environment and builds the main domain [`Tracker`] struct. /// @@ -32,6 +33,9 @@ use crate::shared::crypto::ephemeral_instance_keys; #[must_use] #[instrument(skip())] pub fn setup() -> (Configuration, Arc) { + #[cfg(not(test))] + check_seed(); + let configuration = initialize_configuration(); if let Err(e) = configuration.validate() { @@ -45,6 +49,18 @@ pub fn setup() -> (Configuration, Arc) { (configuration, tracker) } +/// checks if the seed is the instance seed in production. +/// +/// # Panics +/// +/// It would panic if the seed is not the instance seed. +pub fn check_seed() { + let seed = keys::Current::get_seed(); + let instance = keys::Instance::get_seed(); + + assert_eq!(seed, instance, "maybe using zeroed see in production!?"); +} + /// It initializes the application with the given configuration. /// /// The configuration may be obtained from the environment (via config file or env vars). 
@@ -69,6 +85,12 @@ pub fn initialize_static() { // Initialize the Ephemeral Instance Random Seed lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + + // Initialize the Ephemeral Instance Random Cipher + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_CIPHER_BLOWFISH); + + // Initialize the Zeroed Cipher + lazy_static::initialize(&ephemeral_instance_keys::ZEROED_TEST_CIPHER_BLOWFISH); } /// It builds the domain tracker diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index ca503aa29..6aab06d4f 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -32,9 +32,10 @@ use crate::servers::udp::UDP_TRACKER_LOG_TARGET; #[instrument(skip(config, tracker, form))] pub async fn start_job(config: &UdpTracker, tracker: Arc, form: ServiceRegistrationForm) -> JoinHandle<()> { let bind_to = config.bind_address; + let cookie_lifetime = config.cookie_lifetime; let server = Server::new(Spawner::new(bind_to)) - .start(tracker, form) + .start(tracker, form, cookie_lifetime) .await .expect("it should be able to start the udp tracker"); diff --git a/src/lib.rs b/src/lib.rs index 5d7c92ae2..d7e4bc5b2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -488,7 +488,7 @@ //! In addition to the production code documentation you can find a lot of //! examples on the integration and unit tests. -use torrust_tracker_clock::{clock, time_extent}; +use torrust_tracker_clock::clock; pub mod app; pub mod bootstrap; @@ -510,13 +510,3 @@ pub(crate) type CurrentClock = clock::Working; #[cfg(test)] #[allow(dead_code)] pub(crate) type CurrentClock = clock::Stopped; - -/// Working version, for production. -#[cfg(not(test))] -#[allow(dead_code)] -pub(crate) type DefaultTimeExtentMaker = time_extent::WorkingTimeExtentMaker; - -/// Stopped version, for testing. 
-#[cfg(test)] -#[allow(dead_code)] -pub(crate) type DefaultTimeExtentMaker = time_extent::StoppedTimeExtentMaker; diff --git a/src/servers/udp/connection_cookie.rs b/src/servers/udp/connection_cookie.rs index 36bf98304..31c6396e8 100644 --- a/src/servers/udp/connection_cookie.rs +++ b/src/servers/udp/connection_cookie.rs @@ -1,339 +1,305 @@ -//! Logic for generating and verifying connection IDs. +//! Module for Generating and Verifying Connection IDs (Cookies) in the UDP Tracker Protocol //! -//! The UDP tracker requires the client to connect to the server before it can -//! send any data. The server responds with a random 64-bit integer that the -//! client must use to identify itself. +//! **Overview:** //! -//! This connection ID is used to avoid spoofing attacks. The client must send -//! the connection ID in all requests to the server. The server will ignore any -//! requests that do not contain the correct connection ID. +//! In the `BitTorrent` UDP tracker protocol, clients initiate communication by obtaining a connection ID from the server. This connection ID serves as a safeguard against IP spoofing and replay attacks, ensuring that only legitimate clients can interact with the tracker. //! -//! The simplest way to implement this would be to generate a random number when -//! the client connects and store it in a hash table. However, this would -//! require the server to store a large number of connection IDs, which would be -//! a waste of memory. Instead, the server generates a connection ID based on -//! the client's IP address and the current time. This allows the server to -//! verify the connection ID without storing it. +//! To maintain a stateless server architecture, this module implements a method for generating and verifying connection IDs based on the client's fingerprint (typically derived from the client's IP address) and the time of issuance, without storing state on the server. //! -//! 
This module implements this method of generating connection IDs. It's the -//! most common way to generate connection IDs. The connection ID is generated -//! using a time based algorithm and it is valid for a certain amount of time -//! (usually two minutes). The connection ID is generated using the following: +//! The connection ID is an encrypted, opaque cookie held by the client. Since the same server that generates the cookie also validates it, endianness is not a concern. //! -//! ```text -//! connection ID = hash(client IP + current time slot + secret seed) -//! ``` +//! **Connection ID Generation Algorithm:** //! -//! Time slots are two minute intervals since the Unix epoch. The secret seed is -//! a random number that is generated when the server starts. And the client IP -//! is used in order generate a unique connection ID for each client. +//! 1. **Issue Time (`issue_at`):** +//! - Obtain a 64-bit floating-point number (`f64`), this number should be a normal number. //! -//! The BEP-15 recommends a two-minute time slot. +//! 2. **Fingerprint:** +//! - Use an 8-byte fingerprint unique to the client (e.g., derived from the client's IP address). //! -//! ```text -//! Timestamp (seconds from Unix epoch): -//! |------------|------------|------------|------------| -//! 0 120 240 360 480 -//! Time slots (two-minutes time extents from Unix epoch): -//! |------------|------------|------------|------------| -//! 0 1 2 3 4 -//! Peer connections: -//! Peer A |-------------------------| -//! Peer B |-------------------------| -//! Peer C |------------------| -//! Peer A connects at timestamp 120 slot 1 -> connection ID will be valid from timestamp 120 to 360 -//! Peer B connects at timestamp 240 slot 2 -> connection ID will be valid from timestamp 240 to 480 -//! Peer C connects at timestamp 180 slot 1 -> connection ID will be valid from timestamp 180 to 360 -//! ``` -//! > **NOTICE**: connection ID is always the same for a given peer -//! 
> (socket address) and time slot. +//! 3. **Assemble Cookie Value:** +//! - Interpret the bytes of `issue_at` as a 64-bit integer (`i64`) without altering the bit pattern. +//! - Similarly, interpret the fingerprint bytes as an `i64`. +//! - Compute the cookie value: +//! ```rust,ignore +//! let cookie_value = issue_at_i64.wrapping_add(fingerprint_i64); +//! ``` +//! - *Note:* Wrapping addition handles potential integer overflows gracefully. //! -//! > **NOTICE**: connection ID will be valid for two time extents, **not two -//! > minutes**. It'll be valid for the the current time extent and the next one. +//! 4. **Encrypt Cookie Value:** +//! - Encrypt `cookie_value` using a symmetric block cipher obtained from `Current::get_cipher()`. +//! - The encrypted `cookie_value` becomes the connection ID sent to the client. //! -//! Refer to [`Connect`](crate::servers::udp#connect) for more information about -//! the connection process. +//! **Connection ID Verification Algorithm:** //! -//! ## Advantages +//! When a client sends a request with a connection ID, the server verifies it using the following steps: //! -//! - It consumes less memory than storing a hash table of connection IDs. -//! - It's easy to implement. -//! - It's fast. +//! 1. **Decrypt Connection ID:** +//! - Decrypt the received connection ID using the same cipher to retrieve `cookie_value`. +//! - *Important:* The decryption is non-authenticated, meaning it does not verify the integrity or authenticity of the ciphertext. The decrypted `cookie_value` can be any byte sequence, including manipulated data. //! -//! ## Disadvantages +//! 2. **Recover Issue Time:** +//! - Interpret the fingerprint bytes as `i64`. +//! - Compute the issue time: +//! ```rust,ignore +//! let issue_at_i64 = cookie_value.wrapping_sub(fingerprint_i64); +//! ``` +//! - *Note:* Wrapping subtraction handles potential integer underflows gracefully. +//! - Reinterpret `issue_at_i64` bytes as an `f64` to get `issue_time`. +//! +//! 3. 
**Validate Issue Time:** +//! - **Handling Arbitrary `issue_time` Values:** +//! - Since the decrypted `cookie_value` may be arbitrary, `issue_time` can be any `f64` value, including special values like `NaN`, positive or negative infinity, and subnormal numbers. +//! - **Validation Steps:** +//! - **Step 1:** Check if `issue_time` is finite using `issue_time.is_finite()`. +//! - If `issue_time` is `NaN` or infinite, it is considered invalid. +//! - **Step 2:** If `issue_time` is finite, perform range checks: +//! - Verify that `min <= issue_time <= max`. +//! - If `issue_time` passes these checks, accept the connection ID; otherwise, reject it with an appropriate error. +//! +//! **Security Considerations:** +//! +//! - **Non-Authenticated Encryption:** +//! - Due to protocol constraints (an 8-byte connection ID), using an authenticated encryption algorithm is not feasible. +//! - As a result, attackers might attempt to forge or manipulate connection IDs. +//! - However, the probability of an arbitrary 64-bit value decrypting to a valid `issue_time` within the acceptable range is extremely low, effectively serving as a form of authentication. +//! +//! - **Handling Special `f64` Values:** +//! - By checking `issue_time.is_finite()`, the implementation excludes `NaN` and infinite values, ensuring that only valid, finite timestamps are considered. +//! +//! - **Probability of Successful Attack:** +//! - Given the narrow valid time window (usually around 2 minutes) compared to the vast range of `f64` values, the chance of successfully guessing a valid `issue_time` is negligible. +//! +//! **Key Points:** +//! +//! - The server maintains a stateless design, reducing resource consumption and complexity. +//! - Wrapping arithmetic ensures that the addition and subtraction of `i64` values are safe from overflow or underflow issues. +//! 
- The validation process is robust against malformed or malicious connection IDs due to stringent checks on the deserialized `issue_time`. +//! - The module leverages existing cryptographic primitives while acknowledging and addressing the limitations imposed by the protocol's specifications. //! -//! - It's not very flexible. The connection ID is only valid for a certain amount of time. -//! - It's not very accurate. The connection ID is valid for more than two minutes. -use std::net::SocketAddr; -use std::panic::Location; - -use aquatic_udp_protocol::ConnectionId; -use torrust_tracker_clock::time_extent::{Extent, TimeExtent}; -use zerocopy::network_endian::I64; -use zerocopy::AsBytes; - -use super::error::Error; - -pub type Cookie = [u8; 8]; - -pub type SinceUnixEpochTimeExtent = TimeExtent; - -pub const COOKIE_LIFETIME: TimeExtent = TimeExtent::from_sec(2, &60); -/// Converts a connection ID into a connection cookie. -#[must_use] -pub fn from_connection_id(connection_id: &ConnectionId) -> Cookie { - let mut cookie = [0u8; 8]; - connection_id.write_to(&mut cookie); - cookie -} +use aquatic_udp_protocol::ConnectionId as Cookie; +use cookie_builder::{assemble, decode, disassemble, encode}; +use zerocopy::AsBytes; -/// Converts a connection cookie into a connection ID. -#[must_use] -pub fn into_connection_id(connection_cookie: &Cookie) -> ConnectionId { - ConnectionId(I64::new(i64::from_be_bytes(*connection_cookie))) -} +use super::error::{self, Error}; +use crate::shared::crypto::keys::CipherArrayBlowfish; /// Generates a new connection cookie. -#[must_use] -pub fn make(remote_address: &SocketAddr) -> Cookie { - let time_extent = cookie_builder::get_last_time_extent(); +/// +/// # Errors +/// +/// It would error if the supplied `issue_at` value is a zero, infinite, subnormal, or NaN. +/// +/// # Panics +/// +/// It would panic if the cookie is not exactly 8 bytes is size. 
+/// +pub fn make(fingerprint: u64, issue_at: f64) -> Result { + if !issue_at.is_normal() { + return Err(Error::InvalidCookieIssueTime { invalid_value: issue_at }); + } - //println!("remote_address: {remote_address:?}, time_extent: {time_extent:?}, cookie: {cookie:?}"); - cookie_builder::build(remote_address, &time_extent) + let cookie = assemble(fingerprint, issue_at); + let cookie = encode(cookie); + + // using `read_from` as the array may be not correctly aligned + Ok(zerocopy::FromBytes::read_from(cookie.as_slice()).expect("it should be the same size")) } /// Checks if the supplied `connection_cookie` is valid. /// -/// # Panics +/// # Errors /// -/// It would panic if the `COOKIE_LIFETIME` constant would be an unreasonably large number. +/// It would error if the connection cookie is somehow invalid or expired. /// -/// # Errors +/// # Panics /// -/// Will return a `ServerError::InvalidConnectionId` if the supplied `connection_cookie` fails to verify. -pub fn check(remote_address: &SocketAddr, connection_cookie: &Cookie) -> Result { - // we loop backwards testing each time_extent until we find one that matches. - // (or the lifetime of time_extents is exhausted) - for offset in 0..=COOKIE_LIFETIME.amount { - let checking_time_extent = cookie_builder::get_last_time_extent().decrease(offset).unwrap(); - - let checking_cookie = cookie_builder::build(remote_address, &checking_time_extent); - //println!("remote_address: {remote_address:?}, time_extent: {checking_time_extent:?}, cookie: {checking_cookie:?}"); - - if *connection_cookie == checking_cookie { - return Ok(checking_time_extent); - } - } - Err(Error::InvalidConnectionId { - location: Location::caller(), - }) -} +/// It would panic if cookie min value is larger than the max value. 
+pub fn check(cookie: &Cookie, fingerprint: u64, min: f64, max: f64) -> Result { + assert!(min < max, "min is larger than max"); -mod cookie_builder { - use std::collections::hash_map::DefaultHasher; - use std::hash::{Hash, Hasher}; - use std::net::SocketAddr; - - use torrust_tracker_clock::time_extent::{Extent, Make, TimeExtent}; - - use super::{Cookie, SinceUnixEpochTimeExtent, COOKIE_LIFETIME}; - use crate::shared::crypto::keys::seeds::{Current, Keeper}; - use crate::DefaultTimeExtentMaker; - - pub(super) fn get_last_time_extent() -> SinceUnixEpochTimeExtent { - DefaultTimeExtentMaker::now(&COOKIE_LIFETIME.increment) - .unwrap() - .unwrap() - .increase(COOKIE_LIFETIME.amount) - .unwrap() - } + let cookie_bytes = CipherArrayBlowfish::from_slice(cookie.0.as_bytes()); + let cookie_bytes = decode(*cookie_bytes); - pub(super) fn build(remote_address: &SocketAddr, time_extent: &TimeExtent) -> Cookie { - let seed = Current::get_seed(); + let issue_time = disassemble(fingerprint, cookie_bytes); - let mut hasher = DefaultHasher::new(); - - remote_address.hash(&mut hasher); - time_extent.hash(&mut hasher); - seed.hash(&mut hasher); + if !issue_time.is_normal() { + return Err(Error::InvalidConnectionId { + bad_id: error::ConnectionCookie(*cookie), + }); + } - hasher.finish().to_le_bytes() + if issue_time < min { + return Err(Error::ConnectionIdExpired { + bad_age: issue_time, + min_age: min, + }); } -} -#[cfg(test)] -mod tests { - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + if issue_time > max { + return Err(Error::ConnectionIdFromFuture { + future_age: issue_time, + max_age: max, + }); + } - use torrust_tracker_clock::clock::stopped::Stopped as _; - use torrust_tracker_clock::clock::{self}; - use torrust_tracker_clock::time_extent::{self, Extent}; + Ok(issue_time) +} - use super::cookie_builder::{self}; - use crate::servers::udp::connection_cookie::{check, make, Cookie, COOKIE_LIFETIME}; +mod cookie_builder { + use cipher::{BlockDecrypt, BlockEncrypt}; + use 
tracing::{instrument, Level}; + use zerocopy::{byteorder, AsBytes as _, NativeEndian}; - // #![feature(const_socketaddr)] - // const REMOTE_ADDRESS_IPV4_ZERO: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + pub type CookiePlainText = CipherArrayBlowfish; + pub type CookieCipherText = CipherArrayBlowfish; - #[test] - fn it_should_make_a_connection_cookie() { - // Note: This constant may need to be updated in the future as the hash - // is not guaranteed to to be stable between versions. - const ID_COOKIE_OLD_HASHER: Cookie = [41, 166, 45, 246, 249, 24, 108, 203]; - const ID_COOKIE_NEW_HASHER: Cookie = [185, 122, 191, 238, 6, 43, 2, 198]; + use crate::shared::crypto::keys::{CipherArrayBlowfish, Current, Keeper}; - clock::Stopped::local_set_to_unix_epoch(); + #[instrument(ret(level = Level::TRACE))] + pub(super) fn assemble(fingerprint: u64, issue_at: f64) -> CookiePlainText { + let issue_at: byteorder::I64 = + *zerocopy::FromBytes::ref_from(&issue_at.to_ne_bytes()).expect("it should be aligned"); + let fingerprint: byteorder::I64 = + *zerocopy::FromBytes::ref_from(&fingerprint.to_ne_bytes()).expect("it should be aligned"); - let cookie = make(&SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)); + let cookie = issue_at.get().wrapping_add(fingerprint.get()); + let cookie: byteorder::I64 = + *zerocopy::FromBytes::ref_from(&cookie.to_ne_bytes()).expect("it should be aligned"); - assert!(cookie == ID_COOKIE_OLD_HASHER || cookie == ID_COOKIE_NEW_HASHER); + *CipherArrayBlowfish::from_slice(cookie.as_bytes()) } - #[test] - fn it_should_make_the_same_connection_cookie_for_the_same_input_data() { - let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let time_extent_zero = time_extent::ZERO; + #[instrument(ret(level = Level::TRACE))] + pub(super) fn disassemble(fingerprint: u64, cookie: CookiePlainText) -> f64 { + let fingerprint: byteorder::I64 = + *zerocopy::FromBytes::ref_from(&fingerprint.to_ne_bytes()).expect("it should 
be aligned"); - let cookie = cookie_builder::build(&remote_address, &time_extent_zero); - let cookie_2 = cookie_builder::build(&remote_address, &time_extent_zero); + // the array may be not aligned, so we read instead of reference. + let cookie: byteorder::I64 = + zerocopy::FromBytes::read_from(cookie.as_bytes()).expect("it should be the same size"); - println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); - println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie_2:?}"); + let issue_time_bytes = cookie.get().wrapping_sub(fingerprint.get()).to_ne_bytes(); - //remote_address: 127.0.0.1:8080, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [212, 9, 204, 223, 176, 190, 150, 153] - //remote_address: 127.0.0.1:8080, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [212, 9, 204, 223, 176, 190, 150, 153] + let issue_time: byteorder::F64 = + *zerocopy::FromBytes::ref_from(&issue_time_bytes).expect("it should be aligned"); - assert_eq!(cookie, cookie_2); + issue_time.get() } - #[test] - fn it_should_make_the_different_connection_cookie_for_different_ip() { - let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let remote_address_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::BROADCAST), 0); - let time_extent_zero = time_extent::ZERO; - - let cookie = cookie_builder::build(&remote_address, &time_extent_zero); - let cookie_2 = cookie_builder::build(&remote_address_2, &time_extent_zero); - - println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); - println!("remote_address: {remote_address_2:?}, time_extent: {time_extent_zero:?}, cookie: {cookie_2:?}"); + #[instrument(ret(level = Level::TRACE))] + pub(super) fn encode(mut cookie: CookiePlainText) -> CookieCipherText { + let cipher = Current::get_cipher_blowfish(); - //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 
}, cookie: [151, 130, 30, 157, 190, 41, 179, 135] - //remote_address: 255.255.255.255:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [217, 87, 239, 178, 182, 126, 66, 166] + cipher.encrypt_block(&mut cookie); - assert_ne!(cookie, cookie_2); + cookie } - #[test] - fn it_should_make_the_different_connection_cookie_for_different_ip_version() { - let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let remote_address_2 = SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0); - let time_extent_zero = time_extent::ZERO; - - let cookie = cookie_builder::build(&remote_address, &time_extent_zero); - let cookie_2 = cookie_builder::build(&remote_address_2, &time_extent_zero); + #[instrument(ret(level = Level::TRACE))] + pub(super) fn decode(mut cookie: CookieCipherText) -> CookiePlainText { + let cipher = Current::get_cipher_blowfish(); - println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); - println!("remote_address: {remote_address_2:?}, time_extent: {time_extent_zero:?}, cookie: {cookie_2:?}"); + cipher.decrypt_block(&mut cookie); - //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] - //remote_address: [::]:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [99, 119, 230, 177, 20, 220, 163, 187] - - assert_ne!(cookie, cookie_2); + cookie } +} - #[test] - fn it_should_make_the_different_connection_cookie_for_different_socket() { - let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let remote_address_2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 1); - let time_extent_zero = time_extent::ZERO; - - let cookie = cookie_builder::build(&remote_address, &time_extent_zero); - let cookie_2 = cookie_builder::build(&remote_address_2, &time_extent_zero); +#[cfg(test)] +mod tests { - println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, 
cookie: {cookie:?}"); - println!("remote_address: {remote_address_2:?}, time_extent: {time_extent_zero:?}, cookie: {cookie_2:?}"); + use super::*; - //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] - //remote_address: 0.0.0.0:1, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [38, 8, 0, 102, 92, 170, 220, 11] + #[test] + fn it_should_make_a_connection_cookie() { + let fingerprint = 1_000_000; + let issue_at = 1000.0; + let cookie = make(fingerprint, issue_at).unwrap().0.get(); - assert_ne!(cookie, cookie_2); + // Expected connection ID derived through experimentation + assert_eq!(cookie.to_le_bytes(), [10, 130, 175, 211, 244, 253, 230, 210]); } #[test] - fn it_should_make_the_different_connection_cookie_for_different_time_extents() { - let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - let time_extent_zero = time_extent::ZERO; - let time_extent_max = time_extent::MAX; - - let cookie = cookie_builder::build(&remote_address, &time_extent_zero); - let cookie_2 = cookie_builder::build(&remote_address, &time_extent_max); + fn it_should_create_same_cookie_for_same_input() { + let fingerprint = 1_000_000; + let issue_at = 1000.0; + let cookie1 = make(fingerprint, issue_at).unwrap(); + let cookie2 = make(fingerprint, issue_at).unwrap(); - println!("remote_address: {remote_address:?}, time_extent: {time_extent_zero:?}, cookie: {cookie:?}"); - println!("remote_address: {remote_address:?}, time_extent: {time_extent_max:?}, cookie: {cookie_2:?}"); - - //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 0ns, amount: 0 }, cookie: [151, 130, 30, 157, 190, 41, 179, 135] - //remote_address: 0.0.0.0:0, time_extent: TimeExtent { increment: 18446744073709551615.999999999s, amount: 18446744073709551615 }, cookie: [87, 111, 109, 125, 182, 206, 3, 201] - - assert_ne!(cookie, cookie_2); + assert_eq!(cookie1, cookie2); } #[test] - fn 
it_should_make_different_cookies_for_the_next_time_extent() { - let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - - let cookie = make(&remote_address); - - clock::Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); - - let cookie_next = make(&remote_address); - - assert_ne!(cookie, cookie_next); + fn it_should_create_different_cookies_for_different_fingerprints() { + let fingerprint1 = 1_000_000; + let fingerprint2 = 2_000_000; + let issue_at = 1000.0; + let cookie1 = make(fingerprint1, issue_at).unwrap(); + let cookie2 = make(fingerprint2, issue_at).unwrap(); + + assert_ne!(cookie1, cookie2); } #[test] - fn it_should_be_valid_for_this_time_extent() { - let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); - - let cookie = make(&remote_address); - - check(&remote_address, &cookie).unwrap(); + fn it_should_create_different_cookies_for_different_issue_times() { + let fingerprint = 1_000_000; + let issue_at1 = 1000.0; + let issue_at2 = 2000.0; + let cookie1 = make(fingerprint, issue_at1).unwrap(); + let cookie2 = make(fingerprint, issue_at2).unwrap(); + + assert_ne!(cookie1, cookie2); } #[test] - fn it_should_be_valid_for_the_next_time_extent() { - let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + fn it_should_validate_a_valid_cookie() { + let fingerprint = 1_000_000; + let issue_at = 1_000_000_000_f64; + let cookie = make(fingerprint, issue_at).unwrap(); - let cookie = make(&remote_address); + let min = issue_at - 10.0; + let max = issue_at + 10.0; - clock::Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); + let result = check(&cookie, fingerprint, min, max).unwrap(); - check(&remote_address, &cookie).unwrap(); + // we should have exactly the same bytes returned + assert_eq!(result.to_ne_bytes(), issue_at.to_ne_bytes()); } #[test] - fn it_should_be_valid_for_the_last_time_extent() { - let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + fn 
it_should_reject_an_expired_cookie() { + let fingerprint = 1_000_000; + let issue_at = 1_000_000_000_f64; + let cookie = make(fingerprint, issue_at).unwrap(); - clock::Stopped::local_set_to_unix_epoch(); + let min = issue_at + 10.0; + let max = issue_at + 20.0; - let cookie = make(&remote_address); + let result = check(&cookie, fingerprint, min, max).unwrap_err(); - clock::Stopped::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); - - check(&remote_address, &cookie).unwrap(); + match result { + Error::ConnectionIdExpired { .. } => {} // Expected error + _ => panic!("Expected ConnectionIdExpired error"), + } } #[test] - #[should_panic = "InvalidConnectionId"] - fn it_should_be_not_valid_after_their_last_time_extent() { - let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + fn it_should_reject_a_cookie_from_the_future() { + let fingerprint = 1_000_000; + let issue_at = 1_000_000_000_f64; - let cookie = make(&remote_address); + let cookie = make(fingerprint, issue_at).unwrap(); - clock::Stopped::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap()); + let min = issue_at - 20.0; + let max = issue_at - 10.0; - check(&remote_address, &cookie).unwrap(); + let result = check(&cookie, fingerprint, min, max).unwrap_err(); + + match result { + Error::ConnectionIdFromFuture { .. } => {} // Expected error + _ => panic!("Expected ConnectionIdFromFuture error"), + } } } diff --git a/src/servers/udp/error.rs b/src/servers/udp/error.rs index 315c9d1cf..8f30b0138 100644 --- a/src/servers/udp/error.rs +++ b/src/servers/udp/error.rs @@ -1,12 +1,30 @@ //! Error types for the UDP server. use std::panic::Location; +use aquatic_udp_protocol::ConnectionId; +use derive_more::derive::Display; use thiserror::Error; use torrust_tracker_located_error::LocatedError; +#[derive(Display, Debug)] +#[display(":?")] +pub struct ConnectionCookie(pub ConnectionId); + /// Error returned by the UDP server. 
#[derive(Error, Debug)] pub enum Error { + #[error("the issue time should be a normal floating point number")] + InvalidCookieIssueTime { invalid_value: f64 }, + + #[error("connection id was decoded, but could not be understood")] + InvalidConnectionId { bad_id: ConnectionCookie }, + + #[error("connection id was decoded, but was expired (too old)")] + ConnectionIdExpired { bad_age: f64, min_age: f64 }, + + #[error("connection id was decoded, but was invalid (from future)")] + ConnectionIdFromFuture { future_age: f64, max_age: f64 }, + /// Error returned when the domain tracker returns an error. #[error("tracker server error: {source}")] TrackerError { @@ -20,10 +38,6 @@ pub enum Error { message: String, }, - /// Error returned when the connection id could not be verified. - #[error("connection id could not be verified")] - InvalidConnectionId { location: &'static Location<'static> }, - /// Error returned when the request is invalid. #[error("bad request: {source}")] BadRequest { diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 6af634c32..ba75ac3c6 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -1,5 +1,6 @@ //! Handlers for the UDP server. use std::fmt; +use std::hash::{DefaultHasher, Hash, Hasher as _}; use std::net::{IpAddr, SocketAddr}; use std::panic::Location; use std::sync::Arc; @@ -16,7 +17,7 @@ use tracing::{instrument, Level}; use uuid::Uuid; use zerocopy::network_endian::I32; -use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; +use super::connection_cookie::{check, make}; use super::RawRequest; use crate::core::{statistics, PeersWanted, ScrapeData, Tracker}; use crate::servers::udp::error::Error; @@ -33,7 +34,14 @@ use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; /// /// It will return an `Error` response if the request is invalid. 
#[instrument(skip(udp_request, tracker, local_addr), ret(level = Level::TRACE))] -pub(crate) async fn handle_packet(udp_request: RawRequest, tracker: &Tracker, local_addr: SocketAddr) -> Response { +pub(crate) async fn handle_packet( + udp_request: RawRequest, + tracker: &Tracker, + local_addr: SocketAddr, + cookie_issue_time: f64, + cookie_expiry_time: f64, + cookie_tolerance_max_time: f64, +) -> Response { tracing::debug!("Handling Packets: {udp_request:?}"); let start_time = Instant::now(); @@ -55,7 +63,16 @@ pub(crate) async fn handle_packet(udp_request: RawRequest, tracker: &Tracker, lo Request::Scrape(scrape_request) => scrape_request.transaction_id, }; - let response = match handle_request(request, udp_request.from, tracker).await { + let response = match handle_request( + request, + udp_request.from, + tracker, + cookie_issue_time, + cookie_expiry_time, + cookie_tolerance_max_time, + ) + .await + { Ok(response) => response, Err(e) => handle_error(&e, transaction_id), }; @@ -89,12 +106,28 @@ pub(crate) async fn handle_packet(udp_request: RawRequest, tracker: &Tracker, lo /// /// If a error happens in the `handle_request` function, it will just return the `ServerError`. 
#[instrument(skip(request, remote_addr, tracker))] -pub async fn handle_request(request: Request, remote_addr: SocketAddr, tracker: &Tracker) -> Result { +pub async fn handle_request( + request: Request, + remote_addr: SocketAddr, + tracker: &Tracker, + cookie_issue_time: f64, + cookie_expiry_time: f64, + cookie_tolerance_max_time: f64, +) -> Result { tracing::trace!("handle request"); match request { - Request::Connect(connect_request) => handle_connect(remote_addr, &connect_request, tracker).await, - Request::Announce(announce_request) => handle_announce(remote_addr, &announce_request, tracker).await, + Request::Connect(connect_request) => handle_connect(remote_addr, &connect_request, tracker, cookie_issue_time).await, + Request::Announce(announce_request) => { + handle_announce( + remote_addr, + &announce_request, + tracker, + cookie_expiry_time, + cookie_tolerance_max_time, + ) + .await + } Request::Scrape(scrape_request) => handle_scrape(remote_addr, &scrape_request, tracker).await, } } @@ -106,11 +139,22 @@ pub async fn handle_request(request: Request, remote_addr: SocketAddr, tracker: /// /// This function does not ever return an error. 
#[instrument(skip(tracker), err, ret(level = Level::TRACE))] -pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, tracker: &Tracker) -> Result { +pub async fn handle_connect( + remote_addr: SocketAddr, + request: &ConnectRequest, + tracker: &Tracker, + cookie_issue_time: f64, +) -> Result { tracing::trace!("handle connect"); - let connection_cookie = make(&remote_addr); - let connection_id = into_connection_id(&connection_cookie); + let connection_id = make( + { + let mut state = DefaultHasher::new(); + remote_addr.hash(&mut state); + state.finish() + }, + cookie_issue_time, + )?; let response = ConnectResponse { transaction_id: request.transaction_id, @@ -141,6 +185,8 @@ pub async fn handle_announce( remote_addr: SocketAddr, announce_request: &AnnounceRequest, tracker: &Tracker, + cookie_expiry_time: f64, + cookie_tolerance_max_time: f64, ) -> Result { tracing::trace!("handle announce"); @@ -151,7 +197,16 @@ pub async fn handle_announce( }); } - check(&remote_addr, &from_connection_id(&announce_request.connection_id))?; + check( + &announce_request.connection_id, + { + let mut state = DefaultHasher::new(); + remote_addr.hash(&mut state); + state.finish() + }, + cookie_expiry_time, + cookie_tolerance_max_time, + )?; let info_hash = announce_request.info_hash.into(); let remote_client_ip = remote_addr.ip(); @@ -313,6 +368,7 @@ impl fmt::Display for RequestId { #[cfg(test)] mod tests { + use std::hash::{DefaultHasher, Hash as _, Hasher as _}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; @@ -350,14 +406,28 @@ mod tests { tracker_factory(configuration).into() } + fn make_remote_addr_fingerprint(remote_addr: &SocketAddr) -> u64 { + let mut state = DefaultHasher::new(); + remote_addr.hash(&mut state); + state.finish() + } + fn sample_ipv4_remote_addr() -> SocketAddr { sample_ipv4_socket_address() } + fn sample_ipv4_remote_addr_fingerprint() -> u64 { + make_remote_addr_fingerprint(&sample_ipv4_socket_address()) + } 
+ fn sample_ipv6_remote_addr() -> SocketAddr { sample_ipv6_socket_address() } + fn sample_ipv6_remote_addr_fingerprint() -> u64 { + make_remote_addr_fingerprint(&sample_ipv6_socket_address()) + } + fn sample_ipv4_socket_address() -> SocketAddr { SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) } @@ -366,6 +436,18 @@ mod tests { SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) } + fn sample_issue_time() -> f64 { + 1_000_000_000_f64 + } + + fn sample_expiry_time() -> f64 { + sample_issue_time() - 10.0 + } + + fn tolerance_max_time() -> f64 { + sample_issue_time() + 10.0 + } + #[derive(Debug, Default)] pub struct TorrentPeerBuilder { peer: peer::Peer, @@ -438,9 +520,12 @@ mod tests { use super::{sample_ipv4_socket_address, sample_ipv6_remote_addr, tracker_configuration}; use crate::core::{self, statistics}; - use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_connect; - use crate::servers::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; + use crate::servers::udp::handlers::tests::{ + public_tracker, sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv6_remote_addr_fingerprint, + sample_issue_time, + }; fn sample_connect_request() -> ConnectRequest { ConnectRequest { @@ -454,14 +539,14 @@ mod tests { transaction_id: TransactionId(0i32.into()), }; - let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker()) + let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker(), sample_issue_time()) .await .unwrap(); assert_eq!( response, Response::Connect(ConnectResponse { - connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), + connection_id: make(sample_ipv4_remote_addr_fingerprint(), sample_issue_time()).unwrap(), transaction_id: request.transaction_id }) ); @@ -473,14 +558,33 @@ mod tests { transaction_id: 
TransactionId(0i32.into()), }; - let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker()) + let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker(), sample_issue_time()) + .await + .unwrap(); + + assert_eq!( + response, + Response::Connect(ConnectResponse { + connection_id: make(sample_ipv4_remote_addr_fingerprint(), sample_issue_time()).unwrap(), + transaction_id: request.transaction_id + }) + ); + } + + #[tokio::test] + async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { + let request = ConnectRequest { + transaction_id: TransactionId(0i32.into()), + }; + + let response = handle_connect(sample_ipv6_remote_addr(), &request, &public_tracker(), sample_issue_time()) .await .unwrap(); assert_eq!( response, Response::Connect(ConnectResponse { - connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), + connection_id: make(sample_ipv6_remote_addr_fingerprint(), sample_issue_time()).unwrap(), transaction_id: request.transaction_id }) ); @@ -506,9 +610,14 @@ mod tests { ) .unwrap(), ); - handle_connect(client_socket_address, &sample_connect_request(), &torrent_tracker) - .await - .unwrap(); + handle_connect( + client_socket_address, + &sample_connect_request(), + &torrent_tracker, + sample_issue_time(), + ) + .await + .unwrap(); } #[tokio::test] @@ -529,9 +638,14 @@ mod tests { ) .unwrap(), ); - handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), &torrent_tracker) - .await - .unwrap(); + handle_connect( + sample_ipv6_remote_addr(), + &sample_connect_request(), + &torrent_tracker, + sample_issue_time(), + ) + .await + .unwrap(); } } @@ -545,8 +659,8 @@ mod tests { PeerId as AquaticPeerId, PeerKey, Port, TransactionId, }; - use crate::servers::udp::connection_cookie::{into_connection_id, make}; - use crate::servers::udp::handlers::tests::sample_ipv4_remote_addr; + use super::{sample_ipv4_remote_addr_fingerprint, sample_issue_time}; + use 
crate::servers::udp::connection_cookie::make; struct AnnounceRequestBuilder { request: AnnounceRequest, @@ -559,7 +673,7 @@ mod tests { let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); let default_request = AnnounceRequest { - connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), + connection_id: make(sample_ipv4_remote_addr_fingerprint(), sample_issue_time()).unwrap(), action_placeholder: AnnounceActionPlaceholder::default(), transaction_id: TransactionId(0i32.into()), info_hash: info_hash_aquatic, @@ -621,10 +735,11 @@ mod tests { use mockall::predicate::eq; use crate::core::{self, statistics}; - use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ - public_tracker, sample_ipv4_socket_address, tracker_configuration, TorrentPeerBuilder, + make_remote_addr_fingerprint, public_tracker, sample_expiry_time, sample_ipv4_socket_address, sample_issue_time, + tolerance_max_time, tracker_configuration, TorrentPeerBuilder, }; use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; @@ -640,14 +755,16 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) .with_port(client_port) .into(); - handle_announce(remote_addr, &request, &tracker).await.unwrap(); + handle_announce(remote_addr, &request, &tracker, sample_expiry_time(), tolerance_max_time()) + .await + .unwrap(); let peers = tracker.get_torrent_peers(&info_hash.0.into()); @@ -664,10 +781,18 @@ mod tests { let remote_addr = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .into(); - let response = handle_announce(remote_addr, &request, &public_tracker()).await.unwrap(); + let response = handle_announce( + remote_addr, + &request, + &public_tracker(), + sample_expiry_time(), + tolerance_max_time(), + ) + .await + .unwrap(); let empty_peer_vector: Vec> = vec![]; assert_eq!( @@ -703,14 +828,16 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) .with_port(client_port) .into(); - handle_announce(remote_addr, &request, &tracker).await.unwrap(); + handle_announce(remote_addr, &request, &tracker, sample_expiry_time(), tolerance_max_time()) + .await + .unwrap(); let peers = tracker.get_torrent_peers(&info_hash.0.into()); @@ -736,10 +863,12 @@ mod tests { async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .into(); - handle_announce(remote_addr, &request, &tracker).await.unwrap() + handle_announce(remote_addr, &request, &tracker, sample_expiry_time(), tolerance_max_time()) + .await + .unwrap() } #[tokio::test] @@ -782,6 +911,8 @@ mod tests { sample_ipv4_socket_address(), &AnnounceRequestBuilder::default().into(), &tracker, + 
sample_expiry_time(), + tolerance_max_time(), ) .await .unwrap(); @@ -793,10 +924,13 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::servers::udp::handlers::tests::{public_tracker, TorrentPeerBuilder}; + use crate::servers::udp::handlers::tests::{ + make_remote_addr_fingerprint, public_tracker, sample_expiry_time, sample_issue_time, tolerance_max_time, + TorrentPeerBuilder, + }; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { @@ -810,14 +944,16 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) .with_port(client_port) .into(); - handle_announce(remote_addr, &request, &tracker).await.unwrap(); + handle_announce(remote_addr, &request, &tracker, sample_expiry_time(), tolerance_max_time()) + .await + .unwrap(); let peers = tracker.get_torrent_peers(&info_hash.0.into()); @@ -846,10 +982,11 @@ mod tests { use mockall::predicate::eq; use crate::core::{self, statistics}; - use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ - public_tracker, sample_ipv6_remote_addr, tracker_configuration, TorrentPeerBuilder, + make_remote_addr_fingerprint, public_tracker, 
sample_expiry_time, sample_ipv6_remote_addr, sample_issue_time, + tolerance_max_time, tracker_configuration, TorrentPeerBuilder, }; use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; @@ -866,14 +1003,16 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) .with_port(client_port) .into(); - handle_announce(remote_addr, &request, &tracker).await.unwrap(); + handle_announce(remote_addr, &request, &tracker, sample_expiry_time(), tolerance_max_time()) + .await + .unwrap(); let peers = tracker.get_torrent_peers(&info_hash.0.into()); @@ -893,10 +1032,18 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .into(); - let response = handle_announce(remote_addr, &request, &public_tracker()).await.unwrap(); + let response = handle_announce( + remote_addr, + &request, + &public_tracker(), + sample_expiry_time(), + tolerance_max_time(), + ) + .await + .unwrap(); let empty_peer_vector: Vec> = vec![]; assert_eq!( @@ -932,14 +1079,16 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) .with_port(client_port) .into(); - handle_announce(remote_addr, 
&request, &tracker).await.unwrap(); + handle_announce(remote_addr, &request, &tracker, sample_expiry_time(), tolerance_max_time()) + .await + .unwrap(); let peers = tracker.get_torrent_peers(&info_hash.0.into()); @@ -968,10 +1117,12 @@ mod tests { let client_port = 8080; let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .into(); - handle_announce(remote_addr, &request, &tracker).await.unwrap() + handle_announce(remote_addr, &request, &tracker, sample_expiry_time(), tolerance_max_time()) + .await + .unwrap() } #[tokio::test] @@ -1013,10 +1164,18 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); let announce_request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .into(); - handle_announce(remote_addr, &announce_request, &tracker).await.unwrap(); + handle_announce( + remote_addr, + &announce_request, + &tracker, + sample_expiry_time(), + tolerance_max_time(), + ) + .await + .unwrap(); } mod from_a_loopback_ip { @@ -1027,10 +1186,13 @@ mod tests { use crate::core; use crate::core::statistics::Keeper; - use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::servers::udp::handlers::tests::TrackerConfigurationBuilder; + use crate::servers::udp::handlers::tests::{ + make_remote_addr_fingerprint, sample_expiry_time, sample_issue_time, tolerance_max_time, + TrackerConfigurationBuilder, + }; #[tokio::test] async fn 
the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { @@ -1052,14 +1214,16 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(into_connection_id(&make(&remote_addr))) + .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) .with_port(client_port) .into(); - handle_announce(remote_addr, &request, &tracker).await.unwrap(); + handle_announce(remote_addr, &request, &tracker, sample_expiry_time(), tolerance_max_time()) + .await + .unwrap(); let peers = tracker.get_torrent_peers(&info_hash.0.into()); @@ -1087,11 +1251,11 @@ mod tests { TransactionId, }; - use super::TorrentPeerBuilder; + use super::{make_remote_addr_fingerprint, TorrentPeerBuilder}; use crate::core::{self}; - use crate::servers::udp::connection_cookie::{into_connection_id, make}; + use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_scrape; - use crate::servers::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; + use crate::servers::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr, sample_issue_time}; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { TorrentScrapeStatistics { @@ -1109,7 +1273,7 @@ mod tests { let info_hashes = vec![info_hash]; let request = ScrapeRequest { - connection_id: into_connection_id(&make(&remote_addr)), + connection_id: make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap(), transaction_id: TransactionId(0i32.into()), info_hashes, }; @@ -1143,7 +1307,7 @@ mod tests { let info_hashes = vec![*info_hash]; ScrapeRequest { - connection_id: into_connection_id(&make(remote_addr)), + connection_id: make(make_remote_addr_fingerprint(remote_addr), sample_issue_time()).unwrap(), transaction_id: TransactionId::new(0i32), 
info_hashes, } @@ -1285,7 +1449,7 @@ mod tests { let info_hashes = vec![info_hash]; ScrapeRequest { - connection_id: into_connection_id(&make(remote_addr)), + connection_id: make(make_remote_addr_fingerprint(remote_addr), sample_issue_time()).unwrap(), transaction_id: TransactionId(0i32.into()), info_hashes, } diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index 7f31d7739..348446876 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -35,6 +35,7 @@ impl Launcher { pub async fn run_with_graceful_shutdown( tracker: Arc, bind_to: SocketAddr, + cookie_lifetime: Duration, tx_start: oneshot::Sender, rx_halt: oneshot::Receiver, ) { @@ -65,7 +66,7 @@ impl Launcher { let local_addr = local_udp_url.clone(); tokio::task::spawn(async move { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_with_graceful_shutdown::task (listening...)"); - let () = Self::run_udp_server_main(receiver, tracker.clone()).await; + let () = Self::run_udp_server_main(receiver, tracker.clone(), cookie_lifetime).await; }) }; @@ -103,14 +104,16 @@ impl Launcher { } #[instrument(skip(receiver, tracker))] - async fn run_udp_server_main(mut receiver: Receiver, tracker: Arc) { + async fn run_udp_server_main(mut receiver: Receiver, tracker: Arc, cookie_lifetime: Duration) { let active_requests = &mut ActiveRequests::default(); let addr = receiver.bound_socket_address(); let local_addr = format!("udp://{addr}"); + let cookie_lifetime = cookie_lifetime.as_secs_f64(); + loop { - let processor = Processor::new(receiver.socket.clone(), tracker.clone()); + let processor = Processor::new(receiver.socket.clone(), tracker.clone(), cookie_lifetime); if let Some(req) = { tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server (wait for request)"); diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index d81624cb2..7067512b6 100644 --- a/src/servers/udp/server/mod.rs +++ 
b/src/servers/udp/server/mod.rs @@ -76,7 +76,7 @@ mod tests { let stopped = Server::new(Spawner::new(bind_to)); let started = stopped - .start(tracker, register.give_form()) + .start(tracker, register.give_form(), config.cookie_lifetime) .await .expect("it should start the server"); @@ -98,7 +98,7 @@ mod tests { let stopped = Server::new(Spawner::new(bind_to)); let started = stopped - .start(tracker, register.give_form()) + .start(tracker, register.give_form(), config.cookie_lifetime) .await .expect("it should start the server"); diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index 9fa28a44d..2ac7f27cd 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -3,26 +3,45 @@ use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::Response; +use torrust_tracker_clock::clock::Time as _; use tracing::{instrument, Level}; use super::bound_socket::BoundSocket; use crate::core::Tracker; use crate::servers::udp::{handlers, RawRequest}; +use crate::CurrentClock; pub struct Processor { socket: Arc, tracker: Arc, + cookie_lifetime: f64, } impl Processor { - pub fn new(socket: Arc, tracker: Arc) -> Self { - Self { socket, tracker } + pub fn new(socket: Arc, tracker: Arc, cookie_lifetime: f64) -> Self { + Self { + socket, + tracker, + cookie_lifetime, + } } #[instrument(skip(self, request))] pub async fn process_request(self, request: RawRequest) { + let cookie_issue_time = CurrentClock::now().as_secs_f64(); + let cookie_expiry_time = cookie_issue_time - self.cookie_lifetime - 1.0; + let cookie_tolerance_max_time = cookie_issue_time + 1.0; + let from = request.from; - let response = handlers::handle_packet(request, &self.tracker, self.socket.address()).await; + let response = handlers::handle_packet( + request, + &self.tracker, + self.socket.address(), + cookie_issue_time, + cookie_expiry_time, + cookie_tolerance_max_time, + ) + .await; self.send_response(from, response).await; } diff 
--git a/src/servers/udp/server/spawner.rs b/src/servers/udp/server/spawner.rs index dea293ad7..acebdcf75 100644 --- a/src/servers/udp/server/spawner.rs +++ b/src/servers/udp/server/spawner.rs @@ -1,6 +1,7 @@ //! A thin wrapper for tokio spawn to launch the UDP server launcher as a new task. use std::net::SocketAddr; use std::sync::Arc; +use std::time::Duration; use derive_more::derive::Display; use derive_more::Constructor; @@ -27,13 +28,14 @@ impl Spawner { pub fn spawn_launcher( &self, tracker: Arc, + cookie_lifetime: Duration, tx_start: oneshot::Sender, rx_halt: oneshot::Receiver, ) -> JoinHandle { let spawner = Self::new(self.bind_to); tokio::spawn(async move { - Launcher::run_with_graceful_shutdown(tracker, spawner.bind_to, tx_start, rx_halt).await; + Launcher::run_with_graceful_shutdown(tracker, spawner.bind_to, cookie_lifetime, tx_start, rx_halt).await; spawner }) } diff --git a/src/servers/udp/server/states.rs b/src/servers/udp/server/states.rs index e90c4da54..8b87c6efb 100644 --- a/src/servers/udp/server/states.rs +++ b/src/servers/udp/server/states.rs @@ -1,6 +1,7 @@ use std::fmt::Debug; use std::net::SocketAddr; use std::sync::Arc; +use std::time::Duration; use derive_more::derive::Display; use derive_more::Constructor; @@ -62,14 +63,19 @@ impl Server { /// It panics if unable to receive the bound socket address from service. /// #[instrument(skip(self, tracker, form), err, ret(Display, level = Level::INFO))] - pub async fn start(self, tracker: Arc, form: ServiceRegistrationForm) -> Result, std::io::Error> { + pub async fn start( + self, + tracker: Arc, + form: ServiceRegistrationForm, + cookie_lifetime: Duration, + ) -> Result, std::io::Error> { let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); assert!(!tx_halt.is_closed(), "Halt channel for UDP tracker should be open"); // May need to wrap in a task to about a tokio bug. 
- let task = self.state.spawner.spawn_launcher(tracker, tx_start, rx_halt); + let task = self.state.spawner.spawn_launcher(tracker, cookie_lifetime, tx_start, rx_halt); let local_addr = rx_start.await.expect("it should be able to start the service").address; diff --git a/src/shared/crypto/ephemeral_instance_keys.rs b/src/shared/crypto/ephemeral_instance_keys.rs index 44283365a..d214b6e6a 100644 --- a/src/shared/crypto/ephemeral_instance_keys.rs +++ b/src/shared/crypto/ephemeral_instance_keys.rs @@ -2,12 +2,24 @@ //! //! They are ephemeral because they are generated at runtime when the //! application starts and are not persisted anywhere. + +use blowfish::BlowfishLE; +use cipher::generic_array::GenericArray; +use cipher::{BlockSizeUser, KeyInit}; use rand::rngs::ThreadRng; use rand::Rng; pub type Seed = [u8; 32]; +pub type CipherBlowfish = BlowfishLE; +pub type CipherArrayBlowfish = GenericArray::BlockSize>; lazy_static! { /// The random static seed. pub static ref RANDOM_SEED: Seed = Rng::gen(&mut ThreadRng::default()); + + /// The random cipher from the seed. + pub static ref RANDOM_CIPHER_BLOWFISH: CipherBlowfish = CipherBlowfish::new_from_slice(&Rng::gen::(&mut ThreadRng::default())).expect("it could not generate key"); + + /// The constant cipher for testing. + pub static ref ZEROED_TEST_CIPHER_BLOWFISH: CipherBlowfish = CipherBlowfish::new_from_slice(&[0u8; 32]).expect("it could not generate key"); } diff --git a/src/shared/crypto/keys.rs b/src/shared/crypto/keys.rs index deb70574f..60dc16660 100644 --- a/src/shared/crypto/keys.rs +++ b/src/shared/crypto/keys.rs @@ -1,110 +1,154 @@ //! This module contains logic related to cryptographic keys. -pub mod seeds { - //! This module contains logic related to cryptographic seeds. - //! - //! Specifically, it contains the logic for storing the seed and providing - //! it to other modules. - //! - //! A **seed** is a pseudo-random number that is used as a secret key for - //! cryptographic operations. 
- use self::detail::CURRENT_SEED; - use crate::shared::crypto::ephemeral_instance_keys::{Seed, RANDOM_SEED}; - - /// This trait is for structures that can keep and provide a seed. - pub trait Keeper { - type Seed: Sized + Default + AsMut<[u8]>; - - /// It returns a reference to the seed that is keeping. - fn get_seed() -> &'static Self::Seed; +//! +//! Specifically, it contains the logic for storing the seed and providing +//! it to other modules. +//! +//! It also provides the logic for the cipher for encryption and decryption. + +use self::detail_cipher::CURRENT_CIPHER; +use self::detail_seed::CURRENT_SEED; +pub use crate::shared::crypto::ephemeral_instance_keys::CipherArrayBlowfish; +use crate::shared::crypto::ephemeral_instance_keys::{CipherBlowfish, Seed, RANDOM_CIPHER_BLOWFISH, RANDOM_SEED}; + +/// This trait is for structures that can keep and provide a seed. +pub trait Keeper { + type Seed: Sized + Default + AsMut<[u8]>; + type Cipher: cipher::BlockCipher; + + /// It returns a reference to the seed that is keeping. + fn get_seed() -> &'static Self::Seed; + fn get_cipher_blowfish() -> &'static Self::Cipher; +} + +/// The keeper for the instance. When the application is running +/// in production, this will be the seed keeper that is used. +pub struct Instance; + +/// The keeper for the current execution. It's a facade at compilation +/// time that will either be the instance seed keeper (with a randomly +/// generated key for production) or the zeroed seed keeper. +pub struct Current; + +impl Keeper for Instance { + type Seed = Seed; + type Cipher = CipherBlowfish; + + fn get_seed() -> &'static Self::Seed { + &RANDOM_SEED } - /// The seed keeper for the instance. When the application is running - /// in production, this will be the seed keeper that is used. - pub struct Instance; + fn get_cipher_blowfish() -> &'static Self::Cipher { + &RANDOM_CIPHER_BLOWFISH + } +} - /// The seed keeper for the current execution. 
It's a facade at compilation - /// time that will either be the instance seed keeper (with a randomly - /// generated key for production) or the zeroed seed keeper. - pub struct Current; +impl Keeper for Current { + type Seed = Seed; + type Cipher = CipherBlowfish; - impl Keeper for Instance { - type Seed = Seed; + #[allow(clippy::needless_borrow)] + fn get_seed() -> &'static Self::Seed { + &CURRENT_SEED + } - fn get_seed() -> &'static Self::Seed { - &RANDOM_SEED - } + fn get_cipher_blowfish() -> &'static Self::Cipher { + &CURRENT_CIPHER } +} + +#[cfg(test)] +mod tests { + + use super::detail_seed::ZEROED_TEST_SEED; + use super::{Current, Instance, Keeper}; + use crate::shared::crypto::ephemeral_instance_keys::{CipherBlowfish, Seed, ZEROED_TEST_CIPHER_BLOWFISH}; - impl Keeper for Current { + pub struct ZeroedTest; + + impl Keeper for ZeroedTest { type Seed = Seed; + type Cipher = CipherBlowfish; #[allow(clippy::needless_borrow)] fn get_seed() -> &'static Self::Seed { - &CURRENT_SEED + &ZEROED_TEST_SEED + } + + fn get_cipher_blowfish() -> &'static Self::Cipher { + &ZEROED_TEST_CIPHER_BLOWFISH } } + #[test] + fn the_default_seed_and_the_zeroed_seed_should_be_the_same_when_testing() { + assert_eq!(Current::get_seed(), ZeroedTest::get_seed()); + } + + #[test] + fn the_default_seed_and_the_instance_seed_should_be_different_when_testing() { + assert_ne!(Current::get_seed(), Instance::get_seed()); + } +} + +mod detail_seed { + use crate::shared::crypto::ephemeral_instance_keys::Seed; + + #[allow(dead_code)] + pub const ZEROED_TEST_SEED: Seed = [0u8; 32]; + #[cfg(test)] - mod tests { - use super::detail::ZEROED_TEST_SEED; - use super::{Current, Instance, Keeper}; - use crate::shared::crypto::ephemeral_instance_keys::Seed; + pub use ZEROED_TEST_SEED as CURRENT_SEED; - pub struct ZeroedTestSeed; + #[cfg(not(test))] + pub use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as CURRENT_SEED; - impl Keeper for ZeroedTestSeed { - type Seed = Seed; + #[cfg(test)] + mod 
tests { + use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED; + use crate::shared::crypto::keys::detail_seed::ZEROED_TEST_SEED; + use crate::shared::crypto::keys::CURRENT_SEED; - #[allow(clippy::needless_borrow)] - fn get_seed() -> &'static Self::Seed { - &ZEROED_TEST_SEED - } + #[test] + fn it_should_have_a_zero_test_seed() { + assert_eq!(ZEROED_TEST_SEED, [0u8; 32]); } #[test] - fn the_default_seed_and_the_zeroed_seed_should_be_the_same_when_testing() { - assert_eq!(Current::get_seed(), ZeroedTestSeed::get_seed()); + fn it_should_default_to_zeroed_seed_when_testing() { + assert_eq!(CURRENT_SEED, ZEROED_TEST_SEED); } #[test] - fn the_default_seed_and_the_instance_seed_should_be_different_when_testing() { - assert_ne!(Current::get_seed(), Instance::get_seed()); + fn it_should_have_a_large_random_seed() { + assert!(u128::from_ne_bytes((*RANDOM_SEED)[..16].try_into().unwrap()) > u128::from(u64::MAX)); + assert!(u128::from_ne_bytes((*RANDOM_SEED)[16..].try_into().unwrap()) > u128::from(u64::MAX)); } } +} - mod detail { - use crate::shared::crypto::ephemeral_instance_keys::Seed; - - #[allow(dead_code)] - pub const ZEROED_TEST_SEED: &Seed = &[0u8; 32]; - - #[cfg(test)] - pub use ZEROED_TEST_SEED as CURRENT_SEED; +mod detail_cipher { + #[allow(unused_imports)] + #[cfg(not(test))] + pub use crate::shared::crypto::ephemeral_instance_keys::RANDOM_CIPHER_BLOWFISH as CURRENT_CIPHER; + #[cfg(test)] + pub use crate::shared::crypto::ephemeral_instance_keys::ZEROED_TEST_CIPHER_BLOWFISH as CURRENT_CIPHER; - #[cfg(not(test))] - pub use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as CURRENT_SEED; + #[cfg(test)] + mod tests { + use cipher::BlockEncrypt; - #[cfg(test)] - mod tests { - use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED; - use crate::shared::crypto::keys::seeds::detail::ZEROED_TEST_SEED; - use crate::shared::crypto::keys::seeds::CURRENT_SEED; + use crate::shared::crypto::ephemeral_instance_keys::{CipherArrayBlowfish, 
ZEROED_TEST_CIPHER_BLOWFISH}; + use crate::shared::crypto::keys::detail_cipher::CURRENT_CIPHER; - #[test] - fn it_should_have_a_zero_test_seed() { - assert_eq!(*ZEROED_TEST_SEED, [0u8; 32]); - } + #[test] + fn it_should_default_to_zeroed_seed_when_testing() { + let mut data: cipher::generic_array::GenericArray = CipherArrayBlowfish::from([0u8; 8]); + let mut data_2 = CipherArrayBlowfish::from([0u8; 8]); - #[test] - fn it_should_default_to_zeroed_seed_when_testing() { - assert_eq!(*CURRENT_SEED, *ZEROED_TEST_SEED); - } + CURRENT_CIPHER.encrypt_block(&mut data); + ZEROED_TEST_CIPHER_BLOWFISH.encrypt_block(&mut data_2); - #[test] - fn it_should_have_a_large_random_seed() { - assert!(u128::from_ne_bytes((*RANDOM_SEED)[..16].try_into().unwrap()) > u128::from(u64::MAX)); - assert!(u128::from_ne_bytes((*RANDOM_SEED)[16..].try_into().unwrap()) > u128::from(u64::MAX)); - } + assert_eq!(data, data_2); } } } diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 83dc076ce..f96ba2bea 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -55,11 +55,16 @@ impl Environment { #[allow(dead_code)] pub async fn start(self) -> Environment { + let cookie_lifetime = self.config.cookie_lifetime; Environment { config: self.config, tracker: self.tracker.clone(), registar: self.registar.clone(), - server: self.server.start(self.tracker, self.registar.give_form()).await.unwrap(), + server: self + .server + .start(self.tracker, self.registar.give_form(), cookie_lifetime) + .await + .unwrap(), } } } From c53e2895d96b2c6c30a7d454d23f793b53fee4f1 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Tue, 19 Nov 2024 16:15:23 +0800 Subject: [PATCH 028/802] udp: cookie fixups as suggested by Jose --- src/bootstrap/app.rs | 2 +- src/servers/udp/connection_cookie.rs | 32 +++-- src/servers/udp/error.rs | 12 +- src/servers/udp/handlers.rs | 201 +++++++++++---------------- src/servers/udp/server/processor.rs | 11 +- 5 files changed, 110 
insertions(+), 148 deletions(-) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index e106f73cc..4f3425469 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -58,7 +58,7 @@ pub fn check_seed() { let seed = keys::Current::get_seed(); let instance = keys::Instance::get_seed(); - assert_eq!(seed, instance, "maybe using zeroed see in production!?"); + assert_eq!(seed, instance, "maybe using zeroed seed in production!?"); } /// It initializes the application with the given configuration. diff --git a/src/servers/udp/connection_cookie.rs b/src/servers/udp/connection_cookie.rs index 31c6396e8..9ed1bcdc8 100644 --- a/src/servers/udp/connection_cookie.rs +++ b/src/servers/udp/connection_cookie.rs @@ -81,7 +81,7 @@ use aquatic_udp_protocol::ConnectionId as Cookie; use cookie_builder::{assemble, decode, disassemble, encode}; use zerocopy::AsBytes; -use super::error::{self, Error}; +use super::error::Error; use crate::shared::crypto::keys::CipherArrayBlowfish; /// Generates a new connection cookie. @@ -106,6 +106,8 @@ pub fn make(fingerprint: u64, issue_at: f64) -> Result { Ok(zerocopy::FromBytes::read_from(cookie.as_slice()).expect("it should be the same size")) } +use std::ops::Range; + /// Checks if the supplied `connection_cookie` is valid. /// /// # Errors /// /// # Panics /// -/// It would panic if cookie min value is larger than the max value. -pub fn check(cookie: &Cookie, fingerprint: u64, min: f64, max: f64) -> Result { - assert!(min < max, "min is larger than max"); +/// It would panic if the range start is larger than its end.
+pub fn check(cookie: &Cookie, fingerprint: u64, valid_range: Range) -> Result { + assert!(valid_range.start <= valid_range.end, "range start is larger than range end"); let cookie_bytes = CipherArrayBlowfish::from_slice(cookie.0.as_bytes()); let cookie_bytes = decode(*cookie_bytes); @@ -124,22 +126,22 @@ pub fn check(cookie: &Cookie, fingerprint: u64, min: f64, max: f64) -> Result max { + if issue_time > valid_range.end { return Err(Error::ConnectionIdFromFuture { - future_age: issue_time, - max_age: max, + future_value: issue_time, + max_value: valid_range.end, }); } @@ -262,7 +264,7 @@ mod tests { let min = issue_at - 10.0; let max = issue_at + 10.0; - let result = check(&cookie, fingerprint, min, max).unwrap(); + let result = check(&cookie, fingerprint, min..max).unwrap(); // we should have exactly the same bytes returned assert_eq!(result.to_ne_bytes(), issue_at.to_ne_bytes()); @@ -277,7 +279,7 @@ mod tests { let min = issue_at + 10.0; let max = issue_at + 20.0; - let result = check(&cookie, fingerprint, min, max).unwrap_err(); + let result = check(&cookie, fingerprint, min..max).unwrap_err(); match result { Error::ConnectionIdExpired { .. } => {} // Expected error @@ -295,7 +297,7 @@ mod tests { let min = issue_at - 20.0; let max = issue_at - 10.0; - let result = check(&cookie, fingerprint, min, max).unwrap_err(); + let result = check(&cookie, fingerprint, min..max).unwrap_err(); match result { Error::ConnectionIdFromFuture { .. 
} => {} // Expected error diff --git a/src/servers/udp/error.rs b/src/servers/udp/error.rs index 8f30b0138..5996cae73 100644 --- a/src/servers/udp/error.rs +++ b/src/servers/udp/error.rs @@ -16,14 +16,14 @@ pub enum Error { #[error("the issue time should be a normal floating point number")] InvalidCookieIssueTime { invalid_value: f64 }, - #[error("connection id was decoded, but could not be understood")] - InvalidConnectionId { bad_id: ConnectionCookie }, + #[error("connection id did not produce a normal value")] + ConnectionIdNotNormal { not_normal_value: f64 }, - #[error("connection id was decoded, but was expired (too old)")] - ConnectionIdExpired { bad_age: f64, min_age: f64 }, + #[error("connection id produced an expired value")] + ConnectionIdExpired { expired_value: f64, min_value: f64 }, - #[error("connection id was decoded, but was invalid (from future)")] - ConnectionIdFromFuture { future_age: f64, max_age: f64 }, + #[error("connection id produces a future value")] + ConnectionIdFromFuture { future_value: f64, max_value: f64 }, /// Error returned when the domain tracker returns an error. 
#[error("tracker server error: {source}")] diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index ba75ac3c6..814ee5e9e 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -2,6 +2,7 @@ use std::fmt; use std::hash::{DefaultHasher, Hash, Hasher as _}; use std::net::{IpAddr, SocketAddr}; +use std::ops::Range; use std::panic::Location; use std::sync::Arc; use std::time::Instant; @@ -12,6 +13,7 @@ use aquatic_udp_protocol::{ ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_clock::clock::Time as _; use torrust_tracker_located_error::DynError; use tracing::{instrument, Level}; use uuid::Uuid; @@ -24,6 +26,26 @@ use crate::servers::udp::error::Error; use crate::servers::udp::logging::{log_bad_request, log_error_response, log_request, log_response}; use crate::servers::udp::peer_builder; use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; +use crate::CurrentClock; + +#[derive(Debug)] +pub(super) struct CookieTimeValues { + pub(super) issue_time: f64, + pub(super) valid_range: Range, +} + +impl CookieTimeValues { + pub(super) fn new(cookie_lifetime: f64) -> Self { + let issue_time = CurrentClock::now().as_secs_f64(); + let expiry_time = issue_time - cookie_lifetime - 1.0; + let tolerance_max_time = issue_time + 1.0; + + Self { + issue_time, + valid_range: expiry_time..tolerance_max_time, + } + } +} /// It handles the incoming UDP packets. 
/// @@ -38,9 +60,7 @@ pub(crate) async fn handle_packet( udp_request: RawRequest, tracker: &Tracker, local_addr: SocketAddr, - cookie_issue_time: f64, - cookie_expiry_time: f64, - cookie_tolerance_max_time: f64, + cookie_time_values: CookieTimeValues, ) -> Response { tracing::debug!("Handling Packets: {udp_request:?}"); @@ -63,16 +83,7 @@ pub(crate) async fn handle_packet( Request::Scrape(scrape_request) => scrape_request.transaction_id, }; - let response = match handle_request( - request, - udp_request.from, - tracker, - cookie_issue_time, - cookie_expiry_time, - cookie_tolerance_max_time, - ) - .await - { + let response = match handle_request(request, udp_request.from, tracker, cookie_time_values).await { Ok(response) => response, Err(e) => handle_error(&e, transaction_id), }; @@ -110,23 +121,16 @@ pub async fn handle_request( request: Request, remote_addr: SocketAddr, tracker: &Tracker, - cookie_issue_time: f64, - cookie_expiry_time: f64, - cookie_tolerance_max_time: f64, + cookie_time_values: CookieTimeValues, ) -> Result { tracing::trace!("handle request"); match request { - Request::Connect(connect_request) => handle_connect(remote_addr, &connect_request, tracker, cookie_issue_time).await, + Request::Connect(connect_request) => { + handle_connect(remote_addr, &connect_request, tracker, cookie_time_values.issue_time).await + } Request::Announce(announce_request) => { - handle_announce( - remote_addr, - &announce_request, - tracker, - cookie_expiry_time, - cookie_tolerance_max_time, - ) - .await + handle_announce(remote_addr, &announce_request, tracker, cookie_time_values.valid_range).await } Request::Scrape(scrape_request) => handle_scrape(remote_addr, &scrape_request, tracker).await, } @@ -147,14 +151,7 @@ pub async fn handle_connect( ) -> Result { tracing::trace!("handle connect"); - let connection_id = make( - { - let mut state = DefaultHasher::new(); - remote_addr.hash(&mut state); - state.finish() - }, - cookie_issue_time, - )?; + let connection_id = 
make(make_remote_fingerprint(&remote_addr), cookie_issue_time)?; let response = ConnectResponse { transaction_id: request.transaction_id, @@ -185,8 +182,7 @@ pub async fn handle_announce( remote_addr: SocketAddr, announce_request: &AnnounceRequest, tracker: &Tracker, - cookie_expiry_time: f64, - cookie_tolerance_max_time: f64, + cookie_valid_range: Range, ) -> Result { tracing::trace!("handle announce"); @@ -199,13 +195,8 @@ pub async fn handle_announce( check( &announce_request.connection_id, - { - let mut state = DefaultHasher::new(); - remote_addr.hash(&mut state); - state.finish() - }, - cookie_expiry_time, - cookie_tolerance_max_time, + make_remote_fingerprint(&remote_addr), + cookie_valid_range, )?; let info_hash = announce_request.info_hash.into(); @@ -349,6 +340,12 @@ fn handle_error(e: &Error, transaction_id: TransactionId) -> Response { }) } +fn make_remote_fingerprint(remote_addr: &SocketAddr) -> u64 { + let mut state = DefaultHasher::new(); + remote_addr.hash(&mut state); + state.finish() +} + /// An identifier for a request. 
#[derive(Debug, Clone)] pub struct RequestId(Uuid); @@ -368,8 +365,8 @@ impl fmt::Display for RequestId { #[cfg(test)] mod tests { - use std::hash::{DefaultHasher, Hash as _, Hasher as _}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::ops::Range; use std::sync::Arc; use aquatic_udp_protocol::{NumberOfBytes, PeerId}; @@ -378,6 +375,7 @@ mod tests { use torrust_tracker_primitives::peer; use torrust_tracker_test_helpers::configuration; + use super::make_remote_fingerprint; use crate::core::services::tracker_factory; use crate::core::Tracker; use crate::CurrentClock; @@ -406,18 +404,12 @@ mod tests { tracker_factory(configuration).into() } - fn make_remote_addr_fingerprint(remote_addr: &SocketAddr) -> u64 { - let mut state = DefaultHasher::new(); - remote_addr.hash(&mut state); - state.finish() - } - fn sample_ipv4_remote_addr() -> SocketAddr { sample_ipv4_socket_address() } fn sample_ipv4_remote_addr_fingerprint() -> u64 { - make_remote_addr_fingerprint(&sample_ipv4_socket_address()) + make_remote_fingerprint(&sample_ipv4_socket_address()) } fn sample_ipv6_remote_addr() -> SocketAddr { @@ -425,7 +417,7 @@ mod tests { } fn sample_ipv6_remote_addr_fingerprint() -> u64 { - make_remote_addr_fingerprint(&sample_ipv6_socket_address()) + make_remote_fingerprint(&sample_ipv6_socket_address()) } fn sample_ipv4_socket_address() -> SocketAddr { @@ -440,12 +432,8 @@ mod tests { 1_000_000_000_f64 } - fn sample_expiry_time() -> f64 { - sample_issue_time() - 10.0 - } - - fn tolerance_max_time() -> f64 { - sample_issue_time() + 10.0 + fn sample_cookie_valid_range() -> Range { + sample_issue_time() - 10.0..sample_issue_time() + 10.0 } #[derive(Debug, Default)] @@ -738,8 +726,8 @@ mod tests { use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ - make_remote_addr_fingerprint, public_tracker, sample_expiry_time, sample_ipv4_socket_address, 
sample_issue_time, - tolerance_max_time, tracker_configuration, TorrentPeerBuilder, + make_remote_fingerprint, public_tracker, sample_cookie_valid_range, sample_ipv4_socket_address, + sample_issue_time, tracker_configuration, TorrentPeerBuilder, }; use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; @@ -755,14 +743,14 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) .with_port(client_port) .into(); - handle_announce(remote_addr, &request, &tracker, sample_expiry_time(), tolerance_max_time()) + handle_announce(remote_addr, &request, &tracker, sample_cookie_valid_range()) .await .unwrap(); @@ -781,18 +769,12 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .into(); - let response = handle_announce( - remote_addr, - &request, - &public_tracker(), - sample_expiry_time(), - tolerance_max_time(), - ) - .await - .unwrap(); + let response = handle_announce(remote_addr, &request, &public_tracker(), sample_cookie_valid_range()) + .await + .unwrap(); let empty_peer_vector: Vec> = vec![]; assert_eq!( @@ -828,14 +810,14 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + 
.with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) .with_port(client_port) .into(); - handle_announce(remote_addr, &request, &tracker, sample_expiry_time(), tolerance_max_time()) + handle_announce(remote_addr, &request, &tracker, sample_cookie_valid_range()) .await .unwrap(); @@ -863,10 +845,10 @@ mod tests { async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .into(); - handle_announce(remote_addr, &request, &tracker, sample_expiry_time(), tolerance_max_time()) + handle_announce(remote_addr, &request, &tracker, sample_cookie_valid_range()) .await .unwrap() } @@ -911,8 +893,7 @@ mod tests { sample_ipv4_socket_address(), &AnnounceRequestBuilder::default().into(), &tracker, - sample_expiry_time(), - tolerance_max_time(), + sample_cookie_valid_range(), ) .await .unwrap(); @@ -928,8 +909,7 @@ mod tests { use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ - make_remote_addr_fingerprint, public_tracker, sample_expiry_time, sample_issue_time, tolerance_max_time, - TorrentPeerBuilder, + make_remote_fingerprint, public_tracker, sample_cookie_valid_range, sample_issue_time, TorrentPeerBuilder, }; #[tokio::test] @@ -944,14 +924,14 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + 
.with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) .with_port(client_port) .into(); - handle_announce(remote_addr, &request, &tracker, sample_expiry_time(), tolerance_max_time()) + handle_announce(remote_addr, &request, &tracker, sample_cookie_valid_range()) .await .unwrap(); @@ -985,8 +965,8 @@ mod tests { use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ - make_remote_addr_fingerprint, public_tracker, sample_expiry_time, sample_ipv6_remote_addr, sample_issue_time, - tolerance_max_time, tracker_configuration, TorrentPeerBuilder, + make_remote_fingerprint, public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, sample_issue_time, + tracker_configuration, TorrentPeerBuilder, }; use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; @@ -1003,14 +983,14 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) .with_port(client_port) .into(); - handle_announce(remote_addr, &request, &tracker, sample_expiry_time(), tolerance_max_time()) + handle_announce(remote_addr, &request, &tracker, sample_cookie_valid_range()) .await .unwrap(); @@ -1032,18 +1012,12 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + 
.with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .into(); - let response = handle_announce( - remote_addr, - &request, - &public_tracker(), - sample_expiry_time(), - tolerance_max_time(), - ) - .await - .unwrap(); + let response = handle_announce(remote_addr, &request, &public_tracker(), sample_cookie_valid_range()) + .await + .unwrap(); let empty_peer_vector: Vec> = vec![]; assert_eq!( @@ -1079,14 +1053,14 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) .with_port(client_port) .into(); - handle_announce(remote_addr, &request, &tracker, sample_expiry_time(), tolerance_max_time()) + handle_announce(remote_addr, &request, &tracker, sample_cookie_valid_range()) .await .unwrap(); @@ -1117,10 +1091,10 @@ mod tests { let client_port = 8080; let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .into(); - handle_announce(remote_addr, &request, &tracker, sample_expiry_time(), tolerance_max_time()) + handle_announce(remote_addr, &request, &tracker, sample_cookie_valid_range()) .await .unwrap() } @@ -1164,18 +1138,12 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); let announce_request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + 
.with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .into(); - handle_announce( - remote_addr, - &announce_request, - &tracker, - sample_expiry_time(), - tolerance_max_time(), - ) - .await - .unwrap(); + handle_announce(remote_addr, &announce_request, &tracker, sample_cookie_valid_range()) + .await + .unwrap(); } mod from_a_loopback_ip { @@ -1190,8 +1158,7 @@ mod tests { use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ - make_remote_addr_fingerprint, sample_expiry_time, sample_issue_time, tolerance_max_time, - TrackerConfigurationBuilder, + make_remote_fingerprint, sample_cookie_valid_range, sample_issue_time, TrackerConfigurationBuilder, }; #[tokio::test] @@ -1214,14 +1181,14 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_addr_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) .with_port(client_port) .into(); - handle_announce(remote_addr, &request, &tracker, sample_expiry_time(), tolerance_max_time()) + handle_announce(remote_addr, &request, &tracker, sample_cookie_valid_range()) .await .unwrap(); @@ -1251,7 +1218,7 @@ mod tests { TransactionId, }; - use super::{make_remote_addr_fingerprint, TorrentPeerBuilder}; + use super::{make_remote_fingerprint, TorrentPeerBuilder}; use crate::core::{self}; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_scrape; @@ -1273,7 +1240,7 @@ mod tests { let info_hashes = vec![info_hash]; let request = ScrapeRequest { - connection_id: make(make_remote_addr_fingerprint(&remote_addr), 
sample_issue_time()).unwrap(), + connection_id: make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap(), transaction_id: TransactionId(0i32.into()), info_hashes, }; @@ -1307,7 +1274,7 @@ mod tests { let info_hashes = vec![*info_hash]; ScrapeRequest { - connection_id: make(make_remote_addr_fingerprint(remote_addr), sample_issue_time()).unwrap(), + connection_id: make(make_remote_fingerprint(remote_addr), sample_issue_time()).unwrap(), transaction_id: TransactionId::new(0i32), info_hashes, } @@ -1449,7 +1416,7 @@ mod tests { let info_hashes = vec![info_hash]; ScrapeRequest { - connection_id: make(make_remote_addr_fingerprint(remote_addr), sample_issue_time()).unwrap(), + connection_id: make(make_remote_fingerprint(remote_addr), sample_issue_time()).unwrap(), transaction_id: TransactionId(0i32.into()), info_hashes, } diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index 2ac7f27cd..703367f35 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -3,13 +3,12 @@ use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::Response; -use torrust_tracker_clock::clock::Time as _; use tracing::{instrument, Level}; use super::bound_socket::BoundSocket; use crate::core::Tracker; +use crate::servers::udp::handlers::CookieTimeValues; use crate::servers::udp::{handlers, RawRequest}; -use crate::CurrentClock; pub struct Processor { socket: Arc, @@ -28,18 +27,12 @@ impl Processor { #[instrument(skip(self, request))] pub async fn process_request(self, request: RawRequest) { - let cookie_issue_time = CurrentClock::now().as_secs_f64(); - let cookie_expiry_time = cookie_issue_time - self.cookie_lifetime - 1.0; - let cookie_tolerance_max_time = cookie_issue_time + 1.0; - let from = request.from; let response = handlers::handle_packet( request, &self.tracker, self.socket.address(), - cookie_issue_time, - cookie_expiry_time, - cookie_tolerance_max_time, + 
CookieTimeValues::new(self.cookie_lifetime), ) .await; self.send_response(from, response).await; From 8c703955f010948199be75c2dcd3481d519d6bc4 Mon Sep 17 00:00:00 2001 From: Cameron Garnham Date: Wed, 20 Nov 2024 05:13:14 +0800 Subject: [PATCH 029/802] udp: various changes - remove old logging module - remove udp test for private mode - check `ConnectionID` for scrape - check `ConnectionID` when included in badly formatted responses - pass-through any errors when parsing a response --- cSpell.json | 1 + src/core/mod.rs | 2 + src/core/services/statistics/mod.rs | 2 + src/core/statistics.rs | 24 ++ .../apis/v1/context/stats/resources.rs | 22 +- src/servers/udp/connection_cookie.rs | 27 +- src/servers/udp/error.rs | 24 +- src/servers/udp/handlers.rs | 397 +++++++++--------- src/servers/udp/logging.rs | 87 ---- src/servers/udp/mod.rs | 1 - src/servers/udp/server/launcher.rs | 9 +- .../servers/api/v1/contract/context/stats.rs | 2 + tests/servers/udp/asserts.rs | 6 +- tests/servers/udp/contract.rs | 4 +- 14 files changed, 288 insertions(+), 320 deletions(-) delete mode 100644 src/servers/udp/logging.rs diff --git a/cSpell.json b/cSpell.json index e2ecd1bc3..090a2b0e3 100644 --- a/cSpell.json +++ b/cSpell.json @@ -164,6 +164,7 @@ "typenum", "Unamed", "underflows", + "Unsendable", "untuple", "uroot", "Vagaa", diff --git a/src/core/mod.rs b/src/core/mod.rs index a41ef2eba..835776e30 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -470,6 +470,7 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_torrent_repository::entry::EntrySync; use torrust_tracker_torrent_repository::repository::Repository; +use tracing::instrument; use self::auth::Key; use self::error::Error; @@ -1092,6 +1093,7 @@ impl Tracker { /// /// Will return an error if the tracker is running in `listed` mode /// and the infohash is not whitelisted. 
+ #[instrument(skip(self, info_hash), err)] pub async fn authorize(&self, info_hash: &InfoHash) -> Result<(), Error> { if !self.is_listed() { return Ok(()); diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index ee1c0c4fa..0e7735be2 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -76,9 +76,11 @@ pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { udp4_connections_handled: stats.udp4_connections_handled, udp4_announces_handled: stats.udp4_announces_handled, udp4_scrapes_handled: stats.udp4_scrapes_handled, + udp4_errors_handled: stats.udp4_errors_handled, udp6_connections_handled: stats.udp6_connections_handled, udp6_announces_handled: stats.udp6_announces_handled, udp6_scrapes_handled: stats.udp6_scrapes_handled, + udp6_errors_handled: stats.udp6_errors_handled, }, } } diff --git a/src/core/statistics.rs b/src/core/statistics.rs index c9681d23c..b106b2691 100644 --- a/src/core/statistics.rs +++ b/src/core/statistics.rs @@ -47,9 +47,11 @@ pub enum Event { Udp4Connect, Udp4Announce, Udp4Scrape, + Udp4Error, Udp6Connect, Udp6Announce, Udp6Scrape, + Udp6Error, } /// Metrics collected by the tracker. @@ -82,12 +84,16 @@ pub struct Metrics { pub udp4_announces_handled: u64, /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. pub udp4_scrapes_handled: u64, + /// Total number of UDP (UDP tracker) `error` requests from IPv4 peers. + pub udp4_errors_handled: u64, /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. pub udp6_connections_handled: u64, /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. pub udp6_announces_handled: u64, /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. pub udp6_scrapes_handled: u64, + /// Total number of UDP (UDP tracker) `error` requests from IPv6 peers. 
+ pub udp6_errors_handled: u64, } /// The service responsible for keeping tracker metrics (listening to statistics events and handle them). @@ -168,6 +174,9 @@ async fn event_handler(event: Event, stats_repository: &Repo) { Event::Udp4Scrape => { stats_repository.increase_udp4_scrapes().await; } + Event::Udp4Error => { + stats_repository.increase_udp4_errors().await; + } // UDP6 Event::Udp6Connect => { @@ -179,6 +188,9 @@ async fn event_handler(event: Event, stats_repository: &Repo) { Event::Udp6Scrape => { stats_repository.increase_udp6_scrapes().await; } + Event::Udp6Error => { + stats_repository.increase_udp6_errors().await; + } } tracing::debug!("stats: {:?}", stats_repository.get_stats().await); @@ -282,6 +294,12 @@ impl Repo { drop(stats_lock); } + pub async fn increase_udp4_errors(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_errors_handled += 1; + drop(stats_lock); + } + pub async fn increase_udp6_connections(&self) { let mut stats_lock = self.stats.write().await; stats_lock.udp6_connections_handled += 1; @@ -299,6 +317,12 @@ impl Repo { stats_lock.udp6_scrapes_handled += 1; drop(stats_lock); } + + pub async fn increase_udp6_errors(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_errors_handled += 1; + drop(stats_lock); + } } #[cfg(test)] diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index 9e8ab6bab..de6f6ca89 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -38,12 +38,16 @@ pub struct Stats { pub udp4_announces_handled: u64, /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. pub udp4_scrapes_handled: u64, + /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. + pub udp4_errors_handled: u64, /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. 
pub udp6_connections_handled: u64, /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. pub udp6_announces_handled: u64, /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. pub udp6_scrapes_handled: u64, + /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. + pub udp6_errors_handled: u64, } impl From for Stats { @@ -62,9 +66,11 @@ impl From for Stats { udp4_connections_handled: metrics.protocol_metrics.udp4_connections_handled, udp4_announces_handled: metrics.protocol_metrics.udp4_announces_handled, udp4_scrapes_handled: metrics.protocol_metrics.udp4_scrapes_handled, + udp4_errors_handled: metrics.protocol_metrics.udp4_errors_handled, udp6_connections_handled: metrics.protocol_metrics.udp6_connections_handled, udp6_announces_handled: metrics.protocol_metrics.udp6_announces_handled, udp6_scrapes_handled: metrics.protocol_metrics.udp6_scrapes_handled, + udp6_errors_handled: metrics.protocol_metrics.udp6_errors_handled, } } } @@ -97,9 +103,11 @@ mod tests { udp4_connections_handled: 11, udp4_announces_handled: 12, udp4_scrapes_handled: 13, - udp6_connections_handled: 14, - udp6_announces_handled: 15, - udp6_scrapes_handled: 16 + udp4_errors_handled: 14, + udp6_connections_handled: 15, + udp6_announces_handled: 16, + udp6_scrapes_handled: 17, + udp6_errors_handled: 18 } }), Stats { @@ -116,9 +124,11 @@ mod tests { udp4_connections_handled: 11, udp4_announces_handled: 12, udp4_scrapes_handled: 13, - udp6_connections_handled: 14, - udp6_announces_handled: 15, - udp6_scrapes_handled: 16 + udp4_errors_handled: 14, + udp6_connections_handled: 15, + udp6_announces_handled: 16, + udp6_scrapes_handled: 17, + udp6_errors_handled: 18 } ); } diff --git a/src/servers/udp/connection_cookie.rs b/src/servers/udp/connection_cookie.rs index 9ed1bcdc8..50359033c 100644 --- a/src/servers/udp/connection_cookie.rs +++ b/src/servers/udp/connection_cookie.rs @@ -79,6 +79,7 @@ use aquatic_udp_protocol::ConnectionId as Cookie; use 
cookie_builder::{assemble, decode, disassemble, encode}; +use tracing::instrument; use zerocopy::AsBytes; use super::error::Error; @@ -94,9 +95,12 @@ use crate::shared::crypto::keys::CipherArrayBlowfish; /// /// It would panic if the cookie is not exactly 8 bytes is size. /// +#[instrument(err)] pub fn make(fingerprint: u64, issue_at: f64) -> Result { if !issue_at.is_normal() { - return Err(Error::InvalidCookieIssueTime { invalid_value: issue_at }); + return Err(Error::CookieValueNotNormal { + not_normal_value: issue_at, + }); } let cookie = assemble(fingerprint, issue_at); @@ -117,6 +121,7 @@ use std::ops::Range; /// # Panics /// /// It would panic if the range start is not smaller than it's end. +#[instrument(err)] pub fn check(cookie: &Cookie, fingerprint: u64, valid_range: Range) -> Result { assert!(valid_range.start <= valid_range.end, "range start is larger than range end"); @@ -126,20 +131,20 @@ pub fn check(cookie: &Cookie, fingerprint: u64, valid_range: Range) -> Resu let issue_time = disassemble(fingerprint, cookie_bytes); if !issue_time.is_normal() { - return Err(Error::ConnectionIdNotNormal { + return Err(Error::CookieValueNotNormal { not_normal_value: issue_time, }); } if issue_time < valid_range.start { - return Err(Error::ConnectionIdExpired { + return Err(Error::CookieValueExpired { expired_value: issue_time, min_value: valid_range.start, }); } if issue_time > valid_range.end { - return Err(Error::ConnectionIdFromFuture { + return Err(Error::CookieValueFromFuture { future_value: issue_time, max_value: valid_range.end, }); @@ -150,7 +155,7 @@ pub fn check(cookie: &Cookie, fingerprint: u64, valid_range: Range) -> Resu mod cookie_builder { use cipher::{BlockDecrypt, BlockEncrypt}; - use tracing::{instrument, Level}; + use tracing::instrument; use zerocopy::{byteorder, AsBytes as _, NativeEndian}; pub type CookiePlainText = CipherArrayBlowfish; @@ -158,7 +163,7 @@ mod cookie_builder { use crate::shared::crypto::keys::{CipherArrayBlowfish, Current, 
Keeper}; - #[instrument(ret(level = Level::TRACE))] + #[instrument()] pub(super) fn assemble(fingerprint: u64, issue_at: f64) -> CookiePlainText { let issue_at: byteorder::I64 = *zerocopy::FromBytes::ref_from(&issue_at.to_ne_bytes()).expect("it should be aligned"); @@ -172,7 +177,7 @@ mod cookie_builder { *CipherArrayBlowfish::from_slice(cookie.as_bytes()) } - #[instrument(ret(level = Level::TRACE))] + #[instrument()] pub(super) fn disassemble(fingerprint: u64, cookie: CookiePlainText) -> f64 { let fingerprint: byteorder::I64 = *zerocopy::FromBytes::ref_from(&fingerprint.to_ne_bytes()).expect("it should be aligned"); @@ -189,7 +194,7 @@ mod cookie_builder { issue_time.get() } - #[instrument(ret(level = Level::TRACE))] + #[instrument()] pub(super) fn encode(mut cookie: CookiePlainText) -> CookieCipherText { let cipher = Current::get_cipher_blowfish(); @@ -198,7 +203,7 @@ mod cookie_builder { cookie } - #[instrument(ret(level = Level::TRACE))] + #[instrument()] pub(super) fn decode(mut cookie: CookieCipherText) -> CookiePlainText { let cipher = Current::get_cipher_blowfish(); @@ -282,7 +287,7 @@ mod tests { let result = check(&cookie, fingerprint, min..max).unwrap_err(); match result { - Error::ConnectionIdExpired { .. } => {} // Expected error + Error::CookieValueExpired { .. } => {} // Expected error _ => panic!("Expected ConnectionIdExpired error"), } } @@ -300,7 +305,7 @@ mod tests { let result = check(&cookie, fingerprint, min..max).unwrap_err(); match result { - Error::ConnectionIdFromFuture { .. } => {} // Expected error + Error::CookieValueFromFuture { .. } => {} // Expected error _ => panic!("Expected ConnectionIdFromFuture error"), } } diff --git a/src/servers/udp/error.rs b/src/servers/udp/error.rs index 5996cae73..cda562aed 100644 --- a/src/servers/udp/error.rs +++ b/src/servers/udp/error.rs @@ -1,7 +1,7 @@ //! Error types for the UDP server. 
use std::panic::Location; -use aquatic_udp_protocol::ConnectionId; +use aquatic_udp_protocol::{ConnectionId, RequestParseError}; use derive_more::derive::Display; use thiserror::Error; use torrust_tracker_located_error::LocatedError; @@ -13,17 +13,17 @@ pub struct ConnectionCookie(pub ConnectionId); /// Error returned by the UDP server. #[derive(Error, Debug)] pub enum Error { - #[error("the issue time should be a normal floating point number")] - InvalidCookieIssueTime { invalid_value: f64 }, + #[error("cookie value is not normal: {not_normal_value}")] + CookieValueNotNormal { not_normal_value: f64 }, - #[error("connection id did not produce a normal value")] - ConnectionIdNotNormal { not_normal_value: f64 }, + #[error("cookie value is expired: {expired_value}, expected > {min_value}")] + CookieValueExpired { expired_value: f64, min_value: f64 }, - #[error("connection id produced an expired value")] - ConnectionIdExpired { expired_value: f64, min_value: f64 }, + #[error("cookie value is from future: {future_value}, expected < {max_value}")] + CookieValueFromFuture { future_value: f64, max_value: f64 }, - #[error("connection id produces a future value")] - ConnectionIdFromFuture { future_value: f64, max_value: f64 }, + #[error("error when phrasing request: {request_parse_error:?}")] + RequestParseError { request_parse_error: RequestParseError }, /// Error returned when the domain tracker returns an error. #[error("tracker server error: {source}")] @@ -48,3 +48,9 @@ pub enum Error { #[error("domain tracker requires authentication but is not supported in current UDP implementation. 
Location: {location}")] TrackerAuthenticationRequired { location: &'static Location<'static> }, } + +impl From for Error { + fn from(request_parse_error: RequestParseError) -> Self { + Self::RequestParseError { request_parse_error } + } +} diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 814ee5e9e..af22b263d 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -1,34 +1,30 @@ //! Handlers for the UDP server. -use std::fmt; use std::hash::{DefaultHasher, Hash, Hasher as _}; use std::net::{IpAddr, SocketAddr}; use std::ops::Range; -use std::panic::Location; use std::sync::Arc; use std::time::Instant; use aquatic_udp_protocol::{ AnnounceInterval, AnnounceRequest, AnnounceResponse, AnnounceResponseFixedData, ConnectRequest, ConnectResponse, - ErrorResponse, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, - ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, + ErrorResponse, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfDownloads, NumberOfPeers, Port, Request, RequestParseError, Response, + ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_clock::clock::Time as _; -use torrust_tracker_located_error::DynError; use tracing::{instrument, Level}; use uuid::Uuid; use zerocopy::network_endian::I32; use super::connection_cookie::{check, make}; use super::RawRequest; -use crate::core::{statistics, PeersWanted, ScrapeData, Tracker}; +use crate::core::{statistics, PeersWanted, Tracker}; use crate::servers::udp::error::Error; -use crate::servers::udp::logging::{log_bad_request, log_error_response, log_request, log_response}; use crate::servers::udp::peer_builder; use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; use crate::CurrentClock; -#[derive(Debug)] +#[derive(Debug, Clone, PartialEq)] pub(super) struct CookieTimeValues { pub(super) issue_time: 
f64, pub(super) valid_range: Range, @@ -55,60 +51,40 @@ impl CookieTimeValues { /// - Delegating the request to the correct handler depending on the request type. /// /// It will return an `Error` response if the request is invalid. -#[instrument(skip(udp_request, tracker, local_addr), ret(level = Level::TRACE))] +#[instrument(fields(request_id), skip(udp_request, tracker, cookie_time_values), ret(level = Level::TRACE))] pub(crate) async fn handle_packet( udp_request: RawRequest, tracker: &Tracker, local_addr: SocketAddr, cookie_time_values: CookieTimeValues, ) -> Response { + tracing::Span::current().record("request_id", Uuid::new_v4().to_string()); tracing::debug!("Handling Packets: {udp_request:?}"); let start_time = Instant::now(); - let request_id = RequestId::make(&udp_request); - - match Request::parse_bytes(&udp_request.payload[..udp_request.payload.len()], MAX_SCRAPE_TORRENTS).map_err(|e| { - Error::InternalServer { - message: format!("{e:?}"), - location: Location::caller(), - } - }) { - Ok(request) => { - log_request(&request, &request_id, &local_addr); - - let transaction_id = match &request { - Request::Connect(connect_request) => connect_request.transaction_id, - Request::Announce(announce_request) => announce_request.transaction_id, - Request::Scrape(scrape_request) => scrape_request.transaction_id, - }; - - let response = match handle_request(request, udp_request.from, tracker, cookie_time_values).await { - Ok(response) => response, - Err(e) => handle_error(&e, transaction_id), - }; - - let latency = start_time.elapsed(); - - log_response(&response, &transaction_id, &request_id, &local_addr, latency); - - response - } - Err(e) => { - log_bad_request(&request_id); - - let response = handle_error( - &Error::BadRequest { - source: (Arc::new(e) as DynError).into(), - }, - TransactionId(I32::new(0)), - ); + let response = + match Request::parse_bytes(&udp_request.payload[..udp_request.payload.len()], MAX_SCRAPE_TORRENTS).map_err(Error::from) { + 
Ok(request) => match handle_request(request, udp_request.from, tracker, cookie_time_values.clone()).await { + Ok(response) => return response, + Err((e, transaction_id)) => { + handle_error( + udp_request.from, + tracker, + cookie_time_values.valid_range.clone(), + &e, + Some(transaction_id), + ) + .await + } + }, + Err(e) => handle_error(udp_request.from, tracker, cookie_time_values.valid_range.clone(), &e, None).await, + }; - log_error_response(&request_id); + let latency = start_time.elapsed(); + tracing::trace!(?latency, "responded"); - response - } - } + response } /// It dispatches the request to the correct handler. @@ -116,23 +92,25 @@ pub(crate) async fn handle_packet( /// # Errors /// /// If a error happens in the `handle_request` function, it will just return the `ServerError`. -#[instrument(skip(request, remote_addr, tracker))] +#[instrument(skip(request, remote_addr, tracker, cookie_time_values))] pub async fn handle_request( request: Request, remote_addr: SocketAddr, tracker: &Tracker, cookie_time_values: CookieTimeValues, -) -> Result { +) -> Result { tracing::trace!("handle request"); match request { Request::Connect(connect_request) => { - handle_connect(remote_addr, &connect_request, tracker, cookie_time_values.issue_time).await + Ok(handle_connect(remote_addr, &connect_request, tracker, cookie_time_values.issue_time).await) } Request::Announce(announce_request) => { handle_announce(remote_addr, &announce_request, tracker, cookie_time_values.valid_range).await } - Request::Scrape(scrape_request) => handle_scrape(remote_addr, &scrape_request, tracker).await, + Request::Scrape(scrape_request) => { + handle_scrape(remote_addr, &scrape_request, tracker, cookie_time_values.valid_range).await + } } } @@ -142,16 +120,18 @@ pub async fn handle_request( /// # Errors /// /// This function does not ever return an error. 
-#[instrument(skip(tracker), err, ret(level = Level::TRACE))] +#[instrument(fields(transaction_id), skip(tracker), ret(level = Level::TRACE))] pub async fn handle_connect( remote_addr: SocketAddr, request: &ConnectRequest, tracker: &Tracker, cookie_issue_time: f64, -) -> Result { +) -> Response { + tracing::Span::current().record("transaction_id", request.transaction_id.0.to_string()); + tracing::trace!("handle connect"); - let connection_id = make(make_remote_fingerprint(&remote_addr), cookie_issue_time)?; + let connection_id = make(gen_remote_fingerprint(&remote_addr), cookie_issue_time).expect("it should be a normal value"); let response = ConnectResponse { transaction_id: request.transaction_id, @@ -168,7 +148,7 @@ pub async fn handle_connect( } } - Ok(Response::from(response)) + Response::from(response) } /// It handles the `Announce` request. Refer to [`Announce`](crate::servers::udp#announce) @@ -177,38 +157,41 @@ pub async fn handle_connect( /// # Errors /// /// If a error happens in the `handle_announce` function, it will just return the `ServerError`. 
-#[instrument(skip(tracker), err, ret(level = Level::TRACE))] +#[instrument(fields(transaction_id, connection_id, info_hash), skip(tracker), ret(level = Level::TRACE))] pub async fn handle_announce( remote_addr: SocketAddr, - announce_request: &AnnounceRequest, + request: &AnnounceRequest, tracker: &Tracker, cookie_valid_range: Range, -) -> Result { - tracing::trace!("handle announce"); +) -> Result { + tracing::Span::current() + .record("transaction_id", request.transaction_id.0.to_string()) + .record("connection_id", request.connection_id.0.to_string()) + .record("info_hash", InfoHash::from_bytes(&request.info_hash.0).to_hex_string()); - // Authentication - if tracker.requires_authentication() { - return Err(Error::TrackerAuthenticationRequired { - location: Location::caller(), - }); - } + tracing::trace!("handle announce"); check( - &announce_request.connection_id, - make_remote_fingerprint(&remote_addr), + &request.connection_id, + gen_remote_fingerprint(&remote_addr), cookie_valid_range, - )?; + ) + .map_err(|e| (e, request.transaction_id))?; - let info_hash = announce_request.info_hash.into(); + let info_hash = request.info_hash.into(); let remote_client_ip = remote_addr.ip(); // Authorization - tracker.authorize(&info_hash).await.map_err(|e| Error::TrackerError { - source: (Arc::new(e) as Arc).into(), - })?; + tracker + .authorize(&info_hash) + .await + .map_err(|e| Error::TrackerError { + source: (Arc::new(e) as Arc).into(), + }) + .map_err(|e| (e, request.transaction_id))?; - let mut peer = peer_builder::from_request(announce_request, &remote_client_ip); - let peers_wanted: PeersWanted = i32::from(announce_request.peers_wanted.0).into(); + let mut peer = peer_builder::from_request(request, &remote_client_ip); + let peers_wanted: PeersWanted = i32::from(request.peers_wanted.0).into(); let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted); @@ -225,7 +208,7 @@ pub async fn handle_announce( if remote_addr.is_ipv4() { let 
announce_response = AnnounceResponse { fixed: AnnounceResponseFixedData { - transaction_id: announce_request.transaction_id, + transaction_id: request.transaction_id, announce_interval: AnnounceInterval(I32::new(i64::from(tracker.get_announce_policy().interval) as i32)), leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), seeders: NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), @@ -250,7 +233,7 @@ pub async fn handle_announce( } else { let announce_response = AnnounceResponse { fixed: AnnounceResponseFixedData { - transaction_id: announce_request.transaction_id, + transaction_id: request.transaction_id, announce_interval: AnnounceInterval(I32::new(i64::from(tracker.get_announce_policy().interval) as i32)), leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), seeders: NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), @@ -281,21 +264,33 @@ pub async fn handle_announce( /// # Errors /// /// This function does not ever return an error. 
-#[instrument(skip(tracker), err, ret(level = Level::TRACE))] -pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tracker: &Tracker) -> Result { +#[instrument(fields(transaction_id, connection_id), skip(tracker), ret(level = Level::TRACE))] +pub async fn handle_scrape( + remote_addr: SocketAddr, + request: &ScrapeRequest, + tracker: &Tracker, + cookie_valid_range: Range, +) -> Result { + tracing::Span::current() + .record("transaction_id", request.transaction_id.0.to_string()) + .record("connection_id", request.connection_id.0.to_string()); + tracing::trace!("handle scrape"); + check( + &request.connection_id, + gen_remote_fingerprint(&remote_addr), + cookie_valid_range, + ) + .map_err(|e| (e, request.transaction_id))?; + // Convert from aquatic infohashes let mut info_hashes: Vec = vec![]; for info_hash in &request.info_hashes { info_hashes.push((*info_hash).into()); } - let scrape_data = if tracker.requires_authentication() { - ScrapeData::zeroed(&info_hashes) - } else { - tracker.scrape(&info_hashes).await - }; + let scrape_data = tracker.scrape(&info_hashes).await; let mut torrent_stats: Vec = Vec::new(); @@ -332,36 +327,59 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra Ok(Response::from(response)) } -fn handle_error(e: &Error, transaction_id: TransactionId) -> Response { - let message = e.to_string(); +#[instrument(fields(transaction_id), skip(tracker), ret(level = Level::TRACE))] +async fn handle_error( + remote_addr: SocketAddr, + tracker: &Tracker, + cookie_valid_range: Range, + e: &Error, + transaction_id: Option, +) -> Response { + tracing::trace!("handle error"); + + let e = if let Error::RequestParseError { request_parse_error } = e { + match request_parse_error { + RequestParseError::Sendable { + connection_id, + transaction_id, + err, + } => { + if let Err(e) = check(connection_id, gen_remote_fingerprint(&remote_addr), cookie_valid_range) { + (e.to_string(), Some(*transaction_id)) + } else 
{ + ((*err).to_string(), Some(*transaction_id)) + } + } + RequestParseError::Unsendable { err } => (err.to_string(), transaction_id), + } + } else { + (e.to_string(), transaction_id) + }; + + if e.1.is_some() { + // send stats event + match remote_addr { + SocketAddr::V4(_) => { + tracker.send_stats_event(statistics::Event::Udp4Error).await; + } + SocketAddr::V6(_) => { + tracker.send_stats_event(statistics::Event::Udp6Error).await; + } + } + } + Response::from(ErrorResponse { - transaction_id, - message: message.into(), + transaction_id: e.1.unwrap_or(TransactionId(I32::new(0))), + message: e.0.into(), }) } -fn make_remote_fingerprint(remote_addr: &SocketAddr) -> u64 { +fn gen_remote_fingerprint(remote_addr: &SocketAddr) -> u64 { let mut state = DefaultHasher::new(); remote_addr.hash(&mut state); state.finish() } -/// An identifier for a request. -#[derive(Debug, Clone)] -pub struct RequestId(Uuid); - -impl RequestId { - fn make(_request: &RawRequest) -> RequestId { - RequestId(Uuid::new_v4()) - } -} - -impl fmt::Display for RequestId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.0) - } -} - #[cfg(test)] mod tests { @@ -375,7 +393,7 @@ mod tests { use torrust_tracker_primitives::peer; use torrust_tracker_test_helpers::configuration; - use super::make_remote_fingerprint; + use super::gen_remote_fingerprint; use crate::core::services::tracker_factory; use crate::core::Tracker; use crate::CurrentClock; @@ -392,10 +410,6 @@ mod tests { initialized_tracker(&configuration::ephemeral_public()) } - fn private_tracker() -> Arc { - initialized_tracker(&configuration::ephemeral_private()) - } - fn whitelisted_tracker() -> Arc { initialized_tracker(&configuration::ephemeral_listed()) } @@ -409,7 +423,7 @@ mod tests { } fn sample_ipv4_remote_addr_fingerprint() -> u64 { - make_remote_fingerprint(&sample_ipv4_socket_address()) + gen_remote_fingerprint(&sample_ipv4_socket_address()) } fn sample_ipv6_remote_addr() -> SocketAddr { @@ -417,7 
+431,7 @@ mod tests { } fn sample_ipv6_remote_addr_fingerprint() -> u64 { - make_remote_fingerprint(&sample_ipv6_socket_address()) + gen_remote_fingerprint(&sample_ipv6_socket_address()) } fn sample_ipv4_socket_address() -> SocketAddr { @@ -527,9 +541,7 @@ mod tests { transaction_id: TransactionId(0i32.into()), }; - let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker(), sample_issue_time()) - .await - .unwrap(); + let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker(), sample_issue_time()).await; assert_eq!( response, @@ -546,9 +558,7 @@ mod tests { transaction_id: TransactionId(0i32.into()), }; - let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker(), sample_issue_time()) - .await - .unwrap(); + let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker(), sample_issue_time()).await; assert_eq!( response, @@ -565,9 +575,7 @@ mod tests { transaction_id: TransactionId(0i32.into()), }; - let response = handle_connect(sample_ipv6_remote_addr(), &request, &public_tracker(), sample_issue_time()) - .await - .unwrap(); + let response = handle_connect(sample_ipv6_remote_addr(), &request, &public_tracker(), sample_issue_time()).await; assert_eq!( response, @@ -604,8 +612,7 @@ mod tests { &torrent_tracker, sample_issue_time(), ) - .await - .unwrap(); + .await; } #[tokio::test] @@ -632,8 +639,7 @@ mod tests { &torrent_tracker, sample_issue_time(), ) - .await - .unwrap(); + .await; } } @@ -726,8 +732,8 @@ mod tests { use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ - make_remote_fingerprint, public_tracker, sample_cookie_valid_range, sample_ipv4_socket_address, - sample_issue_time, tracker_configuration, TorrentPeerBuilder, + gen_remote_fingerprint, public_tracker, sample_cookie_valid_range, sample_ipv4_socket_address, sample_issue_time, 
+ tracker_configuration, TorrentPeerBuilder, }; use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; @@ -743,7 +749,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) @@ -769,7 +775,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .into(); let response = handle_announce(remote_addr, &request, &public_tracker(), sample_cookie_valid_range()) @@ -810,7 +816,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) @@ -845,7 +851,7 @@ mod tests { async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .into(); handle_announce(remote_addr, &request, &tracker, sample_cookie_valid_range()) @@ -909,7 +915,7 @@ mod tests { 
use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ - make_remote_fingerprint, public_tracker, sample_cookie_valid_range, sample_issue_time, TorrentPeerBuilder, + gen_remote_fingerprint, public_tracker, sample_cookie_valid_range, sample_issue_time, TorrentPeerBuilder, }; #[tokio::test] @@ -924,7 +930,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) @@ -965,7 +971,7 @@ mod tests { use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ - make_remote_fingerprint, public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, sample_issue_time, + gen_remote_fingerprint, public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, sample_issue_time, tracker_configuration, TorrentPeerBuilder, }; use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; @@ -983,7 +989,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) @@ -1012,7 +1018,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); let request = AnnounceRequestBuilder::default() - 
.with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .into(); let response = handle_announce(remote_addr, &request, &public_tracker(), sample_cookie_valid_range()) @@ -1053,7 +1059,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) @@ -1091,7 +1097,7 @@ mod tests { let client_port = 8080; let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .into(); handle_announce(remote_addr, &request, &tracker, sample_cookie_valid_range()) @@ -1138,7 +1144,7 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); let announce_request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .into(); handle_announce(remote_addr, &announce_request, &tracker, sample_cookie_valid_range()) @@ -1158,7 +1164,7 @@ mod tests { use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ - make_remote_fingerprint, sample_cookie_valid_range, sample_issue_time, TrackerConfigurationBuilder, + gen_remote_fingerprint, sample_cookie_valid_range, 
sample_issue_time, TrackerConfigurationBuilder, }; #[tokio::test] @@ -1181,7 +1187,7 @@ mod tests { let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) @@ -1218,11 +1224,13 @@ mod tests { TransactionId, }; - use super::{make_remote_fingerprint, TorrentPeerBuilder}; + use super::{gen_remote_fingerprint, TorrentPeerBuilder}; use crate::core::{self}; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_scrape; - use crate::servers::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr, sample_issue_time}; + use crate::servers::udp::handlers::tests::{ + public_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, sample_issue_time, + }; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { TorrentScrapeStatistics { @@ -1240,12 +1248,14 @@ mod tests { let info_hashes = vec![info_hash]; let request = ScrapeRequest { - connection_id: make(make_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap(), + connection_id: make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap(), transaction_id: TransactionId(0i32.into()), info_hashes, }; - let response = handle_scrape(remote_addr, &request, &public_tracker()).await.unwrap(); + let response = handle_scrape(remote_addr, &request, &public_tracker(), sample_cookie_valid_range()) + .await + .unwrap(); let expected_torrent_stats = vec![zeroed_torrent_statistics()]; @@ -1274,7 +1284,7 @@ mod tests { let info_hashes = vec![*info_hash]; ScrapeRequest { - connection_id: make(make_remote_fingerprint(remote_addr), sample_issue_time()).unwrap(), + connection_id: make(gen_remote_fingerprint(remote_addr), 
sample_issue_time()).unwrap(), transaction_id: TransactionId::new(0i32), info_hashes, } @@ -1288,7 +1298,9 @@ mod tests { let request = build_scrape_request(&remote_addr, &info_hash); - handle_scrape(remote_addr, &request, &tracker).await.unwrap() + handle_scrape(remote_addr, &request, &tracker, sample_cookie_valid_range()) + .await + .unwrap() } fn match_scrape_response(response: Response) -> Option { @@ -1320,45 +1332,6 @@ mod tests { } } - mod with_a_private_tracker { - - use aquatic_udp_protocol::InfoHash; - - use crate::servers::udp::handlers::handle_scrape; - use crate::servers::udp::handlers::tests::scrape_request::{ - add_a_sample_seeder_and_scrape, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, - }; - use crate::servers::udp::handlers::tests::{private_tracker, sample_ipv4_remote_addr}; - - #[tokio::test] - async fn should_return_zeroed_statistics_when_the_tracker_does_not_have_the_requested_torrent() { - let tracker = private_tracker(); - - let remote_addr = sample_ipv4_remote_addr(); - let non_existing_info_hash = InfoHash([0u8; 20]); - - let request = build_scrape_request(&remote_addr, &non_existing_info_hash); - - let torrent_stats = match_scrape_response(handle_scrape(remote_addr, &request, &tracker).await.unwrap()).unwrap(); - - let expected_torrent_stats = vec![zeroed_torrent_statistics()]; - - assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); - } - - #[tokio::test] - async fn should_return_zeroed_statistics_when_the_tracker_has_the_requested_torrent_because_authenticated_requests_are_not_supported_in_udp_tracker( - ) { - let tracker = private_tracker(); - - let torrent_stats = match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone()).await).unwrap(); - - let expected_torrent_stats = vec![zeroed_torrent_statistics()]; - - assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); - } - } - mod with_a_whitelisted_tracker { use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, 
TorrentScrapeStatistics}; @@ -1366,7 +1339,7 @@ mod tests { use crate::servers::udp::handlers::tests::scrape_request::{ add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; - use crate::servers::udp::handlers::tests::{sample_ipv4_remote_addr, whitelisted_tracker}; + use crate::servers::udp::handlers::tests::{sample_cookie_valid_range, sample_ipv4_remote_addr, whitelisted_tracker}; #[tokio::test] async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { @@ -1381,7 +1354,12 @@ mod tests { let request = build_scrape_request(&remote_addr, &info_hash); - let torrent_stats = match_scrape_response(handle_scrape(remote_addr, &request, &tracker).await.unwrap()).unwrap(); + let torrent_stats = match_scrape_response( + handle_scrape(remote_addr, &request, &tracker, sample_cookie_valid_range()) + .await + .unwrap(), + ) + .unwrap(); let expected_torrent_stats = vec![TorrentScrapeStatistics { seeders: NumberOfPeers(1.into()), @@ -1403,7 +1381,12 @@ mod tests { let request = build_scrape_request(&remote_addr, &info_hash); - let torrent_stats = match_scrape_response(handle_scrape(remote_addr, &request, &tracker).await.unwrap()).unwrap(); + let torrent_stats = match_scrape_response( + handle_scrape(remote_addr, &request, &tracker, sample_cookie_valid_range()) + .await + .unwrap(), + ) + .unwrap(); let expected_torrent_stats = vec![zeroed_torrent_statistics()]; @@ -1416,7 +1399,7 @@ mod tests { let info_hashes = vec![info_hash]; ScrapeRequest { - connection_id: make(make_remote_fingerprint(remote_addr), sample_issue_time()).unwrap(), + connection_id: make(gen_remote_fingerprint(remote_addr), sample_issue_time()).unwrap(), transaction_id: TransactionId(0i32.into()), info_hashes, } @@ -1431,7 +1414,9 @@ mod tests { use super::sample_scrape_request; use crate::core::{self, statistics}; use crate::servers::udp::handlers::handle_scrape; - use crate::servers::udp::handlers::tests::{sample_ipv4_remote_addr, 
tracker_configuration}; + use crate::servers::udp::handlers::tests::{ + sample_cookie_valid_range, sample_ipv4_remote_addr, tracker_configuration, + }; #[tokio::test] async fn should_send_the_upd4_scrape_event() { @@ -1453,9 +1438,14 @@ mod tests { .unwrap(), ); - handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), &tracker) - .await - .unwrap(); + handle_scrape( + remote_addr, + &sample_scrape_request(&remote_addr), + &tracker, + sample_cookie_valid_range(), + ) + .await + .unwrap(); } } @@ -1468,7 +1458,9 @@ mod tests { use super::sample_scrape_request; use crate::core::{self, statistics}; use crate::servers::udp::handlers::handle_scrape; - use crate::servers::udp::handlers::tests::{sample_ipv6_remote_addr, tracker_configuration}; + use crate::servers::udp::handlers::tests::{ + sample_cookie_valid_range, sample_ipv6_remote_addr, tracker_configuration, + }; #[tokio::test] async fn should_send_the_upd6_scrape_event() { @@ -1490,9 +1482,14 @@ mod tests { .unwrap(), ); - handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), &tracker) - .await - .unwrap(); + handle_scrape( + remote_addr, + &sample_scrape_request(&remote_addr), + &tracker, + sample_cookie_valid_range(), + ) + .await + .unwrap(); } } } diff --git a/src/servers/udp/logging.rs b/src/servers/udp/logging.rs deleted file mode 100644 index a61668e83..000000000 --- a/src/servers/udp/logging.rs +++ /dev/null @@ -1,87 +0,0 @@ -//! Logging for UDP Tracker requests and responses. 
- -use std::net::SocketAddr; -use std::time::Duration; - -use aquatic_udp_protocol::{Request, Response, TransactionId}; -use bittorrent_primitives::info_hash::InfoHash; - -use super::handlers::RequestId; -use crate::servers::udp::UDP_TRACKER_LOG_TARGET; - -pub fn log_request(request: &Request, request_id: &RequestId, server_socket_addr: &SocketAddr) { - let action = map_action_name(request); - - match &request { - Request::Connect(connect_request) => { - let transaction_id = connect_request.transaction_id; - let transaction_id_str = transaction_id.0.to_string(); - - tracing::span!( - target: UDP_TRACKER_LOG_TARGET, - tracing::Level::INFO, "request", server_socket_addr = %server_socket_addr, action = %action, transaction_id = %transaction_id_str, request_id = %request_id); - } - Request::Announce(announce_request) => { - let transaction_id = announce_request.transaction_id; - let transaction_id_str = transaction_id.0.to_string(); - let connection_id_str = announce_request.connection_id.0.to_string(); - let info_hash_str = InfoHash::from_bytes(&announce_request.info_hash.0).to_hex_string(); - - tracing::span!( - target: UDP_TRACKER_LOG_TARGET, - tracing::Level::INFO, "request", server_socket_addr = %server_socket_addr, action = %action, transaction_id = %transaction_id_str, request_id = %request_id, connection_id = %connection_id_str, info_hash = %info_hash_str); - } - Request::Scrape(scrape_request) => { - let transaction_id = scrape_request.transaction_id; - let transaction_id_str = transaction_id.0.to_string(); - let connection_id_str = scrape_request.connection_id.0.to_string(); - - tracing::span!( - target: UDP_TRACKER_LOG_TARGET, - tracing::Level::INFO, - "request", - server_socket_addr = %server_socket_addr, - action = %action, - transaction_id = %transaction_id_str, - request_id = %request_id, - connection_id = %connection_id_str); - } - }; -} - -fn map_action_name(udp_request: &Request) -> String { - match udp_request { - Request::Connect(_connect_request) 
=> "CONNECT".to_owned(), - Request::Announce(_announce_request) => "ANNOUNCE".to_owned(), - Request::Scrape(_scrape_request) => "SCRAPE".to_owned(), - } -} - -pub fn log_response( - _response: &Response, - transaction_id: &TransactionId, - request_id: &RequestId, - server_socket_addr: &SocketAddr, - latency: Duration, -) { - tracing::span!( - target: UDP_TRACKER_LOG_TARGET, - tracing::Level::INFO, - "response", - server_socket_addr = %server_socket_addr, - transaction_id = %transaction_id.0.to_string(), - request_id = %request_id, - latency_ms = %latency.as_millis()); -} - -pub fn log_bad_request(request_id: &RequestId) { - tracing::span!( - target: UDP_TRACKER_LOG_TARGET, - tracing::Level::INFO, "bad request", request_id = %request_id); -} - -pub fn log_error_response(request_id: &RequestId) { - tracing::span!( - target: UDP_TRACKER_LOG_TARGET, - tracing::Level::INFO, "response", request_id = %request_id); -} diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index d41bc8b3f..9b4d90c89 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -641,7 +641,6 @@ use std::net::SocketAddr; pub mod connection_cookie; pub mod error; pub mod handlers; -pub mod logging; pub mod peer_builder; pub mod server; diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index 348446876..c8bac8098 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -30,7 +30,9 @@ impl Launcher { /// # Panics /// /// It panics if unable to bind to udp socket, and get the address from the udp socket. - /// It also panics if unable to send address of socket. + /// It panics if unable to send address of socket. + /// It panics if the udp server is loaded when the tracker is private. 
+ /// #[instrument(skip(tracker, bind_to, tx_start, rx_halt))] pub async fn run_with_graceful_shutdown( tracker: Arc, @@ -41,6 +43,11 @@ impl Launcher { ) { tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting on: {bind_to}"); + if tracker.requires_authentication() { + tracing::error!("udp services cannot be used for private trackers"); + panic!("it should not use udp if using authentication"); + } + let socket = tokio::time::timeout(Duration::from_millis(5000), BoundSocket::new(bind_to)) .await .expect("it should bind to the socket within five seconds"); diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index 2c8e8d6a5..463dc563e 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -43,9 +43,11 @@ async fn should_allow_getting_tracker_statistics() { udp4_connections_handled: 0, udp4_announces_handled: 0, udp4_scrapes_handled: 0, + udp4_errors_handled: 0, udp6_connections_handled: 0, udp6_announces_handled: 0, udp6_scrapes_handled: 0, + udp6_errors_handled: 0, }, ) .await; diff --git a/tests/servers/udp/asserts.rs b/tests/servers/udp/asserts.rs index bf8fb6728..37c848e06 100644 --- a/tests/servers/udp/asserts.rs +++ b/tests/servers/udp/asserts.rs @@ -1,9 +1,9 @@ use aquatic_udp_protocol::{Response, TransactionId}; -pub fn is_error_response(response: &Response, error_message: &str) -> bool { +pub fn get_error_response_message(response: &Response) -> Option { match response { - Response::Error(error_response) => error_response.message.starts_with(error_message), - _ => false, + Response::Error(error_response) => Some(error_response.message.to_string()), + _ => None, } } diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index 73f7ce368..b12a8a900 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -13,7 +13,7 @@ use torrust_tracker_test_helpers::configuration; use 
tracing::level_filters::LevelFilter; use crate::common::logging::{tracing_stderr_init, INIT}; -use crate::servers::udp::asserts::is_error_response; +use crate::servers::udp::asserts::get_error_response_message; use crate::servers::udp::Started; fn empty_udp_request() -> [u8; MAX_PACKET_SIZE] { @@ -64,7 +64,7 @@ async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_req let response = Response::parse_bytes(&response, true).unwrap(); - assert!(is_error_response(&response, "bad request")); + assert_eq!(get_error_response_message(&response).unwrap(), "Protocol identifier missing"); env.stop().await; } From 66a8648416d2e84d57d848939420c6cab69ee080 Mon Sep 17 00:00:00 2001 From: Power2All Date: Wed, 27 Nov 2024 15:07:58 +0000 Subject: [PATCH 030/802] fix: [#325] windows compiling --- Cargo.lock | 124 +---------------------------------------------------- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 124 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5d07ba62f..6ece5ab7d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -357,33 +357,6 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" -[[package]] -name = "aws-lc-rs" -version = "1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdd82dba44d209fddb11c190e0a94b78651f95299598e472215667417a03ff1d" -dependencies = [ - "aws-lc-sys", - "mirai-annotations", - "paste", - "zeroize", -] - -[[package]] -name = "aws-lc-sys" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df7a4168111d7eb622a31b214057b8509c0a7e1794f44c546d742330dc793972" -dependencies = [ - "bindgen 0.69.5", - "cc", - "cmake", - "dunce", - "fs_extra", - "libc", - "paste", -] - [[package]] name = "axum" version = "0.7.7" @@ -555,29 +528,6 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" -[[package]] -name = "bindgen" -version = "0.69.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" -dependencies = [ - "bitflags", - "cexpr", - "clang-sys", - "itertools 0.12.1", - "lazy_static", - "lazycell", - "log", - "prettyplease", - "proc-macro2", - "quote", - "regex", - "rustc-hash", - "shlex", - "syn 2.0.87", - "which", -] - [[package]] name = "bindgen" version = "0.70.1" @@ -1254,12 +1204,6 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" -[[package]] -name = "dunce" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" - [[package]] name = "either" version = "1.13.0" @@ -1481,12 +1425,6 @@ dependencies = [ "syn 2.0.87", ] -[[package]] -name = "fs_extra" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" - [[package]] name = "funty" version = "2.0.0" @@ -1743,15 +1681,6 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" -[[package]] -name = "home" -version = "0.5.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" -dependencies = [ - "windows-sys 0.52.0", -] - [[package]] name = "http" version = "1.1.0" @@ -2117,15 +2046,6 @@ dependencies = [ "either", ] -[[package]] -name = "itertools" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" 
-dependencies = [ - "either", -] - [[package]] name = "itertools" version = "0.13.0" @@ -2174,12 +2094,6 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - [[package]] name = "libc" version = "0.2.162" @@ -2321,12 +2235,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "mirai-annotations" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" - [[package]] name = "mockall" version = "0.13.0" @@ -2413,7 +2321,7 @@ checksum = "478b0ff3f7d67b79da2b96f56f334431aef65e15ba4b29dd74a4236e29582bdc" dependencies = [ "base64 0.21.7", "bigdecimal", - "bindgen 0.70.1", + "bindgen", "bitflags", "bitvec", "btoi", @@ -2655,12 +2563,6 @@ dependencies = [ "windows-targets", ] -[[package]] -name = "paste" -version = "1.0.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" - [[package]] name = "pear" version = "0.2.9" @@ -2877,16 +2779,6 @@ dependencies = [ "termtree", ] -[[package]] -name = "prettyplease" -version = "0.2.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" -dependencies = [ - "proc-macro2", - "syn 2.0.87", -] - [[package]] name = "proc-macro-crate" version = "3.2.0" @@ -3318,7 +3210,6 @@ version = "0.23.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" dependencies = [ - "aws-lc-rs", "once_cell", "rustls-pki-types", "rustls-webpki", @@ -3347,7 +3238,6 
@@ version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ - "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -4556,18 +4446,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "which" -version = "4.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" -dependencies = [ - "either", - "home", - "once_cell", - "rustix", -] - [[package]] name = "winapi" version = "0.3.9" diff --git a/Cargo.toml b/Cargo.toml index 35b1ac9a7..0a40f4917 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,7 +35,7 @@ aquatic_udp_protocol = "0" axum = { version = "0", features = ["macros"] } axum-client-ip = "0" axum-extra = { version = "0", features = ["query"] } -axum-server = { version = "0", features = ["tls-rustls"] } +axum-server = { version = "0", features = ["tls-rustls-no-provider"] } bittorrent-primitives = "0.1.0" bittorrent-tracker-client = { version = "3.0.0-develop", path = "packages/tracker-client" } blowfish = "0" From 38baaea557fc4e045ab9eac7af70fa07543e6ba9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 27 Nov 2024 15:59:35 +0000 Subject: [PATCH 031/802] ci: [#1099] Add a new job to the testing workflow to test compilation in different OSs --- .github/workflows/testing.yaml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index 124b13b5a..74dc254ef 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -85,6 +85,27 @@ jobs: name: Check Unused Dependencies run: cargo machete + build: + name: Build on ${{ matrix.os }} (${{ matrix.toolchain }}) + runs-on: ${{ matrix.os }} + + strategy: + matrix: + os: [ubuntu-latest, macos-latest, windows-latest] + toolchain: [nightly, stable] + + steps: + - name: Checkout code + uses: 
actions/checkout@v4 + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ matrix.toolchain }} + + - name: Build project + run: cargo build --verbose unit: name: Units From c3ded10ec1d2716b78ac619ee36798f54e15c03c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 29 Nov 2024 15:49:09 +0000 Subject: [PATCH 032/802] chore(deps): udpate dependencies --- Cargo.lock | 250 ++++++++++++++++++++++++++++------------------------- 1 file changed, 134 insertions(+), 116 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6ece5ab7d..fe8d93ff8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -219,9 +219,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.17" +version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cb8f1d480b0ea3783ab015936d2a55c87e219676f0c0b7dec61494043f21857" +checksum = "df895a515f70646414f4b45c0b79082783b80552b373a68283012928df56f522" dependencies = [ "brotli", "flate2", @@ -333,7 +333,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -359,9 +359,9 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "axum" -version = "0.7.7" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" dependencies = [ "async-trait", "axum-core", @@ -384,7 +384,7 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper 1.0.1", + "sync_wrapper 1.0.2", "tokio", "tower 0.5.1", "tower-layer", @@ -418,7 +418,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 1.0.1", + "sync_wrapper 1.0.2", "tower-layer", "tower-service", "tracing", @@ -426,25 +426,26 @@ 
dependencies = [ [[package]] name = "axum-extra" -version = "0.9.4" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73c3220b188aea709cf1b6c5f9b01c3bd936bb08bd2b5184a12b35ac8131b1f9" +checksum = "c794b30c904f0a1c2fb7740f7df7f7972dfaa14ef6f57cb6178dc63e5dca2f04" dependencies = [ "axum", "axum-core", "bytes", + "fastrand", "futures-util", "http", "http-body", "http-body-util", "mime", + "multer", "pin-project-lite", "serde", "serde_html_form", "tower 0.5.1", "tower-layer", "tower-service", - "tracing", ] [[package]] @@ -455,7 +456,7 @@ checksum = "57d123550fa8d071b7255cb0cc04dc302baa6c8c4a79f55701552684d8399bce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -543,7 +544,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -653,7 +654,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -722,9 +723,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.19.0" +version = "1.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8334215b81e418a0a7bdb8ef0849474f40bb10c8b71f1c4ed315cff49f32494d" +checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a" [[package]] name = "byteorder" @@ -734,9 +735,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" +checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" [[package]] name = "camino" @@ -764,9 +765,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47" +checksum = "f34d93e62b03caf570cccc334cbc6c2fceca82f39211051345108adcba3eebdc" dependencies = [ "jobserver", "libc", @@ -886,7 +887,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -897,9 +898,9 @@ checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7" [[package]] name = "cmake" -version = "0.1.51" +version = "0.1.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a" +checksum = "c682c223677e0e5b6b7f63a64b9351844c3f1b1678a68b7ee617e30fb082620e" dependencies = [ "cc", ] @@ -950,9 +951,9 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca741a962e1b0bff6d724a1a0958b686406e853bb14061f218562e1896f95e6" +checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" dependencies = [ "libc", ] @@ -1107,7 +1108,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1118,7 +1119,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1162,7 +1163,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "unicode-xid", ] @@ -1174,7 +1175,7 @@ checksum = "65f152f4b8559c4da5d574bafc7af85454d706b4c5fe8b530d508cacbb6807ea" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1195,7 +1196,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] 
[[package]] @@ -1237,12 +1238,12 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.9" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -1398,7 +1399,7 @@ checksum = "e99b8b3c28ae0e84b604c75f721c21dc77afb3706076af5e8216d15fd1deaae3" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1410,7 +1411,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1422,7 +1423,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1500,7 +1501,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1586,9 +1587,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" +checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" dependencies = [ "atomic-waker", "bytes", @@ -1633,9 +1634,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.1" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" +checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" dependencies = [ "allocator-api2", "equivalent", @@ -1729,9 +1730,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] 
name = "hyper" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbbff0a806a4728c99295b254c8838933b5b082d75e3cb70c8dab21fdfbcfa9a" +checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" dependencies = [ "bytes", "futures-channel", @@ -1938,7 +1939,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -1986,7 +1987,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.15.1", + "hashbrown 0.15.2", "serde", ] @@ -2057,9 +2058,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.11" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" [[package]] name = "jobserver" @@ -2096,9 +2097,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.162" +version = "0.2.167" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" +checksum = "09d6582e104315a817dff97f75133544b2e094ee22447d2acf4a74e189ba06fc" [[package]] name = "libloading" @@ -2146,9 +2147,9 @@ checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "litemap" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "643cb0b8d4fcc284004d5fd0d67ccf61dfffadb7f75e1e71bc420f4688a3a704" +checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" [[package]] name = "local-ip-address" @@ -2187,7 +2188,7 @@ version 
= "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.1", + "hashbrown 0.15.2", ] [[package]] @@ -2237,9 +2238,9 @@ dependencies = [ [[package]] name = "mockall" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a" +checksum = "39a6bfcc6c8c7eed5ee98b9c3e33adc726054389233e201c95dab2d41a3839d2" dependencies = [ "cfg-if", "downcast", @@ -2251,14 +2252,31 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020" +checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898" dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", +] + +[[package]] +name = "multer" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83e87776546dc87511aa5ee218730c92b666d7264ab6ed41f9d215af9cd5224b" +dependencies = [ + "bytes", + "encoding_rs", + "futures-util", + "http", + "httparse", + "memchr", + "mime", + "spin", + "version_check", ] [[package]] @@ -2308,7 +2326,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "termcolor", "thiserror 1.0.69", ] @@ -2507,7 +2525,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2583,7 +2601,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -2657,7 +2675,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.87", + "syn 2.0.89", ] [[package]] @@ -2734,9 +2752,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" +checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" [[package]] name = "powerfmt" @@ -2807,14 +2825,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] name = "proc-macro2" -version = "1.0.89" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] @@ -2827,7 +2845,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "version_check", "yansi", ] @@ -3044,7 +3062,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 1.0.1", + "sync_wrapper 1.0.2", "system-configuration", "tokio", "tokio-native-tls", @@ -3136,7 +3154,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.87", + "syn 2.0.89", "unicode-ident", ] @@ -3193,9 +3211,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.40" +version = "0.38.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99e4ea3e1cdc4b559b8e5650f9c8e5998e3e5c1343b4eaf034565f32318d63c0" +checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" dependencies = [ "bitflags", "errno", @@ -3206,9 +3224,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.16" +version = "0.23.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"eee87ff5d9b36712a58574e12e9f0ea80f915a5b0ac518d322b24a465617925e" +checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" dependencies = [ "once_cell", "rustls-pki-types", @@ -3272,9 +3290,9 @@ checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" [[package]] name = "schannel" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" dependencies = [ "windows-sys 0.59.0", ] @@ -3365,7 +3383,7 @@ checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -3383,9 +3401,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.132" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" +checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ "indexmap 2.6.0", "itoa", @@ -3412,7 +3430,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -3463,7 +3481,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -3541,9 +3559,9 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" dependencies = [ "libc", "windows-sys 0.52.0", @@ -3602,9 +3620,9 @@ dependencies = [ [[package]] name = 
"syn" -version = "2.0.87" +version = "2.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25aa4ce346d03a6dcd68dd8b4010bcb74e54e62c90c573f394c46eae99aba32d" +checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" dependencies = [ "proc-macro2", "quote", @@ -3619,9 +3637,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "sync_wrapper" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" dependencies = [ "futures-core", ] @@ -3634,7 +3652,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -3735,7 +3753,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -3746,7 +3764,7 @@ checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -3850,7 +3868,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] @@ -4128,9 +4146,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8437150ab6bbc8c5f0f519e3d5ed4aa883a83dd4cdd3d1b21f9482936046cb97" +checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "async-compression", "bitflags", @@ -4161,9 +4179,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" 
-version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite", @@ -4173,20 +4191,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", "valuable", @@ -4264,9 +4282,9 @@ dependencies = [ [[package]] name = "unicode-ident" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" +checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" [[package]] name = "unicode-xid" @@ -4282,9 +4300,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.3" +version = "2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d157f1b96d14500ffdc1f10ba712e780825526c03d9a49b4d0324b0d9113ada" +checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" dependencies = [ "form_urlencoded", "idna", @@ -4391,7 +4409,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "wasm-bindgen-shared", ] 
@@ -4425,7 +4443,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4636,9 +4654,9 @@ checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c5b1314b079b0930c31e3af543d8ee1757b1951ae1e1565ec704403a7240ca5" +checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" dependencies = [ "serde", "stable_deref_trait", @@ -4648,13 +4666,13 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" +checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "synstructure", ] @@ -4676,27 +4694,27 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] name = "zerofrom" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ec111ce797d0e0784a1116d0ddcdbea84322cd79e5d5ad173daeba4f93ab55" +checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" +checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", "synstructure", ] @@ -4725,7 +4743,7 @@ 
checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] From 555d5b8522eca6e2b097cc4c88436598b39d1646 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 29 Nov 2024 16:17:42 +0000 Subject: [PATCH 033/802] fix: [#1097] by extracting duplicate module --- .github/workflows/deployment.yaml | 1 + Cargo.lock | 12 + Cargo.toml | 1 + packages/http-protocol/Cargo.toml | 21 + packages/http-protocol/LICENSE | 661 ++++++++++++++++++ packages/http-protocol/README.md | 11 + packages/http-protocol/src/lib.rs | 2 + .../http-protocol/src}/percent_encoding.rs | 6 +- packages/tracker-client/Cargo.toml | 1 + packages/tracker-client/src/http/mod.rs | 1 - .../tracker-client/src/http/url_encoding.rs | 132 ---- src/servers/http/mod.rs | 1 - src/servers/http/v1/requests/announce.rs | 2 +- src/servers/http/v1/requests/scrape.rs | 2 +- 14 files changed, 715 insertions(+), 139 deletions(-) create mode 100644 packages/http-protocol/Cargo.toml create mode 100644 packages/http-protocol/LICENSE create mode 100644 packages/http-protocol/README.md create mode 100644 packages/http-protocol/src/lib.rs rename {src/servers/http => packages/http-protocol/src}/percent_encoding.rs (94%) delete mode 100644 packages/tracker-client/src/http/url_encoding.rs diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 59913d476..1e0f59b43 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -55,6 +55,7 @@ jobs: env: CARGO_REGISTRY_TOKEN: "${{ secrets.TORRUST_UPDATE_CARGO_REGISTRY_TOKEN }}" run: | + cargo publish -p bittorrent-http-protocol cargo publish -p bittorrent-tracker-client cargo publish -p torrust-tracker cargo publish -p torrust-tracker-client diff --git a/Cargo.lock b/Cargo.lock index fe8d93ff8..2931b0f8f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -553,6 +553,16 @@ version = "2.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +[[package]] +name = "bittorrent-http-protocol" +version = "3.0.0-develop" +dependencies = [ + "aquatic_udp_protocol", + "bittorrent-primitives", + "percent-encoding", + "torrust-tracker-primitives", +] + [[package]] name = "bittorrent-primitives" version = "0.1.0" @@ -572,6 +582,7 @@ name = "bittorrent-tracker-client" version = "3.0.0-develop" dependencies = [ "aquatic_udp_protocol", + "bittorrent-http-protocol", "bittorrent-primitives", "derive_more", "hyper", @@ -3949,6 +3960,7 @@ dependencies = [ "axum-client-ip", "axum-extra", "axum-server", + "bittorrent-http-protocol", "bittorrent-primitives", "bittorrent-tracker-client", "blowfish", diff --git a/Cargo.toml b/Cargo.toml index 0a40f4917..f512dca92 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,6 +36,7 @@ axum = { version = "0", features = ["macros"] } axum-client-ip = "0" axum-extra = { version = "0", features = ["query"] } axum-server = { version = "0", features = ["tls-rustls-no-provider"] } +bittorrent-http-protocol = { version = "3.0.0-develop", path = "packages/http-protocol" } bittorrent-primitives = "0.1.0" bittorrent-tracker-client = { version = "3.0.0-develop", path = "packages/tracker-client" } blowfish = "0" diff --git a/packages/http-protocol/Cargo.toml b/packages/http-protocol/Cargo.toml new file mode 100644 index 000000000..4f20407b6 --- /dev/null +++ b/packages/http-protocol/Cargo.toml @@ -0,0 +1,21 @@ +[package] +description = "A library with the primitive types and functions for the BitTorrent HTTP tracker protocol." 
+keywords = ["api", "library", "primitives"] +name = "bittorrent-http-protocol" +readme = "README.md" + +authors.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +aquatic_udp_protocol = "0" +bittorrent-primitives = "0.1.0" +percent-encoding = "2" +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } diff --git a/packages/http-protocol/LICENSE b/packages/http-protocol/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/http-protocol/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". 
"Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/packages/http-protocol/README.md b/packages/http-protocol/README.md new file mode 100644 index 000000000..62de968d9 --- /dev/null +++ b/packages/http-protocol/README.md @@ -0,0 +1,11 @@ +# BitTorrent HTTP Tracker Protocol + +A library with the primitive types and functions used by BitTorrent HTTP trackers. + +## Documentation + +[Crate documentation](https://docs.rs/bittorrent-http-protocol). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/http-protocol/src/lib.rs b/packages/http-protocol/src/lib.rs new file mode 100644 index 000000000..44237d6fd --- /dev/null +++ b/packages/http-protocol/src/lib.rs @@ -0,0 +1,2 @@ +//! Primitive types and function for `BitTorrent` HTTP trackers. 
+pub mod percent_encoding; diff --git a/src/servers/http/percent_encoding.rs b/packages/http-protocol/src/percent_encoding.rs similarity index 94% rename from src/servers/http/percent_encoding.rs rename to packages/http-protocol/src/percent_encoding.rs index 323444cc7..b54c89a04 100644 --- a/src/servers/http/percent_encoding.rs +++ b/packages/http-protocol/src/percent_encoding.rs @@ -27,7 +27,7 @@ use torrust_tracker_primitives::peer; /// /// ```rust /// use std::str::FromStr; -/// use torrust_tracker::servers::http::percent_encoding::percent_decode_info_hash; +/// use bittorrent_http_protocol::percent_encoding::percent_decode_info_hash; /// use bittorrent_primitives::info_hash::InfoHash; /// use torrust_tracker_primitives::peer; /// @@ -60,7 +60,7 @@ pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result -//! - -//! - -use aquatic_udp_protocol::PeerId; -use bittorrent_primitives::info_hash::{self, InfoHash}; -use torrust_tracker_primitives::peer; - -/* code-review: this module is duplicated in torrust_tracker::servers::http::percent_encoding. - Should we move it to torrust_tracker_primitives? -*/ - -/// Percent decodes a percent encoded infohash. Internally an -/// [`InfoHash`] is a 20-byte array. -/// -/// For example, given the infohash `3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0`, -/// it's percent encoded representation is `%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0`. 
-/// -/// ```rust -/// use std::str::FromStr; -/// use bittorrent_tracker_client::http::url_encoding::percent_decode_info_hash; -/// use bittorrent_primitives::info_hash::InfoHash; -/// use torrust_tracker_primitives::peer; -/// -/// let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; -/// -/// let info_hash = percent_decode_info_hash(encoded_infohash).unwrap(); -/// -/// assert_eq!( -/// info_hash, -/// InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap() -/// ); -/// ``` -/// -/// # Errors -/// -/// Will return `Err` if the decoded bytes do not represent a valid -/// [`InfoHash`]. -pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result { - let bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); - InfoHash::try_from(bytes) -} - -/// Percent decodes a percent encoded peer id. Internally a peer [`Id`](PeerId) -/// is a 20-byte array. -/// -/// For example, given the peer id `*b"-qB00000000000000000"`, -/// it's percent encoded representation is `%2DqB00000000000000000`. -/// -/// ```rust -/// use std::str::FromStr; -/// -/// use aquatic_udp_protocol::PeerId; -/// use bittorrent_tracker_client::http::url_encoding::percent_decode_peer_id; -/// use bittorrent_primitives::info_hash::InfoHash; -/// -/// let encoded_peer_id = "%2DqB00000000000000000"; -/// -/// let peer_id = percent_decode_peer_id(encoded_peer_id).unwrap(); -/// -/// assert_eq!(peer_id, PeerId(*b"-qB00000000000000000")); -/// ``` -/// -/// # Errors -/// -/// Will return `Err` if if the decoded bytes do not represent a valid [`PeerId`]. -pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result { - let bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); - Ok(*peer::Id::try_from(bytes)?) 
-} - -#[cfg(test)] -mod tests { - use std::str::FromStr; - - use aquatic_udp_protocol::PeerId; - use bittorrent_primitives::info_hash::InfoHash; - - use crate::http::url_encoding::{percent_decode_info_hash, percent_decode_peer_id}; - - #[test] - fn it_should_decode_a_percent_encoded_info_hash() { - let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; - - let info_hash = percent_decode_info_hash(encoded_infohash).unwrap(); - - assert_eq!( - info_hash, - InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap() - ); - } - - #[test] - fn it_should_fail_decoding_an_invalid_percent_encoded_info_hash() { - let invalid_encoded_infohash = "invalid percent-encoded infohash"; - - let info_hash = percent_decode_info_hash(invalid_encoded_infohash); - - assert!(info_hash.is_err()); - } - - #[test] - fn it_should_decode_a_percent_encoded_peer_id() { - let encoded_peer_id = "%2DqB00000000000000000"; - - let peer_id = percent_decode_peer_id(encoded_peer_id).unwrap(); - - assert_eq!(peer_id, PeerId(*b"-qB00000000000000000")); - } - - #[test] - fn it_should_fail_decoding_an_invalid_percent_encoded_peer_id() { - let invalid_encoded_peer_id = "invalid percent-encoded peer id"; - - let peer_id = percent_decode_peer_id(invalid_encoded_peer_id); - - assert!(peer_id.is_err()); - } -} diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs index 4ef5ca7ea..6dfb6ce7c 100644 --- a/src/servers/http/mod.rs +++ b/src/servers/http/mod.rs @@ -305,7 +305,6 @@ //! - [Bencode to Json Online converter](https://chocobo1.github.io/bencode_online). 
use serde::{Deserialize, Serialize}; -pub mod percent_encoding; pub mod server; pub mod v1; diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs index a9a9f8a76..b84f07995 100644 --- a/src/servers/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -6,12 +6,12 @@ use std::panic::Location; use std::str::FromStr; use aquatic_udp_protocol::{NumberOfBytes, PeerId}; +use bittorrent_http_protocol::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use bittorrent_primitives::info_hash::{self, InfoHash}; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; use torrust_tracker_primitives::peer; -use crate::servers::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::servers::http::v1::query::{ParseQueryError, Query}; use crate::servers::http::v1::responses; diff --git a/src/servers/http/v1/requests/scrape.rs b/src/servers/http/v1/requests/scrape.rs index 0a47a4fb4..30052c8b4 100644 --- a/src/servers/http/v1/requests/scrape.rs +++ b/src/servers/http/v1/requests/scrape.rs @@ -3,11 +3,11 @@ //! Data structures and logic for parsing the `scrape` request. 
use std::panic::Location; +use bittorrent_http_protocol::percent_encoding::percent_decode_info_hash; use bittorrent_primitives::info_hash::{self, InfoHash}; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; -use crate::servers::http::percent_encoding::percent_decode_info_hash; use crate::servers::http::v1::query::Query; use crate::servers::http::v1::responses; From c61cc9a5242909246c6db9ccf3806c68bc423a96 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 29 Nov 2024 16:31:55 +0000 Subject: [PATCH 034/802] fix: doc tests --- contrib/bencode/src/lib.rs | 6 +++--- packages/located-error/src/lib.rs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/contrib/bencode/src/lib.rs b/contrib/bencode/src/lib.rs index 09aaa6867..c44ec07b2 100644 --- a/contrib/bencode/src/lib.rs +++ b/contrib/bencode/src/lib.rs @@ -5,9 +5,9 @@ //! Decoding bencoded data: //! //! ```rust -//! extern crate bencode; +//! extern crate torrust_tracker_contrib_bencode; //! -//! use bencode::{BencodeRef, BRefAccess, BDecodeOpt}; +//! use torrust_tracker_contrib_bencode::{BencodeRef, BRefAccess, BDecodeOpt}; //! //! fn main() { //! let data = b"d12:lucky_numberi7ee"; // cspell:disable-line @@ -22,7 +22,7 @@ //! //! ```rust //! #[macro_use] -//! extern crate bencode; +//! extern crate torrust_tracker_contrib_bencode; //! //! fn main() { //! let message = (ben_map!{ diff --git a/packages/located-error/src/lib.rs b/packages/located-error/src/lib.rs index c30043cd3..09bfbd185 100644 --- a/packages/located-error/src/lib.rs +++ b/packages/located-error/src/lib.rs @@ -23,7 +23,7 @@ //! let b: LocatedError = Located(e).into(); //! let l = get_caller_location(); //! -//! assert!(b.to_string().contains("Test, src/lib.rs")); +//! assert!(b.to_string().contains("src/lib.rs")); //! ``` //! //! 
# Credits From 39716a8e4765e4898f4530b9a137da5980ebcfe2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 29 Nov 2024 16:32:48 +0000 Subject: [PATCH 035/802] ci: fix testing workflow. Run doc tests for all packages in the workspace. --- .github/workflows/testing.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index 74dc254ef..28600dee9 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -140,7 +140,7 @@ jobs: - id: test-docs name: Run Documentation Tests - run: cargo test --doc + run: cargo test --doc --workspace - id: test name: Run Unit Tests From a62ae8215c556e9979042a5c6011249f72215945 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 29 Nov 2024 17:12:32 +0000 Subject: [PATCH 036/802] fix: cargo machete warning --- Cargo.lock | 1 - packages/tracker-client/Cargo.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2931b0f8f..71edc530c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -582,7 +582,6 @@ name = "bittorrent-tracker-client" version = "3.0.0-develop" dependencies = [ "aquatic_udp_protocol", - "bittorrent-http-protocol", "bittorrent-primitives", "derive_more", "hyper", diff --git a/packages/tracker-client/Cargo.toml b/packages/tracker-client/Cargo.toml index 2c536677b..52b0be639 100644 --- a/packages/tracker-client/Cargo.toml +++ b/packages/tracker-client/Cargo.toml @@ -16,7 +16,6 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" -bittorrent-http-protocol = { version = "3.0.0-develop", path = "../http-protocol" } bittorrent-primitives = "0.1.0" derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } hyper = "1" From 2bc44af7e332aadaa2ef9d4fcef549827cb32874 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 29 Nov 2024 17:12:53 +0000 Subject: [PATCH 037/802] test: add test for percent_encode_byte_array --- packages/tracker-client/src/http/mod.rs | 16 
++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/packages/tracker-client/src/http/mod.rs b/packages/tracker-client/src/http/mod.rs index 15723c1b7..d8f8242e8 100644 --- a/packages/tracker-client/src/http/mod.rs +++ b/packages/tracker-client/src/http/mod.rs @@ -24,3 +24,19 @@ impl InfoHash { self.0 } } + +#[cfg(test)] +mod tests { + use crate::http::percent_encode_byte_array; + + #[test] + fn it_should_encode_a_20_byte_array() { + assert_eq!( + percent_encode_byte_array(&[ + 0x3b, 0x24, 0x55, 0x04, 0xcf, 0x5f, 0x11, 0xbb, 0xdb, 0xe1, 0x20, 0x1c, 0xea, 0x6a, 0x6b, 0xf4, 0x5a, 0xee, 0x1b, + 0xc0, + ]), + "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" + ); + } +} From 3af1928fe218b8f14de6549b3e4620903e49bef0 Mon Sep 17 00:00:00 2001 From: Binlogo Date: Mon, 2 Dec 2024 10:26:09 +0000 Subject: [PATCH 038/802] fix: [#1104] improve HTTP announce error message --- src/servers/http/v1/extractors/announce_request.rs | 10 +++++----- src/servers/http/v1/extractors/scrape_request.rs | 10 +++++----- src/servers/http/v1/requests/announce.rs | 4 ++-- src/servers/http/v1/requests/scrape.rs | 2 +- tests/servers/http/asserts.rs | 2 +- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/servers/http/v1/extractors/announce_request.rs b/src/servers/http/v1/extractors/announce_request.rs index 5a642e3fc..ea9a22c7a 100644 --- a/src/servers/http/v1/extractors/announce_request.rs +++ b/src/servers/http/v1/extractors/announce_request.rs @@ -19,13 +19,13 @@ //! Missing query params for `announce` request: //! //! ```text -//! d14:failure reason149:Cannot parse query params for announce request: missing query params for announce request in src/servers/http/v1/extractors/announce_request.rs:54:23e +//! d14:failure reason149:Bad request. Cannot parse query params for announce request: missing query params for announce request in src/servers/http/v1/extractors/announce_request.rs:54:23e //! ``` //! //! Invalid query param (`info_hash`): //! //! ```text -//! 
d14:failure reason240:Cannot parse query params for announce request: invalid param value invalid for info_hash in not enough bytes for infohash: got 7 bytes, expected 20 src/shared/bit_torrent/info_hash.rs:240:27, src/servers/http/v1/requests/announce.rs:182:42e +//! d14:failure reason240:Bad request. Cannot parse query params for announce request: invalid param value invalid for info_hash in not enough bytes for infohash: got 7 bytes, expected 20 src/shared/bit_torrent/info_hash.rs:240:27, src/servers/http/v1/requests/announce.rs:182:42e //! ``` use std::panic::Location; @@ -137,7 +137,7 @@ mod tests { assert_error_response( &response, - "Cannot parse query params for announce request: missing query params for announce request", + "Bad request. Cannot parse query params for announce request: missing query params for announce request", ); } @@ -146,13 +146,13 @@ mod tests { let invalid_query = "param1=value1=value2"; let response = extract_announce_from(Some(invalid_query)).unwrap_err(); - assert_error_response(&response, "Cannot parse query params"); + assert_error_response(&response, "Bad request. Cannot parse query params"); } #[test] fn it_should_reject_a_request_with_a_query_that_cannot_be_parsed_into_an_announce_request() { let response = extract_announce_from(Some("param1=value1")).unwrap_err(); - assert_error_response(&response, "Cannot parse query params for announce request"); + assert_error_response(&response, "Bad request. Cannot parse query params for announce request"); } } diff --git a/src/servers/http/v1/extractors/scrape_request.rs b/src/servers/http/v1/extractors/scrape_request.rs index 80173a33c..35c0bb1b5 100644 --- a/src/servers/http/v1/extractors/scrape_request.rs +++ b/src/servers/http/v1/extractors/scrape_request.rs @@ -19,13 +19,13 @@ //! Missing query params for scrape request: //! //! ```text -//! 
d14:failure reason143:Cannot parse query params for scrape request: missing query params for scrape request in src/servers/http/v1/extractors/scrape_request.rs:52:23e +//! d14:failure reason143:Bad request. Cannot parse query params for scrape request: missing query params for scrape request in src/servers/http/v1/extractors/scrape_request.rs:52:23e //! ``` //! //! Invalid query params for scrape request: //! //! ```text -//! d14:failure reason235:Cannot parse query params for scrape request: invalid param value invalid for info_hash in not enough bytes for infohash: got 7 bytes, expected 20 src/shared/bit_torrent/info_hash.rs:240:27, src/servers/http/v1/requests/scrape.rs:66:46e +//! d14:failure reason235:Bad request. Cannot parse query params for scrape request: invalid param value invalid for info_hash in not enough bytes for infohash: got 7 bytes, expected 20 src/shared/bit_torrent/info_hash.rs:240:27, src/servers/http/v1/requests/scrape.rs:66:46e //! ``` use std::panic::Location; @@ -158,7 +158,7 @@ mod tests { assert_error_response( &response, - "Cannot parse query params for scrape request: missing query params for scrape request", + "Bad request. Cannot parse query params for scrape request: missing query params for scrape request", ); } @@ -167,13 +167,13 @@ mod tests { let invalid_query = "param1=value1=value2"; let response = extract_scrape_from(Some(invalid_query)).unwrap_err(); - assert_error_response(&response, "Cannot parse query params"); + assert_error_response(&response, "Bad request. Cannot parse query params"); } #[test] fn it_should_reject_a_request_with_a_query_that_cannot_be_parsed_into_a_scrape_request() { let response = extract_scrape_from(Some("param1=value1")).unwrap_err(); - assert_error_response(&response, "Cannot parse query params for scrape request"); + assert_error_response(&response, "Bad request. 
Cannot parse query params for scrape request"); } } diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs index b84f07995..00bf53c6f 100644 --- a/src/servers/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -226,7 +226,7 @@ impl FromStr for Compact { impl From for responses::error::Error { fn from(err: ParseQueryError) -> Self { responses::error::Error { - failure_reason: format!("Cannot parse query params: {err}"), + failure_reason: format!("Bad request. Cannot parse query params: {err}"), } } } @@ -234,7 +234,7 @@ impl From for responses::error::Error { impl From for responses::error::Error { fn from(err: ParseAnnounceQueryError) -> Self { responses::error::Error { - failure_reason: format!("Cannot parse query params for announce request: {err}"), + failure_reason: format!("Bad request. Cannot parse query params for announce request: {err}"), } } } diff --git a/src/servers/http/v1/requests/scrape.rs b/src/servers/http/v1/requests/scrape.rs index 30052c8b4..a8e76282e 100644 --- a/src/servers/http/v1/requests/scrape.rs +++ b/src/servers/http/v1/requests/scrape.rs @@ -39,7 +39,7 @@ pub enum ParseScrapeQueryError { impl From for responses::error::Error { fn from(err: ParseScrapeQueryError) -> Self { responses::error::Error { - failure_reason: format!("Cannot parse query params for scrape request: {err}"), + failure_reason: format!("Bad request. Cannot parse query params for scrape request: {err}"), } } } diff --git a/tests/servers/http/asserts.rs b/tests/servers/http/asserts.rs index 3a2e67bf0..8d40d7e74 100644 --- a/tests/servers/http/asserts.rs +++ b/tests/servers/http/asserts.rs @@ -133,7 +133,7 @@ pub async fn assert_cannot_parse_query_params_error_response(response: Response, assert_bencoded_error( &response.text().await.unwrap(), - &format!("Cannot parse query params{failure}"), + &format!("Bad request. 
Cannot parse query params{failure}"), Location::caller(), ); } From 6862301855d8b2ad399d7f8ea6b5374b6eaa140e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 3 Dec 2024 11:08:53 +0000 Subject: [PATCH 039/802] chore(deps): update depencencies ``` cargo update Updating crates.io index Locking 18 packages to latest compatible versions Updating allocator-api2 v0.2.20 -> v0.2.21 Updating event-listener-strategy v0.5.2 -> v0.5.3 Removing hermit-abi v0.3.9 Updating indexmap v2.6.0 -> v2.7.0 Updating js-sys v0.3.72 -> v0.3.74 Updating libloading v0.8.5 -> v0.8.6 Updating mio v1.0.2 -> v1.0.3 Updating syn v2.0.89 -> v2.0.90 Updating time v0.3.36 -> v0.3.37 Updating time-macros v0.2.18 -> v0.2.19 Updating tracing-serde v0.1.3 -> v0.2.0 Updating tracing-subscriber v0.3.18 -> v0.3.19 Updating wasm-bindgen v0.2.95 -> v0.2.97 Updating wasm-bindgen-backend v0.2.95 -> v0.2.97 Updating wasm-bindgen-futures v0.4.45 -> v0.4.47 Updating wasm-bindgen-macro v0.2.95 -> v0.2.97 Updating wasm-bindgen-macro-support v0.2.95 -> v0.2.97 Updating wasm-bindgen-shared v0.2.95 -> v0.2.97 Updating web-sys v0.3.72 -> v0.3.74 ``` --- Cargo.lock | 169 ++++++++++++++++++++++++++--------------------------- 1 file changed, 82 insertions(+), 87 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 71edc530c..479c1dbc8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -66,9 +66,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "android-tzdata" @@ -333,7 +333,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -456,7 +456,7 @@ checksum = "57d123550fa8d071b7255cb0cc04dc302baa6c8c4a79f55701552684d8399bce" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -544,7 +544,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -664,7 +664,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -897,7 +897,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1118,7 +1118,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1129,7 +1129,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1173,7 +1173,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "unicode-xid", ] @@ -1185,7 +1185,7 @@ checksum = "65f152f4b8559c4da5d574bafc7af85454d706b4c5fe8b530d508cacbb6807ea" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1206,7 +1206,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1275,9 +1275,9 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" dependencies = [ "event-listener 5.3.1", "pin-project-lite", @@ -1409,7 +1409,7 @@ checksum = "e99b8b3c28ae0e84b604c75f721c21dc77afb3706076af5e8216d15fd1deaae3" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1421,7 +1421,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - 
"syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1433,7 +1433,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1511,7 +1511,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1607,7 +1607,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.6.0", + "indexmap 2.7.0", "slab", "tokio", "tokio-util", @@ -1668,12 +1668,6 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" -[[package]] -name = "hermit-abi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" - [[package]] name = "hermit-abi" version = "0.4.0" @@ -1949,7 +1943,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1992,9 +1986,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.6.0" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" +checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -2037,7 +2031,7 @@ version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" dependencies = [ - "hermit-abi 0.4.0", + "hermit-abi", "libc", "windows-sys 0.52.0", ] @@ -2083,10 +2077,11 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.72" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9" +checksum = "a865e038f7f6ed956f788f0d7d60c541fff74c7bd74272c5d4cf15c63743e705" dependencies = [ + "once_cell", "wasm-bindgen", ] @@ -2113,9 +2108,9 @@ checksum = "09d6582e104315a817dff97f75133544b2e094ee22447d2acf4a74e189ba06fc" [[package]] name = "libloading" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", "windows-targets", @@ -2236,11 +2231,10 @@ dependencies = [ [[package]] name = "mio" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" +checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ - "hermit-abi 0.3.9", "libc", "wasi", "windows-sys 0.52.0", @@ -2269,7 +2263,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2336,7 +2330,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "termcolor", "thiserror 1.0.69", ] @@ -2535,7 +2529,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2611,7 +2605,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2685,7 +2679,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2753,7 +2747,7 @@ checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" dependencies = [ "cfg-if", "concurrent-queue", - "hermit-abi 0.4.0", + 
"hermit-abi", "pin-project-lite", "rustix", "tracing", @@ -2835,7 +2829,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2855,7 +2849,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "version_check", "yansi", ] @@ -3164,7 +3158,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.89", + "syn 2.0.90", "unicode-ident", ] @@ -3393,7 +3387,7 @@ checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -3403,7 +3397,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de514ef58196f1fc96dcaef80fe6170a1ce6215df9687a93fe8300e773fefc5" dependencies = [ "form_urlencoded", - "indexmap 2.6.0", + "indexmap 2.7.0", "itoa", "ryu", "serde", @@ -3415,7 +3409,7 @@ version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "itoa", "memchr", "ryu", @@ -3440,7 +3434,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -3474,7 +3468,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.6.0", + "indexmap 2.7.0", "serde", "serde_derive", "serde_json", @@ -3491,7 +3485,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -3630,9 +3624,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.89" +version = "2.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" +checksum = 
"919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" dependencies = [ "proc-macro2", "quote", @@ -3662,7 +3656,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -3763,7 +3757,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -3774,7 +3768,7 @@ checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -3789,9 +3783,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ "deranged", "itoa", @@ -3810,9 +3804,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" dependencies = [ "num-conv", "time-core", @@ -3878,7 +3872,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -3942,7 +3936,7 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.6.0", + "indexmap 2.7.0", "serde", "serde_spanned", "toml_datetime", @@ -4208,7 +4202,7 @@ checksum = 
"395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -4234,9 +4228,9 @@ dependencies = [ [[package]] name = "tracing-serde" -version = "0.1.3" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" dependencies = [ "serde", "tracing-core", @@ -4244,9 +4238,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ "nu-ansi-term", "serde", @@ -4400,9 +4394,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.95" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e" +checksum = "d15e63b4482863c109d70a7b8706c1e364eb6ea449b201a76c5b89cedcec2d5c" dependencies = [ "cfg-if", "once_cell", @@ -4411,36 +4405,37 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.95" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358" +checksum = "8d36ef12e3aaca16ddd3f67922bc63e48e953f126de60bd33ccc0101ef9998cd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.45" +version = "0.4.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cc7ec4f8827a71586374db3e87abdb5a2bb3a15afed140221307c3ec06b1f63b" +checksum = "9dfaf8f50e5f293737ee323940c7d8b08a66a95a419223d9f41610ca08b0833d" dependencies = [ "cfg-if", "js-sys", + "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.95" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56" +checksum = "705440e08b42d3e4b36de7d66c944be628d579796b8090bfa3471478a2260051" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4448,28 +4443,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.95" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" +checksum = "98c9ae5a76e46f4deecd0f0255cc223cfa18dc9b261213b8aa0c7b36f61b3f1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.95" +version = "0.2.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d" +checksum = "6ee99da9c5ba11bd675621338ef6fa52296b76b83305e9b6e5c77d4c286d6d49" [[package]] name = "web-sys" -version = "0.3.72" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6488b90108c040df0fe62fa815cbdee25124641df01814dd7282749234c6112" +checksum = "a98bc3c33f0fe7e59ad7cd041b89034fa82a7c2d4365ca538dda6cdaf513863c" dependencies = [ "js-sys", "wasm-bindgen", @@ -4683,7 +4678,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "synstructure", ] @@ -4705,7 +4700,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -4725,7 +4720,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "synstructure", ] @@ -4754,7 +4749,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] From 0d36fb2bf5ecc03110ebca2045fc5769284d2b93 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 4 Dec 2024 17:29:38 +0000 Subject: [PATCH 040/802] chore(deps): update depencencies ```ouput cargo update Updating crates.io index Locking 8 packages to latest compatible versions Updating anyhow v1.0.93 -> v1.0.94 Updating clap v4.5.21 -> v4.5.22 Updating clap_builder v4.5.21 -> v4.5.22 Updating http v1.1.0 -> v1.2.0 Updating thiserror v2.0.3 -> v2.0.4 Updating thiserror-impl v2.0.3 -> v2.0.4 Updating tokio v1.41.1 -> v1.42.0 Updating tokio-util v0.7.12 -> v0.7.13 ``` --- Cargo.lock | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 479c1dbc8..7134365df 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -142,9 +142,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" +checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" [[package]] name = "aquatic_peer_id" @@ -868,9 +868,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.21" +version = "4.5.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" +checksum = "69371e34337c4c984bbe322360c2547210bf632eb2814bbe78a6e87a2935bd2b" dependencies = [ "clap_builder", "clap_derive", @@ -878,9 
+878,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.21" +version = "4.5.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" +checksum = "6e24c1b4099818523236a8ca881d2b45db98dadfb4625cf6608c12069fcbbde1" dependencies = [ "anstream", "anstyle", @@ -1688,9 +1688,9 @@ checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" [[package]] name = "http" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" dependencies = [ "bytes", "fnv", @@ -3742,11 +3742,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.3" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +checksum = "2f49a1853cf82743e3b7950f77e0f4d622ca36cf4317cba00c767838bac8d490" dependencies = [ - "thiserror-impl 2.0.3", + "thiserror-impl 2.0.4", ] [[package]] @@ -3762,9 +3762,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.3" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +checksum = "8381894bb3efe0c4acac3ded651301ceee58a15d47c2e34885ed1908ad667061" dependencies = [ "proc-macro2", "quote", @@ -3849,9 +3849,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.41.1" +version = "1.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" +checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" dependencies = [ "backtrace", 
"bytes", @@ -3898,9 +3898,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.12" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" +checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" dependencies = [ "bytes", "futures-core", @@ -3990,7 +3990,7 @@ dependencies = [ "serde_json", "serde_repr", "serde_with", - "thiserror 2.0.3", + "thiserror 2.0.4", "tokio", "torrust-tracker-clock", "torrust-tracker-configuration", From 52d3505bb4a54435c05775a25875993bff465a47 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 10 Dec 2024 18:24:43 +0000 Subject: [PATCH 041/802] feat: [#1126] add support for prometheus text format on stats API endpoint http://127.0.0.1:1212/api/v1/stats?token=MyAccessToken&format=prometheus ```text torrents 0 seeders 0 completed 0 leechers 0 tcp4_connections_handled 0 tcp4_announces_handled 0 tcp4_scrapes_handled 0 tcp6_connections_handled 0 tcp6_announces_handled 0 tcp6_scrapes_handled 0 udp4_connections_handled 0 udp4_announces_handled 0 udp4_scrapes_handled 0 udp4_errors_handled 0 udp6_connections_handled 0 udp6_announces_handled 0 udp6_scrapes_handled 0 udp6_errors_handled 0 ``` --- src/servers/apis/v1/context/stats/handlers.rs | 39 +++++++-- .../apis/v1/context/stats/responses.rs | 81 ++++++++++++++++++- 2 files changed, 111 insertions(+), 9 deletions(-) diff --git a/src/servers/apis/v1/context/stats/handlers.rs b/src/servers/apis/v1/context/stats/handlers.rs index c3be5dc7a..8b11b1ff1 100644 --- a/src/servers/apis/v1/context/stats/handlers.rs +++ b/src/servers/apis/v1/context/stats/handlers.rs @@ -3,19 +3,46 @@ use std::sync::Arc; use axum::extract::State; -use axum::response::Json; +use axum::response::Response; +use axum_extra::extract::Query; +use serde::Deserialize; -use super::resources::Stats; -use super::responses::stats_response; +use super::responses::{metrics_response, 
stats_response}; use crate::core::services::statistics::get_metrics; use crate::core::Tracker; +#[derive(Deserialize, Debug, Default)] +#[serde(rename_all = "lowercase")] +pub enum Format { + #[default] + Json, + Prometheus, +} + +#[derive(Deserialize, Debug)] +pub struct QueryParams { + /// The [`Format`] of the stats. + #[serde(default)] + pub format: Option, +} + /// It handles the request to get the tracker statistics. /// -/// It returns a `200` response with a json [`Stats`] +/// By default it returns a `200` response with the stats in JSON format. +/// +/// You can add the GET parameter `format=prometheus` to get the stats in +/// Prometheus Text Exposition Format. /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::stats#get-tracker-statistics) /// for more information about this endpoint. -pub async fn get_stats_handler(State(tracker): State>) -> Json { - stats_response(get_metrics(tracker.clone()).await) +pub async fn get_stats_handler(State(tracker): State>, params: Query) -> Response { + let metrics = get_metrics(tracker.clone()).await; + + match params.0.format { + Some(format) => match format { + Format::Json => stats_response(metrics), + Format::Prometheus => metrics_response(&metrics), + }, + None => stats_response(metrics), + } } diff --git a/src/servers/apis/v1/context/stats/responses.rs b/src/servers/apis/v1/context/stats/responses.rs index 9d03ccedf..4fd8be94f 100644 --- a/src/servers/apis/v1/context/stats/responses.rs +++ b/src/servers/apis/v1/context/stats/responses.rs @@ -1,11 +1,86 @@ //! API responses for the [`stats`](crate::servers::apis::v1::context::stats) //! API context. -use axum::response::Json; +use axum::response::{IntoResponse, Json, Response}; use super::resources::Stats; use crate::core::services::statistics::TrackerMetrics; /// `200` response that contains the [`Stats`] resource as json. 
-pub fn stats_response(tracker_metrics: TrackerMetrics) -> Json { - Json(Stats::from(tracker_metrics)) +#[must_use] +pub fn stats_response(tracker_metrics: TrackerMetrics) -> Response { + Json(Stats::from(tracker_metrics)).into_response() +} + +/// `200` response that contains the [`Stats`] resource in Prometheus Text Exposition Format . +#[must_use] +pub fn metrics_response(tracker_metrics: &TrackerMetrics) -> Response { + let mut lines = vec![]; + + lines.push(format!("torrents {}", tracker_metrics.torrents_metrics.torrents)); + lines.push(format!("seeders {}", tracker_metrics.torrents_metrics.complete)); + lines.push(format!("completed {}", tracker_metrics.torrents_metrics.downloaded)); + lines.push(format!("leechers {}", tracker_metrics.torrents_metrics.incomplete)); + + lines.push(format!( + "tcp4_connections_handled {}", + tracker_metrics.protocol_metrics.tcp4_connections_handled + )); + lines.push(format!( + "tcp4_announces_handled {}", + tracker_metrics.protocol_metrics.tcp4_announces_handled + )); + lines.push(format!( + "tcp4_scrapes_handled {}", + tracker_metrics.protocol_metrics.tcp4_scrapes_handled + )); + + lines.push(format!( + "tcp6_connections_handled {}", + tracker_metrics.protocol_metrics.tcp6_connections_handled + )); + lines.push(format!( + "tcp6_announces_handled {}", + tracker_metrics.protocol_metrics.tcp6_announces_handled + )); + lines.push(format!( + "tcp6_scrapes_handled {}", + tracker_metrics.protocol_metrics.tcp6_scrapes_handled + )); + + lines.push(format!( + "udp4_connections_handled {}", + tracker_metrics.protocol_metrics.udp4_connections_handled + )); + lines.push(format!( + "udp4_announces_handled {}", + tracker_metrics.protocol_metrics.udp4_announces_handled + )); + lines.push(format!( + "udp4_scrapes_handled {}", + tracker_metrics.protocol_metrics.udp4_scrapes_handled + )); + lines.push(format!( + "udp4_errors_handled {}", + tracker_metrics.protocol_metrics.udp4_errors_handled + )); + + lines.push(format!( + 
"udp6_connections_handled {}", + tracker_metrics.protocol_metrics.udp6_connections_handled + )); + lines.push(format!( + "udp6_announces_handled {}", + tracker_metrics.protocol_metrics.udp6_announces_handled + )); + lines.push(format!( + "udp6_scrapes_handled {}", + tracker_metrics.protocol_metrics.udp6_scrapes_handled + )); + lines.push(format!( + "udp6_errors_handled {}", + tracker_metrics.protocol_metrics.udp6_errors_handled + )); + + // Return the plain text response + lines.join("\n").into_response() } From 7993f13995f686db7ab8df9970a331e1bf65e9d6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Dec 2024 17:01:44 +0000 Subject: [PATCH 042/802] chore(deps): update depencencies ```output cargo update Updating crates.io index Locking 29 packages to latest compatible versions Updating bigdecimal v0.4.6 -> v0.4.7 Updating bindgen v0.70.1 -> v0.71.1 Updating cc v1.2.2 -> v1.2.4 Updating chrono v0.4.38 -> v0.4.39 Updating clap v4.5.22 -> v4.5.23 Updating clap_builder v4.5.22 -> v4.5.23 Updating clap_lex v0.7.3 -> v0.7.4 Updating fastrand v2.2.0 -> v2.3.0 Updating js-sys v0.3.74 -> v0.3.76 Updating libc v0.2.167 -> v0.2.168 Updating redox_syscall v0.5.7 -> v0.5.8 Updating rustc-hash v1.1.0 -> v2.1.0 Updating rustix v0.38.41 -> v0.38.42 Updating rustls v0.23.19 -> v0.23.20 Updating rustls-pki-types v1.10.0 -> v1.10.1 Updating semver v1.0.23 -> v1.0.24 Updating serde v1.0.215 -> v1.0.216 Updating serde_derive v1.0.215 -> v1.0.216 Removing sync_wrapper v0.1.2 Updating thiserror v2.0.4 -> v2.0.6 Updating thiserror-impl v2.0.4 -> v2.0.6 Updating tokio-rustls v0.26.0 -> v0.26.1 Updating tower v0.5.1 -> v0.5.2 Updating wasm-bindgen v0.2.97 -> v0.2.99 Updating wasm-bindgen-backend v0.2.97 -> v0.2.99 Updating wasm-bindgen-futures v0.4.47 -> v0.4.49 Updating wasm-bindgen-macro v0.2.97 -> v0.2.99 Updating wasm-bindgen-macro-support v0.2.97 -> v0.2.99 Updating wasm-bindgen-shared v0.2.97 -> v0.2.99 Updating web-sys v0.3.74 -> v0.3.76 ``` --- Cargo.lock | 144 
+++++++++++++++++++++++++---------------------------- 1 file changed, 68 insertions(+), 76 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7134365df..6573dcd0f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -384,9 +384,9 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper 1.0.2", + "sync_wrapper", "tokio", - "tower 0.5.1", + "tower 0.5.2", "tower-layer", "tower-service", "tracing", @@ -418,7 +418,7 @@ dependencies = [ "mime", "pin-project-lite", "rustversion", - "sync_wrapper 1.0.2", + "sync_wrapper", "tower-layer", "tower-service", "tracing", @@ -443,7 +443,7 @@ dependencies = [ "pin-project-lite", "serde", "serde_html_form", - "tower 0.5.1", + "tower 0.5.2", "tower-layer", "tower-service", ] @@ -512,9 +512,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "bigdecimal" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f850665a0385e070b64c38d2354e6c104c8479c59868d1e48a0c13ee2c7a1c1" +checksum = "7f31f3af01c5c65a07985c804d3366560e6fa7883d640a122819b14ec327482c" dependencies = [ "autocfg", "libm", @@ -531,9 +531,9 @@ checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] name = "bindgen" -version = "0.70.1" +version = "0.71.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f49d8fed880d473ea71efb9bf597651e77201bdd4893efe54c9e5d65ae04ce6f" +checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" dependencies = [ "bitflags", "cexpr", @@ -775,9 +775,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.2" +version = "1.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f34d93e62b03caf570cccc334cbc6c2fceca82f39211051345108adcba3eebdc" +checksum = "9157bbaa6b165880c27a4293a474c91cdcf265cc68cc829bf10be0964a391caf" dependencies = [ "jobserver", "libc", @@ -807,9 +807,9 @@ checksum = 
"613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" -version = "0.4.38" +version = "0.4.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" dependencies = [ "android-tzdata", "iana-time-zone", @@ -868,9 +868,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.22" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69371e34337c4c984bbe322360c2547210bf632eb2814bbe78a6e87a2935bd2b" +checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" dependencies = [ "clap_builder", "clap_derive", @@ -878,9 +878,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.22" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e24c1b4099818523236a8ca881d2b45db98dadfb4625cf6608c12069fcbbde1" +checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" dependencies = [ "anstream", "anstyle", @@ -902,9 +902,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "cmake" @@ -1297,9 +1297,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "figment" @@ -2077,9 +2077,9 @@ dependencies = [ [[package]] name = 
"js-sys" -version = "0.3.74" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a865e038f7f6ed956f788f0d7d60c541fff74c7bd74272c5d4cf15c63743e705" +checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7" dependencies = [ "once_cell", "wasm-bindgen", @@ -2102,9 +2102,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.167" +version = "0.2.168" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09d6582e104315a817dff97f75133544b2e094ee22447d2acf4a74e189ba06fc" +checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d" [[package]] name = "libloading" @@ -2984,9 +2984,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.7" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" +checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ "bitflags", ] @@ -3066,7 +3066,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 1.0.2", + "sync_wrapper", "system-configuration", "tokio", "tokio-native-tls", @@ -3200,9 +3200,9 @@ checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" -version = "1.1.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" [[package]] name = "rustc_version" @@ -3215,22 +3215,22 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.41" +version = "0.38.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" +checksum = 
"f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" dependencies = [ "bitflags", "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "rustls" -version = "0.23.19" +version = "0.23.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "934b404430bb06b3fae2cba809eb45a1ab1aecd64491213d7c3301b88393f8d1" +checksum = "5065c3f250cbd332cd894be57c40fa52387247659b14a2d6041d121547903b1b" dependencies = [ "once_cell", "rustls-pki-types", @@ -3250,9 +3250,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b" +checksum = "d2bf47e6ff922db3825eb750c4e2ff784c6ff8fb9e13046ef6a1d1c5401b0b37" [[package]] name = "rustls-webpki" @@ -3347,15 +3347,15 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" [[package]] name = "serde" -version = "1.0.215" +version = "1.0.216" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" +checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e" dependencies = [ "serde_derive", ] @@ -3381,9 +3381,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.215" +version = "1.0.216" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" +checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e" dependencies = [ "proc-macro2", "quote", @@ -3633,12 +3633,6 @@ dependencies = [ "unicode-ident", ] 
-[[package]] -name = "sync_wrapper" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" - [[package]] name = "sync_wrapper" version = "1.0.2" @@ -3742,11 +3736,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.4" +version = "2.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f49a1853cf82743e3b7950f77e0f4d622ca36cf4317cba00c767838bac8d490" +checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47" dependencies = [ - "thiserror-impl 2.0.4", + "thiserror-impl 2.0.6", ] [[package]] @@ -3762,9 +3756,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.4" +version = "2.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8381894bb3efe0c4acac3ded651301ceee58a15d47c2e34885ed1908ad667061" +checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312" dependencies = [ "proc-macro2", "quote", @@ -3887,12 +3881,11 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.0" +version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" +checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" dependencies = [ "rustls", - "rustls-pki-types", "tokio", ] @@ -3990,7 +3983,7 @@ dependencies = [ "serde_json", "serde_repr", "serde_with", - "thiserror 2.0.4", + "thiserror 2.0.6", "tokio", "torrust-tracker-clock", "torrust-tracker-configuration", @@ -3999,7 +3992,7 @@ dependencies = [ "torrust-tracker-primitives", "torrust-tracker-test-helpers", "torrust-tracker-torrent-repository", - "tower 0.5.1", + "tower 0.5.2", "tower-http", "tracing", "tracing-subscriber", @@ -4135,14 +4128,14 @@ dependencies = [ [[package]] name = "tower" -version = "0.5.1" +version = "0.5.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", "pin-project-lite", - "sync_wrapper 0.1.2", + "sync_wrapper", "tokio", "tower-layer", "tower-service", @@ -4394,9 +4387,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d15e63b4482863c109d70a7b8706c1e364eb6ea449b201a76c5b89cedcec2d5c" +checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396" dependencies = [ "cfg-if", "once_cell", @@ -4405,13 +4398,12 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d36ef12e3aaca16ddd3f67922bc63e48e953f126de60bd33ccc0101ef9998cd" +checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79" dependencies = [ "bumpalo", "log", - "once_cell", "proc-macro2", "quote", "syn 2.0.90", @@ -4420,9 +4412,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.47" +version = "0.4.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dfaf8f50e5f293737ee323940c7d8b08a66a95a419223d9f41610ca08b0833d" +checksum = "38176d9b44ea84e9184eff0bc34cc167ed044f816accfe5922e54d84cf48eca2" dependencies = [ "cfg-if", "js-sys", @@ -4433,9 +4425,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "705440e08b42d3e4b36de7d66c944be628d579796b8090bfa3471478a2260051" +checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe" dependencies = [ "quote", 
"wasm-bindgen-macro-support", @@ -4443,9 +4435,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98c9ae5a76e46f4deecd0f0255cc223cfa18dc9b261213b8aa0c7b36f61b3f1d" +checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", @@ -4456,15 +4448,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.97" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ee99da9c5ba11bd675621338ef6fa52296b76b83305e9b6e5c77d4c286d6d49" +checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" [[package]] name = "web-sys" -version = "0.3.74" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a98bc3c33f0fe7e59ad7cd041b89034fa82a7c2d4365ca538dda6cdaf513863c" +checksum = "04dd7223427d52553d3702c004d3b2fe07c148165faa56313cb00211e31c12bc" dependencies = [ "js-sys", "wasm-bindgen", From abb242368b8fd4f8ade6eab265cebafea4ba8592 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Dec 2024 17:07:33 +0000 Subject: [PATCH 043/802] chore(deps): update thiserror in workspace packages --- Cargo.lock | 12 ++++++------ console/tracker-client/Cargo.toml | 2 +- contrib/bencode/Cargo.toml | 2 +- packages/configuration/Cargo.toml | 2 +- packages/located-error/Cargo.toml | 2 +- packages/primitives/Cargo.toml | 2 +- packages/tracker-client/Cargo.toml | 2 +- 7 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6573dcd0f..7bd2d7037 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -591,7 +591,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_repr", - "thiserror 1.0.69", + "thiserror 2.0.6", "tokio", "torrust-tracker-configuration", "torrust-tracker-located-error", @@ -4018,7 +4018,7 @@ dependencies = [ "serde_bencode", 
"serde_bytes", "serde_json", - "thiserror 1.0.69", + "thiserror 2.0.6", "tokio", "torrust-tracker-configuration", "tracing", @@ -4045,7 +4045,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 1.0.69", + "thiserror 2.0.6", "toml", "torrust-tracker-located-error", "url", @@ -4057,14 +4057,14 @@ name = "torrust-tracker-contrib-bencode" version = "3.0.0-develop" dependencies = [ "criterion", - "thiserror 1.0.69", + "thiserror 2.0.6", ] [[package]] name = "torrust-tracker-located-error" version = "3.0.0-develop" dependencies = [ - "thiserror 1.0.69", + "thiserror 2.0.6", "tracing", ] @@ -4079,7 +4079,7 @@ dependencies = [ "serde", "tdyne-peer-id", "tdyne-peer-id-registry", - "thiserror 1.0.69", + "thiserror 2.0.6", "zerocopy", ] diff --git a/console/tracker-client/Cargo.toml b/console/tracker-client/Cargo.toml index c9e951003..4db6702cb 100644 --- a/console/tracker-client/Cargo.toml +++ b/console/tracker-client/Cargo.toml @@ -28,7 +28,7 @@ serde = { version = "1", features = ["derive"] } serde_bencode = "0" serde_bytes = "0" serde_json = { version = "1", features = ["preserve_order"] } -thiserror = "1" +thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../../packages/configuration" } tracing = "0" diff --git a/contrib/bencode/Cargo.toml b/contrib/bencode/Cargo.toml index e25a9b64f..f6355b6fc 100644 --- a/contrib/bencode/Cargo.toml +++ b/contrib/bencode/Cargo.toml @@ -16,7 +16,7 @@ rust-version.workspace = true version.workspace = true [dependencies] -thiserror = "1" +thiserror = "2" [dev-dependencies] criterion = "0" diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 8706679f6..05789b882 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -21,7 +21,7 @@ figment = { version = "0", features = ["env", "test", "toml"] } serde = { version = "1", 
features = ["derive"] } serde_json = { version = "1", features = ["preserve_order"] } serde_with = "3" -thiserror = "1" +thiserror = "2" toml = "0" torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } url = "2" diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index 637ea3055..29b0dfb2c 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -18,4 +18,4 @@ version.workspace = true tracing = "0" [dev-dependencies] -thiserror = "1" +thiserror = "2" diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index 4d18bdca6..66b81d65d 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -22,5 +22,5 @@ derive_more = { version = "1", features = ["constructor"] } serde = { version = "1", features = ["derive"] } tdyne-peer-id = "1" tdyne-peer-id-registry = "0" -thiserror = "1" +thiserror = "2" zerocopy = "0.7" diff --git a/packages/tracker-client/Cargo.toml b/packages/tracker-client/Cargo.toml index 52b0be639..67a4c767a 100644 --- a/packages/tracker-client/Cargo.toml +++ b/packages/tracker-client/Cargo.toml @@ -25,7 +25,7 @@ serde = { version = "1", features = ["derive"] } serde_bencode = "0" serde_bytes = "0" serde_repr = "0" -thiserror = "1" +thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } From 286fe022f7186ed9376878774e42fe167cffb7b8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Dec 2024 17:48:05 +0000 Subject: [PATCH 044/802] feat: [#1128] add new metric UDP total requests In the stats enpoint the new values are: - udp4_requests - udp6_requests --- src/core/services/statistics/mod.rs | 2 + src/core/statistics.rs | 27 +++++++++++ .../apis/v1/context/stats/resources.rs | 45 
++++++++++++------- .../apis/v1/context/stats/responses.rs | 2 + src/servers/udp/server/launcher.rs | 13 +++++- .../servers/api/v1/contract/context/stats.rs | 2 + 6 files changed, 73 insertions(+), 18 deletions(-) diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index 0e7735be2..4d9035481 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -73,10 +73,12 @@ pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { tcp6_connections_handled: stats.tcp6_connections_handled, tcp6_announces_handled: stats.tcp6_announces_handled, tcp6_scrapes_handled: stats.tcp6_scrapes_handled, + udp4_requests: stats.udp4_requests, udp4_connections_handled: stats.udp4_connections_handled, udp4_announces_handled: stats.udp4_announces_handled, udp4_scrapes_handled: stats.udp4_scrapes_handled, udp4_errors_handled: stats.udp4_errors_handled, + udp6_requests: stats.udp6_requests, udp6_connections_handled: stats.udp6_connections_handled, udp6_announces_handled: stats.udp6_announces_handled, udp6_scrapes_handled: stats.udp6_scrapes_handled, diff --git a/src/core/statistics.rs b/src/core/statistics.rs index b106b2691..37d3c8822 100644 --- a/src/core/statistics.rs +++ b/src/core/statistics.rs @@ -44,10 +44,12 @@ pub enum Event { Tcp4Scrape, Tcp6Announce, Tcp6Scrape, + Udp4Request, Udp4Connect, Udp4Announce, Udp4Scrape, Udp4Error, + Udp6Request, Udp6Connect, Udp6Announce, Udp6Scrape, @@ -72,12 +74,16 @@ pub struct Metrics { pub tcp4_announces_handled: u64, /// Total number of TCP (HTTP tracker) `scrape` requests from IPv4 peers. pub tcp4_scrapes_handled: u64, + /// Total number of TCP (HTTP tracker) connections from IPv6 peers. pub tcp6_connections_handled: u64, /// Total number of TCP (HTTP tracker) `announce` requests from IPv6 peers. pub tcp6_announces_handled: u64, /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. 
pub tcp6_scrapes_handled: u64, + + /// Total number of UDP (UDP tracker) requests from IPv4 peers. + pub udp4_requests: u64, /// Total number of UDP (UDP tracker) connections from IPv4 peers. pub udp4_connections_handled: u64, /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. @@ -86,6 +92,9 @@ pub struct Metrics { pub udp4_scrapes_handled: u64, /// Total number of UDP (UDP tracker) `error` requests from IPv4 peers. pub udp4_errors_handled: u64, + + /// Total number of UDP (UDP tracker) requests from IPv4 peers. + pub udp6_requests: u64, /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. pub udp6_connections_handled: u64, /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. @@ -165,6 +174,9 @@ async fn event_handler(event: Event, stats_repository: &Repo) { } // UDP4 + Event::Udp4Request => { + stats_repository.increase_udp4_requests().await; + } Event::Udp4Connect => { stats_repository.increase_udp4_connections().await; } @@ -179,6 +191,9 @@ async fn event_handler(event: Event, stats_repository: &Repo) { } // UDP6 + Event::Udp6Request => { + stats_repository.increase_udp6_requests().await; + } Event::Udp6Connect => { stats_repository.increase_udp6_connections().await; } @@ -276,6 +291,12 @@ impl Repo { drop(stats_lock); } + pub async fn increase_udp4_requests(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_requests += 1; + drop(stats_lock); + } + pub async fn increase_udp4_connections(&self) { let mut stats_lock = self.stats.write().await; stats_lock.udp4_connections_handled += 1; @@ -300,6 +321,12 @@ impl Repo { drop(stats_lock); } + pub async fn increase_udp6_requests(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_requests += 1; + drop(stats_lock); + } + pub async fn increase_udp6_connections(&self) { let mut stats_lock = self.stats.write().await; stats_lock.udp6_connections_handled += 1; diff --git 
a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index de6f6ca89..5a70e4aed 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -26,12 +26,16 @@ pub struct Stats { pub tcp4_announces_handled: u64, /// Total number of TCP (HTTP tracker) `scrape` requests from IPv4 peers. pub tcp4_scrapes_handled: u64, + /// Total number of TCP (HTTP tracker) connections from IPv6 peers. pub tcp6_connections_handled: u64, /// Total number of TCP (HTTP tracker) `announce` requests from IPv6 peers. pub tcp6_announces_handled: u64, /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. pub tcp6_scrapes_handled: u64, + + /// Total number of UDP (UDP tracker) requests from IPv4 peers. + pub udp4_requests: u64, /// Total number of UDP (UDP tracker) connections from IPv4 peers. pub udp4_connections_handled: u64, /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. @@ -40,6 +44,9 @@ pub struct Stats { pub udp4_scrapes_handled: u64, /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. pub udp4_errors_handled: u64, + + /// Total number of UDP (UDP tracker) requests from IPv6 peers. + pub udp6_requests: u64, /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. pub udp6_connections_handled: u64, /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. 
@@ -63,10 +70,12 @@ impl From for Stats { tcp6_connections_handled: metrics.protocol_metrics.tcp6_connections_handled, tcp6_announces_handled: metrics.protocol_metrics.tcp6_announces_handled, tcp6_scrapes_handled: metrics.protocol_metrics.tcp6_scrapes_handled, + udp4_requests: metrics.protocol_metrics.udp4_requests, udp4_connections_handled: metrics.protocol_metrics.udp4_connections_handled, udp4_announces_handled: metrics.protocol_metrics.udp4_announces_handled, udp4_scrapes_handled: metrics.protocol_metrics.udp4_scrapes_handled, udp4_errors_handled: metrics.protocol_metrics.udp4_errors_handled, + udp6_requests: metrics.protocol_metrics.udp6_requests, udp6_connections_handled: metrics.protocol_metrics.udp6_connections_handled, udp6_announces_handled: metrics.protocol_metrics.udp6_announces_handled, udp6_scrapes_handled: metrics.protocol_metrics.udp6_scrapes_handled, @@ -100,14 +109,16 @@ mod tests { tcp6_connections_handled: 8, tcp6_announces_handled: 9, tcp6_scrapes_handled: 10, - udp4_connections_handled: 11, - udp4_announces_handled: 12, - udp4_scrapes_handled: 13, - udp4_errors_handled: 14, - udp6_connections_handled: 15, - udp6_announces_handled: 16, - udp6_scrapes_handled: 17, - udp6_errors_handled: 18 + udp4_requests: 11, + udp4_connections_handled: 12, + udp4_announces_handled: 13, + udp4_scrapes_handled: 14, + udp4_errors_handled: 15, + udp6_requests: 16, + udp6_connections_handled: 17, + udp6_announces_handled: 18, + udp6_scrapes_handled: 19, + udp6_errors_handled: 20 } }), Stats { @@ -121,14 +132,16 @@ mod tests { tcp6_connections_handled: 8, tcp6_announces_handled: 9, tcp6_scrapes_handled: 10, - udp4_connections_handled: 11, - udp4_announces_handled: 12, - udp4_scrapes_handled: 13, - udp4_errors_handled: 14, - udp6_connections_handled: 15, - udp6_announces_handled: 16, - udp6_scrapes_handled: 17, - udp6_errors_handled: 18 + udp4_requests: 11, + udp4_connections_handled: 12, + udp4_announces_handled: 13, + udp4_scrapes_handled: 14, + 
udp4_errors_handled: 15, + udp6_requests: 16, + udp6_connections_handled: 17, + udp6_announces_handled: 18, + udp6_scrapes_handled: 19, + udp6_errors_handled: 20 } ); } diff --git a/src/servers/apis/v1/context/stats/responses.rs b/src/servers/apis/v1/context/stats/responses.rs index 4fd8be94f..3358a70cf 100644 --- a/src/servers/apis/v1/context/stats/responses.rs +++ b/src/servers/apis/v1/context/stats/responses.rs @@ -47,6 +47,7 @@ pub fn metrics_response(tracker_metrics: &TrackerMetrics) -> Response { tracker_metrics.protocol_metrics.tcp6_scrapes_handled )); + lines.push(format!("udp4_requests {}", tracker_metrics.protocol_metrics.udp4_requests)); lines.push(format!( "udp4_connections_handled {}", tracker_metrics.protocol_metrics.udp4_connections_handled @@ -64,6 +65,7 @@ pub fn metrics_response(tracker_metrics: &TrackerMetrics) -> Response { tracker_metrics.protocol_metrics.udp4_errors_handled )); + lines.push(format!("udp6_requests {}", tracker_metrics.protocol_metrics.udp6_requests)); lines.push(format!( "udp6_connections_handled {}", tracker_metrics.protocol_metrics.udp6_connections_handled diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index c8bac8098..6bd503e61 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -1,4 +1,4 @@ -use std::net::SocketAddr; +use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; use std::time::Duration; @@ -11,7 +11,7 @@ use tracing::instrument; use super::request_buffer::ActiveRequests; use crate::bootstrap::jobs::Started; -use crate::core::Tracker; +use crate::core::{statistics, Tracker}; use crate::servers::logging::STARTED_ON; use crate::servers::registar::ServiceHealthCheckJob; use crate::servers::signals::{shutdown_signal_with_message, Halted}; @@ -140,6 +140,15 @@ impl Launcher { } }; + match req.from.ip() { + IpAddr::V4(_) => { + tracker.send_stats_event(statistics::Event::Udp4Request).await; + } + IpAddr::V6(_) => { + 
tracker.send_stats_event(statistics::Event::Udp6Request).await; + } + } + // We spawn the new task even if there active requests buffer is // full. This could seem counterintuitive because we are accepting // more request and consuming more memory even if the server is diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index 463dc563e..465b7b73a 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -40,10 +40,12 @@ async fn should_allow_getting_tracker_statistics() { tcp6_connections_handled: 0, tcp6_announces_handled: 0, tcp6_scrapes_handled: 0, + udp4_requests: 0, udp4_connections_handled: 0, udp4_announces_handled: 0, udp4_scrapes_handled: 0, udp4_errors_handled: 0, + udp6_requests: 0, udp6_connections_handled: 0, udp6_announces_handled: 0, udp6_scrapes_handled: 0, From 9499fd8924a926eece3a2774907ee8840fe96170 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Dec 2024 18:21:49 +0000 Subject: [PATCH 045/802] feat: [#1128] add new metric UDP total responses In the stats enpoint the new values are: - udp4_responses - udp6_responses --- src/core/services/statistics/mod.rs | 2 ++ src/core/statistics.rs | 26 +++++++++++++- .../apis/v1/context/stats/resources.rs | 34 ++++++++++++------- .../apis/v1/context/stats/responses.rs | 2 ++ src/servers/udp/server/processor.rs | 13 +++++-- .../servers/api/v1/contract/context/stats.rs | 2 ++ 6 files changed, 64 insertions(+), 15 deletions(-) diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index 4d9035481..a037e53b9 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -77,11 +77,13 @@ pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { udp4_connections_handled: stats.udp4_connections_handled, udp4_announces_handled: stats.udp4_announces_handled, udp4_scrapes_handled: stats.udp4_scrapes_handled, + udp4_responses: 
stats.udp4_responses, udp4_errors_handled: stats.udp4_errors_handled, udp6_requests: stats.udp6_requests, udp6_connections_handled: stats.udp6_connections_handled, udp6_announces_handled: stats.udp6_announces_handled, udp6_scrapes_handled: stats.udp6_scrapes_handled, + udp6_responses: stats.udp6_responses, udp6_errors_handled: stats.udp6_errors_handled, }, } diff --git a/src/core/statistics.rs b/src/core/statistics.rs index 37d3c8822..2df88ae97 100644 --- a/src/core/statistics.rs +++ b/src/core/statistics.rs @@ -48,11 +48,13 @@ pub enum Event { Udp4Connect, Udp4Announce, Udp4Scrape, + Udp4Response, Udp4Error, Udp6Request, Udp6Connect, Udp6Announce, Udp6Scrape, + Udp6Response, Udp6Error, } @@ -90,10 +92,12 @@ pub struct Metrics { pub udp4_announces_handled: u64, /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. pub udp4_scrapes_handled: u64, + /// Total number of UDP (UDP tracker) responses from IPv4 peers. + pub udp4_responses: u64, /// Total number of UDP (UDP tracker) `error` requests from IPv4 peers. pub udp4_errors_handled: u64, - /// Total number of UDP (UDP tracker) requests from IPv4 peers. + /// Total number of UDP (UDP tracker) requests from IPv6 peers. pub udp6_requests: u64, /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. pub udp6_connections_handled: u64, @@ -101,6 +105,8 @@ pub struct Metrics { pub udp6_announces_handled: u64, /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. pub udp6_scrapes_handled: u64, + /// Total number of UDP (UDP tracker) responses from IPv6 peers. + pub udp6_responses: u64, /// Total number of UDP (UDP tracker) `error` requests from IPv6 peers. 
pub udp6_errors_handled: u64, } @@ -186,6 +192,9 @@ async fn event_handler(event: Event, stats_repository: &Repo) { Event::Udp4Scrape => { stats_repository.increase_udp4_scrapes().await; } + Event::Udp4Response => { + stats_repository.increase_udp4_responses().await; + } Event::Udp4Error => { stats_repository.increase_udp4_errors().await; } @@ -203,6 +212,9 @@ async fn event_handler(event: Event, stats_repository: &Repo) { Event::Udp6Scrape => { stats_repository.increase_udp6_scrapes().await; } + Event::Udp6Response => { + stats_repository.increase_udp6_responses().await; + } Event::Udp6Error => { stats_repository.increase_udp6_errors().await; } @@ -315,6 +327,12 @@ impl Repo { drop(stats_lock); } + pub async fn increase_udp4_responses(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_responses += 1; + drop(stats_lock); + } + pub async fn increase_udp4_errors(&self) { let mut stats_lock = self.stats.write().await; stats_lock.udp4_errors_handled += 1; @@ -345,6 +363,12 @@ impl Repo { drop(stats_lock); } + pub async fn increase_udp6_responses(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_responses += 1; + drop(stats_lock); + } + pub async fn increase_udp6_errors(&self) { let mut stats_lock = self.stats.write().await; stats_lock.udp6_errors_handled += 1; diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index 5a70e4aed..21a0dc04a 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -42,6 +42,8 @@ pub struct Stats { pub udp4_announces_handled: u64, /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. pub udp4_scrapes_handled: u64, + /// Total number of UDP (UDP tracker) responses from IPv4 peers. + pub udp4_responses: u64, /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. 
pub udp4_errors_handled: u64, @@ -53,6 +55,8 @@ pub struct Stats { pub udp6_announces_handled: u64, /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. pub udp6_scrapes_handled: u64, + /// Total number of UDP (UDP tracker) responses from IPv6 peers. + pub udp6_responses: u64, /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. pub udp6_errors_handled: u64, } @@ -74,11 +78,13 @@ impl From for Stats { udp4_connections_handled: metrics.protocol_metrics.udp4_connections_handled, udp4_announces_handled: metrics.protocol_metrics.udp4_announces_handled, udp4_scrapes_handled: metrics.protocol_metrics.udp4_scrapes_handled, + udp4_responses: metrics.protocol_metrics.udp4_responses, udp4_errors_handled: metrics.protocol_metrics.udp4_errors_handled, udp6_requests: metrics.protocol_metrics.udp6_requests, udp6_connections_handled: metrics.protocol_metrics.udp6_connections_handled, udp6_announces_handled: metrics.protocol_metrics.udp6_announces_handled, udp6_scrapes_handled: metrics.protocol_metrics.udp6_scrapes_handled, + udp6_responses: metrics.protocol_metrics.udp6_responses, udp6_errors_handled: metrics.protocol_metrics.udp6_errors_handled, } } @@ -113,12 +119,14 @@ mod tests { udp4_connections_handled: 12, udp4_announces_handled: 13, udp4_scrapes_handled: 14, - udp4_errors_handled: 15, - udp6_requests: 16, - udp6_connections_handled: 17, - udp6_announces_handled: 18, - udp6_scrapes_handled: 19, - udp6_errors_handled: 20 + udp4_responses: 15, + udp4_errors_handled: 16, + udp6_requests: 17, + udp6_connections_handled: 18, + udp6_announces_handled: 19, + udp6_scrapes_handled: 20, + udp6_responses: 21, + udp6_errors_handled: 22 } }), Stats { @@ -136,12 +144,14 @@ mod tests { udp4_connections_handled: 12, udp4_announces_handled: 13, udp4_scrapes_handled: 14, - udp4_errors_handled: 15, - udp6_requests: 16, - udp6_connections_handled: 17, - udp6_announces_handled: 18, - udp6_scrapes_handled: 19, - udp6_errors_handled: 20 + udp4_responses: 
15, + udp4_errors_handled: 16, + udp6_requests: 17, + udp6_connections_handled: 18, + udp6_announces_handled: 19, + udp6_scrapes_handled: 20, + udp6_responses: 21, + udp6_errors_handled: 22 } ); } diff --git a/src/servers/apis/v1/context/stats/responses.rs b/src/servers/apis/v1/context/stats/responses.rs index 3358a70cf..e4d5b577d 100644 --- a/src/servers/apis/v1/context/stats/responses.rs +++ b/src/servers/apis/v1/context/stats/responses.rs @@ -60,6 +60,7 @@ pub fn metrics_response(tracker_metrics: &TrackerMetrics) -> Response { "udp4_scrapes_handled {}", tracker_metrics.protocol_metrics.udp4_scrapes_handled )); + lines.push(format!("udp4_responses {}", tracker_metrics.protocol_metrics.udp4_responses)); lines.push(format!( "udp4_errors_handled {}", tracker_metrics.protocol_metrics.udp4_errors_handled @@ -78,6 +79,7 @@ pub fn metrics_response(tracker_metrics: &TrackerMetrics) -> Response { "udp6_scrapes_handled {}", tracker_metrics.protocol_metrics.udp6_scrapes_handled )); + lines.push(format!("udp6_responses {}", tracker_metrics.protocol_metrics.udp6_responses)); lines.push(format!( "udp6_errors_handled {}", tracker_metrics.protocol_metrics.udp6_errors_handled diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index 703367f35..fc39f28b9 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -1,12 +1,12 @@ use std::io::Cursor; -use std::net::SocketAddr; +use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::Response; use tracing::{instrument, Level}; use super::bound_socket::BoundSocket; -use crate::core::Tracker; +use crate::core::{statistics, Tracker}; use crate::servers::udp::handlers::CookieTimeValues; use crate::servers::udp::{handlers, RawRequest}; @@ -64,6 +64,15 @@ impl Processor { } else { tracing::debug!(%bytes_count, %sent_bytes, "sent {response_type}"); } + + match target.ip() { + IpAddr::V4(_) => { + 
self.tracker.send_stats_event(statistics::Event::Udp4Response).await; + } + IpAddr::V6(_) => { + self.tracker.send_stats_event(statistics::Event::Udp6Response).await; + } + } } Err(error) => tracing::warn!(%bytes_count, %error, ?payload, "failed to send"), }; diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index 465b7b73a..f2dbd2118 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -44,11 +44,13 @@ async fn should_allow_getting_tracker_statistics() { udp4_connections_handled: 0, udp4_announces_handled: 0, udp4_scrapes_handled: 0, + udp4_responses: 0, udp4_errors_handled: 0, udp6_requests: 0, udp6_connections_handled: 0, udp6_announces_handled: 0, udp6_scrapes_handled: 0, + udp6_responses: 0, udp6_errors_handled: 0, }, ) From 6ca82e9d0661a829cebd90243ba932e1affeeb85 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Dec 2024 18:39:34 +0000 Subject: [PATCH 046/802] feat: [#1128] add new metric UDP total requests aborted --- src/core/services/statistics/mod.rs | 3 + src/core/statistics.rs | 15 +++++ .../apis/v1/context/stats/resources.rs | 60 +++++++++++-------- .../apis/v1/context/stats/responses.rs | 5 ++ src/servers/udp/server/launcher.rs | 7 ++- src/servers/udp/server/request_buffer.rs | 11 +++- .../servers/api/v1/contract/context/stats.rs | 3 + 7 files changed, 77 insertions(+), 27 deletions(-) diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index a037e53b9..82ff359ab 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -67,12 +67,15 @@ pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { TrackerMetrics { torrents_metrics, protocol_metrics: Metrics { + // TCP tcp4_connections_handled: stats.tcp4_connections_handled, tcp4_announces_handled: stats.tcp4_announces_handled, tcp4_scrapes_handled: stats.tcp4_scrapes_handled, tcp6_connections_handled: 
stats.tcp6_connections_handled, tcp6_announces_handled: stats.tcp6_announces_handled, tcp6_scrapes_handled: stats.tcp6_scrapes_handled, + // UDP + udp_requests_aborted: stats.udp_requests_aborted, udp4_requests: stats.udp4_requests, udp4_connections_handled: stats.udp4_connections_handled, udp4_announces_handled: stats.udp4_announces_handled, diff --git a/src/core/statistics.rs b/src/core/statistics.rs index 2df88ae97..6df7c4961 100644 --- a/src/core/statistics.rs +++ b/src/core/statistics.rs @@ -44,6 +44,7 @@ pub enum Event { Tcp4Scrape, Tcp6Announce, Tcp6Scrape, + Udp4RequestAborted, Udp4Request, Udp4Connect, Udp4Announce, @@ -84,6 +85,9 @@ pub struct Metrics { /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. pub tcp6_scrapes_handled: u64, + /// Total number of UDP (UDP tracker) requests aborted. + pub udp_requests_aborted: u64, + /// Total number of UDP (UDP tracker) requests from IPv4 peers. pub udp4_requests: u64, /// Total number of UDP (UDP tracker) connections from IPv4 peers. 
@@ -179,6 +183,11 @@ async fn event_handler(event: Event, stats_repository: &Repo) { stats_repository.increase_tcp6_connections().await; } + // UDP + Event::Udp4RequestAborted => { + stats_repository.increase_udp_requests_aborted().await; + } + // UDP4 Event::Udp4Request => { stats_repository.increase_udp4_requests().await; @@ -303,6 +312,12 @@ impl Repo { drop(stats_lock); } + pub async fn increase_udp_requests_aborted(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp_requests_aborted += 1; + drop(stats_lock); + } + pub async fn increase_udp4_requests(&self) { let mut stats_lock = self.stats.write().await; stats_lock.udp4_requests += 1; diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index 21a0dc04a..e7057f30a 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -34,6 +34,9 @@ pub struct Stats { /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. pub tcp6_scrapes_handled: u64, + /// Total number of UDP (UDP tracker) requests aborted. + pub udp_requests_aborted: u64, + /// Total number of UDP (UDP tracker) requests from IPv4 peers. pub udp4_requests: u64, /// Total number of UDP (UDP tracker) connections from IPv4 peers. 
@@ -68,12 +71,15 @@ impl From for Stats { seeders: metrics.torrents_metrics.complete, completed: metrics.torrents_metrics.downloaded, leechers: metrics.torrents_metrics.incomplete, + // TCP tcp4_connections_handled: metrics.protocol_metrics.tcp4_connections_handled, tcp4_announces_handled: metrics.protocol_metrics.tcp4_announces_handled, tcp4_scrapes_handled: metrics.protocol_metrics.tcp4_scrapes_handled, tcp6_connections_handled: metrics.protocol_metrics.tcp6_connections_handled, tcp6_announces_handled: metrics.protocol_metrics.tcp6_announces_handled, tcp6_scrapes_handled: metrics.protocol_metrics.tcp6_scrapes_handled, + // UDP + udp_requests_aborted: metrics.protocol_metrics.udp_requests_aborted, udp4_requests: metrics.protocol_metrics.udp4_requests, udp4_connections_handled: metrics.protocol_metrics.udp4_connections_handled, udp4_announces_handled: metrics.protocol_metrics.udp4_announces_handled, @@ -109,24 +115,27 @@ mod tests { torrents: 4 }, protocol_metrics: Metrics { + // TCP tcp4_connections_handled: 5, tcp4_announces_handled: 6, tcp4_scrapes_handled: 7, tcp6_connections_handled: 8, tcp6_announces_handled: 9, tcp6_scrapes_handled: 10, - udp4_requests: 11, - udp4_connections_handled: 12, - udp4_announces_handled: 13, - udp4_scrapes_handled: 14, - udp4_responses: 15, - udp4_errors_handled: 16, - udp6_requests: 17, - udp6_connections_handled: 18, - udp6_announces_handled: 19, - udp6_scrapes_handled: 20, - udp6_responses: 21, - udp6_errors_handled: 22 + // UDP + udp_requests_aborted: 11, + udp4_requests: 12, + udp4_connections_handled: 13, + udp4_announces_handled: 14, + udp4_scrapes_handled: 15, + udp4_responses: 16, + udp4_errors_handled: 17, + udp6_requests: 18, + udp6_connections_handled: 19, + udp6_announces_handled: 20, + udp6_scrapes_handled: 21, + udp6_responses: 22, + udp6_errors_handled: 23 } }), Stats { @@ -134,24 +143,27 @@ mod tests { seeders: 1, completed: 2, leechers: 3, + // TCP tcp4_connections_handled: 5, tcp4_announces_handled: 6, 
tcp4_scrapes_handled: 7, tcp6_connections_handled: 8, tcp6_announces_handled: 9, tcp6_scrapes_handled: 10, - udp4_requests: 11, - udp4_connections_handled: 12, - udp4_announces_handled: 13, - udp4_scrapes_handled: 14, - udp4_responses: 15, - udp4_errors_handled: 16, - udp6_requests: 17, - udp6_connections_handled: 18, - udp6_announces_handled: 19, - udp6_scrapes_handled: 20, - udp6_responses: 21, - udp6_errors_handled: 22 + // UDP + udp_requests_aborted: 11, + udp4_requests: 12, + udp4_connections_handled: 13, + udp4_announces_handled: 14, + udp4_scrapes_handled: 15, + udp4_responses: 16, + udp4_errors_handled: 17, + udp6_requests: 18, + udp6_connections_handled: 19, + udp6_announces_handled: 20, + udp6_scrapes_handled: 21, + udp6_responses: 22, + udp6_errors_handled: 23 } ); } diff --git a/src/servers/apis/v1/context/stats/responses.rs b/src/servers/apis/v1/context/stats/responses.rs index e4d5b577d..6b214d0c9 100644 --- a/src/servers/apis/v1/context/stats/responses.rs +++ b/src/servers/apis/v1/context/stats/responses.rs @@ -47,6 +47,11 @@ pub fn metrics_response(tracker_metrics: &TrackerMetrics) -> Response { tracker_metrics.protocol_metrics.tcp6_scrapes_handled )); + lines.push(format!( + "udp_requests_aborted {}", + tracker_metrics.protocol_metrics.udp_requests_aborted + )); + lines.push(format!("udp4_requests {}", tracker_metrics.protocol_metrics.udp4_requests)); lines.push(format!( "udp4_connections_handled {}", diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index 6bd503e61..d6827346d 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -166,7 +166,12 @@ impl Launcher { continue; } - active_requests.force_push(abort_handle, &local_addr).await; + let old_request_aborted = active_requests.force_push(abort_handle, &local_addr).await; + + if old_request_aborted { + // Evicted task from active requests buffer was aborted. 
+ tracker.send_stats_event(statistics::Event::Udp4RequestAborted).await; + } } else { tokio::task::yield_now().await; diff --git a/src/servers/udp/server/request_buffer.rs b/src/servers/udp/server/request_buffer.rs index ffbd9565d..03cb6040f 100644 --- a/src/servers/udp/server/request_buffer.rs +++ b/src/servers/udp/server/request_buffer.rs @@ -41,6 +41,8 @@ impl ActiveRequests { /// 1. Removing finished tasks. /// 2. Removing the oldest unfinished task if no finished tasks are found. /// + /// Returns `true` if a task was removed, `false` otherwise. + /// /// # Panics /// /// This method will panic if it cannot make space for adding a new handle. @@ -49,17 +51,19 @@ impl ActiveRequests { /// /// * `abort_handle` - The `AbortHandle` for the UDP request processor task. /// * `local_addr` - A string slice representing the local address for logging. - pub async fn force_push(&mut self, new_task: AbortHandle, local_addr: &str) { + pub async fn force_push(&mut self, new_task: AbortHandle, local_addr: &str) -> bool { // Attempt to add the new handle to the buffer. match self.rb.try_push(new_task) { Ok(()) => { // Successfully added the task, no further action needed. + false } Err(new_task) => { // Buffer is full, attempt to make space. let mut finished: u64 = 0; let mut unfinished_task = None; + let mut old_task_aborted = false; for old_task in self.rb.pop_iter() { // We found a finished tasks ... increase the counter and @@ -96,6 +100,7 @@ impl ActiveRequests { if finished == 0 { // We make place aborting this task. 
old_task.abort(); + old_task_aborted = true; tracing::warn!( target: UDP_TRACKER_LOG_TARGET, @@ -134,7 +139,9 @@ impl ActiveRequests { if !new_task.is_finished() { self.rb.try_push(new_task).expect("it should have space for this new task."); } + + old_task_aborted } - }; + } } } diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index f2dbd2118..7853450e2 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -34,12 +34,15 @@ async fn should_allow_getting_tracker_statistics() { seeders: 1, completed: 0, leechers: 0, + // TCP tcp4_connections_handled: 0, tcp4_announces_handled: 0, tcp4_scrapes_handled: 0, tcp6_connections_handled: 0, tcp6_announces_handled: 0, tcp6_scrapes_handled: 0, + // UDP + udp_requests_aborted: 0, udp4_requests: 0, udp4_connections_handled: 0, udp4_announces_handled: 0, From 87401e894d8720b5e25a735cb822f58b2c92be94 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Dec 2024 09:57:19 +0000 Subject: [PATCH 047/802] chore(deps): add dependency bloom --- Cargo.lock | 16 ++++++++++++++++ Cargo.toml | 1 + 2 files changed, 17 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 7bd2d7037..c9f388f48 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -547,6 +547,12 @@ dependencies = [ "syn 2.0.90", ] +[[package]] +name = "bit-vec" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b4ff8b16e6076c3e14220b39fbc1fabb6737522281a388998046859400895f" + [[package]] name = "bitflags" version = "2.6.0" @@ -634,6 +640,15 @@ dependencies = [ "piper", ] +[[package]] +name = "bloom" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d00ac8e5056d6d65376a3c1aa5c7c34850d6949ace17f0266953a254eb3d6fe8" +dependencies = [ + "bit-vec", +] + [[package]] name = "blowfish" version = "0.9.1" @@ -3949,6 +3964,7 @@ dependencies = [ "bittorrent-http-protocol", 
"bittorrent-primitives", "bittorrent-tracker-client", + "bloom", "blowfish", "camino", "chrono", diff --git a/Cargo.toml b/Cargo.toml index f512dca92..6832f17f2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,6 +39,7 @@ axum-server = { version = "0", features = ["tls-rustls-no-provider"] } bittorrent-http-protocol = { version = "3.0.0-develop", path = "packages/http-protocol" } bittorrent-primitives = "0.1.0" bittorrent-tracker-client = { version = "3.0.0-develop", path = "packages/tracker-client" } +bloom = "0.3.2" blowfish = "0" camino = { version = "1", features = ["serde", "serde1"] } chrono = { version = "0", default-features = false, features = ["clock"] } From 10f9bdaacf6b156da075224737f6f1ab83c84f53 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Dec 2024 12:09:03 +0000 Subject: [PATCH 048/802] feat: [#1096] ban client IP when exceeds connection ID errors limit The life demo tracker is receiving many UDP requests with a wrong conenctions IDs. Errors are logged (write disk) and that decreases the tracker performance. This counts errors and bans Ips after 10 errors for 2 minutes. We use two levels of counters. 1. First level: A Counting Bloom Filter: fast and low memory consumption but innacurate (False Positives). 2. HashMap: Exact Counter for Ips. CBFs are fast and use litle memory but they are also innaccurate. They have False Positives meaning some IPs would be banned only becuase there are bucket colissions (IPs sharing the same counter). To avoid banning IPs incorrectly we decided to introduce a second counter, which is a HashMap that counts error precisely. IPs are only banned when this counter reaches the limit (over 10 errors). We keep the CBF as a first level filter. It's a fast-check IP filter without affecting tracker's performance. When the IP is banned according to the first filter we double-check in the HashMap. CBF is faster than checking always for banned IPs against the HashMap. 
This solution should be good if the number of IPs is low. We have to find another solution anyway for IPv6 where is cheaper to own a range of IPs. --- cSpell.json | 1 + src/servers/udp/handlers.rs | 16 ++- src/servers/udp/server/banning.rs | 150 ++++++++++++++++++++++++++++ src/servers/udp/server/launcher.rs | 62 +++++++++--- src/servers/udp/server/mod.rs | 1 + src/servers/udp/server/processor.rs | 8 +- tests/servers/udp/contract.rs | 87 ++++++++++++---- 7 files changed, 288 insertions(+), 37 deletions(-) create mode 100644 src/servers/udp/server/banning.rs diff --git a/cSpell.json b/cSpell.json index 090a2b0e3..a21e69b9f 100644 --- a/cSpell.json +++ b/cSpell.json @@ -5,6 +5,7 @@ "alekitto", "appuser", "Arvid", + "ASMS", "asyn", "autoclean", "AUTOINCREMENT", diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index af22b263d..1fb450e1a 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -11,12 +11,14 @@ use aquatic_udp_protocol::{ ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; use bittorrent_primitives::info_hash::InfoHash; +use tokio::sync::RwLock; use torrust_tracker_clock::clock::Time as _; use tracing::{instrument, Level}; use uuid::Uuid; use zerocopy::network_endian::I32; use super::connection_cookie::{check, make}; +use super::server::banning::BanService; use super::RawRequest; use crate::core::{statistics, PeersWanted, Tracker}; use crate::servers::udp::error::Error; @@ -51,12 +53,13 @@ impl CookieTimeValues { /// - Delegating the request to the correct handler depending on the request type. /// /// It will return an `Error` response if the request is invalid. 
-#[instrument(fields(request_id), skip(udp_request, tracker, cookie_time_values), ret(level = Level::TRACE))] +#[instrument(fields(request_id), skip(udp_request, tracker, cookie_time_values, ban_service), ret(level = Level::TRACE))] pub(crate) async fn handle_packet( udp_request: RawRequest, tracker: &Tracker, local_addr: SocketAddr, cookie_time_values: CookieTimeValues, + ban_service: Arc>, ) -> Response { tracing::Span::current().record("request_id", Uuid::new_v4().to_string()); tracing::debug!("Handling Packets: {udp_request:?}"); @@ -68,6 +71,17 @@ pub(crate) async fn handle_packet( Ok(request) => match handle_request(request, udp_request.from, tracker, cookie_time_values.clone()).await { Ok(response) => return response, Err((e, transaction_id)) => { + match &e { + Error::CookieValueNotNormal { .. } + | Error::CookieValueExpired { .. } + | Error::CookieValueFromFuture { .. } => { + // code-review: should we include `RequestParseError` and `BadRequest`? + let mut ban_service = ban_service.write().await; + ban_service.increase_counter(&udp_request.from.ip()); + } + _ => {} + } + handle_error( udp_request.from, tracker, diff --git a/src/servers/udp/server/banning.rs b/src/servers/udp/server/banning.rs new file mode 100644 index 000000000..df236820c --- /dev/null +++ b/src/servers/udp/server/banning.rs @@ -0,0 +1,150 @@ +//! Banning service for UDP tracker. +//! +//! It bans clients that send invalid connection id's. +//! +//! It uses two levels of filtering: +//! +//! 1. First, tt uses a Counting Bloom Filter to keep track of the number of +//! connection ID errors per ip. That means there can be false positives, but +//! not false negatives. 1 out of 100000 requests will be a false positive +//! and the client will be banned and not receive a response. +//! 2. Since we want to avoid false positives (banning a client that is not +//! sending invalid connection id's), we use a `HashMap` to keep track of the +//! exact number of connection ID errors per ip. +//! 
+//! This two level filtering is to avoid false positives. It has the advantage +//! of being fast by using a Counting Bloom Filter and not having false +//! negatives at the cost of increasing the memory usage. +use std::collections::HashMap; +use std::net::IpAddr; + +use bloom::{CountingBloomFilter, ASMS}; +use tokio::time::Instant; +use url::Url; + +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; + +pub struct BanService { + max_connection_id_errors_per_ip: u32, + fuzzy_error_counter: CountingBloomFilter, + accurate_error_counter: HashMap, + local_addr: Url, + last_connection_id_errors_reset: Instant, +} + +impl BanService { + #[must_use] + pub fn new(max_connection_id_errors_per_ip: u32, local_addr: Url) -> Self { + Self { + max_connection_id_errors_per_ip, + local_addr, + fuzzy_error_counter: CountingBloomFilter::with_rate(4, 0.01, 100), + accurate_error_counter: HashMap::new(), + last_connection_id_errors_reset: tokio::time::Instant::now(), + } + } + + pub fn increase_counter(&mut self, ip: &IpAddr) { + self.fuzzy_error_counter.insert(&ip.to_string()); + *self.accurate_error_counter.entry(*ip).or_insert(0) += 1; + } + + #[must_use] + pub fn get_count(&self, ip: &IpAddr) -> Option { + self.accurate_error_counter.get(ip).copied() + } + + #[must_use] + pub fn get_estimate_count(&self, ip: &IpAddr) -> u32 { + self.fuzzy_error_counter.estimate_count(&ip.to_string()) + } + + /// Returns true if the given ip address is banned. + #[must_use] + pub fn is_banned(&self, ip: &IpAddr) -> bool { + // First check if the ip is in the bloom filter (fast check) + if self.fuzzy_error_counter.estimate_count(&ip.to_string()) <= self.max_connection_id_errors_per_ip { + return false; + } + + // Check with the exact counter (to avoid false positives) + match self.get_count(ip) { + Some(count) => count > self.max_connection_id_errors_per_ip, + None => false, + } + } + + /// Resets the filters and updates the reset timestamp. 
+ pub fn reset_bans(&mut self) { + self.fuzzy_error_counter.clear(); + + self.accurate_error_counter.clear(); + + self.last_connection_id_errors_reset = Instant::now(); + + let local_addr = self.local_addr.to_string(); + tracing::info!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop (connection id errors filter cleared)"); + } +} + +#[cfg(test)] +mod tests { + use std::net::IpAddr; + + use super::BanService; + + /// Sample service with one day ban duration. + fn ban_service(counter_limit: u32) -> BanService { + let udp_tracker_url = "udp://127.0.0.1".parse().unwrap(); + BanService::new(counter_limit, udp_tracker_url) + } + + #[test] + fn it_should_increase_the_errors_counter_for_a_given_ip() { + let mut ban_service = ban_service(1); + + let ip: IpAddr = "127.0.0.2".parse().unwrap(); + + ban_service.increase_counter(&ip); + + assert_eq!(ban_service.get_count(&ip), Some(1)); + } + + #[test] + fn it_should_ban_ips_with_counters_exceeding_a_predefined_limit() { + let mut ban_service = ban_service(1); + + let ip: IpAddr = "127.0.0.2".parse().unwrap(); + + ban_service.increase_counter(&ip); // Counter = 1 + ban_service.increase_counter(&ip); // Counter = 2 + + println!("Counter: {}", ban_service.get_count(&ip).unwrap()); + + assert!(ban_service.is_banned(&ip)); + } + + #[test] + fn it_should_not_ban_ips_whose_counters_do_not_exceed_the_predefined_limit() { + let mut ban_service = ban_service(1); + + let ip: IpAddr = "127.0.0.2".parse().unwrap(); + + ban_service.increase_counter(&ip); + + assert!(!ban_service.is_banned(&ip)); + } + + #[test] + fn it_should_allow_resetting_all_the_counters() { + let mut ban_service = ban_service(1); + + let ip: IpAddr = "127.0.0.2".parse().unwrap(); + + ban_service.increase_counter(&ip); // Counter = 1 + + ban_service.reset_bans(); + + assert_eq!(ban_service.get_estimate_count(&ip), 0); + } +} diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index d6827346d..f314e3721 100644 --- 
a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -6,9 +6,11 @@ use bittorrent_tracker_client::udp::client::check; use derive_more::Constructor; use futures_util::StreamExt; use tokio::select; -use tokio::sync::oneshot; +use tokio::sync::{oneshot, RwLock}; +use tokio::time::interval; use tracing::instrument; +use super::banning::BanService; use super::request_buffer::ActiveRequests; use crate::bootstrap::jobs::Started; use crate::core::{statistics, Tracker}; @@ -20,6 +22,11 @@ use crate::servers::udp::server::processor::Processor; use crate::servers::udp::server::receiver::Receiver; use crate::servers::udp::UDP_TRACKER_LOG_TARGET; +/// The maximum number of connection id errors per ip. Clients will be banned if +/// they exceed this limit. +const MAX_CONNECTION_ID_ERRORS_PER_IP: u32 = 10; +const IP_BANS_RESET_INTERVAL_IN_SECS: u64 = 120; + /// A UDP server instance launcher. #[derive(Constructor)] pub struct Launcher; @@ -115,13 +122,30 @@ impl Launcher { let active_requests = &mut ActiveRequests::default(); let addr = receiver.bound_socket_address(); + let local_addr = format!("udp://{addr}"); let cookie_lifetime = cookie_lifetime.as_secs_f64(); - loop { - let processor = Processor::new(receiver.socket.clone(), tracker.clone(), cookie_lifetime); + let ban_service = Arc::new(RwLock::new(BanService::new( + MAX_CONNECTION_ID_ERRORS_PER_IP, + local_addr.parse().unwrap(), + ))); + + let ban_cleaner = ban_service.clone(); + + tokio::spawn(async move { + let mut cleaner_interval = interval(Duration::from_secs(IP_BANS_RESET_INTERVAL_IN_SECS)); + + cleaner_interval.tick().await; + loop { + cleaner_interval.tick().await; + ban_cleaner.write().await.reset_bans(); + } + }); + + loop { if let Some(req) = { tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server (wait for request)"); receiver.next().await @@ -149,18 +173,26 @@ impl Launcher { } } - // We spawn the new task even if there active requests buffer is - // full. 
This could seem counterintuitive because we are accepting - // more request and consuming more memory even if the server is - // already busy. However, we "force_push" the new tasks in the - // buffer. That means, in the worst scenario we will abort a - // running task to make place for the new task. - // - // Once concern could be to reach an starvation point were we - // are only adding and removing tasks without given them the - // chance to finish. However, the buffer is yielding before - // aborting one tasks, giving it the chance to finish. - let abort_handle: tokio::task::AbortHandle = tokio::task::spawn(processor.process_request(req)).abort_handle(); + if ban_service.read().await.is_banned(&req.from.ip()) { + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop continue: (banned ip)"); + continue; + } + + let processor = Processor::new(receiver.socket.clone(), tracker.clone(), cookie_lifetime); + + /* We spawn the new task even if the active requests buffer is + full. This could seem counterintuitive because we are accepting + more request and consuming more memory even if the server is + already busy. However, we "force_push" the new tasks in the + buffer. That means, in the worst scenario we will abort a + running task to make place for the new task. + + Once concern could be to reach an starvation point were we are + only adding and removing tasks without given them the chance to + finish. However, the buffer is yielding before aborting one + tasks, giving it the chance to finish. 
*/ + let abort_handle: tokio::task::AbortHandle = + tokio::task::spawn(processor.process_request(req, ban_service.clone())).abort_handle(); if abort_handle.is_finished() { continue; diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index 7067512b6..9f974ca8c 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -6,6 +6,7 @@ use thiserror::Error; use super::RawRequest; +pub mod banning; pub mod bound_socket; pub mod launcher; pub mod processor; diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index fc39f28b9..120196431 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -3,8 +3,10 @@ use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::Response; +use tokio::sync::RwLock; use tracing::{instrument, Level}; +use super::banning::BanService; use super::bound_socket::BoundSocket; use crate::core::{statistics, Tracker}; use crate::servers::udp::handlers::CookieTimeValues; @@ -25,16 +27,18 @@ impl Processor { } } - #[instrument(skip(self, request))] - pub async fn process_request(self, request: RawRequest) { + #[instrument(skip(self, request, ban_service))] + pub async fn process_request(self, request: RawRequest, ban_service: Arc>) { let from = request.from; let response = handlers::handle_packet( request, &self.tracker, self.socket.address(), CookieTimeValues::new(self.cookie_lifetime), + ban_service, ) .await; + self.send_response(from, response).await; } diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index b12a8a900..9e9085e62 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -130,10 +130,31 @@ mod receiving_an_announce_request { use crate::servers::udp::contract::send_connection_request; use crate::servers::udp::Started; - pub async fn send_and_get_announce(tx_id: TransactionId, c_id: ConnectionId, client: &UdpTrackerClient) { - // Send announce 
request + pub async fn assert_send_and_get_announce(tx_id: TransactionId, c_id: ConnectionId, client: &UdpTrackerClient) { + let response = send_and_get_announce(tx_id, c_id, client).await; + assert!(is_ipv4_announce_response(&response)); + } + + pub async fn send_and_get_announce( + tx_id: TransactionId, + c_id: ConnectionId, + client: &UdpTrackerClient, + ) -> aquatic_udp_protocol::Response { + let announce_request = build_sample_announce_request(tx_id, c_id, client.client.socket.local_addr().unwrap().port()); + + match client.send(announce_request.into()).await { + Ok(_) => (), + Err(err) => panic!("{err}"), + }; - let announce_request = AnnounceRequest { + match client.receive().await { + Ok(response) => response, + Err(err) => panic!("{err}"), + } + } + + fn build_sample_announce_request(tx_id: TransactionId, c_id: ConnectionId, port: u16) -> AnnounceRequest { + AnnounceRequest { connection_id: ConnectionId(c_id.0), action_placeholder: AnnounceActionPlaceholder::default(), transaction_id: tx_id, @@ -146,26 +167,34 @@ mod receiving_an_announce_request { ip_address: Ipv4Addr::new(0, 0, 0, 0).into(), key: PeerKey::new(0i32), peers_wanted: NumberOfPeers(1i32.into()), - port: Port(client.client.socket.local_addr().unwrap().port().into()), - }; + port: Port(port.into()), + } + } - match client.send(announce_request.into()).await { - Ok(_) => (), - Err(err) => panic!("{err}"), - }; + #[tokio::test] + async fn should_return_an_announce_response() { + INIT.call_once(|| { + tracing_stderr_init(LevelFilter::ERROR); + }); - let response = match client.receive().await { - Ok(response) => response, + let env = Started::new(&configuration::ephemeral().into()).await; + + let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { + Ok(udp_tracker_client) => udp_tracker_client, Err(err) => panic!("{err}"), }; - // println!("test response {response:?}"); + let tx_id = TransactionId::new(123); - assert!(is_ipv4_announce_response(&response)); + let c_id 
= send_connection_request(tx_id, &client).await; + + assert_send_and_get_announce(tx_id, c_id, &client).await; + + env.stop().await; } #[tokio::test] - async fn should_return_an_announce_response() { + async fn should_return_many_announce_response() { INIT.call_once(|| { tracing_stderr_init(LevelFilter::ERROR); }); @@ -181,13 +210,16 @@ mod receiving_an_announce_request { let c_id = send_connection_request(tx_id, &client).await; - send_and_get_announce(tx_id, c_id, &client).await; + for x in 0..1000 { + tracing::info!("req no: {x}"); + assert_send_and_get_announce(tx_id, c_id, &client).await; + } env.stop().await; } #[tokio::test] - async fn should_return_many_announce_response() { + async fn should_ban_the_client_ip_if_it_sends_more_than_10_requests_with_a_cookie_value_not_normal() { INIT.call_once(|| { tracing_stderr_init(LevelFilter::ERROR); }); @@ -201,13 +233,30 @@ mod receiving_an_announce_request { let tx_id = TransactionId::new(123); - let c_id = send_connection_request(tx_id, &client).await; + // The eleven first requests should be fine - for x in 0..1000 { + let invalid_connection_id = ConnectionId::new(0); // Zero is one of the not normal values. 
+ + for x in 0..=10 { tracing::info!("req no: {x}"); - send_and_get_announce(tx_id, c_id, &client).await; + send_and_get_announce(tx_id, invalid_connection_id, &client).await; } + // The twelfth request should be banned (timeout error) + + let announce_request = build_sample_announce_request( + tx_id, + invalid_connection_id, + client.client.socket.local_addr().unwrap().port(), + ); + + match client.send(announce_request.into()).await { + Ok(_) => (), + Err(err) => panic!("{err}"), + }; + + assert!(client.receive().await.is_err()); + env.stop().await; } } From 29e506d2aa7946e201902e7b0bce06a1ba5a778b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 10 Dec 2024 10:10:01 +0000 Subject: [PATCH 049/802] feat: use default aquatic udp port for benchmarking Becuase we are using aquatic_udp_load_test with this ocndifugration ``` Starting client with config: Config { server_address: 127.0.0.1:3000, log_level: Error, workers: 1, duration: 0, summarize_last: 0, extra_statistics: true, network: NetworkConfig { multiple_client_ipv4s: true, sockets_per_worker: 4, recv_buffer: 8000000, }, requests: RequestConfig { number_of_torrents: 1000000, number_of_peers: 2000000, scrape_max_torrents: 10, announce_peers_wanted: 30, weight_connect: 50, weight_announce: 50, weight_scrape: 1, peer_seeder_probability: 0.75, }, } ``` --- share/default/config/tracker.udp.benchmarking.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/share/default/config/tracker.udp.benchmarking.toml b/share/default/config/tracker.udp.benchmarking.toml index c6644d8dc..8a898153a 100644 --- a/share/default/config/tracker.udp.benchmarking.toml +++ b/share/default/config/tracker.udp.benchmarking.toml @@ -18,4 +18,4 @@ persistent_torrent_completed_stat = false remove_peerless_torrents = false [[udp_trackers]] -bind_address = "0.0.0.0:6969" +bind_address = "0.0.0.0:3000" From fe4103d297dda35382e4177ce1422ad50d1c34f2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 17 Dec 2024 08:02:05 +0000 
Subject: [PATCH 050/802] chore(deps): update depencencies ```output cargo update Updating crates.io index Locking 7 packages to latest compatible versions Updating crossbeam-channel v0.5.13 -> v0.5.14 Updating crossbeam-deque v0.8.5 -> v0.8.6 Updating crossbeam-queue v0.3.11 -> v0.3.12 Updating crossbeam-utils v0.8.20 -> v0.8.21 Updating hyper v1.5.1 -> v1.5.2 Updating thiserror v2.0.6 -> v2.0.7 Updating thiserror-impl v2.0.6 -> v2.0.7 ``` --- Cargo.lock | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c9f388f48..dacd04454 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -597,7 +597,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_repr", - "thiserror 2.0.6", + "thiserror 2.0.7", "tokio", "torrust-tracker-configuration", "torrust-tracker-located-error", @@ -1045,18 +1045,18 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.13" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" +checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" dependencies = [ "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" dependencies = [ "crossbeam-epoch", "crossbeam-utils", @@ -1073,9 +1073,9 @@ dependencies = [ [[package]] name = "crossbeam-queue" -version = "0.3.11" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" +checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" dependencies = [ "crossbeam-utils", ] @@ -1092,9 +1092,9 @@ 
dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.20" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" @@ -1749,9 +1749,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "1.5.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97818827ef4f364230e16705d4706e2897df2bb60617d6ca15d598025a3c481f" +checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0" dependencies = [ "bytes", "futures-channel", @@ -3751,11 +3751,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.6" +version = "2.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fec2a1820ebd077e2b90c4df007bebf344cd394098a13c563957d0afc83ea47" +checksum = "93605438cbd668185516ab499d589afb7ee1859ea3d5fc8f6b0755e1c7443767" dependencies = [ - "thiserror-impl 2.0.6", + "thiserror-impl 2.0.7", ] [[package]] @@ -3771,9 +3771,9 @@ dependencies = [ [[package]] name = "thiserror-impl" -version = "2.0.6" +version = "2.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d65750cab40f4ff1929fb1ba509e9914eb756131cef4210da8d5d700d26f6312" +checksum = "e1d8749b4531af2117677a5fcd12b1348a3fe2b81e36e61ffeac5c4aa3273e36" dependencies = [ "proc-macro2", "quote", @@ -3999,7 +3999,7 @@ dependencies = [ "serde_json", "serde_repr", "serde_with", - "thiserror 2.0.6", + "thiserror 2.0.7", "tokio", "torrust-tracker-clock", "torrust-tracker-configuration", @@ -4034,7 +4034,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_json", - "thiserror 2.0.6", + "thiserror 2.0.7", "tokio", "torrust-tracker-configuration", "tracing", @@ -4061,7 +4061,7 @@ dependencies = [ "serde", 
"serde_json", "serde_with", - "thiserror 2.0.6", + "thiserror 2.0.7", "toml", "torrust-tracker-located-error", "url", @@ -4073,14 +4073,14 @@ name = "torrust-tracker-contrib-bencode" version = "3.0.0-develop" dependencies = [ "criterion", - "thiserror 2.0.6", + "thiserror 2.0.7", ] [[package]] name = "torrust-tracker-located-error" version = "3.0.0-develop" dependencies = [ - "thiserror 2.0.6", + "thiserror 2.0.7", "tracing", ] @@ -4095,7 +4095,7 @@ dependencies = [ "serde", "tdyne-peer-id", "tdyne-peer-id-registry", - "thiserror 2.0.6", + "thiserror 2.0.7", "zerocopy", ] From 7cf08a608bcc1172deff5274a939026b294ec27e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 17 Dec 2024 12:18:53 +0000 Subject: [PATCH 051/802] refactor: reorganize statistics mod Preaparing to introduce new changes in the repository. --- src/core/mod.rs | 21 +- src/core/services/statistics/mod.rs | 12 +- src/core/services/statistics/setup.rs | 13 +- src/core/statistics.rs | 578 ------------------ src/core/statistics/event/handler.rs | 234 +++++++ src/core/statistics/event/listener.rs | 11 + src/core/statistics/event/mod.rs | 34 ++ src/core/statistics/event/sender.rs | 29 + src/core/statistics/keeper.rs | 77 +++ src/core/statistics/metrics.rs | 69 +++ src/core/statistics/mod.rs | 30 + src/core/statistics/repository.rs | 144 +++++ .../apis/v1/context/stats/resources.rs | 2 +- src/servers/http/v1/services/announce.rs | 31 +- src/servers/http/v1/services/scrape.rs | 30 +- src/servers/udp/handlers.rs | 54 +- src/servers/udp/server/launcher.rs | 6 +- src/servers/udp/server/processor.rs | 4 +- 18 files changed, 721 insertions(+), 658 deletions(-) delete mode 100644 src/core/statistics.rs create mode 100644 src/core/statistics/event/handler.rs create mode 100644 src/core/statistics/event/listener.rs create mode 100644 src/core/statistics/event/mod.rs create mode 100644 src/core/statistics/event/sender.rs create mode 100644 src/core/statistics/keeper.rs create mode 100644 
src/core/statistics/metrics.rs create mode 100644 src/core/statistics/mod.rs create mode 100644 src/core/statistics/repository.rs diff --git a/src/core/mod.rs b/src/core/mod.rs index 835776e30..b5759709b 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -422,7 +422,7 @@ //! For example, the HTTP tracker would send an event like the following when it handles an `announce` request received from a peer using IP version 4. //! //! ```text -//! tracker.send_stats_event(statistics::Event::Tcp4Announce).await +//! tracker.send_stats_event(statistics::event::Event::Tcp4Announce).await //! ``` //! //! Refer to [`statistics`] module for more information about statistics. @@ -505,10 +505,10 @@ pub struct Tracker { torrents: Arc, /// Service to send stats events. - stats_event_sender: Option>, + stats_event_sender: Option>, /// The in-memory stats repo. - stats_repository: statistics::Repo, + stats_repository: statistics::repository::Repository, } /// Structure that holds the data returned by the `announce` request. @@ -624,8 +624,8 @@ impl Tracker { /// Will return a `databases::error::Error` if unable to connect to database. The `Tracker` is responsible for the persistence. pub fn new( config: &Core, - stats_event_sender: Option>, - stats_repository: statistics::Repo, + stats_event_sender: Option>, + stats_repository: statistics::repository::Repository, ) -> Result { let driver = match config.database.driver { database::Driver::Sqlite3 => Driver::Sqlite3, @@ -1207,17 +1207,20 @@ impl Tracker { Ok(()) } - /// It return the `Tracker` [`statistics::Metrics`]. + /// It return the `Tracker` [`statistics::metrics::Metrics`]. 
/// /// # Context: Statistics - pub async fn get_stats(&self) -> tokio::sync::RwLockReadGuard<'_, statistics::Metrics> { + pub async fn get_stats(&self) -> tokio::sync::RwLockReadGuard<'_, statistics::metrics::Metrics> { self.stats_repository.get_stats().await } - /// It allows to send a statistic events which eventually will be used to update [`statistics::Metrics`]. + /// It allows to send a statistic events which eventually will be used to update [`statistics::metrics::Metrics`]. /// /// # Context: Statistics - pub async fn send_stats_event(&self, event: statistics::Event) -> Option>> { + pub async fn send_stats_event( + &self, + event: statistics::event::Event, + ) -> Option>> { match &self.stats_event_sender { None => None, Some(stats_event_sender) => stats_event_sender.send_event(event).await, diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index 82ff359ab..10e1c60fa 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -3,14 +3,14 @@ //! It includes: //! //! - A [`factory`](crate::core::services::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. -//! - A [`get_metrics`] service to get the [`tracker metrics`](crate::core::statistics::Metrics). +//! - A [`get_metrics`] service to get the tracker [`metrics`](crate::core::statistics::metrics::Metrics). //! //! Tracker metrics are collected using a Publisher-Subscribe pattern. //! //! The factory function builds two structs: //! -//! - An statistics [`EventSender`](crate::core::statistics::EventSender) -//! - An statistics [`Repo`](crate::core::statistics::Repo) +//! - An statistics event [`Sender`](crate::core::statistics::event::sender::Sender) +//! - An statistics [`Repository`](crate::core::statistics::repository::Repository) //! //! ```text //! let (stats_event_sender, stats_repository) = factory(tracker_usage_statistics); @@ -21,7 +21,7 @@ //! 
There is an event listener that is receiving all the events and processing them with an event handler. //! Then, the event handler updates the metrics depending on the received event. //! -//! For example, if you send the event [`Event::Udp4Connect`](crate::core::statistics::Event::Udp4Connect): +//! For example, if you send the event [`Event::Udp4Connect`](crate::core::statistics::event::Event::Udp4Connect): //! //! ```text //! let result = event_sender.send_event(Event::Udp4Connect).await; @@ -42,7 +42,7 @@ use std::sync::Arc; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use crate::core::statistics::Metrics; +use crate::core::statistics::metrics::Metrics; use crate::core::Tracker; /// All the metrics collected by the tracker. @@ -118,7 +118,7 @@ mod tests { tracker_metrics, TrackerMetrics { torrents_metrics: TorrentsMetrics::default(), - protocol_metrics: core::statistics::Metrics::default(), + protocol_metrics: core::statistics::metrics::Metrics::default(), } ); } diff --git a/src/core/services/statistics/setup.rs b/src/core/services/statistics/setup.rs index 37603852b..e440a709c 100644 --- a/src/core/services/statistics/setup.rs +++ b/src/core/services/statistics/setup.rs @@ -7,16 +7,21 @@ use crate::core::statistics; /// /// It returns: /// -/// - An statistics [`EventSender`](crate::core::statistics::EventSender) that allows you to send events related to statistics. -/// - An statistics [`Repo`](crate::core::statistics::Repo) which is an in-memory repository for the tracker metrics. +/// - An statistics event [`Sender`](crate::core::statistics::event::sender::Sender) that allows you to send events related to statistics. +/// - An statistics [`Repository`](crate::core::statistics::repository::Repository) which is an in-memory repository for the tracker metrics. 
/// /// When the input argument `tracker_usage_statistics`is false the setup does not run the event listeners, consequently the statistics /// events are sent are received but not dispatched to the handler. #[must_use] -pub fn factory(tracker_usage_statistics: bool) -> (Option>, statistics::Repo) { +pub fn factory( + tracker_usage_statistics: bool, +) -> ( + Option>, + statistics::repository::Repository, +) { let mut stats_event_sender = None; - let mut stats_tracker = statistics::Keeper::new(); + let mut stats_tracker = statistics::keeper::Keeper::new(); if tracker_usage_statistics { stats_event_sender = Some(stats_tracker.run_event_listener()); diff --git a/src/core/statistics.rs b/src/core/statistics.rs deleted file mode 100644 index 6df7c4961..000000000 --- a/src/core/statistics.rs +++ /dev/null @@ -1,578 +0,0 @@ -//! Structs to collect and keep tracker metrics. -//! -//! The tracker collects metrics such as: -//! -//! - Number of connections handled -//! - Number of `announce` requests handled -//! - Number of `scrape` request handled -//! -//! These metrics are collected for each connection type: UDP and HTTP and -//! also for each IP version used by the peers: IPv4 and IPv6. -//! -//! > Notice: that UDP tracker have an specific `connection` request. For the HTTP metrics the counter counts one connection for each `announce` or `scrape` request. -//! -//! The data is collected by using an `event-sender -> event listener` model. -//! -//! The tracker uses an [`statistics::EventSender`](crate::core::statistics::EventSender) instance to send an event. -//! The [`statistics::Keeper`](crate::core::statistics::Keeper) listens to new events and uses the [`statistics::Repo`](crate::core::statistics::Repo) to upgrade and store metrics. -//! -//! See the [`statistics::Event`](crate::core::statistics::Event) enum to check which events are available. 
-use std::sync::Arc; - -use futures::future::BoxFuture; -use futures::FutureExt; -#[cfg(test)] -use mockall::{automock, predicate::str}; -use tokio::sync::mpsc::error::SendError; -use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; - -const CHANNEL_BUFFER_SIZE: usize = 65_535; - -/// An statistics event. It is used to collect tracker metrics. -/// -/// - `Tcp` prefix means the event was triggered by the HTTP tracker -/// - `Udp` prefix means the event was triggered by the UDP tracker -/// - `4` or `6` prefixes means the IP version used by the peer -/// - Finally the event suffix is the type of request: `announce`, `scrape` or `connection` -/// -/// > NOTE: HTTP trackers do not use `connection` requests. -#[derive(Debug, PartialEq, Eq)] -pub enum Event { - // code-review: consider one single event for request type with data: Event::Announce { scheme: HTTPorUDP, ip_version: V4orV6 } - // Attributes are enums too. - Tcp4Announce, - Tcp4Scrape, - Tcp6Announce, - Tcp6Scrape, - Udp4RequestAborted, - Udp4Request, - Udp4Connect, - Udp4Announce, - Udp4Scrape, - Udp4Response, - Udp4Error, - Udp6Request, - Udp6Connect, - Udp6Announce, - Udp6Scrape, - Udp6Response, - Udp6Error, -} - -/// Metrics collected by the tracker. -/// -/// - Number of connections handled -/// - Number of `announce` requests handled -/// - Number of `scrape` request handled -/// -/// These metrics are collected for each connection type: UDP and HTTP -/// and also for each IP version used by the peers: IPv4 and IPv6. -#[derive(Debug, PartialEq, Default)] -pub struct Metrics { - /// Total number of TCP (HTTP tracker) connections from IPv4 peers. - /// Since the HTTP tracker spec does not require a handshake, this metric - /// increases for every HTTP request. - pub tcp4_connections_handled: u64, - /// Total number of TCP (HTTP tracker) `announce` requests from IPv4 peers. - pub tcp4_announces_handled: u64, - /// Total number of TCP (HTTP tracker) `scrape` requests from IPv4 peers. 
- pub tcp4_scrapes_handled: u64, - - /// Total number of TCP (HTTP tracker) connections from IPv6 peers. - pub tcp6_connections_handled: u64, - /// Total number of TCP (HTTP tracker) `announce` requests from IPv6 peers. - pub tcp6_announces_handled: u64, - /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. - pub tcp6_scrapes_handled: u64, - - /// Total number of UDP (UDP tracker) requests aborted. - pub udp_requests_aborted: u64, - - /// Total number of UDP (UDP tracker) requests from IPv4 peers. - pub udp4_requests: u64, - /// Total number of UDP (UDP tracker) connections from IPv4 peers. - pub udp4_connections_handled: u64, - /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. - pub udp4_announces_handled: u64, - /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. - pub udp4_scrapes_handled: u64, - /// Total number of UDP (UDP tracker) responses from IPv4 peers. - pub udp4_responses: u64, - /// Total number of UDP (UDP tracker) `error` requests from IPv4 peers. - pub udp4_errors_handled: u64, - - /// Total number of UDP (UDP tracker) requests from IPv6 peers. - pub udp6_requests: u64, - /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. - pub udp6_connections_handled: u64, - /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. - pub udp6_announces_handled: u64, - /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. - pub udp6_scrapes_handled: u64, - /// Total number of UDP (UDP tracker) responses from IPv6 peers. - pub udp6_responses: u64, - /// Total number of UDP (UDP tracker) `error` requests from IPv6 peers. - pub udp6_errors_handled: u64, -} - -/// The service responsible for keeping tracker metrics (listening to statistics events and handle them). -/// -/// It actively listen to new statistics events. When it receives a new event -/// it accordingly increases the counters. 
-pub struct Keeper { - pub repository: Repo, -} - -impl Default for Keeper { - fn default() -> Self { - Self::new() - } -} - -impl Keeper { - #[must_use] - pub fn new() -> Self { - Self { repository: Repo::new() } - } - - #[must_use] - pub fn new_active_instance() -> (Box, Repo) { - let mut stats_tracker = Self::new(); - - let stats_event_sender = stats_tracker.run_event_listener(); - - (stats_event_sender, stats_tracker.repository) - } - - pub fn run_event_listener(&mut self) -> Box { - let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); - - let stats_repository = self.repository.clone(); - - tokio::spawn(async move { event_listener(receiver, stats_repository).await }); - - Box::new(Sender { sender }) - } -} - -async fn event_listener(mut receiver: mpsc::Receiver, stats_repository: Repo) { - while let Some(event) = receiver.recv().await { - event_handler(event, &stats_repository).await; - } -} - -async fn event_handler(event: Event, stats_repository: &Repo) { - match event { - // TCP4 - Event::Tcp4Announce => { - stats_repository.increase_tcp4_announces().await; - stats_repository.increase_tcp4_connections().await; - } - Event::Tcp4Scrape => { - stats_repository.increase_tcp4_scrapes().await; - stats_repository.increase_tcp4_connections().await; - } - - // TCP6 - Event::Tcp6Announce => { - stats_repository.increase_tcp6_announces().await; - stats_repository.increase_tcp6_connections().await; - } - Event::Tcp6Scrape => { - stats_repository.increase_tcp6_scrapes().await; - stats_repository.increase_tcp6_connections().await; - } - - // UDP - Event::Udp4RequestAborted => { - stats_repository.increase_udp_requests_aborted().await; - } - - // UDP4 - Event::Udp4Request => { - stats_repository.increase_udp4_requests().await; - } - Event::Udp4Connect => { - stats_repository.increase_udp4_connections().await; - } - Event::Udp4Announce => { - stats_repository.increase_udp4_announces().await; - } - Event::Udp4Scrape => { - 
stats_repository.increase_udp4_scrapes().await; - } - Event::Udp4Response => { - stats_repository.increase_udp4_responses().await; - } - Event::Udp4Error => { - stats_repository.increase_udp4_errors().await; - } - - // UDP6 - Event::Udp6Request => { - stats_repository.increase_udp6_requests().await; - } - Event::Udp6Connect => { - stats_repository.increase_udp6_connections().await; - } - Event::Udp6Announce => { - stats_repository.increase_udp6_announces().await; - } - Event::Udp6Scrape => { - stats_repository.increase_udp6_scrapes().await; - } - Event::Udp6Response => { - stats_repository.increase_udp6_responses().await; - } - Event::Udp6Error => { - stats_repository.increase_udp6_errors().await; - } - } - - tracing::debug!("stats: {:?}", stats_repository.get_stats().await); -} - -/// A trait to allow sending statistics events -#[cfg_attr(test, automock)] -pub trait EventSender: Sync + Send { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; -} - -/// An [`statistics::EventSender`](crate::core::statistics::EventSender) implementation. -/// -/// It uses a channel sender to send the statistic events. The channel is created by a -/// [`statistics::Keeper`](crate::core::statistics::Keeper) -pub struct Sender { - sender: mpsc::Sender, -} - -impl EventSender for Sender { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { - async move { Some(self.sender.send(event).await) }.boxed() - } -} - -/// A repository for the tracker metrics. 
-#[derive(Clone)] -pub struct Repo { - pub stats: Arc>, -} - -impl Default for Repo { - fn default() -> Self { - Self::new() - } -} - -impl Repo { - #[must_use] - pub fn new() -> Self { - Self { - stats: Arc::new(RwLock::new(Metrics::default())), - } - } - - pub async fn get_stats(&self) -> RwLockReadGuard<'_, Metrics> { - self.stats.read().await - } - - pub async fn increase_tcp4_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp4_announces_handled += 1; - drop(stats_lock); - } - - pub async fn increase_tcp4_connections(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp4_connections_handled += 1; - drop(stats_lock); - } - - pub async fn increase_tcp4_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp4_scrapes_handled += 1; - drop(stats_lock); - } - - pub async fn increase_tcp6_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp6_announces_handled += 1; - drop(stats_lock); - } - - pub async fn increase_tcp6_connections(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp6_connections_handled += 1; - drop(stats_lock); - } - - pub async fn increase_tcp6_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp6_scrapes_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp_requests_aborted(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp_requests_aborted += 1; - drop(stats_lock); - } - - pub async fn increase_udp4_requests(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_requests += 1; - drop(stats_lock); - } - - pub async fn increase_udp4_connections(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_connections_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp4_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_announces_handled += 1; - drop(stats_lock); - 
} - - pub async fn increase_udp4_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_scrapes_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp4_responses(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_responses += 1; - drop(stats_lock); - } - - pub async fn increase_udp4_errors(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_errors_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_requests(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_requests += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_connections(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_connections_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_announces_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_scrapes_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_responses(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_responses += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_errors(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_errors_handled += 1; - drop(stats_lock); - } -} - -#[cfg(test)] -mod tests { - - mod stats_tracker { - use crate::core::statistics::{Event, Keeper, Metrics}; - - #[tokio::test] - async fn should_contain_the_tracker_statistics() { - let stats_tracker = Keeper::new(); - - let stats = stats_tracker.repository.get_stats().await; - - assert_eq!(stats.tcp4_announces_handled, Metrics::default().tcp4_announces_handled); - } - - #[tokio::test] - async fn should_create_an_event_sender_to_send_statistical_events() { - let mut stats_tracker = Keeper::new(); - - let event_sender = 
stats_tracker.run_event_listener(); - - let result = event_sender.send_event(Event::Udp4Connect).await; - - assert!(result.is_some()); - } - } - - mod event_handler { - use crate::core::statistics::{event_handler, Event, Repo}; - - #[tokio::test] - async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { - let stats_repository = Repo::new(); - - event_handler(Event::Tcp4Announce, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.tcp4_announces_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_announce_event() { - let stats_repository = Repo::new(); - - event_handler(Event::Tcp4Announce, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.tcp4_connections_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_tcp4_scrapes_counter_when_it_receives_a_tcp4_scrape_event() { - let stats_repository = Repo::new(); - - event_handler(Event::Tcp4Scrape, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.tcp4_scrapes_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_scrape_event() { - let stats_repository = Repo::new(); - - event_handler(Event::Tcp4Scrape, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.tcp4_connections_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_tcp6_announces_counter_when_it_receives_a_tcp6_announce_event() { - let stats_repository = Repo::new(); - - event_handler(Event::Tcp6Announce, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.tcp6_announces_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_announce_event() { - let stats_repository = Repo::new(); 
- - event_handler(Event::Tcp6Announce, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.tcp6_connections_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_tcp6_scrapes_counter_when_it_receives_a_tcp6_scrape_event() { - let stats_repository = Repo::new(); - - event_handler(Event::Tcp6Scrape, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.tcp6_scrapes_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_scrape_event() { - let stats_repository = Repo::new(); - - event_handler(Event::Tcp6Scrape, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.tcp6_connections_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_connections_counter_when_it_receives_a_udp4_connect_event() { - let stats_repository = Repo::new(); - - event_handler(Event::Udp4Connect, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_connections_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_announces_counter_when_it_receives_a_udp4_announce_event() { - let stats_repository = Repo::new(); - - event_handler(Event::Udp4Announce, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_announces_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_scrapes_counter_when_it_receives_a_udp4_scrape_event() { - let stats_repository = Repo::new(); - - event_handler(Event::Udp4Scrape, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_scrapes_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp6_connections_counter_when_it_receives_a_udp6_connect_event() { - let stats_repository = Repo::new(); - - event_handler(Event::Udp6Connect, &stats_repository).await; - 
- let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_connections_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp6_announces_counter_when_it_receives_a_udp6_announce_event() { - let stats_repository = Repo::new(); - - event_handler(Event::Udp6Announce, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_announces_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp6_scrapes_counter_when_it_receives_a_udp6_scrape_event() { - let stats_repository = Repo::new(); - - event_handler(Event::Udp6Scrape, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_scrapes_handled, 1); - } - } -} diff --git a/src/core/statistics/event/handler.rs b/src/core/statistics/event/handler.rs new file mode 100644 index 000000000..5acc5e12c --- /dev/null +++ b/src/core/statistics/event/handler.rs @@ -0,0 +1,234 @@ +use crate::core::statistics::event::Event; +use crate::core::statistics::repository::Repository; + +pub async fn handle_event(event: Event, stats_repository: &Repository) { + match event { + // TCP4 + Event::Tcp4Announce => { + stats_repository.increase_tcp4_announces().await; + stats_repository.increase_tcp4_connections().await; + } + Event::Tcp4Scrape => { + stats_repository.increase_tcp4_scrapes().await; + stats_repository.increase_tcp4_connections().await; + } + + // TCP6 + Event::Tcp6Announce => { + stats_repository.increase_tcp6_announces().await; + stats_repository.increase_tcp6_connections().await; + } + Event::Tcp6Scrape => { + stats_repository.increase_tcp6_scrapes().await; + stats_repository.increase_tcp6_connections().await; + } + + // UDP + Event::Udp4RequestAborted => { + stats_repository.increase_udp_requests_aborted().await; + } + + // UDP4 + Event::Udp4Request => { + stats_repository.increase_udp4_requests().await; + } + Event::Udp4Connect => { + stats_repository.increase_udp4_connections().await; + } 
+ Event::Udp4Announce => { + stats_repository.increase_udp4_announces().await; + } + Event::Udp4Scrape => { + stats_repository.increase_udp4_scrapes().await; + } + Event::Udp4Response => { + stats_repository.increase_udp4_responses().await; + } + Event::Udp4Error => { + stats_repository.increase_udp4_errors().await; + } + + // UDP6 + Event::Udp6Request => { + stats_repository.increase_udp6_requests().await; + } + Event::Udp6Connect => { + stats_repository.increase_udp6_connections().await; + } + Event::Udp6Announce => { + stats_repository.increase_udp6_announces().await; + } + Event::Udp6Scrape => { + stats_repository.increase_udp6_scrapes().await; + } + Event::Udp6Response => { + stats_repository.increase_udp6_responses().await; + } + Event::Udp6Error => { + stats_repository.increase_udp6_errors().await; + } + } + + tracing::debug!("stats: {:?}", stats_repository.get_stats().await); +} + +#[cfg(test)] +mod tests { + use crate::core::statistics::event::handler::handle_event; + use crate::core::statistics::event::Event; + use crate::core::statistics::repository::Repository; + + #[tokio::test] + async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Tcp4Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_announce_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Tcp4Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp4_scrapes_counter_when_it_receives_a_tcp4_scrape_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Tcp4Scrape, &stats_repository).await; + + let stats = 
stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_scrapes_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_scrape_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Tcp4Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_announces_counter_when_it_receives_a_tcp6_announce_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Tcp6Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_announce_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Tcp6Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_scrapes_counter_when_it_receives_a_tcp6_scrape_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Tcp6Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_scrapes_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_scrape_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Tcp6Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_connections_counter_when_it_receives_a_udp4_connect_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Udp4Connect, &stats_repository).await; + + let stats = 
stats_repository.get_stats().await; + + assert_eq!(stats.udp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_announces_counter_when_it_receives_a_udp4_announce_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Udp4Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_scrapes_counter_when_it_receives_a_udp4_scrape_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Udp4Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_scrapes_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_connections_counter_when_it_receives_a_udp6_connect_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Udp6Connect, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_announces_counter_when_it_receives_a_udp6_announce_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Udp6Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_scrapes_counter_when_it_receives_a_udp6_scrape_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Udp6Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_scrapes_handled, 1); + } +} diff --git a/src/core/statistics/event/listener.rs b/src/core/statistics/event/listener.rs new file mode 100644 index 000000000..89ed7b41a --- /dev/null +++ b/src/core/statistics/event/listener.rs @@ -0,0 +1,11 @@ +use tokio::sync::mpsc; + +use super::handler::handle_event; 
+use super::Event; +use crate::core::statistics::repository::Repository; + +pub async fn dispatch_events(mut receiver: mpsc::Receiver, stats_repository: Repository) { + while let Some(event) = receiver.recv().await { + handle_event(event, &stats_repository).await; + } +} diff --git a/src/core/statistics/event/mod.rs b/src/core/statistics/event/mod.rs new file mode 100644 index 000000000..b14995cc1 --- /dev/null +++ b/src/core/statistics/event/mod.rs @@ -0,0 +1,34 @@ +pub mod handler; +pub mod listener; +pub mod sender; + +/// An statistics event. It is used to collect tracker metrics. +/// +/// - `Tcp` prefix means the event was triggered by the HTTP tracker +/// - `Udp` prefix means the event was triggered by the UDP tracker +/// - `4` or `6` prefixes means the IP version used by the peer +/// - Finally the event suffix is the type of request: `announce`, `scrape` or `connection` +/// +/// > NOTE: HTTP trackers do not use `connection` requests. +#[derive(Debug, PartialEq, Eq)] +pub enum Event { + // code-review: consider one single event for request type with data: Event::Announce { scheme: HTTPorUDP, ip_version: V4orV6 } + // Attributes are enums too. 
+ Tcp4Announce, + Tcp4Scrape, + Tcp6Announce, + Tcp6Scrape, + Udp4RequestAborted, + Udp4Request, + Udp4Connect, + Udp4Announce, + Udp4Scrape, + Udp4Response, + Udp4Error, + Udp6Request, + Udp6Connect, + Udp6Announce, + Udp6Scrape, + Udp6Response, + Udp6Error, +} diff --git a/src/core/statistics/event/sender.rs b/src/core/statistics/event/sender.rs new file mode 100644 index 000000000..1b663b5d1 --- /dev/null +++ b/src/core/statistics/event/sender.rs @@ -0,0 +1,29 @@ +use futures::future::BoxFuture; +use futures::FutureExt; +#[cfg(test)] +use mockall::{automock, predicate::str}; +use tokio::sync::mpsc; +use tokio::sync::mpsc::error::SendError; + +use super::Event; + +/// A trait to allow sending statistics events +#[cfg_attr(test, automock)] +pub trait Sender: Sync + Send { + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; +} + +/// An [`statistics::EventSender`](crate::core::statistics::event::sender::Sender) implementation. +/// +/// It uses a channel sender to send the statistic events. The channel is created by a +/// [`statistics::Keeper`](crate::core::statistics::keeper::Keeper) +#[allow(clippy::module_name_repetitions)] +pub struct ChannelSender { + pub(crate) sender: mpsc::Sender, +} + +impl Sender for ChannelSender { + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { + async move { Some(self.sender.send(event).await) }.boxed() + } +} diff --git a/src/core/statistics/keeper.rs b/src/core/statistics/keeper.rs new file mode 100644 index 000000000..5427734e1 --- /dev/null +++ b/src/core/statistics/keeper.rs @@ -0,0 +1,77 @@ +use tokio::sync::mpsc; + +use super::event::listener::dispatch_events; +use super::event::sender::{ChannelSender, Sender}; +use super::event::Event; +use super::repository::Repository; + +const CHANNEL_BUFFER_SIZE: usize = 65_535; + +/// The service responsible for keeping tracker metrics (listening to statistics events and handle them). +/// +/// It actively listen to new statistics events. 
When it receives a new event +/// it accordingly increases the counters. +pub struct Keeper { + pub repository: Repository, +} + +impl Default for Keeper { + fn default() -> Self { + Self::new() + } +} + +impl Keeper { + #[must_use] + pub fn new() -> Self { + Self { + repository: Repository::new(), + } + } + + #[must_use] + pub fn new_active_instance() -> (Box, Repository) { + let mut stats_tracker = Self::new(); + + let stats_event_sender = stats_tracker.run_event_listener(); + + (stats_event_sender, stats_tracker.repository) + } + + pub fn run_event_listener(&mut self) -> Box { + let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); + + let stats_repository = self.repository.clone(); + + tokio::spawn(async move { dispatch_events(receiver, stats_repository).await }); + + Box::new(ChannelSender { sender }) + } +} + +#[cfg(test)] +mod tests { + use crate::core::statistics::event::Event; + use crate::core::statistics::keeper::Keeper; + use crate::core::statistics::metrics::Metrics; + + #[tokio::test] + async fn should_contain_the_tracker_statistics() { + let stats_tracker = Keeper::new(); + + let stats = stats_tracker.repository.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, Metrics::default().tcp4_announces_handled); + } + + #[tokio::test] + async fn should_create_an_event_sender_to_send_statistical_events() { + let mut stats_tracker = Keeper::new(); + + let event_sender = stats_tracker.run_event_listener(); + + let result = event_sender.send_event(Event::Udp4Connect).await; + + assert!(result.is_some()); + } +} diff --git a/src/core/statistics/metrics.rs b/src/core/statistics/metrics.rs new file mode 100644 index 000000000..970302816 --- /dev/null +++ b/src/core/statistics/metrics.rs @@ -0,0 +1,69 @@ +/// Metrics collected by the tracker. 
+/// +/// - Number of connections handled +/// - Number of `announce` requests handled +/// - Number of `scrape` request handled +/// +/// These metrics are collected for each connection type: UDP and HTTP +/// and also for each IP version used by the peers: IPv4 and IPv6. +#[derive(Debug, PartialEq, Default)] +pub struct Metrics { + /// Total number of TCP (HTTP tracker) connections from IPv4 peers. + /// Since the HTTP tracker spec does not require a handshake, this metric + /// increases for every HTTP request. + pub tcp4_connections_handled: u64, + + /// Total number of TCP (HTTP tracker) `announce` requests from IPv4 peers. + pub tcp4_announces_handled: u64, + + /// Total number of TCP (HTTP tracker) `scrape` requests from IPv4 peers. + pub tcp4_scrapes_handled: u64, + + /// Total number of TCP (HTTP tracker) connections from IPv6 peers. + pub tcp6_connections_handled: u64, + + /// Total number of TCP (HTTP tracker) `announce` requests from IPv6 peers. + pub tcp6_announces_handled: u64, + + /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. + pub tcp6_scrapes_handled: u64, + + /// Total number of UDP (UDP tracker) requests aborted. + pub udp_requests_aborted: u64, + + /// Total number of UDP (UDP tracker) requests from IPv4 peers. + pub udp4_requests: u64, + + /// Total number of UDP (UDP tracker) connections from IPv4 peers. + pub udp4_connections_handled: u64, + + /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. + pub udp4_announces_handled: u64, + + /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. + pub udp4_scrapes_handled: u64, + + /// Total number of UDP (UDP tracker) responses from IPv4 peers. + pub udp4_responses: u64, + + /// Total number of UDP (UDP tracker) `error` requests from IPv4 peers. + pub udp4_errors_handled: u64, + + /// Total number of UDP (UDP tracker) requests from IPv6 peers. 
+ pub udp6_requests: u64, + + /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. + pub udp6_connections_handled: u64, + + /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. + pub udp6_announces_handled: u64, + + /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. + pub udp6_scrapes_handled: u64, + + /// Total number of UDP (UDP tracker) responses from IPv6 peers. + pub udp6_responses: u64, + + /// Total number of UDP (UDP tracker) `error` requests from IPv6 peers. + pub udp6_errors_handled: u64, +} diff --git a/src/core/statistics/mod.rs b/src/core/statistics/mod.rs new file mode 100644 index 000000000..49a82bea9 --- /dev/null +++ b/src/core/statistics/mod.rs @@ -0,0 +1,30 @@ +//! Structs to collect and keep tracker metrics. +//! +//! The tracker collects metrics such as: +//! +//! - Number of connections handled +//! - Number of `announce` requests handled +//! - Number of `scrape` request handled +//! +//! These metrics are collected for each connection type: UDP and HTTP and +//! also for each IP version used by the peers: IPv4 and IPv6. +//! +//! > Notice: that UDP tracker have an specific `connection` request. For the +//! > `HTTP` metrics the counter counts one connection for each `announce` or +//! > `scrape` request. +//! +//! The data is collected by using an `event-sender -> event listener` model. +//! +//! The tracker uses a [`Sender`](crate::core::statistics::event::sender::Sender) +//! instance to send an event. +//! +//! The [`statistics::keeper::Keeper`](crate::core::statistics::keeper::Keeper) listens to new +//! events and uses the [`statistics::repository::Repository`](crate::core::statistics::repository::Repository) to +//! upgrade and store metrics. +//! +//! See the [`statistics::event::Event`](crate::core::statistics::event::Event) enum to check +//! which events are available. 
+pub mod event; +pub mod keeper; +pub mod metrics; +pub mod repository; diff --git a/src/core/statistics/repository.rs b/src/core/statistics/repository.rs new file mode 100644 index 000000000..bdbc046de --- /dev/null +++ b/src/core/statistics/repository.rs @@ -0,0 +1,144 @@ +use std::sync::Arc; + +use tokio::sync::{RwLock, RwLockReadGuard}; + +use super::metrics::Metrics; + +/// A repository for the tracker metrics. +#[derive(Clone)] +pub struct Repository { + pub stats: Arc>, +} + +impl Default for Repository { + fn default() -> Self { + Self::new() + } +} + +impl Repository { + #[must_use] + pub fn new() -> Self { + Self { + stats: Arc::new(RwLock::new(Metrics::default())), + } + } + + pub async fn get_stats(&self) -> RwLockReadGuard<'_, Metrics> { + self.stats.read().await + } + + pub async fn increase_tcp4_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp4_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp4_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp4_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp4_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp4_scrapes_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp6_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp6_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp6_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp6_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp6_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp6_scrapes_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp_requests_aborted(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp_requests_aborted += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_requests(&self) { 
+ let mut stats_lock = self.stats.write().await; + stats_lock.udp4_requests += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_scrapes_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_responses(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_responses += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_errors(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_errors_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_requests(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_requests += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_scrapes_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_responses(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_responses += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_errors(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_errors_handled += 1; + drop(stats_lock); + } +} diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs 
index e7057f30a..55cb3a581 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -102,7 +102,7 @@ mod tests { use super::Stats; use crate::core::services::statistics::TrackerMetrics; - use crate::core::statistics::Metrics; + use crate::core::statistics::metrics::Metrics; #[test] fn stats_resource_should_be_converted_from_tracker_metrics() { diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 51ec43d56..73d480c79 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -6,7 +6,7 @@ //! and it returns the [`AnnounceData`] returned //! by the [`Tracker`]. //! -//! It also sends an [`statistics::Event`] +//! It also sends an [`statistics::event::Event`] //! because events are specific for the HTTP tracker. use std::net::IpAddr; use std::sync::Arc; @@ -39,10 +39,10 @@ pub async fn invoke( match original_peer_ip { IpAddr::V4(_) => { - tracker.send_stats_event(statistics::Event::Tcp4Announce).await; + tracker.send_stats_event(statistics::event::Event::Tcp4Announce).await; } IpAddr::V6(_) => { - tracker.send_stats_event(statistics::Event::Tcp6Announce).await; + tracker.send_stats_event(statistics::event::Event::Tcp6Announce).await; } } @@ -132,10 +132,10 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_4_announce_event_when_the_peer_uses_ipv4() { - let mut stats_event_sender_mock = statistics::MockEventSender::new(); + let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(statistics::Event::Tcp4Announce)) + .with(eq(statistics::event::Event::Tcp4Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); @@ -144,7 +144,7 @@ mod tests { Tracker::new( &configuration::ephemeral().core, Some(stats_event_sender), - statistics::Repo::new(), + 
statistics::repository::Repository::new(), ) .unwrap(), ); @@ -154,13 +154,18 @@ mod tests { let _announce_data = invoke(tracker, sample_info_hash(), &mut peer, &PeersWanted::All).await; } - fn tracker_with_an_ipv6_external_ip(stats_event_sender: Box) -> Tracker { + fn tracker_with_an_ipv6_external_ip(stats_event_sender: Box) -> Tracker { let mut configuration = configuration::ephemeral(); configuration.core.net.external_ip = Some(IpAddr::V6(Ipv6Addr::new( 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, ))); - Tracker::new(&configuration.core, Some(stats_event_sender), statistics::Repo::new()).unwrap() + Tracker::new( + &configuration.core, + Some(stats_event_sender), + statistics::repository::Repository::new(), + ) + .unwrap() } fn peer_with_the_ipv4_loopback_ip() -> peer::Peer { @@ -176,10 +181,10 @@ mod tests { // Tracker changes the peer IP to the tracker external IP when the peer is using the loopback IP. // Assert that the event sent is a TCP4 event - let mut stats_event_sender_mock = statistics::MockEventSender::new(); + let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(statistics::Event::Tcp4Announce)) + .with(eq(statistics::event::Event::Tcp4Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); @@ -198,10 +203,10 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_6_announce_event_when_the_peer_uses_ipv6_even_if_the_tracker_changes_the_peer_ip_to_ipv4() { - let mut stats_event_sender_mock = statistics::MockEventSender::new(); + let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(statistics::Event::Tcp6Announce)) + .with(eq(statistics::event::Event::Tcp6Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = 
Box::new(stats_event_sender_mock); @@ -210,7 +215,7 @@ mod tests { Tracker::new( &configuration::ephemeral().core, Some(stats_event_sender), - statistics::Repo::new(), + statistics::repository::Repository::new(), ) .unwrap(), ); diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index f040e0430..9eef263cb 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -6,7 +6,7 @@ //! and it returns the [`ScrapeData`] returned //! by the [`Tracker`]. //! -//! It also sends an [`statistics::Event`] +//! It also sends an [`statistics::event::Event`] //! because events are specific for the HTTP tracker. use std::net::IpAddr; use std::sync::Arc; @@ -48,10 +48,10 @@ pub async fn fake(tracker: &Arc, info_hashes: &Vec, original_ async fn send_scrape_event(original_peer_ip: &IpAddr, tracker: &Arc) { match original_peer_ip { IpAddr::V4(_) => { - tracker.send_stats_event(statistics::Event::Tcp4Scrape).await; + tracker.send_stats_event(statistics::event::Event::Tcp4Scrape).await; } IpAddr::V6(_) => { - tracker.send_stats_event(statistics::Event::Tcp6Scrape).await; + tracker.send_stats_event(statistics::event::Event::Tcp6Scrape).await; } } } @@ -138,10 +138,10 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_4_scrape_event_when_the_peer_uses_ipv4() { - let mut stats_event_sender_mock = statistics::MockEventSender::new(); + let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(statistics::Event::Tcp4Scrape)) + .with(eq(statistics::event::Event::Tcp4Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); @@ -150,7 +150,7 @@ mod tests { Tracker::new( &configuration::ephemeral().core, Some(stats_event_sender), - statistics::Repo::new(), + statistics::repository::Repository::new(), ) .unwrap(), ); @@ -162,10 +162,10 @@ mod tests { 
#[tokio::test] async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { - let mut stats_event_sender_mock = statistics::MockEventSender::new(); + let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(statistics::Event::Tcp6Scrape)) + .with(eq(statistics::event::Event::Tcp6Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); @@ -174,7 +174,7 @@ mod tests { Tracker::new( &configuration::ephemeral().core, Some(stats_event_sender), - statistics::Repo::new(), + statistics::repository::Repository::new(), ) .unwrap(), ); @@ -221,10 +221,10 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_4_scrape_event_when_the_peer_uses_ipv4() { - let mut stats_event_sender_mock = statistics::MockEventSender::new(); + let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(statistics::Event::Tcp4Scrape)) + .with(eq(statistics::event::Event::Tcp4Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); @@ -233,7 +233,7 @@ mod tests { Tracker::new( &configuration::ephemeral().core, Some(stats_event_sender), - statistics::Repo::new(), + statistics::repository::Repository::new(), ) .unwrap(), ); @@ -245,10 +245,10 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { - let mut stats_event_sender_mock = statistics::MockEventSender::new(); + let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(statistics::Event::Tcp6Scrape)) + .with(eq(statistics::event::Event::Tcp6Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); @@ -257,7 +257,7 @@ 
mod tests { Tracker::new( &configuration::ephemeral().core, Some(stats_event_sender), - statistics::Repo::new(), + statistics::repository::Repository::new(), ) .unwrap(), ); diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 1fb450e1a..1f838cd68 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -155,10 +155,10 @@ pub async fn handle_connect( // send stats event match remote_addr { SocketAddr::V4(_) => { - tracker.send_stats_event(statistics::Event::Udp4Connect).await; + tracker.send_stats_event(statistics::event::Event::Udp4Connect).await; } SocketAddr::V6(_) => { - tracker.send_stats_event(statistics::Event::Udp6Connect).await; + tracker.send_stats_event(statistics::event::Event::Udp6Connect).await; } } @@ -211,10 +211,10 @@ pub async fn handle_announce( match remote_client_ip { IpAddr::V4(_) => { - tracker.send_stats_event(statistics::Event::Udp4Announce).await; + tracker.send_stats_event(statistics::event::Event::Udp4Announce).await; } IpAddr::V6(_) => { - tracker.send_stats_event(statistics::Event::Udp6Announce).await; + tracker.send_stats_event(statistics::event::Event::Udp6Announce).await; } } @@ -326,10 +326,10 @@ pub async fn handle_scrape( // send stats event match remote_addr { SocketAddr::V4(_) => { - tracker.send_stats_event(statistics::Event::Udp4Scrape).await; + tracker.send_stats_event(statistics::event::Event::Udp4Scrape).await; } SocketAddr::V6(_) => { - tracker.send_stats_event(statistics::Event::Udp6Scrape).await; + tracker.send_stats_event(statistics::event::Event::Udp6Scrape).await; } } @@ -374,10 +374,10 @@ async fn handle_error( // send stats event match remote_addr { SocketAddr::V4(_) => { - tracker.send_stats_event(statistics::Event::Udp4Error).await; + tracker.send_stats_event(statistics::event::Event::Udp4Error).await; } SocketAddr::V6(_) => { - tracker.send_stats_event(statistics::Event::Udp6Error).await; + tracker.send_stats_event(statistics::event::Event::Udp6Error).await; } } } @@ 
-602,10 +602,10 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { - let mut stats_event_sender_mock = statistics::MockEventSender::new(); + let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(statistics::Event::Udp4Connect)) + .with(eq(statistics::event::Event::Udp4Connect)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); @@ -616,7 +616,7 @@ mod tests { core::Tracker::new( &tracker_configuration().core, Some(stats_event_sender), - statistics::Repo::new(), + statistics::repository::Repository::new(), ) .unwrap(), ); @@ -631,10 +631,10 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { - let mut stats_event_sender_mock = statistics::MockEventSender::new(); + let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(statistics::Event::Udp6Connect)) + .with(eq(statistics::event::Event::Udp6Connect)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); @@ -643,7 +643,7 @@ mod tests { core::Tracker::new( &tracker_configuration().core, Some(stats_event_sender), - statistics::Repo::new(), + statistics::repository::Repository::new(), ) .unwrap(), ); @@ -892,10 +892,10 @@ mod tests { #[tokio::test] async fn should_send_the_upd4_announce_event() { - let mut stats_event_sender_mock = statistics::MockEventSender::new(); + let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(statistics::Event::Udp4Announce)) + .with(eq(statistics::event::Event::Udp4Announce)) .times(1) .returning(|_| 
Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); @@ -904,7 +904,7 @@ mod tests { core::Tracker::new( &tracker_configuration().core, Some(stats_event_sender), - statistics::Repo::new(), + statistics::repository::Repository::new(), ) .unwrap(), ); @@ -1138,10 +1138,10 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_announce_event() { - let mut stats_event_sender_mock = statistics::MockEventSender::new(); + let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(statistics::Event::Udp6Announce)) + .with(eq(statistics::event::Event::Udp6Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); @@ -1150,7 +1150,7 @@ mod tests { core::Tracker::new( &tracker_configuration().core, Some(stats_event_sender), - statistics::Repo::new(), + statistics::repository::Repository::new(), ) .unwrap(), ); @@ -1173,7 +1173,7 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; use crate::core; - use crate::core::statistics::Keeper; + use crate::core::statistics::keeper::Keeper; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -1434,10 +1434,10 @@ mod tests { #[tokio::test] async fn should_send_the_upd4_scrape_event() { - let mut stats_event_sender_mock = statistics::MockEventSender::new(); + let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(statistics::Event::Udp4Scrape)) + .with(eq(statistics::event::Event::Udp4Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); @@ -1447,7 +1447,7 @@ mod tests { core::Tracker::new( 
&tracker_configuration().core, Some(stats_event_sender), - statistics::Repo::new(), + statistics::repository::Repository::new(), ) .unwrap(), ); @@ -1478,10 +1478,10 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_scrape_event() { - let mut stats_event_sender_mock = statistics::MockEventSender::new(); + let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); stats_event_sender_mock .expect_send_event() - .with(eq(statistics::Event::Udp6Scrape)) + .with(eq(statistics::event::Event::Udp6Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); @@ -1491,7 +1491,7 @@ mod tests { core::Tracker::new( &tracker_configuration().core, Some(stats_event_sender), - statistics::Repo::new(), + statistics::repository::Repository::new(), ) .unwrap(), ); diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index f314e3721..1d1ba4de4 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -166,10 +166,10 @@ impl Launcher { match req.from.ip() { IpAddr::V4(_) => { - tracker.send_stats_event(statistics::Event::Udp4Request).await; + tracker.send_stats_event(statistics::event::Event::Udp4Request).await; } IpAddr::V6(_) => { - tracker.send_stats_event(statistics::Event::Udp6Request).await; + tracker.send_stats_event(statistics::event::Event::Udp6Request).await; } } @@ -202,7 +202,7 @@ impl Launcher { if old_request_aborted { // Evicted task from active requests buffer was aborted. 
- tracker.send_stats_event(statistics::Event::Udp4RequestAborted).await; + tracker.send_stats_event(statistics::event::Event::Udp4RequestAborted).await; } } else { tokio::task::yield_now().await; diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index 120196431..9a9798698 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -71,10 +71,10 @@ impl Processor { match target.ip() { IpAddr::V4(_) => { - self.tracker.send_stats_event(statistics::Event::Udp4Response).await; + self.tracker.send_stats_event(statistics::event::Event::Udp4Response).await; } IpAddr::V6(_) => { - self.tracker.send_stats_event(statistics::Event::Udp6Response).await; + self.tracker.send_stats_event(statistics::event::Event::Udp6Response).await; } } } From 0e13ff72554507218d2a548b54c4478c1410f48e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 18 Dec 2024 10:53:36 +0000 Subject: [PATCH 052/802] fix: [1097] do not compile slow doc tests It takes 32 seconds to compile and run all doc tests including the ones that use the main tracker lib (each test compiles the main lib). This change disables the compilation and execution of those tests. In terms of code coverage it is not a problem because we have unit tests for that functionality. It only affects the documentation. However, they are too slow and they are even making crash some developers' computers. The good solution would be to extract those parts into new workspace packages to avoid compiling the main library.
A new issue has been opened for it: https://github.com/torrust/torrust-tracker/issues/1140 --- src/core/auth.rs | 2 +- src/core/databases/driver.rs | 4 ++-- src/servers/http/v1/query.rs | 8 ++++---- src/servers/http/v1/requests/announce.rs | 2 +- src/servers/http/v1/responses/announce.rs | 4 ++-- src/servers/http/v1/responses/error.rs | 2 +- src/servers/http/v1/responses/scrape.rs | 2 +- src/servers/http/v1/services/peer_ip_resolver.rs | 4 ++-- 8 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/core/auth.rs b/src/core/auth.rs index 0243fceb4..7bbb25eca 100644 --- a/src/core/auth.rs +++ b/src/core/auth.rs @@ -196,7 +196,7 @@ impl Key { /// Error returned when a key cannot be parsed from a string. /// -/// ```rust,no_run +/// ```text /// use torrust_tracker::core::auth::Key; /// use std::str::FromStr; /// diff --git a/src/core/databases/driver.rs b/src/core/databases/driver.rs index a456a2650..3cbab9473 100644 --- a/src/core/databases/driver.rs +++ b/src/core/databases/driver.rs @@ -29,7 +29,7 @@ pub enum Driver { /// /// Example for `SQLite3`: /// -/// ```rust,no_run +/// ```text /// use torrust_tracker::core::databases; /// use torrust_tracker::core::databases::driver::Driver; /// @@ -40,7 +40,7 @@ pub enum Driver { /// /// Example for `MySQL`: /// -/// ```rust,no_run +/// ```text /// use torrust_tracker::core::databases; /// use torrust_tracker::core::databases::driver::Driver; /// diff --git a/src/servers/http/v1/query.rs b/src/servers/http/v1/query.rs index 3a078daae..abaf89845 100644 --- a/src/servers/http/v1/query.rs +++ b/src/servers/http/v1/query.rs @@ -30,7 +30,7 @@ impl Query { /// It return `Some(value)` for a URL query param if the param with the /// input `name` exists. 
For example: /// - /// ```rust + /// ```text /// use torrust_tracker::servers::http::v1::query::Query; /// /// let raw_query = "param1=value1¶m2=value2"; @@ -43,7 +43,7 @@ impl Query { /// /// It returns only the first param value even if it has multiple values: /// - /// ```rust + /// ```text /// use torrust_tracker::servers::http::v1::query::Query; /// /// let raw_query = "param1=value1¶m1=value2"; @@ -59,7 +59,7 @@ impl Query { /// Returns all the param values as a vector. /// - /// ```rust + /// ```text /// use torrust_tracker::servers::http::v1::query::Query; /// /// let query = "param1=value1¶m1=value2".parse::().unwrap(); @@ -72,7 +72,7 @@ impl Query { /// /// Returns all the param values as a vector even if it has only one value. /// - /// ```rust + /// ```text /// use torrust_tracker::servers::http::v1::query::Query; /// /// let query = "param1=value1".parse::().unwrap(); diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs index 00bf53c6f..954d62c82 100644 --- a/src/servers/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -29,7 +29,7 @@ const NUMWANT: &str = "numwant"; /// The `Announce` request. Fields use the domain types after parsing the /// query params of the request. /// -/// ```rust +/// ```text /// use aquatic_udp_protocol::{NumberOfBytes, PeerId}; /// use torrust_tracker::servers::http::v1::requests::announce::{Announce, Compact, Event}; /// use bittorrent_primitives::info_hash::InfoHash; diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs index f223a4bb0..bc63aa7fd 100644 --- a/src/servers/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -152,7 +152,7 @@ impl Into> for Compact { /// A [`NormalPeer`], for the [`Normal`] form. 
/// -/// ```rust +/// ```text /// use std::net::{IpAddr, Ipv4Addr}; /// use torrust_tracker::servers::http::v1::responses::announce::{Normal, NormalPeer}; /// @@ -204,7 +204,7 @@ impl From<&NormalPeer> for BencodeMut<'_> { /// A part from reducing the size of the response, this format does not contain /// the peer's ID. /// -/// ```rust +/// ```text /// use std::net::{IpAddr, Ipv4Addr}; /// use torrust_tracker::servers::http::v1::responses::announce::{Compact, CompactPeer, CompactPeerData}; /// diff --git a/src/servers/http/v1/responses/error.rs b/src/servers/http/v1/responses/error.rs index c406c797a..8572d861d 100644 --- a/src/servers/http/v1/responses/error.rs +++ b/src/servers/http/v1/responses/error.rs @@ -26,7 +26,7 @@ pub struct Error { impl Error { /// Returns the bencoded representation of the `Error` struct. /// - /// ```rust + /// ```text /// use torrust_tracker::servers::http::v1::responses::error::Error; /// /// let err = Error { diff --git a/src/servers/http/v1/responses/scrape.rs b/src/servers/http/v1/responses/scrape.rs index 0aef70cb1..878311ce7 100644 --- a/src/servers/http/v1/responses/scrape.rs +++ b/src/servers/http/v1/responses/scrape.rs @@ -11,7 +11,7 @@ use crate::core::ScrapeData; /// The `Scrape` response for the HTTP tracker. 
/// -/// ```rust +/// ```text /// use torrust_tracker::servers::http::v1::responses::scrape::Bencoded; /// use bittorrent_primitives::info_hash::InfoHash; /// use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; diff --git a/src/servers/http/v1/services/peer_ip_resolver.rs b/src/servers/http/v1/services/peer_ip_resolver.rs index b8987bb4d..548a99756 100644 --- a/src/servers/http/v1/services/peer_ip_resolver.rs +++ b/src/servers/http/v1/services/peer_ip_resolver.rs @@ -59,7 +59,7 @@ pub enum PeerIpResolutionError { /// /// With the tracker running on reverse proxy mode: /// -/// ```rust +/// ```text /// use std::net::IpAddr; /// use std::str::FromStr; /// @@ -81,7 +81,7 @@ pub enum PeerIpResolutionError { /// /// With the tracker non running on reverse proxy mode: /// -/// ```rust +/// ```text /// use std::net::IpAddr; /// use std::str::FromStr; /// From fb77972fc5660e8ddc57e8b754e62b2d4a0b73f2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 19 Dec 2024 11:31:33 +0000 Subject: [PATCH 053/802] feat: [#1139] increase ip ban duration to 1 hour --- src/servers/udp/server/launcher.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index 1d1ba4de4..ada50eb31 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -25,7 +25,7 @@ use crate::servers::udp::UDP_TRACKER_LOG_TARGET; /// The maximum number of connection id errors per ip. Clients will be banned if /// they exceed this limit. const MAX_CONNECTION_ID_ERRORS_PER_IP: u32 = 10; -const IP_BANS_RESET_INTERVAL_IN_SECS: u64 = 120; +const IP_BANS_RESET_INTERVAL_IN_SECS: u64 = 3600; /// A UDP server instance launcher.
#[derive(Constructor)] From d11ab3260c45d264fe068d41a6b13ba79f72f0d6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 23 Dec 2024 12:09:29 +0000 Subject: [PATCH 054/802] test: capture logs in tests This feature will be used in the future to write assertions about logs. It also changes when we show logs running tests. If you run tests with: ```console cargo test ``` logs won't be shown. If you want to see logs you have to execute tests with: ```console cargo test -- --nocapture ``` --- packages/test-helpers/src/configuration.rs | 7 +- src/bootstrap/logging.rs | 9 +- tests/common/clock.rs | 6 - tests/common/logging.rs | 166 ++++++++++-- .../servers/api/v1/contract/authentication.rs | 23 +- .../servers/api/v1/contract/configuration.rs | 9 - .../api/v1/contract/context/auth_key.rs | 74 ++---- .../api/v1/contract/context/health_check.rs | 7 +- .../servers/api/v1/contract/context/stats.rs | 11 +- .../api/v1/contract/context/torrent.rs | 51 +--- .../api/v1/contract/context/whitelist.rs | 51 +--- tests/servers/health_check_api/contract.rs | 40 +-- tests/servers/http/v1/contract.rs | 243 +++++------------- tests/servers/udp/contract.rs | 36 +-- tests/servers/udp/environment.rs | 7 +- 15 files changed, 302 insertions(+), 438 deletions(-) diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index acedbc672..e5de53fc2 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -29,7 +29,12 @@ pub fn ephemeral() -> Configuration { let mut config = Configuration::default(); - config.logging.threshold = Threshold::Off; // It should always be off here, the tests manage their own logging. + // This have to be Off otherwise the tracing global subscriber + // initialization will panic because you can't set a global subscriber more + // than once.
You can use enable logging in tests with: + // `crate::common::logging::setup(LevelFilter::ERROR);` + // That will also allow you to capture logs and write assertions on them. + config.logging.threshold = Threshold::Off; // Ephemeral socket address for API let api_port = 0u16; diff --git a/src/bootstrap/logging.rs b/src/bootstrap/logging.rs index 34809c1ca..d7a100aed 100644 --- a/src/bootstrap/logging.rs +++ b/src/bootstrap/logging.rs @@ -28,7 +28,7 @@ pub fn setup(cfg: &Configuration) { } INIT.call_once(|| { - tracing_stdout_init(tracing_level, &TraceStyle::Default); + tracing_init(tracing_level, &TraceStyle::Default); }); } @@ -43,8 +43,11 @@ fn map_to_tracing_level_filter(threshold: &Threshold) -> LevelFilter { } } -fn tracing_stdout_init(filter: LevelFilter, style: &TraceStyle) { - let builder = tracing_subscriber::fmt().with_max_level(filter).with_ansi(true); +fn tracing_init(filter: LevelFilter, style: &TraceStyle) { + let builder = tracing_subscriber::fmt() + .with_max_level(filter) + .with_ansi(true) + .with_test_writer(); let () = match style { TraceStyle::Default => builder.init(), diff --git a/tests/common/clock.rs b/tests/common/clock.rs index de3cc7c65..5d94bb83d 100644 --- a/tests/common/clock.rs +++ b/tests/common/clock.rs @@ -1,17 +1,11 @@ use std::time::Duration; use torrust_tracker_clock::clock::Time; -use tracing::level_filters::LevelFilter; -use crate::common::logging::{tracing_stderr_init, INIT}; use crate::CurrentClock; #[test] fn it_should_use_stopped_time_for_testing() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); - assert_eq!(CurrentClock::dbg_clock_type(), "Stopped".to_owned()); let time = CurrentClock::now(); diff --git a/tests/common/logging.rs b/tests/common/logging.rs index 71be2ece7..d2abc37b4 100644 --- a/tests/common/logging.rs +++ b/tests/common/logging.rs @@ -1,30 +1,156 @@ -#![allow(clippy::doc_markdown)] -//! Logging for the Integration Tests -//! -//! Tests should start their own logging. -//! -//! 
To find tests that do not start their own logging: -//! -//! ´´´ sh -//! awk 'BEGIN{RS=""; FS="\n"} /#\[tokio::test\]\s*async\s+fn\s+\w+\s*\(\s*\)\s*\{[^}]*\}/ && !/#\[tokio::test\]\s*async\s+fn\s+\w+\s*\(\s*\)\s*\{[^}]*INIT\.call_once/' $(find . -name "*.rs") -//! ´´´ -//! - -use std::sync::Once; +//! Setup for logging in tests. +use std::collections::VecDeque; +use std::io; +use std::sync::{Mutex, MutexGuard, Once, OnceLock}; +use torrust_tracker::bootstrap::logging::TraceStyle; use tracing::level_filters::LevelFilter; +use tracing_subscriber::fmt::MakeWriter; -#[allow(dead_code)] -pub static INIT: Once = Once::new(); +static INIT: Once = Once::new(); + +/// A global buffer containing the latest lines captured from logs. +#[doc(hidden)] +pub fn captured_logs_buffer() -> &'static Mutex { + static CAPTURED_LOGS_GLOBAL_BUFFER: OnceLock> = OnceLock::new(); + CAPTURED_LOGS_GLOBAL_BUFFER.get_or_init(|| Mutex::new(CircularBuffer::new(10000, 200))) +} + +pub fn setup() { + INIT.call_once(|| { + tracing_init(LevelFilter::ERROR, &TraceStyle::Default); + }); +} + +fn tracing_init(level_filter: LevelFilter, style: &TraceStyle) { + let mock_writer = LogCapturer::new(captured_logs_buffer()); -#[allow(dead_code)] -pub fn tracing_stderr_init(filter: LevelFilter) { let builder = tracing_subscriber::fmt() - .with_max_level(filter) + .with_max_level(level_filter) .with_ansi(true) - .with_writer(std::io::stderr); + .with_test_writer() + .with_writer(mock_writer); - builder.pretty().with_file(true).init(); + let () = match style { + TraceStyle::Default => builder.init(), + TraceStyle::Pretty(display_filename) => builder.pretty().with_file(*display_filename).init(), + TraceStyle::Compact => builder.compact().init(), + TraceStyle::Json => builder.json().init(), + }; tracing::info!("Logging initialized"); } + +/// It returns true is there is a log line containing all the texts passed. 
+/// +/// # Panics +/// +/// Will panic if it can't get the lock for the global buffer or convert it into +/// a vec. +#[must_use] +#[allow(dead_code)] +pub fn logs_contains_a_line_with(texts: &[&str]) -> bool { + // code-review: we can search directly in the buffer instead of converting + // the buffer into a string but that would slow down the tests because + // cloning should be faster that locking the buffer for searching. + // Because the buffer is not big. + let logs = String::from_utf8(captured_logs_buffer().lock().unwrap().as_vec()).unwrap(); + + for line in logs.split('\n') { + if contains(line, texts) { + return true; + } + } + + false +} + +#[allow(dead_code)] +fn contains(text: &str, texts: &[&str]) -> bool { + texts.iter().all(|&word| text.contains(word)) +} + +/// A tracing writer which captures the latests logs lines into a buffer. +/// It's used to capture the logs in the tests. +#[derive(Debug)] +pub struct LogCapturer<'a> { + logs: &'a Mutex, +} + +impl<'a> LogCapturer<'a> { + pub fn new(buf: &'a Mutex) -> Self { + Self { logs: buf } + } + + fn buf(&self) -> io::Result> { + self.logs.lock().map_err(|_| io::Error::from(io::ErrorKind::Other)) + } +} + +impl io::Write for LogCapturer<'_> { + fn write(&mut self, buf: &[u8]) -> io::Result { + print!("{}", String::from_utf8(buf.to_vec()).unwrap()); + + let mut target = self.buf()?; + + target.write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.buf()?.flush() + } +} + +impl MakeWriter<'_> for LogCapturer<'_> { + type Writer = Self; + + fn make_writer(&self) -> Self::Writer { + LogCapturer::new(self.logs) + } +} + +#[derive(Debug)] +pub struct CircularBuffer { + max_size: usize, + buffer: VecDeque, +} + +impl CircularBuffer { + #[must_use] + pub fn new(max_lines: usize, average_line_size: usize) -> Self { + Self { + max_size: max_lines * average_line_size, + buffer: VecDeque::with_capacity(max_lines * average_line_size), + } + } + + /// # Errors + /// + /// Won't return any error. 
+ #[allow(clippy::unnecessary_wraps)] + pub fn write(&mut self, buf: &[u8]) -> io::Result { + for &byte in buf { + if self.buffer.len() == self.max_size { + // Remove oldest byte to make space + self.buffer.pop_front(); + } + self.buffer.push_back(byte); + } + + Ok(buf.len()) + } + + /// # Errors + /// + /// Won't return any error. + #[allow(clippy::unnecessary_wraps)] + #[allow(clippy::unused_self)] + pub fn flush(&mut self) -> io::Result<()> { + Ok(()) + } + + #[must_use] + pub fn as_vec(&self) -> Vec { + self.buffer.iter().copied().collect() + } +} diff --git a/tests/servers/api/v1/contract/authentication.rs b/tests/servers/api/v1/contract/authentication.rs index 5c5cd3ae0..8f5ce8f53 100644 --- a/tests/servers/api/v1/contract/authentication.rs +++ b/tests/servers/api/v1/contract/authentication.rs @@ -1,17 +1,14 @@ use torrust_tracker_test_helpers::configuration; -use tracing::level_filters::LevelFilter; use crate::common::http::{Query, QueryParam}; -use crate::common::logging::{tracing_stderr_init, INIT}; +use crate::common::logging::{self}; use crate::servers::api::v1::asserts::{assert_token_not_valid, assert_unauthorized}; use crate::servers::api::v1::client::Client; use crate::servers::api::Started; #[tokio::test] async fn should_authenticate_requests_by_using_a_token_query_param() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -28,9 +25,7 @@ async fn should_authenticate_requests_by_using_a_token_query_param() { #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_missing() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -45,9 +40,7 @@ async fn should_not_authenticate_requests_when_the_token_is_missing() { #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_empty() { - INIT.call_once(|| { - 
tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -62,9 +55,7 @@ async fn should_not_authenticate_requests_when_the_token_is_empty() { #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_invalid() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -79,9 +70,7 @@ async fn should_not_authenticate_requests_when_the_token_is_invalid() { #[tokio::test] async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_query() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; diff --git a/tests/servers/api/v1/contract/configuration.rs b/tests/servers/api/v1/contract/configuration.rs index be42f16ad..91aa138a8 100644 --- a/tests/servers/api/v1/contract/configuration.rs +++ b/tests/servers/api/v1/contract/configuration.rs @@ -7,18 +7,10 @@ // use crate::common::app::setup_with_configuration; // use crate::servers::api::environment::stopped_environment; -use tracing::level_filters::LevelFilter; - -use crate::common::logging::{tracing_stderr_init, INIT}; - #[tokio::test] #[ignore] #[should_panic = "Could not receive bind_address."] async fn should_fail_with_ssl_enabled_and_bad_ssl_config() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); - // let tracker = setup_with_configuration(&Arc::new(configuration::ephemeral())); // let config = tracker.config.http_api.clone(); @@ -36,6 +28,5 @@ async fn should_fail_with_ssl_enabled_and_bad_ssl_config() { // }; // let env = new_stopped(tracker, bind_to, tls); - // env.start().await; } diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs index 2792a513c..9560a2f49 100644 --- 
a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -3,9 +3,8 @@ use std::time::Duration; use serde::Serialize; use torrust_tracker::core::auth::Key; use torrust_tracker_test_helpers::configuration; -use tracing::level_filters::LevelFilter; -use crate::common::logging::{tracing_stderr_init, INIT}; +use crate::common::logging::{self}; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::servers::api::v1::asserts::{ assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, @@ -17,9 +16,7 @@ use crate::servers::api::{force_database_error, Started}; #[tokio::test] async fn should_allow_generating_a_new_random_auth_key() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -43,9 +40,7 @@ async fn should_allow_generating_a_new_random_auth_key() { #[tokio::test] async fn should_allow_uploading_a_preexisting_auth_key() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -69,9 +64,7 @@ async fn should_allow_uploading_a_preexisting_auth_key() { #[tokio::test] async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -98,9 +91,7 @@ async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_generated() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -120,9 +111,7 @@ async fn 
should_fail_when_the_auth_key_cannot_be_generated() { #[tokio::test] async fn should_allow_deleting_an_auth_key() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -151,9 +140,7 @@ async fn should_fail_generating_a_new_auth_key_when_the_provided_key_is_invalid( pub seconds_valid: u64, } - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -192,9 +179,7 @@ async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid( pub seconds_valid: String, } - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -223,9 +208,7 @@ async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid( #[tokio::test] async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -250,9 +233,7 @@ async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_deleted() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -276,9 +257,7 @@ async fn should_fail_when_the_auth_key_cannot_be_deleted() { #[tokio::test] async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -315,9 +294,7 @@ async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { #[tokio::test] async fn should_allow_reloading_keys() { 
- INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -336,9 +313,7 @@ async fn should_allow_reloading_keys() { #[tokio::test] async fn should_fail_when_keys_cannot_be_reloaded() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -359,9 +334,7 @@ async fn should_fail_when_keys_cannot_be_reloaded() { #[tokio::test] async fn should_not_allow_reloading_keys_for_unauthenticated_users() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -390,9 +363,8 @@ mod deprecated_generate_key_endpoint { use torrust_tracker::core::auth::Key; use torrust_tracker_test_helpers::configuration; - use tracing::level_filters::LevelFilter; - use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::common::logging::{self}; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::servers::api::v1::asserts::{ assert_auth_key_utf8, assert_failed_to_generate_key, assert_invalid_key_duration_param, assert_token_not_valid, @@ -403,9 +375,7 @@ mod deprecated_generate_key_endpoint { #[tokio::test] async fn should_allow_generating_a_new_auth_key() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -426,9 +396,7 @@ mod deprecated_generate_key_endpoint { #[tokio::test] async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -451,9 +419,7 @@ mod deprecated_generate_key_endpoint { #[tokio::test] async fn 
should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -476,9 +442,7 @@ mod deprecated_generate_key_endpoint { #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_generated() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; diff --git a/tests/servers/api/v1/contract/context/health_check.rs b/tests/servers/api/v1/contract/context/health_check.rs index af46a5abe..0fd3f6ea6 100644 --- a/tests/servers/api/v1/contract/context/health_check.rs +++ b/tests/servers/api/v1/contract/context/health_check.rs @@ -1,16 +1,13 @@ use torrust_tracker::servers::apis::v1::context::health_check::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; -use tracing::level_filters::LevelFilter; -use crate::common::logging::{tracing_stderr_init, INIT}; +use crate::common::logging; use crate::servers::api::v1::client::get; use crate::servers::api::Started; #[tokio::test] async fn health_check_endpoint_should_return_status_ok_if_api_is_running() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index 7853450e2..e05107d25 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -4,9 +4,8 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker::servers::apis::v1::context::stats::resources::Stats; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; -use tracing::level_filters::LevelFilter; -use 
crate::common::logging::{tracing_stderr_init, INIT}; +use crate::common::logging::{self}; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::servers::api::v1::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; use crate::servers::api::v1::client::Client; @@ -14,9 +13,7 @@ use crate::servers::api::Started; #[tokio::test] async fn should_allow_getting_tracker_statistics() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -64,9 +61,7 @@ async fn should_allow_getting_tracker_statistics() { #[tokio::test] async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index e500ac63c..55c25d228 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -5,10 +5,9 @@ use torrust_tracker::servers::apis::v1::context::torrent::resources::peer::Peer; use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{self, Torrent}; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; -use tracing::level_filters::LevelFilter; use crate::common::http::{Query, QueryParam}; -use crate::common::logging::{tracing_stderr_init, INIT}; +use crate::common::logging::{self}; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::servers::api::v1::asserts::{ assert_bad_request, assert_invalid_infohash_param, assert_not_found, assert_token_not_valid, assert_torrent_info, @@ -22,9 +21,7 @@ use crate::servers::api::Started; 
#[tokio::test] async fn should_allow_getting_all_torrents() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -50,9 +47,7 @@ async fn should_allow_getting_all_torrents() { #[tokio::test] async fn should_allow_limiting_the_torrents_in_the_result() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -83,9 +78,7 @@ async fn should_allow_limiting_the_torrents_in_the_result() { #[tokio::test] async fn should_allow_the_torrents_result_pagination() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -116,9 +109,7 @@ async fn should_allow_the_torrents_result_pagination() { #[tokio::test] async fn should_allow_getting_a_list_of_torrents_providing_infohashes() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -162,9 +153,7 @@ async fn should_allow_getting_a_list_of_torrents_providing_infohashes() { #[tokio::test] async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -183,9 +172,7 @@ async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_ #[tokio::test] async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_parsed() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -204,9 +191,7 @@ async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_p #[tokio::test] async fn 
should_fail_getting_torrents_when_the_info_hash_parameter_is_invalid() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -229,9 +214,7 @@ async fn should_fail_getting_torrents_when_the_info_hash_parameter_is_invalid() #[tokio::test] async fn should_not_allow_getting_torrents_for_unauthenticated_users() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -252,9 +235,7 @@ async fn should_not_allow_getting_torrents_for_unauthenticated_users() { #[tokio::test] async fn should_allow_getting_a_torrent_info() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -285,9 +266,7 @@ async fn should_allow_getting_a_torrent_info() { #[tokio::test] async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -304,9 +283,7 @@ async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exis #[tokio::test] async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invalid() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -327,9 +304,7 @@ async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invali #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; diff --git a/tests/servers/api/v1/contract/context/whitelist.rs 
b/tests/servers/api/v1/contract/context/whitelist.rs index 49ce3e865..2be1706fc 100644 --- a/tests/servers/api/v1/contract/context/whitelist.rs +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -2,9 +2,8 @@ use std::str::FromStr; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; -use tracing::level_filters::LevelFilter; -use crate::common::logging::{tracing_stderr_init, INIT}; +use crate::common::logging::{self}; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::servers::api::v1::asserts::{ assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, @@ -18,9 +17,7 @@ use crate::servers::api::{force_database_error, Started}; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -40,9 +37,7 @@ async fn should_allow_whitelisting_a_torrent() { #[tokio::test] async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -61,9 +56,7 @@ async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() #[tokio::test] async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -86,9 +79,7 @@ async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_whitelisted() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = 
Started::new(&configuration::ephemeral().into()).await; @@ -105,9 +96,7 @@ async fn should_fail_when_the_torrent_cannot_be_whitelisted() { #[tokio::test] async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invalid() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -132,9 +121,7 @@ async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invali #[tokio::test] async fn should_allow_removing_a_torrent_from_the_whitelist() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -154,9 +141,7 @@ async fn should_allow_removing_a_torrent_from_the_whitelist() { #[tokio::test] async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -173,9 +158,7 @@ async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whi #[tokio::test] async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_infohash_is_invalid() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -200,9 +183,7 @@ async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_inf #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -223,9 +204,7 @@ async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { #[tokio::test] async fn 
should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -251,9 +230,7 @@ async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthentica #[tokio::test] async fn should_allow_reload_the_whitelist_from_the_database() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -280,9 +257,7 @@ async fn should_allow_reload_the_whitelist_from_the_database() { #[tokio::test] async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; diff --git a/tests/servers/health_check_api/contract.rs b/tests/servers/health_check_api/contract.rs index d40899f98..9c79c4a37 100644 --- a/tests/servers/health_check_api/contract.rs +++ b/tests/servers/health_check_api/contract.rs @@ -1,17 +1,14 @@ use torrust_tracker::servers::health_check_api::resources::{Report, Status}; use torrust_tracker::servers::registar::Registar; use torrust_tracker_test_helpers::configuration; -use tracing::level_filters::LevelFilter; -use crate::common::logging::{tracing_stderr_init, INIT}; +use crate::common::logging; use crate::servers::health_check_api::client::get; use crate::servers::health_check_api::Started; #[tokio::test] async fn health_check_endpoint_should_return_status_ok_when_there_is_no_services_registered() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let configuration = configuration::ephemeral_with_no_services(); @@ -37,18 +34,15 @@ mod api { use torrust_tracker::servers::health_check_api::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; - 
use tracing::level_filters::LevelFilter; - use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::common::logging; use crate::servers::api; use crate::servers::health_check_api::client::get; use crate::servers::health_check_api::Started; #[tokio::test] pub(crate) async fn it_should_return_good_health_for_api_service() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let configuration = Arc::new(configuration::ephemeral()); @@ -95,9 +89,7 @@ mod api { #[tokio::test] pub(crate) async fn it_should_return_error_when_api_service_was_stopped_after_registration() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let configuration = Arc::new(configuration::ephemeral()); @@ -152,18 +144,15 @@ mod http { use torrust_tracker::servers::health_check_api::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; - use tracing::level_filters::LevelFilter; - use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::common::logging; use crate::servers::health_check_api::client::get; use crate::servers::health_check_api::Started; use crate::servers::http; #[tokio::test] pub(crate) async fn it_should_return_good_health_for_http_service() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let configuration = Arc::new(configuration::ephemeral()); @@ -209,9 +198,7 @@ mod http { #[tokio::test] pub(crate) async fn it_should_return_error_when_http_service_was_stopped_after_registration() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let configuration = Arc::new(configuration::ephemeral()); @@ -266,18 +253,15 @@ mod udp { use torrust_tracker::servers::health_check_api::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; - use tracing::level_filters::LevelFilter; - use crate::common::logging::{tracing_stderr_init, INIT}; + use 
crate::common::logging; use crate::servers::health_check_api::client::get; use crate::servers::health_check_api::Started; use crate::servers::udp; #[tokio::test] pub(crate) async fn it_should_return_good_health_for_udp_service() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let configuration = Arc::new(configuration::ephemeral()); @@ -320,9 +304,7 @@ mod udp { #[tokio::test] pub(crate) async fn it_should_return_error_when_udp_service_was_stopped_after_registration() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let configuration = Arc::new(configuration::ephemeral()); diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 554849aee..632f38bf4 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -1,9 +1,12 @@ use torrust_tracker_test_helpers::configuration; +use crate::common::logging; use crate::servers::http::Started; #[tokio::test] async fn environment_should_be_started_and_stopped() { + logging::setup(); + let env = Started::new(&configuration::ephemeral().into()).await; env.stop().await; @@ -13,17 +16,14 @@ mod for_all_config_modes { use torrust_tracker::servers::http::v1::handlers::health_check::{Report, Status}; use torrust_tracker_test_helpers::configuration; - use tracing::level_filters::LevelFilter; - use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::common::logging; use crate::servers::http::client::Client; use crate::servers::http::Started; #[tokio::test] async fn health_check_endpoint_should_return_ok_if_the_http_tracker_is_running() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_with_reverse_proxy().into()).await; @@ -38,9 +38,8 @@ mod for_all_config_modes { mod and_running_on_reverse_proxy { use torrust_tracker_test_helpers::configuration; - use 
tracing::level_filters::LevelFilter; - use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::common::logging; use crate::servers::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; use crate::servers::http::client::Client; use crate::servers::http::requests::announce::QueryBuilder; @@ -48,9 +47,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); // If the tracker is running behind a reverse proxy, the peer IP is the // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. @@ -68,9 +65,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_with_reverse_proxy().into()).await; @@ -109,10 +104,9 @@ mod for_all_config_modes { use tokio::net::TcpListener; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; - use tracing::level_filters::LevelFilter; use crate::common::fixtures::invalid_info_hashes; - use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::common::logging; use crate::servers::http::asserts::{ assert_announce_response, assert_bad_announce_request_error_response, assert_cannot_parse_query_param_error_response, assert_cannot_parse_query_params_error_response, assert_compact_announce_response, assert_empty_announce_response, @@ -125,9 +119,7 @@ mod for_all_config_modes { #[tokio::test] async fn it_should_start_and_stop() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_public().into()).await; env.stop().await; @@ -135,9 
+127,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_respond_if_only_the_mandatory_fields_are_provided() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -154,9 +144,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_url_query_component_is_empty() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -169,9 +157,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_url_query_parameters_are_invalid() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -188,9 +174,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_a_mandatory_field_is_missing() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -229,9 +213,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -250,9 +232,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_not_fail_when_the_peer_address_param_is_invalid() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); // AnnounceQuery does not even contain the `peer_addr` // The peer IP is obtained in two ways: @@ -274,9 +254,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_downloaded_param_is_invalid() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; 
@@ -297,9 +275,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_uploaded_param_is_invalid() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -320,9 +296,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_peer_id_param_is_invalid() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -350,9 +324,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_port_param_is_invalid() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -373,9 +345,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_left_param_is_invalid() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -396,9 +366,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_event_param_is_invalid() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -427,9 +395,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_compact_param_is_invalid() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -450,9 +416,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_numwant_param_is_invalid() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -473,9 +437,7 @@ mod for_all_config_modes { 
#[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_public().into()).await; @@ -506,9 +468,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_public().into()).await; @@ -550,9 +510,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_public().into()).await; @@ -606,9 +564,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_public().into()).await; @@ -634,9 +590,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_the_compact_response() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); // Tracker Returns Compact Peer Lists // https://www.bittorrent.org/beps/bep_0023.html @@ -677,9 +631,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_not_return_the_compact_response_by_default() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); // code-review: the HTTP tracker does not return the compact response by default if the "compact" // param is not provided in the announce URL. The BEP 23 suggest to do so. 
@@ -720,9 +672,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_public().into()).await; @@ -741,9 +691,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); if TcpListener::bind(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0)) .await @@ -769,9 +717,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); // The tracker ignores the peer address in the request param. It uses the client remote ip address. @@ -796,9 +742,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_public().into()).await; @@ -817,9 +761,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_of_tcp6_announce_requests_handled_in_statistics() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); if TcpListener::bind(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0)) .await @@ -845,9 +787,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
@@ -872,9 +812,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_public().into()).await; @@ -905,9 +843,7 @@ mod for_all_config_modes { #[tokio::test] async fn when_the_client_ip_is_a_loopback_ipv4_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( ) { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); /* We assume that both the client and tracker share the same public IP. @@ -945,9 +881,7 @@ mod for_all_config_modes { #[tokio::test] async fn when_the_client_ip_is_a_loopback_ipv6_it_should_assign_to_the_peer_ip_the_external_ip_in_the_tracker_configuration( ) { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); /* We assume that both the client and tracker share the same public IP. 
@@ -989,9 +923,7 @@ mod for_all_config_modes { #[tokio::test] async fn when_the_tracker_is_behind_a_reverse_proxy_it_should_assign_to_the_peer_ip_the_ip_in_the_x_forwarded_for_http_header( ) { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); /* client <-> http proxy <-> tracker <-> Internet @@ -1046,10 +978,9 @@ mod for_all_config_modes { use tokio::net::TcpListener; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; - use tracing::level_filters::LevelFilter; use crate::common::fixtures::invalid_info_hashes; - use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::common::logging; use crate::servers::http::asserts::{ assert_cannot_parse_query_params_error_response, assert_missing_query_params_for_scrape_request_error_response, assert_scrape_response, @@ -1062,9 +993,7 @@ mod for_all_config_modes { #[tokio::test] #[allow(dead_code)] async fn should_fail_when_the_request_is_empty() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_public().into()).await; let response = Client::new(*env.bind_address()).get("scrape").await; @@ -1076,9 +1005,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_public().into()).await; @@ -1097,9 +1024,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_public().into()).await; @@ -1139,9 +1064,7 @@ mod for_all_config_modes { #[tokio::test] async fn 
should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_public().into()).await; @@ -1181,9 +1104,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_public().into()).await; @@ -1204,9 +1125,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_accept_multiple_infohashes() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_public().into()).await; @@ -1234,9 +1153,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_public().into()).await; @@ -1261,9 +1178,7 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_ot_tcp6_scrape_requests_handled_in_statistics() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); if TcpListener::bind(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0)) .await @@ -1302,9 +1217,8 @@ mod configured_as_whitelisted { use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; - use tracing::level_filters::LevelFilter; - use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::common::logging::{self}; use crate::servers::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; use crate::servers::http::client::Client; use crate::servers::http::requests::announce::QueryBuilder; @@ 
-1312,9 +1226,7 @@ mod configured_as_whitelisted { #[tokio::test] async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_listed().into()).await; @@ -1331,9 +1243,7 @@ mod configured_as_whitelisted { #[tokio::test] async fn should_allow_announcing_a_whitelisted_torrent() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_listed().into()).await; @@ -1361,9 +1271,8 @@ mod configured_as_whitelisted { use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; - use tracing::level_filters::LevelFilter; - use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::common::logging::{self}; use crate::servers::http::asserts::assert_scrape_response; use crate::servers::http::client::Client; use crate::servers::http::responses::scrape::{File, ResponseBuilder}; @@ -1371,9 +1280,7 @@ mod configured_as_whitelisted { #[tokio::test] async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_listed().into()).await; @@ -1404,9 +1311,7 @@ mod configured_as_whitelisted { #[tokio::test] async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_listed().into()).await; @@ -1460,9 +1365,8 @@ mod configured_as_private { use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker::core::auth::Key; use torrust_tracker_test_helpers::configuration; - use tracing::level_filters::LevelFilter; - use 
crate::common::logging::{tracing_stderr_init, INIT}; + use crate::common::logging; use crate::servers::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; use crate::servers::http::client::Client; use crate::servers::http::requests::announce::QueryBuilder; @@ -1470,9 +1374,7 @@ mod configured_as_private { #[tokio::test] async fn should_respond_to_authenticated_peers() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_private().into()).await; @@ -1489,9 +1391,7 @@ mod configured_as_private { #[tokio::test] async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_private().into()).await; @@ -1508,9 +1408,7 @@ mod configured_as_private { #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_private().into()).await; @@ -1527,9 +1425,7 @@ mod configured_as_private { #[tokio::test] async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_private().into()).await; @@ -1556,9 +1452,8 @@ mod configured_as_private { use torrust_tracker::core::auth::Key; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; - use tracing::level_filters::LevelFilter; - use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::common::logging; use crate::servers::http::asserts::{assert_authentication_error_response, assert_scrape_response}; use crate::servers::http::client::Client; use 
crate::servers::http::responses::scrape::{File, ResponseBuilder}; @@ -1566,9 +1461,7 @@ mod configured_as_private { #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_private().into()).await; @@ -1585,9 +1478,7 @@ mod configured_as_private { #[tokio::test] async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_private().into()).await; @@ -1618,9 +1509,7 @@ mod configured_as_private { #[tokio::test] async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral_private().into()).await; @@ -1662,9 +1551,7 @@ mod configured_as_private { #[tokio::test] async fn should_return_the_zeroed_file_when_the_authentication_key_provided_by_the_client_is_invalid() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); // There is not authentication error // code-review: should this really be this way? 
diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index 9e9085e62..86bb1d18c 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -10,9 +10,8 @@ use bittorrent_tracker_client::udp::client::UdpTrackerClient; use torrust_tracker::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; -use tracing::level_filters::LevelFilter; -use crate::common::logging::{tracing_stderr_init, INIT}; +use crate::common::logging; use crate::servers::udp::asserts::get_error_response_message; use crate::servers::udp::Started; @@ -41,9 +40,7 @@ async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrac #[tokio::test] async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -74,17 +71,14 @@ mod receiving_a_connection_request { use bittorrent_tracker_client::udp::client::UdpTrackerClient; use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; - use tracing::level_filters::LevelFilter; - use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::common::logging; use crate::servers::udp::asserts::is_connect_response; use crate::servers::udp::Started; #[tokio::test] async fn should_return_a_connect_response() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -123,9 +117,8 @@ mod receiving_an_announce_request { use bittorrent_tracker_client::udp::client::UdpTrackerClient; use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; - use tracing::level_filters::LevelFilter; - use crate::common::logging::{tracing_stderr_init, INIT}; 
+ use crate::common::logging; use crate::servers::udp::asserts::is_ipv4_announce_response; use crate::servers::udp::contract::send_connection_request; use crate::servers::udp::Started; @@ -173,9 +166,7 @@ mod receiving_an_announce_request { #[tokio::test] async fn should_return_an_announce_response() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -195,9 +186,7 @@ mod receiving_an_announce_request { #[tokio::test] async fn should_return_many_announce_response() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -220,9 +209,7 @@ mod receiving_an_announce_request { #[tokio::test] async fn should_ban_the_client_ip_if_it_sends_more_than_10_requests_with_a_cookie_value_not_normal() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; @@ -266,18 +253,15 @@ mod receiving_an_scrape_request { use bittorrent_tracker_client::udp::client::UdpTrackerClient; use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; - use tracing::level_filters::LevelFilter; - use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::common::logging; use crate::servers::udp::asserts::is_scrape_response; use crate::servers::udp::contract::send_connection_request; use crate::servers::udp::Started; #[tokio::test] async fn should_return_a_scrape_response() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index f96ba2bea..acfb199f2 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -101,16 +101,13 
@@ mod tests { use tokio::time::sleep; use torrust_tracker_test_helpers::configuration; - use tracing::level_filters::LevelFilter; - use crate::common::logging::{tracing_stderr_init, INIT}; + use crate::common::logging; use crate::servers::udp::Started; #[tokio::test] async fn it_should_make_and_stop_udp_server() { - INIT.call_once(|| { - tracing_stderr_init(LevelFilter::ERROR); - }); + logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; sleep(Duration::from_secs(1)).await; From 9ac676c714ccf94feb6ece5a8e7cfc23d0650104 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 23 Dec 2024 16:08:12 +0000 Subject: [PATCH 055/802] feat: [#1146] override tower-http tracing error From: ``` 2024-12-23T15:54:25.842837Z ERROR tower_http::trace::on_failure: response failed classification=Status code: 500 Internal Server Error latency=0 ms ``` To: ``` 2024-12-23T16:06:53.553023Z ERROR API: response failed classification=Status code: 500 Internal Server Error latency=0 ms ``` The target has been changed: ``` 2024-12-23T15:54:25.842837Z ERROR tower_http::trace::on_failure: response failed classification=Status code: 500 Internal Server Error latency=0 ms 2024-12-23T16:06:53.553023Z ERROR API: response failed classification=Status code: 500 Internal Server Error latency=0 ms ``` It was changed to: - Easily identify the origin of the error in our code. - Allow to insert more fields in the future, for example, to write assertions about logs. 
--- src/servers/apis/routes.rs | 12 ++++++++++- src/servers/health_check_api/server.rs | 12 ++++++++++- src/servers/http/v1/routes.rs | 12 ++++++++++- src/servers/logging.rs | 29 ++++++++++++++++++++++++++ 4 files changed, 62 insertions(+), 3 deletions(-) diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index 327cab0c5..2ae422607 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -17,10 +17,12 @@ use hyper::{Request, StatusCode}; use torrust_tracker_configuration::{AccessTokens, DEFAULT_TIMEOUT}; use tower::timeout::TimeoutLayer; use tower::ServiceBuilder; +use tower_http::classify::ServerErrorsFailureClass; use tower_http::compression::CompressionLayer; use tower_http::propagate_header::PropagateHeaderLayer; use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; use tower_http::trace::{DefaultMakeSpan, TraceLayer}; +use tower_http::LatencyUnit; use tracing::{instrument, Level, Span}; use super::v1; @@ -28,6 +30,7 @@ use super::v1::context::health_check::handlers::health_check_handler; use super::v1::middlewares::auth::State; use crate::core::Tracker; use crate::servers::apis::API_LOG_TARGET; +use crate::servers::logging::Latency; /// Add all API routes to the router. 
#[allow(clippy::needless_pass_by_value)] @@ -75,7 +78,14 @@ pub fn router(tracker: Arc, access_tokens: Arc) -> Router tracing::span!( target: API_LOG_TARGET, tracing::Level::INFO, "response", latency = %latency_ms, status = %status_code, request_id = %request_id); - }), + }) + .on_failure(|failure_classification: ServerErrorsFailureClass, latency: Duration, _span: &Span| { + let latency = Latency::new( + LatencyUnit::Millis, + latency, + ); + tracing::error!(target: API_LOG_TARGET, "response failed classification={failure_classification} latency={latency}"); + }) ) .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) .layer( diff --git a/src/servers/health_check_api/server.rs b/src/servers/health_check_api/server.rs index df4b1cf69..f8ca65b82 100644 --- a/src/servers/health_check_api/server.rs +++ b/src/servers/health_check_api/server.rs @@ -14,15 +14,18 @@ use futures::Future; use hyper::Request; use serde_json::json; use tokio::sync::oneshot::{Receiver, Sender}; +use tower_http::classify::ServerErrorsFailureClass; use tower_http::compression::CompressionLayer; use tower_http::propagate_header::PropagateHeaderLayer; use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; use tower_http::trace::{DefaultMakeSpan, TraceLayer}; +use tower_http::LatencyUnit; use tracing::{instrument, Level, Span}; use crate::bootstrap::jobs::Started; use crate::servers::health_check_api::handlers::health_check_handler; use crate::servers::health_check_api::HEALTH_CHECK_API_LOG_TARGET; +use crate::servers::logging::Latency; use crate::servers::registar::ServiceRegistry; use crate::servers::signals::{graceful_shutdown, Halted}; @@ -73,7 +76,14 @@ pub fn start( tracing::span!( target: HEALTH_CHECK_API_LOG_TARGET, tracing::Level::INFO, "response", latency = %latency_ms, status = %status_code, request_id = %request_id); - }), + }) + .on_failure(|failure_classification: ServerErrorsFailureClass, latency: Duration, _span: &Span| { + let latency = Latency::new( + 
LatencyUnit::Millis, + latency, + ); + tracing::error!(target: HEALTH_CHECK_API_LOG_TARGET, "response failed classification={failure_classification} latency={latency}"); + }) ) .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)); diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index 16e39b61b..6eacb1e5c 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -13,15 +13,18 @@ use hyper::{Request, StatusCode}; use torrust_tracker_configuration::DEFAULT_TIMEOUT; use tower::timeout::TimeoutLayer; use tower::ServiceBuilder; +use tower_http::classify::ServerErrorsFailureClass; use tower_http::compression::CompressionLayer; use tower_http::propagate_header::PropagateHeaderLayer; use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; use tower_http::trace::{DefaultMakeSpan, TraceLayer}; +use tower_http::LatencyUnit; use tracing::{instrument, Level, Span}; use super::handlers::{announce, health_check, scrape}; use crate::core::Tracker; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; +use crate::servers::logging::Latency; /// It adds the routes to the router. 
/// @@ -72,7 +75,14 @@ pub fn router(tracker: Arc, server_socket_addr: SocketAddr) -> Router { tracing::span!( target: HTTP_TRACKER_LOG_TARGET, tracing::Level::INFO, "response", server_socket_addr= %server_socket_addr, latency = %latency_ms, status = %status_code, request_id = %request_id); - }), + }) + .on_failure(|failure_classification: ServerErrorsFailureClass, latency: Duration, _span: &Span| { + let latency = Latency::new( + LatencyUnit::Millis, + latency, + ); + tracing::error!(target: HTTP_TRACKER_LOG_TARGET, "response failed classification={failure_classification} latency={latency}"); + }) ) .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) .layer( diff --git a/src/servers/logging.rs b/src/servers/logging.rs index ad9ccbbcc..c503cfd35 100644 --- a/src/servers/logging.rs +++ b/src/servers/logging.rs @@ -1,3 +1,8 @@ +use std::fmt; +use std::time::Duration; + +use tower_http::LatencyUnit; + /// This is the prefix used in logs to identify a started service. /// /// For example: @@ -27,3 +32,27 @@ We should use something like: ``` */ + +pub struct Latency { + unit: LatencyUnit, + duration: Duration, +} + +impl Latency { + #[must_use] + pub fn new(unit: LatencyUnit, duration: Duration) -> Self { + Self { unit, duration } + } +} + +impl fmt::Display for Latency { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.unit { + LatencyUnit::Seconds => write!(f, "{} s", self.duration.as_secs_f64()), + LatencyUnit::Millis => write!(f, "{} ms", self.duration.as_millis()), + LatencyUnit::Micros => write!(f, "{} μs", self.duration.as_micros()), + LatencyUnit::Nanos => write!(f, "{} ns", self.duration.as_nanos()), + _ => panic!("Invalid latency unit"), + } + } +} From 97233f57aa9595e05bc6d44ad2dd41ae1087e3de Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 23 Dec 2024 17:55:04 +0000 Subject: [PATCH 056/802] fix: missing logs for HTTP requests Some tracing log spans were missing. 
In addition to that, we were using new spans to log requests and responses instead of using the span provided by the TraceLayer. That enables us to join all the log events related to the same request. This fix is needed to continue with this issue: https://github.com/torrust/torrust-tracker/issues/1150 Because it allows using a "request-id" header to identify logs. We can write log assertions by matching lines with the request-id used in the request. For example, if you make this request: ```console curl -H "x-request-id: YOUR_REQUEST_ID" http://0.0.0.0:1212/api/v1/stats?token=InvalidToken ``` This is the new output (which was missing before this change): ```output 2024-12-23T17:53:06.530704Z INFO request{method=GET uri=/api/v1/stats?token=InvalidToken version=HTTP/1.1}: API: request method=GET uri=/api/v1/stats?token=InvalidToken request_id=YOUR_REQUEST_ID 2024-12-23T17:53:06.530777Z ERROR request{method=GET uri=/api/v1/stats?token=InvalidToken version=HTTP/1.1}: API: response latency_ms=0 status_code=500 Internal Server Error request_id=YOUR_REQUEST_ID 2024-12-23T17:53:06.530785Z ERROR request{method=GET uri=/api/v1/stats?token=InvalidToken version=HTTP/1.1}: API: response failed failure_classification=Status code: 500 Internal Server Error latency=0 ms ``` As you can see, now we have the "request_id=YOUR_REQUEST_ID" field which can be used to identify the test that made the request. 
--- src/servers/apis/routes.rs | 42 +++++++++++++++--------- src/servers/health_check_api/server.rs | 42 +++++++++++++++--------- src/servers/http/v1/routes.rs | 44 ++++++++++++++++---------- 3 files changed, 82 insertions(+), 46 deletions(-) diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index 2ae422607..c021cb215 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -53,7 +53,7 @@ pub fn router(tracker: Arc, access_tokens: Arc) -> Router .layer( TraceLayer::new_for_http() .make_span_with(DefaultMakeSpan::new().level(Level::INFO)) - .on_request(|request: &Request, _span: &Span| { + .on_request(|request: &Request, span: &Span| { let method = request.method().to_string(); let uri = request.uri().to_string(); let request_id = request @@ -62,30 +62,42 @@ pub fn router(tracker: Arc, access_tokens: Arc) -> Router .map(|v| v.to_str().unwrap_or_default()) .unwrap_or_default(); - tracing::span!( + span.record("request_id", request_id); + + tracing::event!( target: API_LOG_TARGET, - tracing::Level::INFO, "request", method = %method, uri = %uri, request_id = %request_id); + tracing::Level::INFO, %method, %uri, %request_id, "request"); }) - .on_response(|response: &Response, latency: Duration, _span: &Span| { + .on_response(|response: &Response, latency: Duration, span: &Span| { + let latency_ms = latency.as_millis(); let status_code = response.status(); let request_id = response .headers() .get("x-request-id") .map(|v| v.to_str().unwrap_or_default()) .unwrap_or_default(); - let latency_ms = latency.as_millis(); - tracing::span!( - target: API_LOG_TARGET, - tracing::Level::INFO, "response", latency = %latency_ms, status = %status_code, request_id = %request_id); - }) - .on_failure(|failure_classification: ServerErrorsFailureClass, latency: Duration, _span: &Span| { - let latency = Latency::new( - LatencyUnit::Millis, - latency, - ); - tracing::error!(target: API_LOG_TARGET, "response failed classification={failure_classification} 
latency={latency}"); + span.record("request_id", request_id); + + if status_code.is_server_error() { + tracing::event!( + target: API_LOG_TARGET, + tracing::Level::ERROR, %latency_ms, %status_code, %request_id, "response"); + } else { + tracing::event!( + target: API_LOG_TARGET, + tracing::Level::INFO, %latency_ms, %status_code, %request_id, "response"); + } }) + .on_failure( + |failure_classification: ServerErrorsFailureClass, latency: Duration, _span: &Span| { + let latency = Latency::new(LatencyUnit::Millis, latency); + + tracing::event!( + target: API_LOG_TARGET, + tracing::Level::ERROR, %failure_classification, %latency, "response failed"); + }, + ), ) .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) .layer( diff --git a/src/servers/health_check_api/server.rs b/src/servers/health_check_api/server.rs index f8ca65b82..42111f507 100644 --- a/src/servers/health_check_api/server.rs +++ b/src/servers/health_check_api/server.rs @@ -51,7 +51,7 @@ pub fn start( .layer( TraceLayer::new_for_http() .make_span_with(DefaultMakeSpan::new().level(Level::INFO)) - .on_request(|request: &Request, _span: &Span| { + .on_request(|request: &Request, span: &Span| { let method = request.method().to_string(); let uri = request.uri().to_string(); let request_id = request @@ -60,30 +60,42 @@ pub fn start( .map(|v| v.to_str().unwrap_or_default()) .unwrap_or_default(); - tracing::span!( + span.record("request_id", request_id); + + tracing::event!( target: HEALTH_CHECK_API_LOG_TARGET, - tracing::Level::INFO, "request", method = %method, uri = %uri, request_id = %request_id); + tracing::Level::INFO, %method, %uri, %request_id, "request"); }) - .on_response(|response: &Response, latency: Duration, _span: &Span| { + .on_response(|response: &Response, latency: Duration, span: &Span| { + let latency_ms = latency.as_millis(); let status_code = response.status(); let request_id = response .headers() .get("x-request-id") .map(|v| v.to_str().unwrap_or_default()) .unwrap_or_default(); - let 
latency_ms = latency.as_millis(); - tracing::span!( - target: HEALTH_CHECK_API_LOG_TARGET, - tracing::Level::INFO, "response", latency = %latency_ms, status = %status_code, request_id = %request_id); - }) - .on_failure(|failure_classification: ServerErrorsFailureClass, latency: Duration, _span: &Span| { - let latency = Latency::new( - LatencyUnit::Millis, - latency, - ); - tracing::error!(target: HEALTH_CHECK_API_LOG_TARGET, "response failed classification={failure_classification} latency={latency}"); + span.record("request_id", request_id); + + if status_code.is_server_error() { + tracing::event!( + target: HEALTH_CHECK_API_LOG_TARGET, + tracing::Level::ERROR, %latency_ms, %status_code, %request_id, "response"); + } else { + tracing::event!( + target: HEALTH_CHECK_API_LOG_TARGET, + tracing::Level::INFO, %latency_ms, %status_code, %request_id, "response"); + } }) + .on_failure( + |failure_classification: ServerErrorsFailureClass, latency: Duration, _span: &Span| { + let latency = Latency::new(LatencyUnit::Millis, latency); + + tracing::event!( + target: HEALTH_CHECK_API_LOG_TARGET, + tracing::Level::ERROR, %failure_classification, %latency, "response failed"); + }, + ), ) .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)); diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index 6eacb1e5c..a5d402693 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -50,7 +50,7 @@ pub fn router(tracker: Arc, server_socket_addr: SocketAddr) -> Router { .layer( TraceLayer::new_for_http() .make_span_with(DefaultMakeSpan::new().level(Level::INFO)) - .on_request(move |request: &Request, _span: &Span| { + .on_request(move |request: &Request, span: &Span| { let method = request.method().to_string(); let uri = request.uri().to_string(); let request_id = request @@ -59,33 +59,45 @@ pub fn router(tracker: Arc, server_socket_addr: SocketAddr) -> Router { .map(|v| v.to_str().unwrap_or_default()) .unwrap_or_default(); - 
tracing::span!( + span.record("request_id", request_id); + + tracing::event!( target: HTTP_TRACKER_LOG_TARGET, - tracing::Level::INFO, "request", server_socket_addr= %server_socket_addr, method = %method, uri = %uri, request_id = %request_id); + tracing::Level::INFO, %server_socket_addr, %method, %uri, %request_id, "request"); }) - .on_response(move |response: &Response, latency: Duration, _span: &Span| { + .on_response(move |response: &Response, latency: Duration, span: &Span| { + let latency_ms = latency.as_millis(); let status_code = response.status(); let request_id = response .headers() .get("x-request-id") .map(|v| v.to_str().unwrap_or_default()) .unwrap_or_default(); - let latency_ms = latency.as_millis(); - tracing::span!( - target: HTTP_TRACKER_LOG_TARGET, - tracing::Level::INFO, "response", server_socket_addr= %server_socket_addr, latency = %latency_ms, status = %status_code, request_id = %request_id); - }) - .on_failure(|failure_classification: ServerErrorsFailureClass, latency: Duration, _span: &Span| { - let latency = Latency::new( - LatencyUnit::Millis, - latency, - ); - tracing::error!(target: HTTP_TRACKER_LOG_TARGET, "response failed classification={failure_classification} latency={latency}"); + span.record("request_id", request_id); + + if status_code.is_server_error() { + tracing::event!( + target: HTTP_TRACKER_LOG_TARGET, + tracing::Level::ERROR, %server_socket_addr, %latency_ms, %status_code, %request_id, "response"); + } else { + tracing::event!( + target: HTTP_TRACKER_LOG_TARGET, + tracing::Level::INFO, %server_socket_addr, %latency_ms, %status_code, %request_id, "response"); + } }) + .on_failure( + |failure_classification: ServerErrorsFailureClass, latency: Duration, _span: &Span| { + let latency = Latency::new(LatencyUnit::Millis, latency); + + tracing::event!( + target: HTTP_TRACKER_LOG_TARGET, + tracing::Level::ERROR, %failure_classification, %latency, "response failed"); + }, + ), ) .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) 
- .layer( + .layer( ServiceBuilder::new() // this middleware goes above `TimeoutLayer` because it will receive // errors returned by `TimeoutLayer` From 86b046068389d37ab5e53ea4b37c9b0c2fe1329d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 23 Dec 2024 19:37:48 +0000 Subject: [PATCH 057/802] chore(deps): update depencencies ```output cargo update Updating crates.io index Locking 19 packages to latest compatible versions Updating anyhow v1.0.94 -> v1.0.95 Updating bytemuck v1.20.0 -> v1.21.0 Updating cc v1.2.4 -> v1.2.5 Updating foldhash v0.1.3 -> v0.1.4 Updating hyper-rustls v0.27.3 -> v0.27.5 Updating libc v0.2.168 -> v0.2.169 Updating miniz_oxide v0.8.0 -> v0.8.2 Updating object v0.36.5 -> v0.36.7 Updating predicates v3.1.2 -> v3.1.3 Updating predicates-core v1.0.8 -> v1.0.9 Updating predicates-tree v1.0.11 -> v1.0.12 Updating security-framework-sys v2.12.1 -> v2.13.0 Updating serde_html_form v0.2.6 -> v0.2.7 Updating serde_json v1.0.133 -> v1.0.134 Updating syn v2.0.90 -> v2.0.91 Updating termtree v0.4.1 -> v0.5.1 Updating thiserror v2.0.7 -> v2.0.9 Updating thiserror-impl v2.0.7 -> v2.0.9 Updating tinyvec v1.8.0 -> v1.8.1 ``` --- Cargo.lock | 166 ++++++++++++++++++++++++++--------------------------- 1 file changed, 83 insertions(+), 83 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dacd04454..98875f48d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -142,9 +142,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" [[package]] name = "aquatic_peer_id" @@ -333,7 +333,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -456,7 +456,7 @@ checksum = 
"57d123550fa8d071b7255cb0cc04dc302baa6c8c4a79f55701552684d8399bce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -544,7 +544,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -597,7 +597,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_repr", - "thiserror 2.0.7", + "thiserror 2.0.9", "tokio", "torrust-tracker-configuration", "torrust-tracker-located-error", @@ -679,7 +679,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -748,9 +748,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.20.0" +version = "1.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b37c88a63ffd85d15b406896cc343916d7cf57838a847b3a6f2ca5d39a5695a" +checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" [[package]] name = "byteorder" @@ -790,9 +790,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.4" +version = "1.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9157bbaa6b165880c27a4293a474c91cdcf265cc68cc829bf10be0964a391caf" +checksum = "c31a0499c1dc64f458ad13872de75c0eb7e3fdb0e67964610c914b034fc5956e" dependencies = [ "jobserver", "libc", @@ -912,7 +912,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1133,7 +1133,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1144,7 +1144,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1188,7 +1188,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", "unicode-xid", ] @@ -1200,7 +1200,7 @@ checksum = 
"65f152f4b8559c4da5d574bafc7af85454d706b4c5fe8b530d508cacbb6807ea" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1221,7 +1221,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1351,9 +1351,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" +checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" [[package]] name = "foreign-types" @@ -1424,7 +1424,7 @@ checksum = "e99b8b3c28ae0e84b604c75f721c21dc77afb3706076af5e8216d15fd1deaae3" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1436,7 +1436,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1448,7 +1448,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1526,7 +1526,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -1770,9 +1770,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.3" +version = "0.27.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" +checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" dependencies = [ "futures-util", "http", @@ -1958,7 +1958,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -2117,9 +2117,9 @@ 
checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.168" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = "libloading" @@ -2237,9 +2237,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +checksum = "4ffbe83022cedc1d264172192511ae958937694cd57ce297164951b8b3568394" dependencies = [ "adler2", ] @@ -2278,7 +2278,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -2345,7 +2345,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", "termcolor", "thiserror 1.0.69", ] @@ -2502,9 +2502,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.5" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "memchr", ] @@ -2544,7 +2544,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -2620,7 +2620,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -2694,7 +2694,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -2792,9 +2792,9 @@ dependencies = 
[ [[package]] name = "predicates" -version = "3.1.2" +version = "3.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e9086cc7640c29a356d1a29fd134380bee9d8f79a17410aa76e7ad295f42c97" +checksum = "a5d19ee57562043d37e82899fade9a22ebab7be9cef5026b07fda9cdd4293573" dependencies = [ "anstyle", "predicates-core", @@ -2802,15 +2802,15 @@ dependencies = [ [[package]] name = "predicates-core" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae8177bee8e75d6846599c6b9ff679ed51e882816914eec639944d7c9aa11931" +checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa" [[package]] name = "predicates-tree" -version = "1.0.11" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41b740d195ed3166cd147c8047ec98db0e22ec019eb8eeb76d343b795304fb13" +checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c" dependencies = [ "predicates-core", "termtree", @@ -2844,7 +2844,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -2864,7 +2864,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", "version_check", "yansi", ] @@ -3173,7 +3173,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.90", + "syn 2.0.91", "unicode-ident", ] @@ -3352,9 +3352,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.12.1" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa39c7303dc58b5543c94d22c1766b0d31f2ee58306363ea622b10bbc075eaa2" +checksum = "1863fd3768cd83c56a7f60faa4dc0d403f1b6df0a38c3c25f44b7894e45370d5" dependencies = [ "core-foundation-sys", "libc", @@ -3402,14 +3402,14 @@ checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] name = "serde_html_form" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de514ef58196f1fc96dcaef80fe6170a1ce6215df9687a93fe8300e773fefc5" +checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4" dependencies = [ "form_urlencoded", "indexmap 2.7.0", @@ -3420,9 +3420,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.133" +version = "1.0.134" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" +checksum = "d00f4175c42ee48b15416f6193a959ba3a0d67fc699a0db9ad12df9f83991c7d" dependencies = [ "indexmap 2.7.0", "itoa", @@ -3449,7 +3449,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -3500,7 +3500,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -3639,9 +3639,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.90" +version = "2.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" +checksum = "d53cbcb5a243bd33b7858b1d7f4aca2153490815872d86d955d6ea29f743c035" dependencies = [ "proc-macro2", "quote", @@ -3665,7 +3665,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -3736,9 +3736,9 @@ dependencies = [ [[package]] name = "termtree" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" +checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" 
[[package]] name = "thiserror" @@ -3751,11 +3751,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.7" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93605438cbd668185516ab499d589afb7ee1859ea3d5fc8f6b0755e1c7443767" +checksum = "f072643fd0190df67a8bab670c20ef5d8737177d6ac6b2e9a236cb096206b2cc" dependencies = [ - "thiserror-impl 2.0.7", + "thiserror-impl 2.0.9", ] [[package]] @@ -3766,18 +3766,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] name = "thiserror-impl" -version = "2.0.7" +version = "2.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d8749b4531af2117677a5fcd12b1348a3fe2b81e36e61ffeac5c4aa3273e36" +checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -3843,9 +3843,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" dependencies = [ "tinyvec_macros", ] @@ -3881,7 +3881,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -3999,7 +3999,7 @@ dependencies = [ "serde_json", "serde_repr", "serde_with", - "thiserror 2.0.7", + "thiserror 2.0.9", "tokio", "torrust-tracker-clock", "torrust-tracker-configuration", @@ -4034,7 +4034,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_json", - "thiserror 2.0.7", + "thiserror 2.0.9", "tokio", "torrust-tracker-configuration", "tracing", @@ -4061,7 +4061,7 @@ dependencies = [ "serde", "serde_json", 
"serde_with", - "thiserror 2.0.7", + "thiserror 2.0.9", "toml", "torrust-tracker-located-error", "url", @@ -4073,14 +4073,14 @@ name = "torrust-tracker-contrib-bencode" version = "3.0.0-develop" dependencies = [ "criterion", - "thiserror 2.0.7", + "thiserror 2.0.9", ] [[package]] name = "torrust-tracker-located-error" version = "3.0.0-develop" dependencies = [ - "thiserror 2.0.7", + "thiserror 2.0.9", "tracing", ] @@ -4095,7 +4095,7 @@ dependencies = [ "serde", "tdyne-peer-id", "tdyne-peer-id-registry", - "thiserror 2.0.7", + "thiserror 2.0.9", "zerocopy", ] @@ -4211,7 +4211,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -4422,7 +4422,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", "wasm-bindgen-shared", ] @@ -4457,7 +4457,7 @@ checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4686,7 +4686,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", "synstructure", ] @@ -4708,7 +4708,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] @@ -4728,7 +4728,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", "synstructure", ] @@ -4757,7 +4757,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.90", + "syn 2.0.91", ] [[package]] From 88d3d497de51436d230d2c2dfd6ebcb82b55a049 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 26 Dec 2024 09:36:06 +0000 Subject: [PATCH 
058/802] feat: add server socket address to logs in API ``` 2024-12-26T09:07:18.149759Z ERROR API: response latency_ms=0 status_code=500 Internal Server Error server_socket_addr=127.0.0.1:41579 request_id=44d8c2f6-630d-4eab-a399-65aed1dbc8ab ``` --- src/servers/apis/routes.rs | 13 +++++++------ src/servers/apis/server.rs | 3 ++- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index c021cb215..0b0862fb9 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -5,6 +5,7 @@ //! //! All the API routes have the `/api` prefix and the version number as the //! first path segment. For example: `/api/v1/torrents`. +use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; @@ -35,7 +36,7 @@ use crate::servers::logging::Latency; /// Add all API routes to the router. #[allow(clippy::needless_pass_by_value)] #[instrument(skip(tracker, access_tokens))] -pub fn router(tracker: Arc, access_tokens: Arc) -> Router { +pub fn router(tracker: Arc, access_tokens: Arc, server_socket_addr: SocketAddr) -> Router { let router = Router::new(); let api_url_prefix = "/api"; @@ -68,7 +69,7 @@ pub fn router(tracker: Arc, access_tokens: Arc) -> Router target: API_LOG_TARGET, tracing::Level::INFO, %method, %uri, %request_id, "request"); }) - .on_response(|response: &Response, latency: Duration, span: &Span| { + .on_response(move |response: &Response, latency: Duration, span: &Span| { let latency_ms = latency.as_millis(); let status_code = response.status(); let request_id = response @@ -82,20 +83,20 @@ pub fn router(tracker: Arc, access_tokens: Arc) -> Router if status_code.is_server_error() { tracing::event!( target: API_LOG_TARGET, - tracing::Level::ERROR, %latency_ms, %status_code, %request_id, "response"); + tracing::Level::ERROR, %latency_ms, %status_code, %server_socket_addr, %request_id, "response"); } else { tracing::event!( target: API_LOG_TARGET, - tracing::Level::INFO, %latency_ms, 
%status_code, %request_id, "response"); + tracing::Level::INFO, %latency_ms, %status_code, %server_socket_addr, %request_id, "response"); } }) .on_failure( - |failure_classification: ServerErrorsFailureClass, latency: Duration, _span: &Span| { + move |failure_classification: ServerErrorsFailureClass, latency: Duration, _span: &Span| { let latency = Latency::new(LatencyUnit::Millis, latency); tracing::event!( target: API_LOG_TARGET, - tracing::Level::ERROR, %failure_classification, %latency, "response failed"); + tracing::Level::ERROR, %failure_classification, %latency, %server_socket_addr, "response failed"); }, ), ) diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 31220f497..eadadecf2 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -243,10 +243,11 @@ impl Launcher { tx_start: Sender, rx_halt: Receiver, ) -> BoxFuture<'static, ()> { - let router = router(tracker, access_tokens); let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); + let router = router(tracker, access_tokens, address); + let handle = Handle::new(); tokio::task::spawn(graceful_shutdown( From cc2840f25e69e4fd7bf801c6afe5048c8afc2164 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 26 Dec 2024 10:24:35 +0000 Subject: [PATCH 059/802] feat: add an option to pass the request id in the API client Now you can send a header `x-request-id` to the API. ```rust let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) .get_request_with_query( "stats", Query::params([QueryParam::new("token", "")].to_vec()), Some(headers_with_request_id(request_id)), ) .await; ``` That ID is recorded in logs. So you can use it to track the request. 
--- tests/servers/api/v1/client.rs | 43 ++++++++++++------- .../servers/api/v1/contract/authentication.rs | 28 +++++++++--- .../api/v1/contract/context/health_check.rs | 2 +- 3 files changed, 50 insertions(+), 23 deletions(-) diff --git a/tests/servers/api/v1/client.rs b/tests/servers/api/v1/client.rs index 3d95c10ca..a447805d0 100644 --- a/tests/servers/api/v1/client.rs +++ b/tests/servers/api/v1/client.rs @@ -1,5 +1,7 @@ +use hyper::HeaderMap; use reqwest::Response; use serde::Serialize; +use uuid::Uuid; use crate::common::http::{Query, QueryParam, ReqwestQuery}; use crate::servers::api::connection_info::ConnectionInfo; @@ -65,7 +67,7 @@ impl Client { query.add_param(QueryParam::new("token", token)); }; - self.get_request_with_query(path, query).await + self.get_request_with_query(path, query, None).await } pub async fn post_empty(&self, path: &str) -> Response { @@ -96,12 +98,12 @@ impl Client { .unwrap() } - pub async fn get_request_with_query(&self, path: &str, params: Query) -> Response { - get(&self.base_url(path), Some(params)).await + pub async fn get_request_with_query(&self, path: &str, params: Query, headers: Option) -> Response { + get(&self.base_url(path), Some(params), headers).await } pub async fn get_request(&self, path: &str) -> Response { - get(&self.base_url(path), None).await + get(&self.base_url(path), None, None).await } fn query_with_token(&self) -> Query { @@ -116,18 +118,27 @@ impl Client { } } -pub async fn get(path: &str, query: Option) -> Response { - match query { - Some(params) => reqwest::Client::builder() - .build() - .unwrap() - .get(path) - .query(&ReqwestQuery::from(params)) - .send() - .await - .unwrap(), - None => reqwest::Client::builder().build().unwrap().get(path).send().await.unwrap(), - } +pub async fn get(path: &str, query: Option, headers: Option) -> Response { + let builder = reqwest::Client::builder().build().unwrap(); + + let builder = match query { + Some(params) => 
builder.get(path).query(&ReqwestQuery::from(params)), + None => builder.get(path), + }; + + let builder = match headers { + Some(headers) => builder.headers(headers), + None => builder, + }; + + builder.send().await.unwrap() +} + +/// Returns a `HeaderMap` with a request id header +pub fn headers_with_request_id(request_id: Uuid) -> HeaderMap { + let mut headers = HeaderMap::new(); + headers.insert("x-request-id", request_id.to_string().parse().unwrap()); + headers } #[derive(Serialize, Debug)] diff --git a/tests/servers/api/v1/contract/authentication.rs b/tests/servers/api/v1/contract/authentication.rs index 8f5ce8f53..dc50048fb 100644 --- a/tests/servers/api/v1/contract/authentication.rs +++ b/tests/servers/api/v1/contract/authentication.rs @@ -1,9 +1,10 @@ use torrust_tracker_test_helpers::configuration; +use uuid::Uuid; use crate::common::http::{Query, QueryParam}; -use crate::common::logging::{self}; +use crate::common::logging::{self, logs_contains_a_line_with}; use crate::servers::api::v1::asserts::{assert_token_not_valid, assert_unauthorized}; -use crate::servers::api::v1::client::Client; +use crate::servers::api::v1::client::{headers_with_request_id, Client}; use crate::servers::api::Started; #[tokio::test] @@ -15,7 +16,7 @@ async fn should_authenticate_requests_by_using_a_token_query_param() { let token = env.get_connection_info().api_token.unwrap(); let response = Client::new(env.get_connection_info()) - .get_request_with_query("stats", Query::params([QueryParam::new("token", &token)].to_vec())) + .get_request_with_query("stats", Query::params([QueryParam::new("token", &token)].to_vec()), None) .await; assert_eq!(response.status(), 200); @@ -30,7 +31,7 @@ async fn should_not_authenticate_requests_when_the_token_is_missing() { let env = Started::new(&configuration::ephemeral().into()).await; let response = Client::new(env.get_connection_info()) - .get_request_with_query("stats", Query::default()) + .get_request_with_query("stats", Query::default(), None) 
.await; assert_unauthorized(response).await; @@ -44,13 +45,24 @@ async fn should_not_authenticate_requests_when_the_token_is_empty() { let env = Started::new(&configuration::ephemeral().into()).await; + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) - .get_request_with_query("stats", Query::params([QueryParam::new("token", "")].to_vec())) + .get_request_with_query( + "stats", + Query::params([QueryParam::new("token", "")].to_vec()), + Some(headers_with_request_id(request_id)), + ) .await; assert_token_not_valid(response).await; env.stop().await; + + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... request_id={request_id}" + ); } #[tokio::test] @@ -60,7 +72,11 @@ async fn should_not_authenticate_requests_when_the_token_is_invalid() { let env = Started::new(&configuration::ephemeral().into()).await; let response = Client::new(env.get_connection_info()) - .get_request_with_query("stats", Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec())) + .get_request_with_query( + "stats", + Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec()), + None, + ) .await; assert_token_not_valid(response).await; diff --git a/tests/servers/api/v1/contract/context/health_check.rs b/tests/servers/api/v1/contract/context/health_check.rs index 0fd3f6ea6..fa6cfa094 100644 --- a/tests/servers/api/v1/contract/context/health_check.rs +++ b/tests/servers/api/v1/contract/context/health_check.rs @@ -13,7 +13,7 @@ async fn health_check_endpoint_should_return_status_ok_if_api_is_running() { let url = format!("http://{}/api/health_check", env.get_connection_info().bind_address); - let response = get(&url, None).await; + let response = get(&url, None, None).await; assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); From 5d49d48a74114f1eb80e60776433a71ec6ddb4a3 Mon Sep 17 00:00:00 2001 From: 
Jose Celano Date: Thu, 26 Dec 2024 11:07:03 +0000 Subject: [PATCH 060/802] test: [#1152] write assertions when API writes errors into logs --- tests/servers/api/v1/client.rs | 92 +++++---- .../servers/api/v1/contract/authentication.rs | 22 +- .../api/v1/contract/context/auth_key.rs | 194 ++++++++++++++---- .../servers/api/v1/contract/context/stats.rs | 29 ++- .../api/v1/contract/context/torrent.rs | 120 ++++++++--- .../api/v1/contract/context/whitelist.rs | 122 +++++++++-- 6 files changed, 450 insertions(+), 129 deletions(-) diff --git a/tests/servers/api/v1/client.rs b/tests/servers/api/v1/client.rs index a447805d0..635331078 100644 --- a/tests/servers/api/v1/client.rs +++ b/tests/servers/api/v1/client.rs @@ -20,82 +20,94 @@ impl Client { } } - pub async fn generate_auth_key(&self, seconds_valid: i32) -> Response { - self.post_empty(&format!("key/{}", &seconds_valid)).await + pub async fn generate_auth_key(&self, seconds_valid: i32, headers: Option) -> Response { + self.post_empty(&format!("key/{}", &seconds_valid), headers).await } - pub async fn add_auth_key(&self, add_key_form: AddKeyForm) -> Response { - self.post_form("keys", &add_key_form).await + pub async fn add_auth_key(&self, add_key_form: AddKeyForm, headers: Option) -> Response { + self.post_form("keys", &add_key_form, headers).await } - pub async fn delete_auth_key(&self, key: &str) -> Response { - self.delete(&format!("key/{}", &key)).await + pub async fn delete_auth_key(&self, key: &str, headers: Option) -> Response { + self.delete(&format!("key/{}", &key), headers).await } - pub async fn reload_keys(&self) -> Response { - self.get("keys/reload", Query::default()).await + pub async fn reload_keys(&self, headers: Option) -> Response { + self.get("keys/reload", Query::default(), headers).await } - pub async fn whitelist_a_torrent(&self, info_hash: &str) -> Response { - self.post_empty(&format!("whitelist/{}", &info_hash)).await + pub async fn whitelist_a_torrent(&self, info_hash: &str, headers: 
Option) -> Response { + self.post_empty(&format!("whitelist/{}", &info_hash), headers).await } - pub async fn remove_torrent_from_whitelist(&self, info_hash: &str) -> Response { - self.delete(&format!("whitelist/{}", &info_hash)).await + pub async fn remove_torrent_from_whitelist(&self, info_hash: &str, headers: Option) -> Response { + self.delete(&format!("whitelist/{}", &info_hash), headers).await } - pub async fn reload_whitelist(&self) -> Response { - self.get("whitelist/reload", Query::default()).await + pub async fn reload_whitelist(&self, headers: Option) -> Response { + self.get("whitelist/reload", Query::default(), headers).await } - pub async fn get_torrent(&self, info_hash: &str) -> Response { - self.get(&format!("torrent/{}", &info_hash), Query::default()).await + pub async fn get_torrent(&self, info_hash: &str, headers: Option) -> Response { + self.get(&format!("torrent/{}", &info_hash), Query::default(), headers).await } - pub async fn get_torrents(&self, params: Query) -> Response { - self.get("torrents", params).await + pub async fn get_torrents(&self, params: Query, headers: Option) -> Response { + self.get("torrents", params, headers).await } - pub async fn get_tracker_statistics(&self) -> Response { - self.get("stats", Query::default()).await + pub async fn get_tracker_statistics(&self, headers: Option) -> Response { + self.get("stats", Query::default(), headers).await } - pub async fn get(&self, path: &str, params: Query) -> Response { + pub async fn get(&self, path: &str, params: Query, headers: Option) -> Response { let mut query: Query = params; if let Some(token) = &self.connection_info.api_token { query.add_param(QueryParam::new("token", token)); }; - self.get_request_with_query(path, query, None).await + self.get_request_with_query(path, query, headers).await } - pub async fn post_empty(&self, path: &str) -> Response { - reqwest::Client::new() + pub async fn post_empty(&self, path: &str, headers: Option) -> Response { + let builder = 
reqwest::Client::new() .post(self.base_url(path).clone()) - .query(&ReqwestQuery::from(self.query_with_token())) - .send() - .await - .unwrap() + .query(&ReqwestQuery::from(self.query_with_token())); + + let builder = match headers { + Some(headers) => builder.headers(headers), + None => builder, + }; + + builder.send().await.unwrap() } - pub async fn post_form(&self, path: &str, form: &T) -> Response { - reqwest::Client::new() + pub async fn post_form(&self, path: &str, form: &T, headers: Option) -> Response { + let builder = reqwest::Client::new() .post(self.base_url(path).clone()) .query(&ReqwestQuery::from(self.query_with_token())) - .json(&form) - .send() - .await - .unwrap() + .json(&form); + + let builder = match headers { + Some(headers) => builder.headers(headers), + None => builder, + }; + + builder.send().await.unwrap() } - async fn delete(&self, path: &str) -> Response { - reqwest::Client::new() + async fn delete(&self, path: &str, headers: Option) -> Response { + let builder = reqwest::Client::new() .delete(self.base_url(path).clone()) - .query(&ReqwestQuery::from(self.query_with_token())) - .send() - .await - .unwrap() + .query(&ReqwestQuery::from(self.query_with_token())); + + let builder = match headers { + Some(headers) => builder.headers(headers), + None => builder, + }; + + builder.send().await.unwrap() } pub async fn get_request_with_query(&self, path: &str, params: Query, headers: Option) -> Response { diff --git a/tests/servers/api/v1/contract/authentication.rs b/tests/servers/api/v1/contract/authentication.rs index dc50048fb..4e0cf49da 100644 --- a/tests/servers/api/v1/contract/authentication.rs +++ b/tests/servers/api/v1/contract/authentication.rs @@ -30,12 +30,19 @@ async fn should_not_authenticate_requests_when_the_token_is_missing() { let env = Started::new(&configuration::ephemeral().into()).await; + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) - .get_request_with_query("stats", 
Query::default(), None) + .get_request_with_query("stats", Query::default(), Some(headers_with_request_id(request_id))) .await; assert_unauthorized(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... request_id={request_id}" + ); + env.stop().await; } @@ -57,12 +64,12 @@ async fn should_not_authenticate_requests_when_the_token_is_empty() { assert_token_not_valid(response).await; - env.stop().await; - assert!( logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), "Expected logs to contain: ERROR ... API ... request_id={request_id}" ); + + env.stop().await; } #[tokio::test] @@ -71,16 +78,23 @@ async fn should_not_authenticate_requests_when_the_token_is_invalid() { let env = Started::new(&configuration::ephemeral().into()).await; + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) .get_request_with_query( "stats", Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec()), - None, + Some(headers_with_request_id(request_id)), ) .await; assert_token_not_valid(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... 
request_id={request_id}" + ); + env.stop().await; } diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs index 9560a2f49..8ef72230e 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -3,15 +3,16 @@ use std::time::Duration; use serde::Serialize; use torrust_tracker::core::auth::Key; use torrust_tracker_test_helpers::configuration; +use uuid::Uuid; -use crate::common::logging::{self}; +use crate::common::logging::{self, logs_contains_a_line_with}; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::servers::api::v1::asserts::{ assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_invalid_auth_key_get_param, assert_invalid_auth_key_post_param, assert_ok, assert_token_not_valid, assert_unauthorized, assert_unprocessable_auth_key_duration_param, }; -use crate::servers::api::v1::client::{AddKeyForm, Client}; +use crate::servers::api::v1::client::{headers_with_request_id, AddKeyForm, Client}; use crate::servers::api::{force_database_error, Started}; #[tokio::test] @@ -20,11 +21,16 @@ async fn should_allow_generating_a_new_random_auth_key() { let env = Started::new(&configuration::ephemeral().into()).await; + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) - .add_auth_key(AddKeyForm { - opt_key: None, - seconds_valid: Some(60), - }) + .add_auth_key( + AddKeyForm { + opt_key: None, + seconds_valid: Some(60), + }, + Some(headers_with_request_id(request_id)), + ) .await; let auth_key_resource = assert_auth_key_utf8(response).await; @@ -44,11 +50,16 @@ async fn should_allow_uploading_a_preexisting_auth_key() { let env = Started::new(&configuration::ephemeral().into()).await; + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) - 
.add_auth_key(AddKeyForm { - opt_key: Some("Xc1L4PbQJSFGlrgSRZl8wxSFAuMa21z5".to_string()), - seconds_valid: Some(60), - }) + .add_auth_key( + AddKeyForm { + opt_key: Some("Xc1L4PbQJSFGlrgSRZl8wxSFAuMa21z5".to_string()), + seconds_valid: Some(60), + }, + Some(headers_with_request_id(request_id)), + ) .await; let auth_key_resource = assert_auth_key_utf8(response).await; @@ -68,24 +79,44 @@ async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() let env = Started::new(&configuration::ephemeral().into()).await; + let request_id = Uuid::new_v4(); + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) - .add_auth_key(AddKeyForm { - opt_key: None, - seconds_valid: Some(60), - }) + .add_auth_key( + AddKeyForm { + opt_key: None, + seconds_valid: Some(60), + }, + Some(headers_with_request_id(request_id)), + ) .await; assert_token_not_valid(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... request_id={request_id}" + ); + + let request_id = Uuid::new_v4(); + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) - .add_auth_key(AddKeyForm { - opt_key: None, - seconds_valid: Some(60), - }) + .add_auth_key( + AddKeyForm { + opt_key: None, + seconds_valid: Some(60), + }, + Some(headers_with_request_id(request_id)), + ) .await; assert_unauthorized(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... 
request_id={request_id}" + ); + env.stop().await; } @@ -97,15 +128,25 @@ async fn should_fail_when_the_auth_key_cannot_be_generated() { force_database_error(&env.tracker); + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) - .add_auth_key(AddKeyForm { - opt_key: None, - seconds_valid: Some(60), - }) + .add_auth_key( + AddKeyForm { + opt_key: None, + seconds_valid: Some(60), + }, + Some(headers_with_request_id(request_id)), + ) .await; assert_failed_to_generate_key(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... request_id={request_id}" + ); + env.stop().await; } @@ -122,8 +163,10 @@ async fn should_allow_deleting_an_auth_key() { .await .unwrap(); + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) - .delete_auth_key(&auth_key.key.to_string()) + .delete_auth_key(&auth_key.key.to_string(), Some(headers_with_request_id(request_id))) .await; assert_ok(response).await; @@ -154,6 +197,8 @@ async fn should_fail_generating_a_new_auth_key_when_the_provided_key_is_invalid( ]; for invalid_key in invalid_keys { + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) .post_form( "keys", @@ -161,6 +206,7 @@ async fn should_fail_generating_a_new_auth_key_when_the_provided_key_is_invalid( opt_key: Some(invalid_key.to_string()), seconds_valid: 60, }, + Some(headers_with_request_id(request_id)), ) .await; @@ -190,6 +236,8 @@ async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid( ]; for invalid_key_duration in invalid_key_durations { + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) .post_form( "keys", @@ -197,6 +245,7 @@ async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid( opt_key: None, seconds_valid: invalid_key_duration.to_string(), }, + 
Some(headers_with_request_id(request_id)), ) .await; @@ -223,7 +272,11 @@ async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { ]; for invalid_auth_key in &invalid_auth_keys { - let response = Client::new(env.get_connection_info()).delete_auth_key(invalid_auth_key).await; + let request_id = Uuid::new_v4(); + + let response = Client::new(env.get_connection_info()) + .delete_auth_key(invalid_auth_key, Some(headers_with_request_id(request_id))) + .await; assert_invalid_auth_key_get_param(response, invalid_auth_key).await; } @@ -246,12 +299,19 @@ async fn should_fail_when_the_auth_key_cannot_be_deleted() { force_database_error(&env.tracker); + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) - .delete_auth_key(&auth_key.key.to_string()) + .delete_auth_key(&auth_key.key.to_string(), Some(headers_with_request_id(request_id))) .await; assert_failed_to_delete_key(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... request_id={request_id}" + ); + env.stop().await; } @@ -270,12 +330,19 @@ async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { .await .unwrap(); + let request_id = Uuid::new_v4(); + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) - .delete_auth_key(&auth_key.key.to_string()) + .delete_auth_key(&auth_key.key.to_string(), Some(headers_with_request_id(request_id))) .await; assert_token_not_valid(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... 
request_id={request_id}" + ); + // Generate new auth key let auth_key = env .tracker @@ -283,12 +350,19 @@ async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { .await .unwrap(); + let request_id = Uuid::new_v4(); + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) - .delete_auth_key(&auth_key.key.to_string()) + .delete_auth_key(&auth_key.key.to_string(), Some(headers_with_request_id(request_id))) .await; assert_unauthorized(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... request_id={request_id}" + ); + env.stop().await; } @@ -304,7 +378,11 @@ async fn should_allow_reloading_keys() { .await .unwrap(); - let response = Client::new(env.get_connection_info()).reload_keys().await; + let request_id = Uuid::new_v4(); + + let response = Client::new(env.get_connection_info()) + .reload_keys(Some(headers_with_request_id(request_id))) + .await; assert_ok(response).await; @@ -317,7 +395,9 @@ async fn should_fail_when_keys_cannot_be_reloaded() { let env = Started::new(&configuration::ephemeral().into()).await; + let request_id = Uuid::new_v4(); let seconds_valid = 60; + env.tracker .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await @@ -325,10 +405,17 @@ async fn should_fail_when_keys_cannot_be_reloaded() { force_database_error(&env.tracker); - let response = Client::new(env.get_connection_info()).reload_keys().await; + let response = Client::new(env.get_connection_info()) + .reload_keys(Some(headers_with_request_id(request_id))) + .await; assert_failed_to_reload_keys(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... 
request_id={request_id}" + ); + env.stop().await; } @@ -344,18 +431,32 @@ async fn should_not_allow_reloading_keys_for_unauthenticated_users() { .await .unwrap(); + let request_id = Uuid::new_v4(); + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) - .reload_keys() + .reload_keys(Some(headers_with_request_id(request_id))) .await; assert_token_not_valid(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... request_id={request_id}" + ); + + let request_id = Uuid::new_v4(); + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) - .reload_keys() + .reload_keys(Some(headers_with_request_id(request_id))) .await; assert_unauthorized(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... request_id={request_id}" + ); + env.stop().await; } @@ -363,14 +464,15 @@ mod deprecated_generate_key_endpoint { use torrust_tracker::core::auth::Key; use torrust_tracker_test_helpers::configuration; + use uuid::Uuid; - use crate::common::logging::{self}; + use crate::common::logging::{self, logs_contains_a_line_with}; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::servers::api::v1::asserts::{ assert_auth_key_utf8, assert_failed_to_generate_key, assert_invalid_key_duration_param, assert_token_not_valid, assert_unauthorized, }; - use crate::servers::api::v1::client::Client; + use crate::servers::api::v1::client::{headers_with_request_id, Client}; use crate::servers::api::{force_database_error, Started}; #[tokio::test] @@ -381,7 +483,9 @@ mod deprecated_generate_key_endpoint { let seconds_valid = 60; - let response = Client::new(env.get_connection_info()).generate_auth_key(seconds_valid).await; + let response = 
Client::new(env.get_connection_info()) + .generate_auth_key(seconds_valid, None) + .await; let auth_key_resource = assert_auth_key_utf8(response).await; @@ -400,21 +504,27 @@ mod deprecated_generate_key_endpoint { let env = Started::new(&configuration::ephemeral().into()).await; + let request_id = Uuid::new_v4(); let seconds_valid = 60; let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) - .generate_auth_key(seconds_valid) + .generate_auth_key(seconds_valid, Some(headers_with_request_id(request_id))) .await; assert_token_not_valid(response).await; let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) - .generate_auth_key(seconds_valid) + .generate_auth_key(seconds_valid, None) .await; assert_unauthorized(response).await; env.stop().await; + + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... request_id={request_id}" + ); } #[tokio::test] @@ -431,7 +541,7 @@ mod deprecated_generate_key_endpoint { for invalid_key_duration in invalid_key_durations { let response = Client::new(env.get_connection_info()) - .post_empty(&format!("key/{invalid_key_duration}")) + .post_empty(&format!("key/{invalid_key_duration}"), None) .await; assert_invalid_key_duration_param(response, invalid_key_duration).await; @@ -448,11 +558,19 @@ mod deprecated_generate_key_endpoint { force_database_error(&env.tracker); + let request_id = Uuid::new_v4(); let seconds_valid = 60; - let response = Client::new(env.get_connection_info()).generate_auth_key(seconds_valid).await; + let response = Client::new(env.get_connection_info()) + .generate_auth_key(seconds_valid, Some(headers_with_request_id(request_id))) + .await; assert_failed_to_generate_key(response).await; env.stop().await; + + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... 
request_id={request_id}" + ); } } diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index e05107d25..d49d03535 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -4,11 +4,12 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker::servers::apis::v1::context::stats::resources::Stats; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; +use uuid::Uuid; -use crate::common::logging::{self}; +use crate::common::logging::{self, logs_contains_a_line_with}; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::servers::api::v1::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; -use crate::servers::api::v1::client::Client; +use crate::servers::api::v1::client::{headers_with_request_id, Client}; use crate::servers::api::Started; #[tokio::test] @@ -22,7 +23,11 @@ async fn should_allow_getting_tracker_statistics() { &PeerBuilder::default().into(), ); - let response = Client::new(env.get_connection_info()).get_tracker_statistics().await; + let request_id = Uuid::new_v4(); + + let response = Client::new(env.get_connection_info()) + .get_tracker_statistics(Some(headers_with_request_id(request_id))) + .await; assert_stats( response, @@ -65,17 +70,31 @@ async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() let env = Started::new(&configuration::ephemeral().into()).await; + let request_id = Uuid::new_v4(); + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) - .get_tracker_statistics() + .get_tracker_statistics(Some(headers_with_request_id(request_id))) .await; assert_token_not_valid(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... 
request_id={request_id}" + ); + + let request_id = Uuid::new_v4(); + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) - .get_tracker_statistics() + .get_tracker_statistics(Some(headers_with_request_id(request_id))) .await; assert_unauthorized(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... request_id={request_id}" + ); + env.stop().await; } diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index 55c25d228..b741a1a65 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -5,15 +5,16 @@ use torrust_tracker::servers::apis::v1::context::torrent::resources::peer::Peer; use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{self, Torrent}; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; +use uuid::Uuid; use crate::common::http::{Query, QueryParam}; -use crate::common::logging::{self}; +use crate::common::logging::{self, logs_contains_a_line_with}; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::servers::api::v1::asserts::{ assert_bad_request, assert_invalid_infohash_param, assert_not_found, assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, }; -use crate::servers::api::v1::client::Client; +use crate::servers::api::v1::client::{headers_with_request_id, Client}; use crate::servers::api::v1::contract::fixtures::{ invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found, }; @@ -29,7 +30,11 @@ async fn should_allow_getting_all_torrents() { env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()); - let response = 
Client::new(env.get_connection_info()).get_torrents(Query::empty()).await; + let request_id = Uuid::new_v4(); + + let response = Client::new(env.get_connection_info()) + .get_torrents(Query::empty(), Some(headers_with_request_id(request_id))) + .await; assert_torrent_list( response, @@ -58,8 +63,13 @@ async fn should_allow_limiting_the_torrents_in_the_result() { env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) + .get_torrents( + Query::params([QueryParam::new("limit", "1")].to_vec()), + Some(headers_with_request_id(request_id)), + ) .await; assert_torrent_list( @@ -89,8 +99,13 @@ async fn should_allow_the_torrents_result_pagination() { env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) + .get_torrents( + Query::params([QueryParam::new("offset", "1")].to_vec()), + Some(headers_with_request_id(request_id)), + ) .await; assert_torrent_list( @@ -119,14 +134,19 @@ async fn should_allow_getting_a_list_of_torrents_providing_infohashes() { env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) - .get_torrents(Query::params( - [ - QueryParam::new("info_hash", "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d"), // DevSkim: ignore DS173237 - QueryParam::new("info_hash", "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d"), // DevSkim: ignore DS173237 - ] - .to_vec(), - )) + .get_torrents( + Query::params( + [ + 
QueryParam::new("info_hash", "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d"), // DevSkim: ignore DS173237 + QueryParam::new("info_hash", "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d"), // DevSkim: ignore DS173237 + ] + .to_vec(), + ), + Some(headers_with_request_id(request_id)), + ) .await; assert_torrent_list( @@ -160,8 +180,13 @@ async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_ let invalid_offsets = [" ", "-1", "1.1", "INVALID OFFSET"]; for invalid_offset in &invalid_offsets { + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) + .get_torrents( + Query::params([QueryParam::new("offset", invalid_offset)].to_vec()), + Some(headers_with_request_id(request_id)), + ) .await; assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; @@ -179,8 +204,13 @@ async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_p let invalid_limits = [" ", "-1", "1.1", "INVALID LIMIT"]; for invalid_limit in &invalid_limits { + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) + .get_torrents( + Query::params([QueryParam::new("limit", invalid_limit)].to_vec()), + Some(headers_with_request_id(request_id)), + ) .await; assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; @@ -198,8 +228,13 @@ async fn should_fail_getting_torrents_when_the_info_hash_parameter_is_invalid() let invalid_info_hashes = [" ", "-1", "1.1", "INVALID INFO_HASH"]; for invalid_info_hash in &invalid_info_hashes { + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) - .get_torrents(Query::params([QueryParam::new("info_hash", invalid_info_hash)].to_vec())) + .get_torrents( + 
Query::params([QueryParam::new("info_hash", invalid_info_hash)].to_vec()), + Some(headers_with_request_id(request_id)), + ) .await; assert_bad_request( @@ -218,18 +253,32 @@ async fn should_not_allow_getting_torrents_for_unauthenticated_users() { let env = Started::new(&configuration::ephemeral().into()).await; + let request_id = Uuid::new_v4(); + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) - .get_torrents(Query::empty()) + .get_torrents(Query::empty(), Some(headers_with_request_id(request_id))) .await; assert_token_not_valid(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... request_id={request_id}" + ); + + let request_id = Uuid::new_v4(); + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) - .get_torrents(Query::default()) + .get_torrents(Query::default(), Some(headers_with_request_id(request_id))) .await; assert_unauthorized(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... 
request_id={request_id}" + ); + env.stop().await; } @@ -245,8 +294,10 @@ async fn should_allow_getting_a_torrent_info() { env.add_torrent_peer(&info_hash, &peer); + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) - .get_torrent(&info_hash.to_string()) + .get_torrent(&info_hash.to_string(), Some(headers_with_request_id(request_id))) .await; assert_torrent_info( @@ -270,10 +321,11 @@ async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exis let env = Started::new(&configuration::ephemeral().into()).await; + let request_id = Uuid::new_v4(); let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let response = Client::new(env.get_connection_info()) - .get_torrent(&info_hash.to_string()) + .get_torrent(&info_hash.to_string(), Some(headers_with_request_id(request_id))) .await; assert_torrent_not_known(response).await; @@ -288,13 +340,21 @@ async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invali let env = Started::new(&configuration::ephemeral().into()).await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { - let response = Client::new(env.get_connection_info()).get_torrent(invalid_infohash).await; + let request_id = Uuid::new_v4(); + + let response = Client::new(env.get_connection_info()) + .get_torrent(invalid_infohash, Some(headers_with_request_id(request_id))) + .await; assert_invalid_infohash_param(response, invalid_infohash).await; } for invalid_infohash in &invalid_infohashes_returning_not_found() { - let response = Client::new(env.get_connection_info()).get_torrent(invalid_infohash).await; + let request_id = Uuid::new_v4(); + + let response = Client::new(env.get_connection_info()) + .get_torrent(invalid_infohash, Some(headers_with_request_id(request_id))) + .await; assert_not_found(response).await; } @@ -312,17 +372,31 @@ async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { 
env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()); + let request_id = Uuid::new_v4(); + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) - .get_torrent(&info_hash.to_string()) + .get_torrent(&info_hash.to_string(), Some(headers_with_request_id(request_id))) .await; assert_token_not_valid(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... request_id={request_id}" + ); + + let request_id = Uuid::new_v4(); + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) - .get_torrent(&info_hash.to_string()) + .get_torrent(&info_hash.to_string(), Some(headers_with_request_id(request_id))) .await; assert_unauthorized(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... request_id={request_id}" + ); + env.stop().await; } diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs index 2be1706fc..d0a80e968 100644 --- a/tests/servers/api/v1/contract/context/whitelist.rs +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -2,14 +2,15 @@ use std::str::FromStr; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; +use uuid::Uuid; -use crate::common::logging::{self}; +use crate::common::logging::{self, logs_contains_a_line_with}; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::servers::api::v1::asserts::{ assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, assert_invalid_infohash_param, assert_not_found, assert_ok, assert_token_not_valid, assert_unauthorized, }; -use crate::servers::api::v1::client::Client; +use 
crate::servers::api::v1::client::{headers_with_request_id, Client}; use crate::servers::api::v1::contract::fixtures::{ invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found, }; @@ -21,9 +22,12 @@ async fn should_allow_whitelisting_a_torrent() { let env = Started::new(&configuration::ephemeral().into()).await; + let request_id = Uuid::new_v4(); let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(env.get_connection_info()).whitelist_a_torrent(&info_hash).await; + let response = Client::new(env.get_connection_info()) + .whitelist_a_torrent(&info_hash, Some(headers_with_request_id(request_id))) + .await; assert_ok(response).await; assert!( @@ -45,10 +49,18 @@ async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() let api_client = Client::new(env.get_connection_info()); - let response = api_client.whitelist_a_torrent(&info_hash).await; + let request_id = Uuid::new_v4(); + + let response = api_client + .whitelist_a_torrent(&info_hash, Some(headers_with_request_id(request_id))) + .await; assert_ok(response).await; - let response = api_client.whitelist_a_torrent(&info_hash).await; + let request_id = Uuid::new_v4(); + + let response = api_client + .whitelist_a_torrent(&info_hash, Some(headers_with_request_id(request_id))) + .await; assert_ok(response).await; env.stop().await; @@ -62,18 +74,32 @@ async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let request_id = Uuid::new_v4(); + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) - .whitelist_a_torrent(&info_hash) + .whitelist_a_torrent(&info_hash, Some(headers_with_request_id(request_id))) .await; assert_token_not_valid(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... 
request_id={request_id}" + ); + + let request_id = Uuid::new_v4(); + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) - .whitelist_a_torrent(&info_hash) + .whitelist_a_torrent(&info_hash, Some(headers_with_request_id(request_id))) .await; assert_unauthorized(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... request_id={request_id}" + ); + env.stop().await; } @@ -87,10 +113,19 @@ async fn should_fail_when_the_torrent_cannot_be_whitelisted() { force_database_error(&env.tracker); - let response = Client::new(env.get_connection_info()).whitelist_a_torrent(&info_hash).await; + let request_id = Uuid::new_v4(); + + let response = Client::new(env.get_connection_info()) + .whitelist_a_torrent(&info_hash, Some(headers_with_request_id(request_id))) + .await; assert_failed_to_whitelist_torrent(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... 
request_id={request_id}" + ); + env.stop().await; } @@ -100,17 +135,21 @@ async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invali let env = Started::new(&configuration::ephemeral().into()).await; + let request_id = Uuid::new_v4(); + for invalid_infohash in &invalid_infohashes_returning_bad_request() { let response = Client::new(env.get_connection_info()) - .whitelist_a_torrent(invalid_infohash) + .whitelist_a_torrent(invalid_infohash, Some(headers_with_request_id(request_id))) .await; assert_invalid_infohash_param(response, invalid_infohash).await; } + let request_id = Uuid::new_v4(); + for invalid_infohash in &invalid_infohashes_returning_not_found() { let response = Client::new(env.get_connection_info()) - .whitelist_a_torrent(invalid_infohash) + .whitelist_a_torrent(invalid_infohash, Some(headers_with_request_id(request_id))) .await; assert_not_found(response).await; @@ -127,10 +166,13 @@ async fn should_allow_removing_a_torrent_from_the_whitelist() { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); + env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) - .remove_torrent_from_whitelist(&hash) + .remove_torrent_from_whitelist(&hash, Some(headers_with_request_id(request_id))) .await; assert_ok(response).await; @@ -147,8 +189,10 @@ async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whi let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) - .remove_torrent_from_whitelist(&non_whitelisted_torrent_hash) + .remove_torrent_from_whitelist(&non_whitelisted_torrent_hash, Some(headers_with_request_id(request_id))) .await; assert_ok(response).await; @@ -163,16 +207,20 @@ async fn 
should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_inf let env = Started::new(&configuration::ephemeral().into()).await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) - .remove_torrent_from_whitelist(invalid_infohash) + .remove_torrent_from_whitelist(invalid_infohash, Some(headers_with_request_id(request_id))) .await; assert_invalid_infohash_param(response, invalid_infohash).await; } for invalid_infohash in &invalid_infohashes_returning_not_found() { + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) - .remove_torrent_from_whitelist(invalid_infohash) + .remove_torrent_from_whitelist(invalid_infohash, Some(headers_with_request_id(request_id))) .await; assert_not_found(response).await; @@ -193,12 +241,19 @@ async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { force_database_error(&env.tracker); + let request_id = Uuid::new_v4(); + let response = Client::new(env.get_connection_info()) - .remove_torrent_from_whitelist(&hash) + .remove_torrent_from_whitelist(&hash, Some(headers_with_request_id(request_id))) .await; assert_failed_to_remove_torrent_from_whitelist(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... 
request_id={request_id}" + ); + env.stop().await; } @@ -212,19 +267,35 @@ async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthentica let info_hash = InfoHash::from_str(&hash).unwrap(); env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + let request_id = Uuid::new_v4(); + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) - .remove_torrent_from_whitelist(&hash) + .remove_torrent_from_whitelist(&hash, Some(headers_with_request_id(request_id))) .await; assert_token_not_valid(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... request_id={request_id}" + ); + env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + let request_id = Uuid::new_v4(); + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) - .remove_torrent_from_whitelist(&hash) + .remove_torrent_from_whitelist(&hash, Some(headers_with_request_id(request_id))) .await; assert_unauthorized(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... request_id={request_id}" + ); + env.stop().await; } @@ -238,7 +309,11 @@ async fn should_allow_reload_the_whitelist_from_the_database() { let info_hash = InfoHash::from_str(&hash).unwrap(); env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(env.get_connection_info()).reload_whitelist().await; + let request_id = Uuid::new_v4(); + + let response = Client::new(env.get_connection_info()) + .reload_whitelist(Some(headers_with_request_id(request_id))) + .await; assert_ok(response).await; /* todo: this assert fails because the whitelist has not been reloaded yet. 
@@ -267,9 +342,18 @@ async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { force_database_error(&env.tracker); - let response = Client::new(env.get_connection_info()).reload_whitelist().await; + let request_id = Uuid::new_v4(); + + let response = Client::new(env.get_connection_info()) + .reload_whitelist(Some(headers_with_request_id(request_id))) + .await; assert_failed_to_reload_whitelist(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... request_id={request_id}" + ); + env.stop().await; } From 03243cbd132ebe3d1d195b14ad528e7f48fff7d3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 26 Dec 2024 12:12:22 +0000 Subject: [PATCH 061/802] tests: add assertions for HTTP tracker error logs It's using the info-hash to find the ERROR in logs. IT's generated a newly random info-hash for each tests. It could have been also used a `x-request-id` header in the HTTP request but this solution is simpler and the chances to have an info-hash collision is very low. --- tests/common/fixtures.rs | 10 ++++++++++ tests/servers/http/v1/contract.rs | 28 +++++++++++++++++++++++----- 2 files changed, 33 insertions(+), 5 deletions(-) diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index bbdebff76..562ed1544 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -1,3 +1,5 @@ +use bittorrent_primitives::info_hash::InfoHash; + #[allow(dead_code)] pub fn invalid_info_hashes() -> Vec { [ @@ -10,3 +12,11 @@ pub fn invalid_info_hashes() -> Vec { ] .to_vec() } + +/// Returns a random info hash. 
+pub fn random_info_hash() -> InfoHash { + let mut rng = rand::thread_rng(); + let random_bytes: [u8; 20] = rand::Rng::gen(&mut rng); + + InfoHash::from_bytes(&random_bytes) +} diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 632f38bf4..83ebb9ae3 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -1217,8 +1217,10 @@ mod configured_as_whitelisted { use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; + use uuid::Uuid; - use crate::common::logging::{self}; + use crate::common::fixtures::random_info_hash; + use crate::common::logging::{self, logs_contains_a_line_with}; use crate::servers::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; use crate::servers::http::client::Client; use crate::servers::http::requests::announce::QueryBuilder; @@ -1230,14 +1232,24 @@ mod configured_as_whitelisted { let env = Started::new(&configuration::ephemeral_listed().into()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let request_id = Uuid::new_v4(); + let info_hash = random_info_hash(); let response = Client::new(*env.bind_address()) - .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) + .announce_with_header( + &QueryBuilder::default().with_info_hash(&info_hash).query(), + "x-request-id", + &request_id.to_string(), + ) .await; assert_torrent_not_in_whitelist_error_response(response).await; + assert!( + logs_contains_a_line_with(&["ERROR", &format!("{info_hash}"), "is not whitelisted"]), + "Expected logs to contain: ERROR ... 
{info_hash} is not whitelisted" + ); + env.stop().await; } @@ -1272,7 +1284,8 @@ mod configured_as_whitelisted { use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; - use crate::common::logging::{self}; + use crate::common::fixtures::random_info_hash; + use crate::common::logging::{self, logs_contains_a_line_with}; use crate::servers::http::asserts::assert_scrape_response; use crate::servers::http::client::Client; use crate::servers::http::responses::scrape::{File, ResponseBuilder}; @@ -1284,7 +1297,7 @@ mod configured_as_whitelisted { let env = Started::new(&configuration::ephemeral_listed().into()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = random_info_hash(); env.add_torrent_peer( &info_hash, @@ -1306,6 +1319,11 @@ mod configured_as_whitelisted { assert_scrape_response(response, &expected_scrape_response).await; + assert!( + logs_contains_a_line_with(&["ERROR", &format!("{info_hash}"), "is not whitelisted"]), + "Expected logs to contain: ERROR ... 
{info_hash} is not whitelisted" + ); + env.stop().await; } From 33e72bb04e14370e90ec1c421b3666488e72e19a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 26 Dec 2024 15:46:43 +0000 Subject: [PATCH 062/802] tests: add info-hash to announce req fixture --- tests/servers/udp/contract.rs | 36 +++++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 8 deletions(-) diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index 86bb1d18c..ddccf4f9a 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -118,22 +118,30 @@ mod receiving_an_announce_request { use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; + use crate::common::fixtures::random_info_hash; use crate::common::logging; use crate::servers::udp::asserts::is_ipv4_announce_response; use crate::servers::udp::contract::send_connection_request; use crate::servers::udp::Started; - pub async fn assert_send_and_get_announce(tx_id: TransactionId, c_id: ConnectionId, client: &UdpTrackerClient) { - let response = send_and_get_announce(tx_id, c_id, client).await; + pub async fn assert_send_and_get_announce( + tx_id: TransactionId, + c_id: ConnectionId, + info_hash: bittorrent_primitives::info_hash::InfoHash, + client: &UdpTrackerClient, + ) { + let response = send_and_get_announce(tx_id, c_id, info_hash, client).await; assert!(is_ipv4_announce_response(&response)); } pub async fn send_and_get_announce( tx_id: TransactionId, c_id: ConnectionId, + info_hash: bittorrent_primitives::info_hash::InfoHash, client: &UdpTrackerClient, ) -> aquatic_udp_protocol::Response { - let announce_request = build_sample_announce_request(tx_id, c_id, client.client.socket.local_addr().unwrap().port()); + let announce_request = + build_sample_announce_request(tx_id, c_id, client.client.socket.local_addr().unwrap().port(), info_hash); match client.send(announce_request.into()).await { Ok(_) => (), @@ -146,12 +154,17 @@ mod 
receiving_an_announce_request { } } - fn build_sample_announce_request(tx_id: TransactionId, c_id: ConnectionId, port: u16) -> AnnounceRequest { + fn build_sample_announce_request( + tx_id: TransactionId, + c_id: ConnectionId, + port: u16, + info_hash: bittorrent_primitives::info_hash::InfoHash, + ) -> AnnounceRequest { AnnounceRequest { connection_id: ConnectionId(c_id.0), action_placeholder: AnnounceActionPlaceholder::default(), transaction_id: tx_id, - info_hash: InfoHash([0u8; 20]), + info_hash: InfoHash(info_hash.0), peer_id: PeerId([255u8; 20]), bytes_downloaded: NumberOfBytes(0i64.into()), bytes_uploaded: NumberOfBytes(0i64.into()), @@ -179,7 +192,9 @@ mod receiving_an_announce_request { let c_id = send_connection_request(tx_id, &client).await; - assert_send_and_get_announce(tx_id, c_id, &client).await; + let info_hash = random_info_hash(); + + assert_send_and_get_announce(tx_id, c_id, info_hash, &client).await; env.stop().await; } @@ -199,9 +214,11 @@ mod receiving_an_announce_request { let c_id = send_connection_request(tx_id, &client).await; + let info_hash = random_info_hash(); + for x in 0..1000 { tracing::info!("req no: {x}"); - assert_send_and_get_announce(tx_id, c_id, &client).await; + assert_send_and_get_announce(tx_id, c_id, info_hash, &client).await; } env.stop().await; @@ -224,9 +241,11 @@ mod receiving_an_announce_request { let invalid_connection_id = ConnectionId::new(0); // Zero is one of the not normal values. 
+ let info_hash = random_info_hash(); + for x in 0..=10 { tracing::info!("req no: {x}"); - send_and_get_announce(tx_id, invalid_connection_id, &client).await; + send_and_get_announce(tx_id, invalid_connection_id, info_hash, &client).await; } // The twelfth request should be banned (timeout error) @@ -235,6 +254,7 @@ mod receiving_an_announce_request { tx_id, invalid_connection_id, client.client.socket.local_addr().unwrap().port(), + info_hash, ); match client.send(announce_request.into()).await { From 5f206f0ad806a93c0878b3a3218a4ef8bc80434a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 26 Dec 2024 16:14:02 +0000 Subject: [PATCH 063/802] feat: add more fields to UDP error response log ``` 2024-12-26T16:12:06.336340Z ERROR UDP TRACKER: response error error=cookie value is from future: 6831818432388564000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000, expected < 1735229527.33634 remote_addr=127.0.0.1:35550 local_addr=127.0.0.1:36599 request_id=ce34c229-82c6-4a18-a60c-5eea1cf55919 transaction_id=123 ``` Added custom log with fields: - remote_addr - request_id - transaction_id --- src/servers/udp/connection_cookie.rs | 2 +- src/servers/udp/handlers.rs | 33 +++++++++++++++++++++++++--- 2 files changed, 31 insertions(+), 4 deletions(-) diff --git a/src/servers/udp/connection_cookie.rs b/src/servers/udp/connection_cookie.rs index 50359033c..439be9da7 100644 --- a/src/servers/udp/connection_cookie.rs +++ b/src/servers/udp/connection_cookie.rs @@ -121,7 +121,7 @@ use std::ops::Range; /// # Panics /// /// It would panic if the range start is not smaller than it's end. 
-#[instrument(err)] +#[instrument] pub fn check(cookie: &Cookie, fingerprint: u64, valid_range: Range) -> Result { assert!(valid_range.start <= valid_range.end, "range start is larger than range end"); diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 1f838cd68..1a9c164e2 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -22,7 +22,7 @@ use super::server::banning::BanService; use super::RawRequest; use crate::core::{statistics, PeersWanted, Tracker}; use crate::servers::udp::error::Error; -use crate::servers::udp::peer_builder; +use crate::servers::udp::{peer_builder, UDP_TRACKER_LOG_TARGET}; use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; use crate::CurrentClock; @@ -61,7 +61,9 @@ pub(crate) async fn handle_packet( cookie_time_values: CookieTimeValues, ban_service: Arc>, ) -> Response { - tracing::Span::current().record("request_id", Uuid::new_v4().to_string()); + let request_id = Uuid::new_v4(); + + tracing::Span::current().record("request_id", request_id.to_string()); tracing::debug!("Handling Packets: {udp_request:?}"); let start_time = Instant::now(); @@ -84,6 +86,8 @@ pub(crate) async fn handle_packet( handle_error( udp_request.from, + local_addr, + request_id, tracker, cookie_time_values.valid_range.clone(), &e, @@ -92,7 +96,18 @@ pub(crate) async fn handle_packet( .await } }, - Err(e) => handle_error(udp_request.from, tracker, cookie_time_values.valid_range.clone(), &e, None).await, + Err(e) => { + handle_error( + udp_request.from, + local_addr, + request_id, + tracker, + cookie_time_values.valid_range.clone(), + &e, + None, + ) + .await + } }; let latency = start_time.elapsed(); @@ -344,6 +359,8 @@ pub async fn handle_scrape( #[instrument(fields(transaction_id), skip(tracker), ret(level = Level::TRACE))] async fn handle_error( remote_addr: SocketAddr, + local_addr: SocketAddr, + request_id: Uuid, tracker: &Tracker, cookie_valid_range: Range, e: &Error, @@ -351,6 +368,16 @@ async fn 
handle_error( ) -> Response { tracing::trace!("handle error"); + match transaction_id { + Some(transaction_id) => { + let transaction_id = transaction_id.0.to_string(); + tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %e, %remote_addr, %local_addr, %request_id, %transaction_id, "response error"); + } + None => { + tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %e, %remote_addr, %local_addr, %request_id, "response error"); + } + } + let e = if let Error::RequestParseError { request_parse_error } = e { match request_parse_error { RequestParseError::Sendable { From 71e7ef7888768f2a47493b1b7be13a6f989d1f8a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 26 Dec 2024 16:34:53 +0000 Subject: [PATCH 064/802] test: [#1164] assert logged error when connection ID is wrong in UDP tracker. Only for the tests that is currently showing logging errors. --- tests/common/fixtures.rs | 7 +++++++ tests/servers/udp/contract.rs | 19 +++++++++++++++---- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index 562ed1544..f96b03dd1 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -1,3 +1,4 @@ +use aquatic_udp_protocol::TransactionId; use bittorrent_primitives::info_hash::InfoHash; #[allow(dead_code)] @@ -20,3 +21,9 @@ pub fn random_info_hash() -> InfoHash { InfoHash::from_bytes(&random_bytes) } + +/// Returns a random transaction id. 
+pub fn random_transaction_id() -> TransactionId { + let random_value = rand::Rng::gen::(&mut rand::thread_rng()); + TransactionId::new(random_value) +} diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index ddccf4f9a..9618bef65 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -118,8 +118,8 @@ mod receiving_an_announce_request { use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; - use crate::common::fixtures::random_info_hash; - use crate::common::logging; + use crate::common::fixtures::{random_info_hash, random_transaction_id}; + use crate::common::logging::{self, logs_contains_a_line_with}; use crate::servers::udp::asserts::is_ipv4_announce_response; use crate::servers::udp::contract::send_connection_request; use crate::servers::udp::Started; @@ -235,8 +235,6 @@ mod receiving_an_announce_request { Err(err) => panic!("{err}"), }; - let tx_id = TransactionId::new(123); - // The eleven first requests should be fine let invalid_connection_id = ConnectionId::new(0); // Zero is one of the not normal values. @@ -245,11 +243,23 @@ mod receiving_an_announce_request { for x in 0..=10 { tracing::info!("req no: {x}"); + + let tx_id = random_transaction_id(); + send_and_get_announce(tx_id, invalid_connection_id, info_hash, &client).await; + + let transaction_id = tx_id.0.to_string(); + + assert!( + logs_contains_a_line_with(&["ERROR", "UDP TRACKER", &transaction_id.to_string()]), + "Expected logs to contain: ERROR ... UDP TRACKER ... 
transaction_id={transaction_id}" + ); } // The twelfth request should be banned (timeout error) + let tx_id = random_transaction_id(); + let announce_request = build_sample_announce_request( tx_id, invalid_connection_id, @@ -257,6 +267,7 @@ mod receiving_an_announce_request { info_hash, ); + // This should return a timeout error match client.send(announce_request.into()).await { Ok(_) => (), Err(err) => panic!("{err}"), From bfaf08b394e16fee64ad5165eb0f1e376b1c189e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 27 Dec 2024 09:07:04 +0000 Subject: [PATCH 065/802] refactor: [#1137] rename lib to torrust_tracker_lib to avoid collisions in docs: ``` cargo doc --no-deps --bins --examples --workspace --all-features ``` becuase the main binary and lib have the same name. --- Cargo.toml | 3 +++ src/bin/e2e_tests_runner.rs | 2 +- src/bin/profiling.rs | 2 +- src/core/auth.rs | 6 +++--- src/core/databases/driver.rs | 8 ++++---- src/main.rs | 2 +- src/servers/http/v1/query.rs | 8 ++++---- src/servers/http/v1/requests/announce.rs | 2 +- src/servers/http/v1/responses/announce.rs | 4 ++-- src/servers/http/v1/responses/error.rs | 2 +- src/servers/http/v1/responses/scrape.rs | 4 ++-- src/servers/http/v1/services/peer_ip_resolver.rs | 4 ++-- tests/common/logging.rs | 2 +- tests/servers/api/environment.rs | 10 +++++----- tests/servers/api/mod.rs | 4 ++-- tests/servers/api/v1/asserts.rs | 6 +++--- tests/servers/api/v1/contract/context/auth_key.rs | 4 ++-- .../servers/api/v1/contract/context/health_check.rs | 2 +- tests/servers/api/v1/contract/context/stats.rs | 2 +- tests/servers/api/v1/contract/context/torrent.rs | 4 ++-- tests/servers/health_check_api/contract.rs | 10 +++++----- tests/servers/health_check_api/environment.rs | 8 ++++---- tests/servers/http/client.rs | 2 +- tests/servers/http/connection_info.rs | 2 +- tests/servers/http/environment.rs | 10 +++++----- tests/servers/http/mod.rs | 2 +- tests/servers/http/v1/contract.rs | 6 +++--- tests/servers/udp/contract.rs | 2 
+- tests/servers/udp/environment.rs | 12 ++++++------ tests/servers/udp/mod.rs | 2 +- 30 files changed, 70 insertions(+), 67 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 6832f17f2..f1ae96dad 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,6 +15,9 @@ repository.workspace = true rust-version.workspace = true version.workspace = true +[lib] +name = "torrust_tracker_lib" + [workspace.package] authors = ["Nautilus Cyberneering , Mick van Dijke "] categories = ["network-programming", "web-programming"] diff --git a/src/bin/e2e_tests_runner.rs b/src/bin/e2e_tests_runner.rs index eb91c0d86..5787799dc 100644 --- a/src/bin/e2e_tests_runner.rs +++ b/src/bin/e2e_tests_runner.rs @@ -1,5 +1,5 @@ //! Program to run E2E tests. -use torrust_tracker::console::ci::e2e; +use torrust_tracker_lib::console::ci::e2e; fn main() -> anyhow::Result<()> { e2e::runner::run() diff --git a/src/bin/profiling.rs b/src/bin/profiling.rs index bc1ac6526..aca6ab98d 100644 --- a/src/bin/profiling.rs +++ b/src/bin/profiling.rs @@ -1,6 +1,6 @@ //! This binary is used for profiling with [valgrind](https://valgrind.org/) //! and [kcachegrind](https://kcachegrind.github.io/). -use torrust_tracker::console::profiling::run; +use torrust_tracker_lib::console::profiling::run; #[tokio::main] async fn main() { diff --git a/src/core/auth.rs b/src/core/auth.rs index 7bbb25eca..c92a4723d 100644 --- a/src/core/auth.rs +++ b/src/core/auth.rs @@ -12,7 +12,7 @@ //! Keys are stored in this struct: //! //! ```rust,no_run -//! use torrust_tracker::core::auth::Key; +//! use torrust_tracker_lib::core::auth::Key; //! use torrust_tracker_primitives::DurationSinceUnixEpoch; //! //! pub struct ExpiringKey { @@ -26,7 +26,7 @@ //! You can generate a new key valid for `9999` seconds and `0` nanoseconds from the current time with the following: //! //! ```rust,no_run -//! use torrust_tracker::core::auth; +//! use torrust_tracker_lib::core::auth; //! use std::time::Duration; //! //! 
let expiring_key = auth::generate_key(Some(Duration::new(9999, 0))); @@ -197,7 +197,7 @@ impl Key { /// Error returned when a key cannot be parsed from a string. /// /// ```text -/// use torrust_tracker::core::auth::Key; +/// use torrust_tracker_lib::core::auth::Key; /// use std::str::FromStr; /// /// let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; diff --git a/src/core/databases/driver.rs b/src/core/databases/driver.rs index 3cbab9473..b5cb797aa 100644 --- a/src/core/databases/driver.rs +++ b/src/core/databases/driver.rs @@ -30,8 +30,8 @@ pub enum Driver { /// Example for `SQLite3`: /// /// ```text -/// use torrust_tracker::core::databases; -/// use torrust_tracker::core::databases::driver::Driver; +/// use torrust_tracker_lib::core::databases; +/// use torrust_tracker_lib::core::databases::driver::Driver; /// /// let db_driver = Driver::Sqlite3; /// let db_path = "./storage/tracker/lib/database/sqlite3.db".to_string(); @@ -41,8 +41,8 @@ pub enum Driver { /// Example for `MySQL`: /// /// ```text -/// use torrust_tracker::core::databases; -/// use torrust_tracker::core::databases::driver::Driver; +/// use torrust_tracker_lib::core::databases; +/// use torrust_tracker_lib::core::databases::driver::Driver; /// /// let db_driver = Driver::MySQL; /// let db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker".to_string(); diff --git a/src/main.rs b/src/main.rs index e0b7bc4ab..0e2bcfbc9 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,4 +1,4 @@ -use torrust_tracker::{app, bootstrap}; +use torrust_tracker_lib::{app, bootstrap}; #[tokio::main] async fn main() { diff --git a/src/servers/http/v1/query.rs b/src/servers/http/v1/query.rs index abaf89845..e65f62ada 100644 --- a/src/servers/http/v1/query.rs +++ b/src/servers/http/v1/query.rs @@ -31,7 +31,7 @@ impl Query { /// input `name` exists. 
For example: /// /// ```text - /// use torrust_tracker::servers::http::v1::query::Query; + /// use torrust_tracker_lib::servers::http::v1::query::Query; /// /// let raw_query = "param1=value1¶m2=value2"; /// @@ -44,7 +44,7 @@ impl Query { /// It returns only the first param value even if it has multiple values: /// /// ```text - /// use torrust_tracker::servers::http::v1::query::Query; + /// use torrust_tracker_lib::servers::http::v1::query::Query; /// /// let raw_query = "param1=value1¶m1=value2"; /// @@ -60,7 +60,7 @@ impl Query { /// Returns all the param values as a vector. /// /// ```text - /// use torrust_tracker::servers::http::v1::query::Query; + /// use torrust_tracker_lib::servers::http::v1::query::Query; /// /// let query = "param1=value1¶m1=value2".parse::().unwrap(); /// @@ -73,7 +73,7 @@ impl Query { /// Returns all the param values as a vector even if it has only one value. /// /// ```text - /// use torrust_tracker::servers::http::v1::query::Query; + /// use torrust_tracker_lib::servers::http::v1::query::Query; /// /// let query = "param1=value1".parse::().unwrap(); /// diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs index 954d62c82..e8a730e9c 100644 --- a/src/servers/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -31,7 +31,7 @@ const NUMWANT: &str = "numwant"; /// /// ```text /// use aquatic_udp_protocol::{NumberOfBytes, PeerId}; -/// use torrust_tracker::servers::http::v1::requests::announce::{Announce, Compact, Event}; +/// use torrust_tracker_lib::servers::http::v1::requests::announce::{Announce, Compact, Event}; /// use bittorrent_primitives::info_hash::InfoHash; /// /// let request = Announce { diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs index bc63aa7fd..925c0893e 100644 --- a/src/servers/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -154,7 +154,7 @@ impl Into> for Compact { 
/// /// ```text /// use std::net::{IpAddr, Ipv4Addr}; -/// use torrust_tracker::servers::http::v1::responses::announce::{Normal, NormalPeer}; +/// use torrust_tracker_lib::servers::http::v1::responses::announce::{Normal, NormalPeer}; /// /// let peer = NormalPeer { /// peer_id: *b"-qB00000000000000001", @@ -206,7 +206,7 @@ impl From<&NormalPeer> for BencodeMut<'_> { /// /// ```text /// use std::net::{IpAddr, Ipv4Addr}; -/// use torrust_tracker::servers::http::v1::responses::announce::{Compact, CompactPeer, CompactPeerData}; +/// use torrust_tracker_lib::servers::http::v1::responses::announce::{Compact, CompactPeer, CompactPeerData}; /// /// let peer = CompactPeer::V4(CompactPeerData { /// ip: Ipv4Addr::new(0x69, 0x69, 0x69, 0x69), // 105.105.105.105 diff --git a/src/servers/http/v1/responses/error.rs b/src/servers/http/v1/responses/error.rs index 8572d861d..7223063fd 100644 --- a/src/servers/http/v1/responses/error.rs +++ b/src/servers/http/v1/responses/error.rs @@ -27,7 +27,7 @@ impl Error { /// Returns the bencoded representation of the `Error` struct. /// /// ```text - /// use torrust_tracker::servers::http::v1::responses::error::Error; + /// use torrust_tracker_lib::servers::http::v1::responses::error::Error; /// /// let err = Error { /// failure_reason: "error message".to_owned(), diff --git a/src/servers/http/v1/responses/scrape.rs b/src/servers/http/v1/responses/scrape.rs index 878311ce7..1f367a9c9 100644 --- a/src/servers/http/v1/responses/scrape.rs +++ b/src/servers/http/v1/responses/scrape.rs @@ -12,10 +12,10 @@ use crate::core::ScrapeData; /// The `Scrape` response for the HTTP tracker. 
/// /// ```text -/// use torrust_tracker::servers::http::v1::responses::scrape::Bencoded; +/// use torrust_tracker_lib::servers::http::v1::responses::scrape::Bencoded; /// use bittorrent_primitives::info_hash::InfoHash; /// use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -/// use torrust_tracker::core::ScrapeData; +/// use torrust_tracker_lib::core::ScrapeData; /// /// let info_hash = InfoHash::from_bytes(&[0x69; 20]); /// let mut scrape_data = ScrapeData::empty(); diff --git a/src/servers/http/v1/services/peer_ip_resolver.rs b/src/servers/http/v1/services/peer_ip_resolver.rs index 548a99756..56bd3d86f 100644 --- a/src/servers/http/v1/services/peer_ip_resolver.rs +++ b/src/servers/http/v1/services/peer_ip_resolver.rs @@ -63,7 +63,7 @@ pub enum PeerIpResolutionError { /// use std::net::IpAddr; /// use std::str::FromStr; /// -/// use torrust_tracker::servers::http::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; +/// use torrust_tracker_lib::servers::http::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; /// /// let on_reverse_proxy = true; /// @@ -85,7 +85,7 @@ pub enum PeerIpResolutionError { /// use std::net::IpAddr; /// use std::str::FromStr; /// -/// use torrust_tracker::servers::http::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; +/// use torrust_tracker_lib::servers::http::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; /// /// let on_reverse_proxy = false; /// diff --git a/tests/common/logging.rs b/tests/common/logging.rs index d2abc37b4..f04dcdc7d 100644 --- a/tests/common/logging.rs +++ b/tests/common/logging.rs @@ -3,7 +3,7 @@ use std::collections::VecDeque; use std::io; use std::sync::{Mutex, MutexGuard, Once, OnceLock}; -use torrust_tracker::bootstrap::logging::TraceStyle; +use torrust_tracker_lib::bootstrap::logging::TraceStyle; use tracing::level_filters::LevelFilter; use 
tracing_subscriber::fmt::MakeWriter; diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index bffe42603..f754e329f 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -3,12 +3,12 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use futures::executor::block_on; -use torrust_tracker::bootstrap::app::initialize_with_configuration; -use torrust_tracker::bootstrap::jobs::make_rust_tls; -use torrust_tracker::core::Tracker; -use torrust_tracker::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; -use torrust_tracker::servers::registar::Registar; use torrust_tracker_configuration::{Configuration, HttpApi}; +use torrust_tracker_lib::bootstrap::app::initialize_with_configuration; +use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; +use torrust_tracker_lib::core::Tracker; +use torrust_tracker_lib::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; +use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_primitives::peer; use super::connection_info::ConnectionInfo; diff --git a/tests/servers/api/mod.rs b/tests/servers/api/mod.rs index 38df46e9b..278fd869d 100644 --- a/tests/servers/api/mod.rs +++ b/tests/servers/api/mod.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use torrust_tracker::core::Tracker; -use torrust_tracker::servers::apis::server; +use torrust_tracker_lib::core::Tracker; +use torrust_tracker_lib::servers::apis::server; pub mod connection_info; pub mod environment; diff --git a/tests/servers/api/v1/asserts.rs b/tests/servers/api/v1/asserts.rs index aeecfa170..f3d04d524 100644 --- a/tests/servers/api/v1/asserts.rs +++ b/tests/servers/api/v1/asserts.rs @@ -1,9 +1,9 @@ // code-review: should we use macros to return the exact line where the assert fails? 
use reqwest::Response; -use torrust_tracker::servers::apis::v1::context::auth_key::resources::AuthKey; -use torrust_tracker::servers::apis::v1::context::stats::resources::Stats; -use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{ListItem, Torrent}; +use torrust_tracker_lib::servers::apis::v1::context::auth_key::resources::AuthKey; +use torrust_tracker_lib::servers::apis::v1::context::stats::resources::Stats; +use torrust_tracker_lib::servers::apis::v1::context::torrent::resources::torrent::{ListItem, Torrent}; // Resource responses diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs index 8ef72230e..4dc039a9b 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -1,7 +1,7 @@ use std::time::Duration; use serde::Serialize; -use torrust_tracker::core::auth::Key; +use torrust_tracker_lib::core::auth::Key; use torrust_tracker_test_helpers::configuration; use uuid::Uuid; @@ -462,7 +462,7 @@ async fn should_not_allow_reloading_keys_for_unauthenticated_users() { mod deprecated_generate_key_endpoint { - use torrust_tracker::core::auth::Key; + use torrust_tracker_lib::core::auth::Key; use torrust_tracker_test_helpers::configuration; use uuid::Uuid; diff --git a/tests/servers/api/v1/contract/context/health_check.rs b/tests/servers/api/v1/contract/context/health_check.rs index fa6cfa094..32228575d 100644 --- a/tests/servers/api/v1/contract/context/health_check.rs +++ b/tests/servers/api/v1/contract/context/health_check.rs @@ -1,4 +1,4 @@ -use torrust_tracker::servers::apis::v1::context::health_check::resources::{Report, Status}; +use torrust_tracker_lib::servers::apis::v1::context::health_check::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; use crate::common::logging; diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index 
d49d03535..a81ad6f8c 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -1,7 +1,7 @@ use std::str::FromStr; use bittorrent_primitives::info_hash::InfoHash; -use torrust_tracker::servers::apis::v1::context::stats::resources::Stats; +use torrust_tracker_lib::servers::apis::v1::context::stats::resources::Stats; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use uuid::Uuid; diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index b741a1a65..6070eb4f4 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -1,8 +1,8 @@ use std::str::FromStr; use bittorrent_primitives::info_hash::InfoHash; -use torrust_tracker::servers::apis::v1::context::torrent::resources::peer::Peer; -use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{self, Torrent}; +use torrust_tracker_lib::servers::apis::v1::context::torrent::resources::peer::Peer; +use torrust_tracker_lib::servers::apis::v1::context::torrent::resources::torrent::{self, Torrent}; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use uuid::Uuid; diff --git a/tests/servers/health_check_api/contract.rs b/tests/servers/health_check_api/contract.rs index 9c79c4a37..2c7efd547 100644 --- a/tests/servers/health_check_api/contract.rs +++ b/tests/servers/health_check_api/contract.rs @@ -1,5 +1,5 @@ -use torrust_tracker::servers::health_check_api::resources::{Report, Status}; -use torrust_tracker::servers::registar::Registar; +use torrust_tracker_lib::servers::health_check_api::resources::{Report, Status}; +use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_test_helpers::configuration; use crate::common::logging; @@ -32,7 +32,7 @@ async fn 
health_check_endpoint_should_return_status_ok_when_there_is_no_services mod api { use std::sync::Arc; - use torrust_tracker::servers::health_check_api::resources::{Report, Status}; + use torrust_tracker_lib::servers::health_check_api::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; use crate::common::logging; @@ -142,7 +142,7 @@ mod api { mod http { use std::sync::Arc; - use torrust_tracker::servers::health_check_api::resources::{Report, Status}; + use torrust_tracker_lib::servers::health_check_api::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; use crate::common::logging; @@ -251,7 +251,7 @@ mod http { mod udp { use std::sync::Arc; - use torrust_tracker::servers::health_check_api::resources::{Report, Status}; + use torrust_tracker_lib::servers::health_check_api::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; use crate::common::logging; diff --git a/tests/servers/health_check_api/environment.rs b/tests/servers/health_check_api/environment.rs index b101a54e7..17d87d666 100644 --- a/tests/servers/health_check_api/environment.rs +++ b/tests/servers/health_check_api/environment.rs @@ -3,11 +3,11 @@ use std::sync::Arc; use tokio::sync::oneshot::{self, Sender}; use tokio::task::JoinHandle; -use torrust_tracker::bootstrap::jobs::Started; -use torrust_tracker::servers::health_check_api::{server, HEALTH_CHECK_API_LOG_TARGET}; -use torrust_tracker::servers::registar::Registar; -use torrust_tracker::servers::signals::{self, Halted}; use torrust_tracker_configuration::HealthCheckApi; +use torrust_tracker_lib::bootstrap::jobs::Started; +use torrust_tracker_lib::servers::health_check_api::{server, HEALTH_CHECK_API_LOG_TARGET}; +use torrust_tracker_lib::servers::registar::Registar; +use torrust_tracker_lib::servers::signals::{self, Halted}; #[derive(Debug)] pub enum Error { diff --git a/tests/servers/http/client.rs b/tests/servers/http/client.rs index 288987c55..b64a616cd 100644 --- 
a/tests/servers/http/client.rs +++ b/tests/servers/http/client.rs @@ -1,7 +1,7 @@ use std::net::IpAddr; use reqwest::{Client as ReqwestClient, Response}; -use torrust_tracker::core::auth::Key; +use torrust_tracker_lib::core::auth::Key; use super::requests::announce::{self, Query}; use super::requests::scrape; diff --git a/tests/servers/http/connection_info.rs b/tests/servers/http/connection_info.rs index f4081d60e..123ac05f0 100644 --- a/tests/servers/http/connection_info.rs +++ b/tests/servers/http/connection_info.rs @@ -1,4 +1,4 @@ -use torrust_tracker::core::auth::Key; +use torrust_tracker_lib::core::auth::Key; #[derive(Clone, Debug)] pub struct ConnectionInfo { diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 20b126c18..d615d7eaf 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -2,12 +2,12 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use futures::executor::block_on; -use torrust_tracker::bootstrap::app::initialize_with_configuration; -use torrust_tracker::bootstrap::jobs::make_rust_tls; -use torrust_tracker::core::Tracker; -use torrust_tracker::servers::http::server::{HttpServer, Launcher, Running, Stopped}; -use torrust_tracker::servers::registar::Registar; use torrust_tracker_configuration::{Configuration, HttpTracker}; +use torrust_tracker_lib::bootstrap::app::initialize_with_configuration; +use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; +use torrust_tracker_lib::core::Tracker; +use torrust_tracker_lib::servers::http::server::{HttpServer, Launcher, Running, Stopped}; +use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_primitives::peer; pub struct Environment { diff --git a/tests/servers/http/mod.rs b/tests/servers/http/mod.rs index 65affc433..adcdcbf5e 100644 --- a/tests/servers/http/mod.rs +++ b/tests/servers/http/mod.rs @@ -8,7 +8,7 @@ pub mod v1; pub type Started = environment::Environment; use 
percent_encoding::NON_ALPHANUMERIC; -use torrust_tracker::servers::http::server; +use torrust_tracker_lib::servers::http::server; pub type ByteArray20 = [u8; 20]; diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 83ebb9ae3..db03f526e 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -14,7 +14,7 @@ async fn environment_should_be_started_and_stopped() { mod for_all_config_modes { - use torrust_tracker::servers::http::v1::handlers::health_check::{Report, Status}; + use torrust_tracker_lib::servers::http::v1::handlers::health_check::{Report, Status}; use torrust_tracker_test_helpers::configuration; use crate::common::logging; @@ -1381,7 +1381,7 @@ mod configured_as_private { use std::time::Duration; use bittorrent_primitives::info_hash::InfoHash; - use torrust_tracker::core::auth::Key; + use torrust_tracker_lib::core::auth::Key; use torrust_tracker_test_helpers::configuration; use crate::common::logging; @@ -1467,7 +1467,7 @@ mod configured_as_private { use aquatic_udp_protocol::PeerId; use bittorrent_primitives::info_hash::InfoHash; - use torrust_tracker::core::auth::Key; + use torrust_tracker_lib::core::auth::Key; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index 9618bef65..de46b7c10 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -7,8 +7,8 @@ use core::panic; use aquatic_udp_protocol::{ConnectRequest, ConnectionId, Response, TransactionId}; use bittorrent_tracker_client::udp::client::UdpTrackerClient; -use torrust_tracker::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; use torrust_tracker_configuration::DEFAULT_TIMEOUT; +use torrust_tracker_lib::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; use torrust_tracker_test_helpers::configuration; use crate::common::logging; diff --git 
a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index acfb199f2..01639accc 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -2,13 +2,13 @@ use std::net::SocketAddr; use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; -use torrust_tracker::bootstrap::app::initialize_with_configuration; -use torrust_tracker::core::Tracker; -use torrust_tracker::servers::registar::Registar; -use torrust_tracker::servers::udp::server::spawner::Spawner; -use torrust_tracker::servers::udp::server::states::{Running, Stopped}; -use torrust_tracker::servers::udp::server::Server; use torrust_tracker_configuration::{Configuration, UdpTracker, DEFAULT_TIMEOUT}; +use torrust_tracker_lib::bootstrap::app::initialize_with_configuration; +use torrust_tracker_lib::core::Tracker; +use torrust_tracker_lib::servers::registar::Registar; +use torrust_tracker_lib::servers::udp::server::spawner::Spawner; +use torrust_tracker_lib::servers::udp::server::states::{Running, Stopped}; +use torrust_tracker_lib::servers::udp::server::Server; use torrust_tracker_primitives::peer; pub struct Environment diff --git a/tests/servers/udp/mod.rs b/tests/servers/udp/mod.rs index 7eea8683f..4a89b667a 100644 --- a/tests/servers/udp/mod.rs +++ b/tests/servers/udp/mod.rs @@ -1,4 +1,4 @@ -use torrust_tracker::servers::udp::server::states::Running; +use torrust_tracker_lib::servers::udp::server::states::Running; pub mod asserts; pub mod contract; From 0100bfe1a2b3ff070e1da23fe94105e8102bce30 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 27 Dec 2024 09:50:39 +0000 Subject: [PATCH 066/802] chore(deps): update depencencies ``` cargo update Updating crates.io index Locking 7 packages to latest compatible versions Updating cc v1.2.5 -> v1.2.6 Updating quote v1.0.37 -> v1.0.38 Updating reqwest v0.12.9 -> v0.12.10 Updating rustversion v1.0.18 -> v1.0.19 Updating serde_with v3.11.0 -> v3.12.0 Updating serde_with_macros v3.11.0 -> v3.12.0 Updating syn 
v2.0.91 -> v2.0.92 ``` --- Cargo.lock | 103 +++++++++++++++++++++++++++-------------------------- 1 file changed, 52 insertions(+), 51 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 98875f48d..bf806101e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -333,7 +333,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -456,7 +456,7 @@ checksum = "57d123550fa8d071b7255cb0cc04dc302baa6c8c4a79f55701552684d8399bce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -544,7 +544,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -679,7 +679,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -790,9 +790,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.5" +version = "1.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31a0499c1dc64f458ad13872de75c0eb7e3fdb0e67964610c914b034fc5956e" +checksum = "8d6dbb628b8f8555f86d0323c2eb39e3ec81901f4b83e091db8a6a76d316a333" dependencies = [ "jobserver", "libc", @@ -912,7 +912,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -1133,7 +1133,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -1144,7 +1144,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -1188,7 +1188,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", "unicode-xid", ] @@ -1200,7 +1200,7 @@ checksum = "65f152f4b8559c4da5d574bafc7af85454d706b4c5fe8b530d508cacbb6807ea" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + 
"syn 2.0.92", ] [[package]] @@ -1221,7 +1221,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -1424,7 +1424,7 @@ checksum = "e99b8b3c28ae0e84b604c75f721c21dc77afb3706076af5e8216d15fd1deaae3" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -1436,7 +1436,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -1448,7 +1448,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -1526,7 +1526,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -1958,7 +1958,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -2278,7 +2278,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -2345,7 +2345,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", "termcolor", "thiserror 1.0.69", ] @@ -2544,7 +2544,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -2620,7 +2620,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -2694,7 +2694,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -2844,7 +2844,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -2864,7 +2864,7 @@ checksum = 
"af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", "version_check", "yansi", ] @@ -2902,9 +2902,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" dependencies = [ "proc-macro2", ] @@ -3052,9 +3052,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.9" +version = "0.12.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" +checksum = "3d3536321cfc54baa8cf3e273d5e1f63f889067829c4b410fcdbac8ca7b80994" dependencies = [ "base64 0.22.1", "bytes", @@ -3085,6 +3085,7 @@ dependencies = [ "system-configuration", "tokio", "tokio-native-tls", + "tower 0.5.2", "tower-service", "url", "wasm-bindgen", @@ -3173,7 +3174,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.91", + "syn 2.0.92", "unicode-ident", ] @@ -3282,9 +3283,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" +checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" [[package]] name = "ryu" @@ -3402,7 +3403,7 @@ checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -3449,7 +3450,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -3475,9 +3476,9 @@ dependencies = [ [[package]] name = "serde_with" 
-version = "3.11.0" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e28bdad6db2b8340e449f7108f020b3b092e8583a9e3fb82713e1d4e71fe817" +checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa" dependencies = [ "base64 0.22.1", "chrono", @@ -3493,14 +3494,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.11.0" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d846214a9854ef724f3da161b426242d8de7c1fc7de2f89bb1efcb154dca79d" +checksum = "8d00caa5193a3c8362ac2b73be6b9e768aa5a4b2f721d8f4b339600c3cb51f8e" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -3639,9 +3640,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.91" +version = "2.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53cbcb5a243bd33b7858b1d7f4aca2153490815872d86d955d6ea29f743c035" +checksum = "70ae51629bf965c5c098cc9e87908a3df5301051a9e087d6f9bef5c9771ed126" dependencies = [ "proc-macro2", "quote", @@ -3665,7 +3666,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -3766,7 +3767,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -3777,7 +3778,7 @@ checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -3881,7 +3882,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -4211,7 +4212,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -4422,7 +4423,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", "wasm-bindgen-shared", ] @@ -4457,7 +4458,7 @@ checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4686,7 +4687,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", "synstructure", ] @@ -4708,7 +4709,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] @@ -4728,7 +4729,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", "synstructure", ] @@ -4757,7 +4758,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.91", + "syn 2.0.92", ] [[package]] From 333794877233d3c409d49ef11a21716325fa64b8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Jan 2025 10:42:15 +0000 Subject: [PATCH 067/802] chore(deps): udpate dependencies ```output cargo update Updating crates.io index Locking 26 packages to latest compatible versions Removing async-trait v0.1.83 Updating axum v0.7.9 -> v0.8.1 Updating axum-client-ip v0.6.1 -> v0.7.0 Updating axum-core v0.4.5 -> v0.5.0 Updating axum-extra v0.9.6 -> v0.10.0 Updating axum-macros v0.4.2 -> v0.5.0 Updating btoi v0.4.3 -> v0.4.4 Updating cc v1.2.6 -> v1.2.7 Updating glob v0.3.1 -> v0.3.2 Updating matchit v0.7.3 -> v0.8.4 (available: v0.8.6) Removing multer v3.1.0 Updating phf v0.11.2 -> v0.11.3 Updating phf_codegen v0.11.2 -> v0.11.3 Updating phf_generator v0.11.2 -> v0.11.3 Updating phf_shared v0.11.2 -> v0.11.3 Updating pin-project 
v1.1.7 -> v1.1.8 Updating pin-project-internal v1.1.7 -> v1.1.8 Updating pin-project-lite v0.2.15 -> v0.2.16 Updating reqwest v0.12.10 -> v0.12.12 Updating rstest v0.23.0 -> v0.24.0 Updating rstest_macros v0.23.0 -> v0.24.0 Updating serde v1.0.216 -> v1.0.217 Updating serde_derive v1.0.216 -> v1.0.217 Updating serde_json v1.0.134 -> v1.0.135 Updating siphasher v0.3.11 -> v1.0.1 Updating syn v2.0.92 -> v2.0.95 Updating tempfile v3.14.0 -> v3.15.0 Updating winnow v0.6.20 -> v0.6.22 ``` --- Cargo.lock | 214 +++++++++++++++++++++++------------------------------ 1 file changed, 93 insertions(+), 121 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bf806101e..68e32ddbd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -325,17 +325,6 @@ version = "4.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" -[[package]] -name = "async-trait" -version = "0.1.83" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.92", -] - [[package]] name = "atomic" version = "0.6.0" @@ -359,14 +348,14 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "axum" -version = "0.7.9" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +checksum = "6d6fd624c75e18b3b4c6b9caf42b1afe24437daaee904069137d8bab077be8b8" dependencies = [ - "async-trait", "axum-core", "axum-macros", "bytes", + "form_urlencoded", "futures-util", "http", "http-body", @@ -394,9 +383,9 @@ dependencies = [ [[package]] name = "axum-client-ip" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9eefda7e2b27e1bda4d6fa8a06b50803b8793769045918bc37ad062d48a6efac" +checksum = 
"dff8ee1869817523c8f91c20bf17fd932707f66c2e7e0b0f811b29a227289562" dependencies = [ "axum", "forwarded-header-value", @@ -405,11 +394,10 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.5" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +checksum = "df1362f362fd16024ae199c1970ce98f9661bf5ef94b9808fee734bc3698b733" dependencies = [ - "async-trait", "bytes", "futures-util", "http", @@ -426,23 +414,23 @@ dependencies = [ [[package]] name = "axum-extra" -version = "0.9.6" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c794b30c904f0a1c2fb7740f7df7f7972dfaa14ef6f57cb6178dc63e5dca2f04" +checksum = "460fc6f625a1f7705c6cf62d0d070794e94668988b1c38111baeec177c715f7b" dependencies = [ "axum", "axum-core", "bytes", - "fastrand", + "form_urlencoded", "futures-util", "http", "http-body", "http-body-util", "mime", - "multer", "pin-project-lite", "serde", "serde_html_form", + "serde_path_to_error", "tower 0.5.2", "tower-layer", "tower-service", @@ -450,13 +438,13 @@ dependencies = [ [[package]] name = "axum-macros" -version = "0.4.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d123550fa8d071b7255cb0cc04dc302baa6c8c4a79f55701552684d8399bce" +checksum = "604fde5e028fea851ce1d8570bbdc034bec850d157f7569d10f347d06808c05c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -544,7 +532,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -679,7 +667,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -705,9 +693,9 @@ dependencies = [ [[package]] name = "btoi" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9dd6407f73a9b8b6162d8a2ef999fe6afd7cc15902ebf42c5cd296addf17e0ad" +checksum = "9586aa4bb508d369941af10c87af0ce6f4ea051bb4f21047791b921c45822137" dependencies = [ "num-traits", ] @@ -790,9 +778,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.6" +version = "1.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d6dbb628b8f8555f86d0323c2eb39e3ec81901f4b83e091db8a6a76d316a333" +checksum = "a012a0df96dd6d06ba9a1b29d6402d1a5d77c6befd2566afdc26e10603dc93d7" dependencies = [ "jobserver", "libc", @@ -912,7 +900,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -1133,7 +1121,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -1144,7 +1132,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -1188,7 +1176,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", "unicode-xid", ] @@ -1200,7 +1188,7 @@ checksum = "65f152f4b8559c4da5d574bafc7af85454d706b4c5fe8b530d508cacbb6807ea" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -1221,7 +1209,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -1424,7 +1412,7 @@ checksum = "e99b8b3c28ae0e84b604c75f721c21dc77afb3706076af5e8216d15fd1deaae3" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -1436,7 +1424,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -1448,7 +1436,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -1526,7 
+1514,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -1594,9 +1582,9 @@ checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "glob" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "gloo-timers" @@ -1958,7 +1946,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -2213,9 +2201,9 @@ dependencies = [ [[package]] name = "matchit" -version = "0.7.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" +checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" [[package]] name = "memchr" @@ -2278,24 +2266,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.92", -] - -[[package]] -name = "multer" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83e87776546dc87511aa5ee218730c92b666d7264ab6ed41f9d215af9cd5224b" -dependencies = [ - "bytes", - "encoding_rs", - "futures-util", - "http", - "httparse", - "memchr", - "mime", - "spin", - "version_check", + "syn 2.0.95", ] [[package]] @@ -2345,7 +2316,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", "termcolor", "thiserror 1.0.69", ] @@ -2544,7 +2515,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -2620,7 +2591,7 @@ dependencies = [ "proc-macro2", 
"proc-macro2-diagnostics", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -2641,18 +2612,18 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "phf" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" +checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" dependencies = [ "phf_shared", ] [[package]] name = "phf_codegen" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8d39688d359e6b34654d328e262234662d16cc0f60ec8dcbe5e718709342a5a" +checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a" dependencies = [ "phf_generator", "phf_shared", @@ -2660,9 +2631,9 @@ dependencies = [ [[package]] name = "phf_generator" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ "phf_shared", "rand", @@ -2670,38 +2641,38 @@ dependencies = [ [[package]] name = "phf_shared" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" dependencies = [ "siphasher", ] [[package]] name = "pin-project" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" +checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.7" +version 
= "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" +checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] name = "pin-project-lite" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -2844,7 +2815,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -2864,7 +2835,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", "version_check", "yansi", ] @@ -3052,9 +3023,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.10" +version = "0.12.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d3536321cfc54baa8cf3e273d5e1f63f889067829c4b410fcdbac8ca7b80994" +checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da" dependencies = [ "base64 0.22.1", "bytes", @@ -3150,21 +3121,21 @@ dependencies = [ [[package]] name = "rstest" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a2c585be59b6b5dd66a9d2084aa1d8bd52fbdb806eafdeffb52791147862035" +checksum = "03e905296805ab93e13c1ec3a03f4b6c4f35e9498a3d5fa96dc626d22c03cd89" dependencies = [ - "futures", "futures-timer", + "futures-util", "rstest_macros", "rustc_version", ] [[package]] name = "rstest_macros" -version = "0.23.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"825ea780781b15345a146be27eaefb05085e337e869bff01b4306a4fd4a9ad5a" +checksum = "ef0053bbffce09062bee4bcc499b0fbe7a57b879f1efe088d6d8d4c7adcdef9b" dependencies = [ "cfg-if", "glob", @@ -3174,7 +3145,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.92", + "syn 2.0.95", "unicode-ident", ] @@ -3369,9 +3340,9 @@ checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" [[package]] name = "serde" -version = "1.0.216" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" dependencies = [ "serde_derive", ] @@ -3397,13 +3368,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.216" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -3421,9 +3392,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.134" +version = "1.0.135" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d00f4175c42ee48b15416f6193a959ba3a0d67fc699a0db9ad12df9f83991c7d" +checksum = "2b0d7ba2887406110130a978386c4e1befb98c674b4fba677954e4db976630d9" dependencies = [ "indexmap 2.7.0", "itoa", @@ -3450,7 +3421,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -3501,7 +3472,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -3558,9 +3529,9 @@ checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" [[package]] name = "siphasher" -version = 
"0.3.11" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" [[package]] name = "slab" @@ -3640,9 +3611,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.92" +version = "2.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ae51629bf965c5c098cc9e87908a3df5301051a9e087d6f9bef5c9771ed126" +checksum = "46f71c0377baf4ef1cc3e3402ded576dccc315800fbc62dfc7fe04b009773b4a" dependencies = [ "proc-macro2", "quote", @@ -3666,7 +3637,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -3715,12 +3686,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.14.0" +version = "3.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" +checksum = "9a8a559c81686f576e8cd0290cd2a24a2a9ad80c98b3478856500fcbd7acd704" dependencies = [ "cfg-if", "fastrand", + "getrandom", "once_cell", "rustix", "windows-sys 0.59.0", @@ -3767,7 +3739,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -3778,7 +3750,7 @@ checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -3882,7 +3854,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -4212,7 +4184,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", 
] [[package]] @@ -4423,7 +4395,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", "wasm-bindgen-shared", ] @@ -4458,7 +4430,7 @@ checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4633,9 +4605,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.20" +version = "0.6.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +checksum = "39281189af81c07ec09db316b302a3e67bf9bd7cbf6c820b50e35fee9c2fa980" dependencies = [ "memchr", ] @@ -4687,7 +4659,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", "synstructure", ] @@ -4709,7 +4681,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] @@ -4729,7 +4701,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", "synstructure", ] @@ -4758,7 +4730,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.92", + "syn 2.0.95", ] [[package]] From 366ef1c9df44ff7f66f03d025adf3f370d8f9624 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Jan 2025 12:37:28 +0000 Subject: [PATCH 068/802] fix: lifetime parameters on function `from_request_parts` do not match the trait ``` error[E0195]: lifetime parameters or bounds on associated function `from_request_parts` do not match the trait declaration --> src/servers/http/v1/extractors/announce_request.rs:53:26 | 53 | fn from_request_parts<'life0, 
'life1, 'async_trait>( | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ lifetimes do not match associated function in trait error[E0195]: lifetime parameters or bounds on associated function `from_request_parts` do not match the trait declaration --> src/servers/http/v1/extractors/authentication_key.rs:79:26 | 79 | fn from_request_parts<'life0, 'life1, 'async_trait>( | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ lifetimes do not match associated function in trait error[E0195]: lifetime parameters or bounds on associated function `from_request_parts` do not match the trait declaration --> src/servers/http/v1/extractors/client_ip_sources.rs:60:26 | 60 | fn from_request_parts<'life0, 'life1, 'async_trait>( | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ lifetimes do not match associated function in trait error[E0195]: lifetime parameters or bounds on associated function `from_request_parts` do not match the trait declaration --> src/servers/http/v1/extractors/scrape_request.rs:53:26 | 53 | fn from_request_parts<'life0, 'life1, 'async_trait>( | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ lifetimes do not match associated function in trait For more information about this error, try `rustc --explain E0195`. ``` --- .../http/v1/extractors/announce_request.rs | 13 ++---------- .../http/v1/extractors/authentication_key.rs | 20 +++++-------------- .../http/v1/extractors/client_ip_sources.rs | 18 ++++------------- .../http/v1/extractors/scrape_request.rs | 13 ++---------- 4 files changed, 13 insertions(+), 51 deletions(-) diff --git a/src/servers/http/v1/extractors/announce_request.rs b/src/servers/http/v1/extractors/announce_request.rs index ea9a22c7a..32b69ae0b 100644 --- a/src/servers/http/v1/extractors/announce_request.rs +++ b/src/servers/http/v1/extractors/announce_request.rs @@ -27,12 +27,12 @@ //! ```text //! d14:failure reason240:Bad request. 
Cannot parse query params for announce request: invalid param value invalid for info_hash in not enough bytes for infohash: got 7 bytes, expected 20 src/shared/bit_torrent/info_hash.rs:240:27, src/servers/http/v1/requests/announce.rs:182:42e //! ``` +use std::future::Future; use std::panic::Location; use axum::extract::FromRequestParts; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; -use futures::future::BoxFuture; use futures::FutureExt; use crate::servers::http::v1::query::Query; @@ -49,16 +49,7 @@ where { type Rejection = Response; - #[must_use] - fn from_request_parts<'life0, 'life1, 'async_trait>( - parts: &'life0 mut Parts, - _state: &'life1 S, - ) -> BoxFuture<'async_trait, Result> - where - 'life0: 'async_trait, - 'life1: 'async_trait, - Self: 'async_trait, - { + fn from_request_parts(parts: &mut Parts, _state: &S) -> impl Future> + Send { async { match extract_announce_from(parts.uri.query()) { Ok(announce_request) => Ok(ExtractRequest(announce_request)), diff --git a/src/servers/http/v1/extractors/authentication_key.rs b/src/servers/http/v1/extractors/authentication_key.rs index e86241edf..35efdf93d 100644 --- a/src/servers/http/v1/extractors/authentication_key.rs +++ b/src/servers/http/v1/extractors/authentication_key.rs @@ -42,14 +42,13 @@ //! > Neither [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) //! > nor [The Private Torrents](https://www.bittorrent.org/beps/bep_0027.html) //! > specifications specify any HTTP status code for authentication errors. 
+use std::future::Future; use std::panic::Location; use axum::extract::rejection::PathRejection; use axum::extract::{FromRequestParts, Path}; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; -use futures::future::BoxFuture; -use futures::FutureExt; use serde::Deserialize; use crate::core::auth::Key; @@ -71,21 +70,13 @@ impl KeyParam { impl FromRequestParts for Extract where - S: Send + Sync, + S: Send + Sync + 'static, { type Rejection = Response; - #[must_use] - fn from_request_parts<'life0, 'life1, 'async_trait>( - parts: &'life0 mut Parts, - state: &'life1 S, - ) -> BoxFuture<'async_trait, Result> - where - 'life0: 'async_trait, - 'life1: 'async_trait, - Self: 'async_trait, - { - async { + #[allow(clippy::manual_async_fn)] + fn from_request_parts(parts: &mut Parts, state: &S) -> impl Future> + Send { + async move { // Extract `key` from URL path with Axum `Path` extractor let maybe_path_with_key = Path::::from_request_parts(parts, state).await; @@ -94,7 +85,6 @@ where Err(error) => Err(error.into_response()), } } - .boxed() } } diff --git a/src/servers/http/v1/extractors/client_ip_sources.rs b/src/servers/http/v1/extractors/client_ip_sources.rs index 5b235fbe0..1ca5a22d0 100644 --- a/src/servers/http/v1/extractors/client_ip_sources.rs +++ b/src/servers/http/v1/extractors/client_ip_sources.rs @@ -35,14 +35,13 @@ //! `right_most_x_forwarded_for` = 126.0.0.2 //! `connection_info_ip` = 126.0.0.3 //! 
``` +use std::future::Future; use std::net::SocketAddr; use axum::extract::{ConnectInfo, FromRequestParts}; use axum::http::request::Parts; use axum::response::Response; use axum_client_ip::RightmostXForwardedFor; -use futures::future::BoxFuture; -use futures::FutureExt; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; @@ -56,17 +55,9 @@ where { type Rejection = Response; - #[must_use] - fn from_request_parts<'life0, 'life1, 'async_trait>( - parts: &'life0 mut Parts, - state: &'life1 S, - ) -> BoxFuture<'async_trait, Result> - where - 'life0: 'async_trait, - 'life1: 'async_trait, - Self: 'async_trait, - { - async { + #[allow(clippy::manual_async_fn)] + fn from_request_parts(parts: &mut Parts, state: &S) -> impl Future> + Send { + async move { let right_most_x_forwarded_for = match RightmostXForwardedFor::from_request_parts(parts, state).await { Ok(right_most_x_forwarded_for) => Some(right_most_x_forwarded_for.0), Err(_) => None, @@ -82,6 +73,5 @@ where connection_info_ip, })) } - .boxed() } } diff --git a/src/servers/http/v1/extractors/scrape_request.rs b/src/servers/http/v1/extractors/scrape_request.rs index 35c0bb1b5..890c4033c 100644 --- a/src/servers/http/v1/extractors/scrape_request.rs +++ b/src/servers/http/v1/extractors/scrape_request.rs @@ -27,12 +27,12 @@ //! ```text //! d14:failure reason235:Bad request. Cannot parse query params for scrape request: invalid param value invalid for info_hash in not enough bytes for infohash: got 7 bytes, expected 20 src/shared/bit_torrent/info_hash.rs:240:27, src/servers/http/v1/requests/scrape.rs:66:46e //! 
``` +use std::future::Future; use std::panic::Location; use axum::extract::FromRequestParts; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; -use futures::future::BoxFuture; use futures::FutureExt; use crate::servers::http::v1::query::Query; @@ -49,16 +49,7 @@ where { type Rejection = Response; - #[must_use] - fn from_request_parts<'life0, 'life1, 'async_trait>( - parts: &'life0 mut Parts, - _state: &'life1 S, - ) -> BoxFuture<'async_trait, Result> - where - 'life0: 'async_trait, - 'life1: 'async_trait, - Self: 'async_trait, - { + fn from_request_parts(parts: &mut Parts, _state: &S) -> impl Future> + Send { async { match extract_scrape_from(parts.uri.query()) { Ok(scrape_request) => Ok(ExtractRequest(scrape_request)), From bfab30f40c62ff113274a2fb55469ea3bde580f5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Jan 2025 12:51:09 +0000 Subject: [PATCH 069/802] fix: update axum routes captures You have to use `{capture}` instead of semicolon `:` to insert segment variables in the URL. It fixes these tests: ``` failures: ---- bootstrap::jobs::tracker_apis::tests::it_should_start_http_tracker stdout ---- thread 'bootstrap::jobs::tracker_apis::tests::it_should_start_http_tracker' panicked at src/servers/apis/v1/context/auth_key/routes.rs:21:10: Path segments must not start with `:`. For capture groups, use `{capture}`. If you meant to literally match a segment starting with a colon, call `without_v07_checks` on the router. note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace thread 'bootstrap::jobs::tracker_apis::tests::it_should_start_http_tracker' panicked at src/servers/apis/server.rs:159:17: Unable to start API server: channel closed ---- bootstrap::jobs::http_tracker::tests::it_should_start_http_tracker stdout ---- thread 'bootstrap::jobs::http_tracker::tests::it_should_start_http_tracker' panicked at src/servers/http/v1/routes.rs:41:10: Path segments must not start with `:`. 
For capture groups, use `{capture}`. If you meant to literally match a segment starting with a colon, call `without_v07_checks` on the router. thread 'bootstrap::jobs::http_tracker::tests::it_should_start_http_tracker' panicked at src/servers/http/server.rs:170:38: it should be able to start the service: RecvError(()) ---- servers::http::server::tests::it_should_be_able_to_start_and_stop stdout ---- thread 'servers::http::server::tests::it_should_be_able_to_start_and_stop' panicked at src/servers/http/v1/routes.rs:41:10: Path segments must not start with `:`. For capture groups, use `{capture}`. If you meant to literally match a segment starting with a colon, call `without_v07_checks` on the router. thread 'servers::http::server::tests::it_should_be_able_to_start_and_stop' panicked at src/servers/http/server.rs:170:38: it should be able to start the service: RecvError(()) ---- servers::apis::server::tests::it_should_be_able_to_start_and_stop stdout ---- thread 'servers::apis::server::tests::it_should_be_able_to_start_and_stop' panicked at src/servers/apis/v1/context/auth_key/routes.rs:21:10: Path segments must not start with `:`. For capture groups, use `{capture}`. If you meant to literally match a segment starting with a colon, call `without_v07_checks` on the router. thread 'servers::apis::server::tests::it_should_be_able_to_start_and_stop' panicked at src/servers/apis/server.rs:159:17: Unable to start API server: channel closed failures: bootstrap::jobs::http_tracker::tests::it_should_start_http_tracker bootstrap::jobs::tracker_apis::tests::it_should_start_http_tracker servers::apis::server::tests::it_should_be_able_to_start_and_stop servers::http::server::tests::it_should_be_able_to_start_and_stop test result: FAILED. 
203 passed; 4 failed; 0 ignored; 0 measured; 0 filtered out; finished in 3.05s ``` --- src/servers/apis/v1/context/auth_key/routes.rs | 2 +- src/servers/apis/v1/context/torrent/routes.rs | 2 +- src/servers/apis/v1/context/whitelist/routes.rs | 4 ++-- src/servers/http/v1/routes.rs | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/servers/apis/v1/context/auth_key/routes.rs b/src/servers/apis/v1/context/auth_key/routes.rs index 60ccd77ab..ac11281ee 100644 --- a/src/servers/apis/v1/context/auth_key/routes.rs +++ b/src/servers/apis/v1/context/auth_key/routes.rs @@ -27,7 +27,7 @@ pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { // // The POST /key/:seconds_valid has been deprecated and it will removed in the future. // Use POST /keys - &format!("{prefix}/key/:seconds_valid_or_key"), + &format!("{prefix}/key/{{seconds_valid_or_key}}"), post(generate_auth_key_handler) .with_state(tracker.clone()) .delete(delete_auth_key_handler) diff --git a/src/servers/apis/v1/context/torrent/routes.rs b/src/servers/apis/v1/context/torrent/routes.rs index 6f8c28df5..bca594e3d 100644 --- a/src/servers/apis/v1/context/torrent/routes.rs +++ b/src/servers/apis/v1/context/torrent/routes.rs @@ -17,7 +17,7 @@ pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { // Torrents router .route( - &format!("{prefix}/torrent/:info_hash"), + &format!("{prefix}/torrent/{{info_hash}}"), get(get_torrent_handler).with_state(tracker.clone()), ) .route(&format!("{prefix}/torrents"), get(get_torrents_handler).with_state(tracker)) diff --git a/src/servers/apis/v1/context/whitelist/routes.rs b/src/servers/apis/v1/context/whitelist/routes.rs index e4e85181f..35312ea97 100644 --- a/src/servers/apis/v1/context/whitelist/routes.rs +++ b/src/servers/apis/v1/context/whitelist/routes.rs @@ -20,11 +20,11 @@ pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { router // Whitelisted torrents .route( - &format!("{prefix}/:info_hash"), + 
&format!("{prefix}/{{info_hash}}"), post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), ) .route( - &format!("{prefix}/:info_hash"), + &format!("{prefix}/{{info_hash}}"), delete(remove_torrent_from_whitelist_handler).with_state(tracker.clone()), ) // Whitelist commands diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index a5d402693..3c6926c37 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -38,10 +38,10 @@ pub fn router(tracker: Arc, server_socket_addr: SocketAddr) -> Router { .route("/health_check", get(health_check::handler)) // Announce request .route("/announce", get(announce::handle_without_key).with_state(tracker.clone())) - .route("/announce/:key", get(announce::handle_with_key).with_state(tracker.clone())) + .route("/announce/{key}", get(announce::handle_with_key).with_state(tracker.clone())) // Scrape request .route("/scrape", get(scrape::handle_without_key).with_state(tracker.clone())) - .route("/scrape/:key", get(scrape::handle_with_key).with_state(tracker)) + .route("/scrape/{key}", get(scrape::handle_with_key).with_state(tracker)) // Add extension to get the client IP from the connection info .layer(SecureClientIpSource::ConnectInfo.into_extension()) .layer(CompressionLayer::new()) From f4a73997969e58457f836f72cbbef93026bb21a8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Jan 2025 12:58:35 +0000 Subject: [PATCH 070/802] test: fix should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid --- tests/servers/api/v1/asserts.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/servers/api/v1/asserts.rs b/tests/servers/api/v1/asserts.rs index f3d04d524..b56144d3f 100644 --- a/tests/servers/api/v1/asserts.rs +++ b/tests/servers/api/v1/asserts.rs @@ -117,7 +117,7 @@ pub async fn assert_unprocessable_auth_key_duration_param(response: Response, _i pub async fn assert_invalid_key_duration_param(response: Response, invalid_key_duration: &str) { 
assert_bad_request( response, - &format!("Invalid URL: Cannot parse `\"{invalid_key_duration}\"` to a `u64`"), + &format!("Invalid URL: Cannot parse `{invalid_key_duration}` to a `u64`"), ) .await; } From cb406871472c8270b4eee3e53a3151db3fe4e8f0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Jan 2025 13:00:57 +0000 Subject: [PATCH 071/802] test: should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_parsed --- tests/servers/api/v1/contract/context/torrent.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index 6070eb4f4..602545273 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -213,7 +213,7 @@ async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_p ) .await; - assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; + assert_bad_request(response, "Failed to deserialize query string: limit: invalid digit found in string").await; } env.stop().await; From ce12fe72f8dac7522a6bd1a7ae321404c3d21aa1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Jan 2025 13:03:08 +0000 Subject: [PATCH 072/802] test: fix test should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed ``` servers::api::v1::contract::context::torrent::should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed ``` --- tests/servers/api/v1/contract/context/torrent.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index 602545273..fff8129a5 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -189,7 +189,7 @@ async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_ ) .await; 
- assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; + assert_bad_request(response, "Failed to deserialize query string: offset: invalid digit found in string").await; } env.stop().await; From 76329d742ff79aebddb33be39e0782309fbed300 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Jan 2025 13:05:39 +0000 Subject: [PATCH 073/802] chore: fix format --- tests/servers/api/v1/contract/context/torrent.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index fff8129a5..260fe4a3a 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -189,7 +189,11 @@ async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_ ) .await; - assert_bad_request(response, "Failed to deserialize query string: offset: invalid digit found in string").await; + assert_bad_request( + response, + "Failed to deserialize query string: offset: invalid digit found in string", + ) + .await; } env.stop().await; @@ -213,7 +217,11 @@ async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_p ) .await; - assert_bad_request(response, "Failed to deserialize query string: limit: invalid digit found in string").await; + assert_bad_request( + response, + "Failed to deserialize query string: limit: invalid digit found in string", + ) + .await; } env.stop().await; From 2ff476b541794a56188f866927e5660e20ba268f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Jan 2025 15:40:40 +0000 Subject: [PATCH 074/802] refactor: rename enum variand Udp4RequestAborted The event is used for both UDP 4 and UDP 6 requests aborted. 
--- src/core/statistics/event/handler.rs | 2 +- src/core/statistics/event/mod.rs | 2 +- src/servers/udp/server/launcher.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/core/statistics/event/handler.rs b/src/core/statistics/event/handler.rs index 5acc5e12c..3e2e64866 100644 --- a/src/core/statistics/event/handler.rs +++ b/src/core/statistics/event/handler.rs @@ -24,7 +24,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { } // UDP - Event::Udp4RequestAborted => { + Event::UdpRequestAborted => { stats_repository.increase_udp_requests_aborted().await; } diff --git a/src/core/statistics/event/mod.rs b/src/core/statistics/event/mod.rs index b14995cc1..70c543c70 100644 --- a/src/core/statistics/event/mod.rs +++ b/src/core/statistics/event/mod.rs @@ -18,7 +18,7 @@ pub enum Event { Tcp4Scrape, Tcp6Announce, Tcp6Scrape, - Udp4RequestAborted, + UdpRequestAborted, Udp4Request, Udp4Connect, Udp4Announce, diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index ada50eb31..4fe0b1cba 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -202,7 +202,7 @@ impl Launcher { if old_request_aborted { // Evicted task from active requests buffer was aborted. 
- tracker.send_stats_event(statistics::event::Event::Udp4RequestAborted).await; + tracker.send_stats_event(statistics::event::Event::UdpRequestAborted).await; } } else { tokio::task::yield_now().await; From 6f9b44c4fa5d93d49fbcb0b51c39fff4ebf25d61 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Jan 2025 16:01:47 +0000 Subject: [PATCH 075/802] feat: [#1145] add banned reqs counter to stats --- src/core/services/statistics/mod.rs | 1 + src/core/statistics/event/handler.rs | 3 ++ src/core/statistics/event/mod.rs | 1 + src/core/statistics/metrics.rs | 3 ++ src/core/statistics/repository.rs | 6 +++ .../apis/v1/context/stats/resources.rs | 53 ++++++++++--------- src/servers/udp/server/launcher.rs | 3 ++ .../servers/api/v1/contract/context/stats.rs | 1 + tests/servers/udp/contract.rs | 8 +++ 9 files changed, 55 insertions(+), 24 deletions(-) diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index 10e1c60fa..b4cc32198 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -76,6 +76,7 @@ pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { tcp6_scrapes_handled: stats.tcp6_scrapes_handled, // UDP udp_requests_aborted: stats.udp_requests_aborted, + udp_requests_banned: stats.udp_requests_banned, udp4_requests: stats.udp4_requests, udp4_connections_handled: stats.udp4_connections_handled, udp4_announces_handled: stats.udp4_announces_handled, diff --git a/src/core/statistics/event/handler.rs b/src/core/statistics/event/handler.rs index 3e2e64866..06ff6abe2 100644 --- a/src/core/statistics/event/handler.rs +++ b/src/core/statistics/event/handler.rs @@ -27,6 +27,9 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { Event::UdpRequestAborted => { stats_repository.increase_udp_requests_aborted().await; } + Event::UdpRequestBanned => { + stats_repository.increase_udp_requests_banned().await; + } // UDP4 Event::Udp4Request => { diff --git a/src/core/statistics/event/mod.rs 
b/src/core/statistics/event/mod.rs index 70c543c70..b2344fb78 100644 --- a/src/core/statistics/event/mod.rs +++ b/src/core/statistics/event/mod.rs @@ -19,6 +19,7 @@ pub enum Event { Tcp6Announce, Tcp6Scrape, UdpRequestAborted, + UdpRequestBanned, Udp4Request, Udp4Connect, Udp4Announce, diff --git a/src/core/statistics/metrics.rs b/src/core/statistics/metrics.rs index 970302816..47bc5af6e 100644 --- a/src/core/statistics/metrics.rs +++ b/src/core/statistics/metrics.rs @@ -31,6 +31,9 @@ pub struct Metrics { /// Total number of UDP (UDP tracker) requests aborted. pub udp_requests_aborted: u64, + /// Total number of UDP (UDP tracker) requests banned. + pub udp_requests_banned: u64, + /// Total number of UDP (UDP tracker) requests from IPv4 peers. pub udp4_requests: u64, diff --git a/src/core/statistics/repository.rs b/src/core/statistics/repository.rs index bdbc046de..563e87534 100644 --- a/src/core/statistics/repository.rs +++ b/src/core/statistics/repository.rs @@ -70,6 +70,12 @@ impl Repository { drop(stats_lock); } + pub async fn increase_udp_requests_banned(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp_requests_banned += 1; + drop(stats_lock); + } + pub async fn increase_udp4_requests(&self) { let mut stats_lock = self.stats.write().await; stats_lock.udp4_requests += 1; diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index 55cb3a581..fd73499ef 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -36,6 +36,8 @@ pub struct Stats { /// Total number of UDP (UDP tracker) requests aborted. pub udp_requests_aborted: u64, + /// Total number of UDP (UDP tracker) requests banned. + pub udp_requests_banned: u64, /// Total number of UDP (UDP tracker) requests from IPv4 peers. 
pub udp4_requests: u64, @@ -80,6 +82,7 @@ impl From for Stats { tcp6_scrapes_handled: metrics.protocol_metrics.tcp6_scrapes_handled, // UDP udp_requests_aborted: metrics.protocol_metrics.udp_requests_aborted, + udp_requests_banned: metrics.protocol_metrics.udp_requests_banned, udp4_requests: metrics.protocol_metrics.udp4_requests, udp4_connections_handled: metrics.protocol_metrics.udp4_connections_handled, udp4_announces_handled: metrics.protocol_metrics.udp4_announces_handled, @@ -124,18 +127,19 @@ mod tests { tcp6_scrapes_handled: 10, // UDP udp_requests_aborted: 11, - udp4_requests: 12, - udp4_connections_handled: 13, - udp4_announces_handled: 14, - udp4_scrapes_handled: 15, - udp4_responses: 16, - udp4_errors_handled: 17, - udp6_requests: 18, - udp6_connections_handled: 19, - udp6_announces_handled: 20, - udp6_scrapes_handled: 21, - udp6_responses: 22, - udp6_errors_handled: 23 + udp_requests_banned: 12, + udp4_requests: 13, + udp4_connections_handled: 14, + udp4_announces_handled: 15, + udp4_scrapes_handled: 16, + udp4_responses: 17, + udp4_errors_handled: 18, + udp6_requests: 19, + udp6_connections_handled: 20, + udp6_announces_handled: 21, + udp6_scrapes_handled: 22, + udp6_responses: 23, + udp6_errors_handled: 24 } }), Stats { @@ -152,18 +156,19 @@ mod tests { tcp6_scrapes_handled: 10, // UDP udp_requests_aborted: 11, - udp4_requests: 12, - udp4_connections_handled: 13, - udp4_announces_handled: 14, - udp4_scrapes_handled: 15, - udp4_responses: 16, - udp4_errors_handled: 17, - udp6_requests: 18, - udp6_connections_handled: 19, - udp6_announces_handled: 20, - udp6_scrapes_handled: 21, - udp6_responses: 22, - udp6_errors_handled: 23 + udp_requests_banned: 12, + udp4_requests: 13, + udp4_connections_handled: 14, + udp4_announces_handled: 15, + udp4_scrapes_handled: 16, + udp4_responses: 17, + udp4_errors_handled: 18, + udp6_requests: 19, + udp6_connections_handled: 20, + udp6_announces_handled: 21, + udp6_scrapes_handled: 22, + udp6_responses: 23, + 
udp6_errors_handled: 24 } ); } diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index 4fe0b1cba..15c7ca017 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -175,6 +175,9 @@ impl Launcher { if ban_service.read().await.is_banned(&req.from.ip()) { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop continue: (banned ip)"); + + tracker.send_stats_event(statistics::event::Event::UdpRequestBanned).await; + continue; } diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index a81ad6f8c..087c36cc6 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -45,6 +45,7 @@ async fn should_allow_getting_tracker_statistics() { tcp6_scrapes_handled: 0, // UDP udp_requests_aborted: 0, + udp_requests_banned: 0, udp4_requests: 0, udp4_connections_handled: 0, udp4_announces_handled: 0, diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index de46b7c10..b77343785 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -229,6 +229,7 @@ mod receiving_an_announce_request { logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; + let tracker = env.tracker.clone(); let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { Ok(udp_tracker_client) => udp_tracker_client, @@ -267,6 +268,8 @@ mod receiving_an_announce_request { info_hash, ); + let udp_requests_banned_before = tracker.get_stats().await.udp_requests_banned; + // This should return a timeout error match client.send(announce_request.into()).await { Ok(_) => (), @@ -275,6 +278,11 @@ mod receiving_an_announce_request { assert!(client.receive().await.is_err()); + let udp_requests_banned_after = tracker.get_stats().await.udp_requests_banned; + + // UDP counter for banned requests should be increased by 1 + 
assert_eq!(udp_requests_banned_after, udp_requests_banned_before + 1); + env.stop().await; } } From 1299f17237923c36a5efa0ceb2d9e407437702b4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Jan 2025 16:35:30 +0000 Subject: [PATCH 076/802] feat: make ban service generic for all trackers All UDP trackers will share the same service. In the future, the HTTP trackers can also use it. The service was not included inside the tracker (easy solution) because the Tracker type is too big. It has become the app container. In fact, we want to reduce it in the future by extracting the services outside of the tracker: stats, whitelist, etc. Those services will be instantiated independently in the future in the app bootstrap. --- src/app.rs | 14 +++++++++++--- src/bootstrap/app.rs | 9 +++++++-- src/bootstrap/jobs/udp_tracker.rs | 13 ++++++++++--- src/console/profiling.rs | 4 ++-- src/main.rs | 4 ++-- src/servers/udp/server/banning.rs | 11 +++-------- src/servers/udp/server/launcher.rs | 21 +++++++++++---------- src/servers/udp/server/mod.rs | 13 +++++++++++-- src/servers/udp/server/spawner.rs | 6 ++++-- src/servers/udp/server/states.rs | 10 ++++++++-- tests/servers/udp/environment.rs | 10 +++++++++- 11 files changed, 78 insertions(+), 37 deletions(-) diff --git a/src/app.rs b/src/app.rs index 06fea4d2e..f40072132 100644 --- a/src/app.rs +++ b/src/app.rs @@ -23,12 +23,14 @@ //! - Tracker REST API: the tracker API can be enabled/disabled. use std::sync::Arc; +use tokio::sync::RwLock; use tokio::task::JoinHandle; use torrust_tracker_configuration::Configuration; use tracing::instrument; use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; use crate::servers::registar::Registar; +use crate::servers::udp::server::banning::BanService; use crate::{core, servers}; /// # Panics /// /// Will panic if: /// /// - Can't retrieve tracker keys from database. /// - Can't load whitelist from database. 
-#[instrument(skip(config, tracker))] -pub async fn start(config: &Configuration, tracker: Arc) -> Vec> { +#[instrument(skip(config, tracker, ban_service))] +pub async fn start( + config: &Configuration, + tracker: Arc, + ban_service: Arc>, +) -> Vec> { if config.http_api.is_none() && (config.udp_trackers.is_none() || config.udp_trackers.as_ref().map_or(true, std::vec::Vec::is_empty)) && (config.http_trackers.is_none() || config.http_trackers.as_ref().map_or(true, std::vec::Vec::is_empty)) @@ -75,7 +81,9 @@ pub async fn start(config: &Configuration, tracker: Arc) -> Vec (Configuration, Arc) { +pub fn setup() -> (Configuration, Arc, Arc>) { #[cfg(not(test))] check_seed(); @@ -44,9 +47,11 @@ pub fn setup() -> (Configuration, Arc) { let tracker = initialize_with_configuration(&configuration); + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + tracing::info!("Configuration:\n{}", configuration.clone().mask_secrets().to_json()); - (configuration, tracker) + (configuration, tracker, ban_service) } /// checks if the seed is the instance seed in production. diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 6aab06d4f..8948811af 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -8,12 +8,14 @@ //! > for the configuration options. use std::sync::Arc; +use tokio::sync::RwLock; use tokio::task::JoinHandle; use torrust_tracker_configuration::UdpTracker; use tracing::instrument; use crate::core; use crate::servers::registar::ServiceRegistrationForm; +use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::spawner::Spawner; use crate::servers::udp::server::Server; use crate::servers::udp::UDP_TRACKER_LOG_TARGET; @@ -29,13 +31,18 @@ use crate::servers::udp::UDP_TRACKER_LOG_TARGET; /// It will panic if the task did not finish successfully. 
#[must_use] #[allow(clippy::async_yields_async)] -#[instrument(skip(config, tracker, form))] -pub async fn start_job(config: &UdpTracker, tracker: Arc, form: ServiceRegistrationForm) -> JoinHandle<()> { +#[instrument(skip(config, tracker, ban_service, form))] +pub async fn start_job( + config: &UdpTracker, + tracker: Arc, + ban_service: Arc>, + form: ServiceRegistrationForm, +) -> JoinHandle<()> { let bind_to = config.bind_address; let cookie_lifetime = config.cookie_lifetime; let server = Server::new(Spawner::new(bind_to)) - .start(tracker, form, cookie_lifetime) + .start(tracker, ban_service, form, cookie_lifetime) .await .expect("it should be able to start the udp tracker"); diff --git a/src/console/profiling.rs b/src/console/profiling.rs index 5fb507197..1d31af3ce 100644 --- a/src/console/profiling.rs +++ b/src/console/profiling.rs @@ -179,9 +179,9 @@ pub async fn run() { return; }; - let (config, tracker) = bootstrap::app::setup(); + let (config, tracker, ban_service) = bootstrap::app::setup(); - let jobs = app::start(&config, tracker).await; + let jobs = app::start(&config, tracker, ban_service).await; // Run the tracker for a fixed duration let run_duration = sleep(Duration::from_secs(duration_secs)); diff --git a/src/main.rs b/src/main.rs index 0e2bcfbc9..206633f8c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,9 +2,9 @@ use torrust_tracker_lib::{app, bootstrap}; #[tokio::main] async fn main() { - let (config, tracker) = bootstrap::app::setup(); + let (config, tracker, ban_service) = bootstrap::app::setup(); - let jobs = app::start(&config, tracker).await; + let jobs = app::start(&config, tracker, ban_service).await; // handle the signals tokio::select! 
{ diff --git a/src/servers/udp/server/banning.rs b/src/servers/udp/server/banning.rs index df236820c..dada592be 100644 --- a/src/servers/udp/server/banning.rs +++ b/src/servers/udp/server/banning.rs @@ -20,7 +20,6 @@ use std::net::IpAddr; use bloom::{CountingBloomFilter, ASMS}; use tokio::time::Instant; -use url::Url; use crate::servers::udp::UDP_TRACKER_LOG_TARGET; @@ -28,16 +27,14 @@ pub struct BanService { max_connection_id_errors_per_ip: u32, fuzzy_error_counter: CountingBloomFilter, accurate_error_counter: HashMap, - local_addr: Url, last_connection_id_errors_reset: Instant, } impl BanService { #[must_use] - pub fn new(max_connection_id_errors_per_ip: u32, local_addr: Url) -> Self { + pub fn new(max_connection_id_errors_per_ip: u32) -> Self { Self { max_connection_id_errors_per_ip, - local_addr, fuzzy_error_counter: CountingBloomFilter::with_rate(4, 0.01, 100), accurate_error_counter: HashMap::new(), last_connection_id_errors_reset: tokio::time::Instant::now(), @@ -82,8 +79,7 @@ impl BanService { self.last_connection_id_errors_reset = Instant::now(); - let local_addr = self.local_addr.to_string(); - tracing::info!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop (connection id errors filter cleared)"); + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp::run_udp_server::loop (connection id errors filter cleared)"); } } @@ -95,8 +91,7 @@ mod tests { /// Sample service with one day ban duration. fn ban_service(counter_limit: u32) -> BanService { - let udp_tracker_url = "udp://127.0.0.1".parse().unwrap(); - BanService::new(counter_limit, udp_tracker_url) + BanService::new(counter_limit) } #[test] diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index 15c7ca017..753dc9915 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -24,7 +24,7 @@ use crate::servers::udp::UDP_TRACKER_LOG_TARGET; /// The maximum number of connection id errors per ip. 
Clients will be banned if /// they exceed this limit. -const MAX_CONNECTION_ID_ERRORS_PER_IP: u32 = 10; +pub const MAX_CONNECTION_ID_ERRORS_PER_IP: u32 = 10; const IP_BANS_RESET_INTERVAL_IN_SECS: u64 = 3600; /// A UDP server instance launcher. @@ -40,9 +40,10 @@ impl Launcher { /// It panics if unable to send address of socket. /// It panics if the udp server is loaded when the tracker is private. /// - #[instrument(skip(tracker, bind_to, tx_start, rx_halt))] + #[instrument(skip(tracker, ban_service, bind_to, tx_start, rx_halt))] pub async fn run_with_graceful_shutdown( tracker: Arc, + ban_service: Arc>, bind_to: SocketAddr, cookie_lifetime: Duration, tx_start: oneshot::Sender, @@ -80,7 +81,7 @@ impl Launcher { let local_addr = local_udp_url.clone(); tokio::task::spawn(async move { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_with_graceful_shutdown::task (listening...)"); - let () = Self::run_udp_server_main(receiver, tracker.clone(), cookie_lifetime).await; + let () = Self::run_udp_server_main(receiver, tracker.clone(), ban_service.clone(), cookie_lifetime).await; }) }; @@ -117,8 +118,13 @@ impl Launcher { ServiceHealthCheckJob::new(binding, info, job) } - #[instrument(skip(receiver, tracker))] - async fn run_udp_server_main(mut receiver: Receiver, tracker: Arc, cookie_lifetime: Duration) { + #[instrument(skip(receiver, tracker, ban_service))] + async fn run_udp_server_main( + mut receiver: Receiver, + tracker: Arc, + ban_service: Arc>, + cookie_lifetime: Duration, + ) { let active_requests = &mut ActiveRequests::default(); let addr = receiver.bound_socket_address(); @@ -127,11 +133,6 @@ impl Launcher { let cookie_lifetime = cookie_lifetime.as_secs_f64(); - let ban_service = Arc::new(RwLock::new(BanService::new( - MAX_CONNECTION_ID_ERRORS_PER_IP, - local_addr.parse().unwrap(), - ))); - let ban_cleaner = ban_service.clone(); tokio::spawn(async move { diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index 
9f974ca8c..6eb98a7b1 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -58,17 +58,23 @@ mod tests { use std::sync::Arc; use std::time::Duration; + use tokio::sync::RwLock; use torrust_tracker_test_helpers::configuration::ephemeral_public; use super::spawner::Spawner; use super::Server; use crate::bootstrap::app::initialize_with_configuration; use crate::servers::registar::Registar; + use crate::servers::udp::server::banning::BanService; + use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; #[tokio::test] async fn it_should_be_able_to_start_and_stop() { let cfg = Arc::new(ephemeral_public()); + let tracker = initialize_with_configuration(&cfg); + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let udp_trackers = cfg.udp_trackers.clone().expect("missing UDP trackers configuration"); let config = &udp_trackers[0]; let bind_to = config.bind_address; @@ -77,7 +83,7 @@ mod tests { let stopped = Server::new(Spawner::new(bind_to)); let started = stopped - .start(tracker, register.give_form(), config.cookie_lifetime) + .start(tracker, ban_service, register.give_form(), config.cookie_lifetime) .await .expect("it should start the server"); @@ -91,7 +97,10 @@ mod tests { #[tokio::test] async fn it_should_be_able_to_start_and_stop_with_wait() { let cfg = Arc::new(ephemeral_public()); + let tracker = initialize_with_configuration(&cfg); + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let config = &cfg.udp_trackers.as_ref().unwrap().first().unwrap(); let bind_to = config.bind_address; let register = &Registar::default(); @@ -99,7 +108,7 @@ mod tests { let stopped = Server::new(Spawner::new(bind_to)); let started = stopped - .start(tracker, register.give_form(), config.cookie_lifetime) + .start(tracker, ban_service, register.give_form(), config.cookie_lifetime) .await .expect("it should start the server"); diff --git 
a/src/servers/udp/server/spawner.rs b/src/servers/udp/server/spawner.rs index acebdcf75..ce2fe8eae 100644 --- a/src/servers/udp/server/spawner.rs +++ b/src/servers/udp/server/spawner.rs @@ -5,9 +5,10 @@ use std::time::Duration; use derive_more::derive::Display; use derive_more::Constructor; -use tokio::sync::oneshot; +use tokio::sync::{oneshot, RwLock}; use tokio::task::JoinHandle; +use super::banning::BanService; use super::launcher::Launcher; use crate::bootstrap::jobs::Started; use crate::core::Tracker; @@ -28,6 +29,7 @@ impl Spawner { pub fn spawn_launcher( &self, tracker: Arc, + ban_service: Arc>, cookie_lifetime: Duration, tx_start: oneshot::Sender, rx_halt: oneshot::Receiver, @@ -35,7 +37,7 @@ impl Spawner { let spawner = Self::new(self.bind_to); tokio::spawn(async move { - Launcher::run_with_graceful_shutdown(tracker, spawner.bind_to, cookie_lifetime, tx_start, rx_halt).await; + Launcher::run_with_graceful_shutdown(tracker, ban_service, spawner.bind_to, cookie_lifetime, tx_start, rx_halt).await; spawner }) } diff --git a/src/servers/udp/server/states.rs b/src/servers/udp/server/states.rs index 8b87c6efb..02742049d 100644 --- a/src/servers/udp/server/states.rs +++ b/src/servers/udp/server/states.rs @@ -5,9 +5,11 @@ use std::time::Duration; use derive_more::derive::Display; use derive_more::Constructor; +use tokio::sync::RwLock; use tokio::task::JoinHandle; use tracing::{instrument, Level}; +use super::banning::BanService; use super::spawner::Spawner; use super::{Server, UdpError}; use crate::bootstrap::jobs::Started; @@ -62,10 +64,11 @@ impl Server { /// /// It panics if unable to receive the bound socket address from service. 
/// - #[instrument(skip(self, tracker, form), err, ret(Display, level = Level::INFO))] + #[instrument(skip(self, tracker, ban_service, form), err, ret(Display, level = Level::INFO))] pub async fn start( self, tracker: Arc, + ban_service: Arc>, form: ServiceRegistrationForm, cookie_lifetime: Duration, ) -> Result, std::io::Error> { @@ -75,7 +78,10 @@ impl Server { assert!(!tx_halt.is_closed(), "Halt channel for UDP tracker should be open"); // May need to wrap in a task to about a tokio bug. - let task = self.state.spawner.spawn_launcher(tracker, cookie_lifetime, tx_start, rx_halt); + let task = self + .state + .spawner + .spawn_launcher(tracker, ban_service, cookie_lifetime, tx_start, rx_halt); let local_addr = rx_start.await.expect("it should be able to start the service").address; diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 01639accc..f744809c5 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -2,10 +2,13 @@ use std::net::SocketAddr; use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; +use tokio::sync::RwLock; use torrust_tracker_configuration::{Configuration, UdpTracker, DEFAULT_TIMEOUT}; use torrust_tracker_lib::bootstrap::app::initialize_with_configuration; use torrust_tracker_lib::core::Tracker; use torrust_tracker_lib::servers::registar::Registar; +use torrust_tracker_lib::servers::udp::server::banning::BanService; +use torrust_tracker_lib::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; use torrust_tracker_lib::servers::udp::server::spawner::Spawner; use torrust_tracker_lib::servers::udp::server::states::{Running, Stopped}; use torrust_tracker_lib::servers::udp::server::Server; @@ -17,6 +20,7 @@ where { pub config: Arc, pub tracker: Arc, + pub ban_service: Arc>, pub registar: Registar, pub server: Server, } @@ -36,6 +40,7 @@ impl Environment { #[allow(dead_code)] pub fn new(configuration: &Arc) -> Self { let tracker = 
initialize_with_configuration(configuration); + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let udp_tracker = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); @@ -48,6 +53,7 @@ impl Environment { Self { config, tracker, + ban_service, registar: Registar::default(), server, } @@ -59,10 +65,11 @@ impl Environment { Environment { config: self.config, tracker: self.tracker.clone(), + ban_service: self.ban_service.clone(), registar: self.registar.clone(), server: self .server - .start(self.tracker, self.registar.give_form(), cookie_lifetime) + .start(self.tracker, self.ban_service, self.registar.give_form(), cookie_lifetime) .await .unwrap(), } @@ -85,6 +92,7 @@ impl Environment { Environment { config: self.config, tracker: self.tracker, + ban_service: self.ban_service, registar: Registar::default(), server: stopped.expect("it stop the udp tracker service"), } From 1ce2e33271272c598050ab712110f2ab5048bf57 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Jan 2025 17:35:35 +0000 Subject: [PATCH 077/802] feat: [#1145] add banned ips total for UDP to stats ```json { "torrents": 0, "seeders": 0, "completed": 0, "leechers": 0, "tcp4_connections_handled": 0, "tcp4_announces_handled": 0, "tcp4_scrapes_handled": 0, "tcp6_connections_handled": 0, "tcp6_announces_handled": 0, "tcp6_scrapes_handled": 0, "udp_requests_aborted": 0, "udp_requests_banned": 0, "udp_banned_ips_total": 0, "udp4_requests": 0, "udp4_connections_handled": 0, "udp4_announces_handled": 0, "udp4_scrapes_handled": 0, "udp4_responses": 0, "udp4_errors_handled": 0, "udp6_requests": 0, "udp6_connections_handled": 0, "udp6_announces_handled": 0, "udp6_scrapes_handled": 0, "udp6_responses": 0, "udp6_errors_handled": 0 } ``` The new metric: `udp_banned_ips_total`. It's the total number of IPs that have been banned for sending wrong connection IDs. 
--- src/app.rs | 1 + src/bootstrap/app.rs | 4 +- src/bootstrap/jobs/tracker_apis.rs | 18 +++++-- src/core/services/statistics/mod.rs | 12 ++++- src/core/statistics/metrics.rs | 3 ++ src/main.rs | 4 +- src/servers/apis/routes.rs | 13 +++-- src/servers/apis/server.rs | 18 +++++-- src/servers/apis/v1/context/stats/handlers.rs | 9 +++- .../apis/v1/context/stats/resources.rs | 53 ++++++++++--------- src/servers/apis/v1/context/stats/routes.rs | 9 +++- src/servers/apis/v1/routes.rs | 6 ++- src/servers/udp/server/banning.rs | 5 ++ tests/servers/api/environment.rs | 11 +++- .../servers/api/v1/contract/context/stats.rs | 1 + tests/servers/udp/contract.rs | 7 +++ 16 files changed, 124 insertions(+), 50 deletions(-) diff --git a/src/app.rs b/src/app.rs index f40072132..abfe75256 100644 --- a/src/app.rs +++ b/src/app.rs @@ -113,6 +113,7 @@ pub async fn start( if let Some(job) = tracker_apis::start_job( http_api_config, tracker.clone(), + ban_service.clone(), registar.give_form(), servers::apis::Version::V1, ) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index a4bdd14ea..38b7d40c5 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -47,11 +47,11 @@ pub fn setup() -> (Configuration, Arc, Arc>) { let tracker = initialize_with_configuration(&configuration); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let udp_ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); tracing::info!("Configuration:\n{}", configuration.clone().mask_secrets().to_json()); - (configuration, tracker, ban_service) + (configuration, tracker, udp_ban_service) } /// checks if the seed is the instance seed in production. 
diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 35b13b7ce..858888540 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -24,6 +24,7 @@ use std::net::SocketAddr; use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; +use tokio::sync::RwLock; use tokio::task::JoinHandle; use torrust_tracker_configuration::{AccessTokens, HttpApi}; use tracing::instrument; @@ -33,6 +34,7 @@ use crate::core; use crate::servers::apis::server::{ApiServer, Launcher}; use crate::servers::apis::Version; use crate::servers::registar::ServiceRegistrationForm; +use crate::servers::udp::server::banning::BanService; /// This is the message that the "launcher" spawned task sends to the main /// application process to notify the API server was successfully started. @@ -54,10 +56,11 @@ pub struct ApiServerJobStarted(); /// It would panic if unable to send the `ApiServerJobStarted` notice. /// /// -#[instrument(skip(config, tracker, form))] +#[instrument(skip(config, tracker, ban_service, form))] pub async fn start_job( config: &HttpApi, tracker: Arc, + ban_service: Arc>, form: ServiceRegistrationForm, version: Version, ) -> Option> { @@ -70,21 +73,22 @@ pub async fn start_job( let access_tokens = Arc::new(config.access_tokens.clone()); match version { - Version::V1 => Some(start_v1(bind_to, tls, tracker.clone(), form, access_tokens).await), + Version::V1 => Some(start_v1(bind_to, tls, tracker.clone(), ban_service.clone(), form, access_tokens).await), } } #[allow(clippy::async_yields_async)] -#[instrument(skip(socket, tls, tracker, form, access_tokens))] +#[instrument(skip(socket, tls, tracker, ban_service, form, access_tokens))] async fn start_v1( socket: SocketAddr, tls: Option, tracker: Arc, + ban_service: Arc>, form: ServiceRegistrationForm, access_tokens: Arc, ) -> JoinHandle<()> { let server = ApiServer::new(Launcher::new(socket, tls)) - .start(tracker, form, access_tokens) + .start(tracker, 
ban_service, form, access_tokens) .await .expect("it should be able to start to the tracker api"); @@ -98,21 +102,25 @@ async fn start_v1( mod tests { use std::sync::Arc; + use tokio::sync::RwLock; use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::bootstrap::app::initialize_with_configuration; use crate::bootstrap::jobs::tracker_apis::start_job; use crate::servers::apis::Version; use crate::servers::registar::Registar; + use crate::servers::udp::server::banning::BanService; + use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; #[tokio::test] async fn it_should_start_http_tracker() { let cfg = Arc::new(ephemeral_public()); let config = &cfg.http_api.clone().unwrap(); let tracker = initialize_with_configuration(&cfg); + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let version = Version::V1; - start_job(config, tracker, Registar::default().give_form(), version) + start_job(config, tracker, ban_service, Registar::default().give_form(), version) .await .expect("it should be able to join to the tracker api start-job"); } diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index b4cc32198..41d2f2e10 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -40,10 +40,12 @@ pub mod setup; use std::sync::Arc; +use tokio::sync::RwLock; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use crate::core::statistics::metrics::Metrics; use crate::core::Tracker; +use crate::servers::udp::server::banning::BanService; /// All the metrics collected by the tracker. 
#[derive(Debug, PartialEq)] @@ -60,9 +62,10 @@ pub struct TrackerMetrics { } /// It returns all the [`TrackerMetrics`] -pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { +pub async fn get_metrics(tracker: Arc, ban_service: Arc>) -> TrackerMetrics { let torrents_metrics = tracker.get_torrents_metrics(); let stats = tracker.get_stats().await; + let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); TrackerMetrics { torrents_metrics, @@ -77,6 +80,7 @@ pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { // UDP udp_requests_aborted: stats.udp_requests_aborted, udp_requests_banned: stats.udp_requests_banned, + udp_banned_ips_total: udp_banned_ips_total as u64, udp4_requests: stats.udp4_requests, udp4_connections_handled: stats.udp4_connections_handled, udp4_announces_handled: stats.udp4_announces_handled, @@ -97,6 +101,7 @@ pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { mod tests { use std::sync::Arc; + use tokio::sync::RwLock; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_test_helpers::configuration; @@ -104,6 +109,8 @@ mod tests { use crate::core; use crate::core::services::statistics::{get_metrics, TrackerMetrics}; use crate::core::services::tracker_factory; + use crate::servers::udp::server::banning::BanService; + use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() @@ -112,8 +119,9 @@ mod tests { #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let tracker_metrics = get_metrics(tracker.clone()).await; + let tracker_metrics = get_metrics(tracker.clone(), ban_service.clone()).await; assert_eq!( tracker_metrics, diff --git 
a/src/core/statistics/metrics.rs b/src/core/statistics/metrics.rs index 47bc5af6e..2cbbf4b05 100644 --- a/src/core/statistics/metrics.rs +++ b/src/core/statistics/metrics.rs @@ -34,6 +34,9 @@ pub struct Metrics { /// Total number of UDP (UDP tracker) requests banned. pub udp_requests_banned: u64, + /// Total number of banned IPs. + pub udp_banned_ips_total: u64, + /// Total number of UDP (UDP tracker) requests from IPv4 peers. pub udp4_requests: u64, diff --git a/src/main.rs b/src/main.rs index 206633f8c..c93982191 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,9 +2,9 @@ use torrust_tracker_lib::{app, bootstrap}; #[tokio::main] async fn main() { - let (config, tracker, ban_service) = bootstrap::app::setup(); + let (config, tracker, udp_ban_service) = bootstrap::app::setup(); - let jobs = app::start(&config, tracker, ban_service).await; + let jobs = app::start(&config, tracker, udp_ban_service).await; // handle the signals tokio::select! { diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index 0b0862fb9..98442ea97 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -15,6 +15,7 @@ use axum::response::Response; use axum::routing::get; use axum::{middleware, BoxError, Router}; use hyper::{Request, StatusCode}; +use tokio::sync::RwLock; use torrust_tracker_configuration::{AccessTokens, DEFAULT_TIMEOUT}; use tower::timeout::TimeoutLayer; use tower::ServiceBuilder; @@ -32,16 +33,22 @@ use super::v1::middlewares::auth::State; use crate::core::Tracker; use crate::servers::apis::API_LOG_TARGET; use crate::servers::logging::Latency; +use crate::servers::udp::server::banning::BanService; /// Add all API routes to the router. 
#[allow(clippy::needless_pass_by_value)] -#[instrument(skip(tracker, access_tokens))] -pub fn router(tracker: Arc, access_tokens: Arc, server_socket_addr: SocketAddr) -> Router { +#[instrument(skip(tracker, ban_service, access_tokens))] +pub fn router( + tracker: Arc, + ban_service: Arc>, + access_tokens: Arc, + server_socket_addr: SocketAddr, +) -> Router { let router = Router::new(); let api_url_prefix = "/api"; - let router = v1::routes::add(api_url_prefix, router, tracker.clone()); + let router = v1::routes::add(api_url_prefix, router, tracker.clone(), ban_service.clone()); let state = State { access_tokens }; diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index eadadecf2..9d1c77c03 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -33,6 +33,7 @@ use derive_more::Constructor; use futures::future::BoxFuture; use thiserror::Error; use tokio::sync::oneshot::{Receiver, Sender}; +use tokio::sync::RwLock; use torrust_tracker_configuration::AccessTokens; use tracing::{instrument, Level}; @@ -44,6 +45,7 @@ use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; use crate::servers::logging::STARTED_ON; use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; use crate::servers::signals::{graceful_shutdown, Halted}; +use crate::servers::udp::server::banning::BanService; /// Errors that can occur when starting or stopping the API server. #[derive(Debug, Error)] @@ -122,10 +124,11 @@ impl ApiServer { /// # Panics /// /// It would panic if the bound socket address cannot be sent back to this starter. 
- #[instrument(skip(self, tracker, form, access_tokens), err, ret(Display, level = Level::INFO))] + #[instrument(skip(self, tracker, ban_service, form, access_tokens), err, ret(Display, level = Level::INFO))] pub async fn start( self, tracker: Arc, + ban_service: Arc>, form: ServiceRegistrationForm, access_tokens: Arc, ) -> Result, Error> { @@ -137,7 +140,7 @@ impl ApiServer { let task = tokio::spawn(async move { tracing::debug!(target: API_LOG_TARGET, "Starting with launcher in spawned task ..."); - let _task = launcher.start(tracker, access_tokens, tx_start, rx_halt).await; + let _task = launcher.start(tracker, ban_service, access_tokens, tx_start, rx_halt).await; tracing::debug!(target: API_LOG_TARGET, "Started with launcher in spawned task"); @@ -235,10 +238,11 @@ impl Launcher { /// /// Will panic if unable to bind to the socket, or unable to get the address of the bound socket. /// Will also panic if unable to send message regarding the bound socket address. - #[instrument(skip(self, tracker, access_tokens, tx_start, rx_halt))] + #[instrument(skip(self, tracker, ban_service, access_tokens, tx_start, rx_halt))] pub fn start( &self, tracker: Arc, + ban_service: Arc>, access_tokens: Arc, tx_start: Sender, rx_halt: Receiver, @@ -246,7 +250,7 @@ impl Launcher { let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); - let router = router(tracker, access_tokens, address); + let router = router(tracker, ban_service, access_tokens, address); let handle = Handle::new(); @@ -294,12 +298,15 @@ impl Launcher { mod tests { use std::sync::Arc; + use tokio::sync::RwLock; use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::bootstrap::app::initialize_with_configuration; use crate::bootstrap::jobs::make_rust_tls; use crate::servers::apis::server::{ApiServer, Launcher}; use crate::servers::registar::Registar; + use 
crate::servers::udp::server::banning::BanService; + use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; #[tokio::test] async fn it_should_be_able_to_start_and_stop() { @@ -307,6 +314,7 @@ mod tests { let config = &cfg.http_api.clone().unwrap(); let tracker = initialize_with_configuration(&cfg); + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let bind_to = config.bind_address; @@ -321,7 +329,7 @@ mod tests { let register = &Registar::default(); let started = stopped - .start(tracker, register.give_form(), access_tokens) + .start(tracker, ban_service, register.give_form(), access_tokens) .await .expect("it should start the server"); let stopped = started.stop().await.expect("it should stop the server"); diff --git a/src/servers/apis/v1/context/stats/handlers.rs b/src/servers/apis/v1/context/stats/handlers.rs index 8b11b1ff1..b630c763d 100644 --- a/src/servers/apis/v1/context/stats/handlers.rs +++ b/src/servers/apis/v1/context/stats/handlers.rs @@ -6,10 +6,12 @@ use axum::extract::State; use axum::response::Response; use axum_extra::extract::Query; use serde::Deserialize; +use tokio::sync::RwLock; use super::responses::{metrics_response, stats_response}; use crate::core::services::statistics::get_metrics; use crate::core::Tracker; +use crate::servers::udp::server::banning::BanService; #[derive(Deserialize, Debug, Default)] #[serde(rename_all = "lowercase")] @@ -35,8 +37,11 @@ pub struct QueryParams { /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::stats#get-tracker-statistics) /// for more information about this endpoint. 
-pub async fn get_stats_handler(State(tracker): State>, params: Query) -> Response { - let metrics = get_metrics(tracker.clone()).await; +pub async fn get_stats_handler( + State(state): State<(Arc, Arc>)>, + params: Query, +) -> Response { + let metrics = get_metrics(state.0.clone(), state.1.clone()).await; match params.0.format { Some(format) => match format { diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index fd73499ef..814f94b21 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -38,6 +38,8 @@ pub struct Stats { pub udp_requests_aborted: u64, /// Total number of UDP (UDP tracker) requests banned. pub udp_requests_banned: u64, + /// Total number of IPs banned for UDP (UDP tracker) requests. + pub udp_banned_ips_total: u64, /// Total number of UDP (UDP tracker) requests from IPv4 peers. pub udp4_requests: u64, @@ -83,6 +85,7 @@ impl From for Stats { // UDP udp_requests_aborted: metrics.protocol_metrics.udp_requests_aborted, udp_requests_banned: metrics.protocol_metrics.udp_requests_banned, + udp_banned_ips_total: metrics.protocol_metrics.udp_banned_ips_total, udp4_requests: metrics.protocol_metrics.udp4_requests, udp4_connections_handled: metrics.protocol_metrics.udp4_connections_handled, udp4_announces_handled: metrics.protocol_metrics.udp4_announces_handled, @@ -128,18 +131,19 @@ mod tests { // UDP udp_requests_aborted: 11, udp_requests_banned: 12, - udp4_requests: 13, - udp4_connections_handled: 14, - udp4_announces_handled: 15, - udp4_scrapes_handled: 16, - udp4_responses: 17, - udp4_errors_handled: 18, - udp6_requests: 19, - udp6_connections_handled: 20, - udp6_announces_handled: 21, - udp6_scrapes_handled: 22, - udp6_responses: 23, - udp6_errors_handled: 24 + udp_banned_ips_total: 13, + udp4_requests: 14, + udp4_connections_handled: 15, + udp4_announces_handled: 16, + udp4_scrapes_handled: 17, + udp4_responses: 18, + 
udp4_errors_handled: 19, + udp6_requests: 20, + udp6_connections_handled: 21, + udp6_announces_handled: 22, + udp6_scrapes_handled: 23, + udp6_responses: 24, + udp6_errors_handled: 25 } }), Stats { @@ -157,18 +161,19 @@ mod tests { // UDP udp_requests_aborted: 11, udp_requests_banned: 12, - udp4_requests: 13, - udp4_connections_handled: 14, - udp4_announces_handled: 15, - udp4_scrapes_handled: 16, - udp4_responses: 17, - udp4_errors_handled: 18, - udp6_requests: 19, - udp6_connections_handled: 20, - udp6_announces_handled: 21, - udp6_scrapes_handled: 22, - udp6_responses: 23, - udp6_errors_handled: 24 + udp_banned_ips_total: 13, + udp4_requests: 14, + udp4_connections_handled: 15, + udp4_announces_handled: 16, + udp4_scrapes_handled: 17, + udp4_responses: 18, + udp4_errors_handled: 19, + udp6_requests: 20, + udp6_connections_handled: 21, + udp6_announces_handled: 22, + udp6_scrapes_handled: 23, + udp6_responses: 24, + udp6_errors_handled: 25 } ); } diff --git a/src/servers/apis/v1/context/stats/routes.rs b/src/servers/apis/v1/context/stats/routes.rs index d8d552697..fde1056c3 100644 --- a/src/servers/apis/v1/context/stats/routes.rs +++ b/src/servers/apis/v1/context/stats/routes.rs @@ -7,11 +7,16 @@ use std::sync::Arc; use axum::routing::get; use axum::Router; +use tokio::sync::RwLock; use super::handlers::get_stats_handler; use crate::core::Tracker; +use crate::servers::udp::server::banning::BanService; /// It adds the routes to the router for the [`stats`](crate::servers::apis::v1::context::stats) API context. 
-pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { - router.route(&format!("{prefix}/stats"), get(get_stats_handler).with_state(tracker)) +pub fn add(prefix: &str, router: Router, tracker: Arc, ban_service: Arc>) -> Router { + router.route( + &format!("{prefix}/stats"), + get(get_stats_handler).with_state((tracker, ban_service)), + ) } diff --git a/src/servers/apis/v1/routes.rs b/src/servers/apis/v1/routes.rs index 3786b3532..23ef6c47e 100644 --- a/src/servers/apis/v1/routes.rs +++ b/src/servers/apis/v1/routes.rs @@ -2,16 +2,18 @@ use std::sync::Arc; use axum::Router; +use tokio::sync::RwLock; use super::context::{auth_key, stats, torrent, whitelist}; use crate::core::Tracker; +use crate::servers::udp::server::banning::BanService; /// Add the routes for the v1 API. -pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { +pub fn add(prefix: &str, router: Router, tracker: Arc, ban_service: Arc>) -> Router { let v1_prefix = format!("{prefix}/v1"); let router = auth_key::routes::add(&v1_prefix, router, tracker.clone()); - let router = stats::routes::add(&v1_prefix, router, tracker.clone()); + let router = stats::routes::add(&v1_prefix, router, tracker.clone(), ban_service); let router = whitelist::routes::add(&v1_prefix, router, tracker.clone()); torrent::routes::add(&v1_prefix, router, tracker) diff --git a/src/servers/udp/server/banning.rs b/src/servers/udp/server/banning.rs index dada592be..d32dfa541 100644 --- a/src/servers/udp/server/banning.rs +++ b/src/servers/udp/server/banning.rs @@ -51,6 +51,11 @@ impl BanService { self.accurate_error_counter.get(ip).copied() } + #[must_use] + pub fn get_banned_ips_total(&self) -> usize { + self.accurate_error_counter.len() + } + #[must_use] pub fn get_estimate_count(&self, ip: &IpAddr) -> u32 { self.fuzzy_error_counter.estimate_count(&ip.to_string()) diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index f754e329f..00fb9d05b 100644 --- 
a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -3,12 +3,15 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use futures::executor::block_on; +use tokio::sync::RwLock; use torrust_tracker_configuration::{Configuration, HttpApi}; use torrust_tracker_lib::bootstrap::app::initialize_with_configuration; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; use torrust_tracker_lib::core::Tracker; use torrust_tracker_lib::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; use torrust_tracker_lib::servers::registar::Registar; +use torrust_tracker_lib::servers::udp::server::banning::BanService; +use torrust_tracker_lib::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; use torrust_tracker_primitives::peer; use super::connection_info::ConnectionInfo; @@ -19,6 +22,7 @@ where { pub config: Arc, pub tracker: Arc, + pub ban_service: Arc>, pub registar: Registar, pub server: ApiServer, } @@ -37,6 +41,8 @@ impl Environment { pub fn new(configuration: &Arc) -> Self { let tracker = initialize_with_configuration(configuration); + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let config = Arc::new(configuration.http_api.clone().expect("missing API configuration")); let bind_to = config.bind_address; @@ -48,6 +54,7 @@ impl Environment { Self { config, tracker, + ban_service, registar: Registar::default(), server, } @@ -59,10 +66,11 @@ impl Environment { Environment { config: self.config, tracker: self.tracker.clone(), + ban_service: self.ban_service.clone(), registar: self.registar.clone(), server: self .server - .start(self.tracker, self.registar.give_form(), access_tokens) + .start(self.tracker, self.ban_service, self.registar.give_form(), access_tokens) .await .unwrap(), } @@ -78,6 +86,7 @@ impl Environment { Environment { config: self.config, tracker: self.tracker, + ban_service: self.ban_service, registar: Registar::default(), server: 
self.server.stop().await.unwrap(), } diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index 087c36cc6..e99333d7a 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -46,6 +46,7 @@ async fn should_allow_getting_tracker_statistics() { // UDP udp_requests_aborted: 0, udp_requests_banned: 0, + udp_banned_ips_total: 0, udp4_requests: 0, udp4_connections_handled: 0, udp4_announces_handled: 0, diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index b77343785..f0ed98b21 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -230,12 +230,15 @@ mod receiving_an_announce_request { let env = Started::new(&configuration::ephemeral().into()).await; let tracker = env.tracker.clone(); + let ban_service = env.ban_service.clone(); let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { Ok(udp_tracker_client) => udp_tracker_client, Err(err) => panic!("{err}"), }; + let udp_banned_ips_total_before = ban_service.read().await.get_banned_ips_total(); + // The eleven first requests should be fine let invalid_connection_id = ConnectionId::new(0); // Zero is one of the not normal values. 
@@ -279,10 +282,14 @@ mod receiving_an_announce_request { assert!(client.receive().await.is_err()); let udp_requests_banned_after = tracker.get_stats().await.udp_requests_banned; + let udp_banned_ips_total_after = ban_service.read().await.get_banned_ips_total(); // UDP counter for banned requests should be increased by 1 assert_eq!(udp_requests_banned_after, udp_requests_banned_before + 1); + // UDP counter for banned IPs should be increased by 1 + assert_eq!(udp_banned_ips_total_after, udp_banned_ips_total_before + 1); + env.stop().await; } } From 08a862a5adbc95e0bd998c0b5dd6103d3e0a5f57 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Jan 2025 18:15:13 +0000 Subject: [PATCH 078/802] refactor: [#1145] add type and processing time to UDP response events - The `kind`is the type of response: connect, annouince, etc - The req_processing_time is the time it took to process the requests on the backend, without including sending the response back to the client (network latency). --- src/core/statistics/event/handler.rs | 10 +++++++-- src/core/statistics/event/mod.rs | 20 +++++++++++++++-- src/servers/udp/server/processor.rs | 33 ++++++++++++++++++++++++---- 3 files changed, 55 insertions(+), 8 deletions(-) diff --git a/src/core/statistics/event/handler.rs b/src/core/statistics/event/handler.rs index 06ff6abe2..32b666d68 100644 --- a/src/core/statistics/event/handler.rs +++ b/src/core/statistics/event/handler.rs @@ -44,7 +44,10 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { Event::Udp4Scrape => { stats_repository.increase_udp4_scrapes().await; } - Event::Udp4Response => { + Event::Udp4Response { + kind: _, + req_processing_time: _, + } => { stats_repository.increase_udp4_responses().await; } Event::Udp4Error => { @@ -64,7 +67,10 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { Event::Udp6Scrape => { stats_repository.increase_udp6_scrapes().await; } - Event::Udp6Response => { + Event::Udp6Response { + kind: _, 
+ req_processing_time: _, + } => { stats_repository.increase_udp6_responses().await; } Event::Udp6Error => { diff --git a/src/core/statistics/event/mod.rs b/src/core/statistics/event/mod.rs index b2344fb78..905aa0372 100644 --- a/src/core/statistics/event/mod.rs +++ b/src/core/statistics/event/mod.rs @@ -1,3 +1,5 @@ +use std::time::Duration; + pub mod handler; pub mod listener; pub mod sender; @@ -24,12 +26,26 @@ pub enum Event { Udp4Connect, Udp4Announce, Udp4Scrape, - Udp4Response, + Udp4Response { + kind: UdpResponseKind, + req_processing_time: Duration, + }, Udp4Error, Udp6Request, Udp6Connect, Udp6Announce, Udp6Scrape, - Udp6Response, + Udp6Response { + kind: UdpResponseKind, + req_processing_time: Duration, + }, Udp6Error, } + +#[derive(Debug, PartialEq, Eq)] +pub enum UdpResponseKind { + Connect, + Announce, + Scrape, + Error, +} diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index 9a9798698..e0f7c4624 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -1,13 +1,16 @@ use std::io::Cursor; use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; +use std::time::Duration; use aquatic_udp_protocol::Response; use tokio::sync::RwLock; +use tokio::time::Instant; use tracing::{instrument, Level}; use super::banning::BanService; use super::bound_socket::BoundSocket; +use crate::core::statistics::event::UdpResponseKind; use crate::core::{statistics, Tracker}; use crate::servers::udp::handlers::CookieTimeValues; use crate::servers::udp::{handlers, RawRequest}; @@ -30,6 +33,9 @@ impl Processor { #[instrument(skip(self, request, ban_service))] pub async fn process_request(self, request: RawRequest, ban_service: Arc>) { let from = request.from; + + let start_time = Instant::now(); + let response = handlers::handle_packet( request, &self.tracker, @@ -39,11 +45,13 @@ impl Processor { ) .await; - self.send_response(from, response).await; + let elapsed_time = start_time.elapsed(); + + 
self.send_response(from, response, elapsed_time).await; } #[instrument(skip(self))] - async fn send_response(self, target: SocketAddr, response: Response) { + async fn send_response(self, target: SocketAddr, response: Response, req_processing_time: Duration) { tracing::debug!("send response"); let response_type = match &response { @@ -54,6 +62,13 @@ impl Processor { Response::Error(e) => format!("Error: {e:?}"), }; + let response_kind = match &response { + Response::Connect(_) => UdpResponseKind::Connect, + Response::AnnounceIpv4(_) | Response::AnnounceIpv6(_) => UdpResponseKind::Announce, + Response::Scrape(_) => UdpResponseKind::Scrape, + Response::Error(_e) => UdpResponseKind::Error, + }; + let mut writer = Cursor::new(Vec::with_capacity(200)); match response.write_bytes(&mut writer) { @@ -71,10 +86,20 @@ impl Processor { match target.ip() { IpAddr::V4(_) => { - self.tracker.send_stats_event(statistics::event::Event::Udp4Response).await; + self.tracker + .send_stats_event(statistics::event::Event::Udp4Response { + kind: response_kind, + req_processing_time, + }) + .await; } IpAddr::V6(_) => { - self.tracker.send_stats_event(statistics::event::Event::Udp6Response).await; + self.tracker + .send_stats_event(statistics::event::Event::Udp6Response { + kind: response_kind, + req_processing_time, + }) + .await; } } } From 903d47f7258a56d141228e95ba0d34552fe038f1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 9 Jan 2025 16:54:04 +0000 Subject: [PATCH 079/802] feat: [#1145] add UDP avg processing time to stats ```json { "torrents": 1, "seeders": 1, "completed": 0, "leechers": 0, "tcp4_connections_handled": 0, "tcp4_announces_handled": 0, "tcp4_scrapes_handled": 0, "tcp6_connections_handled": 0, "tcp6_announces_handled": 0, "tcp6_scrapes_handled": 0, "udp_requests_aborted": 0, "udp_requests_banned": 0, "udp_banned_ips_total": 0, "udp_avg_connect_processing_time_ns": 37000, "udp_avg_announce_processing_time_ns": 42067, "udp_avg_scrape_processing_time_ns": 0, 
"udp4_requests": 60, "udp4_connections_handled": 30, "udp4_announces_handled": 30, "udp4_scrapes_handled": 0, "udp4_responses": 60, "udp4_errors_handled": 0, "udp6_requests": 0, "udp6_connections_handled": 0, "udp6_announces_handled": 0, "udp6_scrapes_handled": 0, "udp6_responses": 0, "udp6_errors_handled": 0 } ``` New metrcis are: - udp_avg_connect_processing_time_ns - udp_avg_announce_processing_time_ns - udp_avg_scrape_processing_time_ns --- src/core/services/statistics/mod.rs | 8 +- src/core/statistics/event/handler.rs | 25 ++++++- src/core/statistics/metrics.rs | 12 +++ src/core/statistics/repository.rs | 59 +++++++++++++++ .../apis/v1/context/stats/resources.rs | 75 ++++++++++++------- .../apis/v1/context/stats/responses.rs | 32 ++++++++ .../servers/api/v1/contract/context/stats.rs | 5 ++ 7 files changed, 187 insertions(+), 29 deletions(-) diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index 41d2f2e10..4143aaf1f 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -70,10 +70,11 @@ pub async fn get_metrics(tracker: Arc, ban_service: Arc, ban_service: Arc { stats_repository.increase_udp4_responses().await; + + match kind { + UdpResponseKind::Connect => { + stats_repository + .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) + .await; + } + UdpResponseKind::Announce => { + stats_repository + .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) + .await; + } + UdpResponseKind::Scrape => { + stats_repository + .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) + .await; + } + UdpResponseKind::Error => {} + } } Event::Udp4Error => { stats_repository.increase_udp4_errors().await; diff --git a/src/core/statistics/metrics.rs b/src/core/statistics/metrics.rs index 2cbbf4b05..40262efd6 100644 --- a/src/core/statistics/metrics.rs +++ b/src/core/statistics/metrics.rs @@ -28,6 +28,7 @@ pub struct Metrics { /// Total number of TCP (HTTP 
tracker) `scrape` requests from IPv6 peers. pub tcp6_scrapes_handled: u64, + // UDP /// Total number of UDP (UDP tracker) requests aborted. pub udp_requests_aborted: u64, @@ -37,6 +38,16 @@ pub struct Metrics { /// Total number of banned IPs. pub udp_banned_ips_total: u64, + /// Average rounded time spent processing UDP connect requests. + pub udp_avg_connect_processing_time_ns: u64, + + /// Average rounded time spent processing UDP announce requests. + pub udp_avg_announce_processing_time_ns: u64, + + /// Average rounded time spent processing UDP scrape requests. + pub udp_avg_scrape_processing_time_ns: u64, + + // UDPv4 /// Total number of UDP (UDP tracker) requests from IPv4 peers. pub udp4_requests: u64, @@ -55,6 +66,7 @@ pub struct Metrics { /// Total number of UDP (UDP tracker) `error` requests from IPv4 peers. pub udp4_errors_handled: u64, + // UDPv6 /// Total number of UDP (UDP tracker) requests from IPv6 peers. pub udp6_requests: u64, diff --git a/src/core/statistics/repository.rs b/src/core/statistics/repository.rs index 563e87534..ec5100073 100644 --- a/src/core/statistics/repository.rs +++ b/src/core/statistics/repository.rs @@ -1,4 +1,5 @@ use std::sync::Arc; +use std::time::Duration; use tokio::sync::{RwLock, RwLockReadGuard}; @@ -112,6 +113,64 @@ impl Repository { drop(stats_lock); } + #[allow(clippy::cast_precision_loss)] + #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_sign_loss)] + pub async fn recalculate_udp_avg_connect_processing_time_ns(&self, req_processing_time: Duration) { + let mut stats_lock = self.stats.write().await; + + let req_processing_time = req_processing_time.as_nanos() as f64; + let udp_connections_handled = (stats_lock.udp4_connections_handled + stats_lock.udp6_connections_handled) as f64; + + let previous_avg = stats_lock.udp_avg_connect_processing_time_ns; + + // Moving average: https://en.wikipedia.org/wiki/Moving_average + let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / 
udp_connections_handled; + + stats_lock.udp_avg_connect_processing_time_ns = new_avg.ceil() as u64; + + drop(stats_lock); + } + + #[allow(clippy::cast_precision_loss)] + #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_sign_loss)] + pub async fn recalculate_udp_avg_announce_processing_time_ns(&self, req_processing_time: Duration) { + let mut stats_lock = self.stats.write().await; + + let req_processing_time = req_processing_time.as_nanos() as f64; + + let udp_announces_handled = (stats_lock.udp4_announces_handled + stats_lock.udp6_announces_handled) as f64; + + let previous_avg = stats_lock.udp_avg_announce_processing_time_ns; + + // Moving average: https://en.wikipedia.org/wiki/Moving_average + let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_announces_handled; + + stats_lock.udp_avg_announce_processing_time_ns = new_avg.ceil() as u64; + + drop(stats_lock); + } + + #[allow(clippy::cast_precision_loss)] + #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_sign_loss)] + pub async fn recalculate_udp_avg_scrape_processing_time_ns(&self, req_processing_time: Duration) { + let mut stats_lock = self.stats.write().await; + + let req_processing_time = req_processing_time.as_nanos() as f64; + let udp_scrapes_handled = (stats_lock.udp4_scrapes_handled + stats_lock.udp6_scrapes_handled) as f64; + + let previous_avg = stats_lock.udp_avg_scrape_processing_time_ns; + + // Moving average: https://en.wikipedia.org/wiki/Moving_average + let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_scrapes_handled; + + stats_lock.udp_avg_scrape_processing_time_ns = new_avg.ceil() as u64; + + drop(stats_lock); + } + pub async fn increase_udp6_requests(&self) { let mut stats_lock = self.stats.write().await; stats_lock.udp6_requests += 1; diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index 814f94b21..c6a526a7d 100644 --- 
a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -34,13 +34,21 @@ pub struct Stats { /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. pub tcp6_scrapes_handled: u64, + // UDP /// Total number of UDP (UDP tracker) requests aborted. pub udp_requests_aborted: u64, /// Total number of UDP (UDP tracker) requests banned. pub udp_requests_banned: u64, /// Total number of IPs banned for UDP (UDP tracker) requests. pub udp_banned_ips_total: u64, + /// Average rounded time spent processing UDP connect requests. + pub udp_avg_connect_processing_time_ns: u64, + /// Average rounded time spent processing UDP announce requests. + pub udp_avg_announce_processing_time_ns: u64, + /// Average rounded time spent processing UDP scrape requests. + pub udp_avg_scrape_processing_time_ns: u64, + // UDPv4 /// Total number of UDP (UDP tracker) requests from IPv4 peers. pub udp4_requests: u64, /// Total number of UDP (UDP tracker) connections from IPv4 peers. @@ -54,6 +62,7 @@ pub struct Stats { /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. pub udp4_errors_handled: u64, + // UDPv6 /// Total number of UDP (UDP tracker) requests from IPv6 peers. pub udp6_requests: u64, /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. 
@@ -86,12 +95,17 @@ impl From for Stats { udp_requests_aborted: metrics.protocol_metrics.udp_requests_aborted, udp_requests_banned: metrics.protocol_metrics.udp_requests_banned, udp_banned_ips_total: metrics.protocol_metrics.udp_banned_ips_total, + udp_avg_connect_processing_time_ns: metrics.protocol_metrics.udp_avg_connect_processing_time_ns, + udp_avg_announce_processing_time_ns: metrics.protocol_metrics.udp_avg_announce_processing_time_ns, + udp_avg_scrape_processing_time_ns: metrics.protocol_metrics.udp_avg_scrape_processing_time_ns, + // UDPv4 udp4_requests: metrics.protocol_metrics.udp4_requests, udp4_connections_handled: metrics.protocol_metrics.udp4_connections_handled, udp4_announces_handled: metrics.protocol_metrics.udp4_announces_handled, udp4_scrapes_handled: metrics.protocol_metrics.udp4_scrapes_handled, udp4_responses: metrics.protocol_metrics.udp4_responses, udp4_errors_handled: metrics.protocol_metrics.udp4_errors_handled, + // UDPv6 udp6_requests: metrics.protocol_metrics.udp6_requests, udp6_connections_handled: metrics.protocol_metrics.udp6_connections_handled, udp6_announces_handled: metrics.protocol_metrics.udp6_announces_handled, @@ -132,18 +146,23 @@ mod tests { udp_requests_aborted: 11, udp_requests_banned: 12, udp_banned_ips_total: 13, - udp4_requests: 14, - udp4_connections_handled: 15, - udp4_announces_handled: 16, - udp4_scrapes_handled: 17, - udp4_responses: 18, - udp4_errors_handled: 19, - udp6_requests: 20, - udp6_connections_handled: 21, - udp6_announces_handled: 22, - udp6_scrapes_handled: 23, - udp6_responses: 24, - udp6_errors_handled: 25 + udp_avg_connect_processing_time_ns: 14, + udp_avg_announce_processing_time_ns: 15, + udp_avg_scrape_processing_time_ns: 16, + // UDPv4 + udp4_requests: 17, + udp4_connections_handled: 18, + udp4_announces_handled: 19, + udp4_scrapes_handled: 20, + udp4_responses: 21, + udp4_errors_handled: 22, + // UDPv6 + udp6_requests: 23, + udp6_connections_handled: 24, + udp6_announces_handled: 25, + 
udp6_scrapes_handled: 26, + udp6_responses: 27, + udp6_errors_handled: 28 } }), Stats { @@ -151,10 +170,11 @@ mod tests { seeders: 1, completed: 2, leechers: 3, - // TCP + // TCPv4 tcp4_connections_handled: 5, tcp4_announces_handled: 6, tcp4_scrapes_handled: 7, + // TCPv6 tcp6_connections_handled: 8, tcp6_announces_handled: 9, tcp6_scrapes_handled: 10, @@ -162,18 +182,23 @@ mod tests { udp_requests_aborted: 11, udp_requests_banned: 12, udp_banned_ips_total: 13, - udp4_requests: 14, - udp4_connections_handled: 15, - udp4_announces_handled: 16, - udp4_scrapes_handled: 17, - udp4_responses: 18, - udp4_errors_handled: 19, - udp6_requests: 20, - udp6_connections_handled: 21, - udp6_announces_handled: 22, - udp6_scrapes_handled: 23, - udp6_responses: 24, - udp6_errors_handled: 25 + udp_avg_connect_processing_time_ns: 14, + udp_avg_announce_processing_time_ns: 15, + udp_avg_scrape_processing_time_ns: 16, + // UDPv4 + udp4_requests: 17, + udp4_connections_handled: 18, + udp4_announces_handled: 19, + udp4_scrapes_handled: 20, + udp4_responses: 21, + udp4_errors_handled: 22, + // UDPv6 + udp6_requests: 23, + udp6_connections_handled: 24, + udp6_announces_handled: 25, + udp6_scrapes_handled: 26, + udp6_responses: 27, + udp6_errors_handled: 28 } ); } diff --git a/src/servers/apis/v1/context/stats/responses.rs b/src/servers/apis/v1/context/stats/responses.rs index 6b214d0c9..a67b5328a 100644 --- a/src/servers/apis/v1/context/stats/responses.rs +++ b/src/servers/apis/v1/context/stats/responses.rs @@ -21,6 +21,10 @@ pub fn metrics_response(tracker_metrics: &TrackerMetrics) -> Response { lines.push(format!("completed {}", tracker_metrics.torrents_metrics.downloaded)); lines.push(format!("leechers {}", tracker_metrics.torrents_metrics.incomplete)); + // TCP + + // TCPv4 + lines.push(format!( "tcp4_connections_handled {}", tracker_metrics.protocol_metrics.tcp4_connections_handled @@ -34,6 +38,8 @@ pub fn metrics_response(tracker_metrics: &TrackerMetrics) -> Response { 
tracker_metrics.protocol_metrics.tcp4_scrapes_handled )); + // TCPv6 + lines.push(format!( "tcp6_connections_handled {}", tracker_metrics.protocol_metrics.tcp6_connections_handled @@ -47,10 +53,34 @@ pub fn metrics_response(tracker_metrics: &TrackerMetrics) -> Response { tracker_metrics.protocol_metrics.tcp6_scrapes_handled )); + // UDP + lines.push(format!( "udp_requests_aborted {}", tracker_metrics.protocol_metrics.udp_requests_aborted )); + lines.push(format!( + "udp_requests_banned {}", + tracker_metrics.protocol_metrics.udp_requests_banned + )); + lines.push(format!( + "udp_banned_ips_total {}", + tracker_metrics.protocol_metrics.udp_banned_ips_total + )); + lines.push(format!( + "udp_avg_connect_processing_time_ns {}", + tracker_metrics.protocol_metrics.udp_avg_connect_processing_time_ns + )); + lines.push(format!( + "udp_avg_announce_processing_time_ns {}", + tracker_metrics.protocol_metrics.udp_avg_announce_processing_time_ns + )); + lines.push(format!( + "udp_avg_scrape_processing_time_ns {}", + tracker_metrics.protocol_metrics.udp_avg_scrape_processing_time_ns + )); + + // UDPv4 lines.push(format!("udp4_requests {}", tracker_metrics.protocol_metrics.udp4_requests)); lines.push(format!( @@ -71,6 +101,8 @@ pub fn metrics_response(tracker_metrics: &TrackerMetrics) -> Response { tracker_metrics.protocol_metrics.udp4_errors_handled )); + // UDPv6 + lines.push(format!("udp6_requests {}", tracker_metrics.protocol_metrics.udp6_requests)); lines.push(format!( "udp6_connections_handled {}", diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index e99333d7a..bc6e495a3 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -47,12 +47,17 @@ async fn should_allow_getting_tracker_statistics() { udp_requests_aborted: 0, udp_requests_banned: 0, udp_banned_ips_total: 0, + udp_avg_connect_processing_time_ns: 0, + udp_avg_announce_processing_time_ns: 0, + 
udp_avg_scrape_processing_time_ns: 0, + // UDPv4 udp4_requests: 0, udp4_connections_handled: 0, udp4_announces_handled: 0, udp4_scrapes_handled: 0, udp4_responses: 0, udp4_errors_handled: 0, + // UDPv6 udp6_requests: 0, udp6_connections_handled: 0, udp6_announces_handled: 0, From a1ded65db45548ccdc9249debd2593fd542b9725 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Jan 2025 12:43:34 +0000 Subject: [PATCH 080/802] feat: [#1159] extract new package tracker api client --- .github/workflows/deployment.yaml | 1 + Cargo.lock | 10 + Cargo.toml | 3 +- packages/tracker-api-client/Cargo.toml | 21 ++ packages/tracker-api-client/README.md | 23 +++ .../docs/licenses/LICENSE-MIT_0 | 14 ++ .../tracker-api-client/src/common/http.rs | 57 ++++++ packages/tracker-api-client/src/common/mod.rs | 1 + .../tracker-api-client/src/connection_info.rs | 33 ++++ packages/tracker-api-client/src/lib.rs | 3 + packages/tracker-api-client/src/v1/client.rs | 179 ++++++++++++++++++ packages/tracker-api-client/src/v1/mod.rs | 1 + 12 files changed, 345 insertions(+), 1 deletion(-) create mode 100644 packages/tracker-api-client/Cargo.toml create mode 100644 packages/tracker-api-client/README.md create mode 100644 packages/tracker-api-client/docs/licenses/LICENSE-MIT_0 create mode 100644 packages/tracker-api-client/src/common/http.rs create mode 100644 packages/tracker-api-client/src/common/mod.rs create mode 100644 packages/tracker-api-client/src/connection_info.rs create mode 100644 packages/tracker-api-client/src/lib.rs create mode 100644 packages/tracker-api-client/src/v1/client.rs create mode 100644 packages/tracker-api-client/src/v1/mod.rs diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 1e0f59b43..fd4e0fd5c 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -58,6 +58,7 @@ jobs: cargo publish -p bittorrent-http-protocol cargo publish -p bittorrent-tracker-client cargo publish -p torrust-tracker + cargo publish 
-p torrust-tracker-api-client cargo publish -p torrust-tracker-client cargo publish -p torrust-tracker-clock cargo publish -p torrust-tracker-configuration diff --git a/Cargo.lock b/Cargo.lock index 68e32ddbd..3b0cc9093 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3990,6 +3990,16 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "torrust-tracker-api-client" +version = "3.0.0-develop" +dependencies = [ + "hyper", + "reqwest", + "serde", + "uuid", +] + [[package]] name = "torrust-tracker-client" version = "3.0.0-develop" diff --git a/Cargo.toml b/Cargo.toml index f1ae96dad..e57016596 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,7 +16,7 @@ rust-version.workspace = true version.workspace = true [lib] -name = "torrust_tracker_lib" +name = "torrust_tracker_lib" [workspace.package] authors = ["Nautilus Cyberneering , Mick van Dijke "] @@ -108,6 +108,7 @@ members = [ "packages/primitives", "packages/test-helpers", "packages/torrent-repository", + "packages/tracker-api-client", "packages/tracker-client", ] diff --git a/packages/tracker-api-client/Cargo.toml b/packages/tracker-api-client/Cargo.toml new file mode 100644 index 000000000..388ad4bd2 --- /dev/null +++ b/packages/tracker-api-client/Cargo.toml @@ -0,0 +1,21 @@ +[package] +description = "A library to interact with the Torrust Tracker REST API." 
+keywords = ["bittorrent", "client", "tracker"] +license = "LGPL-3.0" +name = "torrust-tracker-api-client" +readme = "README.md" + +authors.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +hyper = "1" +reqwest = { version = "0", features = ["json"] } +serde = { version = "1", features = ["derive"] } +uuid = { version = "1", features = ["v4"] } diff --git a/packages/tracker-api-client/README.md b/packages/tracker-api-client/README.md new file mode 100644 index 000000000..3c10cdb5c --- /dev/null +++ b/packages/tracker-api-client/README.md @@ -0,0 +1,23 @@ +# Torrust Tracker API Client + +A library to interact with the Torrust Tracker REST API. + +## License + +**Copyright (c) 2024 The Torrust Developers.** + +This program is free software: you can redistribute it and/or modify it under the terms of the [GNU Lesser General Public License][LGPL_3_0] as published by the [Free Software Foundation][FSF], version 3. + +This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the [GNU Lesser General Public License][LGPL_3_0] for more details. + +You should have received a copy of the *GNU Lesser General Public License* along with this program. If not, see . + +Some files include explicit copyright notices and/or license notices. + +### Legacy Exception + +For prosperity, versions of Torrust BitTorrent Tracker Client that are older than five years are automatically granted the [MIT-0][MIT_0] license in addition to the existing [LGPL-3.0-only][LGPL_3_0] license. 
+ +[LGPL_3_0]: ./LICENSE +[MIT_0]: ./docs/licenses/LICENSE-MIT_0 +[FSF]: https://www.fsf.org/ diff --git a/packages/tracker-api-client/docs/licenses/LICENSE-MIT_0 b/packages/tracker-api-client/docs/licenses/LICENSE-MIT_0 new file mode 100644 index 000000000..fc06cc4fe --- /dev/null +++ b/packages/tracker-api-client/docs/licenses/LICENSE-MIT_0 @@ -0,0 +1,14 @@ +MIT No Attribution + +Permission is hereby granted, free of charge, to any person obtaining a copy of this +software and associated documentation files (the "Software"), to deal in the Software +without restriction, including without limitation the rights to use, copy, modify, +merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/packages/tracker-api-client/src/common/http.rs b/packages/tracker-api-client/src/common/http.rs new file mode 100644 index 000000000..adbc7dc15 --- /dev/null +++ b/packages/tracker-api-client/src/common/http.rs @@ -0,0 +1,57 @@ +pub type ReqwestQuery = Vec; +pub type ReqwestQueryParam = (String, String); + +/// URL Query component +#[derive(Default, Debug)] +pub struct Query { + params: Vec, +} + +impl Query { + #[must_use] + pub fn empty() -> Self { + Self { params: vec![] } + } + + #[must_use] + pub fn params(params: Vec) -> Self { + Self { params } + } + + pub fn add_param(&mut self, param: QueryParam) { + self.params.push(param); + } +} + +impl From for ReqwestQuery { + fn from(url_search_params: Query) -> Self { + url_search_params + .params + .iter() + .map(|param| ReqwestQueryParam::from((*param).clone())) + .collect() + } +} + +/// URL query param +#[derive(Clone, Debug)] +pub struct QueryParam { + name: String, + value: String, +} + +impl QueryParam { + #[must_use] + pub fn new(name: &str, value: &str) -> Self { + Self { + name: name.to_string(), + value: value.to_string(), + } + } +} + +impl From for ReqwestQueryParam { + fn from(param: QueryParam) -> Self { + (param.name, param.value) + } +} diff --git a/packages/tracker-api-client/src/common/mod.rs b/packages/tracker-api-client/src/common/mod.rs new file mode 100644 index 000000000..3883215fc --- /dev/null +++ b/packages/tracker-api-client/src/common/mod.rs @@ -0,0 +1 @@ +pub mod http; diff --git a/packages/tracker-api-client/src/connection_info.rs b/packages/tracker-api-client/src/connection_info.rs new file mode 100644 index 000000000..32eeda686 --- /dev/null +++ b/packages/tracker-api-client/src/connection_info.rs @@ -0,0 +1,33 @@ +#[must_use] +pub fn connection_with_invalid_token(bind_address: &str) -> ConnectionInfo { + ConnectionInfo::authenticated(bind_address, "invalid token") +} + +#[must_use] +pub fn connection_with_no_token(bind_address: &str) -> ConnectionInfo { + 
ConnectionInfo::anonymous(bind_address) +} + +#[derive(Clone)] +pub struct ConnectionInfo { + pub bind_address: String, + pub api_token: Option, +} + +impl ConnectionInfo { + #[must_use] + pub fn authenticated(bind_address: &str, api_token: &str) -> Self { + Self { + bind_address: bind_address.to_string(), + api_token: Some(api_token.to_string()), + } + } + + #[must_use] + pub fn anonymous(bind_address: &str) -> Self { + Self { + bind_address: bind_address.to_string(), + api_token: None, + } + } +} diff --git a/packages/tracker-api-client/src/lib.rs b/packages/tracker-api-client/src/lib.rs new file mode 100644 index 000000000..baf80e3cd --- /dev/null +++ b/packages/tracker-api-client/src/lib.rs @@ -0,0 +1,3 @@ +pub mod common; +pub mod connection_info; +pub mod v1; diff --git a/packages/tracker-api-client/src/v1/client.rs b/packages/tracker-api-client/src/v1/client.rs new file mode 100644 index 000000000..b5c0dc60b --- /dev/null +++ b/packages/tracker-api-client/src/v1/client.rs @@ -0,0 +1,179 @@ +use hyper::HeaderMap; +use reqwest::Response; +use serde::Serialize; +use uuid::Uuid; + +use crate::common::http::{Query, QueryParam, ReqwestQuery}; +use crate::connection_info::ConnectionInfo; + +/// API Client +pub struct Client { + connection_info: ConnectionInfo, + base_path: String, +} + +impl Client { + #[must_use] + pub fn new(connection_info: ConnectionInfo) -> Self { + Self { + connection_info, + base_path: "/api/v1/".to_string(), + } + } + + pub async fn generate_auth_key(&self, seconds_valid: i32, headers: Option) -> Response { + self.post_empty(&format!("key/{}", &seconds_valid), headers).await + } + + pub async fn add_auth_key(&self, add_key_form: AddKeyForm, headers: Option) -> Response { + self.post_form("keys", &add_key_form, headers).await + } + + pub async fn delete_auth_key(&self, key: &str, headers: Option) -> Response { + self.delete(&format!("key/{}", &key), headers).await + } + + pub async fn reload_keys(&self, headers: Option) -> Response { + 
self.get("keys/reload", Query::default(), headers).await + } + + pub async fn whitelist_a_torrent(&self, info_hash: &str, headers: Option) -> Response { + self.post_empty(&format!("whitelist/{}", &info_hash), headers).await + } + + pub async fn remove_torrent_from_whitelist(&self, info_hash: &str, headers: Option) -> Response { + self.delete(&format!("whitelist/{}", &info_hash), headers).await + } + + pub async fn reload_whitelist(&self, headers: Option) -> Response { + self.get("whitelist/reload", Query::default(), headers).await + } + + pub async fn get_torrent(&self, info_hash: &str, headers: Option) -> Response { + self.get(&format!("torrent/{}", &info_hash), Query::default(), headers).await + } + + pub async fn get_torrents(&self, params: Query, headers: Option) -> Response { + self.get("torrents", params, headers).await + } + + pub async fn get_tracker_statistics(&self, headers: Option) -> Response { + self.get("stats", Query::default(), headers).await + } + + pub async fn get(&self, path: &str, params: Query, headers: Option) -> Response { + let mut query: Query = params; + + if let Some(token) = &self.connection_info.api_token { + query.add_param(QueryParam::new("token", token)); + }; + + self.get_request_with_query(path, query, headers).await + } + + /// # Panics + /// + /// Will panic if the request can't be sent + pub async fn post_empty(&self, path: &str, headers: Option) -> Response { + let builder = reqwest::Client::new() + .post(self.base_url(path).clone()) + .query(&ReqwestQuery::from(self.query_with_token())); + + let builder = match headers { + Some(headers) => builder.headers(headers), + None => builder, + }; + + builder.send().await.unwrap() + } + + /// # Panics + /// + /// Will panic if the request can't be sent + pub async fn post_form(&self, path: &str, form: &T, headers: Option) -> Response { + let builder = reqwest::Client::new() + .post(self.base_url(path).clone()) + .query(&ReqwestQuery::from(self.query_with_token())) + .json(&form); + + 
let builder = match headers { + Some(headers) => builder.headers(headers), + None => builder, + }; + + builder.send().await.unwrap() + } + + /// # Panics + /// + /// Will panic if the request can't be sent + async fn delete(&self, path: &str, headers: Option) -> Response { + let builder = reqwest::Client::new() + .delete(self.base_url(path).clone()) + .query(&ReqwestQuery::from(self.query_with_token())); + + let builder = match headers { + Some(headers) => builder.headers(headers), + None => builder, + }; + + builder.send().await.unwrap() + } + + pub async fn get_request_with_query(&self, path: &str, params: Query, headers: Option) -> Response { + get(&self.base_url(path), Some(params), headers).await + } + + pub async fn get_request(&self, path: &str) -> Response { + get(&self.base_url(path), None, None).await + } + + fn query_with_token(&self) -> Query { + match &self.connection_info.api_token { + Some(token) => Query::params([QueryParam::new("token", token)].to_vec()), + None => Query::default(), + } + } + + fn base_url(&self, path: &str) -> String { + format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) + } +} + +/// # Panics +/// +/// Will panic if the request can't be sent +pub async fn get(path: &str, query: Option, headers: Option) -> Response { + let builder = reqwest::Client::builder().build().unwrap(); + + let builder = match query { + Some(params) => builder.get(path).query(&ReqwestQuery::from(params)), + None => builder.get(path), + }; + + let builder = match headers { + Some(headers) => builder.headers(headers), + None => builder, + }; + + builder.send().await.unwrap() +} + +/// Returns a `HeaderMap` with a request id header +/// +/// # Panics +/// +/// Will panic if the request ID can't be parsed into a string. 
+#[must_use] +pub fn headers_with_request_id(request_id: Uuid) -> HeaderMap { + let mut headers = HeaderMap::new(); + headers.insert("x-request-id", request_id.to_string().parse().unwrap()); + headers +} + +#[derive(Serialize, Debug)] +pub struct AddKeyForm { + #[serde(rename = "key")] + pub opt_key: Option, + pub seconds_valid: Option, +} diff --git a/packages/tracker-api-client/src/v1/mod.rs b/packages/tracker-api-client/src/v1/mod.rs new file mode 100644 index 000000000..b9babe5bc --- /dev/null +++ b/packages/tracker-api-client/src/v1/mod.rs @@ -0,0 +1 @@ +pub mod client; From e4b9875e18ebeb6d12d33b19b6a478c262e563e4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Jan 2025 16:03:29 +0000 Subject: [PATCH 081/802] refactor: [#1159] use new tracker api client package in tests The Tracker API client was extracted into a new package. This removes the duplicate client in tests and starts using the new package. --- Cargo.lock | 1 + Cargo.toml | 1 + .../tracker-api-client/src/connection_info.rs | 10 -- tests/servers/api/connection_info.rs | 24 +-- tests/servers/api/environment.rs | 3 +- tests/servers/api/v1/client.rs | 161 ------------------ .../servers/api/v1/contract/authentication.rs | 4 +- .../api/v1/contract/context/auth_key.rs | 4 +- .../api/v1/contract/context/health_check.rs | 2 +- .../servers/api/v1/contract/context/stats.rs | 2 +- .../api/v1/contract/context/torrent.rs | 4 +- .../api/v1/contract/context/whitelist.rs | 2 +- tests/servers/api/v1/mod.rs | 1 - 13 files changed, 14 insertions(+), 205 deletions(-) delete mode 100644 tests/servers/api/v1/client.rs diff --git a/Cargo.lock b/Cargo.lock index 3b0cc9093..a8b23465a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3974,6 +3974,7 @@ dependencies = [ "serde_with", "thiserror 2.0.9", "tokio", + "torrust-tracker-api-client", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-contrib-bencode", diff --git a/Cargo.toml b/Cargo.toml index e57016596..0bf3e39e3 100644 --- a/Cargo.toml 
+++ b/Cargo.toml @@ -97,6 +97,7 @@ ignored = ["crossbeam-skiplist", "dashmap", "figment", "parking_lot", "serde_byt [dev-dependencies] local-ip-address = "0" mockall = "0" +torrust-tracker-api-client = { version = "3.0.0-develop", path = "packages/tracker-api-client" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "packages/test-helpers" } [workspace] diff --git a/packages/tracker-api-client/src/connection_info.rs b/packages/tracker-api-client/src/connection_info.rs index 32eeda686..5785f98e6 100644 --- a/packages/tracker-api-client/src/connection_info.rs +++ b/packages/tracker-api-client/src/connection_info.rs @@ -1,13 +1,3 @@ -#[must_use] -pub fn connection_with_invalid_token(bind_address: &str) -> ConnectionInfo { - ConnectionInfo::authenticated(bind_address, "invalid token") -} - -#[must_use] -pub fn connection_with_no_token(bind_address: &str) -> ConnectionInfo { - ConnectionInfo::anonymous(bind_address) -} - #[derive(Clone)] pub struct ConnectionInfo { pub bind_address: String, diff --git a/tests/servers/api/connection_info.rs b/tests/servers/api/connection_info.rs index 35314a2fd..1ae08921a 100644 --- a/tests/servers/api/connection_info.rs +++ b/tests/servers/api/connection_info.rs @@ -1,3 +1,5 @@ +use torrust_tracker_api_client::connection_info::ConnectionInfo; + pub fn connection_with_invalid_token(bind_address: &str) -> ConnectionInfo { ConnectionInfo::authenticated(bind_address, "invalid token") } @@ -5,25 +7,3 @@ pub fn connection_with_invalid_token(bind_address: &str) -> ConnectionInfo { pub fn connection_with_no_token(bind_address: &str) -> ConnectionInfo { ConnectionInfo::anonymous(bind_address) } - -#[derive(Clone)] -pub struct ConnectionInfo { - pub bind_address: String, - pub api_token: Option, -} - -impl ConnectionInfo { - pub fn authenticated(bind_address: &str, api_token: &str) -> Self { - Self { - bind_address: bind_address.to_string(), - api_token: Some(api_token.to_string()), - } - } - - pub fn anonymous(bind_address: 
&str) -> Self { - Self { - bind_address: bind_address.to_string(), - api_token: None, - } - } -} diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 00fb9d05b..c4f503f9c 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -4,6 +4,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use futures::executor::block_on; use tokio::sync::RwLock; +use torrust_tracker_api_client::connection_info::ConnectionInfo; use torrust_tracker_configuration::{Configuration, HttpApi}; use torrust_tracker_lib::bootstrap::app::initialize_with_configuration; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; @@ -14,8 +15,6 @@ use torrust_tracker_lib::servers::udp::server::banning::BanService; use torrust_tracker_lib::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; use torrust_tracker_primitives::peer; -use super::connection_info::ConnectionInfo; - pub struct Environment where S: std::fmt::Debug + std::fmt::Display, diff --git a/tests/servers/api/v1/client.rs b/tests/servers/api/v1/client.rs deleted file mode 100644 index 635331078..000000000 --- a/tests/servers/api/v1/client.rs +++ /dev/null @@ -1,161 +0,0 @@ -use hyper::HeaderMap; -use reqwest::Response; -use serde::Serialize; -use uuid::Uuid; - -use crate::common::http::{Query, QueryParam, ReqwestQuery}; -use crate::servers::api::connection_info::ConnectionInfo; - -/// API Client -pub struct Client { - connection_info: ConnectionInfo, - base_path: String, -} - -impl Client { - pub fn new(connection_info: ConnectionInfo) -> Self { - Self { - connection_info, - base_path: "/api/v1/".to_string(), - } - } - - pub async fn generate_auth_key(&self, seconds_valid: i32, headers: Option) -> Response { - self.post_empty(&format!("key/{}", &seconds_valid), headers).await - } - - pub async fn add_auth_key(&self, add_key_form: AddKeyForm, headers: Option) -> Response { - self.post_form("keys", &add_key_form, headers).await - } - - pub 
async fn delete_auth_key(&self, key: &str, headers: Option) -> Response { - self.delete(&format!("key/{}", &key), headers).await - } - - pub async fn reload_keys(&self, headers: Option) -> Response { - self.get("keys/reload", Query::default(), headers).await - } - - pub async fn whitelist_a_torrent(&self, info_hash: &str, headers: Option) -> Response { - self.post_empty(&format!("whitelist/{}", &info_hash), headers).await - } - - pub async fn remove_torrent_from_whitelist(&self, info_hash: &str, headers: Option) -> Response { - self.delete(&format!("whitelist/{}", &info_hash), headers).await - } - - pub async fn reload_whitelist(&self, headers: Option) -> Response { - self.get("whitelist/reload", Query::default(), headers).await - } - - pub async fn get_torrent(&self, info_hash: &str, headers: Option) -> Response { - self.get(&format!("torrent/{}", &info_hash), Query::default(), headers).await - } - - pub async fn get_torrents(&self, params: Query, headers: Option) -> Response { - self.get("torrents", params, headers).await - } - - pub async fn get_tracker_statistics(&self, headers: Option) -> Response { - self.get("stats", Query::default(), headers).await - } - - pub async fn get(&self, path: &str, params: Query, headers: Option) -> Response { - let mut query: Query = params; - - if let Some(token) = &self.connection_info.api_token { - query.add_param(QueryParam::new("token", token)); - }; - - self.get_request_with_query(path, query, headers).await - } - - pub async fn post_empty(&self, path: &str, headers: Option) -> Response { - let builder = reqwest::Client::new() - .post(self.base_url(path).clone()) - .query(&ReqwestQuery::from(self.query_with_token())); - - let builder = match headers { - Some(headers) => builder.headers(headers), - None => builder, - }; - - builder.send().await.unwrap() - } - - pub async fn post_form(&self, path: &str, form: &T, headers: Option) -> Response { - let builder = reqwest::Client::new() - .post(self.base_url(path).clone()) - 
.query(&ReqwestQuery::from(self.query_with_token())) - .json(&form); - - let builder = match headers { - Some(headers) => builder.headers(headers), - None => builder, - }; - - builder.send().await.unwrap() - } - - async fn delete(&self, path: &str, headers: Option) -> Response { - let builder = reqwest::Client::new() - .delete(self.base_url(path).clone()) - .query(&ReqwestQuery::from(self.query_with_token())); - - let builder = match headers { - Some(headers) => builder.headers(headers), - None => builder, - }; - - builder.send().await.unwrap() - } - - pub async fn get_request_with_query(&self, path: &str, params: Query, headers: Option) -> Response { - get(&self.base_url(path), Some(params), headers).await - } - - pub async fn get_request(&self, path: &str) -> Response { - get(&self.base_url(path), None, None).await - } - - fn query_with_token(&self) -> Query { - match &self.connection_info.api_token { - Some(token) => Query::params([QueryParam::new("token", token)].to_vec()), - None => Query::default(), - } - } - - fn base_url(&self, path: &str) -> String { - format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) - } -} - -pub async fn get(path: &str, query: Option, headers: Option) -> Response { - let builder = reqwest::Client::builder().build().unwrap(); - - let builder = match query { - Some(params) => builder.get(path).query(&ReqwestQuery::from(params)), - None => builder.get(path), - }; - - let builder = match headers { - Some(headers) => builder.headers(headers), - None => builder, - }; - - builder.send().await.unwrap() -} - -/// Returns a `HeaderMap` with a request id header -pub fn headers_with_request_id(request_id: Uuid) -> HeaderMap { - let mut headers = HeaderMap::new(); - headers.insert("x-request-id", request_id.to_string().parse().unwrap()); - headers -} - -#[derive(Serialize, Debug)] -pub struct AddKeyForm { - #[serde(rename = "key")] - pub opt_key: Option, - pub seconds_valid: Option, -} diff --git 
a/tests/servers/api/v1/contract/authentication.rs b/tests/servers/api/v1/contract/authentication.rs index 4e0cf49da..6cb1e52b9 100644 --- a/tests/servers/api/v1/contract/authentication.rs +++ b/tests/servers/api/v1/contract/authentication.rs @@ -1,10 +1,10 @@ +use torrust_tracker_api_client::common::http::{Query, QueryParam}; +use torrust_tracker_api_client::v1::client::{headers_with_request_id, Client}; use torrust_tracker_test_helpers::configuration; use uuid::Uuid; -use crate::common::http::{Query, QueryParam}; use crate::common::logging::{self, logs_contains_a_line_with}; use crate::servers::api::v1::asserts::{assert_token_not_valid, assert_unauthorized}; -use crate::servers::api::v1::client::{headers_with_request_id, Client}; use crate::servers::api::Started; #[tokio::test] diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs index 4dc039a9b..b143f6659 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -1,6 +1,7 @@ use std::time::Duration; use serde::Serialize; +use torrust_tracker_api_client::v1::client::{headers_with_request_id, AddKeyForm, Client}; use torrust_tracker_lib::core::auth::Key; use torrust_tracker_test_helpers::configuration; use uuid::Uuid; @@ -12,7 +13,6 @@ use crate::servers::api::v1::asserts::{ assert_invalid_auth_key_get_param, assert_invalid_auth_key_post_param, assert_ok, assert_token_not_valid, assert_unauthorized, assert_unprocessable_auth_key_duration_param, }; -use crate::servers::api::v1::client::{headers_with_request_id, AddKeyForm, Client}; use crate::servers::api::{force_database_error, Started}; #[tokio::test] @@ -462,6 +462,7 @@ async fn should_not_allow_reloading_keys_for_unauthenticated_users() { mod deprecated_generate_key_endpoint { + use torrust_tracker_api_client::v1::client::{headers_with_request_id, Client}; use torrust_tracker_lib::core::auth::Key; use 
torrust_tracker_test_helpers::configuration; use uuid::Uuid; @@ -472,7 +473,6 @@ mod deprecated_generate_key_endpoint { assert_auth_key_utf8, assert_failed_to_generate_key, assert_invalid_key_duration_param, assert_token_not_valid, assert_unauthorized, }; - use crate::servers::api::v1::client::{headers_with_request_id, Client}; use crate::servers::api::{force_database_error, Started}; #[tokio::test] diff --git a/tests/servers/api/v1/contract/context/health_check.rs b/tests/servers/api/v1/contract/context/health_check.rs index 32228575d..4c3509c66 100644 --- a/tests/servers/api/v1/contract/context/health_check.rs +++ b/tests/servers/api/v1/contract/context/health_check.rs @@ -1,8 +1,8 @@ +use torrust_tracker_api_client::v1::client::get; use torrust_tracker_lib::servers::apis::v1::context::health_check::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; use crate::common::logging; -use crate::servers::api::v1::client::get; use crate::servers::api::Started; #[tokio::test] diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index bc6e495a3..4a36e2561 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -1,6 +1,7 @@ use std::str::FromStr; use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_api_client::v1::client::{headers_with_request_id, Client}; use torrust_tracker_lib::servers::apis::v1::context::stats::resources::Stats; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; @@ -9,7 +10,6 @@ use uuid::Uuid; use crate::common::logging::{self, logs_contains_a_line_with}; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::servers::api::v1::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; -use crate::servers::api::v1::client::{headers_with_request_id, Client}; use 
crate::servers::api::Started; #[tokio::test] diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index 260fe4a3a..c5d8e2547 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -1,20 +1,20 @@ use std::str::FromStr; use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_api_client::common::http::{Query, QueryParam}; +use torrust_tracker_api_client::v1::client::{headers_with_request_id, Client}; use torrust_tracker_lib::servers::apis::v1::context::torrent::resources::peer::Peer; use torrust_tracker_lib::servers::apis::v1::context::torrent::resources::torrent::{self, Torrent}; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use uuid::Uuid; -use crate::common::http::{Query, QueryParam}; use crate::common::logging::{self, logs_contains_a_line_with}; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; use crate::servers::api::v1::asserts::{ assert_bad_request, assert_invalid_infohash_param, assert_not_found, assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, }; -use crate::servers::api::v1::client::{headers_with_request_id, Client}; use crate::servers::api::v1::contract::fixtures::{ invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found, }; diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs index d0a80e968..360e057ec 100644 --- a/tests/servers/api/v1/contract/context/whitelist.rs +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -1,6 +1,7 @@ use std::str::FromStr; use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_api_client::v1::client::{headers_with_request_id, Client}; use torrust_tracker_test_helpers::configuration; use uuid::Uuid; @@ -10,7 
+11,6 @@ use crate::servers::api::v1::asserts::{ assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, assert_invalid_infohash_param, assert_not_found, assert_ok, assert_token_not_valid, assert_unauthorized, }; -use crate::servers::api::v1::client::{headers_with_request_id, Client}; use crate::servers::api::v1::contract::fixtures::{ invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found, }; diff --git a/tests/servers/api/v1/mod.rs b/tests/servers/api/v1/mod.rs index 37298b377..e2db6b4ce 100644 --- a/tests/servers/api/v1/mod.rs +++ b/tests/servers/api/v1/mod.rs @@ -1,3 +1,2 @@ pub mod asserts; -pub mod client; pub mod contract; From aa7ffdf20c8f89a563cca2db19be516a7f103d85 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 10 Jan 2025 17:11:09 +0000 Subject: [PATCH 082/802] refactor: [#1159] API client. Extract Origin type Instead of using a plain string we now use an Origin type containing the base URL for the API without path or fragments. 
``` scheme://host:port/ ``` --- Cargo.lock | 2 + packages/tracker-api-client/Cargo.toml | 2 + .../tracker-api-client/src/connection_info.rs | 145 +++++++++++++++++- packages/tracker-api-client/src/v1/client.rs | 13 +- tests/servers/api/connection_info.rs | 10 +- tests/servers/api/environment.rs | 6 +- .../api/v1/contract/context/auth_key.rs | 16 +- .../api/v1/contract/context/health_check.rs | 5 +- .../servers/api/v1/contract/context/stats.rs | 4 +- .../api/v1/contract/context/torrent.rs | 8 +- .../api/v1/contract/context/whitelist.rs | 8 +- 11 files changed, 179 insertions(+), 40 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a8b23465a..6e7a10116 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3998,6 +3998,8 @@ dependencies = [ "hyper", "reqwest", "serde", + "thiserror 2.0.9", + "url", "uuid", ] diff --git a/packages/tracker-api-client/Cargo.toml b/packages/tracker-api-client/Cargo.toml index 388ad4bd2..ee45e12f7 100644 --- a/packages/tracker-api-client/Cargo.toml +++ b/packages/tracker-api-client/Cargo.toml @@ -18,4 +18,6 @@ version.workspace = true hyper = "1" reqwest = { version = "0", features = ["json"] } serde = { version = "1", features = ["derive"] } +thiserror = "2" +url = { version = "2", features = ["serde"] } uuid = { version = "1", features = ["v4"] } diff --git a/packages/tracker-api-client/src/connection_info.rs b/packages/tracker-api-client/src/connection_info.rs index 5785f98e6..1224527ae 100644 --- a/packages/tracker-api-client/src/connection_info.rs +++ b/packages/tracker-api-client/src/connection_info.rs @@ -1,23 +1,154 @@ +use std::str::FromStr; + +use thiserror::Error; +use url::Url; + #[derive(Clone)] pub struct ConnectionInfo { - pub bind_address: String, + pub origin: Origin, pub api_token: Option, } impl ConnectionInfo { #[must_use] - pub fn authenticated(bind_address: &str, api_token: &str) -> Self { + pub fn authenticated(origin: Origin, api_token: &str) -> Self { Self { - bind_address: bind_address.to_string(), + origin, 
api_token: Some(api_token.to_string()), } } #[must_use] - pub fn anonymous(bind_address: &str) -> Self { - Self { - bind_address: bind_address.to_string(), - api_token: None, + pub fn anonymous(origin: Origin) -> Self { + Self { origin, api_token: None } + } +} + +/// Represents the origin of a HTTP request. +/// +/// The format of the origin is a URL, but only the scheme, host, and port are used. +/// +/// Pattern: `scheme://host:port/` +#[derive(Debug, Clone)] +pub struct Origin { + url: Url, +} + +#[derive(Debug, Error)] +pub enum OriginError { + #[error("Invalid URL: {0}")] + InvalidUrl(#[from] url::ParseError), + + #[error("URL is missing scheme or host")] + InvalidOrigin, + + #[error("Invalid URL scheme, only http and https are supported")] + InvalidScheme, +} + +impl FromStr for Origin { + type Err = OriginError; + + fn from_str(s: &str) -> Result { + let mut url = Url::parse(s).map_err(OriginError::InvalidUrl)?; + + // Ensure the URL has a scheme and host + if url.scheme().is_empty() || url.host().is_none() { + return Err(OriginError::InvalidOrigin); + } + + if url.scheme() != "http" && url.scheme() != "https" { + return Err(OriginError::InvalidScheme); + } + + // Retain only the origin components + url.set_path("/"); + url.set_query(None); + url.set_fragment(None); + + Ok(Origin { url }) + } +} + +impl std::fmt::Display for Origin { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.url) + } +} + +impl Origin { + /// # Errors + /// + /// Will return an error if the string is not a valid URL containing a + /// scheme and host. 
+ pub fn new(s: &str) -> Result { + s.parse() + } + + #[must_use] + pub fn url(&self) -> &Url { + &self.url + } +} + +#[cfg(test)] +mod tests { + mod origin { + use crate::connection_info::Origin; + + #[test] + fn should_be_parsed_from_a_string_representing_a_url() { + let origin = Origin::new("https://example.com:8080/path?query#fragment").unwrap(); + + assert_eq!(origin.to_string(), "https://example.com:8080/"); + } + + mod when_parsing_from_url_string { + use crate::connection_info::Origin; + + #[test] + fn should_ignore_default_ports() { + let origin = Origin::new("http://example.com:80").unwrap(); // DevSkim: ignore DS137138 + assert_eq!(origin.to_string(), "http://example.com/"); // DevSkim: ignore DS137138 + + let origin = Origin::new("https://example.com:443").unwrap(); + assert_eq!(origin.to_string(), "https://example.com/"); + } + + #[test] + fn should_add_the_slash_after_the_host() { + let origin = Origin::new("https://example.com:1212").unwrap(); + + assert_eq!(origin.to_string(), "https://example.com:1212/"); + } + + #[test] + fn should_remove_extra_path_and_query_parameters() { + let origin = Origin::new("https://example.com:1212/path/to/resource?query=1#fragment").unwrap(); + + assert_eq!(origin.to_string(), "https://example.com:1212/"); + } + + #[test] + fn should_fail_when_the_scheme_is_missing() { + let result = Origin::new("example.com"); + + assert!(result.is_err()); + } + + #[test] + fn should_fail_when_the_scheme_is_not_supported() { + let result = Origin::new("udp://example.com"); + + assert!(result.is_err()); + } + + #[test] + fn should_fail_when_the_host_is_missing() { + let result = Origin::new("http://"); + + assert!(result.is_err()); + } } } } diff --git a/packages/tracker-api-client/src/v1/client.rs b/packages/tracker-api-client/src/v1/client.rs index b5c0dc60b..d48d4c008 100644 --- a/packages/tracker-api-client/src/v1/client.rs +++ b/packages/tracker-api-client/src/v1/client.rs @@ -1,6 +1,7 @@ use hyper::HeaderMap; use 
reqwest::Response; use serde::Serialize; +use url::Url; use uuid::Uuid; use crate::common::http::{Query, QueryParam, ReqwestQuery}; @@ -17,7 +18,7 @@ impl Client { pub fn new(connection_info: ConnectionInfo) -> Self { Self { connection_info, - base_path: "/api/v1/".to_string(), + base_path: "api/v1/".to_string(), } } @@ -121,11 +122,11 @@ impl Client { } pub async fn get_request_with_query(&self, path: &str, params: Query, headers: Option) -> Response { - get(&self.base_url(path), Some(params), headers).await + get(self.base_url(path), Some(params), headers).await } pub async fn get_request(&self, path: &str) -> Response { - get(&self.base_url(path), None, None).await + get(self.base_url(path), None, None).await } fn query_with_token(&self) -> Query { @@ -135,15 +136,15 @@ impl Client { } } - fn base_url(&self, path: &str) -> String { - format!("http://{}{}{path}", &self.connection_info.bind_address, &self.base_path) + fn base_url(&self, path: &str) -> Url { + Url::parse(&format!("{}{}{path}", &self.connection_info.origin, &self.base_path)).unwrap() } } /// # Panics /// /// Will panic if the request can't be sent -pub async fn get(path: &str, query: Option, headers: Option) -> Response { +pub async fn get(path: Url, query: Option, headers: Option) -> Response { let builder = reqwest::Client::builder().build().unwrap(); let builder = match query { diff --git a/tests/servers/api/connection_info.rs b/tests/servers/api/connection_info.rs index 1ae08921a..e78f4cbb7 100644 --- a/tests/servers/api/connection_info.rs +++ b/tests/servers/api/connection_info.rs @@ -1,9 +1,9 @@ -use torrust_tracker_api_client::connection_info::ConnectionInfo; +use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; -pub fn connection_with_invalid_token(bind_address: &str) -> ConnectionInfo { - ConnectionInfo::authenticated(bind_address, "invalid token") +pub fn connection_with_invalid_token(origin: Origin) -> ConnectionInfo { + ConnectionInfo::authenticated(origin, "invalid 
token") } -pub fn connection_with_no_token(bind_address: &str) -> ConnectionInfo { - ConnectionInfo::anonymous(bind_address) +pub fn connection_with_no_token(origin: Origin) -> ConnectionInfo { + ConnectionInfo::anonymous(origin) } diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index c4f503f9c..70f2d4c65 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use futures::executor::block_on; use tokio::sync::RwLock; -use torrust_tracker_api_client::connection_info::ConnectionInfo; +use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; use torrust_tracker_configuration::{Configuration, HttpApi}; use torrust_tracker_lib::bootstrap::app::initialize_with_configuration; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; @@ -92,8 +92,10 @@ impl Environment { } pub fn get_connection_info(&self) -> ConnectionInfo { + let origin = Origin::new(&format!("http://{}/", self.server.state.local_addr)).unwrap(); // DevSkim: ignore DS137138 + ConnectionInfo { - bind_address: self.server.state.local_addr.to_string(), + origin, api_token: self.config.access_tokens.get("admin").cloned(), } } diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs index b143f6659..9b2e740c0 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -81,7 +81,7 @@ async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() let request_id = Uuid::new_v4(); - let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_invalid_token(env.get_connection_info().origin)) .add_auth_key( AddKeyForm { opt_key: None, @@ -100,7 +100,7 @@ async fn 
should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() let request_id = Uuid::new_v4(); - let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_no_token(env.get_connection_info().origin)) .add_auth_key( AddKeyForm { opt_key: None, @@ -332,7 +332,7 @@ async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { let request_id = Uuid::new_v4(); - let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_invalid_token(env.get_connection_info().origin)) .delete_auth_key(&auth_key.key.to_string(), Some(headers_with_request_id(request_id))) .await; @@ -352,7 +352,7 @@ async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { let request_id = Uuid::new_v4(); - let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_no_token(env.get_connection_info().origin)) .delete_auth_key(&auth_key.key.to_string(), Some(headers_with_request_id(request_id))) .await; @@ -433,7 +433,7 @@ async fn should_not_allow_reloading_keys_for_unauthenticated_users() { let request_id = Uuid::new_v4(); - let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_invalid_token(env.get_connection_info().origin)) .reload_keys(Some(headers_with_request_id(request_id))) .await; @@ -446,7 +446,7 @@ async fn should_not_allow_reloading_keys_for_unauthenticated_users() { let request_id = Uuid::new_v4(); - let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_no_token(env.get_connection_info().origin)) .reload_keys(Some(headers_with_request_id(request_id))) .await; @@ -507,13 +507,13 @@ mod 
deprecated_generate_key_endpoint { let request_id = Uuid::new_v4(); let seconds_valid = 60; - let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_invalid_token(env.get_connection_info().origin)) .generate_auth_key(seconds_valid, Some(headers_with_request_id(request_id))) .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_no_token(env.get_connection_info().origin)) .generate_auth_key(seconds_valid, None) .await; diff --git a/tests/servers/api/v1/contract/context/health_check.rs b/tests/servers/api/v1/contract/context/health_check.rs index 4c3509c66..4d37917fc 100644 --- a/tests/servers/api/v1/contract/context/health_check.rs +++ b/tests/servers/api/v1/contract/context/health_check.rs @@ -1,6 +1,7 @@ use torrust_tracker_api_client::v1::client::get; use torrust_tracker_lib::servers::apis::v1::context::health_check::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; +use url::Url; use crate::common::logging; use crate::servers::api::Started; @@ -11,9 +12,9 @@ async fn health_check_endpoint_should_return_status_ok_if_api_is_running() { let env = Started::new(&configuration::ephemeral().into()).await; - let url = format!("http://{}/api/health_check", env.get_connection_info().bind_address); + let url = Url::parse(&format!("{}api/health_check", env.get_connection_info().origin)).unwrap(); - let response = get(&url, None, None).await; + let response = get(url, None, None).await; assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index 4a36e2561..2eda0ed4a 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ 
b/tests/servers/api/v1/contract/context/stats.rs @@ -79,7 +79,7 @@ async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() let request_id = Uuid::new_v4(); - let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_invalid_token(env.get_connection_info().origin)) .get_tracker_statistics(Some(headers_with_request_id(request_id))) .await; @@ -92,7 +92,7 @@ async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() let request_id = Uuid::new_v4(); - let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_no_token(env.get_connection_info().origin)) .get_tracker_statistics(Some(headers_with_request_id(request_id))) .await; diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index c5d8e2547..76646db14 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -263,7 +263,7 @@ async fn should_not_allow_getting_torrents_for_unauthenticated_users() { let request_id = Uuid::new_v4(); - let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_invalid_token(env.get_connection_info().origin)) .get_torrents(Query::empty(), Some(headers_with_request_id(request_id))) .await; @@ -276,7 +276,7 @@ async fn should_not_allow_getting_torrents_for_unauthenticated_users() { let request_id = Uuid::new_v4(); - let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_no_token(env.get_connection_info().origin)) .get_torrents(Query::default(), Some(headers_with_request_id(request_id))) .await; @@ -382,7 +382,7 @@ async fn 
should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { let request_id = Uuid::new_v4(); - let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_invalid_token(env.get_connection_info().origin)) .get_torrent(&info_hash.to_string(), Some(headers_with_request_id(request_id))) .await; @@ -395,7 +395,7 @@ async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { let request_id = Uuid::new_v4(); - let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_no_token(env.get_connection_info().origin)) .get_torrent(&info_hash.to_string(), Some(headers_with_request_id(request_id))) .await; diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs index 360e057ec..6dde663a5 100644 --- a/tests/servers/api/v1/contract/context/whitelist.rs +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -76,7 +76,7 @@ async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { let request_id = Uuid::new_v4(); - let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_invalid_token(env.get_connection_info().origin)) .whitelist_a_torrent(&info_hash, Some(headers_with_request_id(request_id))) .await; @@ -89,7 +89,7 @@ async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { let request_id = Uuid::new_v4(); - let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_no_token(env.get_connection_info().origin)) .whitelist_a_torrent(&info_hash, Some(headers_with_request_id(request_id))) .await; @@ -270,7 +270,7 @@ async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthentica 
let request_id = Uuid::new_v4(); - let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_invalid_token(env.get_connection_info().origin)) .remove_torrent_from_whitelist(&hash, Some(headers_with_request_id(request_id))) .await; @@ -285,7 +285,7 @@ async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthentica let request_id = Uuid::new_v4(); - let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_no_token(env.get_connection_info().origin)) .remove_torrent_from_whitelist(&hash, Some(headers_with_request_id(request_id))) .await; From a7ceb0f135c6673ba8e7a8845f43facd626708a9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sat, 11 Jan 2025 18:47:05 +0000 Subject: [PATCH 083/802] refactor: [#1140] move http tracker logic to http-protocol and primitives packages - Generic logic for http tracker has bben moved to http-protocol package (bittorrent-http-protocol crate). - Generic tracker types like AnnounceData and ScrapeData have been moved to the primitives package (torrust-tracker-primitives crate). This has also a desiderable side effect: generic re-usable domain logic has been decoupled from Axum framework. 
--- Cargo.lock | 11 +++- Cargo.toml | 2 - packages/http-protocol/Cargo.toml | 8 +++ packages/http-protocol/src/lib.rs | 1 + packages/http-protocol/src/v1/mod.rs | 4 ++ .../http-protocol/src}/v1/query.rs | 8 +-- .../src}/v1/requests/announce.rs | 14 ++--- .../http-protocol/src}/v1/requests/mod.rs | 0 .../http-protocol/src}/v1/requests/scrape.rs | 14 ++--- .../src}/v1/responses/announce.rs | 36 +++--------- .../http-protocol/src}/v1/responses/error.rs | 12 ++-- .../http-protocol/src/v1/responses/mod.rs | 9 +++ .../http-protocol/src}/v1/responses/scrape.rs | 17 ++---- packages/http-protocol/src/v1/services/mod.rs | 1 + .../src}/v1/services/peer_ip_resolver.rs | 4 +- packages/primitives/Cargo.toml | 1 + packages/primitives/src/core.rs | 58 +++++++++++++++++++ packages/primitives/src/lib.rs | 1 + src/core/error.rs | 9 +++ src/core/mod.rs | 52 +---------------- src/servers/http/mod.rs | 26 ++++----- .../http/v1/extractors/announce_request.rs | 18 +++--- .../http/v1/extractors/authentication_key.rs | 10 ++-- .../http/v1/extractors/client_ip_sources.rs | 3 +- .../http/v1/extractors/scrape_request.rs | 18 +++--- src/servers/http/v1/handlers/announce.rs | 33 ++++++----- src/servers/http/v1/handlers/common/auth.rs | 2 +- .../http/v1/handlers/common/peer_ip.rs | 16 +---- src/servers/http/v1/handlers/mod.rs | 11 ---- src/servers/http/v1/handlers/scrape.rs | 36 +++++++----- src/servers/http/v1/mod.rs | 3 - src/servers/http/v1/responses/mod.rs | 19 ------ src/servers/http/v1/services/announce.rs | 6 +- src/servers/http/v1/services/mod.rs | 1 - src/servers/http/v1/services/scrape.rs | 9 ++- 35 files changed, 235 insertions(+), 238 deletions(-) create mode 100644 packages/http-protocol/src/v1/mod.rs rename {src/servers/http => packages/http-protocol/src}/v1/query.rs (97%) rename {src/servers/http => packages/http-protocol/src}/v1/requests/announce.rs (97%) rename {src/servers/http => packages/http-protocol/src}/v1/requests/mod.rs (100%) rename {src/servers/http => 
packages/http-protocol/src}/v1/requests/scrape.rs (89%) rename {src/servers/http => packages/http-protocol/src}/v1/responses/announce.rs (90%) rename {src/servers/http => packages/http-protocol/src}/v1/responses/error.rs (88%) create mode 100644 packages/http-protocol/src/v1/responses/mod.rs rename {src/servers/http => packages/http-protocol/src}/v1/responses/scrape.rs (89%) create mode 100644 packages/http-protocol/src/v1/services/mod.rs rename {src/servers/http => packages/http-protocol/src}/v1/services/peer_ip_resolver.rs (96%) create mode 100644 packages/primitives/src/core.rs delete mode 100644 src/servers/http/v1/responses/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 6e7a10116..c097ed80e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -553,7 +553,15 @@ version = "3.0.0-develop" dependencies = [ "aquatic_udp_protocol", "bittorrent-primitives", + "derive_more", + "multimap", "percent-encoding", + "serde", + "serde_bencode", + "thiserror 2.0.9", + "torrust-tracker-configuration", + "torrust-tracker-contrib-bencode", + "torrust-tracker-located-error", "torrust-tracker-primitives", ] @@ -3955,7 +3963,6 @@ dependencies = [ "lazy_static", "local-ip-address", "mockall", - "multimap", "parking_lot", "percent-encoding", "pin-project-lite", @@ -3977,7 +3984,6 @@ dependencies = [ "torrust-tracker-api-client", "torrust-tracker-clock", "torrust-tracker-configuration", - "torrust-tracker-contrib-bencode", "torrust-tracker-located-error", "torrust-tracker-primitives", "torrust-tracker-test-helpers", @@ -4082,6 +4088,7 @@ dependencies = [ "tdyne-peer-id", "tdyne-peer-id-registry", "thiserror 2.0.9", + "torrust-tracker-configuration", "zerocopy", ] diff --git a/Cargo.toml b/Cargo.toml index 0bf3e39e3..4b4862cca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -58,7 +58,6 @@ http-body = "1" hyper = "1" hyper-util = { version = "0", features = ["http1", "http2", "tokio"] } lazy_static = "1" -multimap = "0" parking_lot = "0" percent-encoding = "2" pin-project-lite = "0" @@ -79,7 
+78,6 @@ thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-develop", path = "contrib/bencode" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "packages/located-error" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "packages/primitives" } torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "packages/torrent-repository" } diff --git a/packages/http-protocol/Cargo.toml b/packages/http-protocol/Cargo.toml index 4f20407b6..05b69d201 100644 --- a/packages/http-protocol/Cargo.toml +++ b/packages/http-protocol/Cargo.toml @@ -17,5 +17,13 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" +derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } +multimap = "0" percent-encoding = "2" +serde = { version = "1", features = ["derive"] } +serde_bencode = "0" +thiserror = "2" +torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0-develop", path = "../../contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } diff --git a/packages/http-protocol/src/lib.rs b/packages/http-protocol/src/lib.rs index 44237d6fd..6525a6dca 100644 --- a/packages/http-protocol/src/lib.rs +++ b/packages/http-protocol/src/lib.rs @@ -1,2 +1,3 @@ //! Primitive types and function for `BitTorrent` HTTP trackers. 
pub mod percent_encoding; +pub mod v1; diff --git a/packages/http-protocol/src/v1/mod.rs b/packages/http-protocol/src/v1/mod.rs new file mode 100644 index 000000000..d52ba7609 --- /dev/null +++ b/packages/http-protocol/src/v1/mod.rs @@ -0,0 +1,4 @@ +pub mod query; +pub mod requests; +pub mod responses; +pub mod services; diff --git a/src/servers/http/v1/query.rs b/packages/http-protocol/src/v1/query.rs similarity index 97% rename from src/servers/http/v1/query.rs rename to packages/http-protocol/src/v1/query.rs index e65f62ada..8f9170aad 100644 --- a/src/servers/http/v1/query.rs +++ b/packages/http-protocol/src/v1/query.rs @@ -224,7 +224,7 @@ impl std::fmt::Display for FieldValuePairSet { mod tests { mod url_query { - use crate::servers::http::v1::query::Query; + use crate::v1::query::Query; #[test] fn should_parse_the_query_params_from_an_url_query_string() { @@ -277,7 +277,7 @@ mod tests { } mod should_allow_more_than_one_value_for_the_same_param { - use crate::servers::http::v1::query::Query; + use crate::v1::query::Query; #[test] fn instantiated_from_a_vector() { @@ -299,7 +299,7 @@ mod tests { } mod should_be_displayed { - use crate::servers::http::v1::query::Query; + use crate::v1::query::Query; #[test] fn with_one_param() { @@ -320,7 +320,7 @@ mod tests { } mod param_name_value_pair { - use crate::servers::http::v1::query::NameValuePair; + use crate::v1::query::NameValuePair; #[test] fn should_parse_a_single_query_param() { diff --git a/src/servers/http/v1/requests/announce.rs b/packages/http-protocol/src/v1/requests/announce.rs similarity index 97% rename from src/servers/http/v1/requests/announce.rs rename to packages/http-protocol/src/v1/requests/announce.rs index e8a730e9c..28cecd386 100644 --- a/src/servers/http/v1/requests/announce.rs +++ b/packages/http-protocol/src/v1/requests/announce.rs @@ -6,14 +6,14 @@ use std::panic::Location; use std::str::FromStr; use aquatic_udp_protocol::{NumberOfBytes, PeerId}; -use 
bittorrent_http_protocol::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use bittorrent_primitives::info_hash::{self, InfoHash}; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; use torrust_tracker_primitives::peer; -use crate::servers::http::v1::query::{ParseQueryError, Query}; -use crate::servers::http::v1::responses; +use crate::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; +use crate::v1::query::{ParseQueryError, Query}; +use crate::v1::responses; // Query param names const INFO_HASH: &str = "info_hash"; @@ -381,8 +381,8 @@ mod tests { use aquatic_udp_protocol::{NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; - use crate::servers::http::v1::query::Query; - use crate::servers::http::v1::requests::announce::{ + use crate::v1::query::Query; + use crate::v1::requests::announce::{ Announce, Compact, Event, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, NUMWANT, PEER_ID, PORT, UPLOADED, }; @@ -452,8 +452,8 @@ mod tests { mod when_it_is_instantiated_from_the_url_query_params { - use crate::servers::http::v1::query::Query; - use crate::servers::http::v1::requests::announce::{ + use crate::v1::query::Query; + use crate::v1::requests::announce::{ Announce, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, NUMWANT, PEER_ID, PORT, UPLOADED, }; diff --git a/src/servers/http/v1/requests/mod.rs b/packages/http-protocol/src/v1/requests/mod.rs similarity index 100% rename from src/servers/http/v1/requests/mod.rs rename to packages/http-protocol/src/v1/requests/mod.rs diff --git a/src/servers/http/v1/requests/scrape.rs b/packages/http-protocol/src/v1/requests/scrape.rs similarity index 89% rename from src/servers/http/v1/requests/scrape.rs rename to packages/http-protocol/src/v1/requests/scrape.rs index a8e76282e..ae8e41cc2 100644 --- a/src/servers/http/v1/requests/scrape.rs +++ b/packages/http-protocol/src/v1/requests/scrape.rs @@ -3,13 +3,13 @@ //! 
Data structures and logic for parsing the `scrape` request. use std::panic::Location; -use bittorrent_http_protocol::percent_encoding::percent_decode_info_hash; use bittorrent_primitives::info_hash::{self, InfoHash}; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; -use crate::servers::http::v1::query::Query; -use crate::servers::http::v1::responses; +use crate::percent_encoding::percent_decode_info_hash; +use crate::v1::query::Query; +use crate::v1::responses; // Query param names const INFO_HASH: &str = "info_hash"; @@ -86,8 +86,8 @@ mod tests { use bittorrent_primitives::info_hash::InfoHash; - use crate::servers::http::v1::query::Query; - use crate::servers::http::v1::requests::scrape::{Scrape, INFO_HASH}; + use crate::v1::query::Query; + use crate::v1::requests::scrape::{Scrape, INFO_HASH}; #[test] fn should_be_instantiated_from_the_url_query_with_only_one_infohash() { @@ -107,8 +107,8 @@ mod tests { mod when_it_is_instantiated_from_the_url_query_params { - use crate::servers::http::v1::query::Query; - use crate::servers::http::v1::requests::scrape::{Scrape, INFO_HASH}; + use crate::v1::query::Query; + use crate::v1::requests::scrape::{Scrape, INFO_HASH}; #[test] fn it_should_fail_if_the_query_does_not_include_the_info_hash_param() { diff --git a/src/servers/http/v1/responses/announce.rs b/packages/http-protocol/src/v1/responses/announce.rs similarity index 90% rename from src/servers/http/v1/responses/announce.rs rename to packages/http-protocol/src/v1/responses/announce.rs index 925c0893e..986a881a5 100644 --- a/src/servers/http/v1/responses/announce.rs +++ b/packages/http-protocol/src/v1/responses/announce.rs @@ -1,18 +1,14 @@ -//! `Announce` response for the HTTP tracker [`announce`](crate::servers::http::v1::requests::announce::Announce) request. +//! `Announce` response for the HTTP tracker [`announce`](bittorrent_http_protocol::v1::requests::announce::Announce) request. //! //! 
Data structures and logic to build the `announce` response. use std::io::Write; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; -use axum::http::StatusCode; use derive_more::{AsRef, Constructor, From}; use torrust_tracker_contrib_bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut}; +use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; -use super::Response; -use crate::core::AnnounceData; -use crate::servers::http::v1::responses; - /// An [`Announce`] response, that can be anything that is convertible from [`AnnounceData`]. /// /// The [`Announce`] can built from any data that implements: [`From`] and [`Into>`]. @@ -35,7 +31,7 @@ pub struct Announce where E: From + Into>, { - data: E, + pub data: E, } /// Build any [`Announce`] from an [`AnnounceData`]. @@ -45,24 +41,6 @@ impl + Into>> From for Announce { } } -/// Convert any Announce [`Announce`] into a [`axum::response::Response`] -impl + Into>> axum::response::IntoResponse for Announce -where - Announce: Response, -{ - fn into_response(self) -> axum::response::Response { - axum::response::IntoResponse::into_response(self.body().map(|bytes| (StatusCode::OK, bytes))) - } -} - -/// Implement the [`Response`] for the [`Announce`]. 
-/// -impl + Into>> Response for Announce { - fn body(self) -> Result, responses::error::Error> { - Ok(self.data.into()) - } -} - /// Format of the [`Normal`] (Non-Compact) Encoding pub struct Normal { complete: i64, @@ -302,11 +280,11 @@ mod tests { use aquatic_udp_protocol::PeerId; use torrust_tracker_configuration::AnnouncePolicy; + use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::core::AnnounceData; - use crate::servers::http::v1::responses::announce::{Announce, Compact, Normal, Response}; + use crate::v1::responses::announce::{Announce, Compact, Normal}; // Some ascii values used in tests: // @@ -345,7 +323,7 @@ mod tests { #[test] fn non_compact_announce_response_can_be_bencoded() { let response: Announce = setup_announce_data().into(); - let bytes = response.body().expect("it should encode the response"); + let bytes = response.data.into(); // cspell:disable-next-line let expected_bytes = b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peersld2:ip15:105.105.105.1057:peer id20:-qB000000000000000014:porti28784eed2:ip39:6969:6969:6969:6969:6969:6969:6969:69697:peer id20:-qB000000000000000024:porti28784eeee"; @@ -359,7 +337,7 @@ mod tests { #[test] fn compact_announce_response_can_be_bencoded() { let response: Announce = setup_announce_data().into(); - let bytes = response.body().expect("it should encode the response"); + let bytes = response.data.into(); let expected_bytes = // cspell:disable-next-line diff --git a/src/servers/http/v1/responses/error.rs b/packages/http-protocol/src/v1/responses/error.rs similarity index 88% rename from src/servers/http/v1/responses/error.rs rename to packages/http-protocol/src/v1/responses/error.rs index 7223063fd..9aca9c71c 100644 --- a/src/servers/http/v1/responses/error.rs +++ b/packages/http-protocol/src/v1/responses/error.rs @@ -11,10 +11,10 @@ //! 
> **NOTICE**: error responses are bencoded and always have a `200 OK` status //! > code. The official `BitTorrent` specification does not specify the status //! > code. -use axum::http::StatusCode; -use axum::response::{IntoResponse, Response}; use serde::Serialize; +use crate::v1::services::peer_ip_resolver::PeerIpResolutionError; + /// `Error` response for the [`HTTP tracker`](crate::servers::http). #[derive(Serialize, Debug, PartialEq)] pub struct Error { @@ -47,9 +47,11 @@ impl Error { } } -impl IntoResponse for Error { - fn into_response(self) -> Response { - (StatusCode::OK, self.write()).into_response() +impl From for Error { + fn from(err: PeerIpResolutionError) -> Self { + Self { + failure_reason: format!("Error resolving peer IP: {err}"), + } } } diff --git a/packages/http-protocol/src/v1/responses/mod.rs b/packages/http-protocol/src/v1/responses/mod.rs new file mode 100644 index 000000000..495b1eb84 --- /dev/null +++ b/packages/http-protocol/src/v1/responses/mod.rs @@ -0,0 +1,9 @@ +//! HTTP responses for the HTTP tracker. +//! +//! Refer to the generic [HTTP server documentation](crate::servers::http) for +//! more information about the HTTP tracker. +pub mod announce; +pub mod error; +pub mod scrape; + +pub use announce::{Announce, Compact, Normal}; diff --git a/src/servers/http/v1/responses/scrape.rs b/packages/http-protocol/src/v1/responses/scrape.rs similarity index 89% rename from src/servers/http/v1/responses/scrape.rs rename to packages/http-protocol/src/v1/responses/scrape.rs index 1f367a9c9..a52fa263c 100644 --- a/src/servers/http/v1/responses/scrape.rs +++ b/packages/http-protocol/src/v1/responses/scrape.rs @@ -1,13 +1,10 @@ -//! `Scrape` response for the HTTP tracker [`scrape`](crate::servers::http::v1::requests::scrape::Scrape) request. +//! `Scrape` response for the HTTP tracker [`scrape`](bittorrent_http_protocol::v1::requests::scrape::Scrape) request. //! //! Data structures and logic to build the `scrape` response. 
use std::borrow::Cow; -use axum::http::StatusCode; -use axum::response::{IntoResponse, Response}; use torrust_tracker_contrib_bencode::{ben_int, ben_map, BMutAccess}; - -use crate::core::ScrapeData; +use torrust_tracker_primitives::core::ScrapeData; /// The `Scrape` response for the HTTP tracker. /// @@ -82,21 +79,15 @@ impl From for Bencoded { } } -impl IntoResponse for Bencoded { - fn into_response(self) -> Response { - (StatusCode::OK, self.body()).into_response() - } -} - #[cfg(test)] mod tests { mod scrape_response { use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::core::ScrapeData; - use crate::servers::http::v1::responses::scrape::Bencoded; + use crate::v1::responses::scrape::Bencoded; fn sample_scrape_data() -> ScrapeData { let info_hash = InfoHash::from_bytes(&[0x69; 20]); diff --git a/packages/http-protocol/src/v1/services/mod.rs b/packages/http-protocol/src/v1/services/mod.rs new file mode 100644 index 000000000..de800f630 --- /dev/null +++ b/packages/http-protocol/src/v1/services/mod.rs @@ -0,0 +1 @@ +pub mod peer_ip_resolver; diff --git a/src/servers/http/v1/services/peer_ip_resolver.rs b/packages/http-protocol/src/v1/services/peer_ip_resolver.rs similarity index 96% rename from src/servers/http/v1/services/peer_ip_resolver.rs rename to packages/http-protocol/src/v1/services/peer_ip_resolver.rs index 56bd3d86f..366f8820c 100644 --- a/src/servers/http/v1/services/peer_ip_resolver.rs +++ b/packages/http-protocol/src/v1/services/peer_ip_resolver.rs @@ -142,7 +142,7 @@ mod tests { use std::str::FromStr; use super::invoke; - use crate::servers::http::v1::services::peer_ip_resolver::{ClientIpSources, PeerIpResolutionError}; + use crate::v1::services::peer_ip_resolver::{ClientIpSources, PeerIpResolutionError}; #[test] fn it_should_get_the_peer_ip_from_the_connection_info() { @@ -181,7 +181,7 @@ mod tests { use std::net::IpAddr; use 
std::str::FromStr; - use crate::servers::http::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; + use crate::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; #[test] fn it_should_get_the_peer_ip_from_the_right_most_ip_in_the_x_forwarded_for_header() { diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index 66b81d65d..b83886385 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -23,4 +23,5 @@ serde = { version = "1", features = ["derive"] } tdyne-peer-id = "1" tdyne-peer-id-registry = "0" thiserror = "2" +torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } zerocopy = "0.7" diff --git a/packages/primitives/src/core.rs b/packages/primitives/src/core.rs new file mode 100644 index 000000000..0c0f68b8b --- /dev/null +++ b/packages/primitives/src/core.rs @@ -0,0 +1,58 @@ +use std::collections::HashMap; +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; +use derive_more::derive::Constructor; +use torrust_tracker_configuration::AnnouncePolicy; + +use crate::peer; +use crate::swarm_metadata::SwarmMetadata; + +/// Structure that holds the data returned by the `announce` request. +#[derive(Clone, Debug, PartialEq, Constructor, Default)] +pub struct AnnounceData { + /// The list of peers that are downloading the same torrent. + /// It excludes the peer that made the request. + pub peers: Vec>, + /// Swarm statistics + pub stats: SwarmMetadata, + pub policy: AnnouncePolicy, +} + +/// Structure that holds the data returned by the `scrape` request. +#[derive(Debug, PartialEq, Default)] +pub struct ScrapeData { + /// A map of infohashes and swarm metadata for each torrent. + pub files: HashMap, +} + +impl ScrapeData { + /// Creates a new empty `ScrapeData` with no files (torrents). 
+ #[must_use] + pub fn empty() -> Self { + let files: HashMap = HashMap::new(); + Self { files } + } + + /// Creates a new `ScrapeData` with zeroed metadata for each torrent. + #[must_use] + pub fn zeroed(info_hashes: &Vec) -> Self { + let mut scrape_data = Self::empty(); + + for info_hash in info_hashes { + scrape_data.add_file(info_hash, SwarmMetadata::zeroed()); + } + + scrape_data + } + + /// Adds a torrent to the `ScrapeData`. + pub fn add_file(&mut self, info_hash: &InfoHash, swarm_metadata: SwarmMetadata) { + self.files.insert(*info_hash, swarm_metadata); + } + + /// Adds a torrent to the `ScrapeData` with zeroed metadata. + pub fn add_file_with_zeroed_metadata(&mut self, info_hash: &InfoHash) { + self.files.insert(*info_hash, SwarmMetadata::zeroed()); + } +} diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index d5c6fc525..55f90ef20 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -4,6 +4,7 @@ //! which is a `BitTorrent` tracker server. These structures are used not only //! by the tracker server crate, but also by other crates in the Torrust //! ecosystem. +pub mod core; pub mod pagination; pub mod peer; pub mod swarm_metadata; diff --git a/src/core/error.rs b/src/core/error.rs index ba87c84c8..f0de7df40 100644 --- a/src/core/error.rs +++ b/src/core/error.rs @@ -8,6 +8,7 @@ //! 
use std::panic::Location; +use bittorrent_http_protocol::v1::responses; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_located_error::LocatedError; @@ -53,3 +54,11 @@ pub enum PeerKeyError { source: LocatedError<'static, databases::error::Error>, }, } + +impl From for responses::error::Error { + fn from(err: Error) -> Self { + responses::error::Error { + failure_reason: format!("Tracker error: {err}"), + } + } +} diff --git a/src/core/mod.rs b/src/core/mod.rs index b5759709b..6ba8e94ad 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -449,7 +449,6 @@ pub mod torrent; pub mod peer_tests; use std::cmp::max; -use std::collections::HashMap; use std::net::IpAddr; use std::panic::Location; use std::sync::Arc; @@ -458,13 +457,13 @@ use std::time::Duration; use auth::PeerKey; use bittorrent_primitives::info_hash::InfoHash; use databases::driver::Driver; -use derive_more::Constructor; use error::PeerKeyError; use tokio::sync::mpsc::error::SendError; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::v2_0_0::database; use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT}; use torrust_tracker_located_error::Located; +use torrust_tracker_primitives::core::{AnnounceData, ScrapeData}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; @@ -511,17 +510,6 @@ pub struct Tracker { stats_repository: statistics::repository::Repository, } -/// Structure that holds the data returned by the `announce` request. -#[derive(Clone, Debug, PartialEq, Constructor, Default)] -pub struct AnnounceData { - /// The list of peers that are downloading the same torrent. - /// It excludes the peer that made the request. 
- pub peers: Vec>, - /// Swarm statistics - pub stats: SwarmMetadata, - pub policy: AnnouncePolicy, -} - /// How many peers the peer announcing wants in the announce response. #[derive(Clone, Debug, PartialEq, Default)] pub enum PeersWanted { @@ -564,44 +552,6 @@ impl From for PeersWanted { } } -/// Structure that holds the data returned by the `scrape` request. -#[derive(Debug, PartialEq, Default)] -pub struct ScrapeData { - /// A map of infohashes and swarm metadata for each torrent. - pub files: HashMap, -} - -impl ScrapeData { - /// Creates a new empty `ScrapeData` with no files (torrents). - #[must_use] - pub fn empty() -> Self { - let files: HashMap = HashMap::new(); - Self { files } - } - - /// Creates a new `ScrapeData` with zeroed metadata for each torrent. - #[must_use] - pub fn zeroed(info_hashes: &Vec) -> Self { - let mut scrape_data = Self::empty(); - - for info_hash in info_hashes { - scrape_data.add_file(info_hash, SwarmMetadata::zeroed()); - } - - scrape_data - } - - /// Adds a torrent to the `ScrapeData`. - pub fn add_file(&mut self, info_hash: &InfoHash, swarm_metadata: SwarmMetadata) { - self.files.insert(*info_hash, swarm_metadata); - } - - /// Adds a torrent to the `ScrapeData` with zeroed metadata. - pub fn add_file_with_zeroed_metadata(&mut self, info_hash: &InfoHash) { - self.files.insert(*info_hash, SwarmMetadata::zeroed()); - } -} - /// This type contains the info needed to add a new tracker key. /// /// You can upload a pre-generated key or let the app to generate a new one. diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs index 6dfb6ce7c..fa0ccc776 100644 --- a/src/servers/http/mod.rs +++ b/src/servers/http/mod.rs @@ -43,18 +43,18 @@ //! //! Parameter | Type | Description | Required | Default | Example //! ---|---|---|---|---|--- -//! [`info_hash`](crate::servers::http::v1::requests::announce::Announce::info_hash) | percent encoded of 20-byte array | The `Info Hash` of the torrent. 
| Yes | No | `%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00` +//! [`info_hash`](bittorrent_http_protocol::v1::requests::announce::Announce::info_hash) | percent encoded of 20-byte array | The `Info Hash` of the torrent. | Yes | No | `%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00` //! `peer_addr` | string |The IP address of the peer. | No | No | `2.137.87.41` -//! [`downloaded`](crate::servers::http::v1::requests::announce::Announce::downloaded) | positive integer |The number of bytes downloaded by the peer. | No | `0` | `0` -//! [`uploaded`](crate::servers::http::v1::requests::announce::Announce::uploaded) | positive integer | The number of bytes uploaded by the peer. | No | `0` | `0` -//! [`peer_id`](crate::servers::http::v1::requests::announce::Announce::peer_id) | percent encoded of 20-byte array | The ID of the peer. | Yes | No | `-qB00000000000000001` -//! [`port`](crate::servers::http::v1::requests::announce::Announce::port) | positive integer | The port used by the peer. | Yes | No | `17548` -//! [`left`](crate::servers::http::v1::requests::announce::Announce::left) | positive integer | The number of bytes pending to download. | No | `0` | `0` -//! [`event`](crate::servers::http::v1::requests::announce::Announce::event) | positive integer | The event that triggered the `Announce` request: `started`, `completed`, `stopped` | No | `None` | `completed` -//! [`compact`](crate::servers::http::v1::requests::announce::Announce::compact) | `0` or `1` | Whether the tracker should return a compact peer list. | No | `None` | `0` +//! [`downloaded`](bittorrent_http_protocol::v1::requests::announce::Announce::downloaded) | positive integer |The number of bytes downloaded by the peer. | No | `0` | `0` +//! [`uploaded`](bittorrent_http_protocol::v1::requests::announce::Announce::uploaded) | positive integer | The number of bytes uploaded by the peer. | No | `0` | `0` +//! 
[`peer_id`](bittorrent_http_protocol::v1::requests::announce::Announce::peer_id) | percent encoded of 20-byte array | The ID of the peer. | Yes | No | `-qB00000000000000001` +//! [`port`](bittorrent_http_protocol::v1::requests::announce::Announce::port) | positive integer | The port used by the peer. | Yes | No | `17548` +//! [`left`](bittorrent_http_protocol::v1::requests::announce::Announce::left) | positive integer | The number of bytes pending to download. | No | `0` | `0` +//! [`event`](bittorrent_http_protocol::v1::requests::announce::Announce::event) | positive integer | The event that triggered the `Announce` request: `started`, `completed`, `stopped` | No | `None` | `completed` +//! [`compact`](bittorrent_http_protocol::v1::requests::announce::Announce::compact) | `0` or `1` | Whether the tracker should return a compact peer list. | No | `None` | `0` //! `numwant` | positive integer | **Not implemented**. The maximum number of peers you want in the reply. | No | `50` | `50` //! -//! Refer to the [`Announce`](crate::servers::http::v1::requests::announce::Announce) +//! Refer to the [`Announce`](bittorrent_http_protocol::v1::requests::announce::Announce) //! request for more information about the parameters. //! //! > **NOTICE**: the [BEP 03](https://www.bittorrent.org/beps/bep_0003.html) @@ -152,7 +152,7 @@ //! 000000f0: 65 e //! ``` //! -//! Refer to the [`Normal`](crate::servers::http::v1::responses::announce::Normal), i.e. `Non-Compact` +//! Refer to the [`Normal`](bittorrent_http_protocol::v1::responses::announce::Normal), i.e. `Non-Compact` //! response for more information about the response. //! //! **Sample compact response** @@ -190,7 +190,7 @@ //! 0000070: 7065 pe //! ``` //! -//! Refer to the [`Compact`](crate::servers::http::v1::responses::announce::Compact) +//! Refer to the [`Compact`](bittorrent_http_protocol::v1::responses::announce::Compact) //! response for more information about the response. //! //! **Protocol** @@ -220,12 +220,12 @@ //! 
//! Parameter | Type | Description | Required | Default | Example //! ---|---|---|---|---|--- -//! [`info_hash`](crate::servers::http::v1::requests::scrape::Scrape::info_hashes) | percent encoded of 20-byte array | The `Info Hash` of the torrent. | Yes | No | `%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00` +//! [`info_hash`](bittorrent_http_protocol::v1::requests::scrape::Scrape::info_hashes) | percent encoded of 20-byte array | The `Info Hash` of the torrent. | Yes | No | `%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00` //! //! > **NOTICE**: you can scrape multiple torrents at the same time by passing //! > multiple `info_hash` parameters. //! -//! Refer to the [`Scrape`](crate::servers::http::v1::requests::scrape::Scrape) +//! Refer to the [`Scrape`](bittorrent_http_protocol::v1::requests::scrape::Scrape) //! request for more information about the parameters. //! //! **Sample scrape URL** diff --git a/src/servers/http/v1/extractors/announce_request.rs b/src/servers/http/v1/extractors/announce_request.rs index 32b69ae0b..74c9ab8c1 100644 --- a/src/servers/http/v1/extractors/announce_request.rs +++ b/src/servers/http/v1/extractors/announce_request.rs @@ -4,10 +4,10 @@ //! It parses the query parameters returning an [`Announce`] //! request. //! -//! Refer to [`Announce`](crate::servers::http::v1::requests::announce) for more +//! Refer to [`Announce`](bittorrent_http_protocol::v1::requests::announce) for more //! information about the returned structure. //! -//! It returns a bencoded [`Error`](crate::servers::http::v1::responses::error) +//! It returns a bencoded [`Error`](bittorrent_http_protocol::v1::responses::error) //! response (`500`) if the query parameters are missing or invalid. //! //! 
**Sample announce request** @@ -33,11 +33,11 @@ use std::panic::Location; use axum::extract::FromRequestParts; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; +use bittorrent_http_protocol::v1::query::Query; +use bittorrent_http_protocol::v1::requests::announce::{Announce, ParseAnnounceQueryError}; +use bittorrent_http_protocol::v1::responses; use futures::FutureExt; - -use crate::servers::http::v1::query::Query; -use crate::servers::http::v1::requests::announce::{Announce, ParseAnnounceQueryError}; -use crate::servers::http::v1::responses; +use hyper::StatusCode; /// Extractor for the [`Announce`] /// request. @@ -53,7 +53,7 @@ where async { match extract_announce_from(parts.uri.query()) { Ok(announce_request) => Ok(ExtractRequest(announce_request)), - Err(error) => Err(error.into_response()), + Err(error) => Err((StatusCode::OK, error.write()).into_response()), } } .boxed() @@ -87,11 +87,11 @@ mod tests { use std::str::FromStr; use aquatic_udp_protocol::{NumberOfBytes, PeerId}; + use bittorrent_http_protocol::v1::requests::announce::{Announce, Compact, Event}; + use bittorrent_http_protocol::v1::responses::error::Error; use bittorrent_primitives::info_hash::InfoHash; use super::extract_announce_from; - use crate::servers::http::v1::requests::announce::{Announce, Compact, Event}; - use crate::servers::http::v1::responses::error::Error; fn assert_error_response(error: &Error, error_message: &str) { assert!( diff --git a/src/servers/http/v1/extractors/authentication_key.rs b/src/servers/http/v1/extractors/authentication_key.rs index 35efdf93d..6610b197a 100644 --- a/src/servers/http/v1/extractors/authentication_key.rs +++ b/src/servers/http/v1/extractors/authentication_key.rs @@ -9,7 +9,7 @@ //! It's a wrapper for Axum `Path` extractor in order to return custom //! authentication errors. //! -//! It returns a bencoded [`Error`](crate::servers::http::v1::responses::error) +//! 
It returns a bencoded [`Error`](bittorrent_http_protocol::v1::responses::error) //! response (`500`) if the `key` parameter are missing or invalid. //! //! **Sample authentication error responses** @@ -49,11 +49,12 @@ use axum::extract::rejection::PathRejection; use axum::extract::{FromRequestParts, Path}; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; +use bittorrent_http_protocol::v1::responses; +use hyper::StatusCode; use serde::Deserialize; use crate::core::auth::Key; use crate::servers::http::v1::handlers::common::auth; -use crate::servers::http::v1::responses; /// Extractor for the [`Key`] struct. pub struct Extract(pub Key); @@ -82,7 +83,7 @@ where match extract_key(maybe_path_with_key) { Ok(key) => Ok(Extract(key)), - Err(error) => Err(error.into_response()), + Err(error) => Err((StatusCode::OK, error.write()).into_response()), } } } @@ -130,8 +131,9 @@ fn custom_error(rejection: &PathRejection) -> responses::error::Error { #[cfg(test)] mod tests { + use bittorrent_http_protocol::v1::responses::error::Error; + use super::parse_key; - use crate::servers::http::v1::responses::error::Error; fn assert_error_response(error: &Error, error_message: &str) { assert!( diff --git a/src/servers/http/v1/extractors/client_ip_sources.rs b/src/servers/http/v1/extractors/client_ip_sources.rs index 1ca5a22d0..02265554e 100644 --- a/src/servers/http/v1/extractors/client_ip_sources.rs +++ b/src/servers/http/v1/extractors/client_ip_sources.rs @@ -42,8 +42,7 @@ use axum::extract::{ConnectInfo, FromRequestParts}; use axum::http::request::Parts; use axum::response::Response; use axum_client_ip::RightmostXForwardedFor; - -use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; +use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; /// Extractor for the [`ClientIpSources`] /// struct. 
diff --git a/src/servers/http/v1/extractors/scrape_request.rs b/src/servers/http/v1/extractors/scrape_request.rs index 890c4033c..bacd36169 100644 --- a/src/servers/http/v1/extractors/scrape_request.rs +++ b/src/servers/http/v1/extractors/scrape_request.rs @@ -4,10 +4,10 @@ //! It parses the query parameters returning an [`Scrape`] //! request. //! -//! Refer to [`Scrape`](crate::servers::http::v1::requests::scrape) for more +//! Refer to [`Scrape`](bittorrent_http_protocol::v1::requests::scrape) for more //! information about the returned structure. //! -//! It returns a bencoded [`Error`](crate::servers::http::v1::responses::error) +//! It returns a bencoded [`Error`](bittorrent_http_protocol::v1::responses::error) //! response (`500`) if the query parameters are missing or invalid. //! //! **Sample scrape request** @@ -33,11 +33,11 @@ use std::panic::Location; use axum::extract::FromRequestParts; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; +use bittorrent_http_protocol::v1::query::Query; +use bittorrent_http_protocol::v1::requests::scrape::{ParseScrapeQueryError, Scrape}; +use bittorrent_http_protocol::v1::responses; use futures::FutureExt; - -use crate::servers::http::v1::query::Query; -use crate::servers::http::v1::requests::scrape::{ParseScrapeQueryError, Scrape}; -use crate::servers::http::v1::responses; +use hyper::StatusCode; /// Extractor for the [`Scrape`] /// request. 
@@ -53,7 +53,7 @@ where async { match extract_scrape_from(parts.uri.query()) { Ok(scrape_request) => Ok(ExtractRequest(scrape_request)), - Err(error) => Err(error.into_response()), + Err(error) => Err((StatusCode::OK, error.write()).into_response()), } } .boxed() @@ -86,11 +86,11 @@ fn extract_scrape_from(maybe_raw_query: Option<&str>) -> Result Response { let announce_data = match handle_announce(tracker, announce_request, client_ip_sources, maybe_key).await { Ok(announce_data) => announce_data, - Err(error) => return error.into_response(), + Err(error) => return (StatusCode::OK, error.write()).into_response(), }; build_response(announce_request, announce_data) } @@ -123,10 +126,12 @@ async fn handle_announce( fn build_response(announce_request: &Announce, announce_data: AnnounceData) -> Response { if announce_request.compact.as_ref().is_some_and(|f| *f == Compact::Accepted) { let response: responses::Announce = announce_data.into(); - response.into_response() + let bytes: Vec = response.data.into(); + (StatusCode::OK, bytes).into_response() } else { let response: responses::Announce = announce_data.into(); - response.into_response() + let bytes: Vec = response.data.into(); + (StatusCode::OK, bytes).into_response() } } @@ -174,14 +179,14 @@ pub fn map_to_torrust_event(event: &Option) -> AnnounceEvent { mod tests { use aquatic_udp_protocol::PeerId; + use bittorrent_http_protocol::v1::requests::announce::Announce; + use bittorrent_http_protocol::v1::responses; + use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; use crate::core::Tracker; - use crate::servers::http::v1::requests::announce::Announce; - use crate::servers::http::v1::responses; - use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; fn private_tracker() -> Tracker { 
tracker_factory(&configuration::ephemeral_private()) @@ -301,10 +306,11 @@ mod tests { use std::sync::Arc; + use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; + use super::{sample_announce_request, tracker_on_reverse_proxy}; use crate::servers::http::v1::handlers::announce::handle_announce; use crate::servers::http::v1::handlers::announce::tests::assert_error_response; - use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { @@ -330,10 +336,11 @@ mod tests { use std::sync::Arc; + use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; + use super::{sample_announce_request, tracker_not_on_reverse_proxy}; use crate::servers::http::v1::handlers::announce::handle_announce; use crate::servers::http::v1::handlers::announce::tests::assert_error_response; - use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { diff --git a/src/servers/http/v1/handlers/common/auth.rs b/src/servers/http/v1/handlers/common/auth.rs index f9a7796a4..ff1d47e91 100644 --- a/src/servers/http/v1/handlers/common/auth.rs +++ b/src/servers/http/v1/handlers/common/auth.rs @@ -3,10 +3,10 @@ //! response. use std::panic::Location; +use bittorrent_http_protocol::v1::responses; use thiserror::Error; use crate::core::auth; -use crate::servers::http::v1::responses; /// Authentication error. /// diff --git a/src/servers/http/v1/handlers/common/peer_ip.rs b/src/servers/http/v1/handlers/common/peer_ip.rs index 5602bd26c..0fe7c14f1 100644 --- a/src/servers/http/v1/handlers/common/peer_ip.rs +++ b/src/servers/http/v1/handlers/common/peer_ip.rs @@ -2,25 +2,15 @@ //! //! The HTTP tracker may fail to resolve the peer IP address. This module //! contains the logic to convert those -//! [`PeerIpResolutionError`] +//! 
[`PeerIpResolutionError`](bittorrent_http_protocol::v1::services::peer_ip_resolver::PeerIpResolutionError) //! errors into responses. -use crate::servers::http::v1::responses; -use crate::servers::http::v1::services::peer_ip_resolver::PeerIpResolutionError; - -impl From for responses::error::Error { - fn from(err: PeerIpResolutionError) -> Self { - responses::error::Error { - failure_reason: format!("Error resolving peer IP: {err}"), - } - } -} #[cfg(test)] mod tests { use std::panic::Location; - use crate::servers::http::v1::responses; - use crate::servers::http::v1::services::peer_ip_resolver::PeerIpResolutionError; + use bittorrent_http_protocol::v1::responses; + use bittorrent_http_protocol::v1::services::peer_ip_resolver::PeerIpResolutionError; fn assert_error_response(error: &responses::error::Error, error_message: &str) { assert!( diff --git a/src/servers/http/v1/handlers/mod.rs b/src/servers/http/v1/handlers/mod.rs index 7b3a1e7c3..f9305cf20 100644 --- a/src/servers/http/v1/handlers/mod.rs +++ b/src/servers/http/v1/handlers/mod.rs @@ -2,18 +2,7 @@ //! //! Refer to the generic [HTTP server documentation](crate::servers::http) for //! more information about the HTTP tracker. 
-use super::responses; -use crate::core::error::Error; - pub mod announce; pub mod common; pub mod health_check; pub mod scrape; - -impl From for responses::error::Error { - fn from(err: Error) -> Self { - responses::error::Error { - failure_reason: format!("Tracker error: {err}"), - } - } -} diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 10f945d70..2aa1bd9f8 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -9,15 +9,18 @@ use std::sync::Arc; use axum::extract::State; use axum::response::{IntoResponse, Response}; +use bittorrent_http_protocol::v1::requests::scrape::Scrape; +use bittorrent_http_protocol::v1::responses; +use bittorrent_http_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources}; +use hyper::StatusCode; +use torrust_tracker_primitives::core::ScrapeData; use crate::core::auth::Key; -use crate::core::{ScrapeData, Tracker}; +use crate::core::Tracker; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; use crate::servers::http::v1::extractors::scrape_request::ExtractRequest; -use crate::servers::http::v1::requests::scrape::Scrape; -use crate::servers::http::v1::services::peer_ip_resolver::{self, ClientIpSources}; -use crate::servers::http::v1::{responses, services}; +use crate::servers::http::v1::services; /// It handles the `scrape` request when the HTTP tracker is configured /// to run in `public` mode. 
@@ -56,7 +59,7 @@ async fn handle( ) -> Response { let scrape_data = match handle_scrape(tracker, scrape_request, client_ip_sources, maybe_key).await { Ok(scrape_data) => scrape_data, - Err(error) => return error.into_response(), + Err(error) => return (StatusCode::OK, error.write()).into_response(), }; build_response(scrape_data) } @@ -102,7 +105,9 @@ async fn handle_scrape( } fn build_response(scrape_data: ScrapeData) -> Response { - responses::scrape::Bencoded::from(scrape_data).into_response() + let response = responses::scrape::Bencoded::from(scrape_data); + + (StatusCode::OK, response.body()).into_response() } #[cfg(test)] @@ -110,14 +115,14 @@ mod tests { use std::net::IpAddr; use std::str::FromStr; + use bittorrent_http_protocol::v1::requests::scrape::Scrape; + use bittorrent_http_protocol::v1::responses; + use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::core::services::tracker_factory; use crate::core::Tracker; - use crate::servers::http::v1::requests::scrape::Scrape; - use crate::servers::http::v1::responses; - use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; fn private_tracker() -> Tracker { tracker_factory(&configuration::ephemeral_private()) @@ -159,8 +164,10 @@ mod tests { use std::str::FromStr; use std::sync::Arc; + use torrust_tracker_primitives::core::ScrapeData; + use super::{private_tracker, sample_client_ip_sources, sample_scrape_request}; - use crate::core::{auth, ScrapeData}; + use crate::core::auth; use crate::servers::http::v1::handlers::scrape::handle_scrape; #[tokio::test] @@ -201,8 +208,9 @@ mod tests { use std::sync::Arc; + use torrust_tracker_primitives::core::ScrapeData; + use super::{sample_client_ip_sources, sample_scrape_request, whitelisted_tracker}; - use crate::core::ScrapeData; use crate::servers::http::v1::handlers::scrape::handle_scrape; #[tokio::test] @@ -224,10 
+232,11 @@ mod tests { mod with_tracker_on_reverse_proxy { use std::sync::Arc; + use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; + use super::{sample_scrape_request, tracker_on_reverse_proxy}; use crate::servers::http::v1::handlers::scrape::handle_scrape; use crate::servers::http::v1::handlers::scrape::tests::assert_error_response; - use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { @@ -252,10 +261,11 @@ mod tests { mod with_tracker_not_on_reverse_proxy { use std::sync::Arc; + use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; + use super::{sample_scrape_request, tracker_not_on_reverse_proxy}; use crate::servers::http::v1::handlers::scrape::handle_scrape; use crate::servers::http::v1::handlers::scrape::tests::assert_error_response; - use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { diff --git a/src/servers/http/v1/mod.rs b/src/servers/http/v1/mod.rs index 9d2745692..48dac5663 100644 --- a/src/servers/http/v1/mod.rs +++ b/src/servers/http/v1/mod.rs @@ -4,8 +4,5 @@ //! more information about the endpoints and their usage. pub mod extractors; pub mod handlers; -pub mod query; -pub mod requests; -pub mod responses; pub mod routes; pub mod services; diff --git a/src/servers/http/v1/responses/mod.rs b/src/servers/http/v1/responses/mod.rs deleted file mode 100644 index e22879c6d..000000000 --- a/src/servers/http/v1/responses/mod.rs +++ /dev/null @@ -1,19 +0,0 @@ -//! HTTP responses for the HTTP tracker. -//! -//! Refer to the generic [HTTP server documentation](crate::servers::http) for -//! more information about the HTTP tracker. 
-pub mod announce; -pub mod error; -pub mod scrape; - -pub use announce::{Announce, Compact, Normal}; - -/// Trait that defines the Announce Response Format -pub trait Response: axum::response::IntoResponse { - /// Returns the Body of the Announce Response - /// - /// # Errors - /// - /// If unable to generate the response, it will return an error. - fn body(self) -> Result, error::Error>; -} diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 73d480c79..df827aee2 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -12,9 +12,10 @@ use std::net::IpAddr; use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; -use crate::core::{statistics, AnnounceData, PeersWanted, Tracker}; +use crate::core::{statistics, PeersWanted, Tracker}; /// The HTTP tracker `announce` service. /// @@ -100,12 +101,13 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; - use crate::core::{statistics, AnnounceData, PeersWanted, Tracker}; + use crate::core::{statistics, PeersWanted, Tracker}; use crate::servers::http::v1::services::announce::invoke; use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; diff --git a/src/servers/http/v1/services/mod.rs b/src/servers/http/v1/services/mod.rs index 2e6285d1a..ce99c6856 100644 --- a/src/servers/http/v1/services/mod.rs +++ b/src/servers/http/v1/services/mod.rs @@ -6,5 +6,4 @@ //! //! Refer to [`torrust_tracker`](crate) documentation. 
pub mod announce; -pub mod peer_ip_resolver; pub mod scrape; diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 9eef263cb..80d81d78a 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -12,8 +12,9 @@ use std::net::IpAddr; use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::core::ScrapeData; -use crate::core::{statistics, ScrapeData, Tracker}; +use crate::core::{statistics, Tracker}; /// The HTTP tracker `scrape` service. /// @@ -100,10 +101,11 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; - use crate::core::{statistics, PeersWanted, ScrapeData, Tracker}; + use crate::core::{statistics, PeersWanted, Tracker}; use crate::servers::http::v1::services::scrape::invoke; use crate::servers::http::v1::services::scrape::tests::{ public_tracker, sample_info_hash, sample_info_hashes, sample_peer, @@ -192,9 +194,10 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_test_helpers::configuration; - use crate::core::{statistics, PeersWanted, ScrapeData, Tracker}; + use crate::core::{statistics, PeersWanted, Tracker}; use crate::servers::http::v1::services::scrape::fake; use crate::servers::http::v1::services::scrape::tests::{ public_tracker, sample_info_hash, sample_info_hashes, sample_peer, From c2d134e792216fbc58f9df67d89762434cf7bae2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sat, 11 Jan 2025 21:09:40 +0000 Subject: [PATCH 084/802] test: re-enable slow tests Some doc tests were slow becuase they required to compile the main library. The code used from the main library was moved to workspace pacakages and there is no dependency with the main tracker lib. 
See https://github.com/torrust/torrust-tracker/issues/1097 --- packages/http-protocol/src/v1/query.rs | 16 ++++++++-------- .../http-protocol/src/v1/requests/announce.rs | 4 ++-- .../http-protocol/src/v1/responses/announce.rs | 8 ++++---- packages/http-protocol/src/v1/responses/error.rs | 4 ++-- .../http-protocol/src/v1/responses/scrape.rs | 6 +++--- .../src/v1/services/peer_ip_resolver.rs | 8 ++++---- 6 files changed, 23 insertions(+), 23 deletions(-) diff --git a/packages/http-protocol/src/v1/query.rs b/packages/http-protocol/src/v1/query.rs index 8f9170aad..f77145cb6 100644 --- a/packages/http-protocol/src/v1/query.rs +++ b/packages/http-protocol/src/v1/query.rs @@ -30,8 +30,8 @@ impl Query { /// It return `Some(value)` for a URL query param if the param with the /// input `name` exists. For example: /// - /// ```text - /// use torrust_tracker_lib::servers::http::v1::query::Query; + /// ```rust + /// use bittorrent_http_protocol::v1::query::Query; /// /// let raw_query = "param1=value1¶m2=value2"; /// @@ -43,8 +43,8 @@ impl Query { /// /// It returns only the first param value even if it has multiple values: /// - /// ```text - /// use torrust_tracker_lib::servers::http::v1::query::Query; + /// ```rust + /// use bittorrent_http_protocol::v1::query::Query; /// /// let raw_query = "param1=value1¶m1=value2"; /// @@ -59,8 +59,8 @@ impl Query { /// Returns all the param values as a vector. /// - /// ```text - /// use torrust_tracker_lib::servers::http::v1::query::Query; + /// ```rust + /// use bittorrent_http_protocol::v1::query::Query; /// /// let query = "param1=value1¶m1=value2".parse::().unwrap(); /// @@ -72,8 +72,8 @@ impl Query { /// /// Returns all the param values as a vector even if it has only one value. 
/// - /// ```text - /// use torrust_tracker_lib::servers::http::v1::query::Query; + /// ```rust + /// use bittorrent_http_protocol::v1::query::Query; /// /// let query = "param1=value1".parse::().unwrap(); /// diff --git a/packages/http-protocol/src/v1/requests/announce.rs b/packages/http-protocol/src/v1/requests/announce.rs index 28cecd386..ea76771dd 100644 --- a/packages/http-protocol/src/v1/requests/announce.rs +++ b/packages/http-protocol/src/v1/requests/announce.rs @@ -29,9 +29,9 @@ const NUMWANT: &str = "numwant"; /// The `Announce` request. Fields use the domain types after parsing the /// query params of the request. /// -/// ```text +/// ```rust /// use aquatic_udp_protocol::{NumberOfBytes, PeerId}; -/// use torrust_tracker_lib::servers::http::v1::requests::announce::{Announce, Compact, Event}; +/// use bittorrent_http_protocol::v1::requests::announce::{Announce, Compact, Event}; /// use bittorrent_primitives::info_hash::InfoHash; /// /// let request = Announce { diff --git a/packages/http-protocol/src/v1/responses/announce.rs b/packages/http-protocol/src/v1/responses/announce.rs index 986a881a5..3854c9f34 100644 --- a/packages/http-protocol/src/v1/responses/announce.rs +++ b/packages/http-protocol/src/v1/responses/announce.rs @@ -130,9 +130,9 @@ impl Into> for Compact { /// A [`NormalPeer`], for the [`Normal`] form. /// -/// ```text +/// ```rust /// use std::net::{IpAddr, Ipv4Addr}; -/// use torrust_tracker_lib::servers::http::v1::responses::announce::{Normal, NormalPeer}; +/// use bittorrent_http_protocol::v1::responses::announce::{Normal, NormalPeer}; /// /// let peer = NormalPeer { /// peer_id: *b"-qB00000000000000001", @@ -182,9 +182,9 @@ impl From<&NormalPeer> for BencodeMut<'_> { /// A part from reducing the size of the response, this format does not contain /// the peer's ID. 
/// -/// ```text +/// ```rust /// use std::net::{IpAddr, Ipv4Addr}; -/// use torrust_tracker_lib::servers::http::v1::responses::announce::{Compact, CompactPeer, CompactPeerData}; +/// use bittorrent_http_protocol::v1::responses::announce::{Compact, CompactPeer, CompactPeerData}; /// /// let peer = CompactPeer::V4(CompactPeerData { /// ip: Ipv4Addr::new(0x69, 0x69, 0x69, 0x69), // 105.105.105.105 diff --git a/packages/http-protocol/src/v1/responses/error.rs b/packages/http-protocol/src/v1/responses/error.rs index 9aca9c71c..7516cd39e 100644 --- a/packages/http-protocol/src/v1/responses/error.rs +++ b/packages/http-protocol/src/v1/responses/error.rs @@ -26,8 +26,8 @@ pub struct Error { impl Error { /// Returns the bencoded representation of the `Error` struct. /// - /// ```text - /// use torrust_tracker_lib::servers::http::v1::responses::error::Error; + /// ```rust + /// use bittorrent_http_protocol::v1::responses::error::Error; /// /// let err = Error { /// failure_reason: "error message".to_owned(), diff --git a/packages/http-protocol/src/v1/responses/scrape.rs b/packages/http-protocol/src/v1/responses/scrape.rs index a52fa263c..ee4c4155b 100644 --- a/packages/http-protocol/src/v1/responses/scrape.rs +++ b/packages/http-protocol/src/v1/responses/scrape.rs @@ -8,11 +8,11 @@ use torrust_tracker_primitives::core::ScrapeData; /// The `Scrape` response for the HTTP tracker. 
/// -/// ```text -/// use torrust_tracker_lib::servers::http::v1::responses::scrape::Bencoded; +/// ```rust +/// use bittorrent_http_protocol::v1::responses::scrape::Bencoded; /// use bittorrent_primitives::info_hash::InfoHash; /// use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -/// use torrust_tracker_lib::core::ScrapeData; +/// use torrust_tracker_primitives::core::ScrapeData; /// /// let info_hash = InfoHash::from_bytes(&[0x69; 20]); /// let mut scrape_data = ScrapeData::empty(); diff --git a/packages/http-protocol/src/v1/services/peer_ip_resolver.rs b/packages/http-protocol/src/v1/services/peer_ip_resolver.rs index 366f8820c..f0ad6a83e 100644 --- a/packages/http-protocol/src/v1/services/peer_ip_resolver.rs +++ b/packages/http-protocol/src/v1/services/peer_ip_resolver.rs @@ -59,11 +59,11 @@ pub enum PeerIpResolutionError { /// /// With the tracker running on reverse proxy mode: /// -/// ```text +/// ```rust /// use std::net::IpAddr; /// use std::str::FromStr; /// -/// use torrust_tracker_lib::servers::http::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; +/// use bittorrent_http_protocol::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; /// /// let on_reverse_proxy = true; /// @@ -81,11 +81,11 @@ pub enum PeerIpResolutionError { /// /// With the tracker non running on reverse proxy mode: /// -/// ```text +/// ```rust /// use std::net::IpAddr; /// use std::str::FromStr; /// -/// use torrust_tracker_lib::servers::http::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; +/// use bittorrent_http_protocol::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; /// /// let on_reverse_proxy = false; /// From 40eb805934567fd54619581567786e5ad05003ad Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 15 Jan 2025 10:01:41 +0000 Subject: [PATCH 085/802] refactor: [1182] extract WhitelistManager --- src/core/mod.rs | 174 
++++++++++++++++++++++++++++++++++++------------ 1 file changed, 130 insertions(+), 44 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 6ba8e94ad..066792eb3 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -498,7 +498,7 @@ pub struct Tracker { keys: tokio::sync::RwLock>, /// The list of allowed torrents. Only for listed trackers. - whitelist: tokio::sync::RwLock>, + whitelist_manager: WhiteListManager, /// The in-memory torrents repository. torrents: Arc, @@ -510,6 +510,123 @@ pub struct Tracker { stats_repository: statistics::repository::Repository, } +pub struct WhiteListManager { + /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) + /// or [`MySQL`](crate::core::databases::mysql) + database: Arc>, + + /// The list of allowed torrents. Only for listed trackers. + whitelist: tokio::sync::RwLock>, +} + +impl WhiteListManager { + #[must_use] + pub fn new(database: Arc>) -> Self { + Self { + database, + whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()), + } + } + + /// It adds a torrent to the whitelist. + /// Adding torrents is not relevant to public trackers. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. 
+ pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + self.add_torrent_to_database_whitelist(info_hash)?; + self.add_torrent_to_memory_whitelist(info_hash).await; + Ok(()) + } + + /// It adds a torrent to the whitelist if it has not been whitelisted previously + fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; + + if is_whitelisted { + return Ok(()); + } + + self.database.add_info_hash_to_whitelist(*info_hash)?; + + Ok(()) + } + + pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { + self.whitelist.write().await.insert(*info_hash) + } + + /// It removes a torrent from the whitelist. + /// Removing torrents is not relevant to public trackers. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. + pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + self.remove_torrent_from_database_whitelist(info_hash)?; + self.remove_torrent_from_memory_whitelist(info_hash).await; + Ok(()) + } + + /// It removes a torrent from the whitelist in the database. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. + pub fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; + + if !is_whitelisted { + return Ok(()); + } + + self.database.remove_info_hash_from_whitelist(*info_hash)?; + + Ok(()) + } + + /// It removes a torrent from the whitelist in memory. 
+ /// + /// # Context: Whitelist + pub async fn remove_torrent_from_memory_whitelist(&self, info_hash: &InfoHash) -> bool { + self.whitelist.write().await.remove(info_hash) + } + + /// It checks if a torrent is whitelisted. + /// + /// # Context: Whitelist + pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { + self.whitelist.read().await.contains(info_hash) + } + + /// It loads the whitelist from the database. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. + pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> { + let whitelisted_torrents_from_database = self.database.load_whitelist()?; + let mut whitelist = self.whitelist.write().await; + + whitelist.clear(); + + for info_hash in whitelisted_torrents_from_database { + let _: bool = whitelist.insert(info_hash); + } + + Ok(()) + } +} + /// How many peers the peer announcing wants in the announce response. #[derive(Clone, Debug, PartialEq, Default)] pub enum PeersWanted { @@ -587,7 +704,7 @@ impl Tracker { Ok(Tracker { config: config.clone(), keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), - whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()), + whitelist_manager: WhiteListManager::new(database.clone()), torrents: Arc::default(), stats_event_sender, stats_repository, @@ -1068,26 +1185,11 @@ impl Tracker { /// /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. 
pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.add_torrent_to_database_whitelist(info_hash)?; - self.add_torrent_to_memory_whitelist(info_hash).await; - Ok(()) - } - - /// It adds a torrent to the whitelist if it has not been whitelisted previously - fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; - - if is_whitelisted { - return Ok(()); - } - - self.database.add_info_hash_to_whitelist(*info_hash)?; - - Ok(()) + self.whitelist_manager.add_torrent_to_whitelist(info_hash).await } pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { - self.whitelist.write().await.insert(*info_hash) + self.whitelist_manager.add_torrent_to_memory_whitelist(info_hash).await } /// It removes a torrent from the whitelist. @@ -1099,9 +1201,7 @@ impl Tracker { /// /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.remove_torrent_from_database_whitelist(info_hash)?; - self.remove_torrent_from_memory_whitelist(info_hash).await; - Ok(()) + self.whitelist_manager.remove_torrent_from_whitelist(info_hash).await } /// It removes a torrent from the whitelist in the database. @@ -1112,29 +1212,21 @@ impl Tracker { /// /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. 
pub fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; - - if !is_whitelisted { - return Ok(()); - } - - self.database.remove_info_hash_from_whitelist(*info_hash)?; - - Ok(()) + self.whitelist_manager.remove_torrent_from_database_whitelist(info_hash) } /// It removes a torrent from the whitelist in memory. /// /// # Context: Whitelist pub async fn remove_torrent_from_memory_whitelist(&self, info_hash: &InfoHash) -> bool { - self.whitelist.write().await.remove(info_hash) + self.whitelist_manager.remove_torrent_from_memory_whitelist(info_hash).await } /// It checks if a torrent is whitelisted. /// /// # Context: Whitelist pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { - self.whitelist.read().await.contains(info_hash) + self.whitelist_manager.is_info_hash_whitelisted(info_hash).await } /// It loads the whitelist from the database. @@ -1145,16 +1237,7 @@ impl Tracker { /// /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> { - let whitelisted_torrents_from_database = self.database.load_whitelist()?; - let mut whitelist = self.whitelist.write().await; - - whitelist.clear(); - - for info_hash in whitelisted_torrents_from_database { - let _: bool = whitelist.insert(info_hash); - } - - Ok(()) + self.whitelist_manager.load_whitelist_from_database().await } /// It return the `Tracker` [`statistics::metrics::Metrics`]. 
@@ -1821,7 +1904,10 @@ mod tests { tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); // Remove torrent from the in-memory whitelist - tracker.whitelist.write().await.remove(&info_hash); + tracker + .whitelist_manager + .remove_torrent_from_memory_whitelist(&info_hash) + .await; assert!(!tracker.is_info_hash_whitelisted(&info_hash).await); tracker.load_whitelist_from_database().await.unwrap(); From 439493d7ecf7b051f0fc3b1041b346e0a7d69be7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 15 Jan 2025 10:09:23 +0000 Subject: [PATCH 086/802] refactor: [#1182] move extracted service to new mod --- src/core/mod.rs | 119 +------------------------------------ src/core/whitelist/mod.rs | 122 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 124 insertions(+), 117 deletions(-) create mode 100644 src/core/whitelist/mod.rs diff --git a/src/core/mod.rs b/src/core/mod.rs index 066792eb3..4b5f97a92 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -445,6 +445,7 @@ pub mod error; pub mod services; pub mod statistics; pub mod torrent; +pub mod whitelist; pub mod peer_tests; @@ -470,6 +471,7 @@ use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_torrent_repository::entry::EntrySync; use torrust_tracker_torrent_repository::repository::Repository; use tracing::instrument; +use whitelist::WhiteListManager; use self::auth::Key; use self::error::Error; @@ -510,123 +512,6 @@ pub struct Tracker { stats_repository: statistics::repository::Repository, } -pub struct WhiteListManager { - /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) - /// or [`MySQL`](crate::core::databases::mysql) - database: Arc>, - - /// The list of allowed torrents. Only for listed trackers. 
- whitelist: tokio::sync::RwLock>, -} - -impl WhiteListManager { - #[must_use] - pub fn new(database: Arc>) -> Self { - Self { - database, - whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()), - } - } - - /// It adds a torrent to the whitelist. - /// Adding torrents is not relevant to public trackers. - /// - /// # Context: Whitelist - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. - pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.add_torrent_to_database_whitelist(info_hash)?; - self.add_torrent_to_memory_whitelist(info_hash).await; - Ok(()) - } - - /// It adds a torrent to the whitelist if it has not been whitelisted previously - fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; - - if is_whitelisted { - return Ok(()); - } - - self.database.add_info_hash_to_whitelist(*info_hash)?; - - Ok(()) - } - - pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { - self.whitelist.write().await.insert(*info_hash) - } - - /// It removes a torrent from the whitelist. - /// Removing torrents is not relevant to public trackers. - /// - /// # Context: Whitelist - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. - pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.remove_torrent_from_database_whitelist(info_hash)?; - self.remove_torrent_from_memory_whitelist(info_hash).await; - Ok(()) - } - - /// It removes a torrent from the whitelist in the database. - /// - /// # Context: Whitelist - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. 
- pub fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; - - if !is_whitelisted { - return Ok(()); - } - - self.database.remove_info_hash_from_whitelist(*info_hash)?; - - Ok(()) - } - - /// It removes a torrent from the whitelist in memory. - /// - /// # Context: Whitelist - pub async fn remove_torrent_from_memory_whitelist(&self, info_hash: &InfoHash) -> bool { - self.whitelist.write().await.remove(info_hash) - } - - /// It checks if a torrent is whitelisted. - /// - /// # Context: Whitelist - pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { - self.whitelist.read().await.contains(info_hash) - } - - /// It loads the whitelist from the database. - /// - /// # Context: Whitelist - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. - pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> { - let whitelisted_torrents_from_database = self.database.load_whitelist()?; - let mut whitelist = self.whitelist.write().await; - - whitelist.clear(); - - for info_hash in whitelisted_torrents_from_database { - let _: bool = whitelist.insert(info_hash); - } - - Ok(()) - } -} - /// How many peers the peer announcing wants in the announce response. 
#[derive(Clone, Debug, PartialEq, Default)] pub enum PeersWanted { diff --git a/src/core/whitelist/mod.rs b/src/core/whitelist/mod.rs new file mode 100644 index 000000000..266bcec23 --- /dev/null +++ b/src/core/whitelist/mod.rs @@ -0,0 +1,122 @@ +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; + +use super::databases::{self, Database}; + +pub struct WhiteListManager { + /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) + /// or [`MySQL`](crate::core::databases::mysql) + database: Arc>, + + /// The list of allowed torrents. Only for listed trackers. + whitelist: tokio::sync::RwLock>, +} + +impl WhiteListManager { + #[must_use] + pub fn new(database: Arc>) -> Self { + Self { + database, + whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()), + } + } + + /// It adds a torrent to the whitelist. + /// Adding torrents is not relevant to public trackers. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. + pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + self.add_torrent_to_database_whitelist(info_hash)?; + self.add_torrent_to_memory_whitelist(info_hash).await; + Ok(()) + } + + /// It adds a torrent to the whitelist if it has not been whitelisted previously + fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; + + if is_whitelisted { + return Ok(()); + } + + self.database.add_info_hash_to_whitelist(*info_hash)?; + + Ok(()) + } + + pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { + self.whitelist.write().await.insert(*info_hash) + } + + /// It removes a torrent from the whitelist. + /// Removing torrents is not relevant to public trackers. 
+ /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. + pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + self.remove_torrent_from_database_whitelist(info_hash)?; + self.remove_torrent_from_memory_whitelist(info_hash).await; + Ok(()) + } + + /// It removes a torrent from the whitelist in the database. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. + pub fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; + + if !is_whitelisted { + return Ok(()); + } + + self.database.remove_info_hash_from_whitelist(*info_hash)?; + + Ok(()) + } + + /// It removes a torrent from the whitelist in memory. + /// + /// # Context: Whitelist + pub async fn remove_torrent_from_memory_whitelist(&self, info_hash: &InfoHash) -> bool { + self.whitelist.write().await.remove(info_hash) + } + + /// It checks if a torrent is whitelisted. + /// + /// # Context: Whitelist + pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { + self.whitelist.read().await.contains(info_hash) + } + + /// It loads the whitelist from the database. + /// + /// # Context: Whitelist + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. 
+ pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> { + let whitelisted_torrents_from_database = self.database.load_whitelist()?; + let mut whitelist = self.whitelist.write().await; + + whitelist.clear(); + + for info_hash in whitelisted_torrents_from_database { + let _: bool = whitelist.insert(info_hash); + } + + Ok(()) + } +} From 39d1620b338a31328ac96aaaa49e327443183c23 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 15 Jan 2025 10:13:15 +0000 Subject: [PATCH 087/802] refactor: [#1182] remove unused methods --- src/core/mod.rs | 24 +----------------------- src/servers/udp/handlers.rs | 2 +- 2 files changed, 2 insertions(+), 24 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 4b5f97a92..875992da5 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -500,7 +500,7 @@ pub struct Tracker { keys: tokio::sync::RwLock>, /// The list of allowed torrents. Only for listed trackers. - whitelist_manager: WhiteListManager, + pub whitelist_manager: WhiteListManager, /// The in-memory torrents repository. torrents: Arc, @@ -1073,10 +1073,6 @@ impl Tracker { self.whitelist_manager.add_torrent_to_whitelist(info_hash).await } - pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { - self.whitelist_manager.add_torrent_to_memory_whitelist(info_hash).await - } - /// It removes a torrent from the whitelist. /// Removing torrents is not relevant to public trackers. /// @@ -1089,24 +1085,6 @@ impl Tracker { self.whitelist_manager.remove_torrent_from_whitelist(info_hash).await } - /// It removes a torrent from the whitelist in the database. - /// - /// # Context: Whitelist - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. 
- pub fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.whitelist_manager.remove_torrent_from_database_whitelist(info_hash) - } - - /// It removes a torrent from the whitelist in memory. - /// - /// # Context: Whitelist - pub async fn remove_torrent_from_memory_whitelist(&self, info_hash: &InfoHash) -> bool { - self.whitelist_manager.remove_torrent_from_memory_whitelist(info_hash).await - } - /// It checks if a torrent is whitelisted. /// /// # Context: Whitelist diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 1a9c164e2..3d7d411ce 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -1391,7 +1391,7 @@ mod tests { add_a_seeder(tracker.clone(), &remote_addr, &info_hash).await; - tracker.add_torrent_to_memory_whitelist(&info_hash.0.into()).await; + tracker.whitelist_manager.add_torrent_to_memory_whitelist(&info_hash.0.into()).await; let request = build_scrape_request(&remote_addr, &info_hash); From ea35ba5fa7caae38ace7af089e87c63e19391f54 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 15 Jan 2025 10:55:37 +0000 Subject: [PATCH 088/802] refactor: [#1182] extract struct InMemoryWhitelist --- src/core/whitelist/mod.rs | 134 +++++++++++++++++++++++++++++++------- 1 file changed, 109 insertions(+), 25 deletions(-) diff --git a/src/core/whitelist/mod.rs b/src/core/whitelist/mod.rs index 266bcec23..84469ca37 100644 --- a/src/core/whitelist/mod.rs +++ b/src/core/whitelist/mod.rs @@ -4,13 +4,14 @@ use bittorrent_primitives::info_hash::InfoHash; use super::databases::{self, Database}; +/// It handles the list of allowed torrents. Only for listed trackers. pub struct WhiteListManager { /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) /// or [`MySQL`](crate::core::databases::mysql) database: Arc>, - /// The list of allowed torrents. Only for listed trackers. 
- whitelist: tokio::sync::RwLock>, + /// The in-memory list of allowed torrents. + in_memory_whitelist: InMemoryWhitelist, } impl WhiteListManager { @@ -18,21 +19,19 @@ impl WhiteListManager { pub fn new(database: Arc>) -> Self { Self { database, - whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()), + in_memory_whitelist: InMemoryWhitelist::new(), } } /// It adds a torrent to the whitelist. /// Adding torrents is not relevant to public trackers. /// - /// # Context: Whitelist - /// /// # Errors /// /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { self.add_torrent_to_database_whitelist(info_hash)?; - self.add_torrent_to_memory_whitelist(info_hash).await; + self.in_memory_whitelist.add(info_hash).await; Ok(()) } @@ -49,15 +48,9 @@ impl WhiteListManager { Ok(()) } - pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { - self.whitelist.write().await.insert(*info_hash) - } - /// It removes a torrent from the whitelist. /// Removing torrents is not relevant to public trackers. /// - /// # Context: Whitelist - /// /// # Errors /// /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. @@ -69,8 +62,6 @@ impl WhiteListManager { /// It removes a torrent from the whitelist in the database. /// - /// # Context: Whitelist - /// /// # Errors /// /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. @@ -86,37 +77,130 @@ impl WhiteListManager { Ok(()) } + /// It adds a torrent from the whitelist in memory. + pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { + self.in_memory_whitelist.add(info_hash).await + } + /// It removes a torrent from the whitelist in memory. 
- /// - /// # Context: Whitelist pub async fn remove_torrent_from_memory_whitelist(&self, info_hash: &InfoHash) -> bool { - self.whitelist.write().await.remove(info_hash) + self.in_memory_whitelist.remove(info_hash).await } /// It checks if a torrent is whitelisted. - /// - /// # Context: Whitelist pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { - self.whitelist.read().await.contains(info_hash) + self.in_memory_whitelist.contains(info_hash).await } /// It loads the whitelist from the database. /// - /// # Context: Whitelist - /// /// # Errors /// /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> { let whitelisted_torrents_from_database = self.database.load_whitelist()?; - let mut whitelist = self.whitelist.write().await; - whitelist.clear(); + self.in_memory_whitelist.clear().await; for info_hash in whitelisted_torrents_from_database { - let _: bool = whitelist.insert(info_hash); + let _: bool = self.in_memory_whitelist.add(&info_hash).await; } Ok(()) } } + +struct InMemoryWhitelist { + /// The list of allowed torrents. Only for listed trackers. + whitelist: tokio::sync::RwLock>, +} + +impl InMemoryWhitelist { + pub fn new() -> Self { + Self { + whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()), + } + } + + /// It adds a torrent from the whitelist in memory. + pub async fn add(&self, info_hash: &InfoHash) -> bool { + self.whitelist.write().await.insert(*info_hash) + } + + /// It removes a torrent from the whitelist in memory. + pub async fn remove(&self, info_hash: &InfoHash) -> bool { + self.whitelist.write().await.remove(info_hash) + } + + /// It checks if it contains an info-hash. + pub async fn contains(&self, info_hash: &InfoHash) -> bool { + self.whitelist.read().await.contains(info_hash) + } + + /// It clears the whitelist. 
+ pub async fn clear(&self) { + let mut whitelist = self.whitelist.write().await; + whitelist.clear(); + } +} + +#[cfg(test)] +mod tests { + use bittorrent_primitives::info_hash::InfoHash; + + fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() // # DevSkim: ignore DS173237 + } + + mod in_memory_whitelist { + + use crate::core::whitelist::tests::sample_info_hash; + use crate::core::whitelist::InMemoryWhitelist; + + #[tokio::test] + async fn should_allow_adding_a_new_torrent_to_the_whitelist() { + let info_hash = sample_info_hash(); + + let whitelist = InMemoryWhitelist::new(); + + whitelist.add(&info_hash).await; + + assert!(whitelist.contains(&info_hash).await); + } + + #[tokio::test] + async fn should_allow_removing_a_new_torrent_to_the_whitelist() { + let info_hash = sample_info_hash(); + + let whitelist = InMemoryWhitelist::new(); + + whitelist.add(&info_hash).await; + whitelist.remove(&sample_info_hash()).await; + + assert!(!whitelist.contains(&info_hash).await); + } + + #[tokio::test] + async fn should_allow_clearing_the_whitelist() { + let info_hash = sample_info_hash(); + + let whitelist = InMemoryWhitelist::new(); + + whitelist.add(&info_hash).await; + whitelist.clear().await; + + assert!(!whitelist.contains(&info_hash).await); + } + + #[tokio::test] + async fn should_allow_checking_if_an_infohash_is_whitelisted() { + let info_hash = sample_info_hash(); + + let whitelist = InMemoryWhitelist::new(); + + whitelist.add(&info_hash).await; + + assert!(whitelist.contains(&info_hash).await); + } + } +} From 07f53a4ad665c33a2047867075c0657700cb7260 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 15 Jan 2025 11:09:42 +0000 Subject: [PATCH 089/802] refactor: [#1182] extract strcut DatabaseWhitelist --- src/core/whitelist/mod.rs | 98 ++++++++++++++++++++++++++------------- 1 file changed, 66 insertions(+), 32 deletions(-) diff --git a/src/core/whitelist/mod.rs b/src/core/whitelist/mod.rs index 84469ca37..97affa1ea 
100644 --- a/src/core/whitelist/mod.rs +++ b/src/core/whitelist/mod.rs @@ -6,20 +6,19 @@ use super::databases::{self, Database}; /// It handles the list of allowed torrents. Only for listed trackers. pub struct WhiteListManager { - /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) - /// or [`MySQL`](crate::core::databases::mysql) - database: Arc>, - /// The in-memory list of allowed torrents. in_memory_whitelist: InMemoryWhitelist, + + /// The persisted list of allowed torrents. + database_whitelist: DatabaseWhitelist, } impl WhiteListManager { #[must_use] pub fn new(database: Arc>) -> Self { Self { - database, in_memory_whitelist: InMemoryWhitelist::new(), + database_whitelist: DatabaseWhitelist::new(database), } } @@ -30,24 +29,11 @@ impl WhiteListManager { /// /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.add_torrent_to_database_whitelist(info_hash)?; + self.database_whitelist.add_torrent_to_database_whitelist(info_hash)?; self.in_memory_whitelist.add(info_hash).await; Ok(()) } - /// It adds a torrent to the whitelist if it has not been whitelisted previously - fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; - - if is_whitelisted { - return Ok(()); - } - - self.database.add_info_hash_to_whitelist(*info_hash)?; - - Ok(()) - } - /// It removes a torrent from the whitelist. /// Removing torrents is not relevant to public trackers. /// @@ -55,8 +41,8 @@ impl WhiteListManager { /// /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. 
pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.remove_torrent_from_database_whitelist(info_hash)?; - self.remove_torrent_from_memory_whitelist(info_hash).await; + self.database_whitelist.remove_torrent_from_database_whitelist(info_hash)?; + self.in_memory_whitelist.remove(info_hash).await; Ok(()) } @@ -66,15 +52,7 @@ impl WhiteListManager { /// /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. pub fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; - - if !is_whitelisted { - return Ok(()); - } - - self.database.remove_info_hash_from_whitelist(*info_hash)?; - - Ok(()) + self.database_whitelist.remove_torrent_from_database_whitelist(info_hash) } /// It adds a torrent from the whitelist in memory. @@ -98,7 +76,7 @@ impl WhiteListManager { /// /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> { - let whitelisted_torrents_from_database = self.database.load_whitelist()?; + let whitelisted_torrents_from_database = self.database_whitelist.load_whitelist_from_database()?; self.in_memory_whitelist.clear().await; @@ -110,8 +88,9 @@ impl WhiteListManager { } } +/// The in-memory list of allowed torrents. struct InMemoryWhitelist { - /// The list of allowed torrents. Only for listed trackers. + /// The list of allowed torrents. whitelist: tokio::sync::RwLock>, } @@ -144,6 +123,59 @@ impl InMemoryWhitelist { } } +/// The persisted list of allowed torrents. 
+struct DatabaseWhitelist { + /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) + /// or [`MySQL`](crate::core::databases::mysql) + database: Arc>, +} + +impl DatabaseWhitelist { + #[must_use] + pub fn new(database: Arc>) -> Self { + Self { database } + } + + /// It adds a torrent to the whitelist if it has not been whitelisted previously + fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; + + if is_whitelisted { + return Ok(()); + } + + self.database.add_info_hash_to_whitelist(*info_hash)?; + + Ok(()) + } + + /// It removes a torrent from the whitelist in the database. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. + pub fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; + + if !is_whitelisted { + return Ok(()); + } + + self.database.remove_info_hash_from_whitelist(*info_hash)?; + + Ok(()) + } + + /// It loads the whitelist from the database. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. 
+ pub fn load_whitelist_from_database(&self) -> Result, databases::error::Error> { + self.database.load_whitelist() + } +} + #[cfg(test)] mod tests { use bittorrent_primitives::info_hash::InfoHash; @@ -203,4 +235,6 @@ mod tests { assert!(whitelist.contains(&info_hash).await); } } + + mod database_whitelist {} } From cc2bc7bb8e54772fac42523dc8b412429df5a0b7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 15 Jan 2025 11:17:48 +0000 Subject: [PATCH 090/802] refactor: [#1182] move structs to new mods in whitelist --- src/core/whitelist/in_memory.rs | 88 +++++++++++++++++ src/core/whitelist/mod.rs | 166 ++------------------------------ src/core/whitelist/persisted.rs | 62 ++++++++++++ src/servers/udp/handlers.rs | 5 +- 4 files changed, 164 insertions(+), 157 deletions(-) create mode 100644 src/core/whitelist/in_memory.rs create mode 100644 src/core/whitelist/persisted.rs diff --git a/src/core/whitelist/in_memory.rs b/src/core/whitelist/in_memory.rs new file mode 100644 index 000000000..78e0eb11f --- /dev/null +++ b/src/core/whitelist/in_memory.rs @@ -0,0 +1,88 @@ +use bittorrent_primitives::info_hash::InfoHash; + +/// The in-memory list of allowed torrents. +#[derive(Debug, Default)] +pub struct InMemoryWhitelist { + /// The list of allowed torrents. + whitelist: tokio::sync::RwLock>, +} + +impl InMemoryWhitelist { + /// It adds a torrent from the whitelist in memory. + pub async fn add(&self, info_hash: &InfoHash) -> bool { + self.whitelist.write().await.insert(*info_hash) + } + + /// It removes a torrent from the whitelist in memory. + pub async fn remove(&self, info_hash: &InfoHash) -> bool { + self.whitelist.write().await.remove(info_hash) + } + + /// It checks if it contains an info-hash. + pub async fn contains(&self, info_hash: &InfoHash) -> bool { + self.whitelist.read().await.contains(info_hash) + } + + /// It clears the whitelist. 
+ pub async fn clear(&self) { + let mut whitelist = self.whitelist.write().await; + whitelist.clear(); + } +} + +#[cfg(test)] +mod tests { + use bittorrent_primitives::info_hash::InfoHash; + + use crate::core::whitelist::in_memory::InMemoryWhitelist; + + fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() // # DevSkim: ignore DS173237 + } + + #[tokio::test] + async fn should_allow_adding_a_new_torrent_to_the_whitelist() { + let info_hash = sample_info_hash(); + + let whitelist = InMemoryWhitelist::default(); + + whitelist.add(&info_hash).await; + + assert!(whitelist.contains(&info_hash).await); + } + + #[tokio::test] + async fn should_allow_removing_a_new_torrent_to_the_whitelist() { + let info_hash = sample_info_hash(); + + let whitelist = InMemoryWhitelist::default(); + + whitelist.add(&info_hash).await; + whitelist.remove(&sample_info_hash()).await; + + assert!(!whitelist.contains(&info_hash).await); + } + + #[tokio::test] + async fn should_allow_clearing_the_whitelist() { + let info_hash = sample_info_hash(); + + let whitelist = InMemoryWhitelist::default(); + + whitelist.add(&info_hash).await; + whitelist.clear().await; + + assert!(!whitelist.contains(&info_hash).await); + } + + #[tokio::test] + async fn should_allow_checking_if_an_infohash_is_whitelisted() { + let info_hash = sample_info_hash(); + + let whitelist = InMemoryWhitelist::default(); + + whitelist.add(&info_hash).await; + + assert!(whitelist.contains(&info_hash).await); + } +} diff --git a/src/core/whitelist/mod.rs b/src/core/whitelist/mod.rs index 97affa1ea..5096bacc9 100644 --- a/src/core/whitelist/mod.rs +++ b/src/core/whitelist/mod.rs @@ -1,6 +1,11 @@ +pub mod in_memory; +pub mod persisted; + use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; +use in_memory::InMemoryWhitelist; +use persisted::DatabaseWhitelist; use super::databases::{self, Database}; @@ -17,7 +22,7 @@ impl WhiteListManager { #[must_use] pub fn new(database: Arc>) 
-> Self { Self { - in_memory_whitelist: InMemoryWhitelist::new(), + in_memory_whitelist: InMemoryWhitelist::default(), database_whitelist: DatabaseWhitelist::new(database), } } @@ -29,7 +34,7 @@ impl WhiteListManager { /// /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.database_whitelist.add_torrent_to_database_whitelist(info_hash)?; + self.database_whitelist.add(info_hash)?; self.in_memory_whitelist.add(info_hash).await; Ok(()) } @@ -41,7 +46,7 @@ impl WhiteListManager { /// /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.database_whitelist.remove_torrent_from_database_whitelist(info_hash)?; + self.database_whitelist.remove(info_hash)?; self.in_memory_whitelist.remove(info_hash).await; Ok(()) } @@ -52,7 +57,7 @@ impl WhiteListManager { /// /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. pub fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.database_whitelist.remove_torrent_from_database_whitelist(info_hash) + self.database_whitelist.remove(info_hash) } /// It adds a torrent from the whitelist in memory. @@ -76,7 +81,7 @@ impl WhiteListManager { /// /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. 
pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> { - let whitelisted_torrents_from_database = self.database_whitelist.load_whitelist_from_database()?; + let whitelisted_torrents_from_database = self.database_whitelist.load_from_database()?; self.in_memory_whitelist.clear().await; @@ -87,154 +92,3 @@ impl WhiteListManager { Ok(()) } } - -/// The in-memory list of allowed torrents. -struct InMemoryWhitelist { - /// The list of allowed torrents. - whitelist: tokio::sync::RwLock>, -} - -impl InMemoryWhitelist { - pub fn new() -> Self { - Self { - whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()), - } - } - - /// It adds a torrent from the whitelist in memory. - pub async fn add(&self, info_hash: &InfoHash) -> bool { - self.whitelist.write().await.insert(*info_hash) - } - - /// It removes a torrent from the whitelist in memory. - pub async fn remove(&self, info_hash: &InfoHash) -> bool { - self.whitelist.write().await.remove(info_hash) - } - - /// It checks if it contains an info-hash. - pub async fn contains(&self, info_hash: &InfoHash) -> bool { - self.whitelist.read().await.contains(info_hash) - } - - /// It clears the whitelist. - pub async fn clear(&self) { - let mut whitelist = self.whitelist.write().await; - whitelist.clear(); - } -} - -/// The persisted list of allowed torrents. 
-struct DatabaseWhitelist { - /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) - /// or [`MySQL`](crate::core::databases::mysql) - database: Arc>, -} - -impl DatabaseWhitelist { - #[must_use] - pub fn new(database: Arc>) -> Self { - Self { database } - } - - /// It adds a torrent to the whitelist if it has not been whitelisted previously - fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; - - if is_whitelisted { - return Ok(()); - } - - self.database.add_info_hash_to_whitelist(*info_hash)?; - - Ok(()) - } - - /// It removes a torrent from the whitelist in the database. - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. - pub fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; - - if !is_whitelisted { - return Ok(()); - } - - self.database.remove_info_hash_from_whitelist(*info_hash)?; - - Ok(()) - } - - /// It loads the whitelist from the database. - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. 
- pub fn load_whitelist_from_database(&self) -> Result, databases::error::Error> { - self.database.load_whitelist() - } -} - -#[cfg(test)] -mod tests { - use bittorrent_primitives::info_hash::InfoHash; - - fn sample_info_hash() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() // # DevSkim: ignore DS173237 - } - - mod in_memory_whitelist { - - use crate::core::whitelist::tests::sample_info_hash; - use crate::core::whitelist::InMemoryWhitelist; - - #[tokio::test] - async fn should_allow_adding_a_new_torrent_to_the_whitelist() { - let info_hash = sample_info_hash(); - - let whitelist = InMemoryWhitelist::new(); - - whitelist.add(&info_hash).await; - - assert!(whitelist.contains(&info_hash).await); - } - - #[tokio::test] - async fn should_allow_removing_a_new_torrent_to_the_whitelist() { - let info_hash = sample_info_hash(); - - let whitelist = InMemoryWhitelist::new(); - - whitelist.add(&info_hash).await; - whitelist.remove(&sample_info_hash()).await; - - assert!(!whitelist.contains(&info_hash).await); - } - - #[tokio::test] - async fn should_allow_clearing_the_whitelist() { - let info_hash = sample_info_hash(); - - let whitelist = InMemoryWhitelist::new(); - - whitelist.add(&info_hash).await; - whitelist.clear().await; - - assert!(!whitelist.contains(&info_hash).await); - } - - #[tokio::test] - async fn should_allow_checking_if_an_infohash_is_whitelisted() { - let info_hash = sample_info_hash(); - - let whitelist = InMemoryWhitelist::new(); - - whitelist.add(&info_hash).await; - - assert!(whitelist.contains(&info_hash).await); - } - } - - mod database_whitelist {} -} diff --git a/src/core/whitelist/persisted.rs b/src/core/whitelist/persisted.rs new file mode 100644 index 000000000..993060139 --- /dev/null +++ b/src/core/whitelist/persisted.rs @@ -0,0 +1,62 @@ +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; + +use super::databases::{self, Database}; + +/// The persisted list of allowed torrents. 
+pub struct DatabaseWhitelist { + /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) + /// or [`MySQL`](crate::core::databases::mysql) + database: Arc>, +} + +impl DatabaseWhitelist { + #[must_use] + pub fn new(database: Arc>) -> Self { + Self { database } + } + + /// It adds a torrent to the whitelist if it has not been whitelisted previously + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `info_hash` to the whitelist database. + pub fn add(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; + + if is_whitelisted { + return Ok(()); + } + + self.database.add_info_hash_to_whitelist(*info_hash)?; + + Ok(()) + } + + /// It removes a torrent from the whitelist in the database. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. + pub fn remove(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; + + if !is_whitelisted { + return Ok(()); + } + + self.database.remove_info_hash_from_whitelist(*info_hash)?; + + Ok(()) + } + + /// It loads the whitelist from the database. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. 
+ pub fn load_from_database(&self) -> Result, databases::error::Error> { + self.database.load_whitelist() + } +} diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 3d7d411ce..62f7d0a02 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -1391,7 +1391,10 @@ mod tests { add_a_seeder(tracker.clone(), &remote_addr, &info_hash).await; - tracker.whitelist_manager.add_torrent_to_memory_whitelist(&info_hash.0.into()).await; + tracker + .whitelist_manager + .add_torrent_to_memory_whitelist(&info_hash.0.into()) + .await; let request = build_scrape_request(&remote_addr, &info_hash); From 2f1abeb873a7c9d273247d064f61163ef3bf0168 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 15 Jan 2025 16:14:51 +0000 Subject: [PATCH 091/802] refactor: [#1182] inject database and whitelist in tracker as dep Inject the database (persistence) and whitelist manager into the Tracker via the contructor to be able to use the whitelist manager directly in Axum handlers. 
--- src/core/mod.rs | 17 ++-- src/core/services/mod.rs | 38 +++++++- src/core/whitelist/mod.rs | 8 +- src/servers/http/v1/services/announce.rs | 43 +++++---- src/servers/http/v1/services/scrape.rs | 67 ++++++-------- src/servers/udp/handlers.rs | 109 ++++++++++------------- 6 files changed, 140 insertions(+), 142 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 875992da5..5f9d44fdb 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -457,11 +457,9 @@ use std::time::Duration; use auth::PeerKey; use bittorrent_primitives::info_hash::InfoHash; -use databases::driver::Driver; use error::PeerKeyError; use tokio::sync::mpsc::error::SendError; use torrust_tracker_clock::clock::Time; -use torrust_tracker_configuration::v2_0_0::database; use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT}; use torrust_tracker_located_error::Located; use torrust_tracker_primitives::core::{AnnounceData, ScrapeData}; @@ -500,7 +498,7 @@ pub struct Tracker { keys: tokio::sync::RwLock>, /// The list of allowed torrents. Only for listed trackers. - pub whitelist_manager: WhiteListManager, + pub whitelist_manager: Arc, /// The in-memory torrents repository. torrents: Arc, @@ -576,24 +574,19 @@ impl Tracker { /// Will return a `databases::error::Error` if unable to connect to database. The `Tracker` is responsible for the persistence. 
pub fn new( config: &Core, + database: &Arc>, + whitelist_manager: &Arc, stats_event_sender: Option>, stats_repository: statistics::repository::Repository, ) -> Result { - let driver = match config.database.driver { - database::Driver::Sqlite3 => Driver::Sqlite3, - database::Driver::MySQL => Driver::MySQL, - }; - - let database = Arc::new(databases::driver::build(&driver, &config.database.path)?); - Ok(Tracker { config: config.clone(), + database: database.clone(), keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), - whitelist_manager: WhiteListManager::new(database.clone()), + whitelist_manager: whitelist_manager.clone(), torrents: Arc::default(), stats_event_sender, stats_repository, - database, }) } diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index 166f40df4..67d5113bc 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -9,8 +9,13 @@ pub mod torrent; use std::sync::Arc; +use databases::driver::Driver; +use torrust_tracker_configuration::v2_0_0::database; use torrust_tracker_configuration::Configuration; +use super::databases::{self, Database}; +use super::whitelist::persisted::DatabaseWhitelist; +use super::whitelist::WhiteListManager; use crate::core::Tracker; /// It returns a new tracker building its dependencies. @@ -20,14 +25,41 @@ use crate::core::Tracker; /// Will panic if tracker cannot be instantiated. 
#[must_use] pub fn tracker_factory(config: &Configuration) -> Tracker { - // Initialize statistics + let database = initialize_database(config); + + let whitelist_manager = initialize_whitelist(database.clone()); + let (stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - // Initialize Torrust tracker - match Tracker::new(&Arc::new(config).core, stats_event_sender, stats_repository) { + match Tracker::new( + &Arc::new(config).core, + &database, + &whitelist_manager, + stats_event_sender, + stats_repository, + ) { Ok(tracker) => tracker, Err(error) => { panic!("{}", error) } } } + +/// # Panics +/// +/// Will panic if database cannot be initialized. +#[must_use] +pub fn initialize_database(config: &Configuration) -> Arc> { + let driver = match config.core.database.driver { + database::Driver::Sqlite3 => Driver::Sqlite3, + database::Driver::MySQL => Driver::MySQL, + }; + + Arc::new(databases::driver::build(&driver, &config.core.database.path).expect("Database driver build failed.")) +} + +#[must_use] +pub fn initialize_whitelist(database: Arc>) -> Arc { + let database_whitelist = Arc::new(DatabaseWhitelist::new(database)); + Arc::new(WhiteListManager::new(database_whitelist)) +} diff --git a/src/core/whitelist/mod.rs b/src/core/whitelist/mod.rs index 5096bacc9..3a88b404c 100644 --- a/src/core/whitelist/mod.rs +++ b/src/core/whitelist/mod.rs @@ -7,7 +7,7 @@ use bittorrent_primitives::info_hash::InfoHash; use in_memory::InMemoryWhitelist; use persisted::DatabaseWhitelist; -use super::databases::{self, Database}; +use super::databases::{self}; /// It handles the list of allowed torrents. Only for listed trackers. pub struct WhiteListManager { @@ -15,15 +15,15 @@ pub struct WhiteListManager { in_memory_whitelist: InMemoryWhitelist, /// The persisted list of allowed torrents. 
- database_whitelist: DatabaseWhitelist, + database_whitelist: Arc, } impl WhiteListManager { #[must_use] - pub fn new(database: Arc>) -> Self { + pub fn new(database_whitelist: Arc) -> Self { Self { in_memory_whitelist: InMemoryWhitelist::default(), - database_whitelist: DatabaseWhitelist::new(database), + database_whitelist, } } diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index df827aee2..06aad669f 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -107,10 +107,28 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; + use crate::core::services::{initialize_database, initialize_whitelist}; use crate::core::{statistics, PeersWanted, Tracker}; use crate::servers::http::v1::services::announce::invoke; use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; + fn test_tracker_factory(stats_event_sender: Option>) -> Tracker { + let config = configuration::ephemeral(); + + let database = initialize_database(&config); + + let whitelist_manager = initialize_whitelist(database.clone()); + + Tracker::new( + &config.core, + &database, + &whitelist_manager, + stats_event_sender, + statistics::repository::Repository::new(), + ) + .unwrap() + } + #[tokio::test] async fn it_should_return_the_announce_data() { let tracker = Arc::new(public_tracker()); @@ -142,14 +160,7 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let tracker = Arc::new( - Tracker::new( - &configuration::ephemeral().core, - Some(stats_event_sender), - statistics::repository::Repository::new(), - ) - .unwrap(), - ); + let tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); let mut peer = sample_peer_using_ipv4(); @@ -162,12 +173,7 @@ mod tests { 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 
0x6969, 0x6969, 0x6969, ))); - Tracker::new( - &configuration.core, - Some(stats_event_sender), - statistics::repository::Repository::new(), - ) - .unwrap() + test_tracker_factory(Some(stats_event_sender)) } fn peer_with_the_ipv4_loopback_ip() -> peer::Peer { @@ -213,14 +219,7 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let tracker = Arc::new( - Tracker::new( - &configuration::ephemeral().core, - Some(stats_event_sender), - statistics::repository::Repository::new(), - ) - .unwrap(), - ); + let tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); let mut peer = sample_peer_using_ipv6(); diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 80d81d78a..6ab11bb4a 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -67,8 +67,8 @@ mod tests { use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; - use crate::core::services::tracker_factory; - use crate::core::Tracker; + use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; + use crate::core::{statistics, Tracker}; fn public_tracker() -> Tracker { tracker_factory(&configuration::ephemeral_public()) @@ -94,6 +94,23 @@ mod tests { } } + fn test_tracker_factory(stats_event_sender: Option>) -> Tracker { + let config = configuration::ephemeral(); + + let database = initialize_database(&config); + + let whitelist_manager = initialize_whitelist(database.clone()); + + Tracker::new( + &config.core, + &database, + &whitelist_manager, + stats_event_sender, + statistics::repository::Repository::new(), + ) + .unwrap() + } + mod with_real_data { use std::future; @@ -103,12 +120,11 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use 
torrust_tracker_test_helpers::configuration; - use crate::core::{statistics, PeersWanted, Tracker}; + use crate::core::{statistics, PeersWanted}; use crate::servers::http::v1::services::scrape::invoke; use crate::servers::http::v1::services::scrape::tests::{ - public_tracker, sample_info_hash, sample_info_hashes, sample_peer, + public_tracker, sample_info_hash, sample_info_hashes, sample_peer, test_tracker_factory, }; #[tokio::test] @@ -148,14 +164,7 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let tracker = Arc::new( - Tracker::new( - &configuration::ephemeral().core, - Some(stats_event_sender), - statistics::repository::Repository::new(), - ) - .unwrap(), - ); + let tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); @@ -172,14 +181,7 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let tracker = Arc::new( - Tracker::new( - &configuration::ephemeral().core, - Some(stats_event_sender), - statistics::repository::Repository::new(), - ) - .unwrap(), - ); + let tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); @@ -195,12 +197,11 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_primitives::core::ScrapeData; - use torrust_tracker_test_helpers::configuration; - use crate::core::{statistics, PeersWanted, Tracker}; + use crate::core::{statistics, PeersWanted}; use crate::servers::http::v1::services::scrape::fake; use crate::servers::http::v1::services::scrape::tests::{ - public_tracker, sample_info_hash, sample_info_hashes, sample_peer, + public_tracker, sample_info_hash, sample_info_hashes, sample_peer, test_tracker_factory, }; #[tokio::test] @@ -232,14 +233,7 @@ mod tests { .returning(|_| 
Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let tracker = Arc::new( - Tracker::new( - &configuration::ephemeral().core, - Some(stats_event_sender), - statistics::repository::Repository::new(), - ) - .unwrap(), - ); + let tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); @@ -256,14 +250,7 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let tracker = Arc::new( - Tracker::new( - &configuration::ephemeral().core, - Some(stats_event_sender), - statistics::repository::Repository::new(), - ) - .unwrap(), - ); + let tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 62f7d0a02..5fc695f88 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -435,8 +435,8 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::gen_remote_fingerprint; - use crate::core::services::tracker_factory; - use crate::core::Tracker; + use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; + use crate::core::{statistics, Tracker}; use crate::CurrentClock; fn tracker_configuration() -> Configuration { @@ -553,6 +553,23 @@ mod tests { } } + fn test_tracker_factory(stats_event_sender: Option>) -> Tracker { + let config = tracker_configuration(); + + let database = initialize_database(&config); + + let whitelist_manager = initialize_whitelist(database.clone()); + + Tracker::new( + &config.core, + &database, + &whitelist_manager, + stats_event_sender, + statistics::repository::Repository::new(), + ) + .unwrap() + } + mod connect_request { use std::future; @@ -561,13 +578,13 @@ mod tests { use 
aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; use mockall::predicate::eq; - use super::{sample_ipv4_socket_address, sample_ipv6_remote_addr, tracker_configuration}; - use crate::core::{self, statistics}; + use super::{sample_ipv4_socket_address, sample_ipv6_remote_addr}; + use crate::core::statistics; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_connect; use crate::servers::udp::handlers::tests::{ public_tracker, sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv6_remote_addr_fingerprint, - sample_issue_time, + sample_issue_time, test_tracker_factory, }; fn sample_connect_request() -> ConnectRequest { @@ -639,14 +656,7 @@ mod tests { let client_socket_address = sample_ipv4_socket_address(); - let torrent_tracker = Arc::new( - core::Tracker::new( - &tracker_configuration().core, - Some(stats_event_sender), - statistics::repository::Repository::new(), - ) - .unwrap(), - ); + let torrent_tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); handle_connect( client_socket_address, &sample_connect_request(), @@ -666,14 +676,7 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let torrent_tracker = Arc::new( - core::Tracker::new( - &tracker_configuration().core, - Some(stats_event_sender), - statistics::repository::Repository::new(), - ) - .unwrap(), - ); + let torrent_tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); handle_connect( sample_ipv6_remote_addr(), &sample_connect_request(), @@ -774,7 +777,7 @@ mod tests { use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ gen_remote_fingerprint, public_tracker, sample_cookie_valid_range, sample_ipv4_socket_address, sample_issue_time, - tracker_configuration, TorrentPeerBuilder, + test_tracker_factory, TorrentPeerBuilder, }; use 
crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; @@ -927,14 +930,7 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let tracker = Arc::new( - core::Tracker::new( - &tracker_configuration().core, - Some(stats_event_sender), - statistics::repository::Repository::new(), - ) - .unwrap(), - ); + let tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); handle_announce( sample_ipv4_socket_address(), @@ -1013,7 +1009,7 @@ mod tests { use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ gen_remote_fingerprint, public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, sample_issue_time, - tracker_configuration, TorrentPeerBuilder, + test_tracker_factory, TorrentPeerBuilder, }; use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; @@ -1173,14 +1169,7 @@ mod tests { .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let stats_event_sender = Box::new(stats_event_sender_mock); - let tracker = Arc::new( - core::Tracker::new( - &tracker_configuration().core, - Some(stats_event_sender), - statistics::repository::Repository::new(), - ) - .unwrap(), - ); + let tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); let remote_addr = sample_ipv6_remote_addr(); @@ -1200,6 +1189,7 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; use crate::core; + use crate::core::services::{initialize_database, initialize_whitelist}; use crate::core::statistics::keeper::Keeper; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_announce; @@ -1211,9 +1201,20 @@ mod tests { #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let configuration = 
Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); + let database = initialize_database(&configuration); + let whitelist_manager = initialize_whitelist(database.clone()); let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); - let tracker = - Arc::new(core::Tracker::new(&configuration.core, Some(stats_event_sender), stats_repository).unwrap()); + + let tracker = Arc::new( + core::Tracker::new( + &configuration.core, + &database, + &whitelist_manager, + Some(stats_event_sender), + stats_repository, + ) + .unwrap(), + ); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); @@ -1456,10 +1457,10 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; - use crate::core::{self, statistics}; + use crate::core::statistics; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{ - sample_cookie_valid_range, sample_ipv4_remote_addr, tracker_configuration, + sample_cookie_valid_range, sample_ipv4_remote_addr, test_tracker_factory, }; #[tokio::test] @@ -1473,14 +1474,7 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let remote_addr = sample_ipv4_remote_addr(); - let tracker = Arc::new( - core::Tracker::new( - &tracker_configuration().core, - Some(stats_event_sender), - statistics::repository::Repository::new(), - ) - .unwrap(), - ); + let tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); handle_scrape( remote_addr, @@ -1500,10 +1494,10 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; - use crate::core::{self, statistics}; + use crate::core::statistics; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{ - sample_cookie_valid_range, sample_ipv6_remote_addr, tracker_configuration, + sample_cookie_valid_range, sample_ipv6_remote_addr, test_tracker_factory, }; #[tokio::test] @@ -1517,14 +1511,7 @@ 
mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let remote_addr = sample_ipv6_remote_addr(); - let tracker = Arc::new( - core::Tracker::new( - &tracker_configuration().core, - Some(stats_event_sender), - statistics::repository::Repository::new(), - ) - .unwrap(), - ); + let tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); handle_scrape( remote_addr, From 4253d0f7b62d5e8bf9bb1e757ef3b20347992491 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 15 Jan 2025 16:30:18 +0000 Subject: [PATCH 092/802] refactor: [#1182] use WhitelistManager in API handlers directly, instead of using it via the Tracker. --- src/servers/apis/v1/context/whitelist/handlers.rs | 14 +++++++------- src/servers/apis/v1/context/whitelist/routes.rs | 11 +++++++---- src/servers/apis/v1/routes.rs | 2 +- 3 files changed, 15 insertions(+), 12 deletions(-) diff --git a/src/servers/apis/v1/context/whitelist/handlers.rs b/src/servers/apis/v1/context/whitelist/handlers.rs index 04085f8ab..f548f5dc4 100644 --- a/src/servers/apis/v1/context/whitelist/handlers.rs +++ b/src/servers/apis/v1/context/whitelist/handlers.rs @@ -10,7 +10,7 @@ use bittorrent_primitives::info_hash::InfoHash; use super::responses::{ failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, }; -use crate::core::Tracker; +use crate::core::whitelist::WhiteListManager; use crate::servers::apis::v1::responses::{invalid_info_hash_param_response, ok_response}; use crate::servers::apis::InfoHashParam; @@ -24,12 +24,12 @@ use crate::servers::apis::InfoHashParam; /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::whitelist#add-a-torrent-to-the-whitelist) /// for more information about this endpoint. 
pub async fn add_torrent_to_whitelist_handler( - State(tracker): State>, + State(whitelist_manager): State>, Path(info_hash): Path, ) -> Response { match InfoHash::from_str(&info_hash.0) { Err(_) => invalid_info_hash_param_response(&info_hash.0), - Ok(info_hash) => match tracker.add_torrent_to_whitelist(&info_hash).await { + Ok(info_hash) => match whitelist_manager.add_torrent_to_whitelist(&info_hash).await { Ok(()) => ok_response(), Err(e) => failed_to_whitelist_torrent_response(e), }, @@ -47,12 +47,12 @@ pub async fn add_torrent_to_whitelist_handler( /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::whitelist#remove-a-torrent-from-the-whitelist) /// for more information about this endpoint. pub async fn remove_torrent_from_whitelist_handler( - State(tracker): State>, + State(whitelist_manager): State>, Path(info_hash): Path, ) -> Response { match InfoHash::from_str(&info_hash.0) { Err(_) => invalid_info_hash_param_response(&info_hash.0), - Ok(info_hash) => match tracker.remove_torrent_from_whitelist(&info_hash).await { + Ok(info_hash) => match whitelist_manager.remove_torrent_from_whitelist(&info_hash).await { Ok(()) => ok_response(), Err(e) => failed_to_remove_torrent_from_whitelist_response(e), }, @@ -69,8 +69,8 @@ pub async fn remove_torrent_from_whitelist_handler( /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::whitelist#reload-the-whitelist) /// for more information about this endpoint. 
-pub async fn reload_whitelist_handler(State(tracker): State>) -> Response { - match tracker.load_whitelist_from_database().await { +pub async fn reload_whitelist_handler(State(whitelist_manager): State>) -> Response { + match whitelist_manager.load_whitelist_from_database().await { Ok(()) => ok_response(), Err(e) => failed_to_reload_whitelist_response(e), } diff --git a/src/servers/apis/v1/context/whitelist/routes.rs b/src/servers/apis/v1/context/whitelist/routes.rs index 35312ea97..c58aa7177 100644 --- a/src/servers/apis/v1/context/whitelist/routes.rs +++ b/src/servers/apis/v1/context/whitelist/routes.rs @@ -14,19 +14,22 @@ use super::handlers::{add_torrent_to_whitelist_handler, reload_whitelist_handler use crate::core::Tracker; /// It adds the routes to the router for the [`whitelist`](crate::servers::apis::v1::context::whitelist) API context. -pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { +pub fn add(prefix: &str, router: Router, tracker: &Arc) -> Router { let prefix = format!("{prefix}/whitelist"); router // Whitelisted torrents .route( &format!("{prefix}/{{info_hash}}"), - post(add_torrent_to_whitelist_handler).with_state(tracker.clone()), + post(add_torrent_to_whitelist_handler).with_state(tracker.whitelist_manager.clone()), ) .route( &format!("{prefix}/{{info_hash}}"), - delete(remove_torrent_from_whitelist_handler).with_state(tracker.clone()), + delete(remove_torrent_from_whitelist_handler).with_state(tracker.whitelist_manager.clone()), ) // Whitelist commands - .route(&format!("{prefix}/reload"), get(reload_whitelist_handler).with_state(tracker)) + .route( + &format!("{prefix}/reload"), + get(reload_whitelist_handler).with_state(tracker.whitelist_manager.clone()), + ) } diff --git a/src/servers/apis/v1/routes.rs b/src/servers/apis/v1/routes.rs index 23ef6c47e..4c97c7578 100644 --- a/src/servers/apis/v1/routes.rs +++ b/src/servers/apis/v1/routes.rs @@ -14,7 +14,7 @@ pub fn add(prefix: &str, router: Router, tracker: Arc, ban_service: 
Arc let router = auth_key::routes::add(&v1_prefix, router, tracker.clone()); let router = stats::routes::add(&v1_prefix, router, tracker.clone(), ban_service); - let router = whitelist::routes::add(&v1_prefix, router, tracker.clone()); + let router = whitelist::routes::add(&v1_prefix, router, &tracker); torrent::routes::add(&v1_prefix, router, tracker) } From 658d2be631be303eaaaf4e35260d3bfe0f89769a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 15 Jan 2025 17:09:56 +0000 Subject: [PATCH 093/802] refactor: [#1182] inject database and whitelist manager in tracker factory Refactor in progress. The final goal is to inject the whitelist manager directly wherever is needed (for example, test envs) to avoid injecting the whole tracker. And to finally remove the whitelist manager from the Tracker (A higher level refactor in progress: remove responsibilities from core Tracker). --- src/bootstrap/app.rs | 8 +++-- src/core/mod.rs | 25 ++++++++++----- src/core/services/mod.rs | 14 ++++----- src/core/services/statistics/mod.rs | 7 +++-- src/core/services/torrent.rs | 39 ++++++++++++++++++------ src/servers/http/v1/handlers/announce.rs | 22 ++++++++++--- src/servers/http/v1/handlers/scrape.rs | 22 ++++++++++--- src/servers/http/v1/services/announce.rs | 7 +++-- src/servers/http/v1/services/scrape.rs | 5 ++- src/servers/udp/handlers.rs | 4 ++- 10 files changed, 112 insertions(+), 41 deletions(-) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 38b7d40c5..9be52359b 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -21,7 +21,7 @@ use tracing::instrument; use super::config::initialize_configuration; use crate::bootstrap; -use crate::core::services::tracker_factory; +use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; use crate::core::Tracker; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; @@ -105,7 +105,11 @@ pub fn
initialize_static() { #[must_use] #[instrument(skip(config))] pub fn initialize_tracker(config: &Configuration) -> Tracker { - tracker_factory(config) + let database = initialize_database(config); + + let whitelist_manager = initialize_whitelist(database.clone()); + + tracker_factory(config, &database, &whitelist_manager) } /// It initializes the log threshold, format and channel. diff --git a/src/core/mod.rs b/src/core/mod.rs index 5f9d44fdb..51d330880 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -1154,25 +1154,36 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::core::peer::Peer; - use crate::core::services::tracker_factory; + use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; use crate::core::{TorrentsMetrics, Tracker}; fn public_tracker() -> Tracker { - tracker_factory(&configuration::ephemeral_public()) + let config = configuration::ephemeral_public(); + let database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + tracker_factory(&config, &database, &whitelist_manager) } fn private_tracker() -> Tracker { - tracker_factory(&configuration::ephemeral_private()) + let config = configuration::ephemeral_private(); + let database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + tracker_factory(&config, &database, &whitelist_manager) } fn whitelisted_tracker() -> Tracker { - tracker_factory(&configuration::ephemeral_listed()) + let config = configuration::ephemeral_listed(); + let database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + tracker_factory(&config, &database, &whitelist_manager) } pub fn tracker_persisting_torrents_in_database() -> Tracker { - let mut configuration = configuration::ephemeral(); - configuration.core.tracker_policy.persistent_torrent_completed_stat = true; - tracker_factory(&configuration) + let mut config = 
configuration::ephemeral_listed(); + config.core.tracker_policy.persistent_torrent_completed_stat = true; + let database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + tracker_factory(&config, &database, &whitelist_manager) } fn sample_info_hash() -> InfoHash { diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index 67d5113bc..a6b5e3371 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -24,17 +24,17 @@ use crate::core::Tracker; /// /// Will panic if tracker cannot be instantiated. #[must_use] -pub fn tracker_factory(config: &Configuration) -> Tracker { - let database = initialize_database(config); - - let whitelist_manager = initialize_whitelist(database.clone()); - +pub fn tracker_factory( + config: &Configuration, + database: &Arc>, + whitelist_manager: &Arc, +) -> Tracker { let (stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); match Tracker::new( &Arc::new(config).core, - &database, - &whitelist_manager, + database, + whitelist_manager, stats_event_sender, stats_repository, ) { diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index 4143aaf1f..2352953eb 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -114,7 +114,7 @@ mod tests { use crate::core; use crate::core::services::statistics::{get_metrics, TrackerMetrics}; - use crate::core::services::tracker_factory; + use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; @@ -124,7 +124,10 @@ mod tests { #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let config = tracker_configuration(); + let database = 
initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + let tracker = Arc::new(tracker_factory(&tracker_configuration(), &database, &whitelist_manager)); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let tracker_metrics = get_metrics(tracker.clone(), ban_service.clone()).await; diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index e63d2efa2..0b89de7ef 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -131,7 +131,7 @@ mod tests { use crate::core::services::torrent::tests::sample_peer; use crate::core::services::torrent::{get_torrent_info, Info}; - use crate::core::services::tracker_factory; + use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() @@ -139,7 +139,10 @@ mod tests { #[tokio::test] async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let config = tracker_configuration(); + let database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let torrent_info = get_torrent_info( tracker.clone(), @@ -152,7 +155,10 @@ mod tests { #[tokio::test] async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let config = tracker_configuration(); + let database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -184,7 +190,7 @@ mod tests { use 
crate::core::services::torrent::tests::sample_peer; use crate::core::services::torrent::{get_torrents_page, BasicInfo, Pagination}; - use crate::core::services::tracker_factory; + use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() @@ -192,7 +198,10 @@ mod tests { #[tokio::test] async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let config = tracker_configuration(); + let database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; @@ -201,7 +210,10 @@ mod tests { #[tokio::test] async fn should_return_a_summarized_info_for_all_torrents() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let config = tracker_configuration(); + let database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -223,7 +235,10 @@ mod tests { #[tokio::test] async fn should_allow_limiting_the_number_of_torrents_in_the_result() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let config = tracker_configuration(); + let database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -243,7 +258,10 @@ mod tests { #[tokio::test] 
async fn should_allow_using_pagination_in_the_result() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let config = tracker_configuration(); + let database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -272,7 +290,10 @@ mod tests { #[tokio::test] async fn should_return_torrents_ordered_by_info_hash() { - let tracker = Arc::new(tracker_factory(&tracker_configuration())); + let config = tracker_configuration(); + let database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index a17e877fa..fc2739db4 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -185,23 +185,35 @@ mod tests { use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; - use crate::core::services::tracker_factory; + use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; use crate::core::Tracker; fn private_tracker() -> Tracker { - tracker_factory(&configuration::ephemeral_private()) + let config = configuration::ephemeral_private(); + let database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + tracker_factory(&config, &database, &whitelist_manager) } fn whitelisted_tracker() -> Tracker { - tracker_factory(&configuration::ephemeral_listed()) + let config = configuration::ephemeral_listed(); + let 
database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + tracker_factory(&config, &database, &whitelist_manager) } fn tracker_on_reverse_proxy() -> Tracker { - tracker_factory(&configuration::ephemeral_with_reverse_proxy()) + let config = configuration::ephemeral_with_reverse_proxy(); + let database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + tracker_factory(&config, &database, &whitelist_manager) } fn tracker_not_on_reverse_proxy() -> Tracker { - tracker_factory(&configuration::ephemeral_without_reverse_proxy()) + let config = configuration::ephemeral_without_reverse_proxy(); + let database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + tracker_factory(&config, &database, &whitelist_manager) } fn sample_announce_request() -> Announce { diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 2aa1bd9f8..88d4c92de 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -121,23 +121,35 @@ mod tests { use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; - use crate::core::services::tracker_factory; + use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; use crate::core::Tracker; fn private_tracker() -> Tracker { - tracker_factory(&configuration::ephemeral_private()) + let config = configuration::ephemeral_private(); + let database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + tracker_factory(&config, &database, &whitelist_manager) } fn whitelisted_tracker() -> Tracker { - tracker_factory(&configuration::ephemeral_listed()) + let config = configuration::ephemeral_listed(); + let database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + 
tracker_factory(&config, &database, &whitelist_manager) } fn tracker_on_reverse_proxy() -> Tracker { - tracker_factory(&configuration::ephemeral_with_reverse_proxy()) + let config = configuration::ephemeral_with_reverse_proxy(); + let database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + tracker_factory(&config, &database, &whitelist_manager) } fn tracker_not_on_reverse_proxy() -> Tracker { - tracker_factory(&configuration::ephemeral_without_reverse_proxy()) + let config = configuration::ephemeral_without_reverse_proxy(); + let database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + tracker_factory(&config, &database, &whitelist_manager) } fn sample_scrape_request() -> Scrape { diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 06aad669f..937560692 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -59,11 +59,14 @@ mod tests { use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; - use crate::core::services::tracker_factory; + use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; use crate::core::Tracker; fn public_tracker() -> Tracker { - tracker_factory(&configuration::ephemeral_public()) + let config = configuration::ephemeral_public(); + let database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + tracker_factory(&config, &database, &whitelist_manager) } fn sample_info_hash() -> InfoHash { diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 6ab11bb4a..ea2712b6e 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -71,7 +71,10 @@ mod tests { use crate::core::{statistics, Tracker}; fn public_tracker() -> Tracker { - 
tracker_factory(&configuration::ephemeral_public()) + let config = configuration::ephemeral_public(); + let database = initialize_database(&config); + let whitelist_manager = initialize_whitelist(database.clone()); + tracker_factory(&config, &database, &whitelist_manager) } fn sample_info_hashes() -> Vec { diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 5fc695f88..6110af530 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -456,7 +456,9 @@ mod tests { } fn initialized_tracker(configuration: &Configuration) -> Arc { - tracker_factory(configuration).into() + let database = initialize_database(configuration); + let whitelist_manager = initialize_whitelist(database.clone()); + tracker_factory(configuration, &database, &whitelist_manager).into() } fn sample_ipv4_remote_addr() -> SocketAddr { From 882af33a179290a817a532a8eeaf7d3d4b9979c6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 15 Jan 2025 17:25:21 +0000 Subject: [PATCH 094/802] refactor: [#1182] remove duplicate code --- src/bootstrap/app.rs | 12 +++++++++-- src/core/mod.rs | 15 ++++++------- src/core/services/statistics/mod.rs | 6 +++--- src/core/services/torrent.rs | 27 ++++++++++-------------- src/servers/http/v1/handlers/announce.rs | 15 ++++++------- src/servers/http/v1/handlers/scrape.rs | 15 ++++++------- src/servers/http/v1/services/announce.rs | 12 +++++------ src/servers/http/v1/services/scrape.rs | 10 ++++----- src/servers/udp/handlers.rs | 23 +++++++++----------- 9 files changed, 61 insertions(+), 74 deletions(-) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 9be52359b..788037b0b 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -21,7 +21,9 @@ use tracing::instrument; use super::config::initialize_configuration; use crate::bootstrap; +use crate::core::databases::Database; use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; +use crate::core::whitelist::WhiteListManager; use 
crate::core::Tracker; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; @@ -105,11 +107,17 @@ pub fn initialize_static() { #[must_use] #[instrument(skip(config))] pub fn initialize_tracker(config: &Configuration) -> Tracker { - let database = initialize_database(config); + let (database, whitelist_manager) = initialize_tracker_dependencies(config); + + tracker_factory(config, &database, &whitelist_manager) +} +#[must_use] +pub fn initialize_tracker_dependencies(config: &Configuration) -> (Arc>, Arc) { + let database = initialize_database(config); let whitelist_manager = initialize_whitelist(database.clone()); - tracker_factory(config, &database, &whitelist_manager) + (database, whitelist_manager) } /// It initializes the log threshold, format and channel. diff --git a/src/core/mod.rs b/src/core/mod.rs index 51d330880..c8bb7f6a9 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -1153,36 +1153,33 @@ mod tests { use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration; + use crate::bootstrap::app::initialize_tracker_dependencies; use crate::core::peer::Peer; - use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; + use crate::core::services::tracker_factory; use crate::core::{TorrentsMetrics, Tracker}; fn public_tracker() -> Tracker { let config = configuration::ephemeral_public(); - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); tracker_factory(&config, &database, &whitelist_manager) } fn private_tracker() -> Tracker { let config = configuration::ephemeral_private(); - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); 
tracker_factory(&config, &database, &whitelist_manager) } fn whitelisted_tracker() -> Tracker { let config = configuration::ephemeral_listed(); - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); tracker_factory(&config, &database, &whitelist_manager) } pub fn tracker_persisting_torrents_in_database() -> Tracker { let mut config = configuration::ephemeral_listed(); config.core.tracker_policy.persistent_torrent_completed_stat = true; - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); tracker_factory(&config, &database, &whitelist_manager) } diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index 2352953eb..d4e77ce4c 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -112,9 +112,10 @@ mod tests { use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_test_helpers::configuration; + use crate::bootstrap::app::initialize_tracker_dependencies; use crate::core; use crate::core::services::statistics::{get_metrics, TrackerMetrics}; - use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; + use crate::core::services::tracker_factory; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; @@ -125,8 +126,7 @@ mod tests { #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { let config = tracker_configuration(); - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); let tracker = 
Arc::new(tracker_factory(&tracker_configuration(), &database, &whitelist_manager)); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 0b89de7ef..1be2acc93 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -129,9 +129,10 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; + use crate::bootstrap::app::initialize_tracker_dependencies; use crate::core::services::torrent::tests::sample_peer; use crate::core::services::torrent::{get_torrent_info, Info}; - use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; + use crate::core::services::tracker_factory; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() @@ -140,8 +141,7 @@ mod tests { #[tokio::test] async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { let config = tracker_configuration(); - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let torrent_info = get_torrent_info( @@ -156,8 +156,7 @@ mod tests { #[tokio::test] async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { let config = tracker_configuration(); - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -188,9 +187,10 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; + use 
crate::bootstrap::app::initialize_tracker_dependencies; use crate::core::services::torrent::tests::sample_peer; use crate::core::services::torrent::{get_torrents_page, BasicInfo, Pagination}; - use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; + use crate::core::services::tracker_factory; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() @@ -199,8 +199,7 @@ mod tests { #[tokio::test] async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let config = tracker_configuration(); - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; @@ -211,8 +210,7 @@ mod tests { #[tokio::test] async fn should_return_a_summarized_info_for_all_torrents() { let config = tracker_configuration(); - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -236,8 +234,7 @@ mod tests { #[tokio::test] async fn should_allow_limiting_the_number_of_torrents_in_the_result() { let config = tracker_configuration(); - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -259,8 +256,7 @@ mod tests { #[tokio::test] async fn 
should_allow_using_pagination_in_the_result() { let config = tracker_configuration(); - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); @@ -291,8 +287,7 @@ mod tests { #[tokio::test] async fn should_return_torrents_ordered_by_info_hash() { let config = tracker_configuration(); - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index fc2739db4..df4658420 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -185,34 +185,31 @@ mod tests { use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; - use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; + use crate::bootstrap::app::initialize_tracker_dependencies; + use crate::core::services::tracker_factory; use crate::core::Tracker; fn private_tracker() -> Tracker { let config = configuration::ephemeral_private(); - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); tracker_factory(&config, &database, &whitelist_manager) } fn whitelisted_tracker() -> Tracker { let config = configuration::ephemeral_listed(); - let database = initialize_database(&config); - let whitelist_manager = 
initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); tracker_factory(&config, &database, &whitelist_manager) } fn tracker_on_reverse_proxy() -> Tracker { let config = configuration::ephemeral_with_reverse_proxy(); - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); tracker_factory(&config, &database, &whitelist_manager) } fn tracker_not_on_reverse_proxy() -> Tracker { let config = configuration::ephemeral_without_reverse_proxy(); - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); tracker_factory(&config, &database, &whitelist_manager) } diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 88d4c92de..dd144d898 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -121,34 +121,31 @@ mod tests { use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; - use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; + use crate::bootstrap::app::initialize_tracker_dependencies; + use crate::core::services::tracker_factory; use crate::core::Tracker; fn private_tracker() -> Tracker { let config = configuration::ephemeral_private(); - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); tracker_factory(&config, &database, &whitelist_manager) } fn whitelisted_tracker() -> Tracker { let config = configuration::ephemeral_listed(); - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); 
+ let (database, whitelist_manager) = initialize_tracker_dependencies(&config); tracker_factory(&config, &database, &whitelist_manager) } fn tracker_on_reverse_proxy() -> Tracker { let config = configuration::ephemeral_with_reverse_proxy(); - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); tracker_factory(&config, &database, &whitelist_manager) } fn tracker_not_on_reverse_proxy() -> Tracker { let config = configuration::ephemeral_without_reverse_proxy(); - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); tracker_factory(&config, &database, &whitelist_manager) } diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 937560692..f19c69c2f 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -59,13 +59,13 @@ mod tests { use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; - use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; + use crate::bootstrap::app::initialize_tracker_dependencies; + use crate::core::services::tracker_factory; use crate::core::Tracker; fn public_tracker() -> Tracker { let config = configuration::ephemeral_public(); - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); tracker_factory(&config, &database, &whitelist_manager) } @@ -110,7 +110,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; - use crate::core::services::{initialize_database, initialize_whitelist}; + 
use crate::bootstrap::app::initialize_tracker_dependencies; use crate::core::{statistics, PeersWanted, Tracker}; use crate::servers::http::v1::services::announce::invoke; use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; @@ -118,9 +118,7 @@ mod tests { fn test_tracker_factory(stats_event_sender: Option>) -> Tracker { let config = configuration::ephemeral(); - let database = initialize_database(&config); - - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); Tracker::new( &config.core, diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index ea2712b6e..0a96031a0 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -67,13 +67,13 @@ mod tests { use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; - use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; + use crate::bootstrap::app::initialize_tracker_dependencies; + use crate::core::services::tracker_factory; use crate::core::{statistics, Tracker}; fn public_tracker() -> Tracker { let config = configuration::ephemeral_public(); - let database = initialize_database(&config); - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); tracker_factory(&config, &database, &whitelist_manager) } @@ -100,9 +100,7 @@ mod tests { fn test_tracker_factory(stats_event_sender: Option>) -> Tracker { let config = configuration::ephemeral(); - let database = initialize_database(&config); - - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); Tracker::new( &config.core, diff --git a/src/servers/udp/handlers.rs 
b/src/servers/udp/handlers.rs index 6110af530..292ccfd3a 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -435,7 +435,8 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::gen_remote_fingerprint; - use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; + use crate::bootstrap::app::initialize_tracker_dependencies; + use crate::core::services::tracker_factory; use crate::core::{statistics, Tracker}; use crate::CurrentClock; @@ -455,10 +456,9 @@ mod tests { initialized_tracker(&configuration::ephemeral_listed()) } - fn initialized_tracker(configuration: &Configuration) -> Arc { - let database = initialize_database(configuration); - let whitelist_manager = initialize_whitelist(database.clone()); - tracker_factory(configuration, &database, &whitelist_manager).into() + fn initialized_tracker(config: &Configuration) -> Arc { + let (database, whitelist_manager) = initialize_tracker_dependencies(config); + tracker_factory(config, &database, &whitelist_manager).into() } fn sample_ipv4_remote_addr() -> SocketAddr { @@ -558,9 +558,7 @@ mod tests { fn test_tracker_factory(stats_event_sender: Option>) -> Tracker { let config = tracker_configuration(); - let database = initialize_database(&config); - - let whitelist_manager = initialize_whitelist(database.clone()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); Tracker::new( &config.core, @@ -1190,8 +1188,8 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use crate::bootstrap::app::initialize_tracker_dependencies; use crate::core; - use crate::core::services::{initialize_database, initialize_whitelist}; use crate::core::statistics::keeper::Keeper; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_announce; @@ -1202,14 +1200,13 @@ mod tests { #[tokio::test] async fn 
the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { - let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let database = initialize_database(&configuration); - let whitelist_manager = initialize_whitelist(database.clone()); + let config = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); let tracker = Arc::new( core::Tracker::new( - &configuration.core, + &config.core, &database, &whitelist_manager, Some(stats_event_sender), From 57455cabc88556b432a9eaf15f360ca8f428d0f2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 15 Jan 2025 17:51:12 +0000 Subject: [PATCH 095/802] refactor: [#1182] remove whitelist context methods from core tracker --- src/app.rs | 1 + src/core/mod.rs | 95 ++++++------------- tests/servers/api/environment.rs | 8 ++ .../api/v1/contract/context/whitelist.rs | 16 ++-- tests/servers/http/environment.rs | 7 ++ tests/servers/http/v1/contract.rs | 4 +- 6 files changed, 55 insertions(+), 76 deletions(-) diff --git a/src/app.rs b/src/app.rs index abfe75256..1cfc57c2e 100644 --- a/src/app.rs +++ b/src/app.rs @@ -67,6 +67,7 @@ pub async fn start( // Load whitelisted torrents if tracker.is_listed() { tracker + .whitelist_manager .load_whitelist_from_database() .await .expect("Could not load whitelist from database."); diff --git a/src/core/mod.rs b/src/core/mod.rs index c8bb7f6a9..f142fa26e 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -1044,7 +1044,7 @@ impl Tracker { return Ok(()); } - if self.is_info_hash_whitelisted(info_hash).await { + if self.whitelist_manager.is_info_hash_whitelisted(info_hash).await { return Ok(()); } @@ -1054,48 +1054,6 @@ impl Tracker { }) } - /// It adds a torrent to the whitelist. 
- /// Adding torrents is not relevant to public trackers. - /// - /// # Context: Whitelist - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. - pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.whitelist_manager.add_torrent_to_whitelist(info_hash).await - } - - /// It removes a torrent from the whitelist. - /// Removing torrents is not relevant to public trackers. - /// - /// # Context: Whitelist - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. - pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.whitelist_manager.remove_torrent_from_whitelist(info_hash).await - } - - /// It checks if a torrent is whitelisted. - /// - /// # Context: Whitelist - pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { - self.whitelist_manager.is_info_hash_whitelisted(info_hash).await - } - - /// It loads the whitelist from the database. - /// - /// # Context: Whitelist - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. - pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> { - self.whitelist_manager.load_whitelist_from_database().await - } - /// It return the `Tracker` [`statistics::metrics::Metrics`]. 
/// /// # Context: Statistics @@ -1156,6 +1114,7 @@ mod tests { use crate::bootstrap::app::initialize_tracker_dependencies; use crate::core::peer::Peer; use crate::core::services::tracker_factory; + use crate::core::whitelist::WhiteListManager; use crate::core::{TorrentsMetrics, Tracker}; fn public_tracker() -> Tracker { @@ -1170,10 +1129,12 @@ mod tests { tracker_factory(&config, &database, &whitelist_manager) } - fn whitelisted_tracker() -> Tracker { + fn whitelisted_tracker() -> (Tracker, Arc) { let config = configuration::ephemeral_listed(); let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager) + let tracker = tracker_factory(&config, &database, &whitelist_manager); + + (tracker, whitelist_manager) } pub fn tracker_persisting_torrents_in_database() -> Tracker { @@ -1707,11 +1668,11 @@ mod tests { #[tokio::test] async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { - let tracker = whitelisted_tracker(); + let (tracker, whitelist_manager) = whitelisted_tracker(); let info_hash = sample_info_hash(); - let result = tracker.add_torrent_to_whitelist(&info_hash).await; + let result = whitelist_manager.add_torrent_to_whitelist(&info_hash).await; assert!(result.is_ok()); let result = tracker.authorize(&info_hash).await; @@ -1720,7 +1681,7 @@ mod tests { #[tokio::test] async fn it_should_not_authorize_the_announce_and_scrape_actions_on_not_whitelisted_torrents() { - let tracker = whitelisted_tracker(); + let (tracker, _whitelist_manager) = whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -1732,28 +1693,33 @@ mod tests { mod handling_the_torrent_whitelist { use crate::core::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; + // todo: after extracting the WhitelistManager from the Tracker, + // there is no need to use the tracker to test the whitelist. 
+ // Test not using the `tracker` (`_tracker` variable) should be + // moved to the whitelist module. + #[tokio::test] async fn it_should_add_a_torrent_to_the_whitelist() { - let tracker = whitelisted_tracker(); + let (_tracker, whitelist_manager) = whitelisted_tracker(); let info_hash = sample_info_hash(); - tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); - assert!(tracker.is_info_hash_whitelisted(&info_hash).await); + assert!(whitelist_manager.is_info_hash_whitelisted(&info_hash).await); } #[tokio::test] async fn it_should_remove_a_torrent_from_the_whitelist() { - let tracker = whitelisted_tracker(); + let (_tracker, whitelist_manager) = whitelisted_tracker(); let info_hash = sample_info_hash(); - tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); - tracker.remove_torrent_from_whitelist(&info_hash).await.unwrap(); + whitelist_manager.remove_torrent_from_whitelist(&info_hash).await.unwrap(); - assert!(!tracker.is_info_hash_whitelisted(&info_hash).await); + assert!(!whitelist_manager.is_info_hash_whitelisted(&info_hash).await); } mod persistence { @@ -1761,22 +1727,19 @@ mod tests { #[tokio::test] async fn it_should_load_the_whitelist_from_the_database() { - let tracker = whitelisted_tracker(); + let (_tracker, whitelist_manager) = whitelisted_tracker(); let info_hash = sample_info_hash(); - tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + whitelist_manager.remove_torrent_from_memory_whitelist(&info_hash).await; - // Remove torrent from the in-memory whitelist - tracker - .whitelist_manager - .remove_torrent_from_memory_whitelist(&info_hash) - .await; - assert!(!tracker.is_info_hash_whitelisted(&info_hash).await); + assert!(!whitelist_manager.is_info_hash_whitelisted(&info_hash).await); - 
tracker.load_whitelist_from_database().await.unwrap(); + whitelist_manager.load_whitelist_from_database().await.unwrap(); - assert!(tracker.is_info_hash_whitelisted(&info_hash).await); + assert!(whitelist_manager.is_info_hash_whitelisted(&info_hash).await); } } } @@ -1807,7 +1770,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_zeroed_swarm_metadata_for_the_requested_file_if_it_is_not_whitelisted() { - let tracker = whitelisted_tracker(); + let (tracker, _whitelist_manager) = whitelisted_tracker(); let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 70f2d4c65..37d031e1c 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -8,6 +8,7 @@ use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; use torrust_tracker_configuration::{Configuration, HttpApi}; use torrust_tracker_lib::bootstrap::app::initialize_with_configuration; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; +use torrust_tracker_lib::core::whitelist::WhiteListManager; use torrust_tracker_lib::core::Tracker; use torrust_tracker_lib::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; use torrust_tracker_lib::servers::registar::Registar; @@ -21,6 +22,7 @@ where { pub config: Arc, pub tracker: Arc, + pub whitelist_manager: Arc, pub ban_service: Arc>, pub registar: Registar, pub server: ApiServer, @@ -40,6 +42,9 @@ impl Environment { pub fn new(configuration: &Arc) -> Self { let tracker = initialize_with_configuration(configuration); + // todo: get from `initialize_with_configuration` + let whitelist_manager = tracker.whitelist_manager.clone(); + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let config = Arc::new(configuration.http_api.clone().expect("missing API configuration")); @@ -53,6 +58,7 @@ impl Environment { Self { config, tracker, + whitelist_manager, 
ban_service, registar: Registar::default(), server, @@ -65,6 +71,7 @@ impl Environment { Environment { config: self.config, tracker: self.tracker.clone(), + whitelist_manager: self.whitelist_manager.clone(), ban_service: self.ban_service.clone(), registar: self.registar.clone(), server: self @@ -85,6 +92,7 @@ impl Environment { Environment { config: self.config, tracker: self.tracker, + whitelist_manager: self.whitelist_manager, ban_service: self.ban_service, registar: Registar::default(), server: self.server.stop().await.unwrap(), diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs index 6dde663a5..aef1db4f1 100644 --- a/tests/servers/api/v1/contract/context/whitelist.rs +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -31,7 +31,7 @@ async fn should_allow_whitelisting_a_torrent() { assert_ok(response).await; assert!( - env.tracker + env.whitelist_manager .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) .await ); @@ -167,7 +167,7 @@ async fn should_allow_removing_a_torrent_from_the_whitelist() { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + env.whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); let request_id = Uuid::new_v4(); @@ -176,7 +176,7 @@ async fn should_allow_removing_a_torrent_from_the_whitelist() { .await; assert_ok(response).await; - assert!(!env.tracker.is_info_hash_whitelisted(&info_hash).await); + assert!(!env.whitelist_manager.is_info_hash_whitelisted(&info_hash).await); env.stop().await; } @@ -237,7 +237,7 @@ async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + 
env.whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); force_database_error(&env.tracker); @@ -266,7 +266,7 @@ async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthentica let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + env.whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); let request_id = Uuid::new_v4(); @@ -281,7 +281,7 @@ async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthentica "Expected logs to contain: ERROR ... API ... request_id={request_id}" ); - env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + env.whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); let request_id = Uuid::new_v4(); @@ -307,7 +307,7 @@ async fn should_allow_reload_the_whitelist_from_the_database() { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + env.whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); let request_id = Uuid::new_v4(); @@ -338,7 +338,7 @@ async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + env.whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); force_database_error(&env.tracker); diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index d615d7eaf..6d4001e6c 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -5,6 +5,7 @@ use futures::executor::block_on; use torrust_tracker_configuration::{Configuration, HttpTracker}; use 
torrust_tracker_lib::bootstrap::app::initialize_with_configuration; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; +use torrust_tracker_lib::core::whitelist::WhiteListManager; use torrust_tracker_lib::core::Tracker; use torrust_tracker_lib::servers::http::server::{HttpServer, Launcher, Running, Stopped}; use torrust_tracker_lib::servers::registar::Registar; @@ -13,6 +14,7 @@ use torrust_tracker_primitives::peer; pub struct Environment { pub config: Arc, pub tracker: Arc, + pub whitelist_manager: Arc, pub registar: Registar, pub server: HttpServer, } @@ -29,6 +31,8 @@ impl Environment { pub fn new(configuration: &Arc) -> Self { let tracker = initialize_with_configuration(configuration); + let whitelist_manager = tracker.whitelist_manager.clone(); + let http_tracker = configuration .http_trackers .clone() @@ -45,6 +49,7 @@ impl Environment { Self { config, tracker, + whitelist_manager, registar: Registar::default(), server, } @@ -55,6 +60,7 @@ impl Environment { Environment { config: self.config, tracker: self.tracker.clone(), + whitelist_manager: self.whitelist_manager.clone(), registar: self.registar.clone(), server: self.server.start(self.tracker, self.registar.give_form()).await.unwrap(), } @@ -70,6 +76,7 @@ impl Environment { Environment { config: self.config, tracker: self.tracker, + whitelist_manager: self.whitelist_manager, registar: Registar::default(), server: self.server.stop().await.unwrap(), diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index db03f526e..37d0288f4 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -1261,7 +1261,7 @@ mod configured_as_whitelisted { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - env.tracker + env.whitelist_manager .add_torrent_to_whitelist(&info_hash) .await .expect("should add the torrent to the whitelist"); @@ -1343,7 +1343,7 @@ mod configured_as_whitelisted { .build(), ); - env.tracker + 
env.whitelist_manager .add_torrent_to_whitelist(&info_hash) .await .expect("should add the torrent to the whitelist"); From 4c2d61e6d4160a1b4309af3ee05de774e2308751 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 16 Jan 2025 09:36:48 +0000 Subject: [PATCH 096/802] chore(deps): udpate dependencies ```output cargo update Updating crates.io index Locking 36 packages to latest compatible versions Updating anstyle-wincon v3.0.6 -> v3.0.7 Updating bitflags v2.6.0 -> v2.8.0 Updating borsh v1.5.3 -> v1.5.4 Updating borsh-derive v1.5.3 -> v1.5.4 Downgrading btoi v0.4.4 -> v0.4.3 Updating cc v1.2.7 -> v1.2.9 Updating clap v4.5.23 -> v4.5.26 Updating clap_builder v4.5.23 -> v4.5.26 Updating clap_derive v4.5.18 -> v4.5.24 Updating event-listener v5.3.1 -> v5.4.0 Updating futures-lite v2.5.0 -> v2.6.0 Updating js-sys v0.3.76 -> v0.3.77 Updating libz-sys v1.1.20 -> v1.1.21 Updating linux-raw-sys v0.4.14 -> v0.4.15 Updating log v0.4.22 -> v0.4.25 Updating miniz_oxide v0.8.2 -> v0.8.3 Updating neli v0.6.4 -> v0.6.5 Updating neli-proc-macros v0.1.3 -> v0.1.4 Updating proc-macro2 v1.0.92 -> v1.0.93 Updating rustix v0.38.42 -> v0.38.43 Updating rustls v0.23.20 -> v0.23.21 Updating security-framework-sys v2.13.0 -> v2.14.0 Updating syn v2.0.95 -> v2.0.96 Updating thiserror v2.0.9 -> v2.0.11 Updating thiserror-impl v2.0.9 -> v2.0.11 Updating tokio v1.42.0 -> v1.43.0 Updating tokio-macros v2.4.0 -> v2.5.0 Updating uuid v1.11.0 -> v1.12.0 Updating wasm-bindgen v0.2.99 -> v0.2.100 Updating wasm-bindgen-backend v0.2.99 -> v0.2.100 Updating wasm-bindgen-futures v0.4.49 -> v0.4.50 Updating wasm-bindgen-macro v0.2.99 -> v0.2.100 Updating wasm-bindgen-macro-support v0.2.99 -> v0.2.100 Updating wasm-bindgen-shared v0.2.99 -> v0.2.100 Updating web-sys v0.3.76 -> v0.3.77 Updating winnow v0.6.22 -> v0.6.24 ``` --- Cargo.lock | 245 +++++++++++++++++++++++++++-------------------------- 1 file changed, 125 insertions(+), 120 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 
c097ed80e..7ab861d2b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -132,11 +132,12 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version = "3.0.6" +version = "3.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" +checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" dependencies = [ "anstyle", + "once_cell", "windows-sys 0.59.0", ] @@ -287,7 +288,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.4.0", "event-listener-strategy", "pin-project-lite", ] @@ -444,7 +445,7 @@ checksum = "604fde5e028fea851ce1d8570bbdc034bec850d157f7569d10f347d06808c05c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -532,7 +533,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -543,9 +544,9 @@ checksum = "02b4ff8b16e6076c3e14220b39fbc1fabb6737522281a388998046859400895f" [[package]] name = "bitflags" -version = "2.6.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" +checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" [[package]] name = "bittorrent-http-protocol" @@ -558,7 +559,7 @@ dependencies = [ "percent-encoding", "serde", "serde_bencode", - "thiserror 2.0.9", + "thiserror 2.0.11", "torrust-tracker-configuration", "torrust-tracker-contrib-bencode", "torrust-tracker-located-error", @@ -593,7 +594,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_repr", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "torrust-tracker-configuration", "torrust-tracker-located-error", @@ -657,9 +658,9 @@ dependencies = [ [[package]] name = "borsh" -version 
= "1.5.3" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2506947f73ad44e344215ccd6403ac2ae18cd8e046e581a441bf8d199f257f03" +checksum = "9fb65153674e51d3a42c8f27b05b9508cea85edfaade8aa46bc8fc18cecdfef3" dependencies = [ "borsh-derive", "cfg_aliases", @@ -667,15 +668,15 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.5.3" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2593a3b8b938bd68373196c9832f516be11fa487ef4ae745eb282e6a56a7244" +checksum = "a396e17ad94059c650db3d253bb6e25927f1eb462eede7e7a153bb6e75dce0a7" dependencies = [ "once_cell", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -701,9 +702,9 @@ dependencies = [ [[package]] name = "btoi" -version = "0.4.4" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9586aa4bb508d369941af10c87af0ce6f4ea051bb4f21047791b921c45822137" +checksum = "9dd6407f73a9b8b6162d8a2ef999fe6afd7cc15902ebf42c5cd296addf17e0ad" dependencies = [ "num-traits", ] @@ -786,9 +787,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.7" +version = "1.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a012a0df96dd6d06ba9a1b29d6402d1a5d77c6befd2566afdc26e10603dc93d7" +checksum = "c8293772165d9345bdaaa39b45b2109591e63fe5e6fbc23c6ff930a048aa310b" dependencies = [ "jobserver", "libc", @@ -879,9 +880,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.23" +version = "4.5.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" +checksum = "a8eb5e908ef3a6efbe1ed62520fb7287959888c88485abe072543190ecc66783" dependencies = [ "clap_builder", "clap_derive", @@ -889,9 +890,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.23" +version = "4.5.26" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" +checksum = "96b01801b5fc6a0a232407abc821660c9c6d25a1cafc0d4f85f29fb8d9afc121" dependencies = [ "anstream", "anstyle", @@ -901,14 +902,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.18" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" +checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -1129,7 +1130,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -1140,7 +1141,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -1184,7 +1185,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", "unicode-xid", ] @@ -1196,7 +1197,7 @@ checksum = "65f152f4b8559c4da5d574bafc7af85454d706b4c5fe8b530d508cacbb6807ea" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -1217,7 +1218,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -1275,9 +1276,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "5.3.1" +version = "5.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" dependencies = [ "concurrent-queue", "parking", 
@@ -1290,7 +1291,7 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" dependencies = [ - "event-listener 5.3.1", + "event-listener 5.4.0", "pin-project-lite", ] @@ -1420,7 +1421,7 @@ checksum = "e99b8b3c28ae0e84b604c75f721c21dc77afb3706076af5e8216d15fd1deaae3" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -1432,7 +1433,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -1444,7 +1445,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -1503,9 +1504,9 @@ checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cef40d21ae2c515b51041df9ed313ed21e572df340ea58a922a0aefe7e8891a1" +checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" dependencies = [ "fastrand", "futures-core", @@ -1522,7 +1523,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -1954,7 +1955,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -2088,9 +2089,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.76" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" dependencies = [ "once_cell", "wasm-bindgen", @@ -2146,9 +2147,9 @@ dependencies = [ [[package]] name = "libz-sys" 
-version = "1.1.20" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" +checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" dependencies = [ "cc", "pkg-config", @@ -2157,9 +2158,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.14" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" @@ -2191,9 +2192,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.22" +version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" dependencies = [ "value-bag", ] @@ -2233,9 +2234,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ffbe83022cedc1d264172192511ae958937694cd57ce297164951b8b3568394" +checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" dependencies = [ "adler2", ] @@ -2274,7 +2275,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -2324,7 +2325,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", "termcolor", "thiserror 1.0.69", ] @@ -2396,9 +2397,9 @@ dependencies = [ [[package]] name = "neli" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1100229e06604150b3becd61a4965d5c70f3be1759544ea7274166f4be41ef43" +checksum = 
"93062a0dce6da2517ea35f301dfc88184ce18d3601ec786a727a87bf535deca9" dependencies = [ "byteorder", "libc", @@ -2408,9 +2409,9 @@ dependencies = [ [[package]] name = "neli-proc-macros" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c168194d373b1e134786274020dae7fc5513d565ea2ebb9bc9ff17ffb69106d4" +checksum = "0c8034b7fbb6f9455b2a96c19e6edf8dc9fc34c70449938d8ee3b4df363f61fe" dependencies = [ "either", "proc-macro2", @@ -2523,7 +2524,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -2599,7 +2600,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -2673,7 +2674,7 @@ checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -2823,14 +2824,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] name = "proc-macro2" -version = "1.0.92" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" +checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" dependencies = [ "unicode-ident", ] @@ -2843,7 +2844,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", "version_check", "yansi", ] @@ -3153,7 +3154,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.95", + "syn 2.0.96", "unicode-ident", ] @@ -3210,9 +3211,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.42" +version = "0.38.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" +checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" dependencies = [ "bitflags", "errno", @@ -3223,9 +3224,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.20" +version = "0.23.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5065c3f250cbd332cd894be57c40fa52387247659b14a2d6041d121547903b1b" +checksum = "8f287924602bf649d949c63dc8ac8b235fa5387d394020705b80c4eb597ce5b8" dependencies = [ "once_cell", "rustls-pki-types", @@ -3332,9 +3333,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.13.0" +version = "2.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1863fd3768cd83c56a7f60faa4dc0d403f1b6df0a38c3c25f44b7894e45370d5" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" dependencies = [ "core-foundation-sys", "libc", @@ -3382,7 +3383,7 @@ checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -3429,7 +3430,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -3480,7 +3481,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -3619,9 +3620,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.95" +version = "2.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46f71c0377baf4ef1cc3e3402ded576dccc315800fbc62dfc7fe04b009773b4a" +checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" dependencies = [ "proc-macro2", "quote", @@ -3645,7 +3646,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] 
[[package]] @@ -3732,11 +3733,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.9" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f072643fd0190df67a8bab670c20ef5d8737177d6ac6b2e9a236cb096206b2cc" +checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" dependencies = [ - "thiserror-impl 2.0.9", + "thiserror-impl 2.0.11", ] [[package]] @@ -3747,18 +3748,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] name = "thiserror-impl" -version = "2.0.9" +version = "2.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b50fa271071aae2e6ee85f842e2e28ba8cd2c5fb67f11fcb1fd70b276f9e7d4" +checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -3839,9 +3840,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.42.0" +version = "1.43.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" +checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" dependencies = [ "backtrace", "bytes", @@ -3856,13 +3857,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -3979,7 +3980,7 @@ dependencies = [ "serde_json", "serde_repr", "serde_with", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", 
"torrust-tracker-api-client", "torrust-tracker-clock", @@ -4004,7 +4005,7 @@ dependencies = [ "hyper", "reqwest", "serde", - "thiserror 2.0.9", + "thiserror 2.0.11", "url", "uuid", ] @@ -4026,7 +4027,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_json", - "thiserror 2.0.9", + "thiserror 2.0.11", "tokio", "torrust-tracker-configuration", "tracing", @@ -4053,7 +4054,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.9", + "thiserror 2.0.11", "toml", "torrust-tracker-located-error", "url", @@ -4065,14 +4066,14 @@ name = "torrust-tracker-contrib-bencode" version = "3.0.0-develop" dependencies = [ "criterion", - "thiserror 2.0.9", + "thiserror 2.0.11", ] [[package]] name = "torrust-tracker-located-error" version = "3.0.0-develop" dependencies = [ - "thiserror 2.0.9", + "thiserror 2.0.11", "tracing", ] @@ -4087,7 +4088,7 @@ dependencies = [ "serde", "tdyne-peer-id", "tdyne-peer-id-registry", - "thiserror 2.0.9", + "thiserror 2.0.11", "torrust-tracker-configuration", "zerocopy", ] @@ -4204,7 +4205,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -4337,9 +4338,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a" +checksum = "744018581f9a3454a9e15beb8a33b017183f1e7c0cd170232a2d1453b23a51c4" dependencies = [ "getrandom", "rand", @@ -4396,34 +4397,35 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.99" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396" +checksum = 
"1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" dependencies = [ "cfg-if", "once_cell", + "rustversion", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.99" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" dependencies = [ "bumpalo", "log", "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.49" +version = "0.4.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38176d9b44ea84e9184eff0bc34cc167ed044f816accfe5922e54d84cf48eca2" +checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" dependencies = [ "cfg-if", "js-sys", @@ -4434,9 +4436,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.99" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4444,28 +4446,31 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.99" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.99" +version = "0.2.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] [[package]] name = "web-sys" -version = "0.3.76" +version = "0.3.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04dd7223427d52553d3702c004d3b2fe07c148165faa56313cb00211e31c12bc" +checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" dependencies = [ "js-sys", "wasm-bindgen", @@ -4625,9 +4630,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.22" +version = "0.6.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39281189af81c07ec09db316b302a3e67bf9bd7cbf6c820b50e35fee9c2fa980" +checksum = "c8d71a593cc5c42ad7876e2c1fda56f314f3754c084128833e64f1345ff8a03a" dependencies = [ "memchr", ] @@ -4679,7 +4684,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", "synstructure", ] @@ -4701,7 +4706,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] @@ -4721,7 +4726,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", "synstructure", ] @@ -4750,7 +4755,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.95", + "syn 2.0.96", ] [[package]] From c415430fca76341d9571dc61673831274c5f6a8a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 16 Jan 2025 17:54:22 +0000 Subject: [PATCH 097/802] refactor: [#1184] inject stats event sender and repository into the core tracker This is part of a big refactor. 
We are extracting responsabilities from the tracker. The first step is to inject the service into the tracker and later we will use the extracted services directly. Finally we will removed the injected service from the tracker when it's not used anymore via the tracker. --- src/bootstrap/app.rs | 23 ++++++-- src/core/mod.rs | 30 +++++------ src/core/services/mod.rs | 6 ++- src/core/services/statistics/mod.rs | 12 ++++- src/core/services/torrent.rs | 67 +++++++++++++++++++----- src/servers/http/v1/handlers/announce.rs | 16 +++--- src/servers/http/v1/handlers/scrape.rs | 16 +++--- src/servers/http/v1/services/announce.rs | 14 +++-- src/servers/http/v1/services/scrape.rs | 15 ++++-- src/servers/udp/handlers.rs | 23 +++++--- 10 files changed, 151 insertions(+), 71 deletions(-) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 788037b0b..2c6c23ab9 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -22,7 +22,9 @@ use tracing::instrument; use super::config::initialize_configuration; use crate::bootstrap; use crate::core::databases::Database; -use crate::core::services::{initialize_database, initialize_whitelist, tracker_factory}; +use crate::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; +use crate::core::statistics::event::sender::Sender; +use crate::core::statistics::repository::Repository; use crate::core::whitelist::WhiteListManager; use crate::core::Tracker; use crate::servers::udp::server::banning::BanService; @@ -107,17 +109,28 @@ pub fn initialize_static() { #[must_use] #[instrument(skip(config))] pub fn initialize_tracker(config: &Configuration) -> Tracker { - let (database, whitelist_manager) = initialize_tracker_dependencies(config); + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(config); - tracker_factory(config, &database, &whitelist_manager) + tracker_factory(config, &database, &whitelist_manager, &stats_event_sender, 
&stats_repository) } +#[allow(clippy::type_complexity)] #[must_use] -pub fn initialize_tracker_dependencies(config: &Configuration) -> (Arc>, Arc) { +pub fn initialize_tracker_dependencies( + config: &Configuration, +) -> ( + Arc>, + Arc, + Arc>>, + Arc, +) { let database = initialize_database(config); let whitelist_manager = initialize_whitelist(database.clone()); + let (stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + let stats_event_sender = Arc::new(stats_event_sender); + let stats_repository = Arc::new(stats_repository); - (database, whitelist_manager) + (database, whitelist_manager, stats_event_sender, stats_repository) } /// It initializes the log threshold, format and channel. diff --git a/src/core/mod.rs b/src/core/mod.rs index f142fa26e..d61474c2c 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -504,10 +504,10 @@ pub struct Tracker { torrents: Arc, /// Service to send stats events. - stats_event_sender: Option>, + stats_event_sender: Arc>>, /// The in-memory stats repo. - stats_repository: statistics::repository::Repository, + stats_repository: Arc, } /// How many peers the peer announcing wants in the announce response. 
@@ -576,8 +576,8 @@ impl Tracker { config: &Core, database: &Arc>, whitelist_manager: &Arc, - stats_event_sender: Option>, - stats_repository: statistics::repository::Repository, + stats_event_sender: &Arc>>, + stats_repository: &Arc, ) -> Result { Ok(Tracker { config: config.clone(), @@ -585,8 +585,8 @@ impl Tracker { keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), whitelist_manager: whitelist_manager.clone(), torrents: Arc::default(), - stats_event_sender, - stats_repository, + stats_event_sender: stats_event_sender.clone(), + stats_repository: stats_repository.clone(), }) } @@ -1068,7 +1068,7 @@ impl Tracker { &self, event: statistics::event::Event, ) -> Option>> { - match &self.stats_event_sender { + match &*self.stats_event_sender { None => None, Some(stats_event_sender) => stats_event_sender.send_event(event).await, } @@ -1119,20 +1119,20 @@ mod tests { fn public_tracker() -> Tracker { let config = configuration::ephemeral_public(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager) + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) } fn private_tracker() -> Tracker { let config = configuration::ephemeral_private(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager) + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) } fn whitelisted_tracker() -> (Tracker, Arc) { let config = configuration::ephemeral_listed(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = tracker_factory(&config, &database, 
&whitelist_manager); + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + let tracker = tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository); (tracker, whitelist_manager) } @@ -1140,8 +1140,8 @@ mod tests { pub fn tracker_persisting_torrents_in_database() -> Tracker { let mut config = configuration::ephemeral_listed(); config.core.tracker_policy.persistent_torrent_completed_stat = true; - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager) + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) } fn sample_info_hash() -> InfoHash { diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index a6b5e3371..4034925cd 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -14,6 +14,8 @@ use torrust_tracker_configuration::v2_0_0::database; use torrust_tracker_configuration::Configuration; use super::databases::{self, Database}; +use super::statistics::event::sender::Sender; +use super::statistics::repository::Repository; use super::whitelist::persisted::DatabaseWhitelist; use super::whitelist::WhiteListManager; use crate::core::Tracker; @@ -28,8 +30,10 @@ pub fn tracker_factory( config: &Configuration, database: &Arc>, whitelist_manager: &Arc, + stats_event_sender: &Arc>>, + stats_repository: &Arc, ) -> Tracker { - let (stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + //let (stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); match Tracker::new( &Arc::new(config).core, diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index d4e77ce4c..3c44bc310 100644 
--- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -126,8 +126,16 @@ mod tests { #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { let config = tracker_configuration(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(tracker_factory(&tracker_configuration(), &database, &whitelist_manager)); + + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + let tracker = Arc::new(tracker_factory( + &config, + &database, + &whitelist_manager, + &stats_event_sender, + &stats_repository, + )); + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let tracker_metrics = get_metrics(tracker.clone(), ban_service.clone()).await; diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 1be2acc93..a4db67979 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -141,8 +141,11 @@ mod tests { #[tokio::test] async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { let config = tracker_configuration(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); + + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + let tracker = tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository); + + let tracker = Arc::new(tracker); let torrent_info = get_torrent_info( tracker.clone(), @@ -156,8 +159,14 @@ mod tests { #[tokio::test] async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { let config = tracker_configuration(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); + 
let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + let tracker = Arc::new(tracker_factory( + &config, + &database, + &whitelist_manager, + &stats_event_sender, + &stats_repository, + )); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -199,8 +208,14 @@ mod tests { #[tokio::test] async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let config = tracker_configuration(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + let tracker = Arc::new(tracker_factory( + &config, + &database, + &whitelist_manager, + &stats_event_sender, + &stats_repository, + )); let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; @@ -210,8 +225,14 @@ mod tests { #[tokio::test] async fn should_return_a_summarized_info_for_all_torrents() { let config = tracker_configuration(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + let tracker = Arc::new(tracker_factory( + &config, + &database, + &whitelist_manager, + &stats_event_sender, + &stats_repository, + )); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -234,8 +255,14 @@ mod tests { #[tokio::test] async fn should_allow_limiting_the_number_of_torrents_in_the_result() { let config = tracker_configuration(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = 
Arc::new(tracker_factory(&config, &database, &whitelist_manager)); + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + let tracker = Arc::new(tracker_factory( + &config, + &database, + &whitelist_manager, + &stats_event_sender, + &stats_repository, + )); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -256,8 +283,14 @@ mod tests { #[tokio::test] async fn should_allow_using_pagination_in_the_result() { let config = tracker_configuration(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + let tracker = Arc::new(tracker_factory( + &config, + &database, + &whitelist_manager, + &stats_event_sender, + &stats_repository, + )); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -287,8 +320,14 @@ mod tests { #[tokio::test] async fn should_return_torrents_ordered_by_info_hash() { let config = tracker_configuration(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + let tracker = Arc::new(tracker_factory( + &config, + &database, + &whitelist_manager, + &stats_event_sender, + &stats_repository, + )); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index df4658420..b21f035da 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ 
b/src/servers/http/v1/handlers/announce.rs @@ -191,26 +191,26 @@ mod tests { fn private_tracker() -> Tracker { let config = configuration::ephemeral_private(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager) + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) } fn whitelisted_tracker() -> Tracker { let config = configuration::ephemeral_listed(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager) + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) } fn tracker_on_reverse_proxy() -> Tracker { let config = configuration::ephemeral_with_reverse_proxy(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager) + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) } fn tracker_not_on_reverse_proxy() -> Tracker { let config = configuration::ephemeral_without_reverse_proxy(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager) + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) } fn sample_announce_request() -> Announce { diff --git a/src/servers/http/v1/handlers/scrape.rs 
b/src/servers/http/v1/handlers/scrape.rs index dd144d898..41afb6bbb 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -127,26 +127,26 @@ mod tests { fn private_tracker() -> Tracker { let config = configuration::ephemeral_private(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager) + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) } fn whitelisted_tracker() -> Tracker { let config = configuration::ephemeral_listed(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager) + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) } fn tracker_on_reverse_proxy() -> Tracker { let config = configuration::ephemeral_with_reverse_proxy(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager) + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) } fn tracker_not_on_reverse_proxy() -> Tracker { let config = configuration::ephemeral_without_reverse_proxy(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager) + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, 
&stats_repository) } fn sample_scrape_request() -> Scrape { diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index f19c69c2f..3a4d4820a 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -65,8 +65,8 @@ mod tests { fn public_tracker() -> Tracker { let config = configuration::ephemeral_public(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager) + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) } fn sample_info_hash() -> InfoHash { @@ -118,14 +118,18 @@ mod tests { fn test_tracker_factory(stats_event_sender: Option>) -> Tracker { let config = configuration::ephemeral(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (database, whitelist_manager, _stats_event_sender, _stats_repository) = initialize_tracker_dependencies(&config); + + let stats_event_sender = Arc::new(stats_event_sender); + + let stats_repository = Arc::new(statistics::repository::Repository::new()); Tracker::new( &config.core, &database, &whitelist_manager, - stats_event_sender, - statistics::repository::Repository::new(), + &stats_event_sender, + &stats_repository, ) .unwrap() } diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 0a96031a0..01be81db6 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -61,6 +61,7 @@ async fn send_scrape_event(original_peer_ip: &IpAddr, tracker: &Arc) { mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; @@ -73,8 +74,8 @@ mod tests { fn 
public_tracker() -> Tracker { let config = configuration::ephemeral_public(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager) + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); + tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) } fn sample_info_hashes() -> Vec { @@ -100,14 +101,18 @@ mod tests { fn test_tracker_factory(stats_event_sender: Option>) -> Tracker { let config = configuration::ephemeral(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (database, whitelist_manager, _stats_event_sender, _stats_repository) = initialize_tracker_dependencies(&config); + + let stats_event_sender = Arc::new(stats_event_sender); + + let stats_repository = Arc::new(statistics::repository::Repository::new()); Tracker::new( &config.core, &database, &whitelist_manager, - stats_event_sender, - statistics::repository::Repository::new(), + &stats_event_sender, + &stats_repository, ) .unwrap() } diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 292ccfd3a..4b20c2ac5 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -457,8 +457,8 @@ mod tests { } fn initialized_tracker(config: &Configuration) -> Arc { - let (database, whitelist_manager) = initialize_tracker_dependencies(config); - tracker_factory(config, &database, &whitelist_manager).into() + let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(config); + tracker_factory(config, &database, &whitelist_manager, &stats_event_sender, &stats_repository).into() } fn sample_ipv4_remote_addr() -> SocketAddr { @@ -558,14 +558,18 @@ mod tests { fn test_tracker_factory(stats_event_sender: Option>) -> Tracker { let config = tracker_configuration(); - let (database, whitelist_manager) = 
initialize_tracker_dependencies(&config); + let (database, whitelist_manager, _stats_event_sender, _stats_repository) = initialize_tracker_dependencies(&config); + + let stats_event_sender = Arc::new(stats_event_sender); + + let stats_repository = Arc::new(statistics::repository::Repository::new()); Tracker::new( &config.core, &database, &whitelist_manager, - stats_event_sender, - statistics::repository::Repository::new(), + &stats_event_sender, + &stats_repository, ) .unwrap() } @@ -1201,7 +1205,10 @@ mod tests { #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let config = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + + let (database, whitelist_manager, _stats_event_sender, _stats_repository) = + initialize_tracker_dependencies(&config); + let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); let tracker = Arc::new( @@ -1209,8 +1216,8 @@ mod tests { &config.core, &database, &whitelist_manager, - Some(stats_event_sender), - stats_repository, + &Arc::new(Some(stats_event_sender)), + &Arc::new(stats_repository), ) .unwrap(), ); From 2a0bc4789fb13cd3e69c88d3cabf30642f6fb2d6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Jan 2025 09:43:58 +0000 Subject: [PATCH 098/802] refactor: [#1184] remove stats functionality from core tracker --- src/app.rs | 18 +- src/bootstrap/app.rs | 37 +- src/bootstrap/jobs/http_tracker.rs | 18 +- src/bootstrap/jobs/tracker_apis.rs | 66 ++- src/bootstrap/jobs/udp_tracker.rs | 6 +- src/console/profiling.rs | 4 +- src/core/mod.rs | 49 +- src/core/services/mod.rs | 14 +- src/core/services/statistics/mod.rs | 28 +- src/core/services/torrent.rs | 70 +-- src/main.rs | 4 +- src/servers/apis/routes.rs | 15 +- src/servers/apis/server.rs | 57 ++- src/servers/apis/v1/context/stats/handlers.rs | 6 +- src/servers/apis/v1/context/stats/routes.rs 
| 13 +- src/servers/apis/v1/routes.rs | 20 +- src/servers/http/server.rs | 30 +- src/servers/http/v1/handlers/announce.rs | 156 ++++-- src/servers/http/v1/handlers/scrape.rs | 137 +++-- src/servers/http/v1/routes.rs | 25 +- src/servers/http/v1/services/announce.rs | 89 ++-- src/servers/http/v1/services/scrape.rs | 102 ++-- src/servers/udp/handlers.rs | 481 ++++++++++++------ src/servers/udp/server/launcher.rs | 50 +- src/servers/udp/server/mod.rs | 25 +- src/servers/udp/server/processor.rs | 45 +- src/servers/udp/server/spawner.rs | 13 +- src/servers/udp/server/states.rs | 16 +- tests/servers/api/environment.rs | 29 +- tests/servers/http/environment.rs | 22 +- tests/servers/http/v1/contract.rs | 16 +- tests/servers/udp/contract.rs | 5 +- tests/servers/udp/environment.rs | 25 +- 33 files changed, 1134 insertions(+), 557 deletions(-) diff --git a/src/app.rs b/src/app.rs index 1cfc57c2e..14dc0b07f 100644 --- a/src/app.rs +++ b/src/app.rs @@ -29,6 +29,8 @@ use torrust_tracker_configuration::Configuration; use tracing::instrument; use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; +use crate::core::statistics::event::sender::Sender; +use crate::core::statistics::repository::Repository; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; use crate::{core, servers}; @@ -39,11 +41,13 @@ use crate::{core, servers}; /// /// - Can't retrieve tracker keys from database. /// - Can't load whitelist from database. 
-#[instrument(skip(config, tracker, ban_service))] +#[instrument(skip(config, tracker, ban_service, stats_event_sender, stats_repository))] pub async fn start( config: &Configuration, tracker: Arc, ban_service: Arc>, + stats_event_sender: Arc>>, + stats_repository: Arc, ) -> Vec> { if config.http_api.is_none() && (config.udp_trackers.is_none() || config.udp_trackers.as_ref().map_or(true, std::vec::Vec::is_empty)) @@ -83,7 +87,14 @@ pub async fn start( ); } else { jobs.push( - udp_tracker::start_job(udp_tracker_config, tracker.clone(), ban_service.clone(), registar.give_form()).await, + udp_tracker::start_job( + udp_tracker_config, + tracker.clone(), + stats_event_sender.clone(), + ban_service.clone(), + registar.give_form(), + ) + .await, ); } } @@ -97,6 +108,7 @@ pub async fn start( if let Some(job) = http_tracker::start_job( http_tracker_config, tracker.clone(), + stats_event_sender.clone(), registar.give_form(), servers::http::Version::V1, ) @@ -115,6 +127,8 @@ pub async fn start( http_api_config, tracker.clone(), ban_service.clone(), + stats_event_sender.clone(), + stats_repository.clone(), registar.give_form(), servers::apis::Version::V1, ) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 2c6c23ab9..d63b414e1 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -38,8 +38,15 @@ use crate::shared::crypto::keys::{self, Keeper as _}; /// /// Setup can file if the configuration is invalid. 
#[must_use] +#[allow(clippy::type_complexity)] #[instrument(skip())] -pub fn setup() -> (Configuration, Arc, Arc>) { +pub fn setup() -> ( + Configuration, + Arc, + Arc>, + Arc>>, + Arc, +) { #[cfg(not(test))] check_seed(); @@ -49,13 +56,19 @@ pub fn setup() -> (Configuration, Arc, Arc>) { panic!("Configuration error: {e}"); } - let tracker = initialize_with_configuration(&configuration); + // Initialize services + + let (stats_event_sender, stats_repository) = statistics::setup::factory(configuration.core.tracker_usage_statistics); + let stats_event_sender = Arc::new(stats_event_sender); + let stats_repository = Arc::new(stats_repository); let udp_ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let tracker = initialize_with_configuration(&configuration); + tracing::info!("Configuration:\n{}", configuration.clone().mask_secrets().to_json()); - (configuration, tracker, udp_ban_service) + (configuration, tracker, udp_ban_service, stats_event_sender, stats_repository) } /// checks if the seed is the instance seed in production. 
@@ -109,28 +122,18 @@ pub fn initialize_static() { #[must_use] #[instrument(skip(config))] pub fn initialize_tracker(config: &Configuration) -> Tracker { - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(config); + let (database, whitelist_manager) = initialize_tracker_dependencies(config); - tracker_factory(config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) + tracker_factory(config, &database, &whitelist_manager) } #[allow(clippy::type_complexity)] #[must_use] -pub fn initialize_tracker_dependencies( - config: &Configuration, -) -> ( - Arc>, - Arc, - Arc>>, - Arc, -) { +pub fn initialize_tracker_dependencies(config: &Configuration) -> (Arc>, Arc) { let database = initialize_database(config); let whitelist_manager = initialize_whitelist(database.clone()); - let (stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - let stats_event_sender = Arc::new(stats_event_sender); - let stats_repository = Arc::new(stats_repository); - (database, whitelist_manager, stats_event_sender, stats_repository) + (database, whitelist_manager) } /// It initializes the log threshold, format and channel. diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index c55723bc6..9135a8828 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -19,7 +19,8 @@ use torrust_tracker_configuration::HttpTracker; use tracing::instrument; use super::make_rust_tls; -use crate::core; +use crate::core::statistics::event::sender::Sender; +use crate::core::{self, statistics}; use crate::servers::http::server::{HttpServer, Launcher}; use crate::servers::http::Version; use crate::servers::registar::ServiceRegistrationForm; @@ -33,10 +34,11 @@ use crate::servers::registar::ServiceRegistrationForm; /// /// It would panic if the `config::HttpTracker` struct would contain inappropriate values. 
/// -#[instrument(skip(config, tracker, form))] +#[instrument(skip(config, tracker, stats_event_sender, form))] pub async fn start_job( config: &HttpTracker, tracker: Arc, + stats_event_sender: Arc>>, form: ServiceRegistrationForm, version: Version, ) -> Option> { @@ -47,20 +49,21 @@ pub async fn start_job( .map(|tls| tls.expect("it should have a valid http tracker tls configuration")); match version { - Version::V1 => Some(start_v1(socket, tls, tracker.clone(), form).await), + Version::V1 => Some(start_v1(socket, tls, tracker.clone(), stats_event_sender.clone(), form).await), } } #[allow(clippy::async_yields_async)] -#[instrument(skip(socket, tls, tracker, form))] +#[instrument(skip(socket, tls, tracker, stats_event_sender, form))] async fn start_v1( socket: SocketAddr, tls: Option, tracker: Arc, + stats_event_sender: Arc>>, form: ServiceRegistrationForm, ) -> JoinHandle<()> { let server = HttpServer::new(Launcher::new(socket, tls)) - .start(tracker, form) + .start(tracker, stats_event_sender, form) .await .expect("it should be able to start to the http tracker"); @@ -85,6 +88,7 @@ mod tests { use crate::bootstrap::app::initialize_with_configuration; use crate::bootstrap::jobs::http_tracker::start_job; + use crate::core::services::statistics; use crate::servers::http::Version; use crate::servers::registar::Registar; @@ -93,10 +97,12 @@ mod tests { let cfg = Arc::new(ephemeral_public()); let http_tracker = cfg.http_trackers.clone().expect("missing HTTP tracker configuration"); let config = &http_tracker[0]; + let (stats_event_sender, _stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); + let stats_event_sender = Arc::new(stats_event_sender); let tracker = initialize_with_configuration(&cfg); let version = Version::V1; - start_job(config, tracker, Registar::default().give_form(), version) + start_job(config, tracker, stats_event_sender, Registar::default().give_form(), version) .await .expect("it should be able to join to the http 
tracker start-job"); } diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 858888540..d84bb08a9 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -31,6 +31,8 @@ use tracing::instrument; use super::make_rust_tls; use crate::core; +use crate::core::statistics::event::sender::Sender; +use crate::core::statistics::repository::Repository; use crate::servers::apis::server::{ApiServer, Launcher}; use crate::servers::apis::Version; use crate::servers::registar::ServiceRegistrationForm; @@ -56,11 +58,13 @@ pub struct ApiServerJobStarted(); /// It would panic if unable to send the `ApiServerJobStarted` notice. /// /// -#[instrument(skip(config, tracker, ban_service, form))] +#[instrument(skip(config, tracker, ban_service, stats_event_sender, stats_repository, form))] pub async fn start_job( config: &HttpApi, tracker: Arc, ban_service: Arc>, + stats_event_sender: Arc>>, + stats_repository: Arc, form: ServiceRegistrationForm, version: Version, ) -> Option> { @@ -73,22 +77,53 @@ pub async fn start_job( let access_tokens = Arc::new(config.access_tokens.clone()); match version { - Version::V1 => Some(start_v1(bind_to, tls, tracker.clone(), ban_service.clone(), form, access_tokens).await), + Version::V1 => Some( + start_v1( + bind_to, + tls, + tracker.clone(), + ban_service.clone(), + stats_event_sender.clone(), + stats_repository.clone(), + form, + access_tokens, + ) + .await, + ), } } #[allow(clippy::async_yields_async)] -#[instrument(skip(socket, tls, tracker, ban_service, form, access_tokens))] +#[allow(clippy::too_many_arguments)] +#[instrument(skip( + socket, + tls, + tracker, + ban_service, + stats_event_sender, + stats_repository, + form, + access_tokens +))] async fn start_v1( socket: SocketAddr, tls: Option, tracker: Arc, ban_service: Arc>, + stats_event_sender: Arc>>, + stats_repository: Arc, form: ServiceRegistrationForm, access_tokens: Arc, ) -> JoinHandle<()> { let server = 
ApiServer::new(Launcher::new(socket, tls)) - .start(tracker, ban_service, form, access_tokens) + .start( + tracker, + stats_event_sender, + stats_repository, + ban_service, + form, + access_tokens, + ) .await .expect("it should be able to start to the tracker api"); @@ -107,6 +142,7 @@ mod tests { use crate::bootstrap::app::initialize_with_configuration; use crate::bootstrap::jobs::tracker_apis::start_job; + use crate::core::services::statistics; use crate::servers::apis::Version; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; @@ -116,12 +152,26 @@ mod tests { async fn it_should_start_http_tracker() { let cfg = Arc::new(ephemeral_public()); let config = &cfg.http_api.clone().unwrap(); - let tracker = initialize_with_configuration(&cfg); + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let (stats_event_sender, stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); + let stats_event_sender = Arc::new(stats_event_sender); + let stats_repository = Arc::new(stats_repository); + + let tracker = initialize_with_configuration(&cfg); + let version = Version::V1; - start_job(config, tracker, ban_service, Registar::default().give_form(), version) - .await - .expect("it should be able to join to the tracker api start-job"); + start_job( + config, + tracker, + ban_service, + stats_event_sender, + stats_repository, + Registar::default().give_form(), + version, + ) + .await + .expect("it should be able to join to the tracker api start-job"); } } diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 8948811af..105c7f723 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -14,6 +14,7 @@ use torrust_tracker_configuration::UdpTracker; use tracing::instrument; use crate::core; +use crate::core::statistics::event::sender::Sender; use crate::servers::registar::ServiceRegistrationForm; use 
crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::spawner::Spawner; @@ -31,10 +32,11 @@ use crate::servers::udp::UDP_TRACKER_LOG_TARGET; /// It will panic if the task did not finish successfully. #[must_use] #[allow(clippy::async_yields_async)] -#[instrument(skip(config, tracker, ban_service, form))] +#[instrument(skip(config, tracker, stats_event_sender, ban_service, form))] pub async fn start_job( config: &UdpTracker, tracker: Arc, + stats_event_sender: Arc>>, ban_service: Arc>, form: ServiceRegistrationForm, ) -> JoinHandle<()> { @@ -42,7 +44,7 @@ pub async fn start_job( let cookie_lifetime = config.cookie_lifetime; let server = Server::new(Spawner::new(bind_to)) - .start(tracker, ban_service, form, cookie_lifetime) + .start(tracker, stats_event_sender, ban_service, form, cookie_lifetime) .await .expect("it should be able to start the udp tracker"); diff --git a/src/console/profiling.rs b/src/console/profiling.rs index 1d31af3ce..2f6471906 100644 --- a/src/console/profiling.rs +++ b/src/console/profiling.rs @@ -179,9 +179,9 @@ pub async fn run() { return; }; - let (config, tracker, ban_service) = bootstrap::app::setup(); + let (config, tracker, ban_service, stats_event_sender, stats_repository) = bootstrap::app::setup(); - let jobs = app::start(&config, tracker, ban_service).await; + let jobs = app::start(&config, tracker, ban_service, stats_event_sender, stats_repository).await; // Run the tracker for a fixed duration let run_duration = sleep(Duration::from_secs(duration_secs)); diff --git a/src/core/mod.rs b/src/core/mod.rs index d61474c2c..9aef1b2f2 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -422,7 +422,7 @@ //! For example, the HTTP tracker would send an event like the following when it handles an `announce` request received from a peer using IP version 4. //! //! ```text -//! tracker.send_stats_event(statistics::event::Event::Tcp4Announce).await +//! 
stats_event_sender.send_stats_event(statistics::event::Event::Tcp4Announce).await //! ``` //! //! Refer to [`statistics`] module for more information about statistics. @@ -458,7 +458,6 @@ use std::time::Duration; use auth::PeerKey; use bittorrent_primitives::info_hash::InfoHash; use error::PeerKeyError; -use tokio::sync::mpsc::error::SendError; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT}; use torrust_tracker_located_error::Located; @@ -502,12 +501,6 @@ pub struct Tracker { /// The in-memory torrents repository. torrents: Arc, - - /// Service to send stats events. - stats_event_sender: Arc>>, - - /// The in-memory stats repo. - stats_repository: Arc, } /// How many peers the peer announcing wants in the announce response. @@ -576,8 +569,6 @@ impl Tracker { config: &Core, database: &Arc>, whitelist_manager: &Arc, - stats_event_sender: &Arc>>, - stats_repository: &Arc, ) -> Result { Ok(Tracker { config: config.clone(), @@ -585,8 +576,6 @@ impl Tracker { keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), whitelist_manager: whitelist_manager.clone(), torrents: Arc::default(), - stats_event_sender: stats_event_sender.clone(), - stats_repository: stats_repository.clone(), }) } @@ -1054,26 +1043,6 @@ impl Tracker { }) } - /// It return the `Tracker` [`statistics::metrics::Metrics`]. - /// - /// # Context: Statistics - pub async fn get_stats(&self) -> tokio::sync::RwLockReadGuard<'_, statistics::metrics::Metrics> { - self.stats_repository.get_stats().await - } - - /// It allows to send a statistic events which eventually will be used to update [`statistics::metrics::Metrics`]. - /// - /// # Context: Statistics - pub async fn send_stats_event( - &self, - event: statistics::event::Event, - ) -> Option>> { - match &*self.stats_event_sender { - None => None, - Some(stats_event_sender) => stats_event_sender.send_event(event).await, - } - } - /// It drops the database tables. 
/// /// # Errors @@ -1119,20 +1088,20 @@ mod tests { fn public_tracker() -> Tracker { let config = configuration::ephemeral_public(); - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + tracker_factory(&config, &database, &whitelist_manager) } fn private_tracker() -> Tracker { let config = configuration::ephemeral_private(); - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + tracker_factory(&config, &database, &whitelist_manager) } fn whitelisted_tracker() -> (Tracker, Arc) { let config = configuration::ephemeral_listed(); - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - let tracker = tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let tracker = tracker_factory(&config, &database, &whitelist_manager); (tracker, whitelist_manager) } @@ -1140,8 +1109,8 @@ mod tests { pub fn tracker_persisting_torrents_in_database() -> Tracker { let mut config = configuration::ephemeral_listed(); config.core.tracker_policy.persistent_torrent_completed_stat = true; - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + tracker_factory(&config, &database, 
&whitelist_manager) } fn sample_info_hash() -> InfoHash { diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index 4034925cd..d3336068c 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -14,8 +14,6 @@ use torrust_tracker_configuration::v2_0_0::database; use torrust_tracker_configuration::Configuration; use super::databases::{self, Database}; -use super::statistics::event::sender::Sender; -use super::statistics::repository::Repository; use super::whitelist::persisted::DatabaseWhitelist; use super::whitelist::WhiteListManager; use crate::core::Tracker; @@ -30,18 +28,8 @@ pub fn tracker_factory( config: &Configuration, database: &Arc>, whitelist_manager: &Arc, - stats_event_sender: &Arc>>, - stats_repository: &Arc, ) -> Tracker { - //let (stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - - match Tracker::new( - &Arc::new(config).core, - database, - whitelist_manager, - stats_event_sender, - stats_repository, - ) { + match Tracker::new(&Arc::new(config).core, database, whitelist_manager) { Ok(tracker) => tracker, Err(error) => { panic!("{}", error) diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index 3c44bc310..657f3eb06 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -10,7 +10,7 @@ //! The factory function builds two structs: //! //! - An statistics event [`Sender`](crate::core::statistics::event::sender::Sender) -//! - An statistics [`Repository`](crate::core::statistics::repository::Repository) +//! - An statistics [`Repository`] //! //! ```text //! 
let (stats_event_sender, stats_repository) = factory(tracker_usage_statistics); @@ -44,6 +44,7 @@ use tokio::sync::RwLock; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use crate::core::statistics::metrics::Metrics; +use crate::core::statistics::repository::Repository; use crate::core::Tracker; use crate::servers::udp::server::banning::BanService; @@ -62,9 +63,13 @@ pub struct TrackerMetrics { } /// It returns all the [`TrackerMetrics`] -pub async fn get_metrics(tracker: Arc, ban_service: Arc>) -> TrackerMetrics { +pub async fn get_metrics( + tracker: Arc, + ban_service: Arc>, + stats_repository: Arc, +) -> TrackerMetrics { let torrents_metrics = tracker.get_torrents_metrics(); - let stats = tracker.get_stats().await; + let stats = stats_repository.get_stats().await; let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); TrackerMetrics { @@ -114,7 +119,7 @@ mod tests { use crate::bootstrap::app::initialize_tracker_dependencies; use crate::core; - use crate::core::services::statistics::{get_metrics, TrackerMetrics}; + use crate::core::services::statistics::{self, get_metrics, TrackerMetrics}; use crate::core::services::tracker_factory; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; @@ -127,18 +132,15 @@ mod tests { async fn the_statistics_service_should_return_the_tracker_metrics() { let config = tracker_configuration(); - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(tracker_factory( - &config, - &database, - &whitelist_manager, - &stats_event_sender, - &stats_repository, - )); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (_stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + let stats_repository = Arc::new(stats_repository); + + let tracker = 
Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let tracker_metrics = get_metrics(tracker.clone(), ban_service.clone()).await; + let tracker_metrics = get_metrics(tracker.clone(), ban_service.clone(), stats_repository.clone()).await; assert_eq!( tracker_metrics, diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index a4db67979..9a1a2a725 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -142,8 +142,8 @@ mod tests { async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { let config = tracker_configuration(); - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - let tracker = tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let tracker = tracker_factory(&config, &database, &whitelist_manager); let tracker = Arc::new(tracker); @@ -159,14 +159,9 @@ mod tests { #[tokio::test] async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { let config = tracker_configuration(); - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(tracker_factory( - &config, - &database, - &whitelist_manager, - &stats_event_sender, - &stats_repository, - )); + + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -208,14 +203,9 @@ mod tests { #[tokio::test] async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let config = tracker_configuration(); - let (database, 
whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(tracker_factory( - &config, - &database, - &whitelist_manager, - &stats_event_sender, - &stats_repository, - )); + + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; @@ -225,14 +215,9 @@ mod tests { #[tokio::test] async fn should_return_a_summarized_info_for_all_torrents() { let config = tracker_configuration(); - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(tracker_factory( - &config, - &database, - &whitelist_manager, - &stats_event_sender, - &stats_repository, - )); + + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -255,14 +240,9 @@ mod tests { #[tokio::test] async fn should_allow_limiting_the_number_of_torrents_in_the_result() { let config = tracker_configuration(); - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(tracker_factory( - &config, - &database, - &whitelist_manager, - &stats_event_sender, - &stats_repository, - )); + + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -283,14 +263,9 @@ mod tests { #[tokio::test] async fn should_allow_using_pagination_in_the_result() { let config = 
tracker_configuration(); - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(tracker_factory( - &config, - &database, - &whitelist_manager, - &stats_event_sender, - &stats_repository, - )); + + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -320,14 +295,9 @@ mod tests { #[tokio::test] async fn should_return_torrents_ordered_by_info_hash() { let config = tracker_configuration(); - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(tracker_factory( - &config, - &database, - &whitelist_manager, - &stats_event_sender, - &stats_repository, - )); + + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); diff --git a/src/main.rs b/src/main.rs index c93982191..e536124a2 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,9 +2,9 @@ use torrust_tracker_lib::{app, bootstrap}; #[tokio::main] async fn main() { - let (config, tracker, udp_ban_service) = bootstrap::app::setup(); + let (config, tracker, udp_ban_service, stats_event_sender, stats_repository) = bootstrap::app::setup(); - let jobs = app::start(&config, tracker, udp_ban_service).await; + let jobs = app::start(&config, tracker, udp_ban_service, stats_event_sender, stats_repository).await; // handle the signals tokio::select! 
{ diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index 98442ea97..cb3789a06 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -30,6 +30,8 @@ use tracing::{instrument, Level, Span}; use super::v1; use super::v1::context::health_check::handlers::health_check_handler; use super::v1::middlewares::auth::State; +use crate::core::statistics::event::sender::Sender; +use crate::core::statistics::repository::Repository; use crate::core::Tracker; use crate::servers::apis::API_LOG_TARGET; use crate::servers::logging::Latency; @@ -37,10 +39,12 @@ use crate::servers::udp::server::banning::BanService; /// Add all API routes to the router. #[allow(clippy::needless_pass_by_value)] -#[instrument(skip(tracker, ban_service, access_tokens))] +#[instrument(skip(tracker, ban_service, stats_event_sender, stats_repository, access_tokens))] pub fn router( tracker: Arc, ban_service: Arc>, + stats_event_sender: Arc>>, + stats_repository: Arc, access_tokens: Arc, server_socket_addr: SocketAddr, ) -> Router { @@ -48,7 +52,14 @@ pub fn router( let api_url_prefix = "/api"; - let router = v1::routes::add(api_url_prefix, router, tracker.clone(), ban_service.clone()); + let router = v1::routes::add( + api_url_prefix, + router, + tracker.clone(), + ban_service.clone(), + stats_event_sender.clone(), + stats_repository.clone(), + ); let state = State { access_tokens }; diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 9d1c77c03..bf1511edb 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -39,7 +39,8 @@ use tracing::{instrument, Level}; use super::routes::router; use crate::bootstrap::jobs::Started; -use crate::core::Tracker; +use crate::core::statistics::repository::Repository; +use crate::core::{statistics, Tracker}; use crate::servers::apis::API_LOG_TARGET; use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; use crate::servers::logging::STARTED_ON; @@ -124,10 +125,12 @@ impl ApiServer { /// 
# Panics /// /// It would panic if the bound socket address cannot be sent back to this starter. - #[instrument(skip(self, tracker, ban_service, form, access_tokens), err, ret(Display, level = Level::INFO))] + #[instrument(skip(self, tracker, stats_event_sender, ban_service, stats_repository, form, access_tokens), err, ret(Display, level = Level::INFO))] pub async fn start( self, tracker: Arc, + stats_event_sender: Arc>>, + stats_repository: Arc, ban_service: Arc>, form: ServiceRegistrationForm, access_tokens: Arc, @@ -140,7 +143,17 @@ impl ApiServer { let task = tokio::spawn(async move { tracing::debug!(target: API_LOG_TARGET, "Starting with launcher in spawned task ..."); - let _task = launcher.start(tracker, ban_service, access_tokens, tx_start, rx_halt).await; + let _task = launcher + .start( + tracker, + ban_service, + stats_event_sender, + stats_repository, + access_tokens, + tx_start, + rx_halt, + ) + .await; tracing::debug!(target: API_LOG_TARGET, "Started with launcher in spawned task"); @@ -238,11 +251,23 @@ impl Launcher { /// /// Will panic if unable to bind to the socket, or unable to get the address of the bound socket. /// Will also panic if unable to send message regarding the bound socket address. 
- #[instrument(skip(self, tracker, ban_service, access_tokens, tx_start, rx_halt))] + #[allow(clippy::too_many_arguments)] + #[instrument(skip( + self, + tracker, + ban_service, + stats_event_sender, + stats_repository, + access_tokens, + tx_start, + rx_halt + ))] pub fn start( &self, tracker: Arc, ban_service: Arc>, + stats_event_sender: Arc>>, + stats_repository: Arc, access_tokens: Arc, tx_start: Sender, rx_halt: Receiver, @@ -250,7 +275,14 @@ impl Launcher { let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); - let router = router(tracker, ban_service, access_tokens, address); + let router = router( + tracker, + ban_service, + stats_event_sender, + stats_repository, + access_tokens, + address, + ); let handle = Handle::new(); @@ -303,6 +335,7 @@ mod tests { use crate::bootstrap::app::initialize_with_configuration; use crate::bootstrap::jobs::make_rust_tls; + use crate::core::services::statistics; use crate::servers::apis::server::{ApiServer, Launcher}; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; @@ -313,8 +346,11 @@ mod tests { let cfg = Arc::new(ephemeral_public()); let config = &cfg.http_api.clone().unwrap(); - let tracker = initialize_with_configuration(&cfg); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let (stats_event_sender, stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); + let stats_event_sender = Arc::new(stats_event_sender); + let stats_repository = Arc::new(stats_repository); + let tracker = initialize_with_configuration(&cfg); let bind_to = config.bind_address; @@ -329,7 +365,14 @@ mod tests { let register = &Registar::default(); let started = stopped - .start(tracker, ban_service, register.give_form(), access_tokens) + .start( + tracker, + stats_event_sender, + 
stats_repository, + ban_service, + register.give_form(), + access_tokens, + ) .await .expect("it should start the server"); let stopped = started.stop().await.expect("it should stop the server"); diff --git a/src/servers/apis/v1/context/stats/handlers.rs b/src/servers/apis/v1/context/stats/handlers.rs index b630c763d..af7e1c239 100644 --- a/src/servers/apis/v1/context/stats/handlers.rs +++ b/src/servers/apis/v1/context/stats/handlers.rs @@ -10,6 +10,7 @@ use tokio::sync::RwLock; use super::responses::{metrics_response, stats_response}; use crate::core::services::statistics::get_metrics; +use crate::core::statistics::repository::Repository; use crate::core::Tracker; use crate::servers::udp::server::banning::BanService; @@ -37,11 +38,12 @@ pub struct QueryParams { /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::stats#get-tracker-statistics) /// for more information about this endpoint. +#[allow(clippy::type_complexity)] pub async fn get_stats_handler( - State(state): State<(Arc, Arc>)>, + State(state): State<(Arc, Arc>, Arc)>, params: Query, ) -> Response { - let metrics = get_metrics(state.0.clone(), state.1.clone()).await; + let metrics = get_metrics(state.0.clone(), state.1.clone(), state.2.clone()).await; match params.0.format { Some(format) => match format { diff --git a/src/servers/apis/v1/context/stats/routes.rs b/src/servers/apis/v1/context/stats/routes.rs index fde1056c3..b5df32963 100644 --- a/src/servers/apis/v1/context/stats/routes.rs +++ b/src/servers/apis/v1/context/stats/routes.rs @@ -10,13 +10,22 @@ use axum::Router; use tokio::sync::RwLock; use super::handlers::get_stats_handler; +use crate::core::statistics::event::sender::Sender; +use crate::core::statistics::repository::Repository; use crate::core::Tracker; use crate::servers::udp::server::banning::BanService; /// It adds the routes to the router for the [`stats`](crate::servers::apis::v1::context::stats) API context. 
-pub fn add(prefix: &str, router: Router, tracker: Arc, ban_service: Arc>) -> Router { +pub fn add( + prefix: &str, + router: Router, + tracker: Arc, + ban_service: Arc>, + _stats_event_sender: Arc>>, + stats_repository: Arc, +) -> Router { router.route( &format!("{prefix}/stats"), - get(get_stats_handler).with_state((tracker, ban_service)), + get(get_stats_handler).with_state((tracker, ban_service, stats_repository)), ) } diff --git a/src/servers/apis/v1/routes.rs b/src/servers/apis/v1/routes.rs index 4c97c7578..9fbd5da0e 100644 --- a/src/servers/apis/v1/routes.rs +++ b/src/servers/apis/v1/routes.rs @@ -5,15 +5,31 @@ use axum::Router; use tokio::sync::RwLock; use super::context::{auth_key, stats, torrent, whitelist}; +use crate::core::statistics::event::sender::Sender; +use crate::core::statistics::repository::Repository; use crate::core::Tracker; use crate::servers::udp::server::banning::BanService; /// Add the routes for the v1 API. -pub fn add(prefix: &str, router: Router, tracker: Arc, ban_service: Arc>) -> Router { +pub fn add( + prefix: &str, + router: Router, + tracker: Arc, + ban_service: Arc>, + stats_event_sender: Arc>>, + stats_repository: Arc, +) -> Router { let v1_prefix = format!("{prefix}/v1"); let router = auth_key::routes::add(&v1_prefix, router, tracker.clone()); - let router = stats::routes::add(&v1_prefix, router, tracker.clone(), ban_service); + let router = stats::routes::add( + &v1_prefix, + router, + tracker.clone(), + ban_service, + stats_event_sender, + stats_repository, + ); let router = whitelist::routes::add(&v1_prefix, router, &tracker); torrent::routes::add(&v1_prefix, router, tracker) diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 560d91681..537fc37fb 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -11,7 +11,7 @@ use tracing::instrument; use super::v1::routes::router; use crate::bootstrap::jobs::Started; -use crate::core::Tracker; +use crate::core::{statistics, Tracker}; use 
crate::servers::custom_axum_server::{self, TimeoutAcceptor}; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; use crate::servers::logging::STARTED_ON; @@ -42,8 +42,14 @@ pub struct Launcher { } impl Launcher { - #[instrument(skip(self, tracker, tx_start, rx_halt))] - fn start(&self, tracker: Arc, tx_start: Sender, rx_halt: Receiver) -> BoxFuture<'static, ()> { + #[instrument(skip(self, tracker, stats_event_sender, tx_start, rx_halt))] + fn start( + &self, + tracker: Arc, + stats_event_sender: Arc>>, + tx_start: Sender, + rx_halt: Receiver, + ) -> BoxFuture<'static, ()> { let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); @@ -60,7 +66,7 @@ impl Launcher { tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting on: {protocol}://{}", address); - let app = router(tracker, address); + let app = router(tracker, stats_event_sender, address); let running = Box::pin(async { match tls { @@ -153,14 +159,19 @@ impl HttpServer { /// /// It would panic spawned HTTP server launcher cannot send the bound `SocketAddr` /// back to the main thread. 
- pub async fn start(self, tracker: Arc, form: ServiceRegistrationForm) -> Result, Error> { + pub async fn start( + self, + tracker: Arc, + stats_event_sender: Arc>>, + form: ServiceRegistrationForm, + ) -> Result, Error> { let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); let launcher = self.state.launcher; let task = tokio::spawn(async move { - let server = launcher.start(tracker, tx_start, rx_halt); + let server = launcher.start(tracker, stats_event_sender, tx_start, rx_halt); server.await; @@ -233,13 +244,18 @@ mod tests { use crate::bootstrap::app::initialize_with_configuration; use crate::bootstrap::jobs::make_rust_tls; + use crate::core::services::statistics; use crate::servers::http::server::{HttpServer, Launcher}; use crate::servers::registar::Registar; #[tokio::test] async fn it_should_be_able_to_start_and_stop() { let cfg = Arc::new(ephemeral_public()); + + let (stats_event_sender, _stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); + let stats_event_sender = Arc::new(stats_event_sender); let tracker = initialize_with_configuration(&cfg); + let http_trackers = cfg.http_trackers.clone().expect("missing HTTP trackers configuration"); let config = &http_trackers[0]; @@ -253,7 +269,7 @@ mod tests { let stopped = HttpServer::new(Launcher::new(bind_to, tls)); let started = stopped - .start(tracker, register.give_form()) + .start(tracker, stats_event_sender, register.give_form()) .await .expect("it should start the server"); let stopped = started.stop().await.expect("it should stop the server"); diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index b21f035da..1c8779625 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -22,6 +22,7 @@ use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; use crate::core::auth::Key; +use 
crate::core::statistics::event::sender::Sender; use crate::core::{PeersWanted, Tracker}; use crate::servers::http::v1::extractors::announce_request::ExtractRequest; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; @@ -33,28 +34,30 @@ use crate::CurrentClock; /// It handles the `announce` request when the HTTP tracker does not require /// authentication (no PATH `key` parameter required). #[allow(clippy::unused_async)] +#[allow(clippy::type_complexity)] pub async fn handle_without_key( - State(tracker): State>, + State(state): State<(Arc, Arc>>)>, ExtractRequest(announce_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ) -> Response { tracing::debug!("http announce request: {:#?}", announce_request); - handle(&tracker, &announce_request, &client_ip_sources, None).await + handle(&state.0, &state.1, &announce_request, &client_ip_sources, None).await } /// It handles the `announce` request when the HTTP tracker requires /// authentication (PATH `key` parameter required). #[allow(clippy::unused_async)] +#[allow(clippy::type_complexity)] pub async fn handle_with_key( - State(tracker): State>, + State(state): State<(Arc, Arc>>)>, ExtractRequest(announce_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ExtractKey(key): ExtractKey, ) -> Response { tracing::debug!("http announce request: {:#?}", announce_request); - handle(&tracker, &announce_request, &client_ip_sources, Some(key)).await + handle(&state.0, &state.1, &announce_request, &client_ip_sources, Some(key)).await } /// It handles the `announce` request. @@ -63,11 +66,20 @@ pub async fn handle_with_key( /// `unauthenticated` modes. 
async fn handle( tracker: &Arc, + opt_stats_event_sender: &Arc>>, announce_request: &Announce, client_ip_sources: &ClientIpSources, maybe_key: Option, ) -> Response { - let announce_data = match handle_announce(tracker, announce_request, client_ip_sources, maybe_key).await { + let announce_data = match handle_announce( + tracker, + opt_stats_event_sender, + announce_request, + client_ip_sources, + maybe_key, + ) + .await + { Ok(announce_data) => announce_data, Err(error) => return (StatusCode::OK, error.write()).into_response(), }; @@ -82,6 +94,7 @@ async fn handle( async fn handle_announce( tracker: &Arc, + opt_stats_event_sender: &Arc>>, announce_request: &Announce, client_ip_sources: &ClientIpSources, maybe_key: Option, @@ -118,7 +131,14 @@ async fn handle_announce( None => PeersWanted::All, }; - let announce_data = services::announce::invoke(tracker.clone(), announce_request.info_hash, &mut peer, &peers_wanted).await; + let announce_data = services::announce::invoke( + tracker.clone(), + opt_stats_event_sender.clone(), + announce_request.info_hash, + &mut peer, + &peers_wanted, + ) + .await; Ok(announce_data) } @@ -186,31 +206,44 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::bootstrap::app::initialize_tracker_dependencies; - use crate::core::services::tracker_factory; + use crate::core::services::{statistics, tracker_factory}; + use crate::core::statistics::event::sender::Sender; use crate::core::Tracker; - fn private_tracker() -> Tracker { + fn private_tracker() -> (Tracker, Option>) { let config = configuration::ephemeral_private(); - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) + + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (stats_event_sender, _stats_repository) = 
statistics::setup::factory(config.core.tracker_usage_statistics); + + (tracker_factory(&config, &database, &whitelist_manager), stats_event_sender) } - fn whitelisted_tracker() -> Tracker { + fn whitelisted_tracker() -> (Tracker, Option>) { let config = configuration::ephemeral_listed(); - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) + + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + + (tracker_factory(&config, &database, &whitelist_manager), stats_event_sender) } - fn tracker_on_reverse_proxy() -> Tracker { + fn tracker_on_reverse_proxy() -> (Tracker, Option>) { let config = configuration::ephemeral_with_reverse_proxy(); - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) + + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + + (tracker_factory(&config, &database, &whitelist_manager), stats_event_sender) } - fn tracker_not_on_reverse_proxy() -> Tracker { + fn tracker_not_on_reverse_proxy() -> (Tracker, Option>) { let config = configuration::ephemeral_without_reverse_proxy(); - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) + + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (stats_event_sender, _stats_repository) = 
statistics::setup::factory(config.core.tracker_usage_statistics); + + (tracker_factory(&config, &database, &whitelist_manager), stats_event_sender) } fn sample_announce_request() -> Announce { @@ -253,13 +286,22 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_authentication_key_is_missing() { - let tracker = Arc::new(private_tracker()); + let (tracker, stats_event_sender) = private_tracker(); + + let tracker = Arc::new(tracker); + let stats_event_sender = Arc::new(stats_event_sender); let maybe_key = None; - let response = handle_announce(&tracker, &sample_announce_request(), &sample_client_ip_sources(), maybe_key) - .await - .unwrap_err(); + let response = handle_announce( + &tracker, + &stats_event_sender, + &sample_announce_request(), + &sample_client_ip_sources(), + maybe_key, + ) + .await + .unwrap_err(); assert_error_response( &response, @@ -269,15 +311,24 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_authentication_key_is_invalid() { - let tracker = Arc::new(private_tracker()); + let (tracker, stats_event_sender) = private_tracker(); + + let tracker = Arc::new(tracker); + let stats_event_sender = Arc::new(stats_event_sender); let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); let maybe_key = Some(unregistered_key); - let response = handle_announce(&tracker, &sample_announce_request(), &sample_client_ip_sources(), maybe_key) - .await - .unwrap_err(); + let response = handle_announce( + &tracker, + &stats_event_sender, + &sample_announce_request(), + &sample_client_ip_sources(), + maybe_key, + ) + .await + .unwrap_err(); assert_error_response(&response, "Authentication error: Failed to read key"); } @@ -293,13 +344,22 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_announced_torrent_is_not_whitelisted() { - let tracker = Arc::new(whitelisted_tracker()); + let (tracker, stats_event_sender) = whitelisted_tracker(); + + let tracker = Arc::new(tracker); + let stats_event_sender = 
Arc::new(stats_event_sender); let announce_request = sample_announce_request(); - let response = handle_announce(&tracker, &announce_request, &sample_client_ip_sources(), None) - .await - .unwrap_err(); + let response = handle_announce( + &tracker, + &stats_event_sender, + &announce_request, + &sample_client_ip_sources(), + None, + ) + .await + .unwrap_err(); assert_error_response( &response, @@ -323,16 +383,25 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { - let tracker = Arc::new(tracker_on_reverse_proxy()); + let (tracker, stats_event_sender) = tracker_on_reverse_proxy(); + + let tracker = Arc::new(tracker); + let stats_event_sender = Arc::new(stats_event_sender); let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, connection_info_ip: None, }; - let response = handle_announce(&tracker, &sample_announce_request(), &client_ip_sources, None) - .await - .unwrap_err(); + let response = handle_announce( + &tracker, + &stats_event_sender, + &sample_announce_request(), + &client_ip_sources, + None, + ) + .await + .unwrap_err(); assert_error_response( &response, @@ -353,16 +422,25 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { - let tracker = Arc::new(tracker_not_on_reverse_proxy()); + let (tracker, stats_event_sender) = tracker_not_on_reverse_proxy(); + + let tracker = Arc::new(tracker); + let stats_event_sender = Arc::new(stats_event_sender); let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, connection_info_ip: None, }; - let response = handle_announce(&tracker, &sample_announce_request(), &client_ip_sources, None) - .await - .unwrap_err(); + let response = handle_announce( + &tracker, + &stats_event_sender, + &sample_announce_request(), + &client_ip_sources, + None, + ) + .await + .unwrap_err(); assert_error_response( &response, diff --git 
a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 41afb6bbb..6ff8a61cf 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -16,6 +16,7 @@ use hyper::StatusCode; use torrust_tracker_primitives::core::ScrapeData; use crate::core::auth::Key; +use crate::core::statistics::event::sender::Sender; use crate::core::Tracker; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; @@ -25,14 +26,15 @@ use crate::servers::http::v1::services; /// It handles the `scrape` request when the HTTP tracker is configured /// to run in `public` mode. #[allow(clippy::unused_async)] +#[allow(clippy::type_complexity)] pub async fn handle_without_key( - State(tracker): State>, + State(state): State<(Arc, Arc>>)>, ExtractRequest(scrape_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ) -> Response { tracing::debug!("http scrape request: {:#?}", &scrape_request); - handle(&tracker, &scrape_request, &client_ip_sources, None).await + handle(&state.0, &state.1, &scrape_request, &client_ip_sources, None).await } /// It handles the `scrape` request when the HTTP tracker is configured @@ -40,24 +42,26 @@ pub async fn handle_without_key( /// /// In this case, the authentication `key` parameter is required. 
#[allow(clippy::unused_async)] +#[allow(clippy::type_complexity)] pub async fn handle_with_key( - State(tracker): State>, + State(state): State<(Arc, Arc>>)>, ExtractRequest(scrape_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ExtractKey(key): ExtractKey, ) -> Response { tracing::debug!("http scrape request: {:#?}", &scrape_request); - handle(&tracker, &scrape_request, &client_ip_sources, Some(key)).await + handle(&state.0, &state.1, &scrape_request, &client_ip_sources, Some(key)).await } async fn handle( tracker: &Arc, + stats_event_sender: &Arc>>, scrape_request: &Scrape, client_ip_sources: &ClientIpSources, maybe_key: Option, ) -> Response { - let scrape_data = match handle_scrape(tracker, scrape_request, client_ip_sources, maybe_key).await { + let scrape_data = match handle_scrape(tracker, stats_event_sender, scrape_request, client_ip_sources, maybe_key).await { Ok(scrape_data) => scrape_data, Err(error) => return (StatusCode::OK, error.write()).into_response(), }; @@ -72,6 +76,7 @@ async fn handle( async fn handle_scrape( tracker: &Arc, + opt_stats_event_sender: &Arc>>, scrape_request: &Scrape, client_ip_sources: &ClientIpSources, maybe_key: Option, @@ -98,9 +103,9 @@ async fn handle_scrape( }; if return_real_scrape_data { - Ok(services::scrape::invoke(tracker, &scrape_request.info_hashes, &peer_ip).await) + Ok(services::scrape::invoke(tracker, opt_stats_event_sender, &scrape_request.info_hashes, &peer_ip).await) } else { - Ok(services::scrape::fake(tracker, &scrape_request.info_hashes, &peer_ip).await) + Ok(services::scrape::fake(opt_stats_event_sender, &scrape_request.info_hashes, &peer_ip).await) } } @@ -122,31 +127,43 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::bootstrap::app::initialize_tracker_dependencies; - use crate::core::services::tracker_factory; + use crate::core::services::{statistics, tracker_factory}; use crate::core::Tracker; - fn private_tracker() -> Tracker { + fn 
private_tracker() -> (Tracker, Option>) { let config = configuration::ephemeral_private(); - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) + + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + + (tracker_factory(&config, &database, &whitelist_manager), stats_event_sender) } - fn whitelisted_tracker() -> Tracker { + fn whitelisted_tracker() -> (Tracker, Option>) { let config = configuration::ephemeral_listed(); - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) + + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + + (tracker_factory(&config, &database, &whitelist_manager), stats_event_sender) } - fn tracker_on_reverse_proxy() -> Tracker { + fn tracker_on_reverse_proxy() -> (Tracker, Option>) { let config = configuration::ephemeral_with_reverse_proxy(); - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) + + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + + (tracker_factory(&config, &database, &whitelist_manager), stats_event_sender) } - fn tracker_not_on_reverse_proxy() -> Tracker { + fn tracker_not_on_reverse_proxy() -> (Tracker, Option>) { let config 
= configuration::ephemeral_without_reverse_proxy(); - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) + + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + + (tracker_factory(&config, &database, &whitelist_manager), stats_event_sender) } fn sample_scrape_request() -> Scrape { @@ -181,14 +198,22 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_missing() { - let tracker = Arc::new(private_tracker()); + let (tracker, stats_event_sender) = private_tracker(); + let tracker = Arc::new(tracker); + let stats_event_sender = Arc::new(stats_event_sender); let scrape_request = sample_scrape_request(); let maybe_key = None; - let scrape_data = handle_scrape(&tracker, &scrape_request, &sample_client_ip_sources(), maybe_key) - .await - .unwrap(); + let scrape_data = handle_scrape( + &tracker, + &stats_event_sender, + &scrape_request, + &sample_client_ip_sources(), + maybe_key, + ) + .await + .unwrap(); let expected_scrape_data = ScrapeData::zeroed(&scrape_request.info_hashes); @@ -197,15 +222,23 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_invalid() { - let tracker = Arc::new(private_tracker()); + let (tracker, stats_event_sender) = private_tracker(); + let tracker = Arc::new(tracker); + let stats_event_sender = Arc::new(stats_event_sender); let scrape_request = sample_scrape_request(); let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); let maybe_key = Some(unregistered_key); - let scrape_data = handle_scrape(&tracker, &scrape_request, &sample_client_ip_sources(), maybe_key) - .await - .unwrap(); + let scrape_data = 
handle_scrape( + &tracker, + &stats_event_sender, + &scrape_request, + &sample_client_ip_sources(), + maybe_key, + ) + .await + .unwrap(); let expected_scrape_data = ScrapeData::zeroed(&scrape_request.info_hashes); @@ -224,13 +257,21 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_torrent_is_not_whitelisted() { - let tracker = Arc::new(whitelisted_tracker()); + let (tracker, stats_event_sender) = whitelisted_tracker(); + let tracker: Arc = Arc::new(tracker); + let stats_event_sender = Arc::new(stats_event_sender); let scrape_request = sample_scrape_request(); - let scrape_data = handle_scrape(&tracker, &scrape_request, &sample_client_ip_sources(), None) - .await - .unwrap(); + let scrape_data = handle_scrape( + &tracker, + &stats_event_sender, + &scrape_request, + &sample_client_ip_sources(), + None, + ) + .await + .unwrap(); let expected_scrape_data = ScrapeData::zeroed(&scrape_request.info_hashes); @@ -249,16 +290,24 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { - let tracker = Arc::new(tracker_on_reverse_proxy()); + let (tracker, stats_event_sender) = tracker_on_reverse_proxy(); + let tracker: Arc = Arc::new(tracker); + let stats_event_sender = Arc::new(stats_event_sender); let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, connection_info_ip: None, }; - let response = handle_scrape(&tracker, &sample_scrape_request(), &client_ip_sources, None) - .await - .unwrap_err(); + let response = handle_scrape( + &tracker, + &stats_event_sender, + &sample_scrape_request(), + &client_ip_sources, + None, + ) + .await + .unwrap_err(); assert_error_response( &response, @@ -278,16 +327,24 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { - let tracker = Arc::new(tracker_not_on_reverse_proxy()); + let (tracker, stats_event_sender) = tracker_not_on_reverse_proxy(); + let 
tracker: Arc = Arc::new(tracker); + let stats_event_sender = Arc::new(stats_event_sender); let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, connection_info_ip: None, }; - let response = handle_scrape(&tracker, &sample_scrape_request(), &client_ip_sources, None) - .await - .unwrap_err(); + let response = handle_scrape( + &tracker, + &stats_event_sender, + &sample_scrape_request(), + &client_ip_sources, + None, + ) + .await + .unwrap_err(); assert_error_response( &response, diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index 3c6926c37..97eb5b95d 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -22,6 +22,7 @@ use tower_http::LatencyUnit; use tracing::{instrument, Level, Span}; use super::handlers::{announce, health_check, scrape}; +use crate::core::statistics::event::sender::Sender; use crate::core::Tracker; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; use crate::servers::logging::Latency; @@ -31,17 +32,29 @@ use crate::servers::logging::Latency; /// > **NOTICE**: it's added a layer to get the client IP from the connection /// > info. The tracker could use the connection info to get the client IP. 
#[allow(clippy::needless_pass_by_value)] -#[instrument(skip(tracker, server_socket_addr))] -pub fn router(tracker: Arc, server_socket_addr: SocketAddr) -> Router { +#[instrument(skip(tracker, stats_event_sender, server_socket_addr))] +pub fn router(tracker: Arc, stats_event_sender: Arc>>, server_socket_addr: SocketAddr) -> Router { Router::new() // Health check .route("/health_check", get(health_check::handler)) // Announce request - .route("/announce", get(announce::handle_without_key).with_state(tracker.clone())) - .route("/announce/{key}", get(announce::handle_with_key).with_state(tracker.clone())) + .route( + "/announce", + get(announce::handle_without_key).with_state((tracker.clone(), stats_event_sender.clone())), + ) + .route( + "/announce/{key}", + get(announce::handle_with_key).with_state((tracker.clone(), stats_event_sender.clone())), + ) // Scrape request - .route("/scrape", get(scrape::handle_without_key).with_state(tracker.clone())) - .route("/scrape/{key}", get(scrape::handle_with_key).with_state(tracker)) + .route( + "/scrape", + get(scrape::handle_without_key).with_state((tracker.clone(), stats_event_sender.clone())), + ) + .route( + "/scrape/{key}", + get(scrape::handle_with_key).with_state((tracker.clone(), stats_event_sender.clone())), + ) // Add extension to get the client IP from the connection info .layer(SecureClientIpSource::ConnectInfo.into_extension()) .layer(CompressionLayer::new()) diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 3a4d4820a..45bcb5843 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -15,7 +15,9 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; -use crate::core::{statistics, PeersWanted, Tracker}; +use crate::core::statistics::event::sender::Sender; +use crate::core::statistics::{self}; +use crate::core::{PeersWanted, Tracker}; 
/// The HTTP tracker `announce` service. /// @@ -29,6 +31,7 @@ use crate::core::{statistics, PeersWanted, Tracker}; /// > each `announce` request. pub async fn invoke( tracker: Arc, + opt_stats_event_sender: Arc>>, info_hash: InfoHash, peer: &mut peer::Peer, peers_wanted: &PeersWanted, @@ -38,12 +41,14 @@ pub async fn invoke( // The tracker could change the original peer ip let announce_data = tracker.announce(&info_hash, peer, &original_peer_ip, peers_wanted); - match original_peer_ip { - IpAddr::V4(_) => { - tracker.send_stats_event(statistics::event::Event::Tcp4Announce).await; - } - IpAddr::V6(_) => { - tracker.send_stats_event(statistics::event::Event::Tcp6Announce).await; + if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { + match original_peer_ip { + IpAddr::V4(_) => { + stats_event_sender.send_event(statistics::event::Event::Tcp4Announce).await; + } + IpAddr::V6(_) => { + stats_event_sender.send_event(statistics::event::Event::Tcp6Announce).await; + } } } @@ -53,6 +58,7 @@ pub async fn invoke( #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; @@ -60,13 +66,20 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::bootstrap::app::initialize_tracker_dependencies; - use crate::core::services::tracker_factory; + use crate::core::services::{statistics, tracker_factory}; + use crate::core::statistics::event::sender::Sender; use crate::core::Tracker; - fn public_tracker() -> Tracker { + fn public_tracker() -> (Tracker, Arc>>) { let config = configuration::ephemeral_public(); - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) + + let (database, whitelist_manager) = 
initialize_tracker_dependencies(&config); + let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + let stats_event_sender = Arc::new(stats_event_sender); + + let tracker = tracker_factory(&config, &database, &whitelist_manager); + + (tracker, stats_event_sender) } fn sample_info_hash() -> InfoHash { @@ -115,32 +128,30 @@ mod tests { use crate::servers::http::v1::services::announce::invoke; use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; - fn test_tracker_factory(stats_event_sender: Option>) -> Tracker { + fn test_tracker_factory() -> Tracker { let config = configuration::ephemeral(); - let (database, whitelist_manager, _stats_event_sender, _stats_repository) = initialize_tracker_dependencies(&config); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let stats_event_sender = Arc::new(stats_event_sender); - - let stats_repository = Arc::new(statistics::repository::Repository::new()); - - Tracker::new( - &config.core, - &database, - &whitelist_manager, - &stats_event_sender, - &stats_repository, - ) - .unwrap() + Tracker::new(&config.core, &database, &whitelist_manager).unwrap() } #[tokio::test] async fn it_should_return_the_announce_data() { - let tracker = Arc::new(public_tracker()); + let (tracker, stats_event_sender) = public_tracker(); + + let tracker = Arc::new(tracker); let mut peer = sample_peer(); - let announce_data = invoke(tracker.clone(), sample_info_hash(), &mut peer, &PeersWanted::All).await; + let announce_data = invoke( + tracker.clone(), + stats_event_sender.clone(), + sample_info_hash(), + &mut peer, + &PeersWanted::All, + ) + .await; let expected_announce_data = AnnounceData { peers: vec![], @@ -163,22 +174,23 @@ mod tests { .with(eq(statistics::event::Event::Tcp4Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender = Box::new(stats_event_sender_mock); + 
let stats_event_sender: Arc>> = + Arc::new(Some(Box::new(stats_event_sender_mock))); - let tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); + let tracker = Arc::new(test_tracker_factory()); let mut peer = sample_peer_using_ipv4(); - let _announce_data = invoke(tracker, sample_info_hash(), &mut peer, &PeersWanted::All).await; + let _announce_data = invoke(tracker, stats_event_sender, sample_info_hash(), &mut peer, &PeersWanted::All).await; } - fn tracker_with_an_ipv6_external_ip(stats_event_sender: Box) -> Tracker { + fn tracker_with_an_ipv6_external_ip() -> Tracker { let mut configuration = configuration::ephemeral(); configuration.core.net.external_ip = Some(IpAddr::V6(Ipv6Addr::new( 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, ))); - test_tracker_factory(Some(stats_event_sender)) + test_tracker_factory() } fn peer_with_the_ipv4_loopback_ip() -> peer::Peer { @@ -200,12 +212,14 @@ mod tests { .with(eq(statistics::event::Event::Tcp4Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender = Box::new(stats_event_sender_mock); + let stats_event_sender: Arc>> = + Arc::new(Some(Box::new(stats_event_sender_mock))); let mut peer = peer_with_the_ipv4_loopback_ip(); let _announce_data = invoke( - tracker_with_an_ipv6_external_ip(stats_event_sender).into(), + tracker_with_an_ipv6_external_ip().into(), + stats_event_sender, sample_info_hash(), &mut peer, &PeersWanted::All, @@ -222,13 +236,14 @@ mod tests { .with(eq(statistics::event::Event::Tcp6Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender = Box::new(stats_event_sender_mock); + let stats_event_sender: Arc>> = + Arc::new(Some(Box::new(stats_event_sender_mock))); - let tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); + let tracker = Arc::new(test_tracker_factory()); let mut peer = sample_peer_using_ipv6(); - let _announce_data = invoke(tracker, sample_info_hash(), &mut peer, 
&PeersWanted::All).await; + let _announce_data = invoke(tracker, stats_event_sender, sample_info_hash(), &mut peer, &PeersWanted::All).await; } } } diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 01be81db6..9805dd8a4 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -14,7 +14,9 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::core::ScrapeData; -use crate::core::{statistics, Tracker}; +use crate::core::statistics::event::sender::Sender; +use crate::core::statistics::{self}; +use crate::core::Tracker; /// The HTTP tracker `scrape` service. /// @@ -26,10 +28,15 @@ use crate::core::{statistics, Tracker}; /// > **NOTICE**: as the HTTP tracker does not requires a connection request /// > like the UDP tracker, the number of TCP connections is incremented for /// > each `scrape` request. -pub async fn invoke(tracker: &Arc, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { +pub async fn invoke( + tracker: &Arc, + opt_stats_event_sender: &Arc>>, + info_hashes: &Vec, + original_peer_ip: &IpAddr, +) -> ScrapeData { let scrape_data = tracker.scrape(info_hashes).await; - send_scrape_event(original_peer_ip, tracker).await; + send_scrape_event(original_peer_ip, opt_stats_event_sender).await; scrape_data } @@ -40,19 +47,25 @@ pub async fn invoke(tracker: &Arc, info_hashes: &Vec, origina /// the tracker returns empty stats for all the torrents. /// /// > **NOTICE**: tracker statistics are not updated in this case. 
-pub async fn fake(tracker: &Arc, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { - send_scrape_event(original_peer_ip, tracker).await; +pub async fn fake( + opt_stats_event_sender: &Arc>>, + info_hashes: &Vec, + original_peer_ip: &IpAddr, +) -> ScrapeData { + send_scrape_event(original_peer_ip, opt_stats_event_sender).await; ScrapeData::zeroed(info_hashes) } -async fn send_scrape_event(original_peer_ip: &IpAddr, tracker: &Arc) { - match original_peer_ip { - IpAddr::V4(_) => { - tracker.send_stats_event(statistics::event::Event::Tcp4Scrape).await; - } - IpAddr::V6(_) => { - tracker.send_stats_event(statistics::event::Event::Tcp6Scrape).await; +async fn send_scrape_event(original_peer_ip: &IpAddr, opt_stats_event_sender: &Arc>>) { + if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { + match original_peer_ip { + IpAddr::V4(_) => { + stats_event_sender.send_event(statistics::event::Event::Tcp4Scrape).await; + } + IpAddr::V6(_) => { + stats_event_sender.send_event(statistics::event::Event::Tcp6Scrape).await; + } } } } @@ -61,7 +74,6 @@ async fn send_scrape_event(original_peer_ip: &IpAddr, tracker: &Arc) { mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; @@ -70,12 +82,14 @@ mod tests { use crate::bootstrap::app::initialize_tracker_dependencies; use crate::core::services::tracker_factory; - use crate::core::{statistics, Tracker}; + use crate::core::Tracker; fn public_tracker() -> Tracker { let config = configuration::ephemeral_public(); - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager, &stats_event_sender, &stats_repository) + + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + + tracker_factory(&config, &database, &whitelist_manager) } fn 
sample_info_hashes() -> Vec { @@ -98,23 +112,12 @@ mod tests { } } - fn test_tracker_factory(stats_event_sender: Option>) -> Tracker { + fn test_tracker_factory() -> Tracker { let config = configuration::ephemeral(); - let (database, whitelist_manager, _stats_event_sender, _stats_repository) = initialize_tracker_dependencies(&config); - - let stats_event_sender = Arc::new(stats_event_sender); - - let stats_repository = Arc::new(statistics::repository::Repository::new()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - Tracker::new( - &config.core, - &database, - &whitelist_manager, - &stats_event_sender, - &stats_repository, - ) - .unwrap() + Tracker::new(&config.core, &database, &whitelist_manager).unwrap() } mod with_real_data { @@ -135,6 +138,9 @@ mod tests { #[tokio::test] async fn it_should_return_the_scrape_data_for_a_torrent() { + let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); + let stats_event_sender = Arc::new(stats_event_sender); + let tracker = Arc::new(public_tracker()); let info_hash = sample_info_hash(); @@ -145,7 +151,7 @@ mod tests { let original_peer_ip = peer.ip(); tracker.announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::All); - let scrape_data = invoke(&tracker, &info_hashes, &original_peer_ip).await; + let scrape_data = invoke(&tracker, &stats_event_sender, &info_hashes, &original_peer_ip).await; let mut expected_scrape_data = ScrapeData::empty(); expected_scrape_data.add_file( @@ -168,13 +174,14 @@ mod tests { .with(eq(statistics::event::Event::Tcp4Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender = Box::new(stats_event_sender_mock); + let stats_event_sender: Arc>> = + Arc::new(Some(Box::new(stats_event_sender_mock))); - let tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); + let tracker = Arc::new(test_tracker_factory()); let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 
1)); - invoke(&tracker, &sample_info_hashes(), &peer_ip).await; + invoke(&tracker, &stats_event_sender, &sample_info_hashes(), &peer_ip).await; } #[tokio::test] @@ -185,13 +192,14 @@ mod tests { .with(eq(statistics::event::Event::Tcp6Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender = Box::new(stats_event_sender_mock); + let stats_event_sender: Arc>> = + Arc::new(Some(Box::new(stats_event_sender_mock))); - let tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); + let tracker = Arc::new(test_tracker_factory()); let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); - invoke(&tracker, &sample_info_hashes(), &peer_ip).await; + invoke(&tracker, &stats_event_sender, &sample_info_hashes(), &peer_ip).await; } } @@ -207,11 +215,13 @@ mod tests { use crate::core::{statistics, PeersWanted}; use crate::servers::http::v1::services::scrape::fake; use crate::servers::http::v1::services::scrape::tests::{ - public_tracker, sample_info_hash, sample_info_hashes, sample_peer, test_tracker_factory, + public_tracker, sample_info_hash, sample_info_hashes, sample_peer, }; #[tokio::test] async fn it_should_always_return_the_zeroed_scrape_data_for_a_torrent() { + let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); + let stats_event_sender = Arc::new(stats_event_sender); let tracker = Arc::new(public_tracker()); let info_hash = sample_info_hash(); @@ -222,7 +232,7 @@ mod tests { let original_peer_ip = peer.ip(); tracker.announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::All); - let scrape_data = fake(&tracker, &info_hashes, &original_peer_ip).await; + let scrape_data = fake(&stats_event_sender, &info_hashes, &original_peer_ip).await; let expected_scrape_data = ScrapeData::zeroed(&info_hashes); @@ -237,13 +247,12 @@ mod tests { .with(eq(statistics::event::Event::Tcp4Scrape)) .times(1) .returning(|_| 
Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender = Box::new(stats_event_sender_mock); - - let tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); + let stats_event_sender: Arc>> = + Arc::new(Some(Box::new(stats_event_sender_mock))); let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); - fake(&tracker, &sample_info_hashes(), &peer_ip).await; + fake(&stats_event_sender, &sample_info_hashes(), &peer_ip).await; } #[tokio::test] @@ -254,13 +263,12 @@ mod tests { .with(eq(statistics::event::Event::Tcp6Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender = Box::new(stats_event_sender_mock); - - let tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); + let stats_event_sender: Arc>> = + Arc::new(Some(Box::new(stats_event_sender_mock))); let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); - fake(&tracker, &sample_info_hashes(), &peer_ip).await; + fake(&stats_event_sender, &sample_info_hashes(), &peer_ip).await; } } } diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 4b20c2ac5..9883de54b 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -20,6 +20,7 @@ use zerocopy::network_endian::I32; use super::connection_cookie::{check, make}; use super::server::banning::BanService; use super::RawRequest; +use crate::core::statistics::event::sender::Sender; use crate::core::{statistics, PeersWanted, Tracker}; use crate::servers::udp::error::Error; use crate::servers::udp::{peer_builder, UDP_TRACKER_LOG_TARGET}; @@ -53,10 +54,11 @@ impl CookieTimeValues { /// - Delegating the request to the correct handler depending on the request type. /// /// It will return an `Error` response if the request is invalid. 
-#[instrument(fields(request_id), skip(udp_request, tracker, cookie_time_values, ban_service), ret(level = Level::TRACE))] +#[instrument(fields(request_id), skip(udp_request, tracker, opt_stats_event_sender, cookie_time_values, ban_service), ret(level = Level::TRACE))] pub(crate) async fn handle_packet( udp_request: RawRequest, tracker: &Tracker, + opt_stats_event_sender: &Arc>>, local_addr: SocketAddr, cookie_time_values: CookieTimeValues, ban_service: Arc>, @@ -70,7 +72,15 @@ pub(crate) async fn handle_packet( let response = match Request::parse_bytes(&udp_request.payload[..udp_request.payload.len()], MAX_SCRAPE_TORRENTS).map_err(Error::from) { - Ok(request) => match handle_request(request, udp_request.from, tracker, cookie_time_values.clone()).await { + Ok(request) => match handle_request( + request, + udp_request.from, + tracker, + opt_stats_event_sender, + cookie_time_values.clone(), + ) + .await + { Ok(response) => return response, Err((e, transaction_id)) => { match &e { @@ -88,7 +98,7 @@ pub(crate) async fn handle_packet( udp_request.from, local_addr, request_id, - tracker, + opt_stats_event_sender, cookie_time_values.valid_range.clone(), &e, Some(transaction_id), @@ -101,7 +111,7 @@ pub(crate) async fn handle_packet( udp_request.from, local_addr, request_id, - tracker, + opt_stats_event_sender, cookie_time_values.valid_range.clone(), &e, None, @@ -121,24 +131,43 @@ pub(crate) async fn handle_packet( /// # Errors /// /// If a error happens in the `handle_request` function, it will just return the `ServerError`. 
-#[instrument(skip(request, remote_addr, tracker, cookie_time_values))] +#[instrument(skip(request, remote_addr, tracker, opt_stats_event_sender, cookie_time_values))] pub async fn handle_request( request: Request, remote_addr: SocketAddr, tracker: &Tracker, + opt_stats_event_sender: &Arc>>, cookie_time_values: CookieTimeValues, ) -> Result { tracing::trace!("handle request"); match request { - Request::Connect(connect_request) => { - Ok(handle_connect(remote_addr, &connect_request, tracker, cookie_time_values.issue_time).await) - } + Request::Connect(connect_request) => Ok(handle_connect( + remote_addr, + &connect_request, + opt_stats_event_sender, + cookie_time_values.issue_time, + ) + .await), Request::Announce(announce_request) => { - handle_announce(remote_addr, &announce_request, tracker, cookie_time_values.valid_range).await + handle_announce( + remote_addr, + &announce_request, + tracker, + opt_stats_event_sender, + cookie_time_values.valid_range, + ) + .await } Request::Scrape(scrape_request) => { - handle_scrape(remote_addr, &scrape_request, tracker, cookie_time_values.valid_range).await + handle_scrape( + remote_addr, + &scrape_request, + tracker, + opt_stats_event_sender, + cookie_time_values.valid_range, + ) + .await } } } @@ -149,11 +178,11 @@ pub async fn handle_request( /// # Errors /// /// This function does not ever return an error. 
-#[instrument(fields(transaction_id), skip(tracker), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id), skip(opt_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_connect( remote_addr: SocketAddr, request: &ConnectRequest, - tracker: &Tracker, + opt_stats_event_sender: &Arc>>, cookie_issue_time: f64, ) -> Response { tracing::Span::current().record("transaction_id", request.transaction_id.0.to_string()); @@ -167,13 +196,14 @@ pub async fn handle_connect( connection_id, }; - // send stats event - match remote_addr { - SocketAddr::V4(_) => { - tracker.send_stats_event(statistics::event::Event::Udp4Connect).await; - } - SocketAddr::V6(_) => { - tracker.send_stats_event(statistics::event::Event::Udp6Connect).await; + if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { + match remote_addr { + SocketAddr::V4(_) => { + stats_event_sender.send_event(statistics::event::Event::Udp4Connect).await; + } + SocketAddr::V6(_) => { + stats_event_sender.send_event(statistics::event::Event::Udp6Connect).await; + } } } @@ -186,11 +216,12 @@ pub async fn handle_connect( /// # Errors /// /// If a error happens in the `handle_announce` function, it will just return the `ServerError`. 
-#[instrument(fields(transaction_id, connection_id, info_hash), skip(tracker), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id, connection_id, info_hash), skip(tracker, opt_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_announce( remote_addr: SocketAddr, request: &AnnounceRequest, tracker: &Tracker, + opt_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { tracing::Span::current() @@ -224,12 +255,14 @@ pub async fn handle_announce( let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted); - match remote_client_ip { - IpAddr::V4(_) => { - tracker.send_stats_event(statistics::event::Event::Udp4Announce).await; - } - IpAddr::V6(_) => { - tracker.send_stats_event(statistics::event::Event::Udp6Announce).await; + if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { + match remote_client_ip { + IpAddr::V4(_) => { + stats_event_sender.send_event(statistics::event::Event::Udp4Announce).await; + } + IpAddr::V6(_) => { + stats_event_sender.send_event(statistics::event::Event::Udp6Announce).await; + } } } @@ -293,11 +326,12 @@ pub async fn handle_announce( /// # Errors /// /// This function does not ever return an error. 
-#[instrument(fields(transaction_id, connection_id), skip(tracker), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id, connection_id), skip(tracker, opt_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_scrape( remote_addr: SocketAddr, request: &ScrapeRequest, tracker: &Tracker, + opt_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { tracing::Span::current() @@ -338,13 +372,14 @@ pub async fn handle_scrape( torrent_stats.push(scrape_entry); } - // send stats event - match remote_addr { - SocketAddr::V4(_) => { - tracker.send_stats_event(statistics::event::Event::Udp4Scrape).await; - } - SocketAddr::V6(_) => { - tracker.send_stats_event(statistics::event::Event::Udp6Scrape).await; + if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { + match remote_addr { + SocketAddr::V4(_) => { + stats_event_sender.send_event(statistics::event::Event::Udp4Scrape).await; + } + SocketAddr::V6(_) => { + stats_event_sender.send_event(statistics::event::Event::Udp6Scrape).await; + } } } @@ -356,12 +391,12 @@ pub async fn handle_scrape( Ok(Response::from(response)) } -#[instrument(fields(transaction_id), skip(tracker), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id), skip(opt_stats_event_sender), ret(level = Level::TRACE))] async fn handle_error( remote_addr: SocketAddr, local_addr: SocketAddr, request_id: Uuid, - tracker: &Tracker, + opt_stats_event_sender: &Arc>>, cookie_valid_range: Range, e: &Error, transaction_id: Option, @@ -398,13 +433,14 @@ async fn handle_error( }; if e.1.is_some() { - // send stats event - match remote_addr { - SocketAddr::V4(_) => { - tracker.send_stats_event(statistics::event::Event::Udp4Error).await; - } - SocketAddr::V6(_) => { - tracker.send_stats_event(statistics::event::Event::Udp6Error).await; + if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { + match remote_addr { + SocketAddr::V4(_) => { + 
stats_event_sender.send_event(statistics::event::Event::Udp4Error).await; + } + SocketAddr::V6(_) => { + stats_event_sender.send_event(statistics::event::Event::Udp6Error).await; + } } } } @@ -426,7 +462,6 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::ops::Range; - use std::sync::Arc; use aquatic_udp_protocol::{NumberOfBytes, PeerId}; use torrust_tracker_clock::clock::Time; @@ -436,8 +471,9 @@ mod tests { use super::gen_remote_fingerprint; use crate::bootstrap::app::initialize_tracker_dependencies; - use crate::core::services::tracker_factory; - use crate::core::{statistics, Tracker}; + use crate::core::services::{statistics, tracker_factory}; + use crate::core::statistics::event::sender::Sender; + use crate::core::Tracker; use crate::CurrentClock; fn tracker_configuration() -> Configuration { @@ -448,17 +484,19 @@ mod tests { configuration::ephemeral() } - fn public_tracker() -> Arc { + fn public_tracker() -> (Tracker, Option>) { initialized_tracker(&configuration::ephemeral_public()) } - fn whitelisted_tracker() -> Arc { + fn whitelisted_tracker() -> (Tracker, Option>) { initialized_tracker(&configuration::ephemeral_listed()) } - fn initialized_tracker(config: &Configuration) -> Arc { - let (database, whitelist_manager, stats_event_sender, stats_repository) = initialize_tracker_dependencies(config); - tracker_factory(config, &database, &whitelist_manager, &stats_event_sender, &stats_repository).into() + fn initialized_tracker(config: &Configuration) -> (Tracker, Option>) { + let (database, whitelist_manager) = initialize_tracker_dependencies(config); + let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + + (tracker_factory(config, &database, &whitelist_manager), stats_event_sender) } fn sample_ipv4_remote_addr() -> SocketAddr { @@ -555,23 +593,12 @@ mod tests { } } - fn test_tracker_factory(stats_event_sender: Option>) -> Tracker { + fn test_tracker_factory() -> Tracker 
{ let config = tracker_configuration(); - let (database, whitelist_manager, _stats_event_sender, _stats_repository) = initialize_tracker_dependencies(&config); - - let stats_event_sender = Arc::new(stats_event_sender); - - let stats_repository = Arc::new(statistics::repository::Repository::new()); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - Tracker::new( - &config.core, - &database, - &whitelist_manager, - &stats_event_sender, - &stats_repository, - ) - .unwrap() + Tracker::new(&config.core, &database, &whitelist_manager).unwrap() } mod connect_request { @@ -587,8 +614,7 @@ mod tests { use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_connect; use crate::servers::udp::handlers::tests::{ - public_tracker, sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv6_remote_addr_fingerprint, - sample_issue_time, test_tracker_factory, + sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv6_remote_addr_fingerprint, sample_issue_time, }; fn sample_connect_request() -> ConnectRequest { @@ -599,11 +625,14 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { + let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); + let stats_event_sender = Arc::new(stats_event_sender); + let request = ConnectRequest { transaction_id: TransactionId(0i32.into()), }; - let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker(), sample_issue_time()).await; + let response = handle_connect(sample_ipv4_remote_addr(), &request, &stats_event_sender, sample_issue_time()).await; assert_eq!( response, @@ -616,11 +645,14 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id() { + let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); + let stats_event_sender = 
Arc::new(stats_event_sender); + let request = ConnectRequest { transaction_id: TransactionId(0i32.into()), }; - let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker(), sample_issue_time()).await; + let response = handle_connect(sample_ipv4_remote_addr(), &request, &stats_event_sender, sample_issue_time()).await; assert_eq!( response, @@ -633,11 +665,14 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { + let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); + let stats_event_sender = Arc::new(stats_event_sender); + let request = ConnectRequest { transaction_id: TransactionId(0i32.into()), }; - let response = handle_connect(sample_ipv6_remote_addr(), &request, &public_tracker(), sample_issue_time()).await; + let response = handle_connect(sample_ipv6_remote_addr(), &request, &stats_event_sender, sample_issue_time()).await; assert_eq!( response, @@ -656,15 +691,15 @@ mod tests { .with(eq(statistics::event::Event::Udp4Connect)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender = Box::new(stats_event_sender_mock); + let stats_event_sender: Arc>> = + Arc::new(Some(Box::new(stats_event_sender_mock))); let client_socket_address = sample_ipv4_socket_address(); - let torrent_tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); handle_connect( client_socket_address, &sample_connect_request(), - &torrent_tracker, + &stats_event_sender, sample_issue_time(), ) .await; @@ -678,13 +713,13 @@ mod tests { .with(eq(statistics::event::Event::Udp6Connect)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender = Box::new(stats_event_sender_mock); + let stats_event_sender: Arc>> = + Arc::new(Some(Box::new(stats_event_sender_mock))); - let torrent_tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); handle_connect( sample_ipv6_remote_addr(), 
&sample_connect_request(), - &torrent_tracker, + &stats_event_sender, sample_issue_time(), ) .await; @@ -787,7 +822,9 @@ mod tests { #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { - let tracker = public_tracker(); + let (tracker, stats_event_sender) = public_tracker(); + let tracker = Arc::new(tracker); + let stats_event_sender = Arc::new(stats_event_sender); let client_ip = Ipv4Addr::new(126, 0, 0, 1); let client_port = 8080; @@ -804,9 +841,15 @@ mod tests { .with_port(client_port) .into(); - handle_announce(remote_addr, &request, &tracker, sample_cookie_valid_range()) - .await - .unwrap(); + handle_announce( + remote_addr, + &request, + &tracker, + &stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); let peers = tracker.get_torrent_peers(&info_hash.0.into()); @@ -820,15 +863,25 @@ mod tests { #[tokio::test] async fn the_announced_peer_should_not_be_included_in_the_response() { + let (tracker, stats_event_sender) = public_tracker(); + let tracker = Arc::new(tracker); + let stats_event_sender = Arc::new(stats_event_sender); + let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .into(); - let response = handle_announce(remote_addr, &request, &public_tracker(), sample_cookie_valid_range()) - .await - .unwrap(); + let response = handle_announce( + remote_addr, + &request, + &tracker, + &stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); let empty_peer_vector: Vec> = vec![]; assert_eq!( @@ -851,7 +904,9 @@ mod tests { // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): // "Do note that most trackers will only honor the IP address field under limited circumstances." 
- let tracker = public_tracker(); + let (tracker, stats_event_sender) = public_tracker(); + let tracker = Arc::new(tracker); + let stats_event_sender = Arc::new(stats_event_sender); let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); @@ -871,9 +926,15 @@ mod tests { .with_port(client_port) .into(); - handle_announce(remote_addr, &request, &tracker, sample_cookie_valid_range()) - .await - .unwrap(); + handle_announce( + remote_addr, + &request, + &tracker, + &stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); let peers = tracker.get_torrent_peers(&info_hash.0.into()); @@ -897,19 +958,29 @@ mod tests { } async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { + let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); + let stats_event_sender = Arc::new(stats_event_sender); + let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .into(); - handle_announce(remote_addr, &request, &tracker, sample_cookie_valid_range()) - .await - .unwrap() + handle_announce( + remote_addr, + &request, + &tracker, + &stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap() } #[tokio::test] async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { - let tracker = public_tracker(); + let (tracker, _stats_event_sender) = public_tracker(); + let tracker = Arc::new(tracker); add_a_torrent_peer_using_ipv6(&tracker); @@ -932,14 +1003,16 @@ mod tests { .with(eq(statistics::event::Event::Udp4Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender = Box::new(stats_event_sender_mock); + let stats_event_sender: Arc>> = + Arc::new(Some(Box::new(stats_event_sender_mock))); - let tracker = 
Arc::new(test_tracker_factory(Some(stats_event_sender))); + let tracker = Arc::new(test_tracker_factory()); handle_announce( sample_ipv4_socket_address(), &AnnounceRequestBuilder::default().into(), &tracker, + &stats_event_sender, sample_cookie_valid_range(), ) .await @@ -961,7 +1034,9 @@ mod tests { #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { - let tracker = public_tracker(); + let (tracker, stats_event_sender) = public_tracker(); + let tracker = Arc::new(tracker); + let stats_event_sender = Arc::new(stats_event_sender); let client_ip = Ipv4Addr::new(127, 0, 0, 1); let client_port = 8080; @@ -978,9 +1053,15 @@ mod tests { .with_port(client_port) .into(); - handle_announce(remote_addr, &request, &tracker, sample_cookie_valid_range()) - .await - .unwrap(); + handle_announce( + remote_addr, + &request, + &tracker, + &stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); let peers = tracker.get_torrent_peers(&info_hash.0.into()); @@ -1019,7 +1100,9 @@ mod tests { #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { - let tracker = public_tracker(); + let (tracker, stats_event_sender) = public_tracker(); + let tracker = Arc::new(tracker); + let stats_event_sender = Arc::new(stats_event_sender); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); @@ -1037,9 +1120,15 @@ mod tests { .with_port(client_port) .into(); - handle_announce(remote_addr, &request, &tracker, sample_cookie_valid_range()) - .await - .unwrap(); + handle_announce( + remote_addr, + &request, + &tracker, + &stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); let peers = tracker.get_torrent_peers(&info_hash.0.into()); @@ -1053,6 +1142,10 @@ mod tests { #[tokio::test] async fn the_announced_peer_should_not_be_included_in_the_response() { + let (tracker, stats_event_sender) = public_tracker(); + let tracker = 
Arc::new(tracker); + let stats_event_sender = Arc::new(stats_event_sender); + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); @@ -1062,9 +1155,15 @@ mod tests { .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .into(); - let response = handle_announce(remote_addr, &request, &public_tracker(), sample_cookie_valid_range()) - .await - .unwrap(); + let response = handle_announce( + remote_addr, + &request, + &tracker, + &stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); let empty_peer_vector: Vec> = vec![]; assert_eq!( @@ -1087,7 +1186,9 @@ mod tests { // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): // "Do note that most trackers will only honor the IP address field under limited circumstances." - let tracker = public_tracker(); + let (tracker, stats_event_sender) = public_tracker(); + let tracker = Arc::new(tracker); + let stats_event_sender = Arc::new(stats_event_sender); let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); @@ -1107,9 +1208,15 @@ mod tests { .with_port(client_port) .into(); - handle_announce(remote_addr, &request, &tracker, sample_cookie_valid_range()) - .await - .unwrap(); + handle_announce( + remote_addr, + &request, + &tracker, + &stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); let peers = tracker.get_torrent_peers(&info_hash.0.into()); @@ -1133,6 +1240,9 @@ mod tests { } async fn announce_a_new_peer_using_ipv6(tracker: Arc) -> Response { + let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); + let stats_event_sender = Arc::new(stats_event_sender); + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); let client_port = 8080; @@ -1141,14 +1251,21 @@ mod tests { .with_connection_id(make(gen_remote_fingerprint(&remote_addr), 
sample_issue_time()).unwrap()) .into(); - handle_announce(remote_addr, &request, &tracker, sample_cookie_valid_range()) - .await - .unwrap() + handle_announce( + remote_addr, + &request, + &tracker, + &stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap() } #[tokio::test] async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { - let tracker = public_tracker(); + let (tracker, _stats_event_sender) = public_tracker(); + let tracker = Arc::new(tracker); add_a_torrent_peer_using_ipv4(&tracker); @@ -1171,9 +1288,10 @@ mod tests { .with(eq(statistics::event::Event::Udp6Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender = Box::new(stats_event_sender_mock); + let stats_event_sender: Arc>> = + Arc::new(Some(Box::new(stats_event_sender_mock))); - let tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); + let tracker = Arc::new(test_tracker_factory()); let remote_addr = sample_ipv6_remote_addr(); @@ -1181,20 +1299,27 @@ mod tests { .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .into(); - handle_announce(remote_addr, &announce_request, &tracker, sample_cookie_valid_range()) - .await - .unwrap(); + handle_announce( + remote_addr, + &announce_request, + &tracker, + &stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); } mod from_a_loopback_ip { + use std::future; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use mockall::predicate::eq; use crate::bootstrap::app::initialize_tracker_dependencies; - use crate::core; - use crate::core::statistics::keeper::Keeper; + use crate::core::{self, statistics}; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_announce; use 
crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -1206,21 +1331,18 @@ mod tests { async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let config = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let (database, whitelist_manager, _stats_event_sender, _stats_repository) = - initialize_tracker_dependencies(&config); + let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); + let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); + stats_event_sender_mock + .expect_send_event() + .with(eq(statistics::event::Event::Udp6Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let stats_event_sender: Arc>> = + Arc::new(Some(Box::new(stats_event_sender_mock))); - let tracker = Arc::new( - core::Tracker::new( - &config.core, - &database, - &whitelist_manager, - &Arc::new(Some(stats_event_sender)), - &Arc::new(stats_repository), - ) - .unwrap(), - ); + let tracker = Arc::new(core::Tracker::new(&config.core, &database, &whitelist_manager).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); @@ -1242,9 +1364,15 @@ mod tests { .with_port(client_port) .into(); - handle_announce(remote_addr, &request, &tracker, sample_cookie_valid_range()) - .await - .unwrap(); + handle_announce( + remote_addr, + &request, + &tracker, + &stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); let peers = tracker.get_torrent_peers(&info_hash.0.into()); @@ -1273,6 +1401,7 @@ mod tests { }; use super::{gen_remote_fingerprint, TorrentPeerBuilder}; + use crate::core::services::statistics; use crate::core::{self}; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_scrape; @@ -1290,6 +1419,10 @@ mod tests { 
#[tokio::test] async fn should_return_no_stats_when_the_tracker_does_not_have_any_torrent() { + let (tracker, stats_event_sender) = public_tracker(); + let tracker = Arc::new(tracker); + let stats_event_sender = Arc::new(stats_event_sender); + let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); @@ -1301,9 +1434,15 @@ mod tests { info_hashes, }; - let response = handle_scrape(remote_addr, &request, &public_tracker(), sample_cookie_valid_range()) - .await - .unwrap(); + let response = handle_scrape( + remote_addr, + &request, + &tracker, + &stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); let expected_torrent_stats = vec![zeroed_torrent_statistics()]; @@ -1339,6 +1478,9 @@ mod tests { } async fn add_a_sample_seeder_and_scrape(tracker: Arc) -> Response { + let (stats_event_sender, _stats_repository) = statistics::setup::factory(false); + let stats_event_sender = Arc::new(stats_event_sender); + let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); @@ -1346,9 +1488,15 @@ mod tests { let request = build_scrape_request(&remote_addr, &info_hash); - handle_scrape(remote_addr, &request, &tracker, sample_cookie_valid_range()) - .await - .unwrap() + handle_scrape( + remote_addr, + &request, + &tracker, + &stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap() } fn match_scrape_response(response: Response) -> Option { @@ -1359,6 +1507,8 @@ mod tests { } mod with_a_public_tracker { + use std::sync::Arc; + use aquatic_udp_protocol::{NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; use crate::servers::udp::handlers::tests::public_tracker; @@ -1366,7 +1516,8 @@ mod tests { #[tokio::test] async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { - let tracker = public_tracker(); + let (tracker, _stats_event_sender) = public_tracker(); + let tracker = Arc::new(tracker); let torrent_stats = 
match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone()).await); @@ -1381,6 +1532,8 @@ mod tests { } mod with_a_whitelisted_tracker { + use std::sync::Arc; + use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; use crate::servers::udp::handlers::handle_scrape; @@ -1391,7 +1544,9 @@ mod tests { #[tokio::test] async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { - let tracker = whitelisted_tracker(); + let (tracker, stats_event_sender) = whitelisted_tracker(); + let tracker = Arc::new(tracker); + let stats_event_sender = Arc::new(stats_event_sender); let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); @@ -1406,9 +1561,15 @@ mod tests { let request = build_scrape_request(&remote_addr, &info_hash); let torrent_stats = match_scrape_response( - handle_scrape(remote_addr, &request, &tracker, sample_cookie_valid_range()) - .await - .unwrap(), + handle_scrape( + remote_addr, + &request, + &tracker, + &stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(), ) .unwrap(); @@ -1423,7 +1584,9 @@ mod tests { #[tokio::test] async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { - let tracker = whitelisted_tracker(); + let (tracker, stats_event_sender) = whitelisted_tracker(); + let tracker = Arc::new(tracker); + let stats_event_sender = Arc::new(stats_event_sender); let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); @@ -1433,9 +1596,15 @@ mod tests { let request = build_scrape_request(&remote_addr, &info_hash); let torrent_stats = match_scrape_response( - handle_scrape(remote_addr, &request, &tracker, sample_cookie_valid_range()) - .await - .unwrap(), + handle_scrape( + remote_addr, + &request, + &tracker, + &stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(), ) .unwrap(); @@ -1477,15 +1646,17 @@ mod tests { 
.with(eq(statistics::event::Event::Udp4Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender = Box::new(stats_event_sender_mock); + let stats_event_sender: Arc>> = + Arc::new(Some(Box::new(stats_event_sender_mock))); let remote_addr = sample_ipv4_remote_addr(); - let tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); + let tracker = Arc::new(test_tracker_factory()); handle_scrape( remote_addr, &sample_scrape_request(&remote_addr), &tracker, + &stats_event_sender, sample_cookie_valid_range(), ) .await @@ -1514,15 +1685,17 @@ mod tests { .with(eq(statistics::event::Event::Udp6Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender = Box::new(stats_event_sender_mock); + let stats_event_sender: Arc>> = + Arc::new(Some(Box::new(stats_event_sender_mock))); let remote_addr = sample_ipv6_remote_addr(); - let tracker = Arc::new(test_tracker_factory(Some(stats_event_sender))); + let tracker = Arc::new(test_tracker_factory()); handle_scrape( remote_addr, &sample_scrape_request(&remote_addr), &tracker, + &stats_event_sender, sample_cookie_valid_range(), ) .await diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index 753dc9915..d71ffcfd1 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -13,6 +13,7 @@ use tracing::instrument; use super::banning::BanService; use super::request_buffer::ActiveRequests; use crate::bootstrap::jobs::Started; +use crate::core::statistics::event::sender::Sender; use crate::core::{statistics, Tracker}; use crate::servers::logging::STARTED_ON; use crate::servers::registar::ServiceHealthCheckJob; @@ -40,9 +41,10 @@ impl Launcher { /// It panics if unable to send address of socket. /// It panics if the udp server is loaded when the tracker is private. 
/// - #[instrument(skip(tracker, ban_service, bind_to, tx_start, rx_halt))] + #[instrument(skip(tracker, opt_stats_event_sender, ban_service, bind_to, tx_start, rx_halt))] pub async fn run_with_graceful_shutdown( tracker: Arc, + opt_stats_event_sender: Arc>>, ban_service: Arc>, bind_to: SocketAddr, cookie_lifetime: Duration, @@ -81,7 +83,14 @@ impl Launcher { let local_addr = local_udp_url.clone(); tokio::task::spawn(async move { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_with_graceful_shutdown::task (listening...)"); - let () = Self::run_udp_server_main(receiver, tracker.clone(), ban_service.clone(), cookie_lifetime).await; + let () = Self::run_udp_server_main( + receiver, + tracker.clone(), + opt_stats_event_sender.clone(), + ban_service.clone(), + cookie_lifetime, + ) + .await; }) }; @@ -118,10 +127,11 @@ impl Launcher { ServiceHealthCheckJob::new(binding, info, job) } - #[instrument(skip(receiver, tracker, ban_service))] + #[instrument(skip(receiver, tracker, opt_stats_event_sender, ban_service))] async fn run_udp_server_main( mut receiver: Receiver, tracker: Arc, + opt_stats_event_sender: Arc>>, ban_service: Arc>, cookie_lifetime: Duration, ) { @@ -165,24 +175,35 @@ impl Launcher { } }; - match req.from.ip() { - IpAddr::V4(_) => { - tracker.send_stats_event(statistics::event::Event::Udp4Request).await; - } - IpAddr::V6(_) => { - tracker.send_stats_event(statistics::event::Event::Udp6Request).await; + if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { + match req.from.ip() { + IpAddr::V4(_) => { + stats_event_sender.send_event(statistics::event::Event::Udp4Request).await; + } + IpAddr::V6(_) => { + stats_event_sender.send_event(statistics::event::Event::Udp6Request).await; + } } } if ban_service.read().await.is_banned(&req.from.ip()) { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop continue: (banned ip)"); - 
tracker.send_stats_event(statistics::event::Event::UdpRequestBanned).await; + if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { + stats_event_sender + .send_event(statistics::event::Event::UdpRequestBanned) + .await; + } continue; } - let processor = Processor::new(receiver.socket.clone(), tracker.clone(), cookie_lifetime); + let processor = Processor::new( + receiver.socket.clone(), + tracker.clone(), + opt_stats_event_sender.clone(), + cookie_lifetime, + ); /* We spawn the new task even if the active requests buffer is full. This could seem counterintuitive because we are accepting @@ -206,7 +227,12 @@ impl Launcher { if old_request_aborted { // Evicted task from active requests buffer was aborted. - tracker.send_stats_event(statistics::event::Event::UdpRequestAborted).await; + + if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { + stats_event_sender + .send_event(statistics::event::Event::UdpRequestAborted) + .await; + }; } } else { tokio::task::yield_now().await; diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index 6eb98a7b1..1b0a1da9a 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -64,6 +64,7 @@ mod tests { use super::spawner::Spawner; use super::Server; use crate::bootstrap::app::initialize_with_configuration; + use crate::core::services::statistics; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; @@ -72,8 +73,10 @@ mod tests { async fn it_should_be_able_to_start_and_stop() { let cfg = Arc::new(ephemeral_public()); - let tracker = initialize_with_configuration(&cfg); + let (stats_event_sender, _stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); + let stats_event_sender = Arc::new(stats_event_sender); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let tracker = 
initialize_with_configuration(&cfg); let udp_trackers = cfg.udp_trackers.clone().expect("missing UDP trackers configuration"); let config = &udp_trackers[0]; @@ -83,7 +86,13 @@ mod tests { let stopped = Server::new(Spawner::new(bind_to)); let started = stopped - .start(tracker, ban_service, register.give_form(), config.cookie_lifetime) + .start( + tracker, + stats_event_sender, + ban_service, + register.give_form(), + config.cookie_lifetime, + ) .await .expect("it should start the server"); @@ -98,8 +107,10 @@ mod tests { async fn it_should_be_able_to_start_and_stop_with_wait() { let cfg = Arc::new(ephemeral_public()); - let tracker = initialize_with_configuration(&cfg); + let (stats_event_sender, _stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); + let stats_event_sender = Arc::new(stats_event_sender); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let tracker = initialize_with_configuration(&cfg); let config = &cfg.udp_trackers.as_ref().unwrap().first().unwrap(); let bind_to = config.bind_address; @@ -108,7 +119,13 @@ mod tests { let stopped = Server::new(Spawner::new(bind_to)); let started = stopped - .start(tracker, ban_service, register.give_form(), config.cookie_lifetime) + .start( + tracker, + stats_event_sender, + ban_service, + register.give_form(), + config.cookie_lifetime, + ) .await .expect("it should start the server"); diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index e0f7c4624..2ef7cc482 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -10,6 +10,7 @@ use tracing::{instrument, Level}; use super::banning::BanService; use super::bound_socket::BoundSocket; +use crate::core::statistics::event::sender::Sender; use crate::core::statistics::event::UdpResponseKind; use crate::core::{statistics, Tracker}; use crate::servers::udp::handlers::CookieTimeValues; @@ -18,14 +19,21 @@ use 
crate::servers::udp::{handlers, RawRequest}; pub struct Processor { socket: Arc, tracker: Arc, + opt_stats_event_sender: Arc>>, cookie_lifetime: f64, } impl Processor { - pub fn new(socket: Arc, tracker: Arc, cookie_lifetime: f64) -> Self { + pub fn new( + socket: Arc, + tracker: Arc, + opt_stats_event_sender: Arc>>, + cookie_lifetime: f64, + ) -> Self { Self { socket, tracker, + opt_stats_event_sender, cookie_lifetime, } } @@ -39,6 +47,7 @@ impl Processor { let response = handlers::handle_packet( request, &self.tracker, + &self.opt_stats_event_sender, self.socket.address(), CookieTimeValues::new(self.cookie_lifetime), ban_service, @@ -84,22 +93,24 @@ impl Processor { tracing::debug!(%bytes_count, %sent_bytes, "sent {response_type}"); } - match target.ip() { - IpAddr::V4(_) => { - self.tracker - .send_stats_event(statistics::event::Event::Udp4Response { - kind: response_kind, - req_processing_time, - }) - .await; - } - IpAddr::V6(_) => { - self.tracker - .send_stats_event(statistics::event::Event::Udp6Response { - kind: response_kind, - req_processing_time, - }) - .await; + if let Some(stats_event_sender) = self.opt_stats_event_sender.as_deref() { + match target.ip() { + IpAddr::V4(_) => { + stats_event_sender + .send_event(statistics::event::Event::Udp4Response { + kind: response_kind, + req_processing_time, + }) + .await; + } + IpAddr::V6(_) => { + stats_event_sender + .send_event(statistics::event::Event::Udp6Response { + kind: response_kind, + req_processing_time, + }) + .await; + } } } } diff --git a/src/servers/udp/server/spawner.rs b/src/servers/udp/server/spawner.rs index ce2fe8eae..5d7a97877 100644 --- a/src/servers/udp/server/spawner.rs +++ b/src/servers/udp/server/spawner.rs @@ -11,6 +11,7 @@ use tokio::task::JoinHandle; use super::banning::BanService; use super::launcher::Launcher; use crate::bootstrap::jobs::Started; +use crate::core::statistics::event::sender::Sender; use crate::core::Tracker; use crate::servers::signals::Halted; @@ -29,6 +30,7 @@ 
impl Spawner { pub fn spawn_launcher( &self, tracker: Arc, + opt_stats_event_sender: Arc>>, ban_service: Arc>, cookie_lifetime: Duration, tx_start: oneshot::Sender, @@ -37,7 +39,16 @@ impl Spawner { let spawner = Self::new(self.bind_to); tokio::spawn(async move { - Launcher::run_with_graceful_shutdown(tracker, ban_service, spawner.bind_to, cookie_lifetime, tx_start, rx_halt).await; + Launcher::run_with_graceful_shutdown( + tracker, + opt_stats_event_sender, + ban_service, + spawner.bind_to, + cookie_lifetime, + tx_start, + rx_halt, + ) + .await; spawner }) } diff --git a/src/servers/udp/server/states.rs b/src/servers/udp/server/states.rs index 02742049d..5cdca5a7d 100644 --- a/src/servers/udp/server/states.rs +++ b/src/servers/udp/server/states.rs @@ -13,6 +13,7 @@ use super::banning::BanService; use super::spawner::Spawner; use super::{Server, UdpError}; use crate::bootstrap::jobs::Started; +use crate::core::statistics::event::sender::Sender; use crate::core::Tracker; use crate::servers::registar::{ServiceRegistration, ServiceRegistrationForm}; use crate::servers::signals::Halted; @@ -64,10 +65,11 @@ impl Server { /// /// It panics if unable to receive the bound socket address from service. /// - #[instrument(skip(self, tracker, ban_service, form), err, ret(Display, level = Level::INFO))] + #[instrument(skip(self, tracker, opt_stats_event_sender, ban_service, form), err, ret(Display, level = Level::INFO))] pub async fn start( self, tracker: Arc, + opt_stats_event_sender: Arc>>, ban_service: Arc>, form: ServiceRegistrationForm, cookie_lifetime: Duration, @@ -78,10 +80,14 @@ impl Server { assert!(!tx_halt.is_closed(), "Halt channel for UDP tracker should be open"); // May need to wrap in a task to about a tokio bug. 
- let task = self - .state - .spawner - .spawn_launcher(tracker, ban_service, cookie_lifetime, tx_start, rx_halt); + let task = self.state.spawner.spawn_launcher( + tracker, + opt_stats_event_sender, + ban_service, + cookie_lifetime, + tx_start, + rx_halt, + ); let local_addr = rx_start.await.expect("it should be able to start the service").address; diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 37d031e1c..6658c27da 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -8,6 +8,9 @@ use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; use torrust_tracker_configuration::{Configuration, HttpApi}; use torrust_tracker_lib::bootstrap::app::initialize_with_configuration; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; +use torrust_tracker_lib::core::services::statistics; +use torrust_tracker_lib::core::statistics::event::sender::Sender; +use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::whitelist::WhiteListManager; use torrust_tracker_lib::core::Tracker; use torrust_tracker_lib::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; @@ -22,6 +25,8 @@ where { pub config: Arc, pub tracker: Arc, + pub stats_event_sender: Arc>>, + pub stats_repository: Arc, pub whitelist_manager: Arc, pub ban_service: Arc>, pub registar: Registar, @@ -40,13 +45,16 @@ where impl Environment { pub fn new(configuration: &Arc) -> Self { + let (stats_event_sender, stats_repository) = statistics::setup::factory(configuration.core.tracker_usage_statistics); + let stats_event_sender = Arc::new(stats_event_sender); + let stats_repository = Arc::new(stats_repository); + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let tracker = initialize_with_configuration(configuration); - // todo: get from `initialize_with_configuration` + // todo: instantiate outside of `initialize_with_configuration` let 
whitelist_manager = tracker.whitelist_manager.clone(); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let config = Arc::new(configuration.http_api.clone().expect("missing API configuration")); let bind_to = config.bind_address; @@ -58,6 +66,8 @@ impl Environment { Self { config, tracker, + stats_event_sender, + stats_repository, whitelist_manager, ban_service, registar: Registar::default(), @@ -71,12 +81,21 @@ impl Environment { Environment { config: self.config, tracker: self.tracker.clone(), + stats_event_sender: self.stats_event_sender.clone(), + stats_repository: self.stats_repository.clone(), whitelist_manager: self.whitelist_manager.clone(), ban_service: self.ban_service.clone(), registar: self.registar.clone(), server: self .server - .start(self.tracker, self.ban_service, self.registar.give_form(), access_tokens) + .start( + self.tracker, + self.stats_event_sender, + self.stats_repository, + self.ban_service, + self.registar.give_form(), + access_tokens, + ) .await .unwrap(), } @@ -92,6 +111,8 @@ impl Environment { Environment { config: self.config, tracker: self.tracker, + stats_event_sender: self.stats_event_sender, + stats_repository: self.stats_repository, whitelist_manager: self.whitelist_manager, ban_service: self.ban_service, registar: Registar::default(), diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 6d4001e6c..845d9d440 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -5,6 +5,9 @@ use futures::executor::block_on; use torrust_tracker_configuration::{Configuration, HttpTracker}; use torrust_tracker_lib::bootstrap::app::initialize_with_configuration; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; +use torrust_tracker_lib::core::services::statistics; +use torrust_tracker_lib::core::statistics::event::sender::Sender; +use torrust_tracker_lib::core::statistics::repository::Repository; use 
torrust_tracker_lib::core::whitelist::WhiteListManager; use torrust_tracker_lib::core::Tracker; use torrust_tracker_lib::servers::http::server::{HttpServer, Launcher, Running, Stopped}; @@ -14,6 +17,8 @@ use torrust_tracker_primitives::peer; pub struct Environment { pub config: Arc, pub tracker: Arc, + pub stats_event_sender: Arc>>, + pub stats_repository: Arc, pub whitelist_manager: Arc, pub registar: Registar, pub server: HttpServer, @@ -29,8 +34,13 @@ impl Environment { impl Environment { #[allow(dead_code)] pub fn new(configuration: &Arc) -> Self { + let (stats_event_sender, stats_repository) = statistics::setup::factory(configuration.core.tracker_usage_statistics); + let stats_event_sender = Arc::new(stats_event_sender); + let stats_repository = Arc::new(stats_repository); + let tracker = initialize_with_configuration(configuration); + // todo: instantiate outside of `initialize_with_configuration` let whitelist_manager = tracker.whitelist_manager.clone(); let http_tracker = configuration @@ -49,6 +59,8 @@ impl Environment { Self { config, tracker, + stats_event_sender, + stats_repository, whitelist_manager, registar: Registar::default(), server, @@ -60,9 +72,15 @@ impl Environment { Environment { config: self.config, tracker: self.tracker.clone(), + stats_event_sender: self.stats_event_sender.clone(), + stats_repository: self.stats_repository.clone(), whitelist_manager: self.whitelist_manager.clone(), registar: self.registar.clone(), - server: self.server.start(self.tracker, self.registar.give_form()).await.unwrap(), + server: self + .server + .start(self.tracker, self.stats_event_sender, self.registar.give_form()) + .await + .unwrap(), } } } @@ -76,6 +94,8 @@ impl Environment { Environment { config: self.config, tracker: self.tracker, + stats_event_sender: self.stats_event_sender, + stats_repository: self.stats_repository, whitelist_manager: self.whitelist_manager, registar: Registar::default(), diff --git a/tests/servers/http/v1/contract.rs 
b/tests/servers/http/v1/contract.rs index 37d0288f4..2cec1790f 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -680,7 +680,7 @@ mod for_all_config_modes { .announce(&QueryBuilder::default().query()) .await; - let stats = env.tracker.get_stats().await; + let stats = env.stats_repository.get_stats().await; assert_eq!(stats.tcp4_connections_handled, 1); @@ -706,7 +706,7 @@ mod for_all_config_modes { .announce(&QueryBuilder::default().query()) .await; - let stats = env.tracker.get_stats().await; + let stats = env.stats_repository.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 1); @@ -731,7 +731,7 @@ mod for_all_config_modes { ) .await; - let stats = env.tracker.get_stats().await; + let stats = env.stats_repository.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 0); @@ -750,7 +750,7 @@ mod for_all_config_modes { .announce(&QueryBuilder::default().query()) .await; - let stats = env.tracker.get_stats().await; + let stats = env.stats_repository.get_stats().await; assert_eq!(stats.tcp4_announces_handled, 1); @@ -776,7 +776,7 @@ mod for_all_config_modes { .announce(&QueryBuilder::default().query()) .await; - let stats = env.tracker.get_stats().await; + let stats = env.stats_repository.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 1); @@ -801,7 +801,7 @@ mod for_all_config_modes { ) .await; - let stats = env.tracker.get_stats().await; + let stats = env.stats_repository.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 0); @@ -1167,7 +1167,7 @@ mod for_all_config_modes { ) .await; - let stats = env.tracker.get_stats().await; + let stats = env.stats_repository.get_stats().await; assert_eq!(stats.tcp4_scrapes_handled, 1); @@ -1199,7 +1199,7 @@ mod for_all_config_modes { ) .await; - let stats = env.tracker.get_stats().await; + let stats = env.stats_repository.get_stats().await; assert_eq!(stats.tcp6_scrapes_handled, 1); diff --git a/tests/servers/udp/contract.rs 
b/tests/servers/udp/contract.rs index f0ed98b21..0767d5f07 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -229,7 +229,6 @@ mod receiving_an_announce_request { logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; - let tracker = env.tracker.clone(); let ban_service = env.ban_service.clone(); let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { @@ -271,7 +270,7 @@ mod receiving_an_announce_request { info_hash, ); - let udp_requests_banned_before = tracker.get_stats().await.udp_requests_banned; + let udp_requests_banned_before = env.stats_repository.get_stats().await.udp_requests_banned; // This should return a timeout error match client.send(announce_request.into()).await { @@ -281,7 +280,7 @@ mod receiving_an_announce_request { assert!(client.receive().await.is_err()); - let udp_requests_banned_after = tracker.get_stats().await.udp_requests_banned; + let udp_requests_banned_after = env.stats_repository.get_stats().await.udp_requests_banned; let udp_banned_ips_total_after = ban_service.read().await.get_banned_ips_total(); // UDP counter for banned requests should be increased by 1 diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index f744809c5..06a22229e 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -5,6 +5,9 @@ use bittorrent_primitives::info_hash::InfoHash; use tokio::sync::RwLock; use torrust_tracker_configuration::{Configuration, UdpTracker, DEFAULT_TIMEOUT}; use torrust_tracker_lib::bootstrap::app::initialize_with_configuration; +use torrust_tracker_lib::core::services::statistics; +use torrust_tracker_lib::core::statistics::event::sender::Sender; +use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::Tracker; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_lib::servers::udp::server::banning::BanService; @@ -20,6 +23,8 @@ 
where { pub config: Arc, pub tracker: Arc, + pub stats_event_sender: Arc>>, + pub stats_repository: Arc, pub ban_service: Arc>, pub registar: Registar, pub server: Server, @@ -39,9 +44,13 @@ where impl Environment { #[allow(dead_code)] pub fn new(configuration: &Arc) -> Self { - let tracker = initialize_with_configuration(configuration); + let (stats_event_sender, stats_repository) = statistics::setup::factory(configuration.core.tracker_usage_statistics); + let stats_event_sender = Arc::new(stats_event_sender); + let stats_repository = Arc::new(stats_repository); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let tracker = initialize_with_configuration(configuration); + let udp_tracker = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); let config = Arc::new(udp_tracker[0].clone()); @@ -53,6 +62,8 @@ impl Environment { Self { config, tracker, + stats_event_sender, + stats_repository, ban_service, registar: Registar::default(), server, @@ -65,11 +76,19 @@ impl Environment { Environment { config: self.config, tracker: self.tracker.clone(), + stats_event_sender: self.stats_event_sender.clone(), + stats_repository: self.stats_repository.clone(), ban_service: self.ban_service.clone(), registar: self.registar.clone(), server: self .server - .start(self.tracker, self.ban_service, self.registar.give_form(), cookie_lifetime) + .start( + self.tracker, + self.stats_event_sender, + self.ban_service, + self.registar.give_form(), + cookie_lifetime, + ) .await .unwrap(), } @@ -92,6 +111,8 @@ impl Environment { Environment { config: self.config, tracker: self.tracker, + stats_event_sender: self.stats_event_sender, + stats_repository: self.stats_repository, ban_service: self.ban_service, registar: Registar::default(), server: stopped.expect("it stop the udp tracker service"), From 8bea5213c3a55bfd26101073c52c86124440b958 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Jan 2025 16:19:05 +0000 
Subject: [PATCH 099/802] refactor: [#1187] extract IoC Container --- src/app.rs | 51 +++++++++++++++++----------------------- src/bootstrap/app.rs | 24 +++++++++---------- src/console/profiling.rs | 4 ++-- src/container.rs | 15 ++++++++++++ src/lib.rs | 1 + src/main.rs | 4 ++-- 6 files changed, 53 insertions(+), 46 deletions(-) create mode 100644 src/container.rs diff --git a/src/app.rs b/src/app.rs index 14dc0b07f..64119aa34 100644 --- a/src/app.rs +++ b/src/app.rs @@ -21,19 +21,14 @@ //! - UDP trackers: the user can enable multiple UDP tracker on several ports. //! - HTTP trackers: the user can enable multiple HTTP tracker on several ports. //! - Tracker REST API: the tracker API can be enabled/disabled. -use std::sync::Arc; - -use tokio::sync::RwLock; use tokio::task::JoinHandle; use torrust_tracker_configuration::Configuration; use tracing::instrument; use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; -use crate::core::statistics::event::sender::Sender; -use crate::core::statistics::repository::Repository; +use crate::container::AppContainer; +use crate::servers; use crate::servers::registar::Registar; -use crate::servers::udp::server::banning::BanService; -use crate::{core, servers}; /// # Panics /// @@ -41,14 +36,8 @@ use crate::{core, servers}; /// /// - Can't retrieve tracker keys from database. /// - Can't load whitelist from database. 
-#[instrument(skip(config, tracker, ban_service, stats_event_sender, stats_repository))] -pub async fn start( - config: &Configuration, - tracker: Arc, - ban_service: Arc>, - stats_event_sender: Arc>>, - stats_repository: Arc, -) -> Vec> { +#[instrument(skip(config, app_container))] +pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec> { if config.http_api.is_none() && (config.udp_trackers.is_none() || config.udp_trackers.as_ref().map_or(true, std::vec::Vec::is_empty)) && (config.http_trackers.is_none() || config.http_trackers.as_ref().map_or(true, std::vec::Vec::is_empty)) @@ -61,16 +50,18 @@ pub async fn start( let registar = Registar::default(); // Load peer keys - if tracker.is_private() { - tracker + if app_container.tracker.is_private() { + app_container + .tracker .load_keys_from_database() .await .expect("Could not retrieve keys from database."); } // Load whitelisted torrents - if tracker.is_listed() { - tracker + if app_container.tracker.is_listed() { + app_container + .tracker .whitelist_manager .load_whitelist_from_database() .await @@ -80,7 +71,7 @@ pub async fn start( // Start the UDP blocks if let Some(udp_trackers) = &config.udp_trackers { for udp_tracker_config in udp_trackers { - if tracker.is_private() { + if app_container.tracker.is_private() { tracing::warn!( "Could not start UDP tracker on: {} while in private mode. 
UDP is not safe for private trackers!", udp_tracker_config.bind_address @@ -89,9 +80,9 @@ pub async fn start( jobs.push( udp_tracker::start_job( udp_tracker_config, - tracker.clone(), - stats_event_sender.clone(), - ban_service.clone(), + app_container.tracker.clone(), + app_container.stats_event_sender.clone(), + app_container.ban_service.clone(), registar.give_form(), ) .await, @@ -107,8 +98,8 @@ pub async fn start( for http_tracker_config in http_trackers { if let Some(job) = http_tracker::start_job( http_tracker_config, - tracker.clone(), - stats_event_sender.clone(), + app_container.tracker.clone(), + app_container.stats_event_sender.clone(), registar.give_form(), servers::http::Version::V1, ) @@ -125,10 +116,10 @@ pub async fn start( if let Some(http_api_config) = &config.http_api { if let Some(job) = tracker_apis::start_job( http_api_config, - tracker.clone(), - ban_service.clone(), - stats_event_sender.clone(), - stats_repository.clone(), + app_container.tracker.clone(), + app_container.ban_service.clone(), + app_container.stats_event_sender.clone(), + app_container.stats_repository.clone(), registar.give_form(), servers::apis::Version::V1, ) @@ -142,7 +133,7 @@ pub async fn start( // Start runners to remove torrents without peers, every interval if config.core.inactive_peer_cleanup_interval > 0 { - jobs.push(torrent_cleanup::start_job(&config.core, &tracker)); + jobs.push(torrent_cleanup::start_job(&config.core, &app_container.tracker)); } // Start Health Check API diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index d63b414e1..68ec93e38 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -21,10 +21,9 @@ use tracing::instrument; use super::config::initialize_configuration; use crate::bootstrap; +use crate::container::AppContainer; use crate::core::databases::Database; use crate::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; -use crate::core::statistics::event::sender::Sender; -use 
crate::core::statistics::repository::Repository; use crate::core::whitelist::WhiteListManager; use crate::core::Tracker; use crate::servers::udp::server::banning::BanService; @@ -38,15 +37,8 @@ use crate::shared::crypto::keys::{self, Keeper as _}; /// /// Setup can file if the configuration is invalid. #[must_use] -#[allow(clippy::type_complexity)] #[instrument(skip())] -pub fn setup() -> ( - Configuration, - Arc, - Arc>, - Arc>>, - Arc, -) { +pub fn setup() -> (Configuration, AppContainer) { #[cfg(not(test))] check_seed(); @@ -62,13 +54,21 @@ pub fn setup() -> ( let stats_event_sender = Arc::new(stats_event_sender); let stats_repository = Arc::new(stats_repository); - let udp_ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let tracker = initialize_with_configuration(&configuration); tracing::info!("Configuration:\n{}", configuration.clone().mask_secrets().to_json()); - (configuration, tracker, udp_ban_service, stats_event_sender, stats_repository) + ( + configuration, + AppContainer { + tracker, + ban_service, + stats_event_sender, + stats_repository, + }, + ) } /// checks if the seed is the instance seed in production. 
diff --git a/src/console/profiling.rs b/src/console/profiling.rs index 2f6471906..318fce1e8 100644 --- a/src/console/profiling.rs +++ b/src/console/profiling.rs @@ -179,9 +179,9 @@ pub async fn run() { return; }; - let (config, tracker, ban_service, stats_event_sender, stats_repository) = bootstrap::app::setup(); + let (config, app_container) = bootstrap::app::setup(); - let jobs = app::start(&config, tracker, ban_service, stats_event_sender, stats_repository).await; + let jobs = app::start(&config, &app_container).await; // Run the tracker for a fixed duration let run_duration = sleep(Duration::from_secs(duration_secs)); diff --git a/src/container.rs b/src/container.rs new file mode 100644 index 000000000..961b32a12 --- /dev/null +++ b/src/container.rs @@ -0,0 +1,15 @@ +use std::sync::Arc; + +use tokio::sync::RwLock; + +use crate::core::statistics::event::sender::Sender; +use crate::core::statistics::repository::Repository; +use crate::core::Tracker; +use crate::servers::udp::server::banning::BanService; + +pub struct AppContainer { + pub tracker: Arc, + pub ban_service: Arc>, + pub stats_event_sender: Arc>>, + pub stats_repository: Arc, +} diff --git a/src/lib.rs b/src/lib.rs index d7e4bc5b2..212430605 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -493,6 +493,7 @@ use torrust_tracker_clock::clock; pub mod app; pub mod bootstrap; pub mod console; +pub mod container; pub mod core; pub mod servers; pub mod shared; diff --git a/src/main.rs b/src/main.rs index e536124a2..f05de0327 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,9 +2,9 @@ use torrust_tracker_lib::{app, bootstrap}; #[tokio::main] async fn main() { - let (config, tracker, udp_ban_service, stats_event_sender, stats_repository) = bootstrap::app::setup(); + let (config, app_container) = bootstrap::app::setup(); - let jobs = app::start(&config, tracker, udp_ban_service, stats_event_sender, stats_repository).await; + let jobs = app::start(&config, &app_container).await; // handle the signals tokio::select! 
{ From 747b58d88c7496c7eafbd135d07b524079aa65ec Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Jan 2025 16:32:06 +0000 Subject: [PATCH 100/802] refactor: [#1187] extract one function and rename another one --- src/bootstrap/app.rs | 12 +++++++++--- src/bootstrap/jobs/http_tracker.rs | 4 ++-- src/bootstrap/jobs/tracker_apis.rs | 4 ++-- src/servers/apis/server.rs | 4 ++-- src/servers/http/server.rs | 4 ++-- src/servers/udp/server/mod.rs | 6 +++--- tests/servers/api/environment.rs | 6 +++--- tests/servers/http/environment.rs | 6 +++--- tests/servers/udp/environment.rs | 4 ++-- 9 files changed, 28 insertions(+), 22 deletions(-) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 68ec93e38..294de64e3 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -56,7 +56,7 @@ pub fn setup() -> (Configuration, AppContainer) { let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let tracker = initialize_with_configuration(&configuration); + let tracker = initialize_globals_and_tracker(&configuration); tracing::info!("Configuration:\n{}", configuration.clone().mask_secrets().to_json()); @@ -88,10 +88,16 @@ pub fn check_seed() { /// The configuration may be obtained from the environment (via config file or env vars). #[must_use] #[instrument(skip())] -pub fn initialize_with_configuration(configuration: &Configuration) -> Arc { +pub fn initialize_globals_and_tracker(configuration: &Configuration) -> Arc { + initialize_global_services(configuration); + Arc::new(initialize_tracker(configuration)) +} + +/// It initializes the global services. +#[instrument(skip())] +pub fn initialize_global_services(configuration: &Configuration) { initialize_static(); initialize_logging(configuration); - Arc::new(initialize_tracker(configuration)) } /// It initializes the application static values. 
diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 9135a8828..abb531049 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -86,7 +86,7 @@ mod tests { use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::initialize_with_configuration; + use crate::bootstrap::app::initialize_globals_and_tracker; use crate::bootstrap::jobs::http_tracker::start_job; use crate::core::services::statistics; use crate::servers::http::Version; @@ -99,7 +99,7 @@ mod tests { let config = &http_tracker[0]; let (stats_event_sender, _stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); - let tracker = initialize_with_configuration(&cfg); + let tracker = initialize_globals_and_tracker(&cfg); let version = Version::V1; start_job(config, tracker, stats_event_sender, Registar::default().give_form(), version) diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index d84bb08a9..1932888de 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -140,7 +140,7 @@ mod tests { use tokio::sync::RwLock; use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::initialize_with_configuration; + use crate::bootstrap::app::initialize_globals_and_tracker; use crate::bootstrap::jobs::tracker_apis::start_job; use crate::core::services::statistics; use crate::servers::apis::Version; @@ -158,7 +158,7 @@ mod tests { let stats_event_sender = Arc::new(stats_event_sender); let stats_repository = Arc::new(stats_repository); - let tracker = initialize_with_configuration(&cfg); + let tracker = initialize_globals_and_tracker(&cfg); let version = Version::V1; diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index bf1511edb..e0123a173 100644 --- a/src/servers/apis/server.rs +++ 
b/src/servers/apis/server.rs @@ -333,7 +333,7 @@ mod tests { use tokio::sync::RwLock; use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::initialize_with_configuration; + use crate::bootstrap::app::initialize_globals_and_tracker; use crate::bootstrap::jobs::make_rust_tls; use crate::core::services::statistics; use crate::servers::apis::server::{ApiServer, Launcher}; @@ -350,7 +350,7 @@ mod tests { let (stats_event_sender, stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); let stats_repository = Arc::new(stats_repository); - let tracker = initialize_with_configuration(&cfg); + let tracker = initialize_globals_and_tracker(&cfg); let bind_to = config.bind_address; diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 537fc37fb..40035bc52 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -242,7 +242,7 @@ mod tests { use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::initialize_with_configuration; + use crate::bootstrap::app::initialize_globals_and_tracker; use crate::bootstrap::jobs::make_rust_tls; use crate::core::services::statistics; use crate::servers::http::server::{HttpServer, Launcher}; @@ -254,7 +254,7 @@ mod tests { let (stats_event_sender, _stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); - let tracker = initialize_with_configuration(&cfg); + let tracker = initialize_globals_and_tracker(&cfg); let http_trackers = cfg.http_trackers.clone().expect("missing HTTP trackers configuration"); let config = &http_trackers[0]; diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index 1b0a1da9a..373541f75 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -63,7 +63,7 @@ mod tests { use super::spawner::Spawner; 
use super::Server; - use crate::bootstrap::app::initialize_with_configuration; + use crate::bootstrap::app::initialize_globals_and_tracker; use crate::core::services::statistics; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; @@ -76,7 +76,7 @@ mod tests { let (stats_event_sender, _stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let tracker = initialize_with_configuration(&cfg); + let tracker = initialize_globals_and_tracker(&cfg); let udp_trackers = cfg.udp_trackers.clone().expect("missing UDP trackers configuration"); let config = &udp_trackers[0]; @@ -110,7 +110,7 @@ mod tests { let (stats_event_sender, _stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let tracker = initialize_with_configuration(&cfg); + let tracker = initialize_globals_and_tracker(&cfg); let config = &cfg.udp_trackers.as_ref().unwrap().first().unwrap(); let bind_to = config.bind_address; diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 6658c27da..32db0ab5d 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -6,7 +6,7 @@ use futures::executor::block_on; use tokio::sync::RwLock; use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; use torrust_tracker_configuration::{Configuration, HttpApi}; -use torrust_tracker_lib::bootstrap::app::initialize_with_configuration; +use torrust_tracker_lib::bootstrap::app::initialize_globals_and_tracker; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; use torrust_tracker_lib::core::services::statistics; use 
torrust_tracker_lib::core::statistics::event::sender::Sender; @@ -50,9 +50,9 @@ impl Environment { let stats_repository = Arc::new(stats_repository); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let tracker = initialize_with_configuration(configuration); + let tracker = initialize_globals_and_tracker(configuration); - // todo: instantiate outside of `initialize_with_configuration` + // todo: instantiate outside of `initialize_globals_and_tracker` let whitelist_manager = tracker.whitelist_manager.clone(); let config = Arc::new(configuration.http_api.clone().expect("missing API configuration")); diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 845d9d440..b6f98f32c 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use futures::executor::block_on; use torrust_tracker_configuration::{Configuration, HttpTracker}; -use torrust_tracker_lib::bootstrap::app::initialize_with_configuration; +use torrust_tracker_lib::bootstrap::app::initialize_globals_and_tracker; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; use torrust_tracker_lib::core::services::statistics; use torrust_tracker_lib::core::statistics::event::sender::Sender; @@ -38,9 +38,9 @@ impl Environment { let stats_event_sender = Arc::new(stats_event_sender); let stats_repository = Arc::new(stats_repository); - let tracker = initialize_with_configuration(configuration); + let tracker = initialize_globals_and_tracker(configuration); - // todo: instantiate outside of `initialize_with_configuration` + // todo: instantiate outside of `initialize_globals_and_tracker` let whitelist_manager = tracker.whitelist_manager.clone(); let http_tracker = configuration diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 06a22229e..69952ecda 100644 --- a/tests/servers/udp/environment.rs 
+++ b/tests/servers/udp/environment.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use tokio::sync::RwLock; use torrust_tracker_configuration::{Configuration, UdpTracker, DEFAULT_TIMEOUT}; -use torrust_tracker_lib::bootstrap::app::initialize_with_configuration; +use torrust_tracker_lib::bootstrap::app::initialize_globals_and_tracker; use torrust_tracker_lib::core::services::statistics; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; @@ -49,7 +49,7 @@ impl Environment { let stats_repository = Arc::new(stats_repository); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let tracker = initialize_with_configuration(configuration); + let tracker = initialize_globals_and_tracker(configuration); let udp_tracker = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); From 4aea9db2215b64eedf44a1264d56f53ecaa63812 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Jan 2025 16:42:53 +0000 Subject: [PATCH 101/802] refactor: [#1187] extract fn initialize_app_container --- src/bootstrap/app.rs | 41 +++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 294de64e3..65b0549ec 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -48,27 +48,13 @@ pub fn setup() -> (Configuration, AppContainer) { panic!("Configuration error: {e}"); } - // Initialize services - - let (stats_event_sender, stats_repository) = statistics::setup::factory(configuration.core.tracker_usage_statistics); - let stats_event_sender = Arc::new(stats_event_sender); - let stats_repository = Arc::new(stats_repository); - - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - - let tracker = initialize_globals_and_tracker(&configuration); + 
initialize_global_services(&configuration); tracing::info!("Configuration:\n{}", configuration.clone().mask_secrets().to_json()); - ( - configuration, - AppContainer { - tracker, - ban_service, - stats_event_sender, - stats_repository, - }, - ) + let app_container = initialize_app_container(&configuration); + + (configuration, app_container) } /// checks if the seed is the instance seed in production. @@ -100,6 +86,25 @@ pub fn initialize_global_services(configuration: &Configuration) { initialize_logging(configuration); } +/// It initializes the stIoC Container. +#[instrument(skip())] +pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { + let (stats_event_sender, stats_repository) = statistics::setup::factory(configuration.core.tracker_usage_statistics); + let stats_event_sender = Arc::new(stats_event_sender); + let stats_repository = Arc::new(stats_repository); + + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + + let tracker = Arc::new(initialize_tracker(configuration)); + + AppContainer { + tracker, + ban_service, + stats_event_sender, + stats_repository, + } +} + /// It initializes the application static values. 
/// /// These values are accessible throughout the entire application: From 36db088fb6d79301984fb59a81698237f354a062 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Jan 2025 17:00:30 +0000 Subject: [PATCH 102/802] refactor: [#1187] inline fn initialize_globals_and_tracker --- src/bootstrap/app.rs | 12 +----------- src/bootstrap/jobs/http_tracker.rs | 8 ++++++-- src/bootstrap/jobs/tracker_apis.rs | 5 +++-- src/servers/apis/server.rs | 6 ++++-- src/servers/http/server.rs | 6 ++++-- src/servers/udp/server/mod.rs | 10 +++++++--- tests/servers/api/environment.rs | 7 ++++--- tests/servers/http/environment.rs | 7 ++++--- tests/servers/udp/environment.rs | 5 +++-- 9 files changed, 36 insertions(+), 30 deletions(-) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 65b0549ec..b44cf745a 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -69,16 +69,6 @@ pub fn check_seed() { assert_eq!(seed, instance, "maybe using zeroed seed in production!?"); } -/// It initializes the application with the given configuration. -/// -/// The configuration may be obtained from the environment (via config file or env vars). -#[must_use] -#[instrument(skip())] -pub fn initialize_globals_and_tracker(configuration: &Configuration) -> Arc { - initialize_global_services(configuration); - Arc::new(initialize_tracker(configuration)) -} - /// It initializes the global services. #[instrument(skip())] pub fn initialize_global_services(configuration: &Configuration) { @@ -86,7 +76,7 @@ pub fn initialize_global_services(configuration: &Configuration) { initialize_logging(configuration); } -/// It initializes the stIoC Container. +/// It initializes the IoC Container. 
#[instrument(skip())] pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { let (stats_event_sender, stats_repository) = statistics::setup::factory(configuration.core.tracker_usage_statistics); diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index abb531049..aff9a2e11 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -86,7 +86,7 @@ mod tests { use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::initialize_globals_and_tracker; + use crate::bootstrap::app::{initialize_global_services, initialize_tracker}; use crate::bootstrap::jobs::http_tracker::start_job; use crate::core::services::statistics; use crate::servers::http::Version; @@ -97,9 +97,13 @@ mod tests { let cfg = Arc::new(ephemeral_public()); let http_tracker = cfg.http_trackers.clone().expect("missing HTTP tracker configuration"); let config = &http_tracker[0]; + let (stats_event_sender, _stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); - let tracker = initialize_globals_and_tracker(&cfg); + + initialize_global_services(&cfg); + let tracker = Arc::new(initialize_tracker(&cfg)); + let version = Version::V1; start_job(config, tracker, stats_event_sender, Registar::default().give_form(), version) diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 1932888de..e4d73849a 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -140,7 +140,7 @@ mod tests { use tokio::sync::RwLock; use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::initialize_globals_and_tracker; + use crate::bootstrap::app::{initialize_global_services, initialize_tracker}; use crate::bootstrap::jobs::tracker_apis::start_job; use crate::core::services::statistics; use crate::servers::apis::Version; 
@@ -158,7 +158,8 @@ mod tests { let stats_event_sender = Arc::new(stats_event_sender); let stats_repository = Arc::new(stats_repository); - let tracker = initialize_globals_and_tracker(&cfg); + initialize_global_services(&cfg); + let tracker = Arc::new(initialize_tracker(&cfg)); let version = Version::V1; diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index e0123a173..edec7fc1a 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -333,7 +333,7 @@ mod tests { use tokio::sync::RwLock; use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::initialize_globals_and_tracker; + use crate::bootstrap::app::{initialize_global_services, initialize_tracker}; use crate::bootstrap::jobs::make_rust_tls; use crate::core::services::statistics; use crate::servers::apis::server::{ApiServer, Launcher}; @@ -350,7 +350,9 @@ mod tests { let (stats_event_sender, stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); let stats_repository = Arc::new(stats_repository); - let tracker = initialize_globals_and_tracker(&cfg); + + initialize_global_services(&cfg); + let tracker = Arc::new(initialize_tracker(&cfg)); let bind_to = config.bind_address; diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 40035bc52..ec466ae4a 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -242,7 +242,7 @@ mod tests { use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::initialize_globals_and_tracker; + use crate::bootstrap::app::{initialize_global_services, initialize_tracker}; use crate::bootstrap::jobs::make_rust_tls; use crate::core::services::statistics; use crate::servers::http::server::{HttpServer, Launcher}; @@ -254,7 +254,9 @@ mod tests { let (stats_event_sender, _stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); let 
stats_event_sender = Arc::new(stats_event_sender); - let tracker = initialize_globals_and_tracker(&cfg); + + initialize_global_services(&cfg); + let tracker = Arc::new(initialize_tracker(&cfg)); let http_trackers = cfg.http_trackers.clone().expect("missing HTTP trackers configuration"); let config = &http_trackers[0]; diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index 373541f75..950c0fa74 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -63,7 +63,7 @@ mod tests { use super::spawner::Spawner; use super::Server; - use crate::bootstrap::app::initialize_globals_and_tracker; + use crate::bootstrap::app::{initialize_global_services, initialize_tracker}; use crate::core::services::statistics; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; @@ -76,7 +76,9 @@ mod tests { let (stats_event_sender, _stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let tracker = initialize_globals_and_tracker(&cfg); + + initialize_global_services(&cfg); + let tracker = Arc::new(initialize_tracker(&cfg)); let udp_trackers = cfg.udp_trackers.clone().expect("missing UDP trackers configuration"); let config = &udp_trackers[0]; @@ -110,7 +112,9 @@ mod tests { let (stats_event_sender, _stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let tracker = initialize_globals_and_tracker(&cfg); + + initialize_global_services(&cfg); + let tracker = Arc::new(initialize_tracker(&cfg)); let config = &cfg.udp_trackers.as_ref().unwrap().first().unwrap(); let bind_to = config.bind_address; diff --git a/tests/servers/api/environment.rs 
b/tests/servers/api/environment.rs index 32db0ab5d..e7fc319d8 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -6,7 +6,7 @@ use futures::executor::block_on; use tokio::sync::RwLock; use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; use torrust_tracker_configuration::{Configuration, HttpApi}; -use torrust_tracker_lib::bootstrap::app::initialize_globals_and_tracker; +use torrust_tracker_lib::bootstrap::app::{initialize_global_services, initialize_tracker}; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; use torrust_tracker_lib::core::services::statistics; use torrust_tracker_lib::core::statistics::event::sender::Sender; @@ -50,9 +50,10 @@ impl Environment { let stats_repository = Arc::new(stats_repository); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let tracker = initialize_globals_and_tracker(configuration); + initialize_global_services(configuration); + let tracker = Arc::new(initialize_tracker(configuration)); - // todo: instantiate outside of `initialize_globals_and_tracker` + // todo: instantiate outside of `initialize_tracker_dependencies` let whitelist_manager = tracker.whitelist_manager.clone(); let config = Arc::new(configuration.http_api.clone().expect("missing API configuration")); diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index b6f98f32c..2137ca0d4 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use futures::executor::block_on; use torrust_tracker_configuration::{Configuration, HttpTracker}; -use torrust_tracker_lib::bootstrap::app::initialize_globals_and_tracker; +use torrust_tracker_lib::bootstrap::app::{initialize_global_services, initialize_tracker}; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; use torrust_tracker_lib::core::services::statistics; use 
torrust_tracker_lib::core::statistics::event::sender::Sender; @@ -38,9 +38,10 @@ impl Environment { let stats_event_sender = Arc::new(stats_event_sender); let stats_repository = Arc::new(stats_repository); - let tracker = initialize_globals_and_tracker(configuration); + initialize_global_services(configuration); + let tracker = Arc::new(initialize_tracker(configuration)); - // todo: instantiate outside of `initialize_globals_and_tracker` + // todo: instantiate outside of `initialize_tracker_dependencies` let whitelist_manager = tracker.whitelist_manager.clone(); let http_tracker = configuration diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 69952ecda..0a0125714 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use tokio::sync::RwLock; use torrust_tracker_configuration::{Configuration, UdpTracker, DEFAULT_TIMEOUT}; -use torrust_tracker_lib::bootstrap::app::initialize_globals_and_tracker; +use torrust_tracker_lib::bootstrap::app::{initialize_global_services, initialize_tracker}; use torrust_tracker_lib::core::services::statistics; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; @@ -49,7 +49,8 @@ impl Environment { let stats_repository = Arc::new(stats_repository); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let tracker = initialize_globals_and_tracker(configuration); + initialize_global_services(configuration); + let tracker = Arc::new(initialize_tracker(configuration)); let udp_tracker = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); From a4d8da0eff07f1ccd471b31b21193682de35b955 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Jan 2025 17:23:04 +0000 Subject: [PATCH 103/802] refactor: [#1187] inline fn initialize_tracker --- 
src/bootstrap/app.rs | 19 +++---------------- src/bootstrap/jobs/http_tracker.rs | 9 ++++++--- src/bootstrap/jobs/tracker_apis.rs | 9 ++++++--- src/servers/apis/server.rs | 9 ++++++--- src/servers/http/server.rs | 9 ++++++--- src/servers/udp/server/mod.rs | 14 ++++++++++---- tests/servers/api/environment.rs | 10 +++++----- tests/servers/http/environment.rs | 10 +++++----- tests/servers/udp/environment.rs | 9 ++++++--- 9 files changed, 53 insertions(+), 45 deletions(-) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index b44cf745a..b0df03404 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -25,7 +25,6 @@ use crate::container::AppContainer; use crate::core::databases::Database; use crate::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; use crate::core::whitelist::WhiteListManager; -use crate::core::Tracker; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; use crate::shared::crypto::ephemeral_instance_keys; @@ -82,10 +81,10 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { let (stats_event_sender, stats_repository) = statistics::setup::factory(configuration.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); let stats_repository = Arc::new(stats_repository); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - - let tracker = Arc::new(initialize_tracker(configuration)); + let database = initialize_database(configuration); + let whitelist_manager = initialize_whitelist(database.clone()); + let tracker = Arc::new(tracker_factory(configuration, &database, &whitelist_manager)); AppContainer { tracker, @@ -116,18 +115,6 @@ pub fn initialize_static() { lazy_static::initialize(&ephemeral_instance_keys::ZEROED_TEST_CIPHER_BLOWFISH); } -/// It builds the domain tracker -/// -/// The tracker is the domain layer service. 
It's the entrypoint to make requests to the domain layer. -/// It's used by other higher-level components like the UDP and HTTP trackers or the tracker API. -#[must_use] -#[instrument(skip(config))] -pub fn initialize_tracker(config: &Configuration) -> Tracker { - let (database, whitelist_manager) = initialize_tracker_dependencies(config); - - tracker_factory(config, &database, &whitelist_manager) -} - #[allow(clippy::type_complexity)] #[must_use] pub fn initialize_tracker_dependencies(config: &Configuration) -> (Arc>, Arc) { diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index aff9a2e11..0714cbd72 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -86,9 +86,9 @@ mod tests { use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::{initialize_global_services, initialize_tracker}; + use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::http_tracker::start_job; - use crate::core::services::statistics; + use crate::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; use crate::servers::http::Version; use crate::servers::registar::Registar; @@ -102,7 +102,10 @@ mod tests { let stats_event_sender = Arc::new(stats_event_sender); initialize_global_services(&cfg); - let tracker = Arc::new(initialize_tracker(&cfg)); + + let database = initialize_database(&cfg); + let whitelist_manager = initialize_whitelist(database.clone()); + let tracker = Arc::new(tracker_factory(&cfg, &database, &whitelist_manager)); let version = Version::V1; diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index e4d73849a..dabed1509 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -140,9 +140,9 @@ mod tests { use tokio::sync::RwLock; use torrust_tracker_test_helpers::configuration::ephemeral_public; - use 
crate::bootstrap::app::{initialize_global_services, initialize_tracker}; + use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::tracker_apis::start_job; - use crate::core::services::statistics; + use crate::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; use crate::servers::apis::Version; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; @@ -159,7 +159,10 @@ mod tests { let stats_repository = Arc::new(stats_repository); initialize_global_services(&cfg); - let tracker = Arc::new(initialize_tracker(&cfg)); + + let database = initialize_database(&cfg); + let whitelist_manager = initialize_whitelist(database.clone()); + let tracker = Arc::new(tracker_factory(&cfg, &database, &whitelist_manager)); let version = Version::V1; diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index edec7fc1a..e74b2265e 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -333,9 +333,9 @@ mod tests { use tokio::sync::RwLock; use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::{initialize_global_services, initialize_tracker}; + use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::make_rust_tls; - use crate::core::services::statistics; + use crate::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; use crate::servers::apis::server::{ApiServer, Launcher}; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; @@ -352,7 +352,10 @@ mod tests { let stats_repository = Arc::new(stats_repository); initialize_global_services(&cfg); - let tracker = Arc::new(initialize_tracker(&cfg)); + + let database = initialize_database(&cfg); + let whitelist_manager = initialize_whitelist(database.clone()); + let tracker = Arc::new(tracker_factory(&cfg, &database, &whitelist_manager)); let bind_to = 
config.bind_address; diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index ec466ae4a..1cb8d7adb 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -242,9 +242,9 @@ mod tests { use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::{initialize_global_services, initialize_tracker}; + use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::make_rust_tls; - use crate::core::services::statistics; + use crate::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; use crate::servers::http::server::{HttpServer, Launcher}; use crate::servers::registar::Registar; @@ -256,7 +256,10 @@ mod tests { let stats_event_sender = Arc::new(stats_event_sender); initialize_global_services(&cfg); - let tracker = Arc::new(initialize_tracker(&cfg)); + + let database = initialize_database(&cfg); + let whitelist_manager = initialize_whitelist(database.clone()); + let tracker = Arc::new(tracker_factory(&cfg, &database, &whitelist_manager)); let http_trackers = cfg.http_trackers.clone().expect("missing HTTP trackers configuration"); let config = &http_trackers[0]; diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index 950c0fa74..d5fe554ea 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -63,8 +63,8 @@ mod tests { use super::spawner::Spawner; use super::Server; - use crate::bootstrap::app::{initialize_global_services, initialize_tracker}; - use crate::core::services::statistics; + use crate::bootstrap::app::initialize_global_services; + use crate::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; @@ -78,7 +78,10 @@ mod tests { let ban_service = 
Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); initialize_global_services(&cfg); - let tracker = Arc::new(initialize_tracker(&cfg)); + + let database = initialize_database(&cfg); + let whitelist_manager = initialize_whitelist(database.clone()); + let tracker = Arc::new(tracker_factory(&cfg, &database, &whitelist_manager)); let udp_trackers = cfg.udp_trackers.clone().expect("missing UDP trackers configuration"); let config = &udp_trackers[0]; @@ -114,7 +117,10 @@ mod tests { let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); initialize_global_services(&cfg); - let tracker = Arc::new(initialize_tracker(&cfg)); + + let database = initialize_database(&cfg); + let whitelist_manager = initialize_whitelist(database.clone()); + let tracker = Arc::new(tracker_factory(&cfg, &database, &whitelist_manager)); let config = &cfg.udp_trackers.as_ref().unwrap().first().unwrap(); let bind_to = config.bind_address; diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index e7fc319d8..b230562f3 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -6,9 +6,9 @@ use futures::executor::block_on; use tokio::sync::RwLock; use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; use torrust_tracker_configuration::{Configuration, HttpApi}; -use torrust_tracker_lib::bootstrap::app::{initialize_global_services, initialize_tracker}; +use torrust_tracker_lib::bootstrap::app::initialize_global_services; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; -use torrust_tracker_lib::core::services::statistics; +use torrust_tracker_lib::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::whitelist::WhiteListManager; @@ -51,10 +51,10 @@ impl Environment { 
let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); initialize_global_services(configuration); - let tracker = Arc::new(initialize_tracker(configuration)); - // todo: instantiate outside of `initialize_tracker_dependencies` - let whitelist_manager = tracker.whitelist_manager.clone(); + let database = initialize_database(configuration); + let whitelist_manager = initialize_whitelist(database.clone()); + let tracker = Arc::new(tracker_factory(configuration, &database, &whitelist_manager)); let config = Arc::new(configuration.http_api.clone().expect("missing API configuration")); diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 2137ca0d4..3732e9341 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -3,9 +3,9 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use futures::executor::block_on; use torrust_tracker_configuration::{Configuration, HttpTracker}; -use torrust_tracker_lib::bootstrap::app::{initialize_global_services, initialize_tracker}; +use torrust_tracker_lib::bootstrap::app::initialize_global_services; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; -use torrust_tracker_lib::core::services::statistics; +use torrust_tracker_lib::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::whitelist::WhiteListManager; @@ -39,10 +39,10 @@ impl Environment { let stats_repository = Arc::new(stats_repository); initialize_global_services(configuration); - let tracker = Arc::new(initialize_tracker(configuration)); - // todo: instantiate outside of `initialize_tracker_dependencies` - let whitelist_manager = tracker.whitelist_manager.clone(); + let database = initialize_database(configuration); + let whitelist_manager = 
initialize_whitelist(database.clone()); + let tracker = Arc::new(tracker_factory(configuration, &database, &whitelist_manager)); let http_tracker = configuration .http_trackers diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 0a0125714..34841df16 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -4,8 +4,8 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use tokio::sync::RwLock; use torrust_tracker_configuration::{Configuration, UdpTracker, DEFAULT_TIMEOUT}; -use torrust_tracker_lib::bootstrap::app::{initialize_global_services, initialize_tracker}; -use torrust_tracker_lib::core::services::statistics; +use torrust_tracker_lib::bootstrap::app::initialize_global_services; +use torrust_tracker_lib::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::Tracker; @@ -50,7 +50,10 @@ impl Environment { let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); initialize_global_services(configuration); - let tracker = Arc::new(initialize_tracker(configuration)); + + let database = initialize_database(configuration); + let whitelist_manager = initialize_whitelist(database.clone()); + let tracker = Arc::new(tracker_factory(configuration, &database, &whitelist_manager)); let udp_tracker = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); From aa9f1c314e4b020a32d591f3e32338d18de4d609 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Jan 2025 17:31:19 +0000 Subject: [PATCH 104/802] refactor: [#1187] move fn initialize_tracker_dependencies --- src/app_test.rs | 18 ++++++++++++++++++ src/bootstrap/app.rs | 11 ----------- src/core/mod.rs | 2 +- src/core/services/statistics/mod.rs | 2 +- src/core/services/torrent.rs | 4 
++-- src/lib.rs | 1 + src/servers/http/v1/handlers/announce.rs | 2 +- src/servers/http/v1/handlers/scrape.rs | 2 +- src/servers/http/v1/services/announce.rs | 4 ++-- src/servers/http/v1/services/scrape.rs | 2 +- src/servers/udp/handlers.rs | 4 ++-- 11 files changed, 30 insertions(+), 22 deletions(-) create mode 100644 src/app_test.rs diff --git a/src/app_test.rs b/src/app_test.rs new file mode 100644 index 000000000..c50f87965 --- /dev/null +++ b/src/app_test.rs @@ -0,0 +1,18 @@ +//! This file contains only functions used for testing. +use std::sync::Arc; + +use torrust_tracker_configuration::Configuration; + +use crate::core::databases::Database; +use crate::core::services::{initialize_database, initialize_whitelist}; +use crate::core::whitelist::WhiteListManager; + +/// Initialize the tracker dependencies. +#[allow(clippy::type_complexity)] +#[must_use] +pub fn initialize_tracker_dependencies(config: &Configuration) -> (Arc>, Arc) { + let database = initialize_database(config); + let whitelist_manager = initialize_whitelist(database.clone()); + + (database, whitelist_manager) +} diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index b0df03404..fb3f50b65 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -22,9 +22,7 @@ use tracing::instrument; use super::config::initialize_configuration; use crate::bootstrap; use crate::container::AppContainer; -use crate::core::databases::Database; use crate::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; -use crate::core::whitelist::WhiteListManager; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; use crate::shared::crypto::ephemeral_instance_keys; @@ -115,15 +113,6 @@ pub fn initialize_static() { lazy_static::initialize(&ephemeral_instance_keys::ZEROED_TEST_CIPHER_BLOWFISH); } -#[allow(clippy::type_complexity)] -#[must_use] -pub fn initialize_tracker_dependencies(config: 
&Configuration) -> (Arc>, Arc) { - let database = initialize_database(config); - let whitelist_manager = initialize_whitelist(database.clone()); - - (database, whitelist_manager) -} - /// It initializes the log threshold, format and channel. /// /// See [the logging setup](crate::bootstrap::logging::setup) for more info about logging. diff --git a/src/core/mod.rs b/src/core/mod.rs index 9aef1b2f2..a5f9988b9 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -1080,7 +1080,7 @@ mod tests { use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration; - use crate::bootstrap::app::initialize_tracker_dependencies; + use crate::app_test::initialize_tracker_dependencies; use crate::core::peer::Peer; use crate::core::services::tracker_factory; use crate::core::whitelist::WhiteListManager; diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index 657f3eb06..fd185a727 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -117,7 +117,7 @@ mod tests { use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_test_helpers::configuration; - use crate::bootstrap::app::initialize_tracker_dependencies; + use crate::app_test::initialize_tracker_dependencies; use crate::core; use crate::core::services::statistics::{self, get_metrics, TrackerMetrics}; use crate::core::services::tracker_factory; diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 9a1a2a725..445b8fb8f 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -129,7 +129,7 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; - use crate::bootstrap::app::initialize_tracker_dependencies; + use crate::app_test::initialize_tracker_dependencies; use crate::core::services::torrent::tests::sample_peer; use crate::core::services::torrent::{get_torrent_info, Info}; use 
crate::core::services::tracker_factory; @@ -191,7 +191,7 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; - use crate::bootstrap::app::initialize_tracker_dependencies; + use crate::app_test::initialize_tracker_dependencies; use crate::core::services::torrent::tests::sample_peer; use crate::core::services::torrent::{get_torrents_page, BasicInfo, Pagination}; use crate::core::services::tracker_factory; diff --git a/src/lib.rs b/src/lib.rs index 212430605..8e0e64db0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -491,6 +491,7 @@ use torrust_tracker_clock::clock; pub mod app; +pub mod app_test; pub mod bootstrap; pub mod console; pub mod container; diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 1c8779625..6cfe0871f 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -205,7 +205,7 @@ mod tests { use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; - use crate::bootstrap::app::initialize_tracker_dependencies; + use crate::app_test::initialize_tracker_dependencies; use crate::core::services::{statistics, tracker_factory}; use crate::core::statistics::event::sender::Sender; use crate::core::Tracker; diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 6ff8a61cf..553b50882 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -126,7 +126,7 @@ mod tests { use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; - use crate::bootstrap::app::initialize_tracker_dependencies; + use crate::app_test::initialize_tracker_dependencies; use crate::core::services::{statistics, tracker_factory}; use crate::core::Tracker; diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 45bcb5843..263f5dbc7 
100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -65,7 +65,7 @@ mod tests { use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; - use crate::bootstrap::app::initialize_tracker_dependencies; + use crate::app_test::initialize_tracker_dependencies; use crate::core::services::{statistics, tracker_factory}; use crate::core::statistics::event::sender::Sender; use crate::core::Tracker; @@ -123,7 +123,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; - use crate::bootstrap::app::initialize_tracker_dependencies; + use crate::app_test::initialize_tracker_dependencies; use crate::core::{statistics, PeersWanted, Tracker}; use crate::servers::http::v1::services::announce::invoke; use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 9805dd8a4..61542478d 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -80,7 +80,7 @@ mod tests { use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; - use crate::bootstrap::app::initialize_tracker_dependencies; + use crate::app_test::initialize_tracker_dependencies; use crate::core::services::tracker_factory; use crate::core::Tracker; diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 9883de54b..cad246b6f 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -470,7 +470,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::gen_remote_fingerprint; - use crate::bootstrap::app::initialize_tracker_dependencies; + use crate::app_test::initialize_tracker_dependencies; use crate::core::services::{statistics, tracker_factory}; use 
crate::core::statistics::event::sender::Sender; use crate::core::Tracker; @@ -1318,7 +1318,7 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; use mockall::predicate::eq; - use crate::bootstrap::app::initialize_tracker_dependencies; + use crate::app_test::initialize_tracker_dependencies; use crate::core::{self, statistics}; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_announce; From c45a12b9f595e9ac091701e11d6ea416b18e1aeb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Jan 2025 17:38:16 +0000 Subject: [PATCH 105/802] refactor: [#1187] rename fn tracker_factory to initialize_tracker --- src/bootstrap/app.rs | 5 +++-- src/bootstrap/jobs/http_tracker.rs | 4 ++-- src/bootstrap/jobs/tracker_apis.rs | 4 ++-- src/container.rs | 2 ++ src/core/mod.rs | 10 +++++----- src/core/services/mod.rs | 2 +- src/core/services/statistics/mod.rs | 4 ++-- src/core/services/torrent.rs | 18 +++++++++--------- src/servers/apis/server.rs | 4 ++-- src/servers/http/server.rs | 4 ++-- src/servers/http/v1/handlers/announce.rs | 10 +++++----- src/servers/http/v1/handlers/scrape.rs | 10 +++++----- src/servers/http/v1/services/announce.rs | 4 ++-- src/servers/http/v1/services/scrape.rs | 4 ++-- src/servers/udp/handlers.rs | 4 ++-- src/servers/udp/server/mod.rs | 6 +++--- tests/servers/api/environment.rs | 23 +++++++---------------- tests/servers/http/environment.rs | 4 ++-- tests/servers/udp/environment.rs | 4 ++-- 19 files changed, 60 insertions(+), 66 deletions(-) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index fb3f50b65..d17aec3a7 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -22,7 +22,7 @@ use tracing::instrument; use super::config::initialize_configuration; use crate::bootstrap; use crate::container::AppContainer; -use crate::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; +use 
crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist, statistics}; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; use crate::shared::crypto::ephemeral_instance_keys; @@ -82,13 +82,14 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let database = initialize_database(configuration); let whitelist_manager = initialize_whitelist(database.clone()); - let tracker = Arc::new(tracker_factory(configuration, &database, &whitelist_manager)); + let tracker = Arc::new(initialize_tracker(configuration, &database, &whitelist_manager)); AppContainer { tracker, ban_service, stats_event_sender, stats_repository, + whitelist_manager, } } diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 0714cbd72..d32e8d4aa 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -88,7 +88,7 @@ mod tests { use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::http_tracker::start_job; - use crate::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; + use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist, statistics}; use crate::servers::http::Version; use crate::servers::registar::Registar; @@ -105,7 +105,7 @@ mod tests { let database = initialize_database(&cfg); let whitelist_manager = initialize_whitelist(database.clone()); - let tracker = Arc::new(tracker_factory(&cfg, &database, &whitelist_manager)); + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_manager)); let version = Version::V1; diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index dabed1509..9c284fbfc 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ 
b/src/bootstrap/jobs/tracker_apis.rs @@ -142,7 +142,7 @@ mod tests { use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::tracker_apis::start_job; - use crate::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; + use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist, statistics}; use crate::servers::apis::Version; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; @@ -162,7 +162,7 @@ mod tests { let database = initialize_database(&cfg); let whitelist_manager = initialize_whitelist(database.clone()); - let tracker = Arc::new(tracker_factory(&cfg, &database, &whitelist_manager)); + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_manager)); let version = Version::V1; diff --git a/src/container.rs b/src/container.rs index 961b32a12..7a2b86d18 100644 --- a/src/container.rs +++ b/src/container.rs @@ -4,6 +4,7 @@ use tokio::sync::RwLock; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; +use crate::core::whitelist::WhiteListManager; use crate::core::Tracker; use crate::servers::udp::server::banning::BanService; @@ -12,4 +13,5 @@ pub struct AppContainer { pub ban_service: Arc>, pub stats_event_sender: Arc>>, pub stats_repository: Arc, + pub whitelist_manager: Arc, } diff --git a/src/core/mod.rs b/src/core/mod.rs index a5f9988b9..e802b2c43 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -1082,26 +1082,26 @@ mod tests { use crate::app_test::initialize_tracker_dependencies; use crate::core::peer::Peer; - use crate::core::services::tracker_factory; + use crate::core::services::initialize_tracker; use crate::core::whitelist::WhiteListManager; use crate::core::{TorrentsMetrics, Tracker}; fn public_tracker() -> Tracker { let config = configuration::ephemeral_public(); let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - 
tracker_factory(&config, &database, &whitelist_manager) + initialize_tracker(&config, &database, &whitelist_manager) } fn private_tracker() -> Tracker { let config = configuration::ephemeral_private(); let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager) + initialize_tracker(&config, &database, &whitelist_manager) } fn whitelisted_tracker() -> (Tracker, Arc) { let config = configuration::ephemeral_listed(); let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = tracker_factory(&config, &database, &whitelist_manager); + let tracker = initialize_tracker(&config, &database, &whitelist_manager); (tracker, whitelist_manager) } @@ -1110,7 +1110,7 @@ mod tests { let mut config = configuration::ephemeral_listed(); config.core.tracker_policy.persistent_torrent_completed_stat = true; let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager) + initialize_tracker(&config, &database, &whitelist_manager) } fn sample_info_hash() -> InfoHash { diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index d3336068c..fd301b62d 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -24,7 +24,7 @@ use crate::core::Tracker; /// /// Will panic if tracker cannot be instantiated. 
#[must_use] -pub fn tracker_factory( +pub fn initialize_tracker( config: &Configuration, database: &Arc>, whitelist_manager: &Arc, diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index fd185a727..1e0403c2a 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -119,8 +119,8 @@ mod tests { use crate::app_test::initialize_tracker_dependencies; use crate::core; + use crate::core::services::initialize_tracker; use crate::core::services::statistics::{self, get_metrics, TrackerMetrics}; - use crate::core::services::tracker_factory; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; @@ -136,7 +136,7 @@ mod tests { let (_stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_repository = Arc::new(stats_repository); - let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); + let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_manager)); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 445b8fb8f..593d8be8c 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -130,9 +130,9 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; + use crate::core::services::initialize_tracker; use crate::core::services::torrent::tests::sample_peer; use crate::core::services::torrent::{get_torrent_info, Info}; - use crate::core::services::tracker_factory; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() @@ -143,7 +143,7 @@ mod tests { let config = tracker_configuration(); let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = tracker_factory(&config, &database, 
&whitelist_manager); + let tracker = initialize_tracker(&config, &database, &whitelist_manager); let tracker = Arc::new(tracker); @@ -161,7 +161,7 @@ mod tests { let config = tracker_configuration(); let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); + let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_manager)); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -192,9 +192,9 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; + use crate::core::services::initialize_tracker; use crate::core::services::torrent::tests::sample_peer; use crate::core::services::torrent::{get_torrents_page, BasicInfo, Pagination}; - use crate::core::services::tracker_factory; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() @@ -205,7 +205,7 @@ mod tests { let config = tracker_configuration(); let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); + let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_manager)); let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; @@ -217,7 +217,7 @@ mod tests { let config = tracker_configuration(); let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); + let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_manager)); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -242,7 +242,7 @@ mod tests { let config = tracker_configuration(); let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - 
let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); + let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_manager)); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -265,7 +265,7 @@ mod tests { let config = tracker_configuration(); let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); + let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_manager)); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -297,7 +297,7 @@ mod tests { let config = tracker_configuration(); let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(tracker_factory(&config, &database, &whitelist_manager)); + let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_manager)); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index e74b2265e..c4fae6ebf 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -335,7 +335,7 @@ mod tests { use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::make_rust_tls; - use crate::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; + use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist, statistics}; use crate::servers::apis::server::{ApiServer, Launcher}; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; @@ -355,7 +355,7 @@ mod tests { let database = initialize_database(&cfg); let whitelist_manager = initialize_whitelist(database.clone()); - let tracker = 
Arc::new(tracker_factory(&cfg, &database, &whitelist_manager)); + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_manager)); let bind_to = config.bind_address; diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 1cb8d7adb..82b65c2ff 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -244,7 +244,7 @@ mod tests { use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::make_rust_tls; - use crate::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; + use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist, statistics}; use crate::servers::http::server::{HttpServer, Launcher}; use crate::servers::registar::Registar; @@ -259,7 +259,7 @@ mod tests { let database = initialize_database(&cfg); let whitelist_manager = initialize_whitelist(database.clone()); - let tracker = Arc::new(tracker_factory(&cfg, &database, &whitelist_manager)); + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_manager)); let http_trackers = cfg.http_trackers.clone().expect("missing HTTP trackers configuration"); let config = &http_trackers[0]; diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 6cfe0871f..24beadbc2 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -206,7 +206,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; - use crate::core::services::{statistics, tracker_factory}; + use crate::core::services::{initialize_tracker, statistics}; use crate::core::statistics::event::sender::Sender; use crate::core::Tracker; @@ -216,7 +216,7 @@ mod tests { let (database, whitelist_manager) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - 
(tracker_factory(&config, &database, &whitelist_manager), stats_event_sender) + (initialize_tracker(&config, &database, &whitelist_manager), stats_event_sender) } fn whitelisted_tracker() -> (Tracker, Option>) { @@ -225,7 +225,7 @@ mod tests { let (database, whitelist_manager) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - (tracker_factory(&config, &database, &whitelist_manager), stats_event_sender) + (initialize_tracker(&config, &database, &whitelist_manager), stats_event_sender) } fn tracker_on_reverse_proxy() -> (Tracker, Option>) { @@ -234,7 +234,7 @@ mod tests { let (database, whitelist_manager) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - (tracker_factory(&config, &database, &whitelist_manager), stats_event_sender) + (initialize_tracker(&config, &database, &whitelist_manager), stats_event_sender) } fn tracker_not_on_reverse_proxy() -> (Tracker, Option>) { @@ -243,7 +243,7 @@ mod tests { let (database, whitelist_manager) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - (tracker_factory(&config, &database, &whitelist_manager), stats_event_sender) + (initialize_tracker(&config, &database, &whitelist_manager), stats_event_sender) } fn sample_announce_request() -> Announce { diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 553b50882..a5cf58129 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -127,7 +127,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; - use crate::core::services::{statistics, tracker_factory}; + use crate::core::services::{initialize_tracker, 
statistics}; use crate::core::Tracker; fn private_tracker() -> (Tracker, Option>) { @@ -136,7 +136,7 @@ mod tests { let (database, whitelist_manager) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - (tracker_factory(&config, &database, &whitelist_manager), stats_event_sender) + (initialize_tracker(&config, &database, &whitelist_manager), stats_event_sender) } fn whitelisted_tracker() -> (Tracker, Option>) { @@ -145,7 +145,7 @@ mod tests { let (database, whitelist_manager) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - (tracker_factory(&config, &database, &whitelist_manager), stats_event_sender) + (initialize_tracker(&config, &database, &whitelist_manager), stats_event_sender) } fn tracker_on_reverse_proxy() -> (Tracker, Option>) { @@ -154,7 +154,7 @@ mod tests { let (database, whitelist_manager) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - (tracker_factory(&config, &database, &whitelist_manager), stats_event_sender) + (initialize_tracker(&config, &database, &whitelist_manager), stats_event_sender) } fn tracker_not_on_reverse_proxy() -> (Tracker, Option>) { @@ -163,7 +163,7 @@ mod tests { let (database, whitelist_manager) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - (tracker_factory(&config, &database, &whitelist_manager), stats_event_sender) + (initialize_tracker(&config, &database, &whitelist_manager), stats_event_sender) } fn sample_scrape_request() -> Scrape { diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 263f5dbc7..63a904182 100644 --- 
a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -66,7 +66,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; - use crate::core::services::{statistics, tracker_factory}; + use crate::core::services::{initialize_tracker, statistics}; use crate::core::statistics::event::sender::Sender; use crate::core::Tracker; @@ -77,7 +77,7 @@ mod tests { let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); - let tracker = tracker_factory(&config, &database, &whitelist_manager); + let tracker = initialize_tracker(&config, &database, &whitelist_manager); (tracker, stats_event_sender) } diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 61542478d..56c18cbb3 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -81,7 +81,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; - use crate::core::services::tracker_factory; + use crate::core::services::initialize_tracker; use crate::core::Tracker; fn public_tracker() -> Tracker { @@ -89,7 +89,7 @@ mod tests { let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - tracker_factory(&config, &database, &whitelist_manager) + initialize_tracker(&config, &database, &whitelist_manager) } fn sample_info_hashes() -> Vec { diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index cad246b6f..a7d964391 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -471,7 +471,7 @@ mod tests { use super::gen_remote_fingerprint; use crate::app_test::initialize_tracker_dependencies; - use crate::core::services::{statistics, tracker_factory}; + use crate::core::services::{initialize_tracker, statistics}; use 
crate::core::statistics::event::sender::Sender; use crate::core::Tracker; use crate::CurrentClock; @@ -496,7 +496,7 @@ mod tests { let (database, whitelist_manager) = initialize_tracker_dependencies(config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - (tracker_factory(config, &database, &whitelist_manager), stats_event_sender) + (initialize_tracker(config, &database, &whitelist_manager), stats_event_sender) } fn sample_ipv4_remote_addr() -> SocketAddr { diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index d5fe554ea..b5da9d326 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -64,7 +64,7 @@ mod tests { use super::spawner::Spawner; use super::Server; use crate::bootstrap::app::initialize_global_services; - use crate::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; + use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist, statistics}; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; @@ -81,7 +81,7 @@ mod tests { let database = initialize_database(&cfg); let whitelist_manager = initialize_whitelist(database.clone()); - let tracker = Arc::new(tracker_factory(&cfg, &database, &whitelist_manager)); + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_manager)); let udp_trackers = cfg.udp_trackers.clone().expect("missing UDP trackers configuration"); let config = &udp_trackers[0]; @@ -120,7 +120,7 @@ mod tests { let database = initialize_database(&cfg); let whitelist_manager = initialize_whitelist(database.clone()); - let tracker = Arc::new(tracker_factory(&cfg, &database, &whitelist_manager)); + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_manager)); let config = 
&cfg.udp_trackers.as_ref().unwrap().first().unwrap(); let bind_to = config.bind_address; diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index b230562f3..dcfe526f1 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -6,9 +6,8 @@ use futures::executor::block_on; use tokio::sync::RwLock; use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; use torrust_tracker_configuration::{Configuration, HttpApi}; -use torrust_tracker_lib::bootstrap::app::initialize_global_services; +use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; -use torrust_tracker_lib::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::whitelist::WhiteListManager; @@ -16,7 +15,6 @@ use torrust_tracker_lib::core::Tracker; use torrust_tracker_lib::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_lib::servers::udp::server::banning::BanService; -use torrust_tracker_lib::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; use torrust_tracker_primitives::peer; pub struct Environment @@ -45,16 +43,9 @@ where impl Environment { pub fn new(configuration: &Arc) -> Self { - let (stats_event_sender, stats_repository) = statistics::setup::factory(configuration.core.tracker_usage_statistics); - let stats_event_sender = Arc::new(stats_event_sender); - let stats_repository = Arc::new(stats_repository); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - initialize_global_services(configuration); - let database = initialize_database(configuration); - let whitelist_manager = 
initialize_whitelist(database.clone()); - let tracker = Arc::new(tracker_factory(configuration, &database, &whitelist_manager)); + let app_container = initialize_app_container(configuration); let config = Arc::new(configuration.http_api.clone().expect("missing API configuration")); @@ -66,11 +57,11 @@ impl Environment { Self { config, - tracker, - stats_event_sender, - stats_repository, - whitelist_manager, - ban_service, + tracker: app_container.tracker.clone(), + stats_event_sender: app_container.stats_event_sender.clone(), + stats_repository: app_container.stats_repository.clone(), + whitelist_manager: app_container.whitelist_manager.clone(), + ban_service: app_container.ban_service.clone(), registar: Registar::default(), server, } diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 3732e9341..f1f0e8247 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -5,7 +5,7 @@ use futures::executor::block_on; use torrust_tracker_configuration::{Configuration, HttpTracker}; use torrust_tracker_lib::bootstrap::app::initialize_global_services; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; -use torrust_tracker_lib::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; +use torrust_tracker_lib::core::services::{initialize_database, initialize_tracker, initialize_whitelist, statistics}; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::whitelist::WhiteListManager; @@ -42,7 +42,7 @@ impl Environment { let database = initialize_database(configuration); let whitelist_manager = initialize_whitelist(database.clone()); - let tracker = Arc::new(tracker_factory(configuration, &database, &whitelist_manager)); + let tracker = Arc::new(initialize_tracker(configuration, &database, &whitelist_manager)); let http_tracker = configuration .http_trackers diff 
--git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 34841df16..10ef97f47 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -5,7 +5,7 @@ use bittorrent_primitives::info_hash::InfoHash; use tokio::sync::RwLock; use torrust_tracker_configuration::{Configuration, UdpTracker, DEFAULT_TIMEOUT}; use torrust_tracker_lib::bootstrap::app::initialize_global_services; -use torrust_tracker_lib::core::services::{initialize_database, initialize_whitelist, statistics, tracker_factory}; +use torrust_tracker_lib::core::services::{initialize_database, initialize_tracker, initialize_whitelist, statistics}; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::Tracker; @@ -53,7 +53,7 @@ impl Environment { let database = initialize_database(configuration); let whitelist_manager = initialize_whitelist(database.clone()); - let tracker = Arc::new(tracker_factory(configuration, &database, &whitelist_manager)); + let tracker = Arc::new(initialize_tracker(configuration, &database, &whitelist_manager)); let udp_tracker = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); From 3d0f4f820bcb7ff1348f59ad647e2efe5179de49 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Jan 2025 17:54:56 +0000 Subject: [PATCH 106/802] refactor: [#1187] use AppContainer in test environments --- tests/servers/http/environment.rs | 19 ++++++------------- tests/servers/udp/environment.rs | 21 ++++++--------------- 2 files changed, 12 insertions(+), 28 deletions(-) diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index f1f0e8247..131fe4ac1 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -3,9 +3,8 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use futures::executor::block_on; use 
torrust_tracker_configuration::{Configuration, HttpTracker}; -use torrust_tracker_lib::bootstrap::app::initialize_global_services; +use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; -use torrust_tracker_lib::core::services::{initialize_database, initialize_tracker, initialize_whitelist, statistics}; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::whitelist::WhiteListManager; @@ -34,15 +33,9 @@ impl Environment { impl Environment { #[allow(dead_code)] pub fn new(configuration: &Arc) -> Self { - let (stats_event_sender, stats_repository) = statistics::setup::factory(configuration.core.tracker_usage_statistics); - let stats_event_sender = Arc::new(stats_event_sender); - let stats_repository = Arc::new(stats_repository); - initialize_global_services(configuration); - let database = initialize_database(configuration); - let whitelist_manager = initialize_whitelist(database.clone()); - let tracker = Arc::new(initialize_tracker(configuration, &database, &whitelist_manager)); + let app_container = initialize_app_container(configuration); let http_tracker = configuration .http_trackers @@ -59,10 +52,10 @@ impl Environment { Self { config, - tracker, - stats_event_sender, - stats_repository, - whitelist_manager, + tracker: app_container.tracker.clone(), + stats_event_sender: app_container.stats_event_sender.clone(), + stats_repository: app_container.stats_repository.clone(), + whitelist_manager: app_container.whitelist_manager.clone(), registar: Registar::default(), server, } diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 10ef97f47..81e626e1c 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -4,14 +4,12 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use 
tokio::sync::RwLock; use torrust_tracker_configuration::{Configuration, UdpTracker, DEFAULT_TIMEOUT}; -use torrust_tracker_lib::bootstrap::app::initialize_global_services; -use torrust_tracker_lib::core::services::{initialize_database, initialize_tracker, initialize_whitelist, statistics}; +use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::Tracker; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_lib::servers::udp::server::banning::BanService; -use torrust_tracker_lib::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; use torrust_tracker_lib::servers::udp::server::spawner::Spawner; use torrust_tracker_lib::servers::udp::server::states::{Running, Stopped}; use torrust_tracker_lib::servers::udp::server::Server; @@ -44,16 +42,9 @@ where impl Environment { #[allow(dead_code)] pub fn new(configuration: &Arc) -> Self { - let (stats_event_sender, stats_repository) = statistics::setup::factory(configuration.core.tracker_usage_statistics); - let stats_event_sender = Arc::new(stats_event_sender); - let stats_repository = Arc::new(stats_repository); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - initialize_global_services(configuration); - let database = initialize_database(configuration); - let whitelist_manager = initialize_whitelist(database.clone()); - let tracker = Arc::new(initialize_tracker(configuration, &database, &whitelist_manager)); + let app_container = initialize_app_container(configuration); let udp_tracker = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); @@ -65,10 +56,10 @@ impl Environment { Self { config, - tracker, - stats_event_sender, - stats_repository, - ban_service, + tracker: app_container.tracker.clone(), + 
stats_event_sender: app_container.stats_event_sender.clone(), + stats_repository: app_container.stats_repository.clone(), + ban_service: app_container.ban_service.clone(), registar: Registar::default(), server, } From 20018abe446af1503463a9ac99631c616a0618dd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 17 Jan 2025 18:14:57 +0000 Subject: [PATCH 107/802] fix: [#1187] doc link error --- src/bootstrap/app.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index d17aec3a7..53bf44f79 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -28,7 +28,7 @@ use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; use crate::shared::crypto::ephemeral_instance_keys; use crate::shared::crypto::keys::{self, Keeper as _}; -/// It loads the configuration from the environment and builds the main domain [`Tracker`] struct. +/// It loads the configuration from the environment and builds app container. /// /// # Panics /// From c35c1243f9cdfee45b98ee253749e352e2819eec Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 20 Jan 2025 08:48:30 +0000 Subject: [PATCH 108/802] refactor: [#1189] new whitelist::repository module --- src/core/services/mod.rs | 2 +- src/core/whitelist/mod.rs | 7 +++---- src/core/whitelist/{ => repository}/in_memory.rs | 2 +- src/core/whitelist/repository/mod.rs | 2 ++ src/core/whitelist/{ => repository}/persisted.rs | 2 +- 5 files changed, 8 insertions(+), 7 deletions(-) rename src/core/whitelist/{ => repository}/in_memory.rs (97%) create mode 100644 src/core/whitelist/repository/mod.rs rename src/core/whitelist/{ => repository}/persisted.rs (97%) diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index fd301b62d..cfb62d625 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -14,7 +14,7 @@ use torrust_tracker_configuration::v2_0_0::database; use torrust_tracker_configuration::Configuration; use super::databases::{self, Database}; 
-use super::whitelist::persisted::DatabaseWhitelist; +use super::whitelist::repository::persisted::DatabaseWhitelist; use super::whitelist::WhiteListManager; use crate::core::Tracker; diff --git a/src/core/whitelist/mod.rs b/src/core/whitelist/mod.rs index 3a88b404c..1504838dc 100644 --- a/src/core/whitelist/mod.rs +++ b/src/core/whitelist/mod.rs @@ -1,11 +1,10 @@ -pub mod in_memory; -pub mod persisted; +pub mod repository; use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; -use in_memory::InMemoryWhitelist; -use persisted::DatabaseWhitelist; +use repository::in_memory::InMemoryWhitelist; +use repository::persisted::DatabaseWhitelist; use super::databases::{self}; diff --git a/src/core/whitelist/in_memory.rs b/src/core/whitelist/repository/in_memory.rs similarity index 97% rename from src/core/whitelist/in_memory.rs rename to src/core/whitelist/repository/in_memory.rs index 78e0eb11f..8d919f1e4 100644 --- a/src/core/whitelist/in_memory.rs +++ b/src/core/whitelist/repository/in_memory.rs @@ -34,7 +34,7 @@ impl InMemoryWhitelist { mod tests { use bittorrent_primitives::info_hash::InfoHash; - use crate::core::whitelist::in_memory::InMemoryWhitelist; + use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; fn sample_info_hash() -> InfoHash { "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() // # DevSkim: ignore DS173237 diff --git a/src/core/whitelist/repository/mod.rs b/src/core/whitelist/repository/mod.rs new file mode 100644 index 000000000..51723b68d --- /dev/null +++ b/src/core/whitelist/repository/mod.rs @@ -0,0 +1,2 @@ +pub mod in_memory; +pub mod persisted; diff --git a/src/core/whitelist/persisted.rs b/src/core/whitelist/repository/persisted.rs similarity index 97% rename from src/core/whitelist/persisted.rs rename to src/core/whitelist/repository/persisted.rs index 993060139..fd56d56b5 100644 --- a/src/core/whitelist/persisted.rs +++ b/src/core/whitelist/repository/persisted.rs @@ -2,7 +2,7 @@ use std::sync::Arc; 
use bittorrent_primitives::info_hash::InfoHash; -use super::databases::{self, Database}; +use crate::core::databases::{self, Database}; /// The persisted list of allowed torrents. pub struct DatabaseWhitelist { From 597c9862cf4b3d07a470d12898aa43020f31991a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 20 Jan 2025 08:53:52 +0000 Subject: [PATCH 109/802] refactor: [#1189] extract mod whitelist::manager --- src/app_test.rs | 2 +- src/container.rs | 2 +- src/core/mod.rs | 4 +- src/core/services/mod.rs | 2 +- src/core/whitelist/manager.rs | 91 ++++++++++++++++++ src/core/whitelist/mod.rs | 93 +------------------ .../apis/v1/context/whitelist/handlers.rs | 2 +- tests/servers/api/environment.rs | 2 +- tests/servers/http/environment.rs | 2 +- 9 files changed, 100 insertions(+), 100 deletions(-) create mode 100644 src/core/whitelist/manager.rs diff --git a/src/app_test.rs b/src/app_test.rs index c50f87965..92f64cc7b 100644 --- a/src/app_test.rs +++ b/src/app_test.rs @@ -5,7 +5,7 @@ use torrust_tracker_configuration::Configuration; use crate::core::databases::Database; use crate::core::services::{initialize_database, initialize_whitelist}; -use crate::core::whitelist::WhiteListManager; +use crate::core::whitelist::manager::WhiteListManager; /// Initialize the tracker dependencies. 
#[allow(clippy::type_complexity)] diff --git a/src/container.rs b/src/container.rs index 7a2b86d18..3f7028d4b 100644 --- a/src/container.rs +++ b/src/container.rs @@ -4,7 +4,7 @@ use tokio::sync::RwLock; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; -use crate::core::whitelist::WhiteListManager; +use crate::core::whitelist::manager::WhiteListManager; use crate::core::Tracker; use crate::servers::udp::server::banning::BanService; diff --git a/src/core/mod.rs b/src/core/mod.rs index e802b2c43..0349fd935 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -468,7 +468,7 @@ use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_torrent_repository::entry::EntrySync; use torrust_tracker_torrent_repository::repository::Repository; use tracing::instrument; -use whitelist::WhiteListManager; +use whitelist::manager::WhiteListManager; use self::auth::Key; use self::error::Error; @@ -1083,7 +1083,7 @@ mod tests { use crate::app_test::initialize_tracker_dependencies; use crate::core::peer::Peer; use crate::core::services::initialize_tracker; - use crate::core::whitelist::WhiteListManager; + use crate::core::whitelist::manager::WhiteListManager; use crate::core::{TorrentsMetrics, Tracker}; fn public_tracker() -> Tracker { diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index cfb62d625..e0f67305a 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -14,8 +14,8 @@ use torrust_tracker_configuration::v2_0_0::database; use torrust_tracker_configuration::Configuration; use super::databases::{self, Database}; +use super::whitelist::manager::WhiteListManager; use super::whitelist::repository::persisted::DatabaseWhitelist; -use super::whitelist::WhiteListManager; use crate::core::Tracker; /// It returns a new tracker building its dependencies. 
diff --git a/src/core/whitelist/manager.rs b/src/core/whitelist/manager.rs new file mode 100644 index 000000000..832af6892 --- /dev/null +++ b/src/core/whitelist/manager.rs @@ -0,0 +1,91 @@ +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; + +use super::repository::in_memory::InMemoryWhitelist; +use super::repository::persisted::DatabaseWhitelist; +use crate::core::databases; + +/// It handles the list of allowed torrents. Only for listed trackers. +pub struct WhiteListManager { + /// The in-memory list of allowed torrents. + in_memory_whitelist: InMemoryWhitelist, + + /// The persisted list of allowed torrents. + database_whitelist: Arc, +} + +impl WhiteListManager { + #[must_use] + pub fn new(database_whitelist: Arc) -> Self { + Self { + in_memory_whitelist: InMemoryWhitelist::default(), + database_whitelist, + } + } + + /// It adds a torrent to the whitelist. + /// Adding torrents is not relevant to public trackers. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. + pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + self.database_whitelist.add(info_hash)?; + self.in_memory_whitelist.add(info_hash).await; + Ok(()) + } + + /// It removes a torrent from the whitelist. + /// Removing torrents is not relevant to public trackers. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. + pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + self.database_whitelist.remove(info_hash)?; + self.in_memory_whitelist.remove(info_hash).await; + Ok(()) + } + + /// It removes a torrent from the whitelist in the database. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. 
+ pub fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + self.database_whitelist.remove(info_hash) + } + + /// It adds a torrent from the whitelist in memory. + pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { + self.in_memory_whitelist.add(info_hash).await + } + + /// It removes a torrent from the whitelist in memory. + pub async fn remove_torrent_from_memory_whitelist(&self, info_hash: &InfoHash) -> bool { + self.in_memory_whitelist.remove(info_hash).await + } + + /// It checks if a torrent is whitelisted. + pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { + self.in_memory_whitelist.contains(info_hash).await + } + + /// It loads the whitelist from the database. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. + pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> { + let whitelisted_torrents_from_database = self.database_whitelist.load_from_database()?; + + self.in_memory_whitelist.clear().await; + + for info_hash in whitelisted_torrents_from_database { + let _: bool = self.in_memory_whitelist.add(&info_hash).await; + } + + Ok(()) + } +} diff --git a/src/core/whitelist/mod.rs b/src/core/whitelist/mod.rs index 1504838dc..faf83c87b 100644 --- a/src/core/whitelist/mod.rs +++ b/src/core/whitelist/mod.rs @@ -1,93 +1,2 @@ +pub mod manager; pub mod repository; - -use std::sync::Arc; - -use bittorrent_primitives::info_hash::InfoHash; -use repository::in_memory::InMemoryWhitelist; -use repository::persisted::DatabaseWhitelist; - -use super::databases::{self}; - -/// It handles the list of allowed torrents. Only for listed trackers. -pub struct WhiteListManager { - /// The in-memory list of allowed torrents. - in_memory_whitelist: InMemoryWhitelist, - - /// The persisted list of allowed torrents. 
- database_whitelist: Arc, -} - -impl WhiteListManager { - #[must_use] - pub fn new(database_whitelist: Arc) -> Self { - Self { - in_memory_whitelist: InMemoryWhitelist::default(), - database_whitelist, - } - } - - /// It adds a torrent to the whitelist. - /// Adding torrents is not relevant to public trackers. - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. - pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.database_whitelist.add(info_hash)?; - self.in_memory_whitelist.add(info_hash).await; - Ok(()) - } - - /// It removes a torrent from the whitelist. - /// Removing torrents is not relevant to public trackers. - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. - pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.database_whitelist.remove(info_hash)?; - self.in_memory_whitelist.remove(info_hash).await; - Ok(()) - } - - /// It removes a torrent from the whitelist in the database. - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. - pub fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.database_whitelist.remove(info_hash) - } - - /// It adds a torrent from the whitelist in memory. - pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { - self.in_memory_whitelist.add(info_hash).await - } - - /// It removes a torrent from the whitelist in memory. - pub async fn remove_torrent_from_memory_whitelist(&self, info_hash: &InfoHash) -> bool { - self.in_memory_whitelist.remove(info_hash).await - } - - /// It checks if a torrent is whitelisted. 
- pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { - self.in_memory_whitelist.contains(info_hash).await - } - - /// It loads the whitelist from the database. - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. - pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> { - let whitelisted_torrents_from_database = self.database_whitelist.load_from_database()?; - - self.in_memory_whitelist.clear().await; - - for info_hash in whitelisted_torrents_from_database { - let _: bool = self.in_memory_whitelist.add(&info_hash).await; - } - - Ok(()) - } -} diff --git a/src/servers/apis/v1/context/whitelist/handlers.rs b/src/servers/apis/v1/context/whitelist/handlers.rs index f548f5dc4..473ed56c5 100644 --- a/src/servers/apis/v1/context/whitelist/handlers.rs +++ b/src/servers/apis/v1/context/whitelist/handlers.rs @@ -10,7 +10,7 @@ use bittorrent_primitives::info_hash::InfoHash; use super::responses::{ failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, }; -use crate::core::whitelist::WhiteListManager; +use crate::core::whitelist::manager::WhiteListManager; use crate::servers::apis::v1::responses::{invalid_info_hash_param_response, ok_response}; use crate::servers::apis::InfoHashParam; diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index dcfe526f1..cf997eb7c 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -10,7 +10,7 @@ use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_g use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; -use torrust_tracker_lib::core::whitelist::WhiteListManager; +use 
torrust_tracker_lib::core::whitelist::manager::WhiteListManager; use torrust_tracker_lib::core::Tracker; use torrust_tracker_lib::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; use torrust_tracker_lib::servers::registar::Registar; diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 131fe4ac1..d68924e07 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -7,7 +7,7 @@ use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_g use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; -use torrust_tracker_lib::core::whitelist::WhiteListManager; +use torrust_tracker_lib::core::whitelist::manager::WhiteListManager; use torrust_tracker_lib::core::Tracker; use torrust_tracker_lib::servers::http::server::{HttpServer, Launcher, Running, Stopped}; use torrust_tracker_lib::servers::registar::Registar; From a5f41fc1c18c998f88b266daeb247a555f817a00 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 20 Jan 2025 13:36:47 +0000 Subject: [PATCH 110/802] refactor: [#1189] extract whitelist::authorization::Authorization --- src/app.rs | 4 +- src/app_test.rs | 21 +- src/bootstrap/app.rs | 15 +- src/bootstrap/jobs/http_tracker.rs | 47 +++-- src/bootstrap/jobs/tracker_apis.rs | 26 ++- src/bootstrap/jobs/udp_tracker.rs | 14 +- src/container.rs | 3 +- src/core/mod.rs | 85 +++----- src/core/services/mod.rs | 13 +- src/core/services/statistics/mod.rs | 7 +- src/core/services/torrent.rs | 28 +-- src/core/whitelist/authorization.rs | 59 ++++++ src/core/whitelist/manager.rs | 6 +- src/core/whitelist/mod.rs | 1 + src/servers/apis/routes.rs | 12 +- src/servers/apis/server.rs | 23 ++- .../apis/v1/context/whitelist/routes.rs | 10 +- src/servers/apis/v1/routes.rs | 4 +- src/servers/http/server.rs | 25 ++- src/servers/http/v1/handlers/announce.rs | 87 
+++++---- src/servers/http/v1/handlers/scrape.rs | 28 ++- src/servers/http/v1/routes.rs | 23 ++- src/servers/http/v1/services/announce.rs | 8 +- src/servers/http/v1/services/scrape.rs | 8 +- src/servers/udp/handlers.rs | 184 +++++++++++------- src/servers/udp/server/launcher.rs | 20 +- src/servers/udp/server/mod.rs | 24 ++- src/servers/udp/server/processor.rs | 6 +- src/servers/udp/server/spawner.rs | 5 +- src/servers/udp/server/states.rs | 6 +- tests/servers/api/environment.rs | 1 + tests/servers/http/environment.rs | 13 +- tests/servers/udp/environment.rs | 7 +- 33 files changed, 551 insertions(+), 272 deletions(-) create mode 100644 src/core/whitelist/authorization.rs diff --git a/src/app.rs b/src/app.rs index 64119aa34..289db1fdc 100644 --- a/src/app.rs +++ b/src/app.rs @@ -61,7 +61,6 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< // Load whitelisted torrents if app_container.tracker.is_listed() { app_container - .tracker .whitelist_manager .load_whitelist_from_database() .await @@ -81,6 +80,7 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< udp_tracker::start_job( udp_tracker_config, app_container.tracker.clone(), + app_container.whitelist_authorization.clone(), app_container.stats_event_sender.clone(), app_container.ban_service.clone(), registar.give_form(), @@ -99,6 +99,7 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< if let Some(job) = http_tracker::start_job( http_tracker_config, app_container.tracker.clone(), + app_container.whitelist_authorization.clone(), app_container.stats_event_sender.clone(), registar.give_form(), servers::http::Version::V1, @@ -117,6 +118,7 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< if let Some(job) = tracker_apis::start_job( http_api_config, app_container.tracker.clone(), + app_container.whitelist_manager.clone(), app_container.ban_service.clone(), 
app_container.stats_event_sender.clone(), app_container.stats_repository.clone(), diff --git a/src/app_test.rs b/src/app_test.rs index 92f64cc7b..ffd55581e 100644 --- a/src/app_test.rs +++ b/src/app_test.rs @@ -4,15 +4,26 @@ use std::sync::Arc; use torrust_tracker_configuration::Configuration; use crate::core::databases::Database; -use crate::core::services::{initialize_database, initialize_whitelist}; -use crate::core::whitelist::manager::WhiteListManager; +use crate::core::services::initialize_database; +use crate::core::whitelist; +use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; /// Initialize the tracker dependencies. #[allow(clippy::type_complexity)] #[must_use] -pub fn initialize_tracker_dependencies(config: &Configuration) -> (Arc>, Arc) { +pub fn initialize_tracker_dependencies( + config: &Configuration, +) -> ( + Arc>, + Arc, + Arc, +) { let database = initialize_database(config); - let whitelist_manager = initialize_whitelist(database.clone()); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( + &config.core, + &in_memory_whitelist.clone(), + )); - (database, whitelist_manager) + (database, in_memory_whitelist, whitelist_authorization) } diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 53bf44f79..5dbdd15cb 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -22,7 +22,9 @@ use tracing::instrument; use super::config::initialize_configuration; use crate::bootstrap; use crate::container::AppContainer; -use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist, statistics}; +use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; +use crate::core::whitelist; +use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::servers::udp::server::banning::BanService; use 
crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; use crate::shared::crypto::ephemeral_instance_keys; @@ -81,11 +83,18 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { let stats_repository = Arc::new(stats_repository); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let database = initialize_database(configuration); - let whitelist_manager = initialize_whitelist(database.clone()); - let tracker = Arc::new(initialize_tracker(configuration, &database, &whitelist_manager)); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( + &configuration.core, + &in_memory_whitelist.clone(), + )); + let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + + let tracker = Arc::new(initialize_tracker(configuration, &database, &whitelist_authorization)); AppContainer { tracker, + whitelist_authorization, ban_service, stats_event_sender, stats_repository, diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index d32e8d4aa..dea866648 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -20,7 +20,7 @@ use tracing::instrument; use super::make_rust_tls; use crate::core::statistics::event::sender::Sender; -use crate::core::{self, statistics}; +use crate::core::{self, statistics, whitelist}; use crate::servers::http::server::{HttpServer, Launcher}; use crate::servers::http::Version; use crate::servers::registar::ServiceRegistrationForm; @@ -34,10 +34,11 @@ use crate::servers::registar::ServiceRegistrationForm; /// /// It would panic if the `config::HttpTracker` struct would contain inappropriate values. 
/// -#[instrument(skip(config, tracker, stats_event_sender, form))] +#[instrument(skip(config, tracker, whitelist_authorization, stats_event_sender, form))] pub async fn start_job( config: &HttpTracker, tracker: Arc, + whitelist_authorization: Arc, stats_event_sender: Arc>>, form: ServiceRegistrationForm, version: Version, @@ -49,21 +50,32 @@ pub async fn start_job( .map(|tls| tls.expect("it should have a valid http tracker tls configuration")); match version { - Version::V1 => Some(start_v1(socket, tls, tracker.clone(), stats_event_sender.clone(), form).await), + Version::V1 => Some( + start_v1( + socket, + tls, + tracker.clone(), + whitelist_authorization.clone(), + stats_event_sender.clone(), + form, + ) + .await, + ), } } #[allow(clippy::async_yields_async)] -#[instrument(skip(socket, tls, tracker, stats_event_sender, form))] +#[instrument(skip(socket, tls, tracker, whitelist_authorization, stats_event_sender, form))] async fn start_v1( socket: SocketAddr, tls: Option, tracker: Arc, + whitelist_authorization: Arc, stats_event_sender: Arc>>, form: ServiceRegistrationForm, ) -> JoinHandle<()> { let server = HttpServer::new(Launcher::new(socket, tls)) - .start(tracker, stats_event_sender, form) + .start(tracker, whitelist_authorization, stats_event_sender, form) .await .expect("it should be able to start to the http tracker"); @@ -88,7 +100,9 @@ mod tests { use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::http_tracker::start_job; - use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist, statistics}; + use crate::core::services::{initialize_database, initialize_tracker, statistics}; + use crate::core::whitelist; + use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::servers::http::Version; use crate::servers::registar::Registar; @@ -104,13 +118,24 @@ mod tests { initialize_global_services(&cfg); let database = initialize_database(&cfg); - let whitelist_manager = 
initialize_whitelist(database.clone()); - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_manager)); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( + &cfg.core, + &in_memory_whitelist.clone(), + )); + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); let version = Version::V1; - start_job(config, tracker, stats_event_sender, Registar::default().give_form(), version) - .await - .expect("it should be able to join to the http tracker start-job"); + start_job( + config, + tracker, + whitelist_authorization, + stats_event_sender, + Registar::default().give_form(), + version, + ) + .await + .expect("it should be able to join to the http tracker start-job"); } } diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 9c284fbfc..7e06829c4 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -30,9 +30,10 @@ use torrust_tracker_configuration::{AccessTokens, HttpApi}; use tracing::instrument; use super::make_rust_tls; -use crate::core; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; +use crate::core::whitelist::manager::WhiteListManager; +use crate::core::{self}; use crate::servers::apis::server::{ApiServer, Launcher}; use crate::servers::apis::Version; use crate::servers::registar::ServiceRegistrationForm; @@ -58,10 +59,12 @@ pub struct ApiServerJobStarted(); /// It would panic if unable to send the `ApiServerJobStarted` notice. 
/// /// -#[instrument(skip(config, tracker, ban_service, stats_event_sender, stats_repository, form))] +#[allow(clippy::too_many_arguments)] +#[instrument(skip(config, tracker, whitelist_manager, ban_service, stats_event_sender, stats_repository, form))] pub async fn start_job( config: &HttpApi, tracker: Arc, + whitelist_manager: Arc, ban_service: Arc>, stats_event_sender: Arc>>, stats_repository: Arc, @@ -82,6 +85,7 @@ pub async fn start_job( bind_to, tls, tracker.clone(), + whitelist_manager.clone(), ban_service.clone(), stats_event_sender.clone(), stats_repository.clone(), @@ -99,6 +103,7 @@ pub async fn start_job( socket, tls, tracker, + whitelist_manager, ban_service, stats_event_sender, stats_repository, @@ -109,6 +114,7 @@ async fn start_v1( socket: SocketAddr, tls: Option, tracker: Arc, + whitelist_manager: Arc, ban_service: Arc>, stats_event_sender: Arc>>, stats_repository: Arc, @@ -118,6 +124,7 @@ async fn start_v1( let server = ApiServer::new(Launcher::new(socket, tls)) .start( tracker, + whitelist_manager, stats_event_sender, stats_repository, ban_service, @@ -142,7 +149,9 @@ mod tests { use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::tracker_apis::start_job; - use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist, statistics}; + use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; + use crate::core::whitelist; + use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::servers::apis::Version; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; @@ -161,14 +170,21 @@ mod tests { initialize_global_services(&cfg); let database = initialize_database(&cfg); - let whitelist_manager = initialize_whitelist(database.clone()); - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_manager)); + let in_memory_whitelist = 
Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( + &cfg.core, + &in_memory_whitelist.clone(), + )); + let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); let version = Version::V1; start_job( config, tracker, + whitelist_manager, ban_service, stats_event_sender, stats_repository, diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 105c7f723..724e2043e 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -13,8 +13,8 @@ use tokio::task::JoinHandle; use torrust_tracker_configuration::UdpTracker; use tracing::instrument; -use crate::core; use crate::core::statistics::event::sender::Sender; +use crate::core::{self, whitelist}; use crate::servers::registar::ServiceRegistrationForm; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::spawner::Spawner; @@ -32,10 +32,11 @@ use crate::servers::udp::UDP_TRACKER_LOG_TARGET; /// It will panic if the task did not finish successfully. 
#[must_use] #[allow(clippy::async_yields_async)] -#[instrument(skip(config, tracker, stats_event_sender, ban_service, form))] +#[instrument(skip(config, tracker, whitelist_authorization, stats_event_sender, ban_service, form))] pub async fn start_job( config: &UdpTracker, tracker: Arc, + whitelist_authorization: Arc, stats_event_sender: Arc>>, ban_service: Arc>, form: ServiceRegistrationForm, @@ -44,7 +45,14 @@ pub async fn start_job( let cookie_lifetime = config.cookie_lifetime; let server = Server::new(Spawner::new(bind_to)) - .start(tracker, stats_event_sender, ban_service, form, cookie_lifetime) + .start( + tracker, + whitelist_authorization, + stats_event_sender, + ban_service, + form, + cookie_lifetime, + ) .await .expect("it should be able to start the udp tracker"); diff --git a/src/container.rs b/src/container.rs index 3f7028d4b..fd75601ae 100644 --- a/src/container.rs +++ b/src/container.rs @@ -5,11 +5,12 @@ use tokio::sync::RwLock; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; use crate::core::whitelist::manager::WhiteListManager; -use crate::core::Tracker; +use crate::core::{whitelist, Tracker}; use crate::servers::udp::server::banning::BanService; pub struct AppContainer { pub tracker: Arc, + pub whitelist_authorization: Arc, pub ban_service: Arc>, pub stats_event_sender: Arc>>, pub stats_repository: Arc, diff --git a/src/core/mod.rs b/src/core/mod.rs index 0349fd935..480d0e971 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -467,11 +467,8 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_torrent_repository::entry::EntrySync; use torrust_tracker_torrent_repository::repository::Repository; -use tracing::instrument; -use whitelist::manager::WhiteListManager; use self::auth::Key; -use self::error::Error; use self::torrent::Torrents; use crate::core::databases::Database; use 
crate::CurrentClock; @@ -496,8 +493,8 @@ pub struct Tracker { /// Tracker users' keys. Only for private trackers. keys: tokio::sync::RwLock>, - /// The list of allowed torrents. Only for listed trackers. - pub whitelist_manager: Arc, + /// The service to check is a torrent is whitelisted. + pub whitelist_authorization: Arc, /// The in-memory torrents repository. torrents: Arc, @@ -568,13 +565,13 @@ impl Tracker { pub fn new( config: &Core, database: &Arc>, - whitelist_manager: &Arc, + whitelist_authorization: &Arc, ) -> Result { Ok(Tracker { config: config.clone(), database: database.clone(), keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), - whitelist_manager: whitelist_manager.clone(), + whitelist_authorization: whitelist_authorization.clone(), torrents: Arc::default(), }) } @@ -663,7 +660,7 @@ impl Tracker { let mut scrape_data = ScrapeData::empty(); for info_hash in info_hashes { - let swarm_metadata = match self.authorize(info_hash).await { + let swarm_metadata = match self.whitelist_authorization.authorize(info_hash).await { Ok(()) => self.get_swarm_metadata(info_hash), Err(_) => SwarmMetadata::zeroed(), }; @@ -1018,31 +1015,6 @@ impl Tracker { Ok(()) } - /// Right now, there is only authorization when the `Tracker` runs in - /// `listed` or `private_listed` modes. - /// - /// # Context: Authorization - /// - /// # Errors - /// - /// Will return an error if the tracker is running in `listed` mode - /// and the infohash is not whitelisted. - #[instrument(skip(self, info_hash), err)] - pub async fn authorize(&self, info_hash: &InfoHash) -> Result<(), Error> { - if !self.is_listed() { - return Ok(()); - } - - if self.whitelist_manager.is_info_hash_whitelisted(info_hash).await { - return Ok(()); - } - - Err(Error::TorrentNotWhitelisted { - info_hash: *info_hash, - location: Location::caller(), - }) - } - /// It drops the database tables. 
/// /// # Errors @@ -1082,35 +1054,42 @@ mod tests { use crate::app_test::initialize_tracker_dependencies; use crate::core::peer::Peer; - use crate::core::services::initialize_tracker; + use crate::core::services::{initialize_tracker, initialize_whitelist_manager}; use crate::core::whitelist::manager::WhiteListManager; - use crate::core::{TorrentsMetrics, Tracker}; + use crate::core::{whitelist, TorrentsMetrics, Tracker}; fn public_tracker() -> Tracker { let config = configuration::ephemeral_public(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - initialize_tracker(&config, &database, &whitelist_manager) + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + + initialize_tracker(&config, &database, &whitelist_authorization) } fn private_tracker() -> Tracker { let config = configuration::ephemeral_private(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - initialize_tracker(&config, &database, &whitelist_manager) + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + + initialize_tracker(&config, &database, &whitelist_authorization) } - fn whitelisted_tracker() -> (Tracker, Arc) { + fn whitelisted_tracker() -> (Tracker, Arc, Arc) { let config = configuration::ephemeral_listed(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = initialize_tracker(&config, &database, &whitelist_manager); - (tracker, whitelist_manager) + let (database, in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + + let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + + let tracker = initialize_tracker(&config, &database, &whitelist_authorization); + + (tracker, whitelist_authorization, whitelist_manager) } pub fn tracker_persisting_torrents_in_database() -> Tracker { let mut config = 
configuration::ephemeral_listed(); config.core.tracker_policy.persistent_torrent_completed_stat = true; - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - initialize_tracker(&config, &database, &whitelist_manager) + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + + initialize_tracker(&config, &database, &whitelist_authorization) } fn sample_info_hash() -> InfoHash { @@ -1637,24 +1616,24 @@ mod tests { #[tokio::test] async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { - let (tracker, whitelist_manager) = whitelisted_tracker(); + let (tracker, _whitelist_authorization, whitelist_manager) = whitelisted_tracker(); let info_hash = sample_info_hash(); let result = whitelist_manager.add_torrent_to_whitelist(&info_hash).await; assert!(result.is_ok()); - let result = tracker.authorize(&info_hash).await; + let result = tracker.whitelist_authorization.authorize(&info_hash).await; assert!(result.is_ok()); } #[tokio::test] async fn it_should_not_authorize_the_announce_and_scrape_actions_on_not_whitelisted_torrents() { - let (tracker, _whitelist_manager) = whitelisted_tracker(); + let (tracker, _whitelist_authorization, _whitelist_manager) = whitelisted_tracker(); let info_hash = sample_info_hash(); - let result = tracker.authorize(&info_hash).await; + let result = tracker.whitelist_authorization.authorize(&info_hash).await; assert!(result.is_err()); } } @@ -1669,7 +1648,7 @@ mod tests { #[tokio::test] async fn it_should_add_a_torrent_to_the_whitelist() { - let (_tracker, whitelist_manager) = whitelisted_tracker(); + let (_tracker, _whitelist_authorization, whitelist_manager) = whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -1680,7 +1659,7 @@ mod tests { #[tokio::test] async fn it_should_remove_a_torrent_from_the_whitelist() { - let (_tracker, whitelist_manager) = whitelisted_tracker(); + let (_tracker, _whitelist_authorization, 
whitelist_manager) = whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -1696,7 +1675,7 @@ mod tests { #[tokio::test] async fn it_should_load_the_whitelist_from_the_database() { - let (_tracker, whitelist_manager) = whitelisted_tracker(); + let (_tracker, _whitelist_authorization, whitelist_manager) = whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -1739,7 +1718,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_zeroed_swarm_metadata_for_the_requested_file_if_it_is_not_whitelisted() { - let (tracker, _whitelist_manager) = whitelisted_tracker(); + let (tracker, _whitelist_authorization, _whitelist_manager) = whitelisted_tracker(); let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index e0f67305a..611ea24d2 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -14,7 +14,9 @@ use torrust_tracker_configuration::v2_0_0::database; use torrust_tracker_configuration::Configuration; use super::databases::{self, Database}; +use super::whitelist; use super::whitelist::manager::WhiteListManager; +use super::whitelist::repository::in_memory::InMemoryWhitelist; use super::whitelist::repository::persisted::DatabaseWhitelist; use crate::core::Tracker; @@ -27,9 +29,9 @@ use crate::core::Tracker; pub fn initialize_tracker( config: &Configuration, database: &Arc>, - whitelist_manager: &Arc, + whitelist_authorization: &Arc, ) -> Tracker { - match Tracker::new(&Arc::new(config).core, database, whitelist_manager) { + match Tracker::new(&Arc::new(config).core, database, whitelist_authorization) { Ok(tracker) => tracker, Err(error) => { panic!("{}", error) @@ -51,7 +53,10 @@ pub fn initialize_database(config: &Configuration) -> Arc> { } #[must_use] -pub fn initialize_whitelist(database: Arc>) -> Arc { +pub fn initialize_whitelist_manager( + database: Arc>, + in_memory_whitelist: Arc, +) -> Arc { let database_whitelist = 
Arc::new(DatabaseWhitelist::new(database)); - Arc::new(WhiteListManager::new(database_whitelist)) + Arc::new(WhiteListManager::new(database_whitelist, in_memory_whitelist)) } diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index 1e0403c2a..3567de2a9 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -118,9 +118,9 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; - use crate::core; use crate::core::services::initialize_tracker; use crate::core::services::statistics::{self, get_metrics, TrackerMetrics}; + use crate::core::{self}; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; @@ -132,11 +132,10 @@ mod tests { async fn the_statistics_service_should_return_the_tracker_metrics() { let config = tracker_configuration(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); let (_stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_repository = Arc::new(stats_repository); - - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_manager)); + let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 593d8be8c..457aa54d8 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -142,8 +142,8 @@ mod tests { async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { let config = tracker_configuration(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = 
initialize_tracker(&config, &database, &whitelist_manager); + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + let tracker = initialize_tracker(&config, &database, &whitelist_authorization); let tracker = Arc::new(tracker); @@ -160,8 +160,8 @@ mod tests { async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { let config = tracker_configuration(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_manager)); + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -204,8 +204,8 @@ mod tests { async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let config = tracker_configuration(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_manager)); + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; @@ -216,8 +216,8 @@ mod tests { async fn should_return_a_summarized_info_for_all_torrents() { let config = tracker_configuration(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_manager)); + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + let tracker = Arc::new(initialize_tracker(&config, &database, 
&whitelist_authorization)); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -241,8 +241,8 @@ mod tests { async fn should_allow_limiting_the_number_of_torrents_in_the_result() { let config = tracker_configuration(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_manager)); + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -264,8 +264,8 @@ mod tests { async fn should_allow_using_pagination_in_the_result() { let config = tracker_configuration(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_manager)); + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -296,8 +296,8 @@ mod tests { async fn should_return_torrents_ordered_by_info_hash() { let config = tracker_configuration(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_manager)); + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = 
InfoHash::from_str(&hash1).unwrap(); diff --git a/src/core/whitelist/authorization.rs b/src/core/whitelist/authorization.rs new file mode 100644 index 000000000..74029495f --- /dev/null +++ b/src/core/whitelist/authorization.rs @@ -0,0 +1,59 @@ +use std::panic::Location; +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_configuration::Core; +use tracing::instrument; + +use super::repository::in_memory::InMemoryWhitelist; +use crate::core::error::Error; + +pub struct Authorization { + /// Core tracker configuration. + config: Core, + + /// The in-memory list of allowed torrents. + in_memory_whitelist: Arc, +} + +impl Authorization { + /// Creates a new authorization instance. + pub fn new(config: &Core, in_memory_whitelist: &Arc) -> Self { + Self { + config: config.clone(), + in_memory_whitelist: in_memory_whitelist.clone(), + } + } + + /// It returns true if the torrent is authorized. + /// + /// # Errors + /// + /// Will return an error if the tracker is running in `listed` mode + /// and the infohash is not whitelisted. + #[instrument(skip(self, info_hash), err)] + pub async fn authorize(&self, info_hash: &InfoHash) -> Result<(), Error> { + if !self.is_listed() { + return Ok(()); + } + + if self.is_info_hash_whitelisted(info_hash).await { + return Ok(()); + } + + Err(Error::TorrentNotWhitelisted { + info_hash: *info_hash, + location: Location::caller(), + }) + } + + /// Returns `true` is the tracker is in listed mode. + fn is_listed(&self) -> bool { + self.config.listed + } + + /// It checks if a torrent is whitelisted. + async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { + self.in_memory_whitelist.contains(info_hash).await + } +} diff --git a/src/core/whitelist/manager.rs b/src/core/whitelist/manager.rs index 832af6892..757053f71 100644 --- a/src/core/whitelist/manager.rs +++ b/src/core/whitelist/manager.rs @@ -9,7 +9,7 @@ use crate::core::databases; /// It handles the list of allowed torrents. 
Only for listed trackers. pub struct WhiteListManager { /// The in-memory list of allowed torrents. - in_memory_whitelist: InMemoryWhitelist, + in_memory_whitelist: Arc, /// The persisted list of allowed torrents. database_whitelist: Arc, @@ -17,9 +17,9 @@ pub struct WhiteListManager { impl WhiteListManager { #[must_use] - pub fn new(database_whitelist: Arc) -> Self { + pub fn new(database_whitelist: Arc, in_memory_whitelist: Arc) -> Self { Self { - in_memory_whitelist: InMemoryWhitelist::default(), + in_memory_whitelist, database_whitelist, } } diff --git a/src/core/whitelist/mod.rs b/src/core/whitelist/mod.rs index faf83c87b..89c69b761 100644 --- a/src/core/whitelist/mod.rs +++ b/src/core/whitelist/mod.rs @@ -1,2 +1,3 @@ +pub mod authorization; pub mod manager; pub mod repository; diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index cb3789a06..a5c33d5ee 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -32,6 +32,7 @@ use super::v1::context::health_check::handlers::health_check_handler; use super::v1::middlewares::auth::State; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; +use crate::core::whitelist::manager::WhiteListManager; use crate::core::Tracker; use crate::servers::apis::API_LOG_TARGET; use crate::servers::logging::Latency; @@ -39,9 +40,17 @@ use crate::servers::udp::server::banning::BanService; /// Add all API routes to the router. 
#[allow(clippy::needless_pass_by_value)] -#[instrument(skip(tracker, ban_service, stats_event_sender, stats_repository, access_tokens))] +#[instrument(skip( + tracker, + whitelist_manager, + ban_service, + stats_event_sender, + stats_repository, + access_tokens +))] pub fn router( tracker: Arc, + whitelist_manager: Arc, ban_service: Arc>, stats_event_sender: Arc>>, stats_repository: Arc, @@ -56,6 +65,7 @@ pub fn router( api_url_prefix, router, tracker.clone(), + &whitelist_manager.clone(), ban_service.clone(), stats_event_sender.clone(), stats_repository.clone(), diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index c4fae6ebf..f98770359 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -40,6 +40,7 @@ use tracing::{instrument, Level}; use super::routes::router; use crate::bootstrap::jobs::Started; use crate::core::statistics::repository::Repository; +use crate::core::whitelist::manager::WhiteListManager; use crate::core::{statistics, Tracker}; use crate::servers::apis::API_LOG_TARGET; use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; @@ -125,10 +126,12 @@ impl ApiServer { /// # Panics /// /// It would panic if the bound socket address cannot be sent back to this starter. 
- #[instrument(skip(self, tracker, stats_event_sender, ban_service, stats_repository, form, access_tokens), err, ret(Display, level = Level::INFO))] + #[allow(clippy::too_many_arguments)] + #[instrument(skip(self, tracker, whitelist_manager, stats_event_sender, ban_service, stats_repository, form, access_tokens), err, ret(Display, level = Level::INFO))] pub async fn start( self, tracker: Arc, + whitelist_manager: Arc, stats_event_sender: Arc>>, stats_repository: Arc, ban_service: Arc>, @@ -146,6 +149,7 @@ impl ApiServer { let _task = launcher .start( tracker, + whitelist_manager, ban_service, stats_event_sender, stats_repository, @@ -255,6 +259,7 @@ impl Launcher { #[instrument(skip( self, tracker, + whitelist_manager, ban_service, stats_event_sender, stats_repository, @@ -265,6 +270,7 @@ impl Launcher { pub fn start( &self, tracker: Arc, + whitelist_manager: Arc, ban_service: Arc>, stats_event_sender: Arc>>, stats_repository: Arc, @@ -277,6 +283,7 @@ impl Launcher { let router = router( tracker, + whitelist_manager, ban_service, stats_event_sender, stats_repository, @@ -335,7 +342,9 @@ mod tests { use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::make_rust_tls; - use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist, statistics}; + use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; + use crate::core::whitelist; + use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::servers::apis::server::{ApiServer, Launcher}; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; @@ -354,8 +363,13 @@ mod tests { initialize_global_services(&cfg); let database = initialize_database(&cfg); - let whitelist_manager = initialize_whitelist(database.clone()); - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_manager)); + let in_memory_whitelist = 
Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( + &cfg.core, + &in_memory_whitelist.clone(), + )); + let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); let bind_to = config.bind_address; @@ -372,6 +386,7 @@ mod tests { let started = stopped .start( tracker, + whitelist_manager, stats_event_sender, stats_repository, ban_service, diff --git a/src/servers/apis/v1/context/whitelist/routes.rs b/src/servers/apis/v1/context/whitelist/routes.rs index c58aa7177..34f1393b8 100644 --- a/src/servers/apis/v1/context/whitelist/routes.rs +++ b/src/servers/apis/v1/context/whitelist/routes.rs @@ -11,25 +11,25 @@ use axum::routing::{delete, get, post}; use axum::Router; use super::handlers::{add_torrent_to_whitelist_handler, reload_whitelist_handler, remove_torrent_from_whitelist_handler}; -use crate::core::Tracker; +use crate::core::whitelist::manager::WhiteListManager; /// It adds the routes to the router for the [`whitelist`](crate::servers::apis::v1::context::whitelist) API context. 
-pub fn add(prefix: &str, router: Router, tracker: &Arc) -> Router { +pub fn add(prefix: &str, router: Router, whitelist_manager: &Arc) -> Router { let prefix = format!("{prefix}/whitelist"); router // Whitelisted torrents .route( &format!("{prefix}/{{info_hash}}"), - post(add_torrent_to_whitelist_handler).with_state(tracker.whitelist_manager.clone()), + post(add_torrent_to_whitelist_handler).with_state(whitelist_manager.clone()), ) .route( &format!("{prefix}/{{info_hash}}"), - delete(remove_torrent_from_whitelist_handler).with_state(tracker.whitelist_manager.clone()), + delete(remove_torrent_from_whitelist_handler).with_state(whitelist_manager.clone()), ) // Whitelist commands .route( &format!("{prefix}/reload"), - get(reload_whitelist_handler).with_state(tracker.whitelist_manager.clone()), + get(reload_whitelist_handler).with_state(whitelist_manager.clone()), ) } diff --git a/src/servers/apis/v1/routes.rs b/src/servers/apis/v1/routes.rs index 9fbd5da0e..1954af2e4 100644 --- a/src/servers/apis/v1/routes.rs +++ b/src/servers/apis/v1/routes.rs @@ -7,6 +7,7 @@ use tokio::sync::RwLock; use super::context::{auth_key, stats, torrent, whitelist}; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; +use crate::core::whitelist::manager::WhiteListManager; use crate::core::Tracker; use crate::servers::udp::server::banning::BanService; @@ -15,6 +16,7 @@ pub fn add( prefix: &str, router: Router, tracker: Arc, + whitelist_manager: &Arc, ban_service: Arc>, stats_event_sender: Arc>>, stats_repository: Arc, @@ -30,7 +32,7 @@ pub fn add( stats_event_sender, stats_repository, ); - let router = whitelist::routes::add(&v1_prefix, router, &tracker); + let router = whitelist::routes::add(&v1_prefix, router, whitelist_manager); torrent::routes::add(&v1_prefix, router, tracker) } diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 82b65c2ff..b053628ce 100644 --- a/src/servers/http/server.rs +++ 
b/src/servers/http/server.rs @@ -11,7 +11,7 @@ use tracing::instrument; use super::v1::routes::router; use crate::bootstrap::jobs::Started; -use crate::core::{statistics, Tracker}; +use crate::core::{statistics, whitelist, Tracker}; use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; use crate::servers::logging::STARTED_ON; @@ -42,10 +42,11 @@ pub struct Launcher { } impl Launcher { - #[instrument(skip(self, tracker, stats_event_sender, tx_start, rx_halt))] + #[instrument(skip(self, tracker, whitelist_authorization, stats_event_sender, tx_start, rx_halt))] fn start( &self, tracker: Arc, + whitelist_authorization: Arc, stats_event_sender: Arc>>, tx_start: Sender, rx_halt: Receiver, @@ -66,7 +67,7 @@ impl Launcher { tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting on: {protocol}://{}", address); - let app = router(tracker, stats_event_sender, address); + let app = router(tracker, whitelist_authorization, stats_event_sender, address); let running = Box::pin(async { match tls { @@ -162,6 +163,7 @@ impl HttpServer { pub async fn start( self, tracker: Arc, + whitelist_authorization: Arc, stats_event_sender: Arc>>, form: ServiceRegistrationForm, ) -> Result, Error> { @@ -171,7 +173,7 @@ impl HttpServer { let launcher = self.state.launcher; let task = tokio::spawn(async move { - let server = launcher.start(tracker, stats_event_sender, tx_start, rx_halt); + let server = launcher.start(tracker, whitelist_authorization, stats_event_sender, tx_start, rx_halt); server.await; @@ -244,7 +246,9 @@ mod tests { use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::make_rust_tls; - use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist, statistics}; + use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; + use crate::core::whitelist; + use 
crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::servers::http::server::{HttpServer, Launcher}; use crate::servers::registar::Registar; @@ -258,8 +262,13 @@ mod tests { initialize_global_services(&cfg); let database = initialize_database(&cfg); - let whitelist_manager = initialize_whitelist(database.clone()); - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_manager)); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( + &cfg.core, + &in_memory_whitelist.clone(), + )); + let _whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); let http_trackers = cfg.http_trackers.clone().expect("missing HTTP trackers configuration"); let config = &http_trackers[0]; @@ -274,7 +283,7 @@ mod tests { let stopped = HttpServer::new(Launcher::new(bind_to, tls)); let started = stopped - .start(tracker, stats_event_sender, register.give_form()) + .start(tracker, whitelist_authorization, stats_event_sender, register.give_form()) .await .expect("it should start the server"); let stopped = started.stop().await.expect("it should stop the server"); diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 24beadbc2..61464f1d5 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -23,7 +23,7 @@ use torrust_tracker_primitives::peer; use crate::core::auth::Key; use crate::core::statistics::event::sender::Sender; -use crate::core::{PeersWanted, Tracker}; +use crate::core::{whitelist, PeersWanted, Tracker}; use crate::servers::http::v1::extractors::announce_request::ExtractRequest; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use 
crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; @@ -36,13 +36,17 @@ use crate::CurrentClock; #[allow(clippy::unused_async)] #[allow(clippy::type_complexity)] pub async fn handle_without_key( - State(state): State<(Arc, Arc>>)>, + State(state): State<( + Arc, + Arc, + Arc>>, + )>, ExtractRequest(announce_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ) -> Response { tracing::debug!("http announce request: {:#?}", announce_request); - handle(&state.0, &state.1, &announce_request, &client_ip_sources, None).await + handle(&state.0, &state.1, &state.2, &announce_request, &client_ip_sources, None).await } /// It handles the `announce` request when the HTTP tracker requires @@ -50,14 +54,18 @@ pub async fn handle_without_key( #[allow(clippy::unused_async)] #[allow(clippy::type_complexity)] pub async fn handle_with_key( - State(state): State<(Arc, Arc>>)>, + State(state): State<( + Arc, + Arc, + Arc>>, + )>, ExtractRequest(announce_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ExtractKey(key): ExtractKey, ) -> Response { tracing::debug!("http announce request: {:#?}", announce_request); - handle(&state.0, &state.1, &announce_request, &client_ip_sources, Some(key)).await + handle(&state.0, &state.1, &state.2, &announce_request, &client_ip_sources, Some(key)).await } /// It handles the `announce` request. @@ -66,6 +74,7 @@ pub async fn handle_with_key( /// `unauthenticated` modes. 
async fn handle( tracker: &Arc, + whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, announce_request: &Announce, client_ip_sources: &ClientIpSources, @@ -73,6 +82,7 @@ async fn handle( ) -> Response { let announce_data = match handle_announce( tracker, + whitelist_authorization, opt_stats_event_sender, announce_request, client_ip_sources, @@ -94,6 +104,7 @@ async fn handle( async fn handle_announce( tracker: &Arc, + whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, announce_request: &Announce, client_ip_sources: &ClientIpSources, @@ -115,7 +126,7 @@ async fn handle_announce( } // Authorization - match tracker.authorize(&announce_request.info_hash).await { + match whitelist_authorization.authorize(&announce_request.info_hash).await { Ok(()) => (), Err(error) => return Err(responses::error::Error::from(error)), } @@ -198,52 +209,51 @@ pub fn map_to_torrust_event(event: &Option) -> AnnounceEvent { #[cfg(test)] mod tests { + use std::sync::Arc; + use aquatic_udp_protocol::PeerId; use bittorrent_http_protocol::v1::requests::announce::Announce; use bittorrent_http_protocol::v1::responses; use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; use crate::core::services::{initialize_tracker, statistics}; use crate::core::statistics::event::sender::Sender; - use crate::core::Tracker; - - fn private_tracker() -> (Tracker, Option>) { - let config = configuration::ephemeral_private(); + use crate::core::{whitelist, Tracker}; - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + type TrackerAndDeps = ( + Arc, + Arc>>, + Arc, + ); - (initialize_tracker(&config, &database, 
&whitelist_manager), stats_event_sender) + fn private_tracker() -> TrackerAndDeps { + initialize_tracker_and_deps(&configuration::ephemeral_private()) } - fn whitelisted_tracker() -> (Tracker, Option>) { - let config = configuration::ephemeral_listed(); - - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - - (initialize_tracker(&config, &database, &whitelist_manager), stats_event_sender) + fn whitelisted_tracker() -> TrackerAndDeps { + initialize_tracker_and_deps(&configuration::ephemeral_listed()) } - fn tracker_on_reverse_proxy() -> (Tracker, Option>) { - let config = configuration::ephemeral_with_reverse_proxy(); - - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); - let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - - (initialize_tracker(&config, &database, &whitelist_manager), stats_event_sender) + fn tracker_on_reverse_proxy() -> TrackerAndDeps { + initialize_tracker_and_deps(&configuration::ephemeral_with_reverse_proxy()) } - fn tracker_not_on_reverse_proxy() -> (Tracker, Option>) { - let config = configuration::ephemeral_without_reverse_proxy(); + fn tracker_not_on_reverse_proxy() -> TrackerAndDeps { + initialize_tracker_and_deps(&configuration::ephemeral_without_reverse_proxy()) + } - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + /// Initialize tracker's dependencies and tracker. 
+ fn initialize_tracker_and_deps(config: &Configuration) -> TrackerAndDeps { + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + let stats_event_sender = Arc::new(stats_event_sender); + let tracker = Arc::new(initialize_tracker(config, &database, &whitelist_authorization)); - (initialize_tracker(&config, &database, &whitelist_manager), stats_event_sender) + (tracker, stats_event_sender, whitelist_authorization) } fn sample_announce_request() -> Announce { @@ -286,7 +296,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_authentication_key_is_missing() { - let (tracker, stats_event_sender) = private_tracker(); + let (tracker, stats_event_sender, whitelist_authorization) = private_tracker(); let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); @@ -295,6 +305,7 @@ mod tests { let response = handle_announce( &tracker, + &whitelist_authorization, &stats_event_sender, &sample_announce_request(), &sample_client_ip_sources(), @@ -311,7 +322,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_authentication_key_is_invalid() { - let (tracker, stats_event_sender) = private_tracker(); + let (tracker, stats_event_sender, whitelist_authorization) = private_tracker(); let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); @@ -322,6 +333,7 @@ mod tests { let response = handle_announce( &tracker, + &whitelist_authorization, &stats_event_sender, &sample_announce_request(), &sample_client_ip_sources(), @@ -344,7 +356,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_announced_torrent_is_not_whitelisted() { - let (tracker, stats_event_sender) = whitelisted_tracker(); + let (tracker, stats_event_sender, whitelist_authorization) = whitelisted_tracker(); let tracker = Arc::new(tracker); let stats_event_sender = 
Arc::new(stats_event_sender); @@ -353,6 +365,7 @@ mod tests { let response = handle_announce( &tracker, + &whitelist_authorization, &stats_event_sender, &announce_request, &sample_client_ip_sources(), @@ -383,7 +396,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { - let (tracker, stats_event_sender) = tracker_on_reverse_proxy(); + let (tracker, stats_event_sender, whitelist_authorization) = tracker_on_reverse_proxy(); let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); @@ -395,6 +408,7 @@ mod tests { let response = handle_announce( &tracker, + &whitelist_authorization, &stats_event_sender, &sample_announce_request(), &client_ip_sources, @@ -422,7 +436,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { - let (tracker, stats_event_sender) = tracker_not_on_reverse_proxy(); + let (tracker, stats_event_sender, whitelist_authorization) = tracker_not_on_reverse_proxy(); let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); @@ -434,6 +448,7 @@ mod tests { let response = handle_announce( &tracker, + &whitelist_authorization, &stats_event_sender, &sample_announce_request(), &client_ip_sources, diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index a5cf58129..9c57eda58 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -133,37 +133,49 @@ mod tests { fn private_tracker() -> (Tracker, Option>) { let config = configuration::ephemeral_private(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - (initialize_tracker(&config, 
&database, &whitelist_manager), stats_event_sender) + ( + initialize_tracker(&config, &database, &whitelist_authorization), + stats_event_sender, + ) } fn whitelisted_tracker() -> (Tracker, Option>) { let config = configuration::ephemeral_listed(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - (initialize_tracker(&config, &database, &whitelist_manager), stats_event_sender) + ( + initialize_tracker(&config, &database, &whitelist_authorization), + stats_event_sender, + ) } fn tracker_on_reverse_proxy() -> (Tracker, Option>) { let config = configuration::ephemeral_with_reverse_proxy(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - (initialize_tracker(&config, &database, &whitelist_manager), stats_event_sender) + ( + initialize_tracker(&config, &database, &whitelist_authorization), + stats_event_sender, + ) } fn tracker_not_on_reverse_proxy() -> (Tracker, Option>) { let config = configuration::ephemeral_without_reverse_proxy(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - (initialize_tracker(&config, &database, &whitelist_manager), stats_event_sender) + ( + initialize_tracker(&config, &database, &whitelist_authorization), + stats_event_sender, + ) } fn sample_scrape_request() -> Scrape { diff --git 
a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index 97eb5b95d..d37c55c7a 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -23,7 +23,7 @@ use tracing::{instrument, Level, Span}; use super::handlers::{announce, health_check, scrape}; use crate::core::statistics::event::sender::Sender; -use crate::core::Tracker; +use crate::core::{whitelist, Tracker}; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; use crate::servers::logging::Latency; @@ -32,19 +32,32 @@ use crate::servers::logging::Latency; /// > **NOTICE**: it's added a layer to get the client IP from the connection /// > info. The tracker could use the connection info to get the client IP. #[allow(clippy::needless_pass_by_value)] -#[instrument(skip(tracker, stats_event_sender, server_socket_addr))] -pub fn router(tracker: Arc, stats_event_sender: Arc>>, server_socket_addr: SocketAddr) -> Router { +#[instrument(skip(tracker, whitelist_authorization, stats_event_sender, server_socket_addr))] +pub fn router( + tracker: Arc, + whitelist_authorization: Arc, + stats_event_sender: Arc>>, + server_socket_addr: SocketAddr, +) -> Router { Router::new() // Health check .route("/health_check", get(health_check::handler)) // Announce request .route( "/announce", - get(announce::handle_without_key).with_state((tracker.clone(), stats_event_sender.clone())), + get(announce::handle_without_key).with_state(( + tracker.clone(), + whitelist_authorization.clone(), + stats_event_sender.clone(), + )), ) .route( "/announce/{key}", - get(announce::handle_with_key).with_state((tracker.clone(), stats_event_sender.clone())), + get(announce::handle_with_key).with_state(( + tracker.clone(), + whitelist_authorization.clone(), + stats_event_sender.clone(), + )), ) // Scrape request .route( diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 63a904182..17598904c 100644 --- a/src/servers/http/v1/services/announce.rs +++ 
b/src/servers/http/v1/services/announce.rs @@ -73,11 +73,11 @@ mod tests { fn public_tracker() -> (Tracker, Arc>>) { let config = configuration::ephemeral_public(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); - let tracker = initialize_tracker(&config, &database, &whitelist_manager); + let tracker = initialize_tracker(&config, &database, &whitelist_authorization); (tracker, stats_event_sender) } @@ -131,9 +131,9 @@ mod tests { fn test_tracker_factory() -> Tracker { let config = configuration::ephemeral(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); - Tracker::new(&config.core, &database, &whitelist_manager).unwrap() + Tracker::new(&config.core, &database, &whitelist_authorization).unwrap() } #[tokio::test] diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 56c18cbb3..0a25bccaf 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -87,9 +87,9 @@ mod tests { fn public_tracker() -> Tracker { let config = configuration::ephemeral_public(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); - initialize_tracker(&config, &database, &whitelist_manager) + initialize_tracker(&config, &database, &whitelist_authorization) } fn sample_info_hashes() -> Vec { @@ -115,9 +115,9 @@ mod tests { fn test_tracker_factory() -> Tracker { let config = configuration::ephemeral(); - let (database, whitelist_manager) = 
initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); - Tracker::new(&config.core, &database, &whitelist_manager).unwrap() + Tracker::new(&config.core, &database, &whitelist_authorization).unwrap() } mod with_real_data { diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index a7d964391..c01dc2548 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -21,7 +21,7 @@ use super::connection_cookie::{check, make}; use super::server::banning::BanService; use super::RawRequest; use crate::core::statistics::event::sender::Sender; -use crate::core::{statistics, PeersWanted, Tracker}; +use crate::core::{statistics, whitelist, PeersWanted, Tracker}; use crate::servers::udp::error::Error; use crate::servers::udp::{peer_builder, UDP_TRACKER_LOG_TARGET}; use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; @@ -54,10 +54,11 @@ impl CookieTimeValues { /// - Delegating the request to the correct handler depending on the request type. /// /// It will return an `Error` response if the request is invalid. 
-#[instrument(fields(request_id), skip(udp_request, tracker, opt_stats_event_sender, cookie_time_values, ban_service), ret(level = Level::TRACE))] +#[instrument(fields(request_id), skip(udp_request, tracker, whitelist_authorization, opt_stats_event_sender, cookie_time_values, ban_service), ret(level = Level::TRACE))] pub(crate) async fn handle_packet( udp_request: RawRequest, tracker: &Tracker, + whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, local_addr: SocketAddr, cookie_time_values: CookieTimeValues, @@ -76,6 +77,7 @@ pub(crate) async fn handle_packet( request, udp_request.from, tracker, + whitelist_authorization, opt_stats_event_sender, cookie_time_values.clone(), ) @@ -131,11 +133,19 @@ pub(crate) async fn handle_packet( /// # Errors /// /// If a error happens in the `handle_request` function, it will just return the `ServerError`. -#[instrument(skip(request, remote_addr, tracker, opt_stats_event_sender, cookie_time_values))] +#[instrument(skip( + request, + remote_addr, + tracker, + whitelist_authorization, + opt_stats_event_sender, + cookie_time_values +))] pub async fn handle_request( request: Request, remote_addr: SocketAddr, tracker: &Tracker, + whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, cookie_time_values: CookieTimeValues, ) -> Result { @@ -154,6 +164,7 @@ pub async fn handle_request( remote_addr, &announce_request, tracker, + whitelist_authorization, opt_stats_event_sender, cookie_time_values.valid_range, ) @@ -216,11 +227,12 @@ pub async fn handle_connect( /// # Errors /// /// If a error happens in the `handle_announce` function, it will just return the `ServerError`. 
-#[instrument(fields(transaction_id, connection_id, info_hash), skip(tracker, opt_stats_event_sender), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id, connection_id, info_hash), skip(tracker, whitelist_authorization, opt_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_announce( remote_addr: SocketAddr, request: &AnnounceRequest, tracker: &Tracker, + whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { @@ -242,7 +254,7 @@ pub async fn handle_announce( let remote_client_ip = remote_addr.ip(); // Authorization - tracker + whitelist_authorization .authorize(&info_hash) .await .map_err(|e| Error::TrackerError { @@ -462,6 +474,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::ops::Range; + use std::sync::Arc; use aquatic_udp_protocol::{NumberOfBytes, PeerId}; use torrust_tracker_clock::clock::Time; @@ -471,11 +484,21 @@ mod tests { use super::gen_remote_fingerprint; use crate::app_test::initialize_tracker_dependencies; - use crate::core::services::{initialize_tracker, statistics}; + use crate::core::services::{initialize_tracker, initialize_whitelist_manager, statistics}; use crate::core::statistics::event::sender::Sender; - use crate::core::Tracker; + use crate::core::whitelist::manager::WhiteListManager; + use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; + use crate::core::{whitelist, Tracker}; use crate::CurrentClock; + type TrackerAndDeps = ( + Arc, + Arc>>, + Arc, + Arc, + Arc, + ); + fn tracker_configuration() -> Configuration { default_testing_tracker_configuration() } @@ -484,19 +507,29 @@ mod tests { configuration::ephemeral() } - fn public_tracker() -> (Tracker, Option>) { - initialized_tracker(&configuration::ephemeral_public()) + fn public_tracker() -> TrackerAndDeps { + initialize_tracker_and_deps(&configuration::ephemeral_public()) } - fn whitelisted_tracker() -> (Tracker, Option>) { - 
initialized_tracker(&configuration::ephemeral_listed()) + fn whitelisted_tracker() -> TrackerAndDeps { + initialize_tracker_and_deps(&configuration::ephemeral_listed()) } - fn initialized_tracker(config: &Configuration) -> (Tracker, Option>) { - let (database, whitelist_manager) = initialize_tracker_dependencies(config); + fn initialize_tracker_and_deps(config: &Configuration) -> TrackerAndDeps { + let (database, in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + let stats_event_sender = Arc::new(stats_event_sender); + let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + + let tracker = Arc::new(initialize_tracker(config, &database, &whitelist_authorization)); - (initialize_tracker(config, &database, &whitelist_manager), stats_event_sender) + ( + tracker, + stats_event_sender, + in_memory_whitelist, + whitelist_manager, + whitelist_authorization, + ) } fn sample_ipv4_remote_addr() -> SocketAddr { @@ -593,12 +626,14 @@ mod tests { } } - fn test_tracker_factory() -> Tracker { + fn test_tracker_factory() -> (Arc, Arc) { let config = tracker_configuration(); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); - Tracker::new(&config.core, &database, &whitelist_manager).unwrap() + let tracker = Arc::new(Tracker::new(&config.core, &database, &whitelist_authorization).unwrap()); + + (tracker, whitelist_authorization) } mod connect_request { @@ -811,7 +846,7 @@ mod tests { }; use mockall::predicate::eq; - use crate::core::{self, statistics}; + use crate::core::{self, statistics, whitelist}; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use 
crate::servers::udp::handlers::tests::{ @@ -822,9 +857,8 @@ mod tests { #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { - let (tracker, stats_event_sender) = public_tracker(); - let tracker = Arc::new(tracker); - let stats_event_sender = Arc::new(stats_event_sender); + let (tracker, stats_event_sender, _in_memory_whitelist, _whitelist_manager, whitelist_authorization) = + public_tracker(); let client_ip = Ipv4Addr::new(126, 0, 0, 1); let client_port = 8080; @@ -845,6 +879,7 @@ mod tests { remote_addr, &request, &tracker, + &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), ) @@ -863,9 +898,8 @@ mod tests { #[tokio::test] async fn the_announced_peer_should_not_be_included_in_the_response() { - let (tracker, stats_event_sender) = public_tracker(); - let tracker = Arc::new(tracker); - let stats_event_sender = Arc::new(stats_event_sender); + let (tracker, stats_event_sender, _in_memory_whitelist, _whitelist_manager, whitelist_authorization) = + public_tracker(); let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); @@ -877,6 +911,7 @@ mod tests { remote_addr, &request, &tracker, + &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), ) @@ -904,9 +939,8 @@ mod tests { // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): // "Do note that most trackers will only honor the IP address field under limited circumstances." 
- let (tracker, stats_event_sender) = public_tracker(); - let tracker = Arc::new(tracker); - let stats_event_sender = Arc::new(stats_event_sender); + let (tracker, stats_event_sender, _in_memory_whitelist, _whitelist_manager, whitelist_authorization) = + public_tracker(); let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); @@ -930,6 +964,7 @@ mod tests { remote_addr, &request, &tracker, + &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), ) @@ -957,7 +992,10 @@ mod tests { tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv6); } - async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { + async fn announce_a_new_peer_using_ipv4( + tracker: Arc, + whitelist_authorization: Arc, + ) -> Response { let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); @@ -970,6 +1008,7 @@ mod tests { remote_addr, &request, &tracker, + &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), ) @@ -979,12 +1018,12 @@ mod tests { #[tokio::test] async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { - let (tracker, _stats_event_sender) = public_tracker(); - let tracker = Arc::new(tracker); + let (tracker, _stats_event_sender, _in_memory_whitelist, _whitelist_manager, whitelist_authorization) = + public_tracker(); add_a_torrent_peer_using_ipv6(&tracker); - let response = announce_a_new_peer_using_ipv4(tracker.clone()).await; + let response = announce_a_new_peer_using_ipv4(tracker.clone(), whitelist_authorization).await; // The response should not contain the peer using IPV6 let peers: Option>> = match response { @@ -1006,12 +1045,13 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let tracker = Arc::new(test_tracker_factory()); + let (tracker, whitelist_authorization) = 
test_tracker_factory(); handle_announce( sample_ipv4_socket_address(), &AnnounceRequestBuilder::default().into(), &tracker, + &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), ) @@ -1034,9 +1074,8 @@ mod tests { #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { - let (tracker, stats_event_sender) = public_tracker(); - let tracker = Arc::new(tracker); - let stats_event_sender = Arc::new(stats_event_sender); + let (tracker, stats_event_sender, _in_memory_whitelist, _whitelist_manager, whitelist_authorization) = + public_tracker(); let client_ip = Ipv4Addr::new(127, 0, 0, 1); let client_port = 8080; @@ -1057,6 +1096,7 @@ mod tests { remote_addr, &request, &tracker, + &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), ) @@ -1089,7 +1129,7 @@ mod tests { }; use mockall::predicate::eq; - use crate::core::{self, statistics}; + use crate::core::{self, statistics, whitelist}; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ @@ -1100,9 +1140,8 @@ mod tests { #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { - let (tracker, stats_event_sender) = public_tracker(); - let tracker = Arc::new(tracker); - let stats_event_sender = Arc::new(stats_event_sender); + let (tracker, stats_event_sender, _in_memory_whitelist, _whitelist_manager, whitelist_authorization) = + public_tracker(); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); @@ -1124,6 +1163,7 @@ mod tests { remote_addr, &request, &tracker, + &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), ) @@ -1142,9 +1182,8 @@ mod tests { #[tokio::test] async fn the_announced_peer_should_not_be_included_in_the_response() { - let (tracker, stats_event_sender) = public_tracker(); - let tracker 
= Arc::new(tracker); - let stats_event_sender = Arc::new(stats_event_sender); + let (tracker, stats_event_sender, _in_memory_whitelist, _whitelist_manager, whitelist_authorization) = + public_tracker(); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); @@ -1159,6 +1198,7 @@ mod tests { remote_addr, &request, &tracker, + &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), ) @@ -1186,9 +1226,8 @@ mod tests { // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): // "Do note that most trackers will only honor the IP address field under limited circumstances." - let (tracker, stats_event_sender) = public_tracker(); - let tracker = Arc::new(tracker); - let stats_event_sender = Arc::new(stats_event_sender); + let (tracker, stats_event_sender, _in_memory_whitelist, _whitelist_manager, whitelist_authorization) = + public_tracker(); let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); @@ -1212,6 +1251,7 @@ mod tests { remote_addr, &request, &tracker, + &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), ) @@ -1239,7 +1279,10 @@ mod tests { tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv4); } - async fn announce_a_new_peer_using_ipv6(tracker: Arc) -> Response { + async fn announce_a_new_peer_using_ipv6( + tracker: Arc, + whitelist_authorization: Arc, + ) -> Response { let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); @@ -1255,6 +1298,7 @@ mod tests { remote_addr, &request, &tracker, + &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), ) @@ -1264,12 +1308,12 @@ mod tests { #[tokio::test] async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { - let (tracker, _stats_event_sender) = public_tracker(); - let tracker = 
Arc::new(tracker); + let (tracker, _stats_event_sender, _in_memory_whitelist, _whitelist_manager, whitelist_authorization) = + public_tracker(); add_a_torrent_peer_using_ipv4(&tracker); - let response = announce_a_new_peer_using_ipv6(tracker.clone()).await; + let response = announce_a_new_peer_using_ipv6(tracker.clone(), whitelist_authorization).await; // The response should not contain the peer using IPV4 let peers: Option>> = match response { @@ -1291,7 +1335,7 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let tracker = Arc::new(test_tracker_factory()); + let (tracker, whitelist_authorization) = test_tracker_factory(); let remote_addr = sample_ipv6_remote_addr(); @@ -1303,6 +1347,7 @@ mod tests { remote_addr, &announce_request, &tracker, + &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), ) @@ -1331,7 +1376,7 @@ mod tests { async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let config = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let (database, whitelist_manager) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); stats_event_sender_mock @@ -1342,7 +1387,7 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let tracker = Arc::new(core::Tracker::new(&config.core, &database, &whitelist_manager).unwrap()); + let tracker = Arc::new(core::Tracker::new(&config.core, &database, &whitelist_authorization).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); @@ -1368,6 +1413,7 @@ mod tests { remote_addr, &request, &tracker, + &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), ) @@ -1419,9 +1465,8 
@@ mod tests { #[tokio::test] async fn should_return_no_stats_when_the_tracker_does_not_have_any_torrent() { - let (tracker, stats_event_sender) = public_tracker(); - let tracker = Arc::new(tracker); - let stats_event_sender = Arc::new(stats_event_sender); + let (tracker, stats_event_sender, _in_memory_whitelist, _whitelist_manager, _whitelist_authorization) = + public_tracker(); let remote_addr = sample_ipv4_remote_addr(); @@ -1507,8 +1552,6 @@ mod tests { } mod with_a_public_tracker { - use std::sync::Arc; - use aquatic_udp_protocol::{NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; use crate::servers::udp::handlers::tests::public_tracker; @@ -1516,8 +1559,8 @@ mod tests { #[tokio::test] async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { - let (tracker, _stats_event_sender) = public_tracker(); - let tracker = Arc::new(tracker); + let (tracker, _stats_event_sender, _in_memory_whitelist, _whitelist_manager, _whitelist_authorization) = + public_tracker(); let torrent_stats = match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone()).await); @@ -1532,8 +1575,6 @@ mod tests { } mod with_a_whitelisted_tracker { - use std::sync::Arc; - use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; use crate::servers::udp::handlers::handle_scrape; @@ -1544,19 +1585,15 @@ mod tests { #[tokio::test] async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { - let (tracker, stats_event_sender) = whitelisted_tracker(); - let tracker = Arc::new(tracker); - let stats_event_sender = Arc::new(stats_event_sender); + let (tracker, stats_event_sender, in_memory_whitelist, _whitelist_manager, _whitelist_authorization) = + whitelisted_tracker(); let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); add_a_seeder(tracker.clone(), &remote_addr, &info_hash).await; - tracker - .whitelist_manager - 
.add_torrent_to_memory_whitelist(&info_hash.0.into()) - .await; + in_memory_whitelist.add(&info_hash.0.into()).await; let request = build_scrape_request(&remote_addr, &info_hash); @@ -1584,9 +1621,8 @@ mod tests { #[tokio::test] async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { - let (tracker, stats_event_sender) = whitelisted_tracker(); - let tracker = Arc::new(tracker); - let stats_event_sender = Arc::new(stats_event_sender); + let (tracker, stats_event_sender, _in_memory_whitelist, _whitelist_manager, _whitelist_authorization) = + whitelisted_tracker(); let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); @@ -1650,7 +1686,8 @@ mod tests { Arc::new(Some(Box::new(stats_event_sender_mock))); let remote_addr = sample_ipv4_remote_addr(); - let tracker = Arc::new(test_tracker_factory()); + + let (tracker, _whitelist_authorization) = test_tracker_factory(); handle_scrape( remote_addr, @@ -1689,7 +1726,8 @@ mod tests { Arc::new(Some(Box::new(stats_event_sender_mock))); let remote_addr = sample_ipv6_remote_addr(); - let tracker = Arc::new(test_tracker_factory()); + + let (tracker, _whitelist_authorization) = test_tracker_factory(); handle_scrape( remote_addr, diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index d71ffcfd1..bb5c30d44 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -14,7 +14,7 @@ use super::banning::BanService; use super::request_buffer::ActiveRequests; use crate::bootstrap::jobs::Started; use crate::core::statistics::event::sender::Sender; -use crate::core::{statistics, Tracker}; +use crate::core::{statistics, whitelist, Tracker}; use crate::servers::logging::STARTED_ON; use crate::servers::registar::ServiceHealthCheckJob; use crate::servers::signals::{shutdown_signal_with_message, Halted}; @@ -40,10 +40,19 @@ impl Launcher { /// It panics if unable to bind to udp socket, and get the address from the udp 
socket. /// It panics if unable to send address of socket. /// It panics if the udp server is loaded when the tracker is private. - /// - #[instrument(skip(tracker, opt_stats_event_sender, ban_service, bind_to, tx_start, rx_halt))] + #[allow(clippy::too_many_arguments)] + #[instrument(skip( + tracker, + whitelist_authorization, + opt_stats_event_sender, + ban_service, + bind_to, + tx_start, + rx_halt + ))] pub async fn run_with_graceful_shutdown( tracker: Arc, + whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, ban_service: Arc>, bind_to: SocketAddr, @@ -86,6 +95,7 @@ impl Launcher { let () = Self::run_udp_server_main( receiver, tracker.clone(), + whitelist_authorization.clone(), opt_stats_event_sender.clone(), ban_service.clone(), cookie_lifetime, @@ -127,10 +137,11 @@ impl Launcher { ServiceHealthCheckJob::new(binding, info, job) } - #[instrument(skip(receiver, tracker, opt_stats_event_sender, ban_service))] + #[instrument(skip(receiver, tracker, whitelist_authorization, opt_stats_event_sender, ban_service))] async fn run_udp_server_main( mut receiver: Receiver, tracker: Arc, + whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, ban_service: Arc>, cookie_lifetime: Duration, @@ -201,6 +212,7 @@ impl Launcher { let processor = Processor::new( receiver.socket.clone(), tracker.clone(), + whitelist_authorization.clone(), opt_stats_event_sender.clone(), cookie_lifetime, ); diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index b5da9d326..f47e0b1db 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -64,7 +64,9 @@ mod tests { use super::spawner::Spawner; use super::Server; use crate::bootstrap::app::initialize_global_services; - use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist, statistics}; + use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; + use crate::core::whitelist; + use 
crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; @@ -80,8 +82,13 @@ mod tests { initialize_global_services(&cfg); let database = initialize_database(&cfg); - let whitelist_manager = initialize_whitelist(database.clone()); - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_manager)); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( + &cfg.core, + &in_memory_whitelist.clone(), + )); + let _whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); let udp_trackers = cfg.udp_trackers.clone().expect("missing UDP trackers configuration"); let config = &udp_trackers[0]; @@ -93,6 +100,7 @@ mod tests { let started = stopped .start( tracker, + whitelist_authorization, stats_event_sender, ban_service, register.give_form(), @@ -119,8 +127,13 @@ mod tests { initialize_global_services(&cfg); let database = initialize_database(&cfg); - let whitelist_manager = initialize_whitelist(database.clone()); - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_manager)); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( + &cfg.core, + &in_memory_whitelist.clone(), + )); + + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); let config = &cfg.udp_trackers.as_ref().unwrap().first().unwrap(); let bind_to = config.bind_address; @@ -131,6 +144,7 @@ mod tests { let started = stopped .start( tracker, + whitelist_authorization, stats_event_sender, ban_service, register.give_form(), diff --git 
a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index 2ef7cc482..fe3666c1d 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -12,13 +12,14 @@ use super::banning::BanService; use super::bound_socket::BoundSocket; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::event::UdpResponseKind; -use crate::core::{statistics, Tracker}; +use crate::core::{statistics, whitelist, Tracker}; use crate::servers::udp::handlers::CookieTimeValues; use crate::servers::udp::{handlers, RawRequest}; pub struct Processor { socket: Arc, tracker: Arc, + whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, cookie_lifetime: f64, } @@ -27,12 +28,14 @@ impl Processor { pub fn new( socket: Arc, tracker: Arc, + whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, cookie_lifetime: f64, ) -> Self { Self { socket, tracker, + whitelist_authorization, opt_stats_event_sender, cookie_lifetime, } @@ -47,6 +50,7 @@ impl Processor { let response = handlers::handle_packet( request, &self.tracker, + &self.whitelist_authorization, &self.opt_stats_event_sender, self.socket.address(), CookieTimeValues::new(self.cookie_lifetime), diff --git a/src/servers/udp/server/spawner.rs b/src/servers/udp/server/spawner.rs index 5d7a97877..aecba39ec 100644 --- a/src/servers/udp/server/spawner.rs +++ b/src/servers/udp/server/spawner.rs @@ -12,7 +12,7 @@ use super::banning::BanService; use super::launcher::Launcher; use crate::bootstrap::jobs::Started; use crate::core::statistics::event::sender::Sender; -use crate::core::Tracker; +use crate::core::{whitelist, Tracker}; use crate::servers::signals::Halted; #[derive(Constructor, Copy, Clone, Debug, Display)] @@ -27,9 +27,11 @@ impl Spawner { /// # Panics /// /// It would panic if unable to resolve the `local_addr` from the supplied ´socket´. 
+ #[allow(clippy::too_many_arguments)] pub fn spawn_launcher( &self, tracker: Arc, + whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, ban_service: Arc>, cookie_lifetime: Duration, @@ -41,6 +43,7 @@ impl Spawner { tokio::spawn(async move { Launcher::run_with_graceful_shutdown( tracker, + whitelist_authorization, opt_stats_event_sender, ban_service, spawner.bind_to, diff --git a/src/servers/udp/server/states.rs b/src/servers/udp/server/states.rs index 5cdca5a7d..9a01b5c6d 100644 --- a/src/servers/udp/server/states.rs +++ b/src/servers/udp/server/states.rs @@ -14,7 +14,7 @@ use super::spawner::Spawner; use super::{Server, UdpError}; use crate::bootstrap::jobs::Started; use crate::core::statistics::event::sender::Sender; -use crate::core::Tracker; +use crate::core::{whitelist, Tracker}; use crate::servers::registar::{ServiceRegistration, ServiceRegistrationForm}; use crate::servers::signals::Halted; use crate::servers::udp::server::launcher::Launcher; @@ -65,10 +65,11 @@ impl Server { /// /// It panics if unable to receive the bound socket address from service. /// - #[instrument(skip(self, tracker, opt_stats_event_sender, ban_service, form), err, ret(Display, level = Level::INFO))] + #[instrument(skip(self, tracker, whitelist_authorization, opt_stats_event_sender, ban_service, form), err, ret(Display, level = Level::INFO))] pub async fn start( self, tracker: Arc, + whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, ban_service: Arc>, form: ServiceRegistrationForm, @@ -82,6 +83,7 @@ impl Server { // May need to wrap in a task to about a tokio bug. 
let task = self.state.spawner.spawn_launcher( tracker, + whitelist_authorization, opt_stats_event_sender, ban_service, cookie_lifetime, diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index cf997eb7c..a9628f053 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -82,6 +82,7 @@ impl Environment { .server .start( self.tracker, + self.whitelist_manager, self.stats_event_sender, self.stats_repository, self.ban_service, diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index d68924e07..160cb49f8 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -8,7 +8,7 @@ use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::whitelist::manager::WhiteListManager; -use torrust_tracker_lib::core::Tracker; +use torrust_tracker_lib::core::{whitelist, Tracker}; use torrust_tracker_lib::servers::http::server::{HttpServer, Launcher, Running, Stopped}; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_primitives::peer; @@ -18,6 +18,7 @@ pub struct Environment { pub tracker: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, + pub whitelist_authorization: Arc, pub whitelist_manager: Arc, pub registar: Registar, pub server: HttpServer, @@ -55,6 +56,7 @@ impl Environment { tracker: app_container.tracker.clone(), stats_event_sender: app_container.stats_event_sender.clone(), stats_repository: app_container.stats_repository.clone(), + whitelist_authorization: app_container.whitelist_authorization.clone(), whitelist_manager: app_container.whitelist_manager.clone(), registar: Registar::default(), server, @@ -66,13 +68,19 @@ impl Environment { Environment { config: self.config, tracker: self.tracker.clone(), + whitelist_authorization: 
self.whitelist_authorization.clone(), stats_event_sender: self.stats_event_sender.clone(), stats_repository: self.stats_repository.clone(), whitelist_manager: self.whitelist_manager.clone(), registar: self.registar.clone(), server: self .server - .start(self.tracker, self.stats_event_sender, self.registar.give_form()) + .start( + self.tracker, + self.whitelist_authorization, + self.stats_event_sender, + self.registar.give_form(), + ) .await .unwrap(), } @@ -88,6 +96,7 @@ impl Environment { Environment { config: self.config, tracker: self.tracker, + whitelist_authorization: self.whitelist_authorization, stats_event_sender: self.stats_event_sender, stats_repository: self.stats_repository, whitelist_manager: self.whitelist_manager, diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 81e626e1c..43778ef6e 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -7,7 +7,7 @@ use torrust_tracker_configuration::{Configuration, UdpTracker, DEFAULT_TIMEOUT}; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; -use torrust_tracker_lib::core::Tracker; +use torrust_tracker_lib::core::{whitelist, Tracker}; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_lib::servers::udp::server::banning::BanService; use torrust_tracker_lib::servers::udp::server::spawner::Spawner; @@ -21,6 +21,7 @@ where { pub config: Arc, pub tracker: Arc, + pub whitelist_authorization: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, pub ban_service: Arc>, @@ -57,6 +58,7 @@ impl Environment { Self { config, tracker: app_container.tracker.clone(), + whitelist_authorization: app_container.whitelist_authorization.clone(), stats_event_sender: app_container.stats_event_sender.clone(), stats_repository: 
app_container.stats_repository.clone(), ban_service: app_container.ban_service.clone(), @@ -71,6 +73,7 @@ impl Environment { Environment { config: self.config, tracker: self.tracker.clone(), + whitelist_authorization: self.whitelist_authorization.clone(), stats_event_sender: self.stats_event_sender.clone(), stats_repository: self.stats_repository.clone(), ban_service: self.ban_service.clone(), @@ -79,6 +82,7 @@ impl Environment { .server .start( self.tracker, + self.whitelist_authorization, self.stats_event_sender, self.ban_service, self.registar.give_form(), @@ -106,6 +110,7 @@ impl Environment { Environment { config: self.config, tracker: self.tracker, + whitelist_authorization: self.whitelist_authorization, stats_event_sender: self.stats_event_sender, stats_repository: self.stats_repository, ban_service: self.ban_service, From 88560ce4748c6af347f7380ab452e2953196b3c8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 20 Jan 2025 15:29:42 +0000 Subject: [PATCH 111/802] refactor: [#1191] create dir for mod We will add more mods inside. This is part of a bigger refactor. 
--- src/core/{auth.rs => auth/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/core/{auth.rs => auth/mod.rs} (100%) diff --git a/src/core/auth.rs b/src/core/auth/mod.rs similarity index 100% rename from src/core/auth.rs rename to src/core/auth/mod.rs From f216b052f967c9b365603c951237c0182a6e81db Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 20 Jan 2025 16:17:10 +0000 Subject: [PATCH 112/802] refactor: [#1191] extract mod auth::key --- src/core/auth/key.rs | 348 ++++++++++++++++++++++++++++++++++++++++++ src/core/auth/mod.rs | 349 +------------------------------------------ src/core/error.rs | 3 +- src/core/mod.rs | 6 +- 4 files changed, 356 insertions(+), 350 deletions(-) create mode 100644 src/core/auth/key.rs diff --git a/src/core/auth/key.rs b/src/core/auth/key.rs new file mode 100644 index 000000000..f0adf5946 --- /dev/null +++ b/src/core/auth/key.rs @@ -0,0 +1,348 @@ +//! Tracker authentication services and structs. +//! +//! This module contains functions to handle tracker keys. +//! Tracker keys are tokens used to authenticate the tracker clients when the tracker runs +//! in `private` or `private_listed` modes. +//! +//! There are services to [`generate_key`] and [`verify_key_expiration`] authentication keys. +//! +//! Authentication keys are used only by [`HTTP`](crate::servers::http) trackers. All keys have an expiration time, that means +//! they are only valid during a period of time. After that time the expiring key will no longer be valid. +//! +//! Keys are stored in this struct: +//! +//! ```rust,no_run +//! use torrust_tracker_lib::core::auth::Key; +//! use torrust_tracker_primitives::DurationSinceUnixEpoch; +//! +//! pub struct PeerKey { +//! /// Random 32-char string. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` +//! pub key: Key, +//! +//! /// Timestamp, the key will be no longer valid after this timestamp. +//! /// If `None` the keys will not expire (permanent key). +//! pub valid_until: Option, +//! } +//! 
``` +//! +//! You can generate a new key valid for `9999` seconds and `0` nanoseconds from the current time with the following: +//! +//! ```rust,no_run +//! use torrust_tracker_lib::core::auth; +//! use std::time::Duration; +//! +//! let expiring_key = auth::key::generate_key(Some(Duration::new(9999, 0))); +//! +//! // And you can later verify it with: +//! +//! assert!(auth::key::verify_key_expiration(&expiring_key).is_ok()); +//! ``` + +use std::panic::Location; +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; + +use derive_more::Display; +use rand::distributions::Alphanumeric; +use rand::{thread_rng, Rng}; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; +use torrust_tracker_located_error::{DynError, LocatedError}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; +use crate::CurrentClock; + +/// It generates a new permanent random key [`PeerKey`]. +#[must_use] +pub fn generate_permanent_key() -> PeerKey { + generate_key(None) +} + +/// It generates a new random 32-char authentication [`PeerKey`]. +/// +/// It can be an expiring or permanent key. +/// +/// # Panics +/// +/// It would panic if the `lifetime: Duration` + Duration is more than `Duration::MAX`. +/// +/// # Arguments +/// +/// * `lifetime`: if `None` the key will be permanent. 
+#[must_use] +pub fn generate_key(lifetime: Option) -> PeerKey { + let random_id: String = thread_rng() + .sample_iter(&Alphanumeric) + .take(AUTH_KEY_LENGTH) + .map(char::from) + .collect(); + + if let Some(lifetime) = lifetime { + tracing::debug!("Generated key: {}, valid for: {:?} seconds", random_id, lifetime); + + PeerKey { + key: random_id.parse::().unwrap(), + valid_until: Some(CurrentClock::now_add(&lifetime).unwrap()), + } + } else { + tracing::debug!("Generated key: {}, permanent", random_id); + + PeerKey { + key: random_id.parse::().unwrap(), + valid_until: None, + } + } +} + +/// It verifies an [`PeerKey`]. It checks if the expiration date has passed. +/// Permanent keys without duration (`None`) do not expire. +/// +/// # Errors +/// +/// Will return: +/// +/// - `Error::KeyExpired` if `auth_key.valid_until` is past the `current_time`. +/// - `Error::KeyInvalid` if `auth_key.valid_until` is past the `None`. +pub fn verify_key_expiration(auth_key: &PeerKey) -> Result<(), Error> { + let current_time: DurationSinceUnixEpoch = CurrentClock::now(); + + match auth_key.valid_until { + Some(valid_until) => { + if valid_until < current_time { + Err(Error::KeyExpired { + location: Location::caller(), + }) + } else { + Ok(()) + } + } + None => Ok(()), // Permanent key + } +} + +/// An authentication key which can potentially have an expiration time. +/// After that time is will automatically become invalid. +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] +pub struct PeerKey { + /// Random 32-char string. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` + pub key: Key, + + /// Timestamp, the key will be no longer valid after this timestamp. + /// If `None` the keys will not expire (permanent key). 
+ pub valid_until: Option, +} + +impl std::fmt::Display for PeerKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self.expiry_time() { + Some(expire_time) => write!(f, "key: `{}`, valid until `{}`", self.key, expire_time), + None => write!(f, "key: `{}`, permanent", self.key), + } + } +} + +impl PeerKey { + #[must_use] + pub fn key(&self) -> Key { + self.key.clone() + } + + /// It returns the expiry time. For example, for the starting time for Unix Epoch + /// (timestamp 0) it will return a `DateTime` whose string representation is + /// `1970-01-01 00:00:00 UTC`. + /// + /// # Panics + /// + /// Will panic when the key timestamp overflows the internal i64 type. + /// (this will naturally happen in 292.5 billion years) + #[must_use] + pub fn expiry_time(&self) -> Option> { + self.valid_until.map(convert_from_timestamp_to_datetime_utc) + } +} + +/// A token used for authentication. +/// +/// - It contains only ascii alphanumeric chars: lower and uppercase letters and +/// numbers. +/// - It's a 32-char string. +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, Display, Hash)] +pub struct Key(String); + +impl Key { + /// # Errors + /// + /// Will return an error is the string represents an invalid key. + /// Valid keys can only contain 32 chars including 0-9, a-z and A-Z. + pub fn new(value: &str) -> Result { + if value.len() != AUTH_KEY_LENGTH { + return Err(ParseKeyError::InvalidKeyLength); + } + + if !value.chars().all(|c| c.is_ascii_alphanumeric()) { + return Err(ParseKeyError::InvalidChars); + } + + Ok(Self(value.to_owned())) + } + + #[must_use] + pub fn value(&self) -> &str { + &self.0 + } +} + +/// Error returned when a key cannot be parsed from a string. 
+/// +/// ```text +/// use torrust_tracker_lib::core::auth::Key; +/// use std::str::FromStr; +/// +/// let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; +/// let key = Key::from_str(key_string); +/// +/// assert!(key.is_ok()); +/// assert_eq!(key.unwrap().to_string(), key_string); +/// ``` +/// +/// If the string does not contains a valid key, the parser function will return +/// this error. +#[derive(Debug, Error)] +pub enum ParseKeyError { + #[error("Invalid key length. Key must be have 32 chars")] + InvalidKeyLength, + #[error("Invalid chars for key. Key can only alphanumeric chars (0-9, a-z, A-Z)")] + InvalidChars, +} + +impl FromStr for Key { + type Err = ParseKeyError; + + fn from_str(s: &str) -> Result { + Key::new(s)?; + Ok(Self(s.to_string())) + } +} + +/// Verification error. Error returned when an [`PeerKey`] cannot be +/// verified with the (`crate::core::auth::verify_key`) function. +#[derive(Debug, Error)] +#[allow(dead_code)] +pub enum Error { + #[error("Key could not be verified: {source}")] + KeyVerificationError { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + #[error("Failed to read key: {key}, {location}")] + UnableToReadKey { + location: &'static Location<'static>, + key: Box, + }, + #[error("Key has expired, {location}")] + KeyExpired { location: &'static Location<'static> }, +} + +impl From for Error { + fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { + Error::KeyVerificationError { + source: (Arc::new(e) as DynError).into(), + } + } +} + +#[cfg(test)] +mod tests { + + mod key { + use std::str::FromStr; + + use crate::core::auth::Key; + + #[test] + fn should_be_parsed_from_an_string() { + let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; + let key = Key::from_str(key_string); + + assert!(key.is_ok()); + assert_eq!(key.unwrap().to_string(), key_string); + } + + #[test] + fn length_should_be_32() { + let key = Key::new(""); + assert!(key.is_err()); + + let string_longer_than_32 = 
"012345678901234567890123456789012"; // DevSkim: ignore DS173237 + let key = Key::new(string_longer_than_32); + assert!(key.is_err()); + } + + #[test] + fn should_only_include_alphanumeric_chars() { + let key = Key::new("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"); + assert!(key.is_err()); + } + } + + mod expiring_auth_key { + use std::str::FromStr; + use std::time::Duration; + + use torrust_tracker_clock::clock; + use torrust_tracker_clock::clock::stopped::Stopped as _; + + use crate::core::auth; + + #[test] + fn should_be_parsed_from_an_string() { + let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; + let auth_key = auth::Key::from_str(key_string); + + assert!(auth_key.is_ok()); + assert_eq!(auth_key.unwrap().to_string(), key_string); + } + + #[test] + fn should_be_displayed() { + // Set the time to the current time. + clock::Stopped::local_set_to_unix_epoch(); + + let expiring_key = auth::key::generate_key(Some(Duration::from_secs(0))); + + assert_eq!( + expiring_key.to_string(), + format!("key: `{}`, valid until `1970-01-01 00:00:00 UTC`", expiring_key.key) // cspell:disable-line + ); + } + + #[test] + fn should_be_generated_with_a_expiration_time() { + let expiring_key = auth::key::generate_key(Some(Duration::new(9999, 0))); + + assert!(auth::key::verify_key_expiration(&expiring_key).is_ok()); + } + + #[test] + fn should_be_generate_and_verified() { + // Set the time to the current time. + clock::Stopped::local_set_to_system_time_now(); + + // Make key that is valid for 19 seconds. + let expiring_key = auth::key::generate_key(Some(Duration::from_secs(19))); + + // Mock the time has passed 10 sec. + clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); + + assert!(auth::key::verify_key_expiration(&expiring_key).is_ok()); + + // Mock the time has passed another 10 sec. 
+ clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); + + assert!(auth::key::verify_key_expiration(&expiring_key).is_err()); + } + } +} diff --git a/src/core/auth/mod.rs b/src/core/auth/mod.rs index c92a4723d..d0f72340d 100644 --- a/src/core/auth/mod.rs +++ b/src/core/auth/mod.rs @@ -1,346 +1,5 @@ -//! Tracker authentication services and structs. -//! -//! This module contains functions to handle tracker keys. -//! Tracker keys are tokens used to authenticate the tracker clients when the tracker runs -//! in `private` or `private_listed` modes. -//! -//! There are services to [`generate_key`] and [`verify_key_expiration`] authentication keys. -//! -//! Authentication keys are used only by [`HTTP`](crate::servers::http) trackers. All keys have an expiration time, that means -//! they are only valid during a period of time. After that time the expiring key will no longer be valid. -//! -//! Keys are stored in this struct: -//! -//! ```rust,no_run -//! use torrust_tracker_lib::core::auth::Key; -//! use torrust_tracker_primitives::DurationSinceUnixEpoch; -//! -//! pub struct ExpiringKey { -//! /// Random 32-char string. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` -//! pub key: Key, -//! /// Timestamp, the key will be no longer valid after this timestamp -//! pub valid_until: Option, -//! } -//! ``` -//! -//! You can generate a new key valid for `9999` seconds and `0` nanoseconds from the current time with the following: -//! -//! ```rust,no_run -//! use torrust_tracker_lib::core::auth; -//! use std::time::Duration; -//! -//! let expiring_key = auth::generate_key(Some(Duration::new(9999, 0))); -//! -//! // And you can later verify it with: -//! -//! assert!(auth::verify_key_expiration(&expiring_key).is_ok()); -//! 
``` +pub mod key; -use std::panic::Location; -use std::str::FromStr; -use std::sync::Arc; -use std::time::Duration; - -use derive_more::Display; -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; -use serde::{Deserialize, Serialize}; -use thiserror::Error; -use torrust_tracker_clock::clock::Time; -use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; -use torrust_tracker_located_error::{DynError, LocatedError}; -use torrust_tracker_primitives::DurationSinceUnixEpoch; - -use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; -use crate::CurrentClock; - -/// It generates a new permanent random key [`PeerKey`]. -#[must_use] -pub fn generate_permanent_key() -> PeerKey { - generate_key(None) -} - -/// It generates a new random 32-char authentication [`PeerKey`]. -/// -/// It can be an expiring or permanent key. -/// -/// # Panics -/// -/// It would panic if the `lifetime: Duration` + Duration is more than `Duration::MAX`. -/// -/// # Arguments -/// -/// * `lifetime`: if `None` the key will be permanent. -#[must_use] -pub fn generate_key(lifetime: Option) -> PeerKey { - let random_id: String = thread_rng() - .sample_iter(&Alphanumeric) - .take(AUTH_KEY_LENGTH) - .map(char::from) - .collect(); - - if let Some(lifetime) = lifetime { - tracing::debug!("Generated key: {}, valid for: {:?} seconds", random_id, lifetime); - - PeerKey { - key: random_id.parse::().unwrap(), - valid_until: Some(CurrentClock::now_add(&lifetime).unwrap()), - } - } else { - tracing::debug!("Generated key: {}, permanent", random_id); - - PeerKey { - key: random_id.parse::().unwrap(), - valid_until: None, - } - } -} - -/// It verifies an [`PeerKey`]. It checks if the expiration date has passed. -/// Permanent keys without duration (`None`) do not expire. -/// -/// # Errors -/// -/// Will return: -/// -/// - `Error::KeyExpired` if `auth_key.valid_until` is past the `current_time`. -/// - `Error::KeyInvalid` if `auth_key.valid_until` is past the `None`. 
-pub fn verify_key_expiration(auth_key: &PeerKey) -> Result<(), Error> { - let current_time: DurationSinceUnixEpoch = CurrentClock::now(); - - match auth_key.valid_until { - Some(valid_until) => { - if valid_until < current_time { - Err(Error::KeyExpired { - location: Location::caller(), - }) - } else { - Ok(()) - } - } - None => Ok(()), // Permanent key - } -} - -/// An authentication key which can potentially have an expiration time. -/// After that time is will automatically become invalid. -#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] -pub struct PeerKey { - /// Random 32-char string. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` - pub key: Key, - - /// Timestamp, the key will be no longer valid after this timestamp. - /// If `None` the keys will not expire (permanent key). - pub valid_until: Option, -} - -impl std::fmt::Display for PeerKey { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self.expiry_time() { - Some(expire_time) => write!(f, "key: `{}`, valid until `{}`", self.key, expire_time), - None => write!(f, "key: `{}`, permanent", self.key), - } - } -} - -impl PeerKey { - #[must_use] - pub fn key(&self) -> Key { - self.key.clone() - } - - /// It returns the expiry time. For example, for the starting time for Unix Epoch - /// (timestamp 0) it will return a `DateTime` whose string representation is - /// `1970-01-01 00:00:00 UTC`. - /// - /// # Panics - /// - /// Will panic when the key timestamp overflows the internal i64 type. - /// (this will naturally happen in 292.5 billion years) - #[must_use] - pub fn expiry_time(&self) -> Option> { - self.valid_until.map(convert_from_timestamp_to_datetime_utc) - } -} - -/// A token used for authentication. -/// -/// - It contains only ascii alphanumeric chars: lower and uppercase letters and -/// numbers. -/// - It's a 32-char string. 
-#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, Display, Hash)] -pub struct Key(String); - -impl Key { - /// # Errors - /// - /// Will return an error is the string represents an invalid key. - /// Valid keys can only contain 32 chars including 0-9, a-z and A-Z. - pub fn new(value: &str) -> Result { - if value.len() != AUTH_KEY_LENGTH { - return Err(ParseKeyError::InvalidKeyLength); - } - - if !value.chars().all(|c| c.is_ascii_alphanumeric()) { - return Err(ParseKeyError::InvalidChars); - } - - Ok(Self(value.to_owned())) - } - - #[must_use] - pub fn value(&self) -> &str { - &self.0 - } -} - -/// Error returned when a key cannot be parsed from a string. -/// -/// ```text -/// use torrust_tracker_lib::core::auth::Key; -/// use std::str::FromStr; -/// -/// let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; -/// let key = Key::from_str(key_string); -/// -/// assert!(key.is_ok()); -/// assert_eq!(key.unwrap().to_string(), key_string); -/// ``` -/// -/// If the string does not contains a valid key, the parser function will return -/// this error. -#[derive(Debug, Error)] -pub enum ParseKeyError { - #[error("Invalid key length. Key must be have 32 chars")] - InvalidKeyLength, - #[error("Invalid chars for key. Key can only alphanumeric chars (0-9, a-z, A-Z)")] - InvalidChars, -} - -impl FromStr for Key { - type Err = ParseKeyError; - - fn from_str(s: &str) -> Result { - Key::new(s)?; - Ok(Self(s.to_string())) - } -} - -/// Verification error. Error returned when an [`PeerKey`] cannot be -/// verified with the (`crate::core::auth::verify_key`) function. 
-#[derive(Debug, Error)] -#[allow(dead_code)] -pub enum Error { - #[error("Key could not be verified: {source}")] - KeyVerificationError { - source: LocatedError<'static, dyn std::error::Error + Send + Sync>, - }, - #[error("Failed to read key: {key}, {location}")] - UnableToReadKey { - location: &'static Location<'static>, - key: Box, - }, - #[error("Key has expired, {location}")] - KeyExpired { location: &'static Location<'static> }, -} - -impl From for Error { - fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { - Error::KeyVerificationError { - source: (Arc::new(e) as DynError).into(), - } - } -} - -#[cfg(test)] -mod tests { - - mod key { - use std::str::FromStr; - - use crate::core::auth::Key; - - #[test] - fn should_be_parsed_from_an_string() { - let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let key = Key::from_str(key_string); - - assert!(key.is_ok()); - assert_eq!(key.unwrap().to_string(), key_string); - } - - #[test] - fn length_should_be_32() { - let key = Key::new(""); - assert!(key.is_err()); - - let string_longer_than_32 = "012345678901234567890123456789012"; // DevSkim: ignore DS173237 - let key = Key::new(string_longer_than_32); - assert!(key.is_err()); - } - - #[test] - fn should_only_include_alphanumeric_chars() { - let key = Key::new("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"); - assert!(key.is_err()); - } - } - - mod expiring_auth_key { - use std::str::FromStr; - use std::time::Duration; - - use torrust_tracker_clock::clock; - use torrust_tracker_clock::clock::stopped::Stopped as _; - - use crate::core::auth; - - #[test] - fn should_be_parsed_from_an_string() { - let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key = auth::Key::from_str(key_string); - - assert!(auth_key.is_ok()); - assert_eq!(auth_key.unwrap().to_string(), key_string); - } - - #[test] - fn should_be_displayed() { - // Set the time to the current time. 
- clock::Stopped::local_set_to_unix_epoch(); - - let expiring_key = auth::generate_key(Some(Duration::from_secs(0))); - - assert_eq!( - expiring_key.to_string(), - format!("key: `{}`, valid until `1970-01-01 00:00:00 UTC`", expiring_key.key) // cspell:disable-line - ); - } - - #[test] - fn should_be_generated_with_a_expiration_time() { - let expiring_key = auth::generate_key(Some(Duration::new(9999, 0))); - - assert!(auth::verify_key_expiration(&expiring_key).is_ok()); - } - - #[test] - fn should_be_generate_and_verified() { - // Set the time to the current time. - clock::Stopped::local_set_to_system_time_now(); - - // Make key that is valid for 19 seconds. - let expiring_key = auth::generate_key(Some(Duration::from_secs(19))); - - // Mock the time has passed 10 sec. - clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); - - assert!(auth::verify_key_expiration(&expiring_key).is_ok()); - - // Mock the time has passed another 10 sec. - clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); - - assert!(auth::verify_key_expiration(&expiring_key).is_err()); - } - } -} +pub type PeerKey = key::PeerKey; +pub type Key = key::Key; +pub type Error = key::Error; diff --git a/src/core/error.rs b/src/core/error.rs index f0de7df40..f0e4b849e 100644 --- a/src/core/error.rs +++ b/src/core/error.rs @@ -12,8 +12,7 @@ use bittorrent_http_protocol::v1::responses; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_located_error::LocatedError; -use super::auth::ParseKeyError; -use super::databases; +use super::{auth::key::ParseKeyError, databases}; /// Authentication or authorization error returned by the core `Tracker` #[derive(thiserror::Error, Debug, Clone)] diff --git a/src/core/mod.rs b/src/core/mod.rs index 480d0e971..e0e53128d 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -897,7 +897,7 @@ impl Tracker { /// * `lifetime` - The duration in seconds for the new key. The key will be /// no longer valid after `lifetime` seconds. 
pub async fn generate_auth_key(&self, lifetime: Option) -> Result { - let auth_key = auth::generate_key(lifetime); + let auth_key = auth::key::generate_key(lifetime); self.database.add_key_to_keys(&auth_key)?; self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); @@ -982,12 +982,12 @@ impl Tracker { Some(key) => match self.config.private_mode { Some(private_mode) => { if private_mode.check_keys_expiration { - return auth::verify_key_expiration(key); + return auth::key::verify_key_expiration(key); } Ok(()) } - None => auth::verify_key_expiration(key), + None => auth::key::verify_key_expiration(key), }, } } From 2b7373a260d7bb532744a271192e9f1d85dfed5b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 20 Jan 2025 16:28:14 +0000 Subject: [PATCH 113/802] refactor: [#1191] rename mod core::auth to core::authentication The `auth` name is ambiguous, it could mean authorization. --- src/core/{auth => authentication}/key.rs | 30 ++++++------- src/core/{auth => authentication}/mod.rs | 0 src/core/databases/mod.rs | 10 ++--- src/core/databases/mysql.rs | 18 ++++---- src/core/databases/sqlite.rs | 18 ++++---- src/core/error.rs | 4 +- src/core/mod.rs | 45 ++++++++++--------- .../apis/v1/context/auth_key/handlers.rs | 2 +- .../apis/v1/context/auth_key/resources.rs | 18 ++++---- .../http/v1/extractors/authentication_key.rs | 2 +- src/servers/http/v1/handlers/announce.rs | 6 +-- src/servers/http/v1/handlers/common/auth.rs | 6 +-- src/servers/http/v1/handlers/scrape.rs | 6 +-- src/shared/bit_torrent/common.rs | 4 +- .../api/v1/contract/context/auth_key.rs | 4 +- tests/servers/http/client.rs | 2 +- tests/servers/http/connection_info.rs | 2 +- tests/servers/http/v1/contract.rs | 4 +- 18 files changed, 92 insertions(+), 89 deletions(-) rename src/core/{auth => authentication}/key.rs (89%) rename src/core/{auth => authentication}/mod.rs (100%) diff --git a/src/core/auth/key.rs b/src/core/authentication/key.rs similarity index 89% rename from src/core/auth/key.rs 
rename to src/core/authentication/key.rs index f0adf5946..8858361ec 100644 --- a/src/core/auth/key.rs +++ b/src/core/authentication/key.rs @@ -12,7 +12,7 @@ //! Keys are stored in this struct: //! //! ```rust,no_run -//! use torrust_tracker_lib::core::auth::Key; +//! use torrust_tracker_lib::core::authentication::Key; //! use torrust_tracker_primitives::DurationSinceUnixEpoch; //! //! pub struct PeerKey { @@ -28,14 +28,14 @@ //! You can generate a new key valid for `9999` seconds and `0` nanoseconds from the current time with the following: //! //! ```rust,no_run -//! use torrust_tracker_lib::core::auth; +//! use torrust_tracker_lib::core::authentication; //! use std::time::Duration; //! -//! let expiring_key = auth::key::generate_key(Some(Duration::new(9999, 0))); +//! let expiring_key = authentication::key::generate_key(Some(Duration::new(9999, 0))); //! //! // And you can later verify it with: //! -//! assert!(auth::key::verify_key_expiration(&expiring_key).is_ok()); +//! assert!(authentication::key::verify_key_expiration(&expiring_key).is_ok()); //! ``` use std::panic::Location; @@ -199,7 +199,7 @@ impl Key { /// Error returned when a key cannot be parsed from a string. /// /// ```text -/// use torrust_tracker_lib::core::auth::Key; +/// use torrust_tracker_lib::core::authentication::Key; /// use std::str::FromStr; /// /// let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; @@ -229,7 +229,7 @@ impl FromStr for Key { } /// Verification error. Error returned when an [`PeerKey`] cannot be -/// verified with the (`crate::core::auth::verify_key`) function. +/// verified with the (`crate::core::authentication::verify_key`) function. 
#[derive(Debug, Error)] #[allow(dead_code)] pub enum Error { @@ -260,7 +260,7 @@ mod tests { mod key { use std::str::FromStr; - use crate::core::auth::Key; + use crate::core::authentication::Key; #[test] fn should_be_parsed_from_an_string() { @@ -295,12 +295,12 @@ mod tests { use torrust_tracker_clock::clock; use torrust_tracker_clock::clock::stopped::Stopped as _; - use crate::core::auth; + use crate::core::authentication; #[test] fn should_be_parsed_from_an_string() { let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key = auth::Key::from_str(key_string); + let auth_key = authentication::Key::from_str(key_string); assert!(auth_key.is_ok()); assert_eq!(auth_key.unwrap().to_string(), key_string); @@ -311,7 +311,7 @@ mod tests { // Set the time to the current time. clock::Stopped::local_set_to_unix_epoch(); - let expiring_key = auth::key::generate_key(Some(Duration::from_secs(0))); + let expiring_key = authentication::key::generate_key(Some(Duration::from_secs(0))); assert_eq!( expiring_key.to_string(), @@ -321,9 +321,9 @@ mod tests { #[test] fn should_be_generated_with_a_expiration_time() { - let expiring_key = auth::key::generate_key(Some(Duration::new(9999, 0))); + let expiring_key = authentication::key::generate_key(Some(Duration::new(9999, 0))); - assert!(auth::key::verify_key_expiration(&expiring_key).is_ok()); + assert!(authentication::key::verify_key_expiration(&expiring_key).is_ok()); } #[test] @@ -332,17 +332,17 @@ mod tests { clock::Stopped::local_set_to_system_time_now(); // Make key that is valid for 19 seconds. - let expiring_key = auth::key::generate_key(Some(Duration::from_secs(19))); + let expiring_key = authentication::key::generate_key(Some(Duration::from_secs(19))); // Mock the time has passed 10 sec. 
clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); - assert!(auth::key::verify_key_expiration(&expiring_key).is_ok()); + assert!(authentication::key::verify_key_expiration(&expiring_key).is_ok()); // Mock the time has passed another 10 sec. clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); - assert!(auth::key::verify_key_expiration(&expiring_key).is_err()); + assert!(authentication::key::verify_key_expiration(&expiring_key).is_err()); } } } diff --git a/src/core/auth/mod.rs b/src/core/authentication/mod.rs similarity index 100% rename from src/core/auth/mod.rs rename to src/core/authentication/mod.rs diff --git a/src/core/databases/mod.rs b/src/core/databases/mod.rs index e29ce22e8..e0b1b4f1b 100644 --- a/src/core/databases/mod.rs +++ b/src/core/databases/mod.rs @@ -54,7 +54,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::PersistentTorrents; use self::error::Error; -use crate::core::auth::{self, Key}; +use crate::core::authentication::{self, Key}; struct Builder where @@ -195,11 +195,11 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to load. - fn load_keys(&self) -> Result, Error>; + fn load_keys(&self) -> Result, Error>; /// It gets an expiring authentication key from the database. /// - /// It returns `Some(PeerKey)` if a [`PeerKey`](crate::core::auth::PeerKey) + /// It returns `Some(PeerKey)` if a [`PeerKey`](crate::core::authentication::PeerKey) /// with the input [`Key`] exists, `None` otherwise. /// /// # Context: Authentication Keys @@ -207,7 +207,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to load. - fn get_key_from_keys(&self, key: &Key) -> Result, Error>; + fn get_key_from_keys(&self, key: &Key) -> Result, Error>; /// It adds an expiring authentication key to the database. /// @@ -216,7 +216,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to save. 
- fn add_key_to_keys(&self, auth_key: &auth::PeerKey) -> Result; + fn add_key_to_keys(&self, auth_key: &authentication::PeerKey) -> Result; /// It removes an expiring authentication key from the database. /// diff --git a/src/core/databases/mysql.rs b/src/core/databases/mysql.rs index 1b849421b..213f6300a 100644 --- a/src/core/databases/mysql.rs +++ b/src/core/databases/mysql.rs @@ -11,7 +11,7 @@ use torrust_tracker_primitives::PersistentTorrents; use super::driver::Driver; use super::{Database, Error}; -use crate::core::auth::{self, Key}; +use crate::core::authentication::{self, Key}; use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; const DRIVER: Driver = Driver::MySQL; @@ -63,7 +63,7 @@ impl Database for Mysql { PRIMARY KEY (`id`), UNIQUE (`key`) );", - i8::try_from(AUTH_KEY_LENGTH).expect("auth::Auth Key Length Should fit within a i8!") + i8::try_from(AUTH_KEY_LENGTH).expect("authentication key length should fit within a i8!") ); let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -118,17 +118,17 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). - fn load_keys(&self) -> Result, Error> { + fn load_keys(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let keys = conn.query_map( "SELECT `key`, valid_until FROM `keys`", |(key, valid_until): (String, Option)| match valid_until { - Some(valid_until) => auth::PeerKey { + Some(valid_until) => authentication::PeerKey { key: key.parse::().unwrap(), valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }, - None => auth::PeerKey { + None => authentication::PeerKey { key: key.parse::().unwrap(), valid_until: None, }, @@ -202,7 +202,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::get_key_from_keys`](crate::core::databases::Database::get_key_from_keys). 
- fn get_key_from_keys(&self, key: &Key) -> Result, Error> { + fn get_key_from_keys(&self, key: &Key) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let query = conn.exec_first::<(String, Option), _, _>( @@ -213,11 +213,11 @@ impl Database for Mysql { let key = query?; Ok(key.map(|(key, opt_valid_until)| match opt_valid_until { - Some(valid_until) => auth::PeerKey { + Some(valid_until) => authentication::PeerKey { key: key.parse::().unwrap(), valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), }, - None => auth::PeerKey { + None => authentication::PeerKey { key: key.parse::().unwrap(), valid_until: None, }, @@ -225,7 +225,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::add_key_to_keys`](crate::core::databases::Database::add_key_to_keys). - fn add_key_to_keys(&self, auth_key: &auth::PeerKey) -> Result { + fn add_key_to_keys(&self, auth_key: &authentication::PeerKey) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let key = auth_key.key.to_string(); diff --git a/src/core/databases/sqlite.rs b/src/core/databases/sqlite.rs index 5bb23bb3e..6fe9ac599 100644 --- a/src/core/databases/sqlite.rs +++ b/src/core/databases/sqlite.rs @@ -11,7 +11,7 @@ use torrust_tracker_primitives::{DurationSinceUnixEpoch, PersistentTorrents}; use super::driver::Driver; use super::{Database, Error}; -use crate::core::auth::{self, Key}; +use crate::core::authentication::{self, Key}; const DRIVER: Driver = Driver::Sqlite3; @@ -106,7 +106,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). 
- fn load_keys(&self) -> Result, Error> { + fn load_keys(&self) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?; @@ -116,18 +116,18 @@ impl Database for Sqlite { let opt_valid_until: Option = row.get(1)?; match opt_valid_until { - Some(valid_until) => Ok(auth::PeerKey { + Some(valid_until) => Ok(authentication::PeerKey { key: key.parse::().unwrap(), valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }), - None => Ok(auth::PeerKey { + None => Ok(authentication::PeerKey { key: key.parse::().unwrap(), valid_until: None, }), } })?; - let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); + let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); Ok(keys) } @@ -216,7 +216,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::get_key_from_keys`](crate::core::databases::Database::get_key_from_keys). - fn get_key_from_keys(&self, key: &Key) -> Result, Error> { + fn get_key_from_keys(&self, key: &Key) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; @@ -230,11 +230,11 @@ impl Database for Sqlite { let key: String = f.get(0).unwrap(); match valid_until { - Some(valid_until) => auth::PeerKey { + Some(valid_until) => authentication::PeerKey { key: key.parse::().unwrap(), valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), }, - None => auth::PeerKey { + None => authentication::PeerKey { key: key.parse::().unwrap(), valid_until: None, }, @@ -243,7 +243,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::add_key_to_keys`](crate::core::databases::Database::add_key_to_keys). 
- fn add_key_to_keys(&self, auth_key: &auth::PeerKey) -> Result { + fn add_key_to_keys(&self, auth_key: &authentication::PeerKey) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let insert = match auth_key.valid_until { diff --git a/src/core/error.rs b/src/core/error.rs index f0e4b849e..434e3c825 100644 --- a/src/core/error.rs +++ b/src/core/error.rs @@ -12,7 +12,7 @@ use bittorrent_http_protocol::v1::responses; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_located_error::LocatedError; -use super::{auth::key::ParseKeyError, databases}; +use super::{authentication::key::ParseKeyError, databases}; /// Authentication or authorization error returned by the core `Tracker` #[derive(thiserror::Error, Debug, Clone)] @@ -20,7 +20,7 @@ pub enum Error { // Authentication errors #[error("The supplied key: {key:?}, is not valid: {source}")] PeerKeyNotValid { - key: super::auth::Key, + key: super::authentication::Key, source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, diff --git a/src/core/mod.rs b/src/core/mod.rs index e0e53128d..11945a79a 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -439,7 +439,7 @@ //! - Torrent metrics //! //! Refer to [`databases`] module for more information about persistence. 
-pub mod auth; +pub mod authentication; pub mod databases; pub mod error; pub mod services; @@ -455,7 +455,7 @@ use std::panic::Location; use std::sync::Arc; use std::time::Duration; -use auth::PeerKey; +use authentication::PeerKey; use bittorrent_primitives::info_hash::InfoHash; use error::PeerKeyError; use torrust_tracker_clock::clock::Time; @@ -468,7 +468,7 @@ use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_torrent_repository::entry::EntrySync; use torrust_tracker_torrent_repository::repository::Repository; -use self::auth::Key; +use self::authentication::Key; use self::torrent::Torrents; use crate::core::databases::Database; use crate::CurrentClock; @@ -491,7 +491,7 @@ pub struct Tracker { database: Arc>, /// Tracker users' keys. Only for private trackers. - keys: tokio::sync::RwLock>, + keys: tokio::sync::RwLock>, /// The service to check is a torrent is whitelisted. pub whitelist_authorization: Arc, @@ -786,7 +786,7 @@ impl Tracker { /// Will return an error if the the authentication key cannot be verified. /// /// # Context: Authentication - pub async fn authenticate(&self, key: &Key) -> Result<(), auth::Error> { + pub async fn authenticate(&self, key: &Key) -> Result<(), authentication::Error> { if self.is_private() { self.verify_auth_key(key).await } else { @@ -805,7 +805,7 @@ impl Tracker { /// - The key duration overflows the duration type maximum value. /// - The provided pre-generated key is invalid. /// - The key could not been persisted due to database issues. - pub async fn add_peer_key(&self, add_key_req: AddKeyRequest) -> Result { + pub async fn add_peer_key(&self, add_key_req: AddKeyRequest) -> Result { // code-review: all methods related to keys should be moved to a new independent "keys" service. match add_key_req.opt_key { @@ -878,7 +878,7 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. 
- pub async fn generate_permanent_auth_key(&self) -> Result { + pub async fn generate_permanent_auth_key(&self) -> Result { self.generate_auth_key(None).await } @@ -896,8 +896,11 @@ impl Tracker { /// /// * `lifetime` - The duration in seconds for the new key. The key will be /// no longer valid after `lifetime` seconds. - pub async fn generate_auth_key(&self, lifetime: Option) -> Result { - let auth_key = auth::key::generate_key(lifetime); + pub async fn generate_auth_key( + &self, + lifetime: Option, + ) -> Result { + let auth_key = authentication::key::generate_key(lifetime); self.database.add_key_to_keys(&auth_key)?; self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); @@ -918,7 +921,7 @@ impl Tracker { /// # Arguments /// /// * `key` - The pre-generated key. - pub async fn add_permanent_auth_key(&self, key: Key) -> Result { + pub async fn add_permanent_auth_key(&self, key: Key) -> Result { self.add_auth_key(key, None).await } @@ -942,7 +945,7 @@ impl Tracker { &self, key: Key, valid_until: Option, - ) -> Result { + ) -> Result { let auth_key = PeerKey { key, valid_until }; // code-review: should we return a friendly error instead of the DB @@ -973,21 +976,21 @@ impl Tracker { /// # Errors /// /// Will return a `key::Error` if unable to get any `auth_key`. 
- async fn verify_auth_key(&self, key: &Key) -> Result<(), auth::Error> { + async fn verify_auth_key(&self, key: &Key) -> Result<(), authentication::Error> { match self.keys.read().await.get(key) { - None => Err(auth::Error::UnableToReadKey { + None => Err(authentication::Error::UnableToReadKey { location: Location::caller(), key: Box::new(key.clone()), }), Some(key) => match self.config.private_mode { Some(private_mode) => { if private_mode.check_keys_expiration { - return auth::key::verify_key_expiration(key); + return authentication::key::verify_key_expiration(key); } Ok(()) } - None => auth::key::verify_key_expiration(key), + None => authentication::key::verify_key_expiration(key), }, } } @@ -1746,14 +1749,14 @@ mod tests { use std::str::FromStr; use std::time::Duration; - use crate::core::auth::{self}; + use crate::core::authentication::{self}; use crate::core::tests::the_tracker::private_tracker; #[tokio::test] async fn it_should_fail_authenticating_a_peer_when_it_uses_an_unregistered_key() { let tracker = private_tracker(); - let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); let result = tracker.authenticate(&unregistered_key).await; @@ -1764,7 +1767,7 @@ mod tests { async fn it_should_fail_verifying_an_unregistered_authentication_key() { let tracker = private_tracker(); - let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); assert!(tracker.verify_auth_key(&unregistered_key).await.is_err()); } @@ -1804,7 +1807,7 @@ mod tests { use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::v2_0_0::core::PrivateMode; - use crate::core::auth::Key; + use crate::core::authentication::Key; use crate::core::tests::the_tracker::private_tracker; use crate::CurrentClock; @@ -1856,7 
+1859,7 @@ mod tests { use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::v2_0_0::core::PrivateMode; - use crate::core::auth::Key; + use crate::core::authentication::Key; use crate::core::tests::the_tracker::private_tracker; use crate::core::AddKeyRequest; use crate::CurrentClock; @@ -1944,7 +1947,7 @@ mod tests { } mod pre_generated_keys { - use crate::core::auth::Key; + use crate::core::authentication::Key; use crate::core::tests::the_tracker::private_tracker; use crate::core::AddKeyRequest; diff --git a/src/servers/apis/v1/context/auth_key/handlers.rs b/src/servers/apis/v1/context/auth_key/handlers.rs index fed3ad301..bb8a98744 100644 --- a/src/servers/apis/v1/context/auth_key/handlers.rs +++ b/src/servers/apis/v1/context/auth_key/handlers.rs @@ -12,7 +12,7 @@ use super::responses::{ auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, failed_to_reload_keys_response, invalid_auth_key_duration_response, invalid_auth_key_response, }; -use crate::core::auth::Key; +use crate::core::authentication::Key; use crate::core::{AddKeyRequest, Tracker}; use crate::servers::apis::v1::context::auth_key::resources::AuthKey; use crate::servers::apis::v1::responses::{invalid_auth_key_param_response, ok_response}; diff --git a/src/servers/apis/v1/context/auth_key/resources.rs b/src/servers/apis/v1/context/auth_key/resources.rs index c26b2c4d3..a65eb2ab2 100644 --- a/src/servers/apis/v1/context/auth_key/resources.rs +++ b/src/servers/apis/v1/context/auth_key/resources.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use torrust_tracker_clock::conv::convert_from_iso_8601_to_timestamp; -use crate::core::auth::{self, Key}; +use crate::core::authentication::{self, Key}; /// A resource that represents an authentication key. 
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -17,9 +17,9 @@ pub struct AuthKey { pub expiry_time: Option, } -impl From for auth::PeerKey { +impl From for authentication::PeerKey { fn from(auth_key_resource: AuthKey) -> Self { - auth::PeerKey { + authentication::PeerKey { key: auth_key_resource.key.parse::().unwrap(), valid_until: auth_key_resource .expiry_time @@ -29,8 +29,8 @@ impl From for auth::PeerKey { } #[allow(deprecated)] -impl From for AuthKey { - fn from(auth_key: auth::PeerKey) -> Self { +impl From for AuthKey { + fn from(auth_key: authentication::PeerKey) -> Self { match (auth_key.valid_until, auth_key.expiry_time()) { (Some(valid_until), Some(expiry_time)) => AuthKey { key: auth_key.key.to_string(), @@ -54,7 +54,7 @@ mod tests { use torrust_tracker_clock::clock::{self, Time}; use super::AuthKey; - use crate::core::auth::{self, Key}; + use crate::core::authentication::{self, Key}; use crate::CurrentClock; struct TestTime { @@ -86,8 +86,8 @@ mod tests { }; assert_eq!( - auth::PeerKey::from(auth_key_resource), - auth::PeerKey { + authentication::PeerKey::from(auth_key_resource), + authentication::PeerKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line valid_until: Some(CurrentClock::now_add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap()) } @@ -99,7 +99,7 @@ mod tests { fn it_should_be_convertible_from_an_auth_key() { clock::Stopped::local_set_to_unix_epoch(); - let auth_key = auth::PeerKey { + let auth_key = authentication::PeerKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line valid_until: Some(CurrentClock::now_add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap()), }; diff --git a/src/servers/http/v1/extractors/authentication_key.rs b/src/servers/http/v1/extractors/authentication_key.rs index 6610b197a..d3b77c31a 100644 --- a/src/servers/http/v1/extractors/authentication_key.rs +++ 
b/src/servers/http/v1/extractors/authentication_key.rs @@ -53,7 +53,7 @@ use bittorrent_http_protocol::v1::responses; use hyper::StatusCode; use serde::Deserialize; -use crate::core::auth::Key; +use crate::core::authentication::Key; use crate::servers::http::v1::handlers::common::auth; /// Extractor for the [`Key`] struct. diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 61464f1d5..fe3825a41 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -21,7 +21,7 @@ use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; -use crate::core::auth::Key; +use crate::core::authentication::Key; use crate::core::statistics::event::sender::Sender; use crate::core::{whitelist, PeersWanted, Tracker}; use crate::servers::http::v1::extractors::announce_request::ExtractRequest; @@ -290,7 +290,7 @@ mod tests { use std::sync::Arc; use super::{private_tracker, sample_announce_request, sample_client_ip_sources}; - use crate::core::auth; + use crate::core::authentication; use crate::servers::http::v1::handlers::announce::handle_announce; use crate::servers::http::v1::handlers::announce::tests::assert_error_response; @@ -327,7 +327,7 @@ mod tests { let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); - let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); let maybe_key = Some(unregistered_key); diff --git a/src/servers/http/v1/handlers/common/auth.rs b/src/servers/http/v1/handlers/common/auth.rs index ff1d47e91..5497427d8 100644 --- a/src/servers/http/v1/handlers/common/auth.rs +++ b/src/servers/http/v1/handlers/common/auth.rs @@ -6,7 +6,7 @@ use std::panic::Location; use bittorrent_http_protocol::v1::responses; use thiserror::Error; -use 
crate::core::auth; +use crate::core::authentication; /// Authentication error. /// @@ -31,8 +31,8 @@ impl From for responses::error::Error { } } -impl From for responses::error::Error { - fn from(err: auth::Error) -> Self { +impl From for responses::error::Error { + fn from(err: authentication::Error) -> Self { responses::error::Error { failure_reason: format!("Authentication error: {err}"), } diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 9c57eda58..58c2d012b 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -15,7 +15,7 @@ use bittorrent_http_protocol::v1::services::peer_ip_resolver::{self, ClientIpSou use hyper::StatusCode; use torrust_tracker_primitives::core::ScrapeData; -use crate::core::auth::Key; +use crate::core::authentication::Key; use crate::core::statistics::event::sender::Sender; use crate::core::Tracker; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; @@ -205,7 +205,7 @@ mod tests { use torrust_tracker_primitives::core::ScrapeData; use super::{private_tracker, sample_client_ip_sources, sample_scrape_request}; - use crate::core::auth; + use crate::core::authentication; use crate::servers::http::v1::handlers::scrape::handle_scrape; #[tokio::test] @@ -239,7 +239,7 @@ mod tests { let stats_event_sender = Arc::new(stats_event_sender); let scrape_request = sample_scrape_request(); - let unregistered_key = auth::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); let maybe_key = Some(unregistered_key); let scrape_data = handle_scrape( diff --git a/src/shared/bit_torrent/common.rs b/src/shared/bit_torrent/common.rs index 46026ac47..5ba2e0492 100644 --- a/src/shared/bit_torrent/common.rs +++ b/src/shared/bit_torrent/common.rs @@ -17,6 +17,6 @@ pub const MAX_SCRAPE_TORRENTS: u8 = 74; /// HTTP tracker authentication 
key length. /// -/// For more information see function [`generate_key`](crate::core::auth::generate_key) to generate the -/// [`PeerKey`](crate::core::auth::PeerKey). +/// For more information see function [`generate_key`](crate::core::authentication::generate_key) to generate the +/// [`PeerKey`](crate::core::authentication::PeerKey). pub const AUTH_KEY_LENGTH: usize = 32; diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs index 9b2e740c0..40c10be5f 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -2,7 +2,7 @@ use std::time::Duration; use serde::Serialize; use torrust_tracker_api_client::v1::client::{headers_with_request_id, AddKeyForm, Client}; -use torrust_tracker_lib::core::auth::Key; +use torrust_tracker_lib::core::authentication::Key; use torrust_tracker_test_helpers::configuration; use uuid::Uuid; @@ -463,7 +463,7 @@ async fn should_not_allow_reloading_keys_for_unauthenticated_users() { mod deprecated_generate_key_endpoint { use torrust_tracker_api_client::v1::client::{headers_with_request_id, Client}; - use torrust_tracker_lib::core::auth::Key; + use torrust_tracker_lib::core::authentication::Key; use torrust_tracker_test_helpers::configuration; use uuid::Uuid; diff --git a/tests/servers/http/client.rs b/tests/servers/http/client.rs index b64a616cd..9fc278536 100644 --- a/tests/servers/http/client.rs +++ b/tests/servers/http/client.rs @@ -1,7 +1,7 @@ use std::net::IpAddr; use reqwest::{Client as ReqwestClient, Response}; -use torrust_tracker_lib::core::auth::Key; +use torrust_tracker_lib::core::authentication::Key; use super::requests::announce::{self, Query}; use super::requests::scrape; diff --git a/tests/servers/http/connection_info.rs b/tests/servers/http/connection_info.rs index 123ac05f0..327bc0073 100644 --- a/tests/servers/http/connection_info.rs +++ b/tests/servers/http/connection_info.rs @@ -1,4 +1,4 @@ -use 
torrust_tracker_lib::core::auth::Key; +use torrust_tracker_lib::core::authentication::Key; #[derive(Clone, Debug)] pub struct ConnectionInfo { diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 2cec1790f..961caf017 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -1381,7 +1381,7 @@ mod configured_as_private { use std::time::Duration; use bittorrent_primitives::info_hash::InfoHash; - use torrust_tracker_lib::core::auth::Key; + use torrust_tracker_lib::core::authentication::Key; use torrust_tracker_test_helpers::configuration; use crate::common::logging; @@ -1467,7 +1467,7 @@ mod configured_as_private { use aquatic_udp_protocol::PeerId; use bittorrent_primitives::info_hash::InfoHash; - use torrust_tracker_lib::core::auth::Key; + use torrust_tracker_lib::core::authentication::Key; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; From e75728a807303bd525faacdb6436930a993a45ba Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 20 Jan 2025 16:48:29 +0000 Subject: [PATCH 114/802] refactor: [#1191] extract core::authentication::Facade type It's a temporary type to extract responsability from the tracker. 
--- src/core/authentication/mod.rs | 292 +++++++++++++++++++++++++++++++++ 1 file changed, 292 insertions(+) diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index d0f72340d..358678def 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -1,5 +1,297 @@ +use std::panic::Location; +use std::sync::Arc; +use std::time::Duration; + +use torrust_tracker_clock::clock::Time; +use torrust_tracker_configuration::Core; +use torrust_tracker_located_error::Located; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::databases::{self, Database}; +use super::error::PeerKeyError; +use crate::CurrentClock; + pub mod key; pub type PeerKey = key::PeerKey; pub type Key = key::Key; pub type Error = key::Error; + +/// This type contains the info needed to add a new tracker key. +/// +/// You can upload a pre-generated key or let the app to generate a new one. +/// You can also set an expiration date or leave it empty (`None`) if you want +/// to create a permanent key that does not expire. +#[derive(Debug)] +pub struct AddKeyRequest { + /// The pre-generated key. Use `None` to generate a random key. + pub opt_key: Option, + + /// How long the key will be valid in seconds. Use `None` for permanent keys. + pub opt_seconds_valid: Option, +} + +pub struct Facade { + /// The tracker configuration. + config: Core, + + /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) + /// or [`MySQL`](crate::core::databases::mysql) + database: Arc>, + + /// Tracker users' keys. Only for private trackers. + keys: tokio::sync::RwLock>, +} + +impl Facade { + #[must_use] + pub fn new(config: &Core, database: &Arc>) -> Self { + Self { + config: config.clone(), + database: database.clone(), + keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), + } + } + + /// It authenticates the peer `key` against the `Tracker` authentication + /// key list. 
+ /// + /// # Errors + /// + /// Will return an error if the the authentication key cannot be verified. + /// + /// # Context: Authentication + pub async fn authenticate(&self, key: &Key) -> Result<(), Error> { + if self.is_private() { + self.verify_auth_key(key).await + } else { + Ok(()) + } + } + + /// Returns `true` is the tracker is in private mode. + pub fn is_private(&self) -> bool { + self.config.private + } + + /// It verifies an authentication key. + /// + /// # Context: Authentication + /// + /// # Errors + /// + /// Will return a `key::Error` if unable to get any `auth_key`. + async fn verify_auth_key(&self, key: &Key) -> Result<(), Error> { + match self.keys.read().await.get(key) { + None => Err(Error::UnableToReadKey { + location: Location::caller(), + key: Box::new(key.clone()), + }), + Some(key) => match self.config.private_mode { + Some(private_mode) => { + if private_mode.check_keys_expiration { + return key::verify_key_expiration(key); + } + + Ok(()) + } + None => key::verify_key_expiration(key), + }, + } + } + + /// Adds new peer keys to the tracker. + /// + /// Keys can be pre-generated or randomly created. They can also be permanent or expire. + /// + /// # Errors + /// + /// Will return an error if: + /// + /// - The key duration overflows the duration type maximum value. + /// - The provided pre-generated key is invalid. + /// - The key could not been persisted due to database issues. + pub async fn add_peer_key(&self, add_key_req: AddKeyRequest) -> Result { + // code-review: all methods related to keys should be moved to a new independent "keys" service. 
+ + match add_key_req.opt_key { + // Upload pre-generated key + Some(pre_existing_key) => { + if let Some(seconds_valid) = add_key_req.opt_seconds_valid { + // Expiring key + let Some(valid_until) = CurrentClock::now_add(&Duration::from_secs(seconds_valid)) else { + return Err(PeerKeyError::DurationOverflow { seconds_valid }); + }; + + let key = pre_existing_key.parse::(); + + match key { + Ok(key) => match self.add_auth_key(key, Some(valid_until)).await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { + source: Located(err).into(), + }), + }, + Err(err) => Err(PeerKeyError::InvalidKey { + key: pre_existing_key, + source: Located(err).into(), + }), + } + } else { + // Permanent key + let key = pre_existing_key.parse::(); + + match key { + Ok(key) => match self.add_permanent_auth_key(key).await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { + source: Located(err).into(), + }), + }, + Err(err) => Err(PeerKeyError::InvalidKey { + key: pre_existing_key, + source: Located(err).into(), + }), + } + } + } + // Generate a new random key + None => match add_key_req.opt_seconds_valid { + // Expiring key + Some(seconds_valid) => match self.generate_auth_key(Some(Duration::from_secs(seconds_valid))).await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { + source: Located(err).into(), + }), + }, + // Permanent key + None => match self.generate_permanent_auth_key().await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { + source: Located(err).into(), + }), + }, + }, + } + } + + /// It generates a new permanent authentication key. + /// + /// Authentication keys are used by HTTP trackers. + /// + /// # Context: Authentication + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the database. 
+ pub async fn generate_permanent_auth_key(&self) -> Result { + self.generate_auth_key(None).await + } + + /// It generates a new expiring authentication key. + /// + /// Authentication keys are used by HTTP trackers. + /// + /// # Context: Authentication + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the database. + /// + /// # Arguments + /// + /// * `lifetime` - The duration in seconds for the new key. The key will be + /// no longer valid after `lifetime` seconds. + pub async fn generate_auth_key(&self, lifetime: Option) -> Result { + let auth_key = key::generate_key(lifetime); + + self.database.add_key_to_keys(&auth_key)?; + self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); + Ok(auth_key) + } + + /// It adds a pre-generated permanent authentication key. + /// + /// Authentication keys are used by HTTP trackers. + /// + /// # Context: Authentication + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the + /// database. For example, if the key already exist. + /// + /// # Arguments + /// + /// * `key` - The pre-generated key. + pub async fn add_permanent_auth_key(&self, key: Key) -> Result { + self.add_auth_key(key, None).await + } + + /// It adds a pre-generated authentication key. + /// + /// Authentication keys are used by HTTP trackers. + /// + /// # Context: Authentication + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the + /// database. For example, if the key already exist. + /// + /// # Arguments + /// + /// * `key` - The pre-generated key. + /// * `lifetime` - The duration in seconds for the new key. The key will be + /// no longer valid after `lifetime` seconds. 
+ pub async fn add_auth_key( + &self, + key: Key, + valid_until: Option, + ) -> Result { + let auth_key = PeerKey { key, valid_until }; + + // code-review: should we return a friendly error instead of the DB + // constrain error when the key already exist? For now, it's returning + // the specif error for each DB driver when a UNIQUE constrain fails. + self.database.add_key_to_keys(&auth_key)?; + self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); + Ok(auth_key) + } + + /// It removes an authentication key. + /// + /// # Context: Authentication + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `key` to the database. + pub async fn remove_auth_key(&self, key: &Key) -> Result<(), databases::error::Error> { + self.database.remove_key_from_keys(key)?; + self.keys.write().await.remove(key); + Ok(()) + } + + /// The `Tracker` stores the authentication keys in memory and in the database. + /// In case you need to restart the `Tracker` you can load the keys from the database + /// into memory with this function. Keys are automatically stored in the database when they + /// are generated. + /// + /// # Context: Authentication + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to `load_keys` from the database. 
+ pub async fn load_keys_from_database(&self) -> Result<(), databases::error::Error> { + let keys_from_database = self.database.load_keys()?; + let mut keys = self.keys.write().await; + + keys.clear(); + + for key in keys_from_database { + keys.insert(key.key.clone(), key); + } + + Ok(()) + } +} From 986a2f661a0012a8f1038910f19f271da31abda8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 20 Jan 2025 17:01:56 +0000 Subject: [PATCH 115/802] fix: [#1191] format --- src/core/error.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/core/error.rs b/src/core/error.rs index 434e3c825..1d0e974e5 100644 --- a/src/core/error.rs +++ b/src/core/error.rs @@ -12,7 +12,8 @@ use bittorrent_http_protocol::v1::responses; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_located_error::LocatedError; -use super::{authentication::key::ParseKeyError, databases}; +use super::authentication::key::ParseKeyError; +use super::databases; /// Authentication or authorization error returned by the core `Tracker` #[derive(thiserror::Error, Debug, Clone)] From 39c2a8fb80902bd4a013c79f400fa5f75c790c79 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 20 Jan 2025 17:41:36 +0000 Subject: [PATCH 116/802] refactor: [#1191] replace authentication methods with extracted service in the core tracker --- src/app_test.rs | 6 +- src/bootstrap/app.rs | 11 +- src/bootstrap/jobs/http_tracker.rs | 6 +- src/bootstrap/jobs/tracker_apis.rs | 5 +- src/container.rs | 3 +- src/core/authentication/mod.rs | 11 +- src/core/mod.rs | 249 ++++++------------ src/core/services/mod.rs | 5 +- src/core/services/statistics/mod.rs | 10 +- src/core/services/torrent.rs | 88 +++++-- src/servers/apis/server.rs | 6 +- .../apis/v1/context/auth_key/handlers.rs | 4 +- src/servers/http/server.rs | 6 +- src/servers/http/v1/handlers/announce.rs | 10 +- src/servers/http/v1/handlers/scrape.rs | 16 +- src/servers/http/v1/services/announce.rs | 9 +- src/servers/http/v1/services/scrape.rs | 8 +- 
src/servers/udp/handlers.rs | 25 +- src/servers/udp/server/mod.rs | 9 +- src/shared/bit_torrent/common.rs | 2 +- tests/servers/api/environment.rs | 2 +- tests/servers/http/environment.rs | 2 +- tests/servers/udp/environment.rs | 2 +- 23 files changed, 243 insertions(+), 252 deletions(-) diff --git a/src/app_test.rs b/src/app_test.rs index ffd55581e..884aed6ef 100644 --- a/src/app_test.rs +++ b/src/app_test.rs @@ -5,8 +5,8 @@ use torrust_tracker_configuration::Configuration; use crate::core::databases::Database; use crate::core::services::initialize_database; -use crate::core::whitelist; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; +use crate::core::{authentication, whitelist}; /// Initialize the tracker dependencies. #[allow(clippy::type_complexity)] @@ -17,6 +17,7 @@ pub fn initialize_tracker_dependencies( Arc>, Arc, Arc, + Arc, ) { let database = initialize_database(config); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); @@ -24,6 +25,7 @@ pub fn initialize_tracker_dependencies( &config.core, &in_memory_whitelist.clone(), )); + let authentication = Arc::new(authentication::Facade::new(&config.core, &database.clone())); - (database, in_memory_whitelist, whitelist_authorization) + (database, in_memory_whitelist, whitelist_authorization, authentication) } diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 5dbdd15cb..bc6b7a6bd 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -23,8 +23,8 @@ use super::config::initialize_configuration; use crate::bootstrap; use crate::container::AppContainer; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; -use crate::core::whitelist; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; +use crate::core::{authentication, whitelist}; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; use 
crate::shared::crypto::ephemeral_instance_keys; @@ -89,8 +89,14 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { &in_memory_whitelist.clone(), )); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + let authentication = Arc::new(authentication::Facade::new(&configuration.core, &database.clone())); - let tracker = Arc::new(initialize_tracker(configuration, &database, &whitelist_authorization)); + let tracker = Arc::new(initialize_tracker( + configuration, + &database, + &whitelist_authorization, + &authentication, + )); AppContainer { tracker, @@ -99,6 +105,7 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { stats_event_sender, stats_repository, whitelist_manager, + authentication, } } diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index dea866648..b07ff935c 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -101,8 +101,8 @@ mod tests { use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::http_tracker::start_job; use crate::core::services::{initialize_database, initialize_tracker, statistics}; - use crate::core::whitelist; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; + use crate::core::{authentication, whitelist}; use crate::servers::http::Version; use crate::servers::registar::Registar; @@ -123,7 +123,9 @@ mod tests { &cfg.core, &in_memory_whitelist.clone(), )); - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); + let authentication = Arc::new(authentication::Facade::new(&cfg.core, &database.clone())); + + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); let version = Version::V1; diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 7e06829c4..70e2e6737 100644 --- 
a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -150,8 +150,8 @@ mod tests { use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::tracker_apis::start_job; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; - use crate::core::whitelist; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; + use crate::core::{authentication, whitelist}; use crate::servers::apis::Version; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; @@ -176,8 +176,9 @@ mod tests { &in_memory_whitelist.clone(), )); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + let authentication = Arc::new(authentication::Facade::new(&cfg.core, &database.clone())); - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); let version = Version::V1; diff --git a/src/container.rs b/src/container.rs index fd75601ae..3c9229b89 100644 --- a/src/container.rs +++ b/src/container.rs @@ -5,7 +5,7 @@ use tokio::sync::RwLock; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; use crate::core::whitelist::manager::WhiteListManager; -use crate::core::{whitelist, Tracker}; +use crate::core::{authentication, whitelist, Tracker}; use crate::servers::udp::server::banning::BanService; pub struct AppContainer { @@ -15,4 +15,5 @@ pub struct AppContainer { pub stats_event_sender: Arc>>, pub stats_repository: Arc, pub whitelist_manager: Arc, + pub authentication: Arc, } diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index 358678def..d03502988 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -81,7 +81,7 @@ impl Facade { /// # Errors /// /// Will 
return a `key::Error` if unable to get any `auth_key`. - async fn verify_auth_key(&self, key: &Key) -> Result<(), Error> { + pub async fn verify_auth_key(&self, key: &Key) -> Result<(), Error> { match self.keys.read().await.get(key) { None => Err(Error::UnableToReadKey { location: Location::caller(), @@ -268,10 +268,17 @@ impl Facade { /// Will return a `database::Error` if unable to remove the `key` to the database. pub async fn remove_auth_key(&self, key: &Key) -> Result<(), databases::error::Error> { self.database.remove_key_from_keys(key)?; - self.keys.write().await.remove(key); + self.remove_in_memory_auth_key(key).await; Ok(()) } + /// It removes an authentication key from memory. + /// + /// # Context: Authentication + pub async fn remove_in_memory_auth_key(&self, key: &Key) { + self.keys.write().await.remove(key); + } + /// The `Tracker` stores the authentication keys in memory and in the database. /// In case you need to restart the `Tracker` you can load the keys from the database /// into memory with this function. Keys are automatically stored in the database when they diff --git a/src/core/mod.rs b/src/core/mod.rs index 11945a79a..4b8d4c7f2 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -371,7 +371,7 @@ //! //! To learn more about tracker authentication, refer to the following modules : //! -//! - [`auth`] module. +//! - [`authentication`] module. //! - [`core`](crate::core) module. //! - [`http`](crate::servers::http) module. //! 
@@ -451,16 +451,14 @@ pub mod peer_tests; use std::cmp::max; use std::net::IpAddr; -use std::panic::Location; use std::sync::Arc; use std::time::Duration; -use authentication::PeerKey; +use authentication::AddKeyRequest; use bittorrent_primitives::info_hash::InfoHash; use error::PeerKeyError; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT}; -use torrust_tracker_located_error::Located; use torrust_tracker_primitives::core::{AnnounceData, ScrapeData}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; @@ -490,14 +488,14 @@ pub struct Tracker { /// or [`MySQL`](crate::core::databases::mysql) database: Arc>, - /// Tracker users' keys. Only for private trackers. - keys: tokio::sync::RwLock>, - /// The service to check is a torrent is whitelisted. pub whitelist_authorization: Arc, /// The in-memory torrents repository. torrents: Arc, + + /// The service to authenticate peers. + authentication: Arc, } /// How many peers the peer announcing wants in the announce response. @@ -542,20 +540,6 @@ impl From for PeersWanted { } } -/// This type contains the info needed to add a new tracker key. -/// -/// You can upload a pre-generated key or let the app to generate a new one. -/// You can also set an expiration date or leave it empty (`None`) if you want -/// to create a permanent key that does not expire. -#[derive(Debug)] -pub struct AddKeyRequest { - /// The pre-generated key. Use `None` to generate a random key. - pub opt_key: Option, - - /// How long the key will be valid in seconds. Use `None` for permanent keys. - pub opt_seconds_valid: Option, -} - impl Tracker { /// `Tracker` constructor. 
/// @@ -566,45 +550,53 @@ impl Tracker { config: &Core, database: &Arc>, whitelist_authorization: &Arc, + authentication: &Arc, ) -> Result { Ok(Tracker { config: config.clone(), database: database.clone(), - keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), whitelist_authorization: whitelist_authorization.clone(), torrents: Arc::default(), + authentication: authentication.clone(), }) } /// Returns `true` is the tracker is in public mode. + #[must_use] pub fn is_public(&self) -> bool { !self.config.private } /// Returns `true` is the tracker is in private mode. + #[must_use] pub fn is_private(&self) -> bool { self.config.private } /// Returns `true` is the tracker is in whitelisted mode. + #[must_use] pub fn is_listed(&self) -> bool { self.config.listed } /// Returns `true` if the tracker requires authentication. + #[must_use] pub fn requires_authentication(&self) -> bool { self.is_private() } /// Returns `true` is the tracker is in whitelisted mode. + #[must_use] pub fn is_behind_reverse_proxy(&self) -> bool { self.config.net.on_reverse_proxy } + #[must_use] pub fn get_announce_policy(&self) -> AnnouncePolicy { self.config.announce_policy } + #[must_use] pub fn get_maybe_external_ip(&self) -> Option { self.config.net.external_ip } @@ -709,6 +701,7 @@ impl Tracker { /// # Context: Tracker /// /// Get torrent peers for a given torrent. + #[must_use] pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { match self.torrents.get(info_hash) { None => vec![], @@ -721,6 +714,7 @@ impl Tracker { /// needed for a `announce` request response. /// /// # Context: Tracker + #[must_use] pub fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { let swarm_metadata_before = match self.torrents.get_swarm_metadata(info_hash) { Some(swarm_metadata) => swarm_metadata, @@ -760,6 +754,7 @@ impl Tracker { /// /// # Panics /// Panics if unable to get the torrent metrics. 
+ #[must_use] pub fn get_torrents_metrics(&self) -> TorrentsMetrics { self.torrents.get_metrics() } @@ -781,23 +776,21 @@ impl Tracker { /// It authenticates the peer `key` against the `Tracker` authentication /// key list. /// + /// # Context: Authentication + /// /// # Errors /// /// Will return an error if the the authentication key cannot be verified. - /// - /// # Context: Authentication pub async fn authenticate(&self, key: &Key) -> Result<(), authentication::Error> { - if self.is_private() { - self.verify_auth_key(key).await - } else { - Ok(()) - } + self.authentication.authenticate(key).await } /// Adds new peer keys to the tracker. /// /// Keys can be pre-generated or randomly created. They can also be permanent or expire. /// + /// # Context: Authentication + /// /// # Errors /// /// Will return an error if: @@ -806,67 +799,7 @@ impl Tracker { /// - The provided pre-generated key is invalid. /// - The key could not been persisted due to database issues. pub async fn add_peer_key(&self, add_key_req: AddKeyRequest) -> Result { - // code-review: all methods related to keys should be moved to a new independent "keys" service. 
- - match add_key_req.opt_key { - // Upload pre-generated key - Some(pre_existing_key) => { - if let Some(seconds_valid) = add_key_req.opt_seconds_valid { - // Expiring key - let Some(valid_until) = CurrentClock::now_add(&Duration::from_secs(seconds_valid)) else { - return Err(PeerKeyError::DurationOverflow { seconds_valid }); - }; - - let key = pre_existing_key.parse::(); - - match key { - Ok(key) => match self.add_auth_key(key, Some(valid_until)).await { - Ok(auth_key) => Ok(auth_key), - Err(err) => Err(PeerKeyError::DatabaseError { - source: Located(err).into(), - }), - }, - Err(err) => Err(PeerKeyError::InvalidKey { - key: pre_existing_key, - source: Located(err).into(), - }), - } - } else { - // Permanent key - let key = pre_existing_key.parse::(); - - match key { - Ok(key) => match self.add_permanent_auth_key(key).await { - Ok(auth_key) => Ok(auth_key), - Err(err) => Err(PeerKeyError::DatabaseError { - source: Located(err).into(), - }), - }, - Err(err) => Err(PeerKeyError::InvalidKey { - key: pre_existing_key, - source: Located(err).into(), - }), - } - } - } - // Generate a new random key - None => match add_key_req.opt_seconds_valid { - // Expiring key - Some(seconds_valid) => match self.generate_auth_key(Some(Duration::from_secs(seconds_valid))).await { - Ok(auth_key) => Ok(auth_key), - Err(err) => Err(PeerKeyError::DatabaseError { - source: Located(err).into(), - }), - }, - // Permanent key - None => match self.generate_permanent_auth_key().await { - Ok(auth_key) => Ok(auth_key), - Err(err) => Err(PeerKeyError::DatabaseError { - source: Located(err).into(), - }), - }, - }, - } + self.authentication.add_peer_key(add_key_req).await } /// It generates a new permanent authentication key. @@ -879,7 +812,7 @@ impl Tracker { /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. 
pub async fn generate_permanent_auth_key(&self) -> Result { - self.generate_auth_key(None).await + self.authentication.generate_auth_key(None).await } /// It generates a new expiring authentication key. @@ -900,11 +833,7 @@ impl Tracker { &self, lifetime: Option, ) -> Result { - let auth_key = authentication::key::generate_key(lifetime); - - self.database.add_key_to_keys(&auth_key)?; - self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); - Ok(auth_key) + self.authentication.generate_auth_key(lifetime).await } /// It adds a pre-generated permanent authentication key. @@ -922,7 +851,7 @@ impl Tracker { /// /// * `key` - The pre-generated key. pub async fn add_permanent_auth_key(&self, key: Key) -> Result { - self.add_auth_key(key, None).await + self.authentication.add_auth_key(key, None).await } /// It adds a pre-generated authentication key. @@ -946,14 +875,7 @@ impl Tracker { key: Key, valid_until: Option, ) -> Result { - let auth_key = PeerKey { key, valid_until }; - - // code-review: should we return a friendly error instead of the DB - // constrain error when the key already exist? For now, it's returning - // the specif error for each DB driver when a UNIQUE constrain fails. - self.database.add_key_to_keys(&auth_key)?; - self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); - Ok(auth_key) + self.authentication.add_auth_key(key, valid_until).await } /// It removes an authentication key. @@ -964,35 +886,7 @@ impl Tracker { /// /// Will return a `database::Error` if unable to remove the `key` to the database. pub async fn remove_auth_key(&self, key: &Key) -> Result<(), databases::error::Error> { - self.database.remove_key_from_keys(key)?; - self.keys.write().await.remove(key); - Ok(()) - } - - /// It verifies an authentication key. - /// - /// # Context: Authentication - /// - /// # Errors - /// - /// Will return a `key::Error` if unable to get any `auth_key`. 
- async fn verify_auth_key(&self, key: &Key) -> Result<(), authentication::Error> { - match self.keys.read().await.get(key) { - None => Err(authentication::Error::UnableToReadKey { - location: Location::caller(), - key: Box::new(key.clone()), - }), - Some(key) => match self.config.private_mode { - Some(private_mode) => { - if private_mode.check_keys_expiration { - return authentication::key::verify_key_expiration(key); - } - - Ok(()) - } - None => authentication::key::verify_key_expiration(key), - }, - } + self.authentication.remove_auth_key(key).await } /// The `Tracker` stores the authentication keys in memory and in the database. @@ -1006,16 +900,7 @@ impl Tracker { /// /// Will return a `database::Error` if unable to `load_keys` from the database. pub async fn load_keys_from_database(&self) -> Result<(), databases::error::Error> { - let keys_from_database = self.database.load_keys()?; - let mut keys = self.keys.write().await; - - keys.clear(); - - for key in keys_from_database { - keys.insert(key.key.clone(), key); - } - - Ok(()) + self.authentication.load_keys_from_database().await } /// It drops the database tables. 
@@ -1051,6 +936,7 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_configuration::v2_0_0::core::PrivateMode; use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration; @@ -1063,36 +949,56 @@ mod tests { fn public_tracker() -> Tracker { let config = configuration::ephemeral_public(); - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); - initialize_tracker(&config, &database, &whitelist_authorization) + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + initialize_tracker_dependencies(&config); + + initialize_tracker(&config, &database, &whitelist_authorization, &authentication) } fn private_tracker() -> Tracker { let config = configuration::ephemeral_private(); - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); - initialize_tracker(&config, &database, &whitelist_authorization) + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + initialize_tracker_dependencies(&config); + + initialize_tracker(&config, &database, &whitelist_authorization, &authentication) } fn whitelisted_tracker() -> (Tracker, Arc, Arc) { let config = configuration::ephemeral_listed(); - let (database, in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + let (database, in_memory_whitelist, whitelist_authorization, authentication) = + initialize_tracker_dependencies(&config); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let tracker = initialize_tracker(&config, &database, &whitelist_authorization); + let tracker = initialize_tracker(&config, &database, 
&whitelist_authorization, &authentication); (tracker, whitelist_authorization, whitelist_manager) } + fn private_tracker_without_checking_keys_expiration() -> Tracker { + let mut config = configuration::ephemeral_private(); + + config.core.private_mode = Some(PrivateMode { + check_keys_expiration: false, + }); + + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + initialize_tracker_dependencies(&config); + + initialize_tracker(&config, &database, &whitelist_authorization, &authentication) + } + pub fn tracker_persisting_torrents_in_database() -> Tracker { let mut config = configuration::ephemeral_listed(); config.core.tracker_policy.persistent_torrent_completed_stat = true; - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); - initialize_tracker(&config, &database, &whitelist_authorization) + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + initialize_tracker_dependencies(&config); + + initialize_tracker(&config, &database, &whitelist_authorization, &authentication) } fn sample_info_hash() -> InfoHash { @@ -1203,7 +1109,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - tracker.upsert_peer_and_get_stats(&info_hash, &peer); + let _ = tracker.upsert_peer_and_get_stats(&info_hash, &peer); let peers = tracker.get_torrent_peers(&info_hash); @@ -1245,7 +1151,7 @@ mod tests { event: AnnounceEvent::Completed, }; - tracker.upsert_peer_and_get_stats(&info_hash, &peer); + let _ = tracker.upsert_peer_and_get_stats(&info_hash, &peer); } let peers = tracker.get_torrent_peers(&info_hash); @@ -1260,7 +1166,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - tracker.upsert_peer_and_get_stats(&info_hash, &peer); + let _ = tracker.upsert_peer_and_get_stats(&info_hash, &peer); let peers = tracker.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); @@ -1275,7 +1181,7 @@ mod tests { let excluded_peer = 
sample_peer(); - tracker.upsert_peer_and_get_stats(&info_hash, &excluded_peer); + let _ = tracker.upsert_peer_and_get_stats(&info_hash, &excluded_peer); // Add 74 peers for idx in 2..=75 { @@ -1289,7 +1195,7 @@ mod tests { event: AnnounceEvent::Completed, }; - tracker.upsert_peer_and_get_stats(&info_hash, &peer); + let _ = tracker.upsert_peer_and_get_stats(&info_hash, &peer); } let peers = tracker.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); @@ -1301,7 +1207,7 @@ mod tests { async fn it_should_return_the_torrent_metrics() { let tracker = public_tracker(); - tracker.upsert_peer_and_get_stats(&sample_info_hash(), &leecher()); + let _ = tracker.upsert_peer_and_get_stats(&sample_info_hash(), &leecher()); let torrent_metrics = tracker.get_torrents_metrics(); @@ -1322,7 +1228,7 @@ mod tests { let start_time = std::time::Instant::now(); for i in 0..1_000_000 { - tracker.upsert_peer_and_get_stats(&gen_seeded_infohash(&i), &leecher()); + let _ = tracker.upsert_peer_and_get_stats(&gen_seeded_infohash(&i), &leecher()); } let result_a = start_time.elapsed(); @@ -1769,7 +1675,7 @@ mod tests { let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - assert!(tracker.verify_auth_key(&unregistered_key).await.is_err()); + assert!(tracker.authentication.verify_auth_key(&unregistered_key).await.is_err()); } #[tokio::test] @@ -1781,7 +1687,7 @@ mod tests { let result = tracker.remove_auth_key(&expiring_key.key()).await; assert!(result.is_ok()); - assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_err()); + assert!(tracker.authentication.verify_auth_key(&expiring_key.key()).await.is_err()); } #[tokio::test] @@ -1791,12 +1697,12 @@ mod tests { let expiring_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); // Remove the newly generated key in memory - tracker.keys.write().await.remove(&expiring_key.key()); + tracker.authentication.remove_in_memory_auth_key(&expiring_key.key()).await; 
let result = tracker.load_keys_from_database().await; assert!(result.is_ok()); - assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_ok()); + assert!(tracker.authentication.verify_auth_key(&expiring_key.key()).await.is_ok()); } mod with_expiring_and { @@ -1805,10 +1711,11 @@ mod tests { use std::time::Duration; use torrust_tracker_clock::clock::Time; - use torrust_tracker_configuration::v2_0_0::core::PrivateMode; use crate::core::authentication::Key; - use crate::core::tests::the_tracker::private_tracker; + use crate::core::tests::the_tracker::{ + private_tracker, private_tracker_without_checking_keys_expiration, + }; use crate::CurrentClock; #[tokio::test] @@ -1836,11 +1743,7 @@ mod tests { #[tokio::test] async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { - let mut tracker = private_tracker(); - - tracker.config.private_mode = Some(PrivateMode { - check_keys_expiration: false, - }); + let tracker = private_tracker_without_checking_keys_expiration(); let past_timestamp = Duration::ZERO; @@ -1859,9 +1762,8 @@ mod tests { use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::v2_0_0::core::PrivateMode; - use crate::core::authentication::Key; + use crate::core::authentication::{AddKeyRequest, Key}; use crate::core::tests::the_tracker::private_tracker; - use crate::core::AddKeyRequest; use crate::CurrentClock; #[tokio::test] @@ -1947,9 +1849,8 @@ mod tests { } mod pre_generated_keys { - use crate::core::authentication::Key; + use crate::core::authentication::{AddKeyRequest, Key}; use crate::core::tests::the_tracker::private_tracker; - use crate::core::AddKeyRequest; #[tokio::test] async fn it_should_add_a_pre_generated_key() { diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index 611ea24d2..b1d0d441d 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -14,10 +14,10 @@ use torrust_tracker_configuration::v2_0_0::database; use 
torrust_tracker_configuration::Configuration; use super::databases::{self, Database}; -use super::whitelist; use super::whitelist::manager::WhiteListManager; use super::whitelist::repository::in_memory::InMemoryWhitelist; use super::whitelist::repository::persisted::DatabaseWhitelist; +use super::{authentication, whitelist}; use crate::core::Tracker; /// It returns a new tracker building its dependencies. @@ -30,8 +30,9 @@ pub fn initialize_tracker( config: &Configuration, database: &Arc>, whitelist_authorization: &Arc, + authentication: &Arc, ) -> Tracker { - match Tracker::new(&Arc::new(config).core, database, whitelist_authorization) { + match Tracker::new(&Arc::new(config).core, database, whitelist_authorization, authentication) { Ok(tracker) => tracker, Err(error) => { panic!("{}", error) diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index 3567de2a9..4081fd6bb 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -132,10 +132,16 @@ mod tests { async fn the_statistics_service_should_return_the_tracker_metrics() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(&config); let (_stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_repository = Arc::new(stats_repository); - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); + + let tracker = Arc::new(initialize_tracker( + &config, + &database, + &whitelist_authorization, + &authentication, + )); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 457aa54d8..c23c7e04b 100644 --- a/src/core/services/torrent.rs +++ 
b/src/core/services/torrent.rs @@ -142,8 +142,10 @@ mod tests { async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); - let tracker = initialize_tracker(&config, &database, &whitelist_authorization); + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + initialize_tracker_dependencies(&config); + + let tracker = initialize_tracker(&config, &database, &whitelist_authorization, &authentication); let tracker = Arc::new(tracker); @@ -160,12 +162,19 @@ mod tests { async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + initialize_tracker_dependencies(&config); + + let tracker = Arc::new(initialize_tracker( + &config, + &database, + &whitelist_authorization, + &authentication, + )); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - tracker.upsert_peer_and_get_stats(&info_hash, &sample_peer()); + let _ = tracker.upsert_peer_and_get_stats(&info_hash, &sample_peer()); let torrent_info = get_torrent_info(tracker.clone(), &info_hash).await.unwrap(); @@ -204,8 +213,15 @@ mod tests { async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + 
initialize_tracker_dependencies(&config); + + let tracker = Arc::new(initialize_tracker( + &config, + &database, + &whitelist_authorization, + &authentication, + )); let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; @@ -216,13 +232,20 @@ mod tests { async fn should_return_a_summarized_info_for_all_torrents() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + initialize_tracker_dependencies(&config); + + let tracker = Arc::new(initialize_tracker( + &config, + &database, + &whitelist_authorization, + &authentication, + )); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - tracker.upsert_peer_and_get_stats(&info_hash, &sample_peer()); + let _ = tracker.upsert_peer_and_get_stats(&info_hash, &sample_peer()); let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; @@ -241,16 +264,23 @@ mod tests { async fn should_allow_limiting_the_number_of_torrents_in_the_result() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + initialize_tracker_dependencies(&config); + + let tracker = Arc::new(initialize_tracker( + &config, + &database, + &whitelist_authorization, + &authentication, + )); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); let info_hash2 = 
InfoHash::from_str(&hash2).unwrap(); - tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()); - tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()); + let _ = tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()); + let _ = tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()); let offset = 0; let limit = 1; @@ -264,16 +294,23 @@ mod tests { async fn should_allow_using_pagination_in_the_result() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + initialize_tracker_dependencies(&config); + + let tracker = Arc::new(initialize_tracker( + &config, + &database, + &whitelist_authorization, + &authentication, + )); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()); - tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()); + let _ = tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()); + let _ = tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()); let offset = 1; let limit = 4000; @@ -296,16 +333,23 @@ mod tests { async fn should_return_torrents_ordered_by_info_hash() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + initialize_tracker_dependencies(&config); + + let tracker = Arc::new(initialize_tracker( + &config, + 
&database, + &whitelist_authorization, + &authentication, + )); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); - tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()); + let _ = tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()); let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()); + let _ = tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()); let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index f98770359..a11442a53 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -343,8 +343,8 @@ mod tests { use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::make_rust_tls; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; - use crate::core::whitelist; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; + use crate::core::{authentication, whitelist}; use crate::servers::apis::server::{ApiServer, Launcher}; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; @@ -369,7 +369,9 @@ mod tests { &in_memory_whitelist.clone(), )); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); + let authentication = Arc::new(authentication::Facade::new(&cfg.core, &database.clone())); + + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); let bind_to = config.bind_address; diff --git a/src/servers/apis/v1/context/auth_key/handlers.rs b/src/servers/apis/v1/context/auth_key/handlers.rs index 
bb8a98744..bccc7d9eb 100644 --- a/src/servers/apis/v1/context/auth_key/handlers.rs +++ b/src/servers/apis/v1/context/auth_key/handlers.rs @@ -12,8 +12,8 @@ use super::responses::{ auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, failed_to_reload_keys_response, invalid_auth_key_duration_response, invalid_auth_key_response, }; -use crate::core::authentication::Key; -use crate::core::{AddKeyRequest, Tracker}; +use crate::core::authentication::{AddKeyRequest, Key}; +use crate::core::Tracker; use crate::servers::apis::v1::context::auth_key::resources::AuthKey; use crate::servers::apis::v1::responses::{invalid_auth_key_param_response, ok_response}; diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index b053628ce..e6370c775 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -247,8 +247,8 @@ mod tests { use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::make_rust_tls; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; - use crate::core::whitelist; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; + use crate::core::{authentication, whitelist}; use crate::servers::http::server::{HttpServer, Launcher}; use crate::servers::registar::Registar; @@ -268,7 +268,9 @@ mod tests { &in_memory_whitelist.clone(), )); let _whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); + let authentication = Arc::new(authentication::Facade::new(&cfg.core, &database.clone())); + + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); let http_trackers = cfg.http_trackers.clone().expect("missing HTTP trackers configuration"); let config = &http_trackers[0]; diff --git a/src/servers/http/v1/handlers/announce.rs 
b/src/servers/http/v1/handlers/announce.rs index fe3825a41..ac39b0422 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -248,10 +248,16 @@ mod tests { /// Initialize tracker's dependencies and tracker. fn initialize_tracker_and_deps(config: &Configuration) -> TrackerAndDeps { - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); - let tracker = Arc::new(initialize_tracker(config, &database, &whitelist_authorization)); + + let tracker = Arc::new(initialize_tracker( + config, + &database, + &whitelist_authorization, + &authentication, + )); (tracker, stats_event_sender, whitelist_authorization) } diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 58c2d012b..d973735ff 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -133,11 +133,11 @@ mod tests { fn private_tracker() -> (Tracker, Option>) { let config = configuration::ephemeral_private(); - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); ( - initialize_tracker(&config, &database, &whitelist_authorization), + initialize_tracker(&config, &database, &whitelist_authorization, &authentication), stats_event_sender, ) } @@ -145,11 +145,11 @@ mod tests { fn whitelisted_tracker() -> (Tracker, Option>) { let config = configuration::ephemeral_listed(); - 
let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); ( - initialize_tracker(&config, &database, &whitelist_authorization), + initialize_tracker(&config, &database, &whitelist_authorization, &authentication), stats_event_sender, ) } @@ -157,11 +157,11 @@ mod tests { fn tracker_on_reverse_proxy() -> (Tracker, Option>) { let config = configuration::ephemeral_with_reverse_proxy(); - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); ( - initialize_tracker(&config, &database, &whitelist_authorization), + initialize_tracker(&config, &database, &whitelist_authorization, &authentication), stats_event_sender, ) } @@ -169,11 +169,11 @@ mod tests { fn tracker_not_on_reverse_proxy() -> (Tracker, Option>) { let config = configuration::ephemeral_without_reverse_proxy(); - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); ( - initialize_tracker(&config, &database, &whitelist_authorization), + initialize_tracker(&config, &database, &whitelist_authorization, &authentication), stats_event_sender, ) } diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 17598904c..929b00ff4 
100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -73,11 +73,11 @@ mod tests { fn public_tracker() -> (Tracker, Arc>>) { let config = configuration::ephemeral_public(); - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); - let tracker = initialize_tracker(&config, &database, &whitelist_authorization); + let tracker = initialize_tracker(&config, &database, &whitelist_authorization, &authentication); (tracker, stats_event_sender) } @@ -131,9 +131,10 @@ mod tests { fn test_tracker_factory() -> Tracker { let config = configuration::ephemeral(); - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + initialize_tracker_dependencies(&config); - Tracker::new(&config.core, &database, &whitelist_authorization).unwrap() + Tracker::new(&config.core, &database, &whitelist_authorization, &authentication).unwrap() } #[tokio::test] diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 0a25bccaf..856b2ae72 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -87,9 +87,9 @@ mod tests { fn public_tracker() -> Tracker { let config = configuration::ephemeral_public(); - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(&config); - initialize_tracker(&config, &database, &whitelist_authorization) + 
initialize_tracker(&config, &database, &whitelist_authorization, &authentication) } fn sample_info_hashes() -> Vec { @@ -115,9 +115,9 @@ mod tests { fn test_tracker_factory() -> Tracker { let config = configuration::ephemeral(); - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(&config); - Tracker::new(&config.core, &database, &whitelist_authorization).unwrap() + Tracker::new(&config.core, &database, &whitelist_authorization, &authentication).unwrap() } mod with_real_data { diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index c01dc2548..67bb35c5a 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -516,12 +516,17 @@ mod tests { } fn initialize_tracker_and_deps(config: &Configuration) -> TrackerAndDeps { - let (database, in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(config); + let (database, in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let tracker = Arc::new(initialize_tracker(config, &database, &whitelist_authorization)); + let tracker = Arc::new(initialize_tracker( + config, + &database, + &whitelist_authorization, + &authentication, + )); ( tracker, @@ -629,9 +634,9 @@ mod tests { fn test_tracker_factory() -> (Arc, Arc) { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(&config); - 
let tracker = Arc::new(Tracker::new(&config.core, &database, &whitelist_authorization).unwrap()); + let tracker = Arc::new(Tracker::new(&config.core, &database, &whitelist_authorization, &authentication).unwrap()); (tracker, whitelist_authorization) } @@ -989,7 +994,7 @@ mod tests { .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); - tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv6); + let _ = tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv6); } async fn announce_a_new_peer_using_ipv4( @@ -1276,7 +1281,7 @@ mod tests { .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); - tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv4); + let _ = tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv4); } async fn announce_a_new_peer_using_ipv6( @@ -1376,7 +1381,8 @@ mod tests { async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let config = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let (database, _in_memory_whitelist, whitelist_authorization) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + initialize_tracker_dependencies(&config); let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); stats_event_sender_mock @@ -1387,7 +1393,8 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let tracker = Arc::new(core::Tracker::new(&config.core, &database, &whitelist_authorization).unwrap()); + let tracker = + Arc::new(core::Tracker::new(&config.core, &database, &whitelist_authorization, &authentication).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); @@ -1509,7 +1516,7 @@ mod tests { .with_number_of_bytes_left(0) .into(); - 
tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer); + let _ = tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer); } fn build_scrape_request(remote_addr: &SocketAddr, info_hash: &InfoHash) -> ScrapeRequest { diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index f47e0b1db..078510bcd 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -65,8 +65,8 @@ mod tests { use super::Server; use crate::bootstrap::app::initialize_global_services; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; - use crate::core::whitelist; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; + use crate::core::{authentication, whitelist}; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; @@ -88,7 +88,8 @@ mod tests { &in_memory_whitelist.clone(), )); let _whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); + let authentication = Arc::new(authentication::Facade::new(&cfg.core, &database.clone())); + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); let udp_trackers = cfg.udp_trackers.clone().expect("missing UDP trackers configuration"); let config = &udp_trackers[0]; @@ -132,8 +133,8 @@ mod tests { &cfg.core, &in_memory_whitelist.clone(), )); - - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); + let authentication = Arc::new(authentication::Facade::new(&cfg.core, &database.clone())); + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); let config = &cfg.udp_trackers.as_ref().unwrap().first().unwrap(); let bind_to = config.bind_address; diff --git 
a/src/shared/bit_torrent/common.rs b/src/shared/bit_torrent/common.rs index 5ba2e0492..2f93b5a08 100644 --- a/src/shared/bit_torrent/common.rs +++ b/src/shared/bit_torrent/common.rs @@ -17,6 +17,6 @@ pub const MAX_SCRAPE_TORRENTS: u8 = 74; /// HTTP tracker authentication key length. /// -/// For more information see function [`generate_key`](crate::core::authentication::generate_key) to generate the +/// For more information see function [`generate_key`](crate::core::authentication::key::generate_key) to generate the /// [`PeerKey`](crate::core::authentication::PeerKey). pub const AUTH_KEY_LENGTH: usize = 32; diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index a9628f053..3bac7e570 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -37,7 +37,7 @@ where { /// Add a torrent to the tracker pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - self.tracker.upsert_peer_and_get_stats(info_hash, peer); + let _ = self.tracker.upsert_peer_and_get_stats(info_hash, peer); } } diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 160cb49f8..a8e5fc572 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -27,7 +27,7 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - self.tracker.upsert_peer_and_get_stats(info_hash, peer); + let _ = self.tracker.upsert_peer_and_get_stats(info_hash, peer); } } diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 43778ef6e..b728509c0 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -36,7 +36,7 @@ where /// Add a torrent to the tracker #[allow(dead_code)] pub fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { - self.tracker.upsert_peer_and_get_stats(info_hash, peer); + let _ = 
self.tracker.upsert_peer_and_get_stats(info_hash, peer); } } From a0936805c0a6b282a651ace3ec89a43206cca176 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Jan 2025 08:16:18 +0000 Subject: [PATCH 117/802] refactor: [#1191] remove authentication wrapper methods from core tracker --- src/app.rs | 1 + src/core/mod.rs | 189 ++++-------------- .../apis/v1/context/auth_key/handlers.rs | 11 +- src/servers/http/v1/handlers/announce.rs | 2 +- src/servers/http/v1/handlers/scrape.rs | 2 +- .../api/v1/contract/context/auth_key.rs | 10 + tests/servers/http/v1/contract.rs | 14 +- 7 files changed, 72 insertions(+), 157 deletions(-) diff --git a/src/app.rs b/src/app.rs index 289db1fdc..da8795ffe 100644 --- a/src/app.rs +++ b/src/app.rs @@ -53,6 +53,7 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< if app_container.tracker.is_private() { app_container .tracker + .authentication .load_keys_from_database() .await .expect("Could not retrieve keys from database."); diff --git a/src/core/mod.rs b/src/core/mod.rs index 4b8d4c7f2..bd585893d 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -454,19 +454,16 @@ use std::net::IpAddr; use std::sync::Arc; use std::time::Duration; -use authentication::AddKeyRequest; use bittorrent_primitives::info_hash::InfoHash; -use error::PeerKeyError; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::core::{AnnounceData, ScrapeData}; +use torrust_tracker_primitives::peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_torrent_repository::entry::EntrySync; use torrust_tracker_torrent_repository::repository::Repository; -use self::authentication::Key; use self::torrent::Torrents; use crate::core::databases::Database; use crate::CurrentClock; @@ 
-495,7 +492,7 @@ pub struct Tracker { torrents: Arc, /// The service to authenticate peers. - authentication: Arc, + pub authentication: Arc, } /// How many peers the peer announcing wants in the announce response. @@ -773,136 +770,6 @@ impl Tracker { } } - /// It authenticates the peer `key` against the `Tracker` authentication - /// key list. - /// - /// # Context: Authentication - /// - /// # Errors - /// - /// Will return an error if the the authentication key cannot be verified. - pub async fn authenticate(&self, key: &Key) -> Result<(), authentication::Error> { - self.authentication.authenticate(key).await - } - - /// Adds new peer keys to the tracker. - /// - /// Keys can be pre-generated or randomly created. They can also be permanent or expire. - /// - /// # Context: Authentication - /// - /// # Errors - /// - /// Will return an error if: - /// - /// - The key duration overflows the duration type maximum value. - /// - The provided pre-generated key is invalid. - /// - The key could not been persisted due to database issues. - pub async fn add_peer_key(&self, add_key_req: AddKeyRequest) -> Result { - self.authentication.add_peer_key(add_key_req).await - } - - /// It generates a new permanent authentication key. - /// - /// Authentication keys are used by HTTP trackers. - /// - /// # Context: Authentication - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to add the `auth_key` to the database. - pub async fn generate_permanent_auth_key(&self) -> Result { - self.authentication.generate_auth_key(None).await - } - - /// It generates a new expiring authentication key. - /// - /// Authentication keys are used by HTTP trackers. - /// - /// # Context: Authentication - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to add the `auth_key` to the database. - /// - /// # Arguments - /// - /// * `lifetime` - The duration in seconds for the new key. The key will be - /// no longer valid after `lifetime` seconds. 
- pub async fn generate_auth_key( - &self, - lifetime: Option, - ) -> Result { - self.authentication.generate_auth_key(lifetime).await - } - - /// It adds a pre-generated permanent authentication key. - /// - /// Authentication keys are used by HTTP trackers. - /// - /// # Context: Authentication - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to add the `auth_key` to the - /// database. For example, if the key already exist. - /// - /// # Arguments - /// - /// * `key` - The pre-generated key. - pub async fn add_permanent_auth_key(&self, key: Key) -> Result { - self.authentication.add_auth_key(key, None).await - } - - /// It adds a pre-generated authentication key. - /// - /// Authentication keys are used by HTTP trackers. - /// - /// # Context: Authentication - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to add the `auth_key` to the - /// database. For example, if the key already exist. - /// - /// # Arguments - /// - /// * `key` - The pre-generated key. - /// * `lifetime` - The duration in seconds for the new key. The key will be - /// no longer valid after `lifetime` seconds. - pub async fn add_auth_key( - &self, - key: Key, - valid_until: Option, - ) -> Result { - self.authentication.add_auth_key(key, valid_until).await - } - - /// It removes an authentication key. - /// - /// # Context: Authentication - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to remove the `key` to the database. - pub async fn remove_auth_key(&self, key: &Key) -> Result<(), databases::error::Error> { - self.authentication.remove_auth_key(key).await - } - - /// The `Tracker` stores the authentication keys in memory and in the database. - /// In case you need to restart the `Tracker` you can load the keys from the database - /// into memory with this function. Keys are automatically stored in the database when they - /// are generated. 
- /// - /// # Context: Authentication - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to `load_keys` from the database. - pub async fn load_keys_from_database(&self) -> Result<(), databases::error::Error> { - self.authentication.load_keys_from_database().await - } - /// It drops the database tables. /// /// # Errors @@ -1664,7 +1531,7 @@ mod tests { let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let result = tracker.authenticate(&unregistered_key).await; + let result = tracker.authentication.authenticate(&unregistered_key).await; assert!(result.is_err()); } @@ -1682,9 +1549,13 @@ mod tests { async fn it_should_remove_an_authentication_key() { let tracker = private_tracker(); - let expiring_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); + let expiring_key = tracker + .authentication + .generate_auth_key(Some(Duration::from_secs(100))) + .await + .unwrap(); - let result = tracker.remove_auth_key(&expiring_key.key()).await; + let result = tracker.authentication.remove_auth_key(&expiring_key.key()).await; assert!(result.is_ok()); assert!(tracker.authentication.verify_auth_key(&expiring_key.key()).await.is_err()); @@ -1694,12 +1565,16 @@ mod tests { async fn it_should_load_authentication_keys_from_the_database() { let tracker = private_tracker(); - let expiring_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); + let expiring_key = tracker + .authentication + .generate_auth_key(Some(Duration::from_secs(100))) + .await + .unwrap(); // Remove the newly generated key in memory tracker.authentication.remove_in_memory_auth_key(&expiring_key.key()).await; - let result = tracker.load_keys_from_database().await; + let result = tracker.authentication.load_keys_from_database().await; assert!(result.is_ok()); assert!(tracker.authentication.verify_auth_key(&expiring_key.key()).await.is_ok()); @@ -1722,7 +1597,11 @@ mod tests { async fn 
it_should_generate_the_key() { let tracker = private_tracker(); - let peer_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); + let peer_key = tracker + .authentication + .generate_auth_key(Some(Duration::from_secs(100))) + .await + .unwrap(); assert_eq!( peer_key.valid_until, @@ -1734,9 +1613,13 @@ mod tests { async fn it_should_authenticate_a_peer_with_the_key() { let tracker = private_tracker(); - let peer_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); + let peer_key = tracker + .authentication + .generate_auth_key(Some(Duration::from_secs(100))) + .await + .unwrap(); - let result = tracker.authenticate(&peer_key.key()).await; + let result = tracker.authentication.authenticate(&peer_key.key()).await; assert!(result.is_ok()); } @@ -1748,11 +1631,12 @@ mod tests { let past_timestamp = Duration::ZERO; let peer_key = tracker + .authentication .add_auth_key(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), Some(past_timestamp)) .await .unwrap(); - assert!(tracker.authenticate(&peer_key.key()).await.is_ok()); + assert!(tracker.authentication.authenticate(&peer_key.key()).await.is_ok()); } } @@ -1771,6 +1655,7 @@ mod tests { let tracker = private_tracker(); let peer_key = tracker + .authentication .add_peer_key(AddKeyRequest { opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), opt_seconds_valid: Some(100), @@ -1789,6 +1674,7 @@ mod tests { let tracker = private_tracker(); let peer_key = tracker + .authentication .add_peer_key(AddKeyRequest { opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), opt_seconds_valid: Some(100), @@ -1796,7 +1682,7 @@ mod tests { .await .unwrap(); - let result = tracker.authenticate(&peer_key.key()).await; + let result = tracker.authentication.authenticate(&peer_key.key()).await; assert!(result.is_ok()); } @@ -1810,6 +1696,7 @@ mod tests { }); let peer_key = tracker + .authentication .add_peer_key(AddKeyRequest { 
opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), opt_seconds_valid: Some(0), @@ -1817,7 +1704,7 @@ mod tests { .await .unwrap(); - assert!(tracker.authenticate(&peer_key.key()).await.is_ok()); + assert!(tracker.authentication.authenticate(&peer_key.key()).await.is_ok()); } } } @@ -1831,7 +1718,7 @@ mod tests { async fn it_should_generate_the_key() { let tracker = private_tracker(); - let peer_key = tracker.generate_permanent_auth_key().await.unwrap(); + let peer_key = tracker.authentication.generate_permanent_auth_key().await.unwrap(); assert_eq!(peer_key.valid_until, None); } @@ -1840,9 +1727,9 @@ mod tests { async fn it_should_authenticate_a_peer_with_the_key() { let tracker = private_tracker(); - let peer_key = tracker.generate_permanent_auth_key().await.unwrap(); + let peer_key = tracker.authentication.generate_permanent_auth_key().await.unwrap(); - let result = tracker.authenticate(&peer_key.key()).await; + let result = tracker.authentication.authenticate(&peer_key.key()).await; assert!(result.is_ok()); } @@ -1857,6 +1744,7 @@ mod tests { let tracker = private_tracker(); let peer_key = tracker + .authentication .add_peer_key(AddKeyRequest { opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), opt_seconds_valid: None, @@ -1872,6 +1760,7 @@ mod tests { let tracker = private_tracker(); let peer_key = tracker + .authentication .add_peer_key(AddKeyRequest { opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), opt_seconds_valid: None, @@ -1879,7 +1768,7 @@ mod tests { .await .unwrap(); - let result = tracker.authenticate(&peer_key.key()).await; + let result = tracker.authentication.authenticate(&peer_key.key()).await; assert!(result.is_ok()); } diff --git a/src/servers/apis/v1/context/auth_key/handlers.rs b/src/servers/apis/v1/context/auth_key/handlers.rs index bccc7d9eb..ba345d8a5 100644 --- a/src/servers/apis/v1/context/auth_key/handlers.rs +++ 
b/src/servers/apis/v1/context/auth_key/handlers.rs @@ -35,6 +35,7 @@ pub async fn add_auth_key_handler( extract::Json(add_key_form): extract::Json, ) -> Response { match tracker + .authentication .add_peer_key(AddKeyRequest { opt_key: add_key_form.opt_key.clone(), opt_seconds_valid: add_key_form.opt_seconds_valid, @@ -67,7 +68,11 @@ pub async fn add_auth_key_handler( /// This endpoint has been deprecated. Use [`add_auth_key_handler`]. pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { let seconds_valid = seconds_valid_or_key; - match tracker.generate_auth_key(Some(Duration::from_secs(seconds_valid))).await { + match tracker + .authentication + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) + .await + { Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), Err(e) => failed_to_generate_key_response(e), } @@ -108,7 +113,7 @@ pub async fn delete_auth_key_handler( ) -> Response { match Key::from_str(&seconds_valid_or_key.0) { Err(_) => invalid_auth_key_param_response(&seconds_valid_or_key.0), - Ok(key) => match tracker.remove_auth_key(&key).await { + Ok(key) => match tracker.authentication.remove_auth_key(&key).await { Ok(()) => ok_response(), Err(e) => failed_to_delete_key_response(e), }, @@ -128,7 +133,7 @@ pub async fn delete_auth_key_handler( /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#reload-authentication-keys) /// for more information about this endpoint. 
pub async fn reload_keys_handler(State(tracker): State>) -> Response { - match tracker.load_keys_from_database().await { + match tracker.authentication.load_keys_from_database().await { Ok(()) => ok_response(), Err(e) => failed_to_reload_keys_response(e), } diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index ac39b0422..7af2b9261 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -113,7 +113,7 @@ async fn handle_announce( // Authentication if tracker.requires_authentication() { match maybe_key { - Some(key) => match tracker.authenticate(&key).await { + Some(key) => match tracker.authentication.authenticate(&key).await { Ok(()) => (), Err(error) => return Err(responses::error::Error::from(error)), }, diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index d973735ff..062a017f8 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -84,7 +84,7 @@ async fn handle_scrape( // Authentication let return_real_scrape_data = if tracker.requires_authentication() { match maybe_key { - Some(key) => match tracker.authenticate(&key).await { + Some(key) => match tracker.authentication.authenticate(&key).await { Ok(()) => true, Err(_error) => false, }, diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs index 40c10be5f..cee6b4034 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -37,6 +37,7 @@ async fn should_allow_generating_a_new_random_auth_key() { assert!(env .tracker + .authentication .authenticate(&auth_key_resource.key.parse::().unwrap()) .await .is_ok()); @@ -66,6 +67,7 @@ async fn should_allow_uploading_a_preexisting_auth_key() { assert!(env .tracker + .authentication .authenticate(&auth_key_resource.key.parse::().unwrap()) .await .is_ok()); @@ -159,6 
+161,7 @@ async fn should_allow_deleting_an_auth_key() { let seconds_valid = 60; let auth_key = env .tracker + .authentication .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -293,6 +296,7 @@ async fn should_fail_when_the_auth_key_cannot_be_deleted() { let seconds_valid = 60; let auth_key = env .tracker + .authentication .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -326,6 +330,7 @@ async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { // Generate new auth key let auth_key = env .tracker + .authentication .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -346,6 +351,7 @@ async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { // Generate new auth key let auth_key = env .tracker + .authentication .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -374,6 +380,7 @@ async fn should_allow_reloading_keys() { let seconds_valid = 60; env.tracker + .authentication .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -399,6 +406,7 @@ async fn should_fail_when_keys_cannot_be_reloaded() { let seconds_valid = 60; env.tracker + .authentication .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -427,6 +435,7 @@ async fn should_not_allow_reloading_keys_for_unauthenticated_users() { let seconds_valid = 60; env.tracker + .authentication .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -491,6 +500,7 @@ mod deprecated_generate_key_endpoint { assert!(env .tracker + .authentication .authenticate(&auth_key_resource.key.parse::().unwrap()) .await .is_ok()); diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 961caf017..d8b1c92c2 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -1396,7 +1396,12 @@ mod configured_as_private { let env = 
Started::new(&configuration::ephemeral_private().into()).await; - let expiring_key = env.tracker.generate_auth_key(Some(Duration::from_secs(60))).await.unwrap(); + let expiring_key = env + .tracker + .authentication + .generate_auth_key(Some(Duration::from_secs(60))) + .await + .unwrap(); let response = Client::authenticated(*env.bind_address(), expiring_key.key()) .announce(&QueryBuilder::default().query()) @@ -1541,7 +1546,12 @@ mod configured_as_private { .build(), ); - let expiring_key = env.tracker.generate_auth_key(Some(Duration::from_secs(60))).await.unwrap(); + let expiring_key = env + .tracker + .authentication + .generate_auth_key(Some(Duration::from_secs(60))) + .await + .unwrap(); let response = Client::authenticated(*env.bind_address(), expiring_key.key()) .scrape( From 9a60c0c7e084cd6a1256e5b4edcec4d5297f965a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Jan 2025 11:31:38 +0000 Subject: [PATCH 118/802] refactor: [#11191] copy private tracker tests to authentication module --- src/core/authentication/mod.rs | 298 +++++++++++++++++++++++++++++++++ 1 file changed, 298 insertions(+) diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index d03502988..70101dbf6 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -302,3 +302,301 @@ impl Facade { Ok(()) } } + +#[cfg(test)] +mod tests { + + mod the_tracker { + + use torrust_tracker_configuration::v2_0_0::core::PrivateMode; + use torrust_tracker_test_helpers::configuration; + + use crate::app_test::initialize_tracker_dependencies; + use crate::core::services::initialize_tracker; + use crate::core::Tracker; + + fn private_tracker() -> Tracker { + let config = configuration::ephemeral_private(); + + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + initialize_tracker_dependencies(&config); + + initialize_tracker(&config, &database, &whitelist_authorization, &authentication) + } + + fn 
private_tracker_without_checking_keys_expiration() -> Tracker { + let mut config = configuration::ephemeral_private(); + + config.core.private_mode = Some(PrivateMode { + check_keys_expiration: false, + }); + + let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + initialize_tracker_dependencies(&config); + + initialize_tracker(&config, &database, &whitelist_authorization, &authentication) + } + + mod configured_as_private { + + mod handling_authentication { + use std::str::FromStr; + use std::time::Duration; + + use crate::core::authentication::tests::the_tracker::private_tracker; + use crate::core::authentication::{self}; + + #[tokio::test] + async fn it_should_fail_authenticating_a_peer_when_it_uses_an_unregistered_key() { + let tracker = private_tracker(); + + let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let result = tracker.authentication.authenticate(&unregistered_key).await; + + assert!(result.is_err()); + } + + #[tokio::test] + async fn it_should_fail_verifying_an_unregistered_authentication_key() { + let tracker = private_tracker(); + + let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + assert!(tracker.authentication.verify_auth_key(&unregistered_key).await.is_err()); + } + + #[tokio::test] + async fn it_should_remove_an_authentication_key() { + let tracker = private_tracker(); + + let expiring_key = tracker + .authentication + .generate_auth_key(Some(Duration::from_secs(100))) + .await + .unwrap(); + + let result = tracker.authentication.remove_auth_key(&expiring_key.key()).await; + + assert!(result.is_ok()); + assert!(tracker.authentication.verify_auth_key(&expiring_key.key()).await.is_err()); + } + + #[tokio::test] + async fn it_should_load_authentication_keys_from_the_database() { + let tracker = private_tracker(); + + let expiring_key = tracker + .authentication + .generate_auth_key(Some(Duration::from_secs(100))) + 
.await + .unwrap(); + + // Remove the newly generated key in memory + tracker.authentication.remove_in_memory_auth_key(&expiring_key.key()).await; + + let result = tracker.authentication.load_keys_from_database().await; + + assert!(result.is_ok()); + assert!(tracker.authentication.verify_auth_key(&expiring_key.key()).await.is_ok()); + } + + mod with_expiring_and { + + mod randomly_generated_keys { + use std::time::Duration; + + use torrust_tracker_clock::clock::Time; + + use crate::core::authentication::tests::the_tracker::{ + private_tracker, private_tracker_without_checking_keys_expiration, + }; + use crate::core::authentication::Key; + use crate::CurrentClock; + + #[tokio::test] + async fn it_should_generate_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker + .authentication + .generate_auth_key(Some(Duration::from_secs(100))) + .await + .unwrap(); + + assert_eq!( + peer_key.valid_until, + Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) + ); + } + + #[tokio::test] + async fn it_should_authenticate_a_peer_with_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker + .authentication + .generate_auth_key(Some(Duration::from_secs(100))) + .await + .unwrap(); + + let result = tracker.authentication.authenticate(&peer_key.key()).await; + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { + let tracker = private_tracker_without_checking_keys_expiration(); + + let past_timestamp = Duration::ZERO; + + let peer_key = tracker + .authentication + .add_auth_key(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), Some(past_timestamp)) + .await + .unwrap(); + + assert!(tracker.authentication.authenticate(&peer_key.key()).await.is_ok()); + } + } + + mod pre_generated_keys { + use std::time::Duration; + + use torrust_tracker_clock::clock::Time; + + use crate::core::authentication::tests::the_tracker::{ + 
private_tracker, private_tracker_without_checking_keys_expiration, + }; + use crate::core::authentication::{AddKeyRequest, Key}; + use crate::CurrentClock; + + #[tokio::test] + async fn it_should_add_a_pre_generated_key() { + let tracker = private_tracker(); + + let peer_key = tracker + .authentication + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: Some(100), + }) + .await + .unwrap(); + + assert_eq!( + peer_key.valid_until, + Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) + ); + } + + #[tokio::test] + async fn it_should_authenticate_a_peer_with_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker + .authentication + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: Some(100), + }) + .await + .unwrap(); + + let result = tracker.authentication.authenticate(&peer_key.key()).await; + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { + let tracker = private_tracker_without_checking_keys_expiration(); + + let peer_key = tracker + .authentication + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: Some(0), + }) + .await + .unwrap(); + + assert!(tracker.authentication.authenticate(&peer_key.key()).await.is_ok()); + } + } + } + + mod with_permanent_and { + + mod randomly_generated_keys { + use crate::core::authentication::tests::the_tracker::private_tracker; + + #[tokio::test] + async fn it_should_generate_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker.authentication.generate_permanent_auth_key().await.unwrap(); + + assert_eq!(peer_key.valid_until, None); + } + + #[tokio::test] + async fn it_should_authenticate_a_peer_with_the_key() { + let tracker = 
private_tracker(); + + let peer_key = tracker.authentication.generate_permanent_auth_key().await.unwrap(); + + let result = tracker.authentication.authenticate(&peer_key.key()).await; + + assert!(result.is_ok()); + } + } + + mod pre_generated_keys { + use crate::core::authentication::tests::the_tracker::private_tracker; + use crate::core::authentication::{AddKeyRequest, Key}; + + #[tokio::test] + async fn it_should_add_a_pre_generated_key() { + let tracker = private_tracker(); + + let peer_key = tracker + .authentication + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: None, + }) + .await + .unwrap(); + + assert_eq!(peer_key.valid_until, None); + } + + #[tokio::test] + async fn it_should_authenticate_a_peer_with_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker + .authentication + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: None, + }) + .await + .unwrap(); + + let result = tracker.authentication.authenticate(&peer_key.key()).await; + + assert!(result.is_ok()); + } + } + } + } + + mod handling_an_announce_request {} + + mod handling_an_scrape_request {} + } + } +} From f41a524e46788a3ba819442c4aec2359adba4c89 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Jan 2025 11:37:04 +0000 Subject: [PATCH 119/802] refactor: [#1191] remove duplicate tests for private tracker There were copied to the authentication module. 
--- src/core/mod.rs | 295 ------------------------------------------------ 1 file changed, 295 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index bd585893d..9a5692690 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -803,7 +803,6 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; use bittorrent_primitives::info_hash::InfoHash; - use torrust_tracker_configuration::v2_0_0::core::PrivateMode; use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration; @@ -823,15 +822,6 @@ mod tests { initialize_tracker(&config, &database, &whitelist_authorization, &authentication) } - fn private_tracker() -> Tracker { - let config = configuration::ephemeral_private(); - - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = - initialize_tracker_dependencies(&config); - - initialize_tracker(&config, &database, &whitelist_authorization, &authentication) - } - fn whitelisted_tracker() -> (Tracker, Arc, Arc) { let config = configuration::ephemeral_listed(); @@ -845,19 +835,6 @@ mod tests { (tracker, whitelist_authorization, whitelist_manager) } - fn private_tracker_without_checking_keys_expiration() -> Tracker { - let mut config = configuration::ephemeral_private(); - - config.core.private_mode = Some(PrivateMode { - check_keys_expiration: false, - }); - - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = - initialize_tracker_dependencies(&config); - - initialize_tracker(&config, &database, &whitelist_authorization, &authentication) - } - pub fn tracker_persisting_torrents_in_database() -> Tracker { let mut config = configuration::ephemeral_listed(); config.core.tracker_policy.persistent_torrent_completed_stat = true; @@ -1516,278 +1493,6 @@ mod tests { } } - mod configured_as_private { - - mod handling_authentication 
{ - use std::str::FromStr; - use std::time::Duration; - - use crate::core::authentication::{self}; - use crate::core::tests::the_tracker::private_tracker; - - #[tokio::test] - async fn it_should_fail_authenticating_a_peer_when_it_uses_an_unregistered_key() { - let tracker = private_tracker(); - - let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - - let result = tracker.authentication.authenticate(&unregistered_key).await; - - assert!(result.is_err()); - } - - #[tokio::test] - async fn it_should_fail_verifying_an_unregistered_authentication_key() { - let tracker = private_tracker(); - - let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - - assert!(tracker.authentication.verify_auth_key(&unregistered_key).await.is_err()); - } - - #[tokio::test] - async fn it_should_remove_an_authentication_key() { - let tracker = private_tracker(); - - let expiring_key = tracker - .authentication - .generate_auth_key(Some(Duration::from_secs(100))) - .await - .unwrap(); - - let result = tracker.authentication.remove_auth_key(&expiring_key.key()).await; - - assert!(result.is_ok()); - assert!(tracker.authentication.verify_auth_key(&expiring_key.key()).await.is_err()); - } - - #[tokio::test] - async fn it_should_load_authentication_keys_from_the_database() { - let tracker = private_tracker(); - - let expiring_key = tracker - .authentication - .generate_auth_key(Some(Duration::from_secs(100))) - .await - .unwrap(); - - // Remove the newly generated key in memory - tracker.authentication.remove_in_memory_auth_key(&expiring_key.key()).await; - - let result = tracker.authentication.load_keys_from_database().await; - - assert!(result.is_ok()); - assert!(tracker.authentication.verify_auth_key(&expiring_key.key()).await.is_ok()); - } - - mod with_expiring_and { - - mod randomly_generated_keys { - use std::time::Duration; - - use torrust_tracker_clock::clock::Time; - - use 
crate::core::authentication::Key; - use crate::core::tests::the_tracker::{ - private_tracker, private_tracker_without_checking_keys_expiration, - }; - use crate::CurrentClock; - - #[tokio::test] - async fn it_should_generate_the_key() { - let tracker = private_tracker(); - - let peer_key = tracker - .authentication - .generate_auth_key(Some(Duration::from_secs(100))) - .await - .unwrap(); - - assert_eq!( - peer_key.valid_until, - Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) - ); - } - - #[tokio::test] - async fn it_should_authenticate_a_peer_with_the_key() { - let tracker = private_tracker(); - - let peer_key = tracker - .authentication - .generate_auth_key(Some(Duration::from_secs(100))) - .await - .unwrap(); - - let result = tracker.authentication.authenticate(&peer_key.key()).await; - - assert!(result.is_ok()); - } - - #[tokio::test] - async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { - let tracker = private_tracker_without_checking_keys_expiration(); - - let past_timestamp = Duration::ZERO; - - let peer_key = tracker - .authentication - .add_auth_key(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), Some(past_timestamp)) - .await - .unwrap(); - - assert!(tracker.authentication.authenticate(&peer_key.key()).await.is_ok()); - } - } - - mod pre_generated_keys { - use std::time::Duration; - - use torrust_tracker_clock::clock::Time; - use torrust_tracker_configuration::v2_0_0::core::PrivateMode; - - use crate::core::authentication::{AddKeyRequest, Key}; - use crate::core::tests::the_tracker::private_tracker; - use crate::CurrentClock; - - #[tokio::test] - async fn it_should_add_a_pre_generated_key() { - let tracker = private_tracker(); - - let peer_key = tracker - .authentication - .add_peer_key(AddKeyRequest { - opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), - opt_seconds_valid: Some(100), - }) - .await - .unwrap(); - - assert_eq!( - peer_key.valid_until, - 
Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) - ); - } - - #[tokio::test] - async fn it_should_authenticate_a_peer_with_the_key() { - let tracker = private_tracker(); - - let peer_key = tracker - .authentication - .add_peer_key(AddKeyRequest { - opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), - opt_seconds_valid: Some(100), - }) - .await - .unwrap(); - - let result = tracker.authentication.authenticate(&peer_key.key()).await; - - assert!(result.is_ok()); - } - - #[tokio::test] - async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { - let mut tracker = private_tracker(); - - tracker.config.private_mode = Some(PrivateMode { - check_keys_expiration: false, - }); - - let peer_key = tracker - .authentication - .add_peer_key(AddKeyRequest { - opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), - opt_seconds_valid: Some(0), - }) - .await - .unwrap(); - - assert!(tracker.authentication.authenticate(&peer_key.key()).await.is_ok()); - } - } - } - - mod with_permanent_and { - - mod randomly_generated_keys { - use crate::core::tests::the_tracker::private_tracker; - - #[tokio::test] - async fn it_should_generate_the_key() { - let tracker = private_tracker(); - - let peer_key = tracker.authentication.generate_permanent_auth_key().await.unwrap(); - - assert_eq!(peer_key.valid_until, None); - } - - #[tokio::test] - async fn it_should_authenticate_a_peer_with_the_key() { - let tracker = private_tracker(); - - let peer_key = tracker.authentication.generate_permanent_auth_key().await.unwrap(); - - let result = tracker.authentication.authenticate(&peer_key.key()).await; - - assert!(result.is_ok()); - } - } - - mod pre_generated_keys { - use crate::core::authentication::{AddKeyRequest, Key}; - use crate::core::tests::the_tracker::private_tracker; - - #[tokio::test] - async fn it_should_add_a_pre_generated_key() { - let tracker = private_tracker(); - - let 
peer_key = tracker - .authentication - .add_peer_key(AddKeyRequest { - opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), - opt_seconds_valid: None, - }) - .await - .unwrap(); - - assert_eq!(peer_key.valid_until, None); - } - - #[tokio::test] - async fn it_should_authenticate_a_peer_with_the_key() { - let tracker = private_tracker(); - - let peer_key = tracker - .authentication - .add_peer_key(AddKeyRequest { - opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), - opt_seconds_valid: None, - }) - .await - .unwrap(); - - let result = tracker.authentication.authenticate(&peer_key.key()).await; - - assert!(result.is_ok()); - } - } - } - } - - mod handling_an_announce_request {} - - mod handling_an_scrape_request {} - } - - mod configured_as_private_and_whitelisted { - - mod handling_an_announce_request {} - - mod handling_an_scrape_request {} - } - mod handling_torrent_persistence { use aquatic_udp_protocol::AnnounceEvent; From 6584fe434067c3a8c5eb12d5aa108df817291ccb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Jan 2025 11:54:13 +0000 Subject: [PATCH 120/802] refactor: [#1191] remove tracker dependency for authentication tests --- src/core/authentication/mod.rs | 407 +++++++++++++++------------------ 1 file changed, 190 insertions(+), 217 deletions(-) diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index 70101dbf6..d611ae57e 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -306,297 +306,270 @@ impl Facade { #[cfg(test)] mod tests { - mod the_tracker { + mod the_tracker_configured_as_private { + + use std::str::FromStr; + use std::time::Duration; use torrust_tracker_configuration::v2_0_0::core::PrivateMode; use torrust_tracker_test_helpers::configuration; - use crate::app_test::initialize_tracker_dependencies; - use crate::core::services::initialize_tracker; - use crate::core::Tracker; + use crate::core::authentication; + use 
crate::core::services::initialize_database; - fn private_tracker() -> Tracker { + fn instantiate_authentication() -> authentication::Facade { let config = configuration::ephemeral_private(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = - initialize_tracker_dependencies(&config); - - initialize_tracker(&config, &database, &whitelist_authorization, &authentication) + let database = initialize_database(&config); + authentication::Facade::new(&config.core, &database.clone()) } - fn private_tracker_without_checking_keys_expiration() -> Tracker { + fn instantiate_authentication_with_checking_keys_expiration_disabled() -> authentication::Facade { let mut config = configuration::ephemeral_private(); config.core.private_mode = Some(PrivateMode { check_keys_expiration: false, }); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = - initialize_tracker_dependencies(&config); + let database = initialize_database(&config); + authentication::Facade::new(&config.core, &database.clone()) + } + + #[tokio::test] + async fn it_should_fail_authenticating_a_peer_when_it_uses_an_unregistered_key() { + let authentication = instantiate_authentication(); + + let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - initialize_tracker(&config, &database, &whitelist_authorization, &authentication) + let result = authentication.authenticate(&unregistered_key).await; + + assert!(result.is_err()); } - mod configured_as_private { + #[tokio::test] + async fn it_should_fail_verifying_an_unregistered_authentication_key() { + let authentication = instantiate_authentication(); - mod handling_authentication { - use std::str::FromStr; - use std::time::Duration; + let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - use crate::core::authentication::tests::the_tracker::private_tracker; - use crate::core::authentication::{self}; + 
assert!(authentication.verify_auth_key(&unregistered_key).await.is_err()); + } - #[tokio::test] - async fn it_should_fail_authenticating_a_peer_when_it_uses_an_unregistered_key() { - let tracker = private_tracker(); + #[tokio::test] + async fn it_should_remove_an_authentication_key() { + let authentication = instantiate_authentication(); - let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let expiring_key = authentication + .generate_auth_key(Some(Duration::from_secs(100))) + .await + .unwrap(); - let result = tracker.authentication.authenticate(&unregistered_key).await; + let result = authentication.remove_auth_key(&expiring_key.key()).await; - assert!(result.is_err()); - } + assert!(result.is_ok()); + assert!(authentication.verify_auth_key(&expiring_key.key()).await.is_err()); + } - #[tokio::test] - async fn it_should_fail_verifying_an_unregistered_authentication_key() { - let tracker = private_tracker(); + #[tokio::test] + async fn it_should_load_authentication_keys_from_the_database() { + let authentication = instantiate_authentication(); - let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let expiring_key = authentication + .generate_auth_key(Some(Duration::from_secs(100))) + .await + .unwrap(); - assert!(tracker.authentication.verify_auth_key(&unregistered_key).await.is_err()); - } + // Remove the newly generated key in memory + authentication.remove_in_memory_auth_key(&expiring_key.key()).await; + + let result = authentication.load_keys_from_database().await; + + assert!(result.is_ok()); + assert!(authentication.verify_auth_key(&expiring_key.key()).await.is_ok()); + } + + mod with_expiring_and { + + mod randomly_generated_keys { + use std::time::Duration; + + use torrust_tracker_clock::clock::Time; + + use crate::core::authentication::tests::the_tracker_configured_as_private::{ + instantiate_authentication, 
instantiate_authentication_with_checking_keys_expiration_disabled, + }; + use crate::core::authentication::Key; + use crate::CurrentClock; #[tokio::test] - async fn it_should_remove_an_authentication_key() { - let tracker = private_tracker(); + async fn it_should_generate_the_key() { + let authentication = instantiate_authentication(); - let expiring_key = tracker - .authentication + let peer_key = authentication .generate_auth_key(Some(Duration::from_secs(100))) .await .unwrap(); - let result = tracker.authentication.remove_auth_key(&expiring_key.key()).await; - - assert!(result.is_ok()); - assert!(tracker.authentication.verify_auth_key(&expiring_key.key()).await.is_err()); + assert_eq!( + peer_key.valid_until, + Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) + ); } #[tokio::test] - async fn it_should_load_authentication_keys_from_the_database() { - let tracker = private_tracker(); + async fn it_should_authenticate_a_peer_with_the_key() { + let authentication = instantiate_authentication(); - let expiring_key = tracker - .authentication + let peer_key = authentication .generate_auth_key(Some(Duration::from_secs(100))) .await .unwrap(); - // Remove the newly generated key in memory - tracker.authentication.remove_in_memory_auth_key(&expiring_key.key()).await; - - let result = tracker.authentication.load_keys_from_database().await; + let result = authentication.authenticate(&peer_key.key()).await; assert!(result.is_ok()); - assert!(tracker.authentication.verify_auth_key(&expiring_key.key()).await.is_ok()); } - mod with_expiring_and { + #[tokio::test] + async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { + let authentication = instantiate_authentication_with_checking_keys_expiration_disabled(); - mod randomly_generated_keys { - use std::time::Duration; + let past_timestamp = Duration::ZERO; - use torrust_tracker_clock::clock::Time; + let peer_key = authentication + 
.add_auth_key(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), Some(past_timestamp)) + .await + .unwrap(); - use crate::core::authentication::tests::the_tracker::{ - private_tracker, private_tracker_without_checking_keys_expiration, - }; - use crate::core::authentication::Key; - use crate::CurrentClock; + assert!(authentication.authenticate(&peer_key.key()).await.is_ok()); + } + } - #[tokio::test] - async fn it_should_generate_the_key() { - let tracker = private_tracker(); + mod pre_generated_keys { + use std::time::Duration; - let peer_key = tracker - .authentication - .generate_auth_key(Some(Duration::from_secs(100))) - .await - .unwrap(); + use torrust_tracker_clock::clock::Time; - assert_eq!( - peer_key.valid_until, - Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) - ); - } + use crate::core::authentication::tests::the_tracker_configured_as_private::{ + instantiate_authentication, instantiate_authentication_with_checking_keys_expiration_disabled, + }; + use crate::core::authentication::{AddKeyRequest, Key}; + use crate::CurrentClock; - #[tokio::test] - async fn it_should_authenticate_a_peer_with_the_key() { - let tracker = private_tracker(); + #[tokio::test] + async fn it_should_add_a_pre_generated_key() { + let authentication = instantiate_authentication(); + + let peer_key = authentication + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: Some(100), + }) + .await + .unwrap(); - let peer_key = tracker - .authentication - .generate_auth_key(Some(Duration::from_secs(100))) - .await - .unwrap(); + assert_eq!( + peer_key.valid_until, + Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) + ); + } - let result = tracker.authentication.authenticate(&peer_key.key()).await; + #[tokio::test] + async fn it_should_authenticate_a_peer_with_the_key() { + let authentication = instantiate_authentication(); + + let peer_key = authentication + 
.add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: Some(100), + }) + .await + .unwrap(); - assert!(result.is_ok()); - } + let result = authentication.authenticate(&peer_key.key()).await; - #[tokio::test] - async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { - let tracker = private_tracker_without_checking_keys_expiration(); + assert!(result.is_ok()); + } - let past_timestamp = Duration::ZERO; + #[tokio::test] + async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { + let authentication = instantiate_authentication_with_checking_keys_expiration_disabled(); + + let peer_key = authentication + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: Some(0), + }) + .await + .unwrap(); - let peer_key = tracker - .authentication - .add_auth_key(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), Some(past_timestamp)) - .await - .unwrap(); + assert!(authentication.authenticate(&peer_key.key()).await.is_ok()); + } + } + } - assert!(tracker.authentication.authenticate(&peer_key.key()).await.is_ok()); - } - } + mod with_permanent_and { - mod pre_generated_keys { - use std::time::Duration; - - use torrust_tracker_clock::clock::Time; - - use crate::core::authentication::tests::the_tracker::{ - private_tracker, private_tracker_without_checking_keys_expiration, - }; - use crate::core::authentication::{AddKeyRequest, Key}; - use crate::CurrentClock; - - #[tokio::test] - async fn it_should_add_a_pre_generated_key() { - let tracker = private_tracker(); - - let peer_key = tracker - .authentication - .add_peer_key(AddKeyRequest { - opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), - opt_seconds_valid: Some(100), - }) - .await - .unwrap(); - - assert_eq!( - peer_key.valid_until, - 
Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) - ); - } - - #[tokio::test] - async fn it_should_authenticate_a_peer_with_the_key() { - let tracker = private_tracker(); - - let peer_key = tracker - .authentication - .add_peer_key(AddKeyRequest { - opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), - opt_seconds_valid: Some(100), - }) - .await - .unwrap(); - - let result = tracker.authentication.authenticate(&peer_key.key()).await; - - assert!(result.is_ok()); - } - - #[tokio::test] - async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { - let tracker = private_tracker_without_checking_keys_expiration(); - - let peer_key = tracker - .authentication - .add_peer_key(AddKeyRequest { - opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), - opt_seconds_valid: Some(0), - }) - .await - .unwrap(); - - assert!(tracker.authentication.authenticate(&peer_key.key()).await.is_ok()); - } - } - } + mod randomly_generated_keys { + use crate::core::authentication::tests::the_tracker_configured_as_private::instantiate_authentication; - mod with_permanent_and { + #[tokio::test] + async fn it_should_generate_the_key() { + let authentication = instantiate_authentication(); - mod randomly_generated_keys { - use crate::core::authentication::tests::the_tracker::private_tracker; + let peer_key = authentication.generate_permanent_auth_key().await.unwrap(); - #[tokio::test] - async fn it_should_generate_the_key() { - let tracker = private_tracker(); + assert_eq!(peer_key.valid_until, None); + } - let peer_key = tracker.authentication.generate_permanent_auth_key().await.unwrap(); + #[tokio::test] + async fn it_should_authenticate_a_peer_with_the_key() { + let authentication = instantiate_authentication(); - assert_eq!(peer_key.valid_until, None); - } + let peer_key = authentication.generate_permanent_auth_key().await.unwrap(); - #[tokio::test] - async fn 
it_should_authenticate_a_peer_with_the_key() { - let tracker = private_tracker(); + let result = authentication.authenticate(&peer_key.key()).await; - let peer_key = tracker.authentication.generate_permanent_auth_key().await.unwrap(); + assert!(result.is_ok()); + } + } - let result = tracker.authentication.authenticate(&peer_key.key()).await; + mod pre_generated_keys { + use crate::core::authentication::tests::the_tracker_configured_as_private::instantiate_authentication; + use crate::core::authentication::{AddKeyRequest, Key}; - assert!(result.is_ok()); - } - } + #[tokio::test] + async fn it_should_add_a_pre_generated_key() { + let authentication = instantiate_authentication(); + + let peer_key = authentication + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: None, + }) + .await + .unwrap(); - mod pre_generated_keys { - use crate::core::authentication::tests::the_tracker::private_tracker; - use crate::core::authentication::{AddKeyRequest, Key}; - - #[tokio::test] - async fn it_should_add_a_pre_generated_key() { - let tracker = private_tracker(); - - let peer_key = tracker - .authentication - .add_peer_key(AddKeyRequest { - opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), - opt_seconds_valid: None, - }) - .await - .unwrap(); - - assert_eq!(peer_key.valid_until, None); - } - - #[tokio::test] - async fn it_should_authenticate_a_peer_with_the_key() { - let tracker = private_tracker(); - - let peer_key = tracker - .authentication - .add_peer_key(AddKeyRequest { - opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), - opt_seconds_valid: None, - }) - .await - .unwrap(); - - let result = tracker.authentication.authenticate(&peer_key.key()).await; - - assert!(result.is_ok()); - } - } + assert_eq!(peer_key.valid_until, None); } - } - mod handling_an_announce_request {} + #[tokio::test] + async fn 
it_should_authenticate_a_peer_with_the_key() { + let authentication = instantiate_authentication(); + + let peer_key = authentication + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: None, + }) + .await + .unwrap(); + + let result = authentication.authenticate(&peer_key.key()).await; - mod handling_an_scrape_request {} + assert!(result.is_ok()); + } + } } } } From 44255e243c062ad2a101a426c9c02f03b1cfb5d2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Jan 2025 16:45:36 +0000 Subject: [PATCH 121/802] refactor: [#1195] create dir for mod We will add more submodules. --- src/core/authentication/{key.rs => key/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/core/authentication/{key.rs => key/mod.rs} (100%) diff --git a/src/core/authentication/key.rs b/src/core/authentication/key/mod.rs similarity index 100% rename from src/core/authentication/key.rs rename to src/core/authentication/key/mod.rs From f4c7b9746562a45331d108b11ebe7dc794253a2a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Jan 2025 17:08:47 +0000 Subject: [PATCH 122/802] refactor: [#1195] extract DatabaseKeyRepository --- src/core/authentication/key/mod.rs | 1 + src/core/authentication/key/repository/mod.rs | 1 + .../key/repository/persisted.rs | 48 +++++++++++++++++++ src/core/authentication/mod.rs | 23 +++++---- 4 files changed, 64 insertions(+), 9 deletions(-) create mode 100644 src/core/authentication/key/repository/mod.rs create mode 100644 src/core/authentication/key/repository/persisted.rs diff --git a/src/core/authentication/key/mod.rs b/src/core/authentication/key/mod.rs index 8858361ec..49d559e42 100644 --- a/src/core/authentication/key/mod.rs +++ b/src/core/authentication/key/mod.rs @@ -37,6 +37,7 @@ //! //! assert!(authentication::key::verify_key_expiration(&expiring_key).is_ok()); //! 
``` +pub mod repository; use std::panic::Location; use std::str::FromStr; diff --git a/src/core/authentication/key/repository/mod.rs b/src/core/authentication/key/repository/mod.rs new file mode 100644 index 000000000..fe3bdd68c --- /dev/null +++ b/src/core/authentication/key/repository/mod.rs @@ -0,0 +1 @@ +pub mod persisted; diff --git a/src/core/authentication/key/repository/persisted.rs b/src/core/authentication/key/repository/persisted.rs new file mode 100644 index 000000000..736a409eb --- /dev/null +++ b/src/core/authentication/key/repository/persisted.rs @@ -0,0 +1,48 @@ +use std::sync::Arc; + +use crate::core::authentication::key::{Key, PeerKey}; +use crate::core::databases::{self, Database}; + +/// The database repository for the authentication keys. +pub struct DatabaseKeyRepository { + database: Arc>, +} + +impl DatabaseKeyRepository { + #[must_use] + pub fn new(database: &Arc>) -> Self { + Self { + database: database.clone(), + } + } + + /// It adds a new key to the database. + /// + /// # Errors + /// + /// Will return a `databases::error::Error` if unable to add the `auth_key` to the database. + pub fn add(&self, peer_key: &PeerKey) -> Result<(), databases::error::Error> { + self.database.add_key_to_keys(peer_key)?; + Ok(()) + } + + /// It removes an key from the database. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `key` from the database. + pub fn remove(&self, key: &Key) -> Result<(), databases::error::Error> { + self.database.remove_key_from_keys(key)?; + Ok(()) + } + + /// It loads all keys from the database. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to load the keys from the database. 
+ pub fn load_keys(&self) -> Result, databases::error::Error> { + let keys = self.database.load_keys()?; + Ok(keys) + } +} diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index d611ae57e..618f57f91 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -2,6 +2,7 @@ use std::panic::Location; use std::sync::Arc; use std::time::Duration; +use key::repository::persisted::DatabaseKeyRepository; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::Core; use torrust_tracker_located_error::Located; @@ -35,12 +36,11 @@ pub struct Facade { /// The tracker configuration. config: Core, - /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) - /// or [`MySQL`](crate::core::databases::mysql) - database: Arc>, - /// Tracker users' keys. Only for private trackers. keys: tokio::sync::RwLock>, + + /// The database repository for the authentication keys. + db_key_repository: DatabaseKeyRepository, } impl Facade { @@ -48,8 +48,8 @@ impl Facade { pub fn new(config: &Core, database: &Arc>) -> Self { Self { config: config.clone(), - database: database.clone(), keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), + db_key_repository: DatabaseKeyRepository::new(database), } } @@ -205,7 +205,8 @@ impl Facade { pub async fn generate_auth_key(&self, lifetime: Option) -> Result { let auth_key = key::generate_key(lifetime); - self.database.add_key_to_keys(&auth_key)?; + self.db_key_repository.add(&auth_key)?; + self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); Ok(auth_key) } @@ -254,7 +255,8 @@ impl Facade { // code-review: should we return a friendly error instead of the DB // constrain error when the key already exist? For now, it's returning // the specif error for each DB driver when a UNIQUE constrain fails. 
- self.database.add_key_to_keys(&auth_key)?; + self.db_key_repository.add(&auth_key)?; + self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); Ok(auth_key) } @@ -267,8 +269,10 @@ impl Facade { /// /// Will return a `database::Error` if unable to remove the `key` to the database. pub async fn remove_auth_key(&self, key: &Key) -> Result<(), databases::error::Error> { - self.database.remove_key_from_keys(key)?; + self.db_key_repository.remove(key)?; + self.remove_in_memory_auth_key(key).await; + Ok(()) } @@ -290,7 +294,8 @@ impl Facade { /// /// Will return a `database::Error` if unable to `load_keys` from the database. pub async fn load_keys_from_database(&self) -> Result<(), databases::error::Error> { - let keys_from_database = self.database.load_keys()?; + let keys_from_database = self.db_key_repository.load_keys()?; + let mut keys = self.keys.write().await; keys.clear(); From 12a62ceaef3bdec903f575be00c81c781549f323 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Jan 2025 17:28:12 +0000 Subject: [PATCH 123/802] refactor: [#1195] extract InMemoryKeyRepository --- .../key/repository/in_memory.rs | 30 ++++++++++++++ src/core/authentication/key/repository/mod.rs | 1 + src/core/authentication/mod.rs | 41 ++++++++++--------- 3 files changed, 52 insertions(+), 20 deletions(-) create mode 100644 src/core/authentication/key/repository/in_memory.rs diff --git a/src/core/authentication/key/repository/in_memory.rs b/src/core/authentication/key/repository/in_memory.rs new file mode 100644 index 000000000..266d5a5fb --- /dev/null +++ b/src/core/authentication/key/repository/in_memory.rs @@ -0,0 +1,30 @@ +use crate::core::authentication::key::{Key, PeerKey}; + +/// In-memory implementation of the authentication key repository. +#[derive(Debug, Default)] +pub struct InMemoryKeyRepository { + /// Tracker users' keys. Only for private trackers. + keys: tokio::sync::RwLock>, +} + +impl InMemoryKeyRepository { + /// It adds a new authentication key. 
+ pub async fn insert(&self, auth_key: &PeerKey) { + self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); + } + + /// It removes an authentication key. + pub async fn remove(&self, key: &Key) { + self.keys.write().await.remove(key); + } + + pub async fn get(&self, key: &Key) -> Option { + self.keys.read().await.get(key).cloned() + } + + /// It clears all the authentication keys. + pub async fn clear(&self) { + let mut keys = self.keys.write().await; + keys.clear(); + } +} diff --git a/src/core/authentication/key/repository/mod.rs b/src/core/authentication/key/repository/mod.rs index fe3bdd68c..51723b68d 100644 --- a/src/core/authentication/key/repository/mod.rs +++ b/src/core/authentication/key/repository/mod.rs @@ -1 +1,2 @@ +pub mod in_memory; pub mod persisted; diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index 618f57f91..4a65a26a7 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -2,6 +2,7 @@ use std::panic::Location; use std::sync::Arc; use std::time::Duration; +use key::repository::in_memory::InMemoryKeyRepository; use key::repository::persisted::DatabaseKeyRepository; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::Core; @@ -36,11 +37,11 @@ pub struct Facade { /// The tracker configuration. config: Core, - /// Tracker users' keys. Only for private trackers. - keys: tokio::sync::RwLock>, - /// The database repository for the authentication keys. db_key_repository: DatabaseKeyRepository, + + /// In-memory implementation of the authentication key repository. 
+ in_memory_key_repository: InMemoryKeyRepository, } impl Facade { @@ -48,8 +49,8 @@ impl Facade { pub fn new(config: &Core, database: &Arc>) -> Self { Self { config: config.clone(), - keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), db_key_repository: DatabaseKeyRepository::new(database), + in_memory_key_repository: InMemoryKeyRepository::default(), } } @@ -82,7 +83,7 @@ impl Facade { /// /// Will return a `key::Error` if unable to get any `auth_key`. pub async fn verify_auth_key(&self, key: &Key) -> Result<(), Error> { - match self.keys.read().await.get(key) { + match self.in_memory_key_repository.get(key).await { None => Err(Error::UnableToReadKey { location: Location::caller(), key: Box::new(key.clone()), @@ -90,12 +91,12 @@ impl Facade { Some(key) => match self.config.private_mode { Some(private_mode) => { if private_mode.check_keys_expiration { - return key::verify_key_expiration(key); + return key::verify_key_expiration(&key); } Ok(()) } - None => key::verify_key_expiration(key), + None => key::verify_key_expiration(&key), }, } } @@ -203,12 +204,13 @@ impl Facade { /// * `lifetime` - The duration in seconds for the new key. The key will be /// no longer valid after `lifetime` seconds. pub async fn generate_auth_key(&self, lifetime: Option) -> Result { - let auth_key = key::generate_key(lifetime); + let peer_key = key::generate_key(lifetime); + + self.db_key_repository.add(&peer_key)?; - self.db_key_repository.add(&auth_key)?; + self.in_memory_key_repository.insert(&peer_key).await; - self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); - Ok(auth_key) + Ok(peer_key) } /// It adds a pre-generated permanent authentication key. @@ -250,15 +252,16 @@ impl Facade { key: Key, valid_until: Option, ) -> Result { - let auth_key = PeerKey { key, valid_until }; + let peer_key = PeerKey { key, valid_until }; // code-review: should we return a friendly error instead of the DB // constrain error when the key already exist? 
For now, it's returning // the specif error for each DB driver when a UNIQUE constrain fails. - self.db_key_repository.add(&auth_key)?; + self.db_key_repository.add(&peer_key)?; - self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); - Ok(auth_key) + self.in_memory_key_repository.insert(&peer_key).await; + + Ok(peer_key) } /// It removes an authentication key. @@ -280,7 +283,7 @@ impl Facade { /// /// # Context: Authentication pub async fn remove_in_memory_auth_key(&self, key: &Key) { - self.keys.write().await.remove(key); + self.in_memory_key_repository.remove(key).await; } /// The `Tracker` stores the authentication keys in memory and in the database. @@ -296,12 +299,10 @@ impl Facade { pub async fn load_keys_from_database(&self) -> Result<(), databases::error::Error> { let keys_from_database = self.db_key_repository.load_keys()?; - let mut keys = self.keys.write().await; - - keys.clear(); + self.in_memory_key_repository.clear().await; for key in keys_from_database { - keys.insert(key.key.clone(), key); + self.in_memory_key_repository.insert(&key).await; } Ok(()) From a93a79c5a6bdf63d37ac273a3f23c98b3f4e0801 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Jan 2025 17:34:51 +0000 Subject: [PATCH 124/802] refactor: [#1195] remove deprecated context section in docs It was used to group methods by services in the old core tracker. --- src/core/authentication/mod.rs | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index 4a65a26a7..1b841767e 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -60,8 +60,6 @@ impl Facade { /// # Errors /// /// Will return an error if the the authentication key cannot be verified. 
- /// - /// # Context: Authentication pub async fn authenticate(&self, key: &Key) -> Result<(), Error> { if self.is_private() { self.verify_auth_key(key).await @@ -77,8 +75,6 @@ impl Facade { /// It verifies an authentication key. /// - /// # Context: Authentication - /// /// # Errors /// /// Will return a `key::Error` if unable to get any `auth_key`. @@ -180,8 +176,6 @@ impl Facade { /// /// Authentication keys are used by HTTP trackers. /// - /// # Context: Authentication - /// /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. @@ -193,8 +187,6 @@ impl Facade { /// /// Authentication keys are used by HTTP trackers. /// - /// # Context: Authentication - /// /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. @@ -217,8 +209,6 @@ impl Facade { /// /// Authentication keys are used by HTTP trackers. /// - /// # Context: Authentication - /// /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the @@ -235,8 +225,6 @@ impl Facade { /// /// Authentication keys are used by HTTP trackers. /// - /// # Context: Authentication - /// /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the @@ -266,8 +254,6 @@ impl Facade { /// It removes an authentication key. /// - /// # Context: Authentication - /// /// # Errors /// /// Will return a `database::Error` if unable to remove the `key` to the database. @@ -280,8 +266,6 @@ impl Facade { } /// It removes an authentication key from memory. - /// - /// # Context: Authentication pub async fn remove_in_memory_auth_key(&self, key: &Key) { self.in_memory_key_repository.remove(key).await; } @@ -291,8 +275,6 @@ impl Facade { /// into memory with this function. Keys are automatically stored in the database when they /// are generated. /// - /// # Context: Authentication - /// /// # Errors /// /// Will return a `database::Error` if unable to `load_keys` from the database. 
From cd542dc3523ce52a26e32961f222f5f4167dd63f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Jan 2025 17:53:55 +0000 Subject: [PATCH 125/802] refactor: [#1195] extract authentication::Service --- src/core/authentication/mod.rs | 69 +++++++++++------------------ src/core/authentication/service.rs | 70 ++++++++++++++++++++++++++++++ 2 files changed, 95 insertions(+), 44 deletions(-) create mode 100644 src/core/authentication/service.rs diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index 1b841767e..7e60648f5 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -1,4 +1,3 @@ -use std::panic::Location; use std::sync::Arc; use std::time::Duration; @@ -14,6 +13,7 @@ use super::error::PeerKeyError; use crate::CurrentClock; pub mod key; +pub mod service; pub type PeerKey = key::PeerKey; pub type Key = key::Key; @@ -34,23 +34,25 @@ pub struct AddKeyRequest { } pub struct Facade { - /// The tracker configuration. - config: Core, - /// The database repository for the authentication keys. db_key_repository: DatabaseKeyRepository, /// In-memory implementation of the authentication key repository. - in_memory_key_repository: InMemoryKeyRepository, + in_memory_key_repository: Arc, + + /// The authentication service. + authentication_service: service::Service, } impl Facade { #[must_use] pub fn new(config: &Core, database: &Arc>) -> Self { + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + Self { - config: config.clone(), db_key_repository: DatabaseKeyRepository::new(database), - in_memory_key_repository: InMemoryKeyRepository::default(), + in_memory_key_repository: in_memory_key_repository.clone(), + authentication_service: service::Service::new(config, &in_memory_key_repository), } } @@ -61,40 +63,7 @@ impl Facade { /// /// Will return an error if the the authentication key cannot be verified. 
pub async fn authenticate(&self, key: &Key) -> Result<(), Error> { - if self.is_private() { - self.verify_auth_key(key).await - } else { - Ok(()) - } - } - - /// Returns `true` is the tracker is in private mode. - pub fn is_private(&self) -> bool { - self.config.private - } - - /// It verifies an authentication key. - /// - /// # Errors - /// - /// Will return a `key::Error` if unable to get any `auth_key`. - pub async fn verify_auth_key(&self, key: &Key) -> Result<(), Error> { - match self.in_memory_key_repository.get(key).await { - None => Err(Error::UnableToReadKey { - location: Location::caller(), - key: Box::new(key.clone()), - }), - Some(key) => match self.config.private_mode { - Some(private_mode) => { - if private_mode.check_keys_expiration { - return key::verify_key_expiration(&key); - } - - Ok(()) - } - None => key::verify_key_expiration(&key), - }, - } + self.authentication_service.authenticate(key).await } /// Adds new peer keys to the tracker. @@ -340,7 +309,11 @@ mod tests { let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - assert!(authentication.verify_auth_key(&unregistered_key).await.is_err()); + assert!(authentication + .authentication_service + .verify_auth_key(&unregistered_key) + .await + .is_err()); } #[tokio::test] @@ -355,7 +328,11 @@ mod tests { let result = authentication.remove_auth_key(&expiring_key.key()).await; assert!(result.is_ok()); - assert!(authentication.verify_auth_key(&expiring_key.key()).await.is_err()); + assert!(authentication + .authentication_service + .verify_auth_key(&expiring_key.key()) + .await + .is_err()); } #[tokio::test] @@ -373,7 +350,11 @@ mod tests { let result = authentication.load_keys_from_database().await; assert!(result.is_ok()); - assert!(authentication.verify_auth_key(&expiring_key.key()).await.is_ok()); + assert!(authentication + .authentication_service + .verify_auth_key(&expiring_key.key()) + .await + .is_ok()); } mod with_expiring_and { diff --git 
a/src/core/authentication/service.rs b/src/core/authentication/service.rs new file mode 100644 index 000000000..d33ed673b --- /dev/null +++ b/src/core/authentication/service.rs @@ -0,0 +1,70 @@ +use std::panic::Location; +use std::sync::Arc; + +use torrust_tracker_configuration::Core; + +use super::key::repository::in_memory::InMemoryKeyRepository; +use super::{key, Error, Key}; + +#[derive(Debug)] +pub struct Service { + /// The tracker configuration. + config: Core, + + /// In-memory implementation of the authentication key repository. + in_memory_key_repository: Arc, +} + +impl Service { + #[must_use] + pub fn new(config: &Core, in_memory_key_repository: &Arc) -> Self { + Self { + config: config.clone(), + in_memory_key_repository: in_memory_key_repository.clone(), + } + } + + /// It authenticates the peer `key` against the `Tracker` authentication + /// key list. + /// + /// # Errors + /// + /// Will return an error if the the authentication key cannot be verified. + pub async fn authenticate(&self, key: &Key) -> Result<(), Error> { + if self.is_private() { + self.verify_auth_key(key).await + } else { + Ok(()) + } + } + + /// Returns `true` is the tracker is in private mode. + #[must_use] + pub fn is_private(&self) -> bool { + self.config.private + } + + /// It verifies an authentication key. + /// + /// # Errors + /// + /// Will return a `key::Error` if unable to get any `auth_key`. 
+ pub async fn verify_auth_key(&self, key: &Key) -> Result<(), Error> { + match self.in_memory_key_repository.get(key).await { + None => Err(Error::UnableToReadKey { + location: Location::caller(), + key: Box::new(key.clone()), + }), + Some(key) => match self.config.private_mode { + Some(private_mode) => { + if private_mode.check_keys_expiration { + return key::verify_key_expiration(&key); + } + + Ok(()) + } + None => key::verify_key_expiration(&key), + }, + } + } +} From 81b4b3c0a006378264c2248a9b188d45e0783246 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Jan 2025 18:00:35 +0000 Subject: [PATCH 126/802] refactor: [#1195] extract and move method --- .../authentication/key/repository/in_memory.rs | 11 +++++++++++ src/core/authentication/mod.rs | 14 +++++--------- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/src/core/authentication/key/repository/in_memory.rs b/src/core/authentication/key/repository/in_memory.rs index 266d5a5fb..a15f9ecfa 100644 --- a/src/core/authentication/key/repository/in_memory.rs +++ b/src/core/authentication/key/repository/in_memory.rs @@ -27,4 +27,15 @@ impl InMemoryKeyRepository { let mut keys = self.keys.write().await; keys.clear(); } + + /// It resets the authentication keys with a new list of keys. + pub async fn reset_with(&self, peer_keys: Vec) { + let mut keys_lock = self.keys.write().await; + + keys_lock.clear(); + + for key in peer_keys { + keys_lock.insert(key.key.clone(), key.clone()); + } + } } diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index 7e60648f5..9d47409e3 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -239,10 +239,10 @@ impl Facade { self.in_memory_key_repository.remove(key).await; } - /// The `Tracker` stores the authentication keys in memory and in the database. - /// In case you need to restart the `Tracker` you can load the keys from the database - /// into memory with this function. 
Keys are automatically stored in the database when they - /// are generated. + /// The `Tracker` stores the authentication keys in memory and in the + /// database. In case you need to restart the `Tracker` you can load the + /// keys from the database into memory with this function. Keys are + /// automatically stored in the database when they are generated. /// /// # Errors /// @@ -250,11 +250,7 @@ impl Facade { pub async fn load_keys_from_database(&self) -> Result<(), databases::error::Error> { let keys_from_database = self.db_key_repository.load_keys()?; - self.in_memory_key_repository.clear().await; - - for key in keys_from_database { - self.in_memory_key_repository.insert(&key).await; - } + self.in_memory_key_repository.reset_with(keys_from_database).await; Ok(()) } From 23590e7bff89f842682209f0801b20ff37ebd98f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Jan 2025 18:08:06 +0000 Subject: [PATCH 127/802] refactor: [#1195] make method private --- src/core/authentication/mod.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index 9d47409e3..098524ed9 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -186,7 +186,7 @@ impl Facade { /// # Arguments /// /// * `key` - The pre-generated key. - pub async fn add_permanent_auth_key(&self, key: Key) -> Result { + async fn add_permanent_auth_key(&self, key: Key) -> Result { self.add_auth_key(key, None).await } @@ -239,9 +239,9 @@ impl Facade { self.in_memory_key_repository.remove(key).await; } - /// The `Tracker` stores the authentication keys in memory and in the - /// database. In case you need to restart the `Tracker` you can load the - /// keys from the database into memory with this function. Keys are + /// The `Tracker` stores the authentication keys in memory and in the + /// database. 
In case you need to restart the `Tracker` you can load the + /// keys from the database into memory with this function. Keys are /// automatically stored in the database when they are generated. /// /// # Errors From bb2f9e07072af8b58330ef9842d1b99434437ae9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 21 Jan 2025 18:28:31 +0000 Subject: [PATCH 128/802] refactor: [#1195] extract core::authentication::handler::KeysHandler --- src/core/authentication/handler.rs | 233 ++++++++++++++++++ src/core/authentication/mod.rs | 144 ++--------- .../apis/v1/context/auth_key/handlers.rs | 3 +- 3 files changed, 249 insertions(+), 131 deletions(-) create mode 100644 src/core/authentication/handler.rs diff --git a/src/core/authentication/handler.rs b/src/core/authentication/handler.rs new file mode 100644 index 000000000..f327c4fdd --- /dev/null +++ b/src/core/authentication/handler.rs @@ -0,0 +1,233 @@ +use std::sync::Arc; +use std::time::Duration; + +use torrust_tracker_clock::clock::Time; +use torrust_tracker_located_error::Located; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::key::repository::in_memory::InMemoryKeyRepository; +use super::key::repository::persisted::DatabaseKeyRepository; +use super::{key, CurrentClock, Key, PeerKey}; +use crate::core::databases; +use crate::core::error::PeerKeyError; + +/// This type contains the info needed to add a new tracker key. +/// +/// You can upload a pre-generated key or let the app to generate a new one. +/// You can also set an expiration date or leave it empty (`None`) if you want +/// to create a permanent key that does not expire. +#[derive(Debug)] +pub struct AddKeyRequest { + /// The pre-generated key. Use `None` to generate a random key. + pub opt_key: Option, + + /// How long the key will be valid in seconds. Use `None` for permanent keys. + pub opt_seconds_valid: Option, +} + +pub struct KeysHandler { + /// The database repository for the authentication keys. 
+ db_key_repository: Arc, + + /// In-memory implementation of the authentication key repository. + in_memory_key_repository: Arc, +} + +impl KeysHandler { + #[must_use] + pub fn new(db_key_repository: &Arc, in_memory_key_repository: &Arc) -> Self { + Self { + db_key_repository: db_key_repository.clone(), + in_memory_key_repository: in_memory_key_repository.clone(), + } + } + + /// Adds new peer keys to the tracker. + /// + /// Keys can be pre-generated or randomly created. They can also be permanent or expire. + /// + /// # Errors + /// + /// Will return an error if: + /// + /// - The key duration overflows the duration type maximum value. + /// - The provided pre-generated key is invalid. + /// - The key could not been persisted due to database issues. + pub async fn add_peer_key(&self, add_key_req: AddKeyRequest) -> Result { + // code-review: all methods related to keys should be moved to a new independent "keys" service. + + match add_key_req.opt_key { + // Upload pre-generated key + Some(pre_existing_key) => { + if let Some(seconds_valid) = add_key_req.opt_seconds_valid { + // Expiring key + let Some(valid_until) = CurrentClock::now_add(&Duration::from_secs(seconds_valid)) else { + return Err(PeerKeyError::DurationOverflow { seconds_valid }); + }; + + let key = pre_existing_key.parse::(); + + match key { + Ok(key) => match self.add_auth_key(key, Some(valid_until)).await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { + source: Located(err).into(), + }), + }, + Err(err) => Err(PeerKeyError::InvalidKey { + key: pre_existing_key, + source: Located(err).into(), + }), + } + } else { + // Permanent key + let key = pre_existing_key.parse::(); + + match key { + Ok(key) => match self.add_permanent_auth_key(key).await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { + source: Located(err).into(), + }), + }, + Err(err) => Err(PeerKeyError::InvalidKey { + key: pre_existing_key, + source: 
Located(err).into(), + }), + } + } + } + // Generate a new random key + None => match add_key_req.opt_seconds_valid { + // Expiring key + Some(seconds_valid) => match self.generate_auth_key(Some(Duration::from_secs(seconds_valid))).await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { + source: Located(err).into(), + }), + }, + // Permanent key + None => match self.generate_permanent_auth_key().await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { + source: Located(err).into(), + }), + }, + }, + } + } + + /// It generates a new permanent authentication key. + /// + /// Authentication keys are used by HTTP trackers. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the database. + pub async fn generate_permanent_auth_key(&self) -> Result { + self.generate_auth_key(None).await + } + + /// It generates a new expiring authentication key. + /// + /// Authentication keys are used by HTTP trackers. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the database. + /// + /// # Arguments + /// + /// * `lifetime` - The duration in seconds for the new key. The key will be + /// no longer valid after `lifetime` seconds. + pub async fn generate_auth_key(&self, lifetime: Option) -> Result { + let peer_key = key::generate_key(lifetime); + + self.db_key_repository.add(&peer_key)?; + + self.in_memory_key_repository.insert(&peer_key).await; + + Ok(peer_key) + } + + /// It adds a pre-generated permanent authentication key. + /// + /// Authentication keys are used by HTTP trackers. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the + /// database. For example, if the key already exist. + /// + /// # Arguments + /// + /// * `key` - The pre-generated key. 
+ pub async fn add_permanent_auth_key(&self, key: Key) -> Result { + self.add_auth_key(key, None).await + } + + /// It adds a pre-generated authentication key. + /// + /// Authentication keys are used by HTTP trackers. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the + /// database. For example, if the key already exist. + /// + /// # Arguments + /// + /// * `key` - The pre-generated key. + /// * `lifetime` - The duration in seconds for the new key. The key will be + /// no longer valid after `lifetime` seconds. + pub async fn add_auth_key( + &self, + key: Key, + valid_until: Option, + ) -> Result { + let peer_key = PeerKey { key, valid_until }; + + // code-review: should we return a friendly error instead of the DB + // constrain error when the key already exist? For now, it's returning + // the specif error for each DB driver when a UNIQUE constrain fails. + self.db_key_repository.add(&peer_key)?; + + self.in_memory_key_repository.insert(&peer_key).await; + + Ok(peer_key) + } + + /// It removes an authentication key. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to remove the `key` to the database. + pub async fn remove_auth_key(&self, key: &Key) -> Result<(), databases::error::Error> { + self.db_key_repository.remove(key)?; + + self.remove_in_memory_auth_key(key).await; + + Ok(()) + } + + /// It removes an authentication key from memory. + pub async fn remove_in_memory_auth_key(&self, key: &Key) { + self.in_memory_key_repository.remove(key).await; + } + + /// The `Tracker` stores the authentication keys in memory and in the + /// database. In case you need to restart the `Tracker` you can load the + /// keys from the database into memory with this function. Keys are + /// automatically stored in the database when they are generated. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to `load_keys` from the database. 
+ pub async fn load_keys_from_database(&self) -> Result<(), databases::error::Error> { + let keys_from_database = self.db_key_repository.load_keys()?; + + self.in_memory_key_repository.reset_with(keys_from_database).await; + + Ok(()) + } +} diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index 098524ed9..5c9356c10 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -1,17 +1,17 @@ use std::sync::Arc; use std::time::Duration; +use handler::{AddKeyRequest, KeysHandler}; use key::repository::in_memory::InMemoryKeyRepository; use key::repository::persisted::DatabaseKeyRepository; -use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::Core; -use torrust_tracker_located_error::Located; use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::databases::{self, Database}; use super::error::PeerKeyError; use crate::CurrentClock; +pub mod handler; pub mod key; pub mod service; @@ -19,40 +19,23 @@ pub type PeerKey = key::PeerKey; pub type Key = key::Key; pub type Error = key::Error; -/// This type contains the info needed to add a new tracker key. -/// -/// You can upload a pre-generated key or let the app to generate a new one. -/// You can also set an expiration date or leave it empty (`None`) if you want -/// to create a permanent key that does not expire. -#[derive(Debug)] -pub struct AddKeyRequest { - /// The pre-generated key. Use `None` to generate a random key. - pub opt_key: Option, - - /// How long the key will be valid in seconds. Use `None` for permanent keys. - pub opt_seconds_valid: Option, -} - pub struct Facade { - /// The database repository for the authentication keys. - db_key_repository: DatabaseKeyRepository, - - /// In-memory implementation of the authentication key repository. - in_memory_key_repository: Arc, - /// The authentication service. authentication_service: service::Service, + + /// The keys handler. 
+ keys_handler: handler::KeysHandler, } impl Facade { #[must_use] pub fn new(config: &Core, database: &Arc>) -> Self { + let db_key_repository = Arc::new(DatabaseKeyRepository::new(database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); Self { - db_key_repository: DatabaseKeyRepository::new(database), - in_memory_key_repository: in_memory_key_repository.clone(), authentication_service: service::Service::new(config, &in_memory_key_repository), + keys_handler: KeysHandler::new(&db_key_repository.clone(), &in_memory_key_repository.clone()), } } @@ -78,67 +61,7 @@ impl Facade { /// - The provided pre-generated key is invalid. /// - The key could not been persisted due to database issues. pub async fn add_peer_key(&self, add_key_req: AddKeyRequest) -> Result { - // code-review: all methods related to keys should be moved to a new independent "keys" service. - - match add_key_req.opt_key { - // Upload pre-generated key - Some(pre_existing_key) => { - if let Some(seconds_valid) = add_key_req.opt_seconds_valid { - // Expiring key - let Some(valid_until) = CurrentClock::now_add(&Duration::from_secs(seconds_valid)) else { - return Err(PeerKeyError::DurationOverflow { seconds_valid }); - }; - - let key = pre_existing_key.parse::(); - - match key { - Ok(key) => match self.add_auth_key(key, Some(valid_until)).await { - Ok(auth_key) => Ok(auth_key), - Err(err) => Err(PeerKeyError::DatabaseError { - source: Located(err).into(), - }), - }, - Err(err) => Err(PeerKeyError::InvalidKey { - key: pre_existing_key, - source: Located(err).into(), - }), - } - } else { - // Permanent key - let key = pre_existing_key.parse::(); - - match key { - Ok(key) => match self.add_permanent_auth_key(key).await { - Ok(auth_key) => Ok(auth_key), - Err(err) => Err(PeerKeyError::DatabaseError { - source: Located(err).into(), - }), - }, - Err(err) => Err(PeerKeyError::InvalidKey { - key: pre_existing_key, - source: Located(err).into(), - }), - } - } - } - // Generate a new 
random key - None => match add_key_req.opt_seconds_valid { - // Expiring key - Some(seconds_valid) => match self.generate_auth_key(Some(Duration::from_secs(seconds_valid))).await { - Ok(auth_key) => Ok(auth_key), - Err(err) => Err(PeerKeyError::DatabaseError { - source: Located(err).into(), - }), - }, - // Permanent key - None => match self.generate_permanent_auth_key().await { - Ok(auth_key) => Ok(auth_key), - Err(err) => Err(PeerKeyError::DatabaseError { - source: Located(err).into(), - }), - }, - }, - } + self.keys_handler.add_peer_key(add_key_req).await } /// It generates a new permanent authentication key. @@ -149,7 +72,7 @@ impl Facade { /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. pub async fn generate_permanent_auth_key(&self) -> Result { - self.generate_auth_key(None).await + self.keys_handler.generate_permanent_auth_key().await } /// It generates a new expiring authentication key. @@ -165,29 +88,7 @@ impl Facade { /// * `lifetime` - The duration in seconds for the new key. The key will be /// no longer valid after `lifetime` seconds. pub async fn generate_auth_key(&self, lifetime: Option) -> Result { - let peer_key = key::generate_key(lifetime); - - self.db_key_repository.add(&peer_key)?; - - self.in_memory_key_repository.insert(&peer_key).await; - - Ok(peer_key) - } - - /// It adds a pre-generated permanent authentication key. - /// - /// Authentication keys are used by HTTP trackers. - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to add the `auth_key` to the - /// database. For example, if the key already exist. - /// - /// # Arguments - /// - /// * `key` - The pre-generated key. - async fn add_permanent_auth_key(&self, key: Key) -> Result { - self.add_auth_key(key, None).await + self.keys_handler.generate_auth_key(lifetime).await } /// It adds a pre-generated authentication key. 
@@ -209,16 +110,7 @@ impl Facade { key: Key, valid_until: Option, ) -> Result { - let peer_key = PeerKey { key, valid_until }; - - // code-review: should we return a friendly error instead of the DB - // constrain error when the key already exist? For now, it's returning - // the specif error for each DB driver when a UNIQUE constrain fails. - self.db_key_repository.add(&peer_key)?; - - self.in_memory_key_repository.insert(&peer_key).await; - - Ok(peer_key) + self.keys_handler.add_auth_key(key, valid_until).await } /// It removes an authentication key. @@ -227,16 +119,12 @@ impl Facade { /// /// Will return a `database::Error` if unable to remove the `key` to the database. pub async fn remove_auth_key(&self, key: &Key) -> Result<(), databases::error::Error> { - self.db_key_repository.remove(key)?; - - self.remove_in_memory_auth_key(key).await; - - Ok(()) + self.keys_handler.remove_auth_key(key).await } /// It removes an authentication key from memory. pub async fn remove_in_memory_auth_key(&self, key: &Key) { - self.in_memory_key_repository.remove(key).await; + self.keys_handler.remove_in_memory_auth_key(key).await; } /// The `Tracker` stores the authentication keys in memory and in the @@ -248,11 +136,7 @@ impl Facade { /// /// Will return a `database::Error` if unable to `load_keys` from the database. 
pub async fn load_keys_from_database(&self) -> Result<(), databases::error::Error> { - let keys_from_database = self.db_key_repository.load_keys()?; - - self.in_memory_key_repository.reset_with(keys_from_database).await; - - Ok(()) + self.keys_handler.load_keys_from_database().await } } diff --git a/src/servers/apis/v1/context/auth_key/handlers.rs b/src/servers/apis/v1/context/auth_key/handlers.rs index ba345d8a5..f0c131bbf 100644 --- a/src/servers/apis/v1/context/auth_key/handlers.rs +++ b/src/servers/apis/v1/context/auth_key/handlers.rs @@ -12,7 +12,8 @@ use super::responses::{ auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, failed_to_reload_keys_response, invalid_auth_key_duration_response, invalid_auth_key_response, }; -use crate::core::authentication::{AddKeyRequest, Key}; +use crate::core::authentication::handler::AddKeyRequest; +use crate::core::authentication::Key; use crate::core::Tracker; use crate::servers::apis::v1::context::auth_key::resources::AuthKey; use crate::servers::apis::v1::responses::{invalid_auth_key_param_response, ok_response}; From c06da07c44300ced5fe28976119b4386a68d369b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 22 Jan 2025 08:09:42 +0000 Subject: [PATCH 129/802] refactor: [#1195] more authentication tests to authentication service --- src/core/authentication/mod.rs | 33 +++++--------------------- src/core/authentication/service.rs | 37 +++++++++++++++++++++++++++++- 2 files changed, 42 insertions(+), 28 deletions(-) diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index 5c9356c10..86337b714 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -145,7 +145,6 @@ mod tests { mod the_tracker_configured_as_private { - use std::str::FromStr; use std::time::Duration; use torrust_tracker_configuration::v2_0_0::core::PrivateMode; @@ -172,30 +171,6 @@ mod tests { authentication::Facade::new(&config.core, &database.clone()) } - #[tokio::test] - 
async fn it_should_fail_authenticating_a_peer_when_it_uses_an_unregistered_key() { - let authentication = instantiate_authentication(); - - let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - - let result = authentication.authenticate(&unregistered_key).await; - - assert!(result.is_err()); - } - - #[tokio::test] - async fn it_should_fail_verifying_an_unregistered_authentication_key() { - let authentication = instantiate_authentication(); - - let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - - assert!(authentication - .authentication_service - .verify_auth_key(&unregistered_key) - .await - .is_err()); - } - #[tokio::test] async fn it_should_remove_an_authentication_key() { let authentication = instantiate_authentication(); @@ -208,9 +183,11 @@ mod tests { let result = authentication.remove_auth_key(&expiring_key.key()).await; assert!(result.is_ok()); + + // The key should no longer be valid assert!(authentication .authentication_service - .verify_auth_key(&expiring_key.key()) + .authenticate(&expiring_key.key()) .await .is_err()); } @@ -230,9 +207,11 @@ mod tests { let result = authentication.load_keys_from_database().await; assert!(result.is_ok()); + + // The key should no longer be valid assert!(authentication .authentication_service - .verify_auth_key(&expiring_key.key()) + .authenticate(&expiring_key.key()) .await .is_ok()); } diff --git a/src/core/authentication/service.rs b/src/core/authentication/service.rs index d33ed673b..d7572136f 100644 --- a/src/core/authentication/service.rs +++ b/src/core/authentication/service.rs @@ -49,7 +49,7 @@ impl Service { /// # Errors /// /// Will return a `key::Error` if unable to get any `auth_key`. 
- pub async fn verify_auth_key(&self, key: &Key) -> Result<(), Error> { + async fn verify_auth_key(&self, key: &Key) -> Result<(), Error> { match self.in_memory_key_repository.get(key).await { None => Err(Error::UnableToReadKey { location: Location::caller(), @@ -68,3 +68,38 @@ impl Service { } } } + +#[cfg(test)] +mod tests { + + mod the_tracker_configured_as_private { + + use std::str::FromStr; + use std::sync::Arc; + + use torrust_tracker_test_helpers::configuration; + + use crate::core::authentication; + use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; + use crate::core::authentication::service::Service; + + fn instantiate_authentication() -> Service { + let config = configuration::ephemeral_private(); + + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + + Service::new(&config.core, &in_memory_key_repository.clone()) + } + + #[tokio::test] + async fn it_should_not_authenticate_an_unregistered_key() { + let authentication = instantiate_authentication(); + + let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let result = authentication.authenticate(&unregistered_key).await; + + assert!(result.is_err()); + } + } +} From 9c61b2685d0d0ffbba7be78ba032232f4e654931 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 22 Jan 2025 08:26:06 +0000 Subject: [PATCH 130/802] refactor: [#1195] move tests to KeysHandler These tests do not require the authentication service. 
--- src/core/authentication/handler.rs | 133 +++++++++++++++++++++++++++++ src/core/authentication/mod.rs | 64 -------------- 2 files changed, 133 insertions(+), 64 deletions(-) diff --git a/src/core/authentication/handler.rs b/src/core/authentication/handler.rs index f327c4fdd..3ada2b110 100644 --- a/src/core/authentication/handler.rs +++ b/src/core/authentication/handler.rs @@ -231,3 +231,136 @@ impl KeysHandler { Ok(()) } } + +#[cfg(test)] +mod tests { + + mod the_keys_handler_when_tracker_is_configured_as_private { + + use std::sync::Arc; + + use torrust_tracker_configuration::v2_0_0::core::PrivateMode; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_test_helpers::configuration; + + use crate::core::authentication::handler::KeysHandler; + use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; + use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; + use crate::core::services::initialize_database; + + fn instantiate_keys_handler() -> KeysHandler { + let config = configuration::ephemeral_private(); + + instantiate_keys_handler_with_configuration(&config) + } + + #[allow(dead_code)] + fn instantiate_keys_handler_with_checking_keys_expiration_disabled() -> KeysHandler { + let mut config = configuration::ephemeral_private(); + + config.core.private_mode = Some(PrivateMode { + check_keys_expiration: false, + }); + + instantiate_keys_handler_with_configuration(&config) + } + + fn instantiate_keys_handler_with_configuration(config: &Configuration) -> KeysHandler { + let database = initialize_database(config); + + let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + + KeysHandler::new(&db_key_repository, &in_memory_key_repository) + } + + mod with_expiring_and { + + mod randomly_generated_keys { + use std::time::Duration; + + use torrust_tracker_clock::clock::Time; + + use 
crate::core::authentication::handler::tests::the_keys_handler_when_tracker_is_configured_as_private::instantiate_keys_handler; + use crate::CurrentClock; + + #[tokio::test] + async fn it_should_generate_the_key() { + let keys_handler = instantiate_keys_handler(); + + let peer_key = keys_handler.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); + + assert_eq!( + peer_key.valid_until, + Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) + ); + } + } + + mod pre_generated_keys { + use std::time::Duration; + + use torrust_tracker_clock::clock::Time; + + use crate::core::authentication::handler::tests::the_keys_handler_when_tracker_is_configured_as_private::instantiate_keys_handler; + use crate::core::authentication::{AddKeyRequest, Key}; + use crate::CurrentClock; + + #[tokio::test] + async fn it_should_add_a_pre_generated_key() { + let keys_handler = instantiate_keys_handler(); + + let peer_key = keys_handler + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: Some(100), + }) + .await + .unwrap(); + + assert_eq!( + peer_key.valid_until, + Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) + ); + } + } + } + + mod with_permanent_and { + + mod randomly_generated_keys { + use crate::core::authentication::handler::tests::the_keys_handler_when_tracker_is_configured_as_private::instantiate_keys_handler; + + #[tokio::test] + async fn it_should_generate_the_key() { + let keys_handler = instantiate_keys_handler(); + + let peer_key = keys_handler.generate_permanent_auth_key().await.unwrap(); + + assert_eq!(peer_key.valid_until, None); + } + } + + mod pre_generated_keys { + + use crate::core::authentication::handler::tests::the_keys_handler_when_tracker_is_configured_as_private::instantiate_keys_handler; + use crate::core::authentication::{AddKeyRequest, Key}; + + #[tokio::test] + async fn it_should_add_a_pre_generated_key() { + let keys_handler = 
instantiate_keys_handler(); + + let peer_key = keys_handler + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: None, + }) + .await + .unwrap(); + + assert_eq!(peer_key.valid_until, None); + } + } + } + } +} diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index 86337b714..4d0001fd2 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -221,28 +221,10 @@ mod tests { mod randomly_generated_keys { use std::time::Duration; - use torrust_tracker_clock::clock::Time; - use crate::core::authentication::tests::the_tracker_configured_as_private::{ instantiate_authentication, instantiate_authentication_with_checking_keys_expiration_disabled, }; use crate::core::authentication::Key; - use crate::CurrentClock; - - #[tokio::test] - async fn it_should_generate_the_key() { - let authentication = instantiate_authentication(); - - let peer_key = authentication - .generate_auth_key(Some(Duration::from_secs(100))) - .await - .unwrap(); - - assert_eq!( - peer_key.valid_until, - Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) - ); - } #[tokio::test] async fn it_should_authenticate_a_peer_with_the_key() { @@ -274,33 +256,11 @@ mod tests { } mod pre_generated_keys { - use std::time::Duration; - - use torrust_tracker_clock::clock::Time; use crate::core::authentication::tests::the_tracker_configured_as_private::{ instantiate_authentication, instantiate_authentication_with_checking_keys_expiration_disabled, }; use crate::core::authentication::{AddKeyRequest, Key}; - use crate::CurrentClock; - - #[tokio::test] - async fn it_should_add_a_pre_generated_key() { - let authentication = instantiate_authentication(); - - let peer_key = authentication - .add_peer_key(AddKeyRequest { - opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), - opt_seconds_valid: Some(100), - }) - .await - .unwrap(); - - assert_eq!( - 
peer_key.valid_until, - Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) - ); - } #[tokio::test] async fn it_should_authenticate_a_peer_with_the_key() { @@ -341,15 +301,6 @@ mod tests { mod randomly_generated_keys { use crate::core::authentication::tests::the_tracker_configured_as_private::instantiate_authentication; - #[tokio::test] - async fn it_should_generate_the_key() { - let authentication = instantiate_authentication(); - - let peer_key = authentication.generate_permanent_auth_key().await.unwrap(); - - assert_eq!(peer_key.valid_until, None); - } - #[tokio::test] async fn it_should_authenticate_a_peer_with_the_key() { let authentication = instantiate_authentication(); @@ -366,21 +317,6 @@ mod tests { use crate::core::authentication::tests::the_tracker_configured_as_private::instantiate_authentication; use crate::core::authentication::{AddKeyRequest, Key}; - #[tokio::test] - async fn it_should_add_a_pre_generated_key() { - let authentication = instantiate_authentication(); - - let peer_key = authentication - .add_peer_key(AddKeyRequest { - opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), - opt_seconds_valid: None, - }) - .await - .unwrap(); - - assert_eq!(peer_key.valid_until, None); - } - #[tokio::test] async fn it_should_authenticate_a_peer_with_the_key() { let authentication = instantiate_authentication(); From 663250bfa5921fd9e4ab949bd4af582fc1dfa771 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 22 Jan 2025 08:29:39 +0000 Subject: [PATCH 131/802] refactor: [#1195] rename methods --- src/core/authentication/mod.rs | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index 4d0001fd2..bab0de8a3 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -153,14 +153,15 @@ mod tests { use crate::core::authentication; use crate::core::services::initialize_database; - fn 
instantiate_authentication() -> authentication::Facade { + fn instantiate_authentication_facade() -> authentication::Facade { let config = configuration::ephemeral_private(); let database = initialize_database(&config); + authentication::Facade::new(&config.core, &database.clone()) } - fn instantiate_authentication_with_checking_keys_expiration_disabled() -> authentication::Facade { + fn instantiate_authentication_facade_with_checking_keys_expiration_disabled() -> authentication::Facade { let mut config = configuration::ephemeral_private(); config.core.private_mode = Some(PrivateMode { @@ -168,12 +169,13 @@ mod tests { }); let database = initialize_database(&config); + authentication::Facade::new(&config.core, &database.clone()) } #[tokio::test] async fn it_should_remove_an_authentication_key() { - let authentication = instantiate_authentication(); + let authentication = instantiate_authentication_facade(); let expiring_key = authentication .generate_auth_key(Some(Duration::from_secs(100))) @@ -194,7 +196,7 @@ mod tests { #[tokio::test] async fn it_should_load_authentication_keys_from_the_database() { - let authentication = instantiate_authentication(); + let authentication = instantiate_authentication_facade(); let expiring_key = authentication .generate_auth_key(Some(Duration::from_secs(100))) @@ -222,13 +224,13 @@ mod tests { use std::time::Duration; use crate::core::authentication::tests::the_tracker_configured_as_private::{ - instantiate_authentication, instantiate_authentication_with_checking_keys_expiration_disabled, + instantiate_authentication_facade, instantiate_authentication_facade_with_checking_keys_expiration_disabled, }; use crate::core::authentication::Key; #[tokio::test] async fn it_should_authenticate_a_peer_with_the_key() { - let authentication = instantiate_authentication(); + let authentication = instantiate_authentication_facade(); let peer_key = authentication .generate_auth_key(Some(Duration::from_secs(100))) @@ -242,7 +244,7 @@ mod tests { 
#[tokio::test] async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { - let authentication = instantiate_authentication_with_checking_keys_expiration_disabled(); + let authentication = instantiate_authentication_facade_with_checking_keys_expiration_disabled(); let past_timestamp = Duration::ZERO; @@ -258,13 +260,13 @@ mod tests { mod pre_generated_keys { use crate::core::authentication::tests::the_tracker_configured_as_private::{ - instantiate_authentication, instantiate_authentication_with_checking_keys_expiration_disabled, + instantiate_authentication_facade, instantiate_authentication_facade_with_checking_keys_expiration_disabled, }; use crate::core::authentication::{AddKeyRequest, Key}; #[tokio::test] async fn it_should_authenticate_a_peer_with_the_key() { - let authentication = instantiate_authentication(); + let authentication = instantiate_authentication_facade(); let peer_key = authentication .add_peer_key(AddKeyRequest { @@ -281,7 +283,7 @@ mod tests { #[tokio::test] async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { - let authentication = instantiate_authentication_with_checking_keys_expiration_disabled(); + let authentication = instantiate_authentication_facade_with_checking_keys_expiration_disabled(); let peer_key = authentication .add_peer_key(AddKeyRequest { @@ -299,11 +301,11 @@ mod tests { mod with_permanent_and { mod randomly_generated_keys { - use crate::core::authentication::tests::the_tracker_configured_as_private::instantiate_authentication; + use crate::core::authentication::tests::the_tracker_configured_as_private::instantiate_authentication_facade; #[tokio::test] async fn it_should_authenticate_a_peer_with_the_key() { - let authentication = instantiate_authentication(); + let authentication = instantiate_authentication_facade(); let peer_key = authentication.generate_permanent_auth_key().await.unwrap(); @@ -314,12 +316,12 @@ mod tests { } mod 
pre_generated_keys { - use crate::core::authentication::tests::the_tracker_configured_as_private::instantiate_authentication; + use crate::core::authentication::tests::the_tracker_configured_as_private::instantiate_authentication_facade; use crate::core::authentication::{AddKeyRequest, Key}; #[tokio::test] async fn it_should_authenticate_a_peer_with_the_key() { - let authentication = instantiate_authentication(); + let authentication = instantiate_authentication_facade(); let peer_key = authentication .add_peer_key(AddKeyRequest { From 504357c2d2e1c2db0dcf0f4e2b3673907a87122a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 22 Jan 2025 08:44:17 +0000 Subject: [PATCH 132/802] refactor: [#1195] inject dependencies in authenticatio::Facade Facade service will be removed. --- src/app_test.rs | 10 ++++++++- src/bootstrap/app.rs | 10 ++++++++- src/bootstrap/jobs/http_tracker.rs | 10 ++++++++- src/bootstrap/jobs/tracker_apis.rs | 10 ++++++++- src/core/authentication/mod.rs | 34 +++++++++++++++++++----------- src/servers/apis/server.rs | 10 ++++++++- src/servers/http/server.rs | 10 ++++++++- src/servers/udp/server/mod.rs | 20 ++++++++++++++++-- 8 files changed, 94 insertions(+), 20 deletions(-) diff --git a/src/app_test.rs b/src/app_test.rs index 884aed6ef..13b10fefa 100644 --- a/src/app_test.rs +++ b/src/app_test.rs @@ -3,6 +3,8 @@ use std::sync::Arc; use torrust_tracker_configuration::Configuration; +use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; +use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::core::databases::Database; use crate::core::services::initialize_database; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; @@ -25,7 +27,13 @@ pub fn initialize_tracker_dependencies( &config.core, &in_memory_whitelist.clone(), )); - let authentication = Arc::new(authentication::Facade::new(&config.core, &database.clone())); + let db_key_repository = 
Arc::new(DatabaseKeyRepository::new(&database)); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication = Arc::new(authentication::Facade::new( + &config.core, + &db_key_repository.clone(), + &in_memory_key_repository.clone(), + )); (database, in_memory_whitelist, whitelist_authorization, authentication) } diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index bc6b7a6bd..a87e4ca8e 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -22,6 +22,8 @@ use tracing::instrument; use super::config::initialize_configuration; use crate::bootstrap; use crate::container::AppContainer; +use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; +use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::core::{authentication, whitelist}; @@ -89,7 +91,13 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { &in_memory_whitelist.clone(), )); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let authentication = Arc::new(authentication::Facade::new(&configuration.core, &database.clone())); + let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication = Arc::new(authentication::Facade::new( + &configuration.core, + &db_key_repository.clone(), + &in_memory_key_repository.clone(), + )); let tracker = Arc::new(initialize_tracker( configuration, diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index b07ff935c..abcc2a08c 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -100,6 +100,8 @@ mod tests { use 
crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::http_tracker::start_job; + use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; + use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::core::services::{initialize_database, initialize_tracker, statistics}; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::core::{authentication, whitelist}; @@ -123,7 +125,13 @@ mod tests { &cfg.core, &in_memory_whitelist.clone(), )); - let authentication = Arc::new(authentication::Facade::new(&cfg.core, &database.clone())); + let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication = Arc::new(authentication::Facade::new( + &cfg.core, + &db_key_repository.clone(), + &in_memory_key_repository.clone(), + )); let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 70e2e6737..56e4a2e44 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -149,6 +149,8 @@ mod tests { use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::tracker_apis::start_job; + use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; + use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::core::{authentication, whitelist}; @@ -176,7 +178,13 @@ mod tests { &in_memory_whitelist.clone(), )); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let authentication = 
Arc::new(authentication::Facade::new(&cfg.core, &database.clone())); + let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication = Arc::new(authentication::Facade::new( + &cfg.core, + &db_key_repository.clone(), + &in_memory_key_repository.clone(), + )); let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index bab0de8a3..d26379b09 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -7,7 +7,7 @@ use key::repository::persisted::DatabaseKeyRepository; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::DurationSinceUnixEpoch; -use super::databases::{self, Database}; +use super::databases::{self}; use super::error::PeerKeyError; use crate::CurrentClock; @@ -29,12 +29,13 @@ pub struct Facade { impl Facade { #[must_use] - pub fn new(config: &Core, database: &Arc>) -> Self { - let db_key_repository = Arc::new(DatabaseKeyRepository::new(database)); - let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - + pub fn new( + config: &Core, + db_key_repository: &Arc, + in_memory_key_repository: &Arc, + ) -> Self { Self { - authentication_service: service::Service::new(config, &in_memory_key_repository), + authentication_service: service::Service::new(config, in_memory_key_repository), keys_handler: KeysHandler::new(&db_key_repository.clone(), &in_memory_key_repository.clone()), } } @@ -145,20 +146,22 @@ mod tests { mod the_tracker_configured_as_private { + use std::sync::Arc; use std::time::Duration; use torrust_tracker_configuration::v2_0_0::core::PrivateMode; + use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; use crate::core::authentication; + use 
crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; + use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::core::services::initialize_database; fn instantiate_authentication_facade() -> authentication::Facade { let config = configuration::ephemeral_private(); - let database = initialize_database(&config); - - authentication::Facade::new(&config.core, &database.clone()) + instantiate_authentication_facade_with_configuration(&config) } fn instantiate_authentication_facade_with_checking_keys_expiration_disabled() -> authentication::Facade { @@ -168,9 +171,16 @@ mod tests { check_keys_expiration: false, }); - let database = initialize_database(&config); - - authentication::Facade::new(&config.core, &database.clone()) + instantiate_authentication_facade_with_configuration(&config) + } + + fn instantiate_authentication_facade_with_configuration(config: &Configuration) -> authentication::Facade { + let database = initialize_database(config); + + let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + + authentication::Facade::new(&config.core, &db_key_repository.clone(), &in_memory_key_repository.clone()) } #[tokio::test] diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index a11442a53..b6ff50995 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -342,6 +342,8 @@ mod tests { use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::make_rust_tls; + use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; + use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::core::{authentication, whitelist}; 
@@ -369,7 +371,13 @@ mod tests { &in_memory_whitelist.clone(), )); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let authentication = Arc::new(authentication::Facade::new(&cfg.core, &database.clone())); + let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication = Arc::new(authentication::Facade::new( + &cfg.core, + &db_key_repository.clone(), + &in_memory_key_repository.clone(), + )); let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index e6370c775..140ef4e07 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -246,6 +246,8 @@ mod tests { use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::make_rust_tls; + use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; + use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::core::{authentication, whitelist}; @@ -268,7 +270,13 @@ mod tests { &in_memory_whitelist.clone(), )); let _whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let authentication = Arc::new(authentication::Facade::new(&cfg.core, &database.clone())); + let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication = Arc::new(authentication::Facade::new( + &cfg.core, + &db_key_repository.clone(), + &in_memory_key_repository.clone(), + )); let tracker = Arc::new(initialize_tracker(&cfg, &database, 
&whitelist_authorization, &authentication)); diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index 078510bcd..fafb82997 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -64,6 +64,8 @@ mod tests { use super::spawner::Spawner; use super::Server; use crate::bootstrap::app::initialize_global_services; + use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; + use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::core::{authentication, whitelist}; @@ -88,7 +90,14 @@ mod tests { &in_memory_whitelist.clone(), )); let _whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let authentication = Arc::new(authentication::Facade::new(&cfg.core, &database.clone())); + let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication = Arc::new(authentication::Facade::new( + &cfg.core, + &db_key_repository.clone(), + &in_memory_key_repository.clone(), + )); + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); let udp_trackers = cfg.udp_trackers.clone().expect("missing UDP trackers configuration"); @@ -133,7 +142,14 @@ mod tests { &cfg.core, &in_memory_whitelist.clone(), )); - let authentication = Arc::new(authentication::Facade::new(&cfg.core, &database.clone())); + let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication = Arc::new(authentication::Facade::new( + &cfg.core, + &db_key_repository.clone(), + &in_memory_key_repository.clone(), + )); + let tracker = 
Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); let config = &cfg.udp_trackers.as_ref().unwrap().first().unwrap(); From 965e911cdf9a16b57ba0370b358398e2d6a6cc4d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 22 Jan 2025 09:56:40 +0000 Subject: [PATCH 133/802] refactor: [#1195] inject dependencies into authenticattion::Facade The Facade will be replaced by its dependencies. --- src/app_test.rs | 7 +++++-- src/bootstrap/app.rs | 7 +++++-- src/bootstrap/jobs/http_tracker.rs | 7 +++++-- src/bootstrap/jobs/tracker_apis.rs | 7 +++++-- src/core/authentication/mod.rs | 30 +++++++++++++++--------------- src/servers/apis/server.rs | 7 +++++-- src/servers/http/server.rs | 7 +++++-- src/servers/udp/server/mod.rs | 12 ++++++++---- 8 files changed, 53 insertions(+), 31 deletions(-) diff --git a/src/app_test.rs b/src/app_test.rs index 13b10fefa..6aa318e97 100644 --- a/src/app_test.rs +++ b/src/app_test.rs @@ -3,8 +3,10 @@ use std::sync::Arc; use torrust_tracker_configuration::Configuration; +use crate::core::authentication::handler::KeysHandler; use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; +use crate::core::authentication::service; use crate::core::databases::Database; use crate::core::services::initialize_database; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; @@ -29,11 +31,12 @@ pub fn initialize_tracker_dependencies( )); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication = Arc::new(authentication::Facade::new( - &config.core, + let authentication_service = Arc::new(service::Service::new(&config.core, &in_memory_key_repository)); + let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), )); + let authentication = 
Arc::new(authentication::Facade::new(&authentication_service, &keys_handler)); (database, in_memory_whitelist, whitelist_authorization, authentication) } diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index a87e4ca8e..59b484cce 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -22,8 +22,10 @@ use tracing::instrument; use super::config::initialize_configuration; use crate::bootstrap; use crate::container::AppContainer; +use crate::core::authentication::handler::KeysHandler; use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; +use crate::core::authentication::service; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::core::{authentication, whitelist}; @@ -93,11 +95,12 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication = Arc::new(authentication::Facade::new( - &configuration.core, + let authentication_service = Arc::new(service::Service::new(&configuration.core, &in_memory_key_repository)); + let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), )); + let authentication = Arc::new(authentication::Facade::new(&authentication_service, &keys_handler)); let tracker = Arc::new(initialize_tracker( configuration, diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index abcc2a08c..a68686224 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -100,8 +100,10 @@ mod tests { use 
crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::http_tracker::start_job; + use crate::core::authentication::handler::KeysHandler; use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; + use crate::core::authentication::service; use crate::core::services::{initialize_database, initialize_tracker, statistics}; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::core::{authentication, whitelist}; @@ -127,11 +129,12 @@ mod tests { )); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication = Arc::new(authentication::Facade::new( - &cfg.core, + let authentication_service = Arc::new(service::Service::new(&cfg.core, &in_memory_key_repository)); + let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), )); + let authentication = Arc::new(authentication::Facade::new(&authentication_service, &keys_handler)); let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 56e4a2e44..9ddd095c8 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -149,8 +149,10 @@ mod tests { use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::tracker_apis::start_job; + use crate::core::authentication::handler::KeysHandler; use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; + use crate::core::authentication::service; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; use 
crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::core::{authentication, whitelist}; @@ -180,11 +182,12 @@ mod tests { let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication = Arc::new(authentication::Facade::new( - &cfg.core, + let authentication_service = Arc::new(service::Service::new(&cfg.core, &in_memory_key_repository)); + let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), )); + let authentication = Arc::new(authentication::Facade::new(&authentication_service, &keys_handler)); let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index d26379b09..9ac3638fc 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -1,10 +1,7 @@ use std::sync::Arc; use std::time::Duration; -use handler::{AddKeyRequest, KeysHandler}; -use key::repository::in_memory::InMemoryKeyRepository; -use key::repository::persisted::DatabaseKeyRepository; -use torrust_tracker_configuration::Core; +use handler::AddKeyRequest; use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::databases::{self}; @@ -21,22 +18,18 @@ pub type Error = key::Error; pub struct Facade { /// The authentication service. - authentication_service: service::Service, + authentication_service: Arc, /// The keys handler. 
- keys_handler: handler::KeysHandler, + keys_handler: Arc, } impl Facade { #[must_use] - pub fn new( - config: &Core, - db_key_repository: &Arc, - in_memory_key_repository: &Arc, - ) -> Self { + pub fn new(authentication_service: &Arc, keys_handler: &Arc) -> Self { Self { - authentication_service: service::Service::new(config, in_memory_key_repository), - keys_handler: KeysHandler::new(&db_key_repository.clone(), &in_memory_key_repository.clone()), + authentication_service: authentication_service.clone(), + keys_handler: keys_handler.clone(), } } @@ -153,9 +146,10 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; - use crate::core::authentication; + use crate::core::authentication::handler::KeysHandler; use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; + use crate::core::authentication::{self, service}; use crate::core::services::initialize_database; fn instantiate_authentication_facade() -> authentication::Facade { @@ -180,7 +174,13 @@ mod tests { let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - authentication::Facade::new(&config.core, &db_key_repository.clone(), &in_memory_key_repository.clone()) + let authentication_service = Arc::new(service::Service::new(&config.core, &in_memory_key_repository)); + let keys_handler = Arc::new(KeysHandler::new( + &db_key_repository.clone(), + &in_memory_key_repository.clone(), + )); + + authentication::Facade::new(&authentication_service, &keys_handler) } #[tokio::test] diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index b6ff50995..956d54799 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -342,8 +342,10 @@ mod tests { use crate::bootstrap::app::initialize_global_services; use 
crate::bootstrap::jobs::make_rust_tls; + use crate::core::authentication::handler::KeysHandler; use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; + use crate::core::authentication::service; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::core::{authentication, whitelist}; @@ -373,11 +375,12 @@ mod tests { let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication = Arc::new(authentication::Facade::new( - &cfg.core, + let authentication_service = Arc::new(service::Service::new(&cfg.core, &in_memory_key_repository)); + let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), )); + let authentication = Arc::new(authentication::Facade::new(&authentication_service, &keys_handler)); let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 140ef4e07..751ac5d5c 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -246,8 +246,10 @@ mod tests { use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::make_rust_tls; + use crate::core::authentication::handler::KeysHandler; use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; + use crate::core::authentication::service; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; 
use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::core::{authentication, whitelist}; @@ -272,11 +274,12 @@ mod tests { let _whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication = Arc::new(authentication::Facade::new( - &cfg.core, + let authentication_service = Arc::new(service::Service::new(&cfg.core, &in_memory_key_repository)); + let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), )); + let authentication = Arc::new(authentication::Facade::new(&authentication_service, &keys_handler)); let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index fafb82997..cebeb9b0a 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -64,8 +64,10 @@ mod tests { use super::spawner::Spawner; use super::Server; use crate::bootstrap::app::initialize_global_services; + use crate::core::authentication::handler::KeysHandler; use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; + use crate::core::authentication::service; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::core::{authentication, whitelist}; @@ -92,11 +94,12 @@ mod tests { let _whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let 
authentication = Arc::new(authentication::Facade::new( - &cfg.core, + let authentication_service = Arc::new(service::Service::new(&cfg.core, &in_memory_key_repository)); + let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), )); + let authentication = Arc::new(authentication::Facade::new(&authentication_service, &keys_handler)); let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); @@ -144,11 +147,12 @@ mod tests { )); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication = Arc::new(authentication::Facade::new( - &cfg.core, + let authentication_service = Arc::new(service::Service::new(&cfg.core, &in_memory_key_repository)); + let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), )); + let authentication = Arc::new(authentication::Facade::new(&authentication_service, &keys_handler)); let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); From 457d01b36b6bda0e8e8099a861a79bfaa883a2c1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 22 Jan 2025 09:59:57 +0000 Subject: [PATCH 134/802] refactor: [#1195] rename service to AuthenticationService --- src/app_test.rs | 2 +- src/bootstrap/app.rs | 5 ++++- src/bootstrap/jobs/http_tracker.rs | 2 +- src/bootstrap/jobs/tracker_apis.rs | 2 +- src/core/authentication/mod.rs | 6 +++--- src/core/authentication/service.rs | 10 +++++----- src/servers/apis/server.rs | 2 +- src/servers/http/server.rs | 2 +- src/servers/udp/server/mod.rs | 4 ++-- 9 files changed, 19 insertions(+), 16 deletions(-) diff --git a/src/app_test.rs b/src/app_test.rs index 6aa318e97..461a18758 100644 --- a/src/app_test.rs +++ b/src/app_test.rs @@ -31,7 +31,7 @@ pub fn initialize_tracker_dependencies( )); let db_key_repository = 
Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(service::Service::new(&config.core, &in_memory_key_repository)); + let authentication_service = Arc::new(service::AuthenticationService::new(&config.core, &in_memory_key_repository)); let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 59b484cce..5c2d47c40 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -95,7 +95,10 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(service::Service::new(&configuration.core, &in_memory_key_repository)); + let authentication_service = Arc::new(service::AuthenticationService::new( + &configuration.core, + &in_memory_key_repository, + )); let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index a68686224..a26906fa5 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -129,7 +129,7 @@ mod tests { )); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(service::Service::new(&cfg.core, &in_memory_key_repository)); + let authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), 
&in_memory_key_repository.clone(), diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 9ddd095c8..dfc5b108a 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -182,7 +182,7 @@ mod tests { let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(service::Service::new(&cfg.core, &in_memory_key_repository)); + let authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index 9ac3638fc..ebc7b1fe1 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -18,7 +18,7 @@ pub type Error = key::Error; pub struct Facade { /// The authentication service. - authentication_service: Arc, + authentication_service: Arc, /// The keys handler. 
keys_handler: Arc, @@ -26,7 +26,7 @@ pub struct Facade { impl Facade { #[must_use] - pub fn new(authentication_service: &Arc, keys_handler: &Arc) -> Self { + pub fn new(authentication_service: &Arc, keys_handler: &Arc) -> Self { Self { authentication_service: authentication_service.clone(), keys_handler: keys_handler.clone(), @@ -174,7 +174,7 @@ mod tests { let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(service::Service::new(&config.core, &in_memory_key_repository)); + let authentication_service = Arc::new(service::AuthenticationService::new(&config.core, &in_memory_key_repository)); let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), diff --git a/src/core/authentication/service.rs b/src/core/authentication/service.rs index d7572136f..d100e3a70 100644 --- a/src/core/authentication/service.rs +++ b/src/core/authentication/service.rs @@ -7,7 +7,7 @@ use super::key::repository::in_memory::InMemoryKeyRepository; use super::{key, Error, Key}; #[derive(Debug)] -pub struct Service { +pub struct AuthenticationService { /// The tracker configuration. 
config: Core, @@ -15,7 +15,7 @@ pub struct Service { in_memory_key_repository: Arc, } -impl Service { +impl AuthenticationService { #[must_use] pub fn new(config: &Core, in_memory_key_repository: &Arc) -> Self { Self { @@ -81,14 +81,14 @@ mod tests { use crate::core::authentication; use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; - use crate::core::authentication::service::Service; + use crate::core::authentication::service::AuthenticationService; - fn instantiate_authentication() -> Service { + fn instantiate_authentication() -> AuthenticationService { let config = configuration::ephemeral_private(); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - Service::new(&config.core, &in_memory_key_repository.clone()) + AuthenticationService::new(&config.core, &in_memory_key_repository.clone()) } #[tokio::test] diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 956d54799..de7845eba 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -375,7 +375,7 @@ mod tests { let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(service::Service::new(&cfg.core, &in_memory_key_repository)); + let authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 751ac5d5c..ef13a3535 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -274,7 +274,7 @@ mod tests { let _whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); let db_key_repository = 
Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(service::Service::new(&cfg.core, &in_memory_key_repository)); + let authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index cebeb9b0a..9658b1bca 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -94,7 +94,7 @@ mod tests { let _whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(service::Service::new(&cfg.core, &in_memory_key_repository)); + let authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), @@ -147,7 +147,7 @@ mod tests { )); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(service::Service::new(&cfg.core, &in_memory_key_repository)); + let authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), From 747b6089b92aad1c08804d64eec5f35a7ac8e376 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 22 Jan 2025 10:37:57 +0000 Subject: [PATCH 135/802] refactor: [#1195] use AuthenticationService directy Instead of via the authentication::Facade. 
The Facade will be removed. --- src/app.rs | 1 + src/app_test.rs | 13 ++- src/bootstrap/app.rs | 1 + src/bootstrap/jobs/http_tracker.rs | 15 +++- src/container.rs | 2 + src/core/mod.rs | 6 +- src/core/services/statistics/mod.rs | 3 +- src/core/services/torrent.rs | 14 +-- src/servers/http/server.rs | 38 ++++++++- src/servers/http/v1/handlers/announce.rs | 52 +++++++++--- src/servers/http/v1/handlers/scrape.rs | 85 ++++++++++++++----- src/servers/http/v1/routes.rs | 24 +++++- src/servers/http/v1/services/announce.rs | 4 +- src/servers/http/v1/services/scrape.rs | 6 +- src/servers/udp/handlers.rs | 8 +- tests/servers/api/environment.rs | 5 ++ .../api/v1/contract/context/auth_key.rs | 9 +- tests/servers/http/environment.rs | 6 ++ 18 files changed, 227 insertions(+), 65 deletions(-) diff --git a/src/app.rs b/src/app.rs index da8795ffe..8fa14da54 100644 --- a/src/app.rs +++ b/src/app.rs @@ -100,6 +100,7 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< if let Some(job) = http_tracker::start_job( http_tracker_config, app_container.tracker.clone(), + app_container.authentication_service.clone(), app_container.whitelist_authorization.clone(), app_container.stats_event_sender.clone(), registar.give_form(), diff --git a/src/app_test.rs b/src/app_test.rs index 461a18758..d4e8df961 100644 --- a/src/app_test.rs +++ b/src/app_test.rs @@ -6,7 +6,7 @@ use torrust_tracker_configuration::Configuration; use crate::core::authentication::handler::KeysHandler; use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; -use crate::core::authentication::service; +use crate::core::authentication::service::{self, AuthenticationService}; use crate::core::databases::Database; use crate::core::services::initialize_database; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; @@ -22,6 +22,7 @@ pub fn initialize_tracker_dependencies( 
Arc, Arc, Arc, + Arc, ) { let database = initialize_database(config); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); @@ -36,7 +37,13 @@ pub fn initialize_tracker_dependencies( &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let authentication = Arc::new(authentication::Facade::new(&authentication_service, &keys_handler)); + let authentication_facade = Arc::new(authentication::Facade::new(&authentication_service, &keys_handler)); - (database, in_memory_whitelist, whitelist_authorization, authentication) + ( + database, + in_memory_whitelist, + whitelist_authorization, + authentication_facade, + authentication_service, + ) } diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 5c2d47c40..31689005d 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -114,6 +114,7 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { AppContainer { tracker, + authentication_service, whitelist_authorization, ban_service, stats_event_sender, diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index a26906fa5..46e627b6b 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -19,6 +19,7 @@ use torrust_tracker_configuration::HttpTracker; use tracing::instrument; use super::make_rust_tls; +use crate::core::authentication::service::AuthenticationService; use crate::core::statistics::event::sender::Sender; use crate::core::{self, statistics, whitelist}; use crate::servers::http::server::{HttpServer, Launcher}; @@ -34,10 +35,11 @@ use crate::servers::registar::ServiceRegistrationForm; /// /// It would panic if the `config::HttpTracker` struct would contain inappropriate values. 
/// -#[instrument(skip(config, tracker, whitelist_authorization, stats_event_sender, form))] +#[instrument(skip(config, tracker, authentication_service, whitelist_authorization, stats_event_sender, form))] pub async fn start_job( config: &HttpTracker, tracker: Arc, + authentication_service: Arc, whitelist_authorization: Arc, stats_event_sender: Arc>>, form: ServiceRegistrationForm, @@ -55,6 +57,7 @@ pub async fn start_job( socket, tls, tracker.clone(), + authentication_service.clone(), whitelist_authorization.clone(), stats_event_sender.clone(), form, @@ -70,12 +73,19 @@ async fn start_v1( socket: SocketAddr, tls: Option, tracker: Arc, + authentication_service: Arc, whitelist_authorization: Arc, stats_event_sender: Arc>>, form: ServiceRegistrationForm, ) -> JoinHandle<()> { let server = HttpServer::new(Launcher::new(socket, tls)) - .start(tracker, whitelist_authorization, stats_event_sender, form) + .start( + tracker, + authentication_service, + whitelist_authorization, + stats_event_sender, + form, + ) .await .expect("it should be able to start to the http tracker"); @@ -143,6 +153,7 @@ mod tests { start_job( config, tracker, + authentication_service, whitelist_authorization, stats_event_sender, Registar::default().give_form(), diff --git a/src/container.rs b/src/container.rs index 3c9229b89..0ea8e3c03 100644 --- a/src/container.rs +++ b/src/container.rs @@ -2,6 +2,7 @@ use std::sync::Arc; use tokio::sync::RwLock; +use crate::core::authentication::service::AuthenticationService; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; use crate::core::whitelist::manager::WhiteListManager; @@ -10,6 +11,7 @@ use crate::servers::udp::server::banning::BanService; pub struct AppContainer { pub tracker: Arc, + pub authentication_service: Arc, pub whitelist_authorization: Arc, pub ban_service: Arc>, pub stats_event_sender: Arc>>, diff --git a/src/core/mod.rs b/src/core/mod.rs index 9a5692690..2b13bc0c0 100644 --- 
a/src/core/mod.rs +++ b/src/core/mod.rs @@ -816,7 +816,7 @@ mod tests { fn public_tracker() -> Tracker { let config = configuration::ephemeral_public(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = initialize_tracker_dependencies(&config); initialize_tracker(&config, &database, &whitelist_authorization, &authentication) @@ -825,7 +825,7 @@ mod tests { fn whitelisted_tracker() -> (Tracker, Arc, Arc) { let config = configuration::ephemeral_listed(); - let (database, in_memory_whitelist, whitelist_authorization, authentication) = + let (database, in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = initialize_tracker_dependencies(&config); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); @@ -839,7 +839,7 @@ mod tests { let mut config = configuration::ephemeral_listed(); config.core.tracker_policy.persistent_torrent_completed_stat = true; - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = initialize_tracker_dependencies(&config); initialize_tracker(&config, &database, &whitelist_authorization, &authentication) diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index 4081fd6bb..a30588472 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -132,7 +132,8 @@ mod tests { async fn the_statistics_service_should_return_the_tracker_metrics() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + 
initialize_tracker_dependencies(&config); let (_stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_repository = Arc::new(stats_repository); diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index c23c7e04b..462f10101 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -142,7 +142,7 @@ mod tests { async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = initialize_tracker_dependencies(&config); let tracker = initialize_tracker(&config, &database, &whitelist_authorization, &authentication); @@ -162,7 +162,7 @@ mod tests { async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = initialize_tracker_dependencies(&config); let tracker = Arc::new(initialize_tracker( @@ -213,7 +213,7 @@ mod tests { async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = initialize_tracker_dependencies(&config); let tracker = Arc::new(initialize_tracker( @@ -232,7 +232,7 @@ mod tests { async fn should_return_a_summarized_info_for_all_torrents() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + let (database, _in_memory_whitelist, whitelist_authorization, authentication, 
_authentication_service) = initialize_tracker_dependencies(&config); let tracker = Arc::new(initialize_tracker( @@ -264,7 +264,7 @@ mod tests { async fn should_allow_limiting_the_number_of_torrents_in_the_result() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = initialize_tracker_dependencies(&config); let tracker = Arc::new(initialize_tracker( @@ -294,7 +294,7 @@ mod tests { async fn should_allow_using_pagination_in_the_result() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = initialize_tracker_dependencies(&config); let tracker = Arc::new(initialize_tracker( @@ -333,7 +333,7 @@ mod tests { async fn should_return_torrents_ordered_by_info_hash() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = initialize_tracker_dependencies(&config); let tracker = Arc::new(initialize_tracker( diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index ef13a3535..3bc6773dd 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -11,6 +11,7 @@ use tracing::instrument; use super::v1::routes::router; use crate::bootstrap::jobs::Started; +use crate::core::authentication::service::AuthenticationService; use crate::core::{statistics, whitelist, Tracker}; use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; @@ -42,10 +43,19 @@ pub struct Launcher { } impl Launcher { - #[instrument(skip(self, tracker, whitelist_authorization, stats_event_sender, tx_start, 
rx_halt))] + #[instrument(skip( + self, + tracker, + authentication_service, + whitelist_authorization, + stats_event_sender, + tx_start, + rx_halt + ))] fn start( &self, tracker: Arc, + authentication_service: Arc, whitelist_authorization: Arc, stats_event_sender: Arc>>, tx_start: Sender, @@ -67,7 +77,13 @@ impl Launcher { tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting on: {protocol}://{}", address); - let app = router(tracker, whitelist_authorization, stats_event_sender, address); + let app = router( + tracker, + authentication_service, + whitelist_authorization, + stats_event_sender, + address, + ); let running = Box::pin(async { match tls { @@ -163,6 +179,7 @@ impl HttpServer { pub async fn start( self, tracker: Arc, + authentication_service: Arc, whitelist_authorization: Arc, stats_event_sender: Arc>>, form: ServiceRegistrationForm, @@ -173,7 +190,14 @@ impl HttpServer { let launcher = self.state.launcher; let task = tokio::spawn(async move { - let server = launcher.start(tracker, whitelist_authorization, stats_event_sender, tx_start, rx_halt); + let server = launcher.start( + tracker, + authentication_service, + whitelist_authorization, + stats_event_sender, + tx_start, + rx_halt, + ); server.await; @@ -296,7 +320,13 @@ mod tests { let stopped = HttpServer::new(Launcher::new(bind_to, tls)); let started = stopped - .start(tracker, whitelist_authorization, stats_event_sender, register.give_form()) + .start( + tracker, + authentication_service, + whitelist_authorization, + stats_event_sender, + register.give_form(), + ) .await .expect("it should start the server"); let stopped = started.stop().await.expect("it should stop the server"); diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 7af2b9261..fbadde967 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -21,6 +21,7 @@ use torrust_tracker_clock::clock::Time; use 
torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; +use crate::core::authentication::service::AuthenticationService; use crate::core::authentication::Key; use crate::core::statistics::event::sender::Sender; use crate::core::{whitelist, PeersWanted, Tracker}; @@ -38,6 +39,7 @@ use crate::CurrentClock; pub async fn handle_without_key( State(state): State<( Arc, + Arc, Arc, Arc>>, )>, @@ -46,7 +48,16 @@ pub async fn handle_without_key( ) -> Response { tracing::debug!("http announce request: {:#?}", announce_request); - handle(&state.0, &state.1, &state.2, &announce_request, &client_ip_sources, None).await + handle( + &state.0, + &state.1, + &state.2, + &state.3, + &announce_request, + &client_ip_sources, + None, + ) + .await } /// It handles the `announce` request when the HTTP tracker requires @@ -56,6 +67,7 @@ pub async fn handle_without_key( pub async fn handle_with_key( State(state): State<( Arc, + Arc, Arc, Arc>>, )>, @@ -65,7 +77,16 @@ pub async fn handle_with_key( ) -> Response { tracing::debug!("http announce request: {:#?}", announce_request); - handle(&state.0, &state.1, &state.2, &announce_request, &client_ip_sources, Some(key)).await + handle( + &state.0, + &state.1, + &state.2, + &state.3, + &announce_request, + &client_ip_sources, + Some(key), + ) + .await } /// It handles the `announce` request. @@ -74,6 +95,7 @@ pub async fn handle_with_key( /// `unauthenticated` modes. 
async fn handle( tracker: &Arc, + authentication_service: &Arc, whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, announce_request: &Announce, @@ -82,6 +104,7 @@ async fn handle( ) -> Response { let announce_data = match handle_announce( tracker, + authentication_service, whitelist_authorization, opt_stats_event_sender, announce_request, @@ -104,6 +127,7 @@ async fn handle( async fn handle_announce( tracker: &Arc, + authentication_service: &Arc, whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, announce_request: &Announce, @@ -113,7 +137,7 @@ async fn handle_announce( // Authentication if tracker.requires_authentication() { match maybe_key { - Some(key) => match tracker.authentication.authenticate(&key).await { + Some(key) => match authentication_service.authenticate(&key).await { Ok(()) => (), Err(error) => return Err(responses::error::Error::from(error)), }, @@ -220,6 +244,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; + use crate::core::authentication::service::AuthenticationService; use crate::core::services::{initialize_tracker, statistics}; use crate::core::statistics::event::sender::Sender; use crate::core::{whitelist, Tracker}; @@ -228,6 +253,7 @@ mod tests { Arc, Arc>>, Arc, + Arc, ); fn private_tracker() -> TrackerAndDeps { @@ -248,7 +274,8 @@ mod tests { /// Initialize tracker's dependencies and tracker. 
fn initialize_tracker_and_deps(config: &Configuration) -> TrackerAndDeps { - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication, authentication_service) = + initialize_tracker_dependencies(config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); @@ -259,7 +286,7 @@ mod tests { &authentication, )); - (tracker, stats_event_sender, whitelist_authorization) + (tracker, stats_event_sender, whitelist_authorization, authentication_service) } fn sample_announce_request() -> Announce { @@ -302,7 +329,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_authentication_key_is_missing() { - let (tracker, stats_event_sender, whitelist_authorization) = private_tracker(); + let (tracker, stats_event_sender, whitelist_authorization, authentication_service) = private_tracker(); let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); @@ -311,6 +338,7 @@ mod tests { let response = handle_announce( &tracker, + &authentication_service, &whitelist_authorization, &stats_event_sender, &sample_announce_request(), @@ -328,7 +356,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_authentication_key_is_invalid() { - let (tracker, stats_event_sender, whitelist_authorization) = private_tracker(); + let (tracker, stats_event_sender, whitelist_authorization, authentication_service) = private_tracker(); let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); @@ -339,6 +367,7 @@ mod tests { let response = handle_announce( &tracker, + &authentication_service, &whitelist_authorization, &stats_event_sender, &sample_announce_request(), @@ -362,7 +391,7 @@ mod tests { #[tokio::test] async fn 
it_should_fail_when_the_announced_torrent_is_not_whitelisted() { - let (tracker, stats_event_sender, whitelist_authorization) = whitelisted_tracker(); + let (tracker, stats_event_sender, whitelist_authorization, authentication_service) = whitelisted_tracker(); let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); @@ -371,6 +400,7 @@ mod tests { let response = handle_announce( &tracker, + &authentication_service, &whitelist_authorization, &stats_event_sender, &announce_request, @@ -402,7 +432,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { - let (tracker, stats_event_sender, whitelist_authorization) = tracker_on_reverse_proxy(); + let (tracker, stats_event_sender, whitelist_authorization, authentication_service) = tracker_on_reverse_proxy(); let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); @@ -414,6 +444,7 @@ mod tests { let response = handle_announce( &tracker, + &authentication_service, &whitelist_authorization, &stats_event_sender, &sample_announce_request(), @@ -442,7 +473,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { - let (tracker, stats_event_sender, whitelist_authorization) = tracker_not_on_reverse_proxy(); + let (tracker, stats_event_sender, whitelist_authorization, authentication_service) = tracker_not_on_reverse_proxy(); let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); @@ -454,6 +485,7 @@ mod tests { let response = handle_announce( &tracker, + &authentication_service, &whitelist_authorization, &stats_event_sender, &sample_announce_request(), diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 062a017f8..f7be42bff 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -15,6 +15,7 @@ use 
bittorrent_http_protocol::v1::services::peer_ip_resolver::{self, ClientIpSou use hyper::StatusCode; use torrust_tracker_primitives::core::ScrapeData; +use crate::core::authentication::service::AuthenticationService; use crate::core::authentication::Key; use crate::core::statistics::event::sender::Sender; use crate::core::Tracker; @@ -28,13 +29,13 @@ use crate::servers::http::v1::services; #[allow(clippy::unused_async)] #[allow(clippy::type_complexity)] pub async fn handle_without_key( - State(state): State<(Arc, Arc>>)>, + State(state): State<(Arc, Arc, Arc>>)>, ExtractRequest(scrape_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ) -> Response { tracing::debug!("http scrape request: {:#?}", &scrape_request); - handle(&state.0, &state.1, &scrape_request, &client_ip_sources, None).await + handle(&state.0, &state.1, &state.2, &scrape_request, &client_ip_sources, None).await } /// It handles the `scrape` request when the HTTP tracker is configured @@ -44,24 +45,34 @@ pub async fn handle_without_key( #[allow(clippy::unused_async)] #[allow(clippy::type_complexity)] pub async fn handle_with_key( - State(state): State<(Arc, Arc>>)>, + State(state): State<(Arc, Arc, Arc>>)>, ExtractRequest(scrape_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ExtractKey(key): ExtractKey, ) -> Response { tracing::debug!("http scrape request: {:#?}", &scrape_request); - handle(&state.0, &state.1, &scrape_request, &client_ip_sources, Some(key)).await + handle(&state.0, &state.1, &state.2, &scrape_request, &client_ip_sources, Some(key)).await } async fn handle( tracker: &Arc, + authentication_service: &Arc, stats_event_sender: &Arc>>, scrape_request: &Scrape, client_ip_sources: &ClientIpSources, maybe_key: Option, ) -> Response { - let scrape_data = match handle_scrape(tracker, stats_event_sender, scrape_request, client_ip_sources, maybe_key).await { + let scrape_data = match handle_scrape( + tracker, + 
authentication_service, + stats_event_sender, + scrape_request, + client_ip_sources, + maybe_key, + ) + .await + { Ok(scrape_data) => scrape_data, Err(error) => return (StatusCode::OK, error.write()).into_response(), }; @@ -76,6 +87,7 @@ async fn handle( async fn handle_scrape( tracker: &Arc, + authentication_service: &Arc, opt_stats_event_sender: &Arc>>, scrape_request: &Scrape, client_ip_sources: &ClientIpSources, @@ -84,7 +96,7 @@ async fn handle_scrape( // Authentication let return_real_scrape_data = if tracker.requires_authentication() { match maybe_key { - Some(key) => match tracker.authentication.authenticate(&key).await { + Some(key) => match authentication_service.authenticate(&key).await { Ok(()) => true, Err(_error) => false, }, @@ -119,6 +131,7 @@ fn build_response(scrape_data: ScrapeData) -> Response { mod tests { use std::net::IpAddr; use std::str::FromStr; + use std::sync::Arc; use bittorrent_http_protocol::v1::requests::scrape::Scrape; use bittorrent_http_protocol::v1::responses; @@ -127,54 +140,83 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; + use crate::core::authentication::service::AuthenticationService; use crate::core::services::{initialize_tracker, statistics}; use crate::core::Tracker; - fn private_tracker() -> (Tracker, Option>) { + fn private_tracker() -> ( + Tracker, + Option>, + Arc, + ) { let config = configuration::ephemeral_private(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication, authentication_service) = + initialize_tracker_dependencies(&config); + let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); ( initialize_tracker(&config, &database, &whitelist_authorization, &authentication), stats_event_sender, + authentication_service, ) } - fn 
whitelisted_tracker() -> (Tracker, Option>) { + fn whitelisted_tracker() -> ( + Tracker, + Option>, + Arc, + ) { let config = configuration::ephemeral_listed(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication, authentication_service) = + initialize_tracker_dependencies(&config); + let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); ( initialize_tracker(&config, &database, &whitelist_authorization, &authentication), stats_event_sender, + authentication_service, ) } - fn tracker_on_reverse_proxy() -> (Tracker, Option>) { + fn tracker_on_reverse_proxy() -> ( + Tracker, + Option>, + Arc, + ) { let config = configuration::ephemeral_with_reverse_proxy(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication, authentication_service) = + initialize_tracker_dependencies(&config); + let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); ( initialize_tracker(&config, &database, &whitelist_authorization, &authentication), stats_event_sender, + authentication_service, ) } - fn tracker_not_on_reverse_proxy() -> (Tracker, Option>) { + fn tracker_not_on_reverse_proxy() -> ( + Tracker, + Option>, + Arc, + ) { let config = configuration::ephemeral_without_reverse_proxy(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication, authentication_service) = + initialize_tracker_dependencies(&config); + let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); ( 
initialize_tracker(&config, &database, &whitelist_authorization, &authentication), stats_event_sender, + authentication_service, ) } @@ -210,7 +252,7 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_missing() { - let (tracker, stats_event_sender) = private_tracker(); + let (tracker, stats_event_sender, authentication_service) = private_tracker(); let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); @@ -219,6 +261,7 @@ mod tests { let scrape_data = handle_scrape( &tracker, + &authentication_service, &stats_event_sender, &scrape_request, &sample_client_ip_sources(), @@ -234,7 +277,7 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_invalid() { - let (tracker, stats_event_sender) = private_tracker(); + let (tracker, stats_event_sender, authentication_service) = private_tracker(); let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); @@ -244,6 +287,7 @@ mod tests { let scrape_data = handle_scrape( &tracker, + &authentication_service, &stats_event_sender, &scrape_request, &sample_client_ip_sources(), @@ -269,7 +313,7 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_torrent_is_not_whitelisted() { - let (tracker, stats_event_sender) = whitelisted_tracker(); + let (tracker, stats_event_sender, authentication_service) = whitelisted_tracker(); let tracker: Arc = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); @@ -277,6 +321,7 @@ mod tests { let scrape_data = handle_scrape( &tracker, + &authentication_service, &stats_event_sender, &scrape_request, &sample_client_ip_sources(), @@ -302,7 +347,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { - let (tracker, stats_event_sender) = tracker_on_reverse_proxy(); + let (tracker, stats_event_sender, 
authentication_service) = tracker_on_reverse_proxy(); let tracker: Arc = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); @@ -313,6 +358,7 @@ mod tests { let response = handle_scrape( &tracker, + &authentication_service, &stats_event_sender, &sample_scrape_request(), &client_ip_sources, @@ -339,7 +385,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { - let (tracker, stats_event_sender) = tracker_not_on_reverse_proxy(); + let (tracker, stats_event_sender, authentication_service) = tracker_not_on_reverse_proxy(); let tracker: Arc = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); @@ -350,6 +396,7 @@ mod tests { let response = handle_scrape( &tracker, + &authentication_service, &stats_event_sender, &sample_scrape_request(), &client_ip_sources, diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index d37c55c7a..7a1465500 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -22,6 +22,7 @@ use tower_http::LatencyUnit; use tracing::{instrument, Level, Span}; use super::handlers::{announce, health_check, scrape}; +use crate::core::authentication::service::AuthenticationService; use crate::core::statistics::event::sender::Sender; use crate::core::{whitelist, Tracker}; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; @@ -32,9 +33,16 @@ use crate::servers::logging::Latency; /// > **NOTICE**: it's added a layer to get the client IP from the connection /// > info. The tracker could use the connection info to get the client IP. 
#[allow(clippy::needless_pass_by_value)] -#[instrument(skip(tracker, whitelist_authorization, stats_event_sender, server_socket_addr))] +#[instrument(skip( + tracker, + authentication_service, + whitelist_authorization, + stats_event_sender, + server_socket_addr +))] pub fn router( tracker: Arc, + authentication_service: Arc, whitelist_authorization: Arc, stats_event_sender: Arc>>, server_socket_addr: SocketAddr, @@ -47,6 +55,7 @@ pub fn router( "/announce", get(announce::handle_without_key).with_state(( tracker.clone(), + authentication_service.clone(), whitelist_authorization.clone(), stats_event_sender.clone(), )), @@ -55,6 +64,7 @@ pub fn router( "/announce/{key}", get(announce::handle_with_key).with_state(( tracker.clone(), + authentication_service.clone(), whitelist_authorization.clone(), stats_event_sender.clone(), )), @@ -62,11 +72,19 @@ pub fn router( // Scrape request .route( "/scrape", - get(scrape::handle_without_key).with_state((tracker.clone(), stats_event_sender.clone())), + get(scrape::handle_without_key).with_state(( + tracker.clone(), + authentication_service.clone(), + stats_event_sender.clone(), + )), ) .route( "/scrape/{key}", - get(scrape::handle_with_key).with_state((tracker.clone(), stats_event_sender.clone())), + get(scrape::handle_with_key).with_state(( + tracker.clone(), + authentication_service.clone(), + stats_event_sender.clone(), + )), ) // Add extension to get the client IP from the connection info .layer(SecureClientIpSource::ConnectInfo.into_extension()) diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 929b00ff4..446af1db3 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -73,7 +73,7 @@ mod tests { fn public_tracker() -> (Tracker, Arc>>) { let config = configuration::ephemeral_public(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(&config); + let (database, 
_in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); @@ -131,7 +131,7 @@ mod tests { fn test_tracker_factory() -> Tracker { let config = configuration::ephemeral(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = initialize_tracker_dependencies(&config); Tracker::new(&config.core, &database, &whitelist_authorization, &authentication).unwrap() diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 856b2ae72..35b264363 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -87,7 +87,8 @@ mod tests { fn public_tracker() -> Tracker { let config = configuration::ephemeral_public(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + initialize_tracker_dependencies(&config); initialize_tracker(&config, &database, &whitelist_authorization, &authentication) } @@ -115,7 +116,8 @@ mod tests { fn test_tracker_factory() -> Tracker { let config = configuration::ephemeral(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + initialize_tracker_dependencies(&config); Tracker::new(&config.core, &database, &whitelist_authorization, &authentication).unwrap() } diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 67bb35c5a..f0f7719e2 
100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -516,7 +516,8 @@ mod tests { } fn initialize_tracker_and_deps(config: &Configuration) -> TrackerAndDeps { - let (database, in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(config); + let (database, in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + initialize_tracker_dependencies(config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); @@ -634,7 +635,8 @@ mod tests { fn test_tracker_factory() -> (Arc, Arc) { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + initialize_tracker_dependencies(&config); let tracker = Arc::new(Tracker::new(&config.core, &database, &whitelist_authorization, &authentication).unwrap()); @@ -1381,7 +1383,7 @@ mod tests { async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let config = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let (database, _in_memory_whitelist, whitelist_authorization, authentication) = + let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = initialize_tracker_dependencies(&config); let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 3bac7e570..3e8fedd0e 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -8,6 +8,7 @@ use 
torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; use torrust_tracker_configuration::{Configuration, HttpApi}; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; +use torrust_tracker_lib::core::authentication::service::AuthenticationService; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::whitelist::manager::WhiteListManager; @@ -23,6 +24,7 @@ where { pub config: Arc, pub tracker: Arc, + pub authentication_service: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, pub whitelist_manager: Arc, @@ -58,6 +60,7 @@ impl Environment { Self { config, tracker: app_container.tracker.clone(), + authentication_service: app_container.authentication_service.clone(), stats_event_sender: app_container.stats_event_sender.clone(), stats_repository: app_container.stats_repository.clone(), whitelist_manager: app_container.whitelist_manager.clone(), @@ -73,6 +76,7 @@ impl Environment { Environment { config: self.config, tracker: self.tracker.clone(), + authentication_service: self.authentication_service.clone(), stats_event_sender: self.stats_event_sender.clone(), stats_repository: self.stats_repository.clone(), whitelist_manager: self.whitelist_manager.clone(), @@ -104,6 +108,7 @@ impl Environment { Environment { config: self.config, tracker: self.tracker, + authentication_service: self.authentication_service, stats_event_sender: self.stats_event_sender, stats_repository: self.stats_repository, whitelist_manager: self.whitelist_manager, diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs index cee6b4034..6a270f894 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -36,8 +36,7 @@ async fn 
should_allow_generating_a_new_random_auth_key() { let auth_key_resource = assert_auth_key_utf8(response).await; assert!(env - .tracker - .authentication + .authentication_service .authenticate(&auth_key_resource.key.parse::().unwrap()) .await .is_ok()); @@ -66,8 +65,7 @@ async fn should_allow_uploading_a_preexisting_auth_key() { let auth_key_resource = assert_auth_key_utf8(response).await; assert!(env - .tracker - .authentication + .authentication_service .authenticate(&auth_key_resource.key.parse::().unwrap()) .await .is_ok()); @@ -499,8 +497,7 @@ mod deprecated_generate_key_endpoint { let auth_key_resource = assert_auth_key_utf8(response).await; assert!(env - .tracker - .authentication + .authentication_service .authenticate(&auth_key_resource.key.parse::().unwrap()) .await .is_ok()); diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index a8e5fc572..85921cd37 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -5,6 +5,7 @@ use futures::executor::block_on; use torrust_tracker_configuration::{Configuration, HttpTracker}; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; +use torrust_tracker_lib::core::authentication::service::AuthenticationService; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::whitelist::manager::WhiteListManager; @@ -16,6 +17,7 @@ use torrust_tracker_primitives::peer; pub struct Environment { pub config: Arc, pub tracker: Arc, + pub authentication_service: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, pub whitelist_authorization: Arc, @@ -54,6 +56,7 @@ impl Environment { Self { config, tracker: app_container.tracker.clone(), + authentication_service: app_container.authentication_service.clone(), stats_event_sender: 
app_container.stats_event_sender.clone(), stats_repository: app_container.stats_repository.clone(), whitelist_authorization: app_container.whitelist_authorization.clone(), @@ -68,6 +71,7 @@ impl Environment { Environment { config: self.config, tracker: self.tracker.clone(), + authentication_service: self.authentication_service.clone(), whitelist_authorization: self.whitelist_authorization.clone(), stats_event_sender: self.stats_event_sender.clone(), stats_repository: self.stats_repository.clone(), @@ -77,6 +81,7 @@ impl Environment { .server .start( self.tracker, + self.authentication_service, self.whitelist_authorization, self.stats_event_sender, self.registar.give_form(), @@ -96,6 +101,7 @@ impl Environment { Environment { config: self.config, tracker: self.tracker, + authentication_service: self.authentication_service, whitelist_authorization: self.whitelist_authorization, stats_event_sender: self.stats_event_sender, stats_repository: self.stats_repository, From 661fe6abe741321ecdc3cb8e3519bd5bb5a2b714 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 22 Jan 2025 10:49:59 +0000 Subject: [PATCH 136/802] refactor: [#1195] remove AuthenticationService from authentication Facade It's now used directly. 
--- src/app_test.rs | 2 +- src/bootstrap/app.rs | 2 +- src/bootstrap/jobs/http_tracker.rs | 2 +- src/bootstrap/jobs/tracker_apis.rs | 4 +- src/core/authentication/mod.rs | 117 ++++++++++++----------------- src/servers/apis/server.rs | 4 +- src/servers/http/server.rs | 2 +- src/servers/udp/server/mod.rs | 8 +- 8 files changed, 60 insertions(+), 81 deletions(-) diff --git a/src/app_test.rs b/src/app_test.rs index d4e8df961..a8ad9f967 100644 --- a/src/app_test.rs +++ b/src/app_test.rs @@ -37,7 +37,7 @@ pub fn initialize_tracker_dependencies( &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let authentication_facade = Arc::new(authentication::Facade::new(&authentication_service, &keys_handler)); + let authentication_facade = Arc::new(authentication::Facade::new(&keys_handler)); ( database, diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 31689005d..b5067f5f6 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -103,7 +103,7 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let authentication = Arc::new(authentication::Facade::new(&authentication_service, &keys_handler)); + let authentication = Arc::new(authentication::Facade::new(&keys_handler)); let tracker = Arc::new(initialize_tracker( configuration, diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 46e627b6b..a0e11a688 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -144,7 +144,7 @@ mod tests { &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let authentication = Arc::new(authentication::Facade::new(&authentication_service, &keys_handler)); + let authentication = Arc::new(authentication::Facade::new(&keys_handler)); let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); diff --git a/src/bootstrap/jobs/tracker_apis.rs 
b/src/bootstrap/jobs/tracker_apis.rs index dfc5b108a..39bfc112d 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -182,12 +182,12 @@ mod tests { let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); + let _authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let authentication = Arc::new(authentication::Facade::new(&authentication_service, &keys_handler)); + let authentication = Arc::new(authentication::Facade::new(&keys_handler)); let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index ebc7b1fe1..ac5db55d1 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -17,32 +17,18 @@ pub type Key = key::Key; pub type Error = key::Error; pub struct Facade { - /// The authentication service. - authentication_service: Arc, - /// The keys handler. keys_handler: Arc, } impl Facade { #[must_use] - pub fn new(authentication_service: &Arc, keys_handler: &Arc) -> Self { + pub fn new(keys_handler: &Arc) -> Self { Self { - authentication_service: authentication_service.clone(), keys_handler: keys_handler.clone(), } } - /// It authenticates the peer `key` against the `Tracker` authentication - /// key list. - /// - /// # Errors - /// - /// Will return an error if the the authentication key cannot be verified. 
- pub async fn authenticate(&self, key: &Key) -> Result<(), Error> { - self.authentication_service.authenticate(key).await - } - /// Adds new peer keys to the tracker. /// /// Keys can be pre-generated or randomly created. They can also be permanent or expire. @@ -149,26 +135,30 @@ mod tests { use crate::core::authentication::handler::KeysHandler; use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; + use crate::core::authentication::service::AuthenticationService; use crate::core::authentication::{self, service}; use crate::core::services::initialize_database; - fn instantiate_authentication_facade() -> authentication::Facade { + fn instantiate_keys_manager_and_authentication() -> (authentication::Facade, Arc) { let config = configuration::ephemeral_private(); - instantiate_authentication_facade_with_configuration(&config) + instantiate_keys_manager_and_authentication_with_configuration(&config) } - fn instantiate_authentication_facade_with_checking_keys_expiration_disabled() -> authentication::Facade { + fn instantiate_keys_manager_and_authentication_with_checking_keys_expiration_disabled( + ) -> (authentication::Facade, Arc) { let mut config = configuration::ephemeral_private(); config.core.private_mode = Some(PrivateMode { check_keys_expiration: false, }); - instantiate_authentication_facade_with_configuration(&config) + instantiate_keys_manager_and_authentication_with_configuration(&config) } - fn instantiate_authentication_facade_with_configuration(config: &Configuration) -> authentication::Facade { + fn instantiate_keys_manager_and_authentication_with_configuration( + config: &Configuration, + ) -> (authentication::Facade, Arc) { let database = initialize_database(config); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); @@ -180,52 +170,40 @@ mod tests { &in_memory_key_repository.clone(), )); - 
authentication::Facade::new(&authentication_service, &keys_handler) + let facade = authentication::Facade::new(&keys_handler); + + (facade, authentication_service) } #[tokio::test] async fn it_should_remove_an_authentication_key() { - let authentication = instantiate_authentication_facade(); + let (keys_manager, authentication_service) = instantiate_keys_manager_and_authentication(); - let expiring_key = authentication - .generate_auth_key(Some(Duration::from_secs(100))) - .await - .unwrap(); + let expiring_key = keys_manager.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); - let result = authentication.remove_auth_key(&expiring_key.key()).await; + let result = keys_manager.remove_auth_key(&expiring_key.key()).await; assert!(result.is_ok()); // The key should no longer be valid - assert!(authentication - .authentication_service - .authenticate(&expiring_key.key()) - .await - .is_err()); + assert!(authentication_service.authenticate(&expiring_key.key()).await.is_err()); } #[tokio::test] async fn it_should_load_authentication_keys_from_the_database() { - let authentication = instantiate_authentication_facade(); + let (keys_manager, authentication_service) = instantiate_keys_manager_and_authentication(); - let expiring_key = authentication - .generate_auth_key(Some(Duration::from_secs(100))) - .await - .unwrap(); + let expiring_key = keys_manager.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); // Remove the newly generated key in memory - authentication.remove_in_memory_auth_key(&expiring_key.key()).await; + keys_manager.remove_in_memory_auth_key(&expiring_key.key()).await; - let result = authentication.load_keys_from_database().await; + let result = keys_manager.load_keys_from_database().await; assert!(result.is_ok()); // The key should no longer be valid - assert!(authentication - .authentication_service - .authenticate(&expiring_key.key()) - .await - .is_ok()); + 
assert!(authentication_service.authenticate(&expiring_key.key()).await.is_ok()); } mod with_expiring_and { @@ -234,51 +212,51 @@ mod tests { use std::time::Duration; use crate::core::authentication::tests::the_tracker_configured_as_private::{ - instantiate_authentication_facade, instantiate_authentication_facade_with_checking_keys_expiration_disabled, + instantiate_keys_manager_and_authentication, + instantiate_keys_manager_and_authentication_with_checking_keys_expiration_disabled, }; use crate::core::authentication::Key; #[tokio::test] async fn it_should_authenticate_a_peer_with_the_key() { - let authentication = instantiate_authentication_facade(); + let (keys_manager, authentication_service) = instantiate_keys_manager_and_authentication(); - let peer_key = authentication - .generate_auth_key(Some(Duration::from_secs(100))) - .await - .unwrap(); + let peer_key = keys_manager.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); - let result = authentication.authenticate(&peer_key.key()).await; + let result = authentication_service.authenticate(&peer_key.key()).await; assert!(result.is_ok()); } #[tokio::test] async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { - let authentication = instantiate_authentication_facade_with_checking_keys_expiration_disabled(); + let (keys_manager, authentication_service) = + instantiate_keys_manager_and_authentication_with_checking_keys_expiration_disabled(); let past_timestamp = Duration::ZERO; - let peer_key = authentication + let peer_key = keys_manager .add_auth_key(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), Some(past_timestamp)) .await .unwrap(); - assert!(authentication.authenticate(&peer_key.key()).await.is_ok()); + assert!(authentication_service.authenticate(&peer_key.key()).await.is_ok()); } } mod pre_generated_keys { use crate::core::authentication::tests::the_tracker_configured_as_private::{ - instantiate_authentication_facade, 
instantiate_authentication_facade_with_checking_keys_expiration_disabled, + instantiate_keys_manager_and_authentication, + instantiate_keys_manager_and_authentication_with_checking_keys_expiration_disabled, }; use crate::core::authentication::{AddKeyRequest, Key}; #[tokio::test] async fn it_should_authenticate_a_peer_with_the_key() { - let authentication = instantiate_authentication_facade(); + let (keys_manager, authentication_service) = instantiate_keys_manager_and_authentication(); - let peer_key = authentication + let peer_key = keys_manager .add_peer_key(AddKeyRequest { opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), opt_seconds_valid: Some(100), @@ -286,16 +264,17 @@ mod tests { .await .unwrap(); - let result = authentication.authenticate(&peer_key.key()).await; + let result = authentication_service.authenticate(&peer_key.key()).await; assert!(result.is_ok()); } #[tokio::test] async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { - let authentication = instantiate_authentication_facade_with_checking_keys_expiration_disabled(); + let (keys_manager, authentication_service) = + instantiate_keys_manager_and_authentication_with_checking_keys_expiration_disabled(); - let peer_key = authentication + let peer_key = keys_manager .add_peer_key(AddKeyRequest { opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), opt_seconds_valid: Some(0), @@ -303,7 +282,7 @@ mod tests { .await .unwrap(); - assert!(authentication.authenticate(&peer_key.key()).await.is_ok()); + assert!(authentication_service.authenticate(&peer_key.key()).await.is_ok()); } } } @@ -311,29 +290,29 @@ mod tests { mod with_permanent_and { mod randomly_generated_keys { - use crate::core::authentication::tests::the_tracker_configured_as_private::instantiate_authentication_facade; + use crate::core::authentication::tests::the_tracker_configured_as_private::instantiate_keys_manager_and_authentication; 
#[tokio::test] async fn it_should_authenticate_a_peer_with_the_key() { - let authentication = instantiate_authentication_facade(); + let (keys_manager, authentication_service) = instantiate_keys_manager_and_authentication(); - let peer_key = authentication.generate_permanent_auth_key().await.unwrap(); + let peer_key = keys_manager.generate_permanent_auth_key().await.unwrap(); - let result = authentication.authenticate(&peer_key.key()).await; + let result = authentication_service.authenticate(&peer_key.key()).await; assert!(result.is_ok()); } } mod pre_generated_keys { - use crate::core::authentication::tests::the_tracker_configured_as_private::instantiate_authentication_facade; + use crate::core::authentication::tests::the_tracker_configured_as_private::instantiate_keys_manager_and_authentication; use crate::core::authentication::{AddKeyRequest, Key}; #[tokio::test] async fn it_should_authenticate_a_peer_with_the_key() { - let authentication = instantiate_authentication_facade(); + let (keys_manager, authentication_service) = instantiate_keys_manager_and_authentication(); - let peer_key = authentication + let peer_key = keys_manager .add_peer_key(AddKeyRequest { opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), opt_seconds_valid: None, @@ -341,7 +320,7 @@ mod tests { .await .unwrap(); - let result = authentication.authenticate(&peer_key.key()).await; + let result = authentication_service.authenticate(&peer_key.key()).await; assert!(result.is_ok()); } diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index de7845eba..c9fc2f185 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -375,12 +375,12 @@ mod tests { let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = 
Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); + let _authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let authentication = Arc::new(authentication::Facade::new(&authentication_service, &keys_handler)); + let authentication = Arc::new(authentication::Facade::new(&keys_handler)); let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 3bc6773dd..a9b618c84 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -303,7 +303,7 @@ mod tests { &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let authentication = Arc::new(authentication::Facade::new(&authentication_service, &keys_handler)); + let authentication = Arc::new(authentication::Facade::new(&keys_handler)); let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index 9658b1bca..c507b3cb6 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -94,12 +94,12 @@ mod tests { let _whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); + let _authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let authentication = 
Arc::new(authentication::Facade::new(&authentication_service, &keys_handler)); + let authentication = Arc::new(authentication::Facade::new(&keys_handler)); let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); @@ -147,12 +147,12 @@ mod tests { )); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); + let _authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let authentication = Arc::new(authentication::Facade::new(&authentication_service, &keys_handler)); + let authentication = Arc::new(authentication::Facade::new(&keys_handler)); let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); From 77eccdc33da86f8029b6fee7e467697650fd4fa6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 22 Jan 2025 10:50:49 +0000 Subject: [PATCH 137/802] fix: [#1195] format --- src/servers/http/v1/services/announce.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 446af1db3..2f2876f5b 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -73,7 +73,8 @@ mod tests { fn public_tracker() -> (Tracker, Arc>>) { let config = configuration::ephemeral_public(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = initialize_tracker_dependencies(&config); + let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + initialize_tracker_dependencies(&config); let 
(stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); From dff6bca1bbf587074ad31ddd8a73f3c4c15ba058 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 22 Jan 2025 11:27:57 +0000 Subject: [PATCH 138/802] refactor: [#1195] remove authentication::Facade service --- src/app.rs | 4 +- src/app_test.rs | 14 +- src/bootstrap/app.rs | 12 +- src/bootstrap/jobs/http_tracker.rs | 7 +- src/bootstrap/jobs/tracker_apis.rs | 23 ++- src/container.rs | 5 +- src/core/authentication/handler.rs | 6 +- src/core/authentication/mod.rs | 132 ++---------------- src/core/mod.rs | 17 +-- src/core/services/mod.rs | 5 +- src/core/services/statistics/mod.rs | 9 +- src/core/services/torrent.rs | 58 ++------ src/servers/apis/routes.rs | 5 + src/servers/apis/server.rs | 14 +- .../apis/v1/context/auth_key/handlers.rs | 27 ++-- .../apis/v1/context/auth_key/routes.rs | 12 +- src/servers/apis/v1/routes.rs | 5 +- src/servers/http/server.rs | 7 +- src/servers/http/v1/handlers/announce.rs | 9 +- src/servers/http/v1/handlers/scrape.rs | 16 +-- src/servers/http/v1/services/announce.rs | 8 +- src/servers/http/v1/services/scrape.rs | 8 +- src/servers/udp/handlers.rs | 18 +-- src/servers/udp/server/mod.rs | 12 +- tests/servers/api/environment.rs | 6 + .../api/v1/contract/context/auth_key.rs | 21 +-- tests/servers/http/environment.rs | 5 + tests/servers/http/v1/contract.rs | 6 +- 28 files changed, 161 insertions(+), 310 deletions(-) diff --git a/src/app.rs b/src/app.rs index 8fa14da54..e41f227e7 100644 --- a/src/app.rs +++ b/src/app.rs @@ -52,8 +52,7 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< // Load peer keys if app_container.tracker.is_private() { app_container - .tracker - .authentication + .keys_handler .load_keys_from_database() .await .expect("Could not retrieve keys from database."); @@ -120,6 +119,7 @@ pub async fn start(config: &Configuration, 
app_container: &AppContainer) -> Vec< if let Some(job) = tracker_apis::start_job( http_api_config, app_container.tracker.clone(), + app_container.keys_handler.clone(), app_container.whitelist_manager.clone(), app_container.ban_service.clone(), app_container.stats_event_sender.clone(), diff --git a/src/app_test.rs b/src/app_test.rs index a8ad9f967..929a23418 100644 --- a/src/app_test.rs +++ b/src/app_test.rs @@ -9,8 +9,8 @@ use crate::core::authentication::key::repository::persisted::DatabaseKeyReposito use crate::core::authentication::service::{self, AuthenticationService}; use crate::core::databases::Database; use crate::core::services::initialize_database; +use crate::core::whitelist; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; -use crate::core::{authentication, whitelist}; /// Initialize the tracker dependencies. #[allow(clippy::type_complexity)] @@ -21,7 +21,6 @@ pub fn initialize_tracker_dependencies( Arc>, Arc, Arc, - Arc, Arc, ) { let database = initialize_database(config); @@ -33,17 +32,10 @@ pub fn initialize_tracker_dependencies( let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); let authentication_service = Arc::new(service::AuthenticationService::new(&config.core, &in_memory_key_repository)); - let keys_handler = Arc::new(KeysHandler::new( + let _keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let authentication_facade = Arc::new(authentication::Facade::new(&keys_handler)); - ( - database, - in_memory_whitelist, - whitelist_authorization, - authentication_facade, - authentication_service, - ) + (database, in_memory_whitelist, whitelist_authorization, authentication_service) } diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index b5067f5f6..a0c7887cf 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -27,8 +27,8 @@ use 
crate::core::authentication::key::repository::in_memory::InMemoryKeyReposito use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::core::authentication::service; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; +use crate::core::whitelist; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; -use crate::core::{authentication, whitelist}; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; use crate::shared::crypto::ephemeral_instance_keys; @@ -103,24 +103,18 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let authentication = Arc::new(authentication::Facade::new(&keys_handler)); - let tracker = Arc::new(initialize_tracker( - configuration, - &database, - &whitelist_authorization, - &authentication, - )); + let tracker = Arc::new(initialize_tracker(configuration, &database, &whitelist_authorization)); AppContainer { tracker, + keys_handler, authentication_service, whitelist_authorization, ban_service, stats_event_sender, stats_repository, whitelist_manager, - authentication, } } diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index a0e11a688..4df669675 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -115,8 +115,8 @@ mod tests { use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::core::authentication::service; use crate::core::services::{initialize_database, initialize_tracker, statistics}; + use crate::core::whitelist; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; - use crate::core::{authentication, whitelist}; use crate::servers::http::Version; use crate::servers::registar::Registar; @@ -140,13 +140,12 @@ mod tests { let 
db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); let authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); - let keys_handler = Arc::new(KeysHandler::new( + let _keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let authentication = Arc::new(authentication::Facade::new(&keys_handler)); - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); let version = Version::V1; diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 39bfc112d..9bb7e6d45 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -30,6 +30,7 @@ use torrust_tracker_configuration::{AccessTokens, HttpApi}; use tracing::instrument; use super::make_rust_tls; +use crate::core::authentication::handler::KeysHandler; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; use crate::core::whitelist::manager::WhiteListManager; @@ -60,10 +61,20 @@ pub struct ApiServerJobStarted(); /// /// #[allow(clippy::too_many_arguments)] -#[instrument(skip(config, tracker, whitelist_manager, ban_service, stats_event_sender, stats_repository, form))] +#[instrument(skip( + config, + tracker, + keys_handler, + whitelist_manager, + ban_service, + stats_event_sender, + stats_repository, + form +))] pub async fn start_job( config: &HttpApi, tracker: Arc, + keys_handler: Arc, whitelist_manager: Arc, ban_service: Arc>, stats_event_sender: Arc>>, @@ -85,6 +96,7 @@ pub async fn start_job( bind_to, tls, tracker.clone(), + keys_handler.clone(), whitelist_manager.clone(), ban_service.clone(), stats_event_sender.clone(), @@ -103,6 +115,7 @@ pub async fn start_job( socket, 
tls, tracker, + keys_handler, whitelist_manager, ban_service, stats_event_sender, @@ -114,6 +127,7 @@ async fn start_v1( socket: SocketAddr, tls: Option, tracker: Arc, + keys_handler: Arc, whitelist_manager: Arc, ban_service: Arc>, stats_event_sender: Arc>>, @@ -124,6 +138,7 @@ async fn start_v1( let server = ApiServer::new(Launcher::new(socket, tls)) .start( tracker, + keys_handler, whitelist_manager, stats_event_sender, stats_repository, @@ -154,8 +169,8 @@ mod tests { use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::core::authentication::service; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; + use crate::core::whitelist; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; - use crate::core::{authentication, whitelist}; use crate::servers::apis::Version; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; @@ -187,15 +202,15 @@ mod tests { &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let authentication = Arc::new(authentication::Facade::new(&keys_handler)); - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); let version = Version::V1; start_job( config, tracker, + keys_handler, whitelist_manager, ban_service, stats_event_sender, diff --git a/src/container.rs b/src/container.rs index 0ea8e3c03..14c4b5d7b 100644 --- a/src/container.rs +++ b/src/container.rs @@ -2,20 +2,21 @@ use std::sync::Arc; use tokio::sync::RwLock; +use crate::core::authentication::handler::KeysHandler; use crate::core::authentication::service::AuthenticationService; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; use crate::core::whitelist::manager::WhiteListManager; -use crate::core::{authentication, 
whitelist, Tracker}; +use crate::core::{whitelist, Tracker}; use crate::servers::udp::server::banning::BanService; pub struct AppContainer { pub tracker: Arc, + pub keys_handler: Arc, pub authentication_service: Arc, pub whitelist_authorization: Arc, pub ban_service: Arc>, pub stats_event_sender: Arc>>, pub stats_repository: Arc, pub whitelist_manager: Arc, - pub authentication: Arc, } diff --git a/src/core/authentication/handler.rs b/src/core/authentication/handler.rs index 3ada2b110..17033aefc 100644 --- a/src/core/authentication/handler.rs +++ b/src/core/authentication/handler.rs @@ -303,7 +303,8 @@ mod tests { use torrust_tracker_clock::clock::Time; use crate::core::authentication::handler::tests::the_keys_handler_when_tracker_is_configured_as_private::instantiate_keys_handler; - use crate::core::authentication::{AddKeyRequest, Key}; + use crate::core::authentication::handler::AddKeyRequest; + use crate::core::authentication::Key; use crate::CurrentClock; #[tokio::test] @@ -344,7 +345,8 @@ mod tests { mod pre_generated_keys { use crate::core::authentication::handler::tests::the_keys_handler_when_tracker_is_configured_as_private::instantiate_keys_handler; - use crate::core::authentication::{AddKeyRequest, Key}; + use crate::core::authentication::handler::AddKeyRequest; + use crate::core::authentication::Key; #[tokio::test] async fn it_should_add_a_pre_generated_key() { diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index ac5db55d1..799263752 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -1,11 +1,3 @@ -use std::sync::Arc; -use std::time::Duration; - -use handler::AddKeyRequest; -use torrust_tracker_primitives::DurationSinceUnixEpoch; - -use super::databases::{self}; -use super::error::PeerKeyError; use crate::CurrentClock; pub mod handler; @@ -16,113 +8,11 @@ pub type PeerKey = key::PeerKey; pub type Key = key::Key; pub type Error = key::Error; -pub struct Facade { - /// The keys handler. 
- keys_handler: Arc, -} - -impl Facade { - #[must_use] - pub fn new(keys_handler: &Arc) -> Self { - Self { - keys_handler: keys_handler.clone(), - } - } - - /// Adds new peer keys to the tracker. - /// - /// Keys can be pre-generated or randomly created. They can also be permanent or expire. - /// - /// # Errors - /// - /// Will return an error if: - /// - /// - The key duration overflows the duration type maximum value. - /// - The provided pre-generated key is invalid. - /// - The key could not been persisted due to database issues. - pub async fn add_peer_key(&self, add_key_req: AddKeyRequest) -> Result { - self.keys_handler.add_peer_key(add_key_req).await - } - - /// It generates a new permanent authentication key. - /// - /// Authentication keys are used by HTTP trackers. - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to add the `auth_key` to the database. - pub async fn generate_permanent_auth_key(&self) -> Result { - self.keys_handler.generate_permanent_auth_key().await - } - - /// It generates a new expiring authentication key. - /// - /// Authentication keys are used by HTTP trackers. - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to add the `auth_key` to the database. - /// - /// # Arguments - /// - /// * `lifetime` - The duration in seconds for the new key. The key will be - /// no longer valid after `lifetime` seconds. - pub async fn generate_auth_key(&self, lifetime: Option) -> Result { - self.keys_handler.generate_auth_key(lifetime).await - } - - /// It adds a pre-generated authentication key. - /// - /// Authentication keys are used by HTTP trackers. - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to add the `auth_key` to the - /// database. For example, if the key already exist. - /// - /// # Arguments - /// - /// * `key` - The pre-generated key. - /// * `lifetime` - The duration in seconds for the new key. 
The key will be - /// no longer valid after `lifetime` seconds. - pub async fn add_auth_key( - &self, - key: Key, - valid_until: Option, - ) -> Result { - self.keys_handler.add_auth_key(key, valid_until).await - } - - /// It removes an authentication key. - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to remove the `key` to the database. - pub async fn remove_auth_key(&self, key: &Key) -> Result<(), databases::error::Error> { - self.keys_handler.remove_auth_key(key).await - } - - /// It removes an authentication key from memory. - pub async fn remove_in_memory_auth_key(&self, key: &Key) { - self.keys_handler.remove_in_memory_auth_key(key).await; - } - - /// The `Tracker` stores the authentication keys in memory and in the - /// database. In case you need to restart the `Tracker` you can load the - /// keys from the database into memory with this function. Keys are - /// automatically stored in the database when they are generated. - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to `load_keys` from the database. - pub async fn load_keys_from_database(&self) -> Result<(), databases::error::Error> { - self.keys_handler.load_keys_from_database().await - } -} - #[cfg(test)] mod tests { + // Integration tests for authentication. 
+ mod the_tracker_configured_as_private { use std::sync::Arc; @@ -135,18 +25,18 @@ mod tests { use crate::core::authentication::handler::KeysHandler; use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; + use crate::core::authentication::service; use crate::core::authentication::service::AuthenticationService; - use crate::core::authentication::{self, service}; use crate::core::services::initialize_database; - fn instantiate_keys_manager_and_authentication() -> (authentication::Facade, Arc) { + fn instantiate_keys_manager_and_authentication() -> (Arc, Arc) { let config = configuration::ephemeral_private(); instantiate_keys_manager_and_authentication_with_configuration(&config) } fn instantiate_keys_manager_and_authentication_with_checking_keys_expiration_disabled( - ) -> (authentication::Facade, Arc) { + ) -> (Arc, Arc) { let mut config = configuration::ephemeral_private(); config.core.private_mode = Some(PrivateMode { @@ -158,7 +48,7 @@ mod tests { fn instantiate_keys_manager_and_authentication_with_configuration( config: &Configuration, - ) -> (authentication::Facade, Arc) { + ) -> (Arc, Arc) { let database = initialize_database(config); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); @@ -170,9 +60,7 @@ mod tests { &in_memory_key_repository.clone(), )); - let facade = authentication::Facade::new(&keys_handler); - - (facade, authentication_service) + (keys_handler, authentication_service) } #[tokio::test] @@ -246,11 +134,12 @@ mod tests { mod pre_generated_keys { + use crate::core::authentication::handler::AddKeyRequest; use crate::core::authentication::tests::the_tracker_configured_as_private::{ instantiate_keys_manager_and_authentication, instantiate_keys_manager_and_authentication_with_checking_keys_expiration_disabled, }; - use crate::core::authentication::{AddKeyRequest, Key}; + use crate::core::authentication::Key; 
#[tokio::test] async fn it_should_authenticate_a_peer_with_the_key() { @@ -305,8 +194,9 @@ mod tests { } mod pre_generated_keys { + use crate::core::authentication::handler::AddKeyRequest; use crate::core::authentication::tests::the_tracker_configured_as_private::instantiate_keys_manager_and_authentication; - use crate::core::authentication::{AddKeyRequest, Key}; + use crate::core::authentication::Key; #[tokio::test] async fn it_should_authenticate_a_peer_with_the_key() { diff --git a/src/core/mod.rs b/src/core/mod.rs index 2b13bc0c0..26ef69bfa 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -490,9 +490,6 @@ pub struct Tracker { /// The in-memory torrents repository. torrents: Arc, - - /// The service to authenticate peers. - pub authentication: Arc, } /// How many peers the peer announcing wants in the announce response. @@ -547,14 +544,12 @@ impl Tracker { config: &Core, database: &Arc>, whitelist_authorization: &Arc, - authentication: &Arc, ) -> Result { Ok(Tracker { config: config.clone(), database: database.clone(), whitelist_authorization: whitelist_authorization.clone(), torrents: Arc::default(), - authentication: authentication.clone(), }) } @@ -816,21 +811,21 @@ mod tests { fn public_tracker() -> Tracker { let config = configuration::ephemeral_public(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = initialize_tracker_dependencies(&config); - initialize_tracker(&config, &database, &whitelist_authorization, &authentication) + initialize_tracker(&config, &database, &whitelist_authorization) } fn whitelisted_tracker() -> (Tracker, Arc, Arc) { let config = configuration::ephemeral_listed(); - let (database, in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + let (database, in_memory_whitelist, whitelist_authorization, _authentication_service) = 
initialize_tracker_dependencies(&config); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let tracker = initialize_tracker(&config, &database, &whitelist_authorization, &authentication); + let tracker = initialize_tracker(&config, &database, &whitelist_authorization); (tracker, whitelist_authorization, whitelist_manager) } @@ -839,10 +834,10 @@ mod tests { let mut config = configuration::ephemeral_listed(); config.core.tracker_policy.persistent_torrent_completed_stat = true; - let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = initialize_tracker_dependencies(&config); - initialize_tracker(&config, &database, &whitelist_authorization, &authentication) + initialize_tracker(&config, &database, &whitelist_authorization) } fn sample_info_hash() -> InfoHash { diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index b1d0d441d..611ea24d2 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -14,10 +14,10 @@ use torrust_tracker_configuration::v2_0_0::database; use torrust_tracker_configuration::Configuration; use super::databases::{self, Database}; +use super::whitelist; use super::whitelist::manager::WhiteListManager; use super::whitelist::repository::in_memory::InMemoryWhitelist; use super::whitelist::repository::persisted::DatabaseWhitelist; -use super::{authentication, whitelist}; use crate::core::Tracker; /// It returns a new tracker building its dependencies. 
@@ -30,9 +30,8 @@ pub fn initialize_tracker( config: &Configuration, database: &Arc>, whitelist_authorization: &Arc, - authentication: &Arc, ) -> Tracker { - match Tracker::new(&Arc::new(config).core, database, whitelist_authorization, authentication) { + match Tracker::new(&Arc::new(config).core, database, whitelist_authorization) { Ok(tracker) => tracker, Err(error) => { panic!("{}", error) diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index a30588472..cc59bcf12 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -132,17 +132,12 @@ mod tests { async fn the_statistics_service_should_return_the_tracker_metrics() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = initialize_tracker_dependencies(&config); let (_stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_repository = Arc::new(stats_repository); - let tracker = Arc::new(initialize_tracker( - &config, - &database, - &whitelist_authorization, - &authentication, - )); + let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 462f10101..9b7254098 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -142,10 +142,10 @@ mod tests { async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = 
initialize_tracker_dependencies(&config); - let tracker = initialize_tracker(&config, &database, &whitelist_authorization, &authentication); + let tracker = initialize_tracker(&config, &database, &whitelist_authorization); let tracker = Arc::new(tracker); @@ -162,15 +162,10 @@ mod tests { async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker( - &config, - &database, - &whitelist_authorization, - &authentication, - )); + let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -213,15 +208,10 @@ mod tests { async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker( - &config, - &database, - &whitelist_authorization, - &authentication, - )); + let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; @@ -232,15 +222,10 @@ mod tests { async fn should_return_a_summarized_info_for_all_torrents() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + let (database, _in_memory_whitelist, whitelist_authorization, 
_authentication_service) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker( - &config, - &database, - &whitelist_authorization, - &authentication, - )); + let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -264,15 +249,10 @@ mod tests { async fn should_allow_limiting_the_number_of_torrents_in_the_result() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker( - &config, - &database, - &whitelist_authorization, - &authentication, - )); + let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -294,15 +274,10 @@ mod tests { async fn should_allow_using_pagination_in_the_result() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker( - &config, - &database, - &whitelist_authorization, - &authentication, - )); + let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -333,15 +308,10 @@ mod tests { async fn should_return_torrents_ordered_by_info_hash() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, 
whitelist_authorization, authentication, _authentication_service) = + let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker( - &config, - &database, - &whitelist_authorization, - &authentication, - )); + let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index a5c33d5ee..4a005393d 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -30,6 +30,7 @@ use tracing::{instrument, Level, Span}; use super::v1; use super::v1::context::health_check::handlers::health_check_handler; use super::v1::middlewares::auth::State; +use crate::core::authentication::handler::KeysHandler; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; use crate::core::whitelist::manager::WhiteListManager; @@ -39,9 +40,11 @@ use crate::servers::logging::Latency; use crate::servers::udp::server::banning::BanService; /// Add all API routes to the router. 
+#[allow(clippy::too_many_arguments)] #[allow(clippy::needless_pass_by_value)] #[instrument(skip( tracker, + keys_handler, whitelist_manager, ban_service, stats_event_sender, @@ -50,6 +53,7 @@ use crate::servers::udp::server::banning::BanService; ))] pub fn router( tracker: Arc, + keys_handler: Arc, whitelist_manager: Arc, ban_service: Arc>, stats_event_sender: Arc>>, @@ -65,6 +69,7 @@ pub fn router( api_url_prefix, router, tracker.clone(), + &keys_handler.clone(), &whitelist_manager.clone(), ban_service.clone(), stats_event_sender.clone(), diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index c9fc2f185..f219ca023 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -39,6 +39,7 @@ use tracing::{instrument, Level}; use super::routes::router; use crate::bootstrap::jobs::Started; +use crate::core::authentication::handler::KeysHandler; use crate::core::statistics::repository::Repository; use crate::core::whitelist::manager::WhiteListManager; use crate::core::{statistics, Tracker}; @@ -127,10 +128,11 @@ impl ApiServer { /// /// It would panic if the bound socket address cannot be sent back to this starter. 
#[allow(clippy::too_many_arguments)] - #[instrument(skip(self, tracker, whitelist_manager, stats_event_sender, ban_service, stats_repository, form, access_tokens), err, ret(Display, level = Level::INFO))] + #[instrument(skip(self, tracker, keys_handler, whitelist_manager, stats_event_sender, ban_service, stats_repository, form, access_tokens), err, ret(Display, level = Level::INFO))] pub async fn start( self, tracker: Arc, + keys_handler: Arc, whitelist_manager: Arc, stats_event_sender: Arc>>, stats_repository: Arc, @@ -149,6 +151,7 @@ impl ApiServer { let _task = launcher .start( tracker, + keys_handler, whitelist_manager, ban_service, stats_event_sender, @@ -259,6 +262,7 @@ impl Launcher { #[instrument(skip( self, tracker, + keys_handler, whitelist_manager, ban_service, stats_event_sender, @@ -270,6 +274,7 @@ impl Launcher { pub fn start( &self, tracker: Arc, + keys_handler: Arc, whitelist_manager: Arc, ban_service: Arc>, stats_event_sender: Arc>>, @@ -283,6 +288,7 @@ impl Launcher { let router = router( tracker, + keys_handler, whitelist_manager, ban_service, stats_event_sender, @@ -347,8 +353,8 @@ mod tests { use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::core::authentication::service; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; + use crate::core::whitelist; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; - use crate::core::{authentication, whitelist}; use crate::servers::apis::server::{ApiServer, Launcher}; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; @@ -380,9 +386,8 @@ mod tests { &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let authentication = Arc::new(authentication::Facade::new(&keys_handler)); - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); + let tracker = Arc::new(initialize_tracker(&cfg, 
&database, &whitelist_authorization)); let bind_to = config.bind_address; @@ -399,6 +404,7 @@ mod tests { let started = stopped .start( tracker, + keys_handler, whitelist_manager, stats_event_sender, stats_repository, diff --git a/src/servers/apis/v1/context/auth_key/handlers.rs b/src/servers/apis/v1/context/auth_key/handlers.rs index f0c131bbf..045a9d211 100644 --- a/src/servers/apis/v1/context/auth_key/handlers.rs +++ b/src/servers/apis/v1/context/auth_key/handlers.rs @@ -12,9 +12,8 @@ use super::responses::{ auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, failed_to_reload_keys_response, invalid_auth_key_duration_response, invalid_auth_key_response, }; -use crate::core::authentication::handler::AddKeyRequest; +use crate::core::authentication::handler::{AddKeyRequest, KeysHandler}; use crate::core::authentication::Key; -use crate::core::Tracker; use crate::servers::apis::v1::context::auth_key::resources::AuthKey; use crate::servers::apis::v1::responses::{invalid_auth_key_param_response, ok_response}; @@ -32,11 +31,10 @@ use crate::servers::apis::v1::responses::{invalid_auth_key_param_response, ok_re /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#generate-a-new-authentication-key) /// for more information about this endpoint. pub async fn add_auth_key_handler( - State(tracker): State>, + State(keys_handler): State>, extract::Json(add_key_form): extract::Json, ) -> Response { - match tracker - .authentication + match keys_handler .add_peer_key(AddKeyRequest { opt_key: add_key_form.opt_key.clone(), opt_seconds_valid: add_key_form.opt_seconds_valid, @@ -67,13 +65,12 @@ pub async fn add_auth_key_handler( /// for more information about this endpoint. /// /// This endpoint has been deprecated. Use [`add_auth_key_handler`]. 
-pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { +pub async fn generate_auth_key_handler( + State(keys_handler): State>, + Path(seconds_valid_or_key): Path, +) -> Response { let seconds_valid = seconds_valid_or_key; - match tracker - .authentication - .generate_auth_key(Some(Duration::from_secs(seconds_valid))) - .await - { + match keys_handler.generate_auth_key(Some(Duration::from_secs(seconds_valid))).await { Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), Err(e) => failed_to_generate_key_response(e), } @@ -109,12 +106,12 @@ pub struct KeyParam(String); /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#delete-an-authentication-key) /// for more information about this endpoint. pub async fn delete_auth_key_handler( - State(tracker): State>, + State(keys_handler): State>, Path(seconds_valid_or_key): Path, ) -> Response { match Key::from_str(&seconds_valid_or_key.0) { Err(_) => invalid_auth_key_param_response(&seconds_valid_or_key.0), - Ok(key) => match tracker.authentication.remove_auth_key(&key).await { + Ok(key) => match keys_handler.remove_auth_key(&key).await { Ok(()) => ok_response(), Err(e) => failed_to_delete_key_response(e), }, @@ -133,8 +130,8 @@ pub async fn delete_auth_key_handler( /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#reload-authentication-keys) /// for more information about this endpoint. 
-pub async fn reload_keys_handler(State(tracker): State>) -> Response { - match tracker.authentication.load_keys_from_database().await { +pub async fn reload_keys_handler(State(keys_handler): State>) -> Response { + match keys_handler.load_keys_from_database().await { Ok(()) => ok_response(), Err(e) => failed_to_reload_keys_response(e), } diff --git a/src/servers/apis/v1/context/auth_key/routes.rs b/src/servers/apis/v1/context/auth_key/routes.rs index ac11281ee..45aeb02ec 100644 --- a/src/servers/apis/v1/context/auth_key/routes.rs +++ b/src/servers/apis/v1/context/auth_key/routes.rs @@ -12,10 +12,10 @@ use axum::routing::{get, post}; use axum::Router; use super::handlers::{add_auth_key_handler, delete_auth_key_handler, generate_auth_key_handler, reload_keys_handler}; -use crate::core::Tracker; +use crate::core::authentication::handler::KeysHandler; /// It adds the routes to the router for the [`auth_key`](crate::servers::apis::v1::context::auth_key) API context. -pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { +pub fn add(prefix: &str, router: Router, keys_handler: Arc) -> Router { // Keys router .route( @@ -29,14 +29,14 @@ pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { // Use POST /keys &format!("{prefix}/key/{{seconds_valid_or_key}}"), post(generate_auth_key_handler) - .with_state(tracker.clone()) + .with_state(keys_handler.clone()) .delete(delete_auth_key_handler) - .with_state(tracker.clone()), + .with_state(keys_handler.clone()), ) // Keys command .route( &format!("{prefix}/keys/reload"), - get(reload_keys_handler).with_state(tracker.clone()), + get(reload_keys_handler).with_state(keys_handler.clone()), ) - .route(&format!("{prefix}/keys"), post(add_auth_key_handler).with_state(tracker)) + .route(&format!("{prefix}/keys"), post(add_auth_key_handler).with_state(keys_handler)) } diff --git a/src/servers/apis/v1/routes.rs b/src/servers/apis/v1/routes.rs index 1954af2e4..c26ce4f3d 100644 --- a/src/servers/apis/v1/routes.rs 
+++ b/src/servers/apis/v1/routes.rs @@ -5,6 +5,7 @@ use axum::Router; use tokio::sync::RwLock; use super::context::{auth_key, stats, torrent, whitelist}; +use crate::core::authentication::handler::KeysHandler; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; use crate::core::whitelist::manager::WhiteListManager; @@ -12,10 +13,12 @@ use crate::core::Tracker; use crate::servers::udp::server::banning::BanService; /// Add the routes for the v1 API. +#[allow(clippy::too_many_arguments)] pub fn add( prefix: &str, router: Router, tracker: Arc, + keys_handler: &Arc, whitelist_manager: &Arc, ban_service: Arc>, stats_event_sender: Arc>>, @@ -23,7 +26,7 @@ pub fn add( ) -> Router { let v1_prefix = format!("{prefix}/v1"); - let router = auth_key::routes::add(&v1_prefix, router, tracker.clone()); + let router = auth_key::routes::add(&v1_prefix, router, keys_handler.clone()); let router = stats::routes::add( &v1_prefix, router, diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index a9b618c84..5f62a2013 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -275,8 +275,8 @@ mod tests { use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::core::authentication::service; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; + use crate::core::whitelist; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; - use crate::core::{authentication, whitelist}; use crate::servers::http::server::{HttpServer, Launcher}; use crate::servers::registar::Registar; @@ -299,13 +299,12 @@ mod tests { let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); let authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); - let keys_handler = 
Arc::new(KeysHandler::new( + let _keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let authentication = Arc::new(authentication::Facade::new(&keys_handler)); - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); let http_trackers = cfg.http_trackers.clone().expect("missing HTTP trackers configuration"); let config = &http_trackers[0]; diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index fbadde967..c42981d4c 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -274,17 +274,12 @@ mod tests { /// Initialize tracker's dependencies and tracker. fn initialize_tracker_and_deps(config: &Configuration) -> TrackerAndDeps { - let (database, _in_memory_whitelist, whitelist_authorization, authentication, authentication_service) = + let (database, _in_memory_whitelist, whitelist_authorization, authentication_service) = initialize_tracker_dependencies(config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); - let tracker = Arc::new(initialize_tracker( - config, - &database, - &whitelist_authorization, - &authentication, - )); + let tracker = Arc::new(initialize_tracker(config, &database, &whitelist_authorization)); (tracker, stats_event_sender, whitelist_authorization, authentication_service) } diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index f7be42bff..de4610a61 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -151,13 +151,13 @@ mod tests { ) { let config = configuration::ephemeral_private(); - let (database, _in_memory_whitelist, whitelist_authorization, 
authentication, authentication_service) = + let (database, _in_memory_whitelist, whitelist_authorization, authentication_service) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); ( - initialize_tracker(&config, &database, &whitelist_authorization, &authentication), + initialize_tracker(&config, &database, &whitelist_authorization), stats_event_sender, authentication_service, ) @@ -170,13 +170,13 @@ mod tests { ) { let config = configuration::ephemeral_listed(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication, authentication_service) = + let (database, _in_memory_whitelist, whitelist_authorization, authentication_service) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); ( - initialize_tracker(&config, &database, &whitelist_authorization, &authentication), + initialize_tracker(&config, &database, &whitelist_authorization), stats_event_sender, authentication_service, ) @@ -189,13 +189,13 @@ mod tests { ) { let config = configuration::ephemeral_with_reverse_proxy(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication, authentication_service) = + let (database, _in_memory_whitelist, whitelist_authorization, authentication_service) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); ( - initialize_tracker(&config, &database, &whitelist_authorization, &authentication), + initialize_tracker(&config, &database, &whitelist_authorization), stats_event_sender, authentication_service, ) @@ -208,13 +208,13 @@ mod tests { ) { let config = configuration::ephemeral_without_reverse_proxy(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication, authentication_service) = + let (database, 
_in_memory_whitelist, whitelist_authorization, authentication_service) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); ( - initialize_tracker(&config, &database, &whitelist_authorization, &authentication), + initialize_tracker(&config, &database, &whitelist_authorization), stats_event_sender, authentication_service, ) diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 2f2876f5b..018348d7e 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -73,12 +73,12 @@ mod tests { fn public_tracker() -> (Tracker, Arc>>) { let config = configuration::ephemeral_public(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); - let tracker = initialize_tracker(&config, &database, &whitelist_authorization, &authentication); + let tracker = initialize_tracker(&config, &database, &whitelist_authorization); (tracker, stats_event_sender) } @@ -132,10 +132,10 @@ mod tests { fn test_tracker_factory() -> Tracker { let config = configuration::ephemeral(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = initialize_tracker_dependencies(&config); - Tracker::new(&config.core, &database, &whitelist_authorization, &authentication).unwrap() + Tracker::new(&config.core, &database, &whitelist_authorization).unwrap() } #[tokio::test] diff --git a/src/servers/http/v1/services/scrape.rs 
b/src/servers/http/v1/services/scrape.rs index 35b264363..9ad741234 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -87,10 +87,10 @@ mod tests { fn public_tracker() -> Tracker { let config = configuration::ephemeral_public(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = initialize_tracker_dependencies(&config); - initialize_tracker(&config, &database, &whitelist_authorization, &authentication) + initialize_tracker(&config, &database, &whitelist_authorization) } fn sample_info_hashes() -> Vec { @@ -116,10 +116,10 @@ mod tests { fn test_tracker_factory() -> Tracker { let config = configuration::ephemeral(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = initialize_tracker_dependencies(&config); - Tracker::new(&config.core, &database, &whitelist_authorization, &authentication).unwrap() + Tracker::new(&config.core, &database, &whitelist_authorization).unwrap() } mod with_real_data { diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index f0f7719e2..feeca4e40 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -516,18 +516,13 @@ mod tests { } fn initialize_tracker_and_deps(config: &Configuration) -> TrackerAndDeps { - let (database, in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + let (database, in_memory_whitelist, whitelist_authorization, _authentication_service) = initialize_tracker_dependencies(config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); let whitelist_manager = 
initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let tracker = Arc::new(initialize_tracker( - config, - &database, - &whitelist_authorization, - &authentication, - )); + let tracker = Arc::new(initialize_tracker(config, &database, &whitelist_authorization)); ( tracker, @@ -635,10 +630,10 @@ mod tests { fn test_tracker_factory() -> (Arc, Arc) { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(Tracker::new(&config.core, &database, &whitelist_authorization, &authentication).unwrap()); + let tracker = Arc::new(Tracker::new(&config.core, &database, &whitelist_authorization).unwrap()); (tracker, whitelist_authorization) } @@ -1383,7 +1378,7 @@ mod tests { async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let config = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let (database, _in_memory_whitelist, whitelist_authorization, authentication, _authentication_service) = + let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = initialize_tracker_dependencies(&config); let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); @@ -1395,8 +1390,7 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let tracker = - Arc::new(core::Tracker::new(&config.core, &database, &whitelist_authorization, &authentication).unwrap()); + let tracker = Arc::new(core::Tracker::new(&config.core, &database, &whitelist_authorization).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index 
c507b3cb6..844c18678 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -69,8 +69,8 @@ mod tests { use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::core::authentication::service; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; + use crate::core::whitelist; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; - use crate::core::{authentication, whitelist}; use crate::servers::registar::Registar; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; @@ -95,13 +95,12 @@ mod tests { let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); let _authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); - let keys_handler = Arc::new(KeysHandler::new( + let _keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let authentication = Arc::new(authentication::Facade::new(&keys_handler)); - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); let udp_trackers = cfg.udp_trackers.clone().expect("missing UDP trackers configuration"); let config = &udp_trackers[0]; @@ -148,13 +147,12 @@ mod tests { let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); let _authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); - let keys_handler = Arc::new(KeysHandler::new( + let _keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), &in_memory_key_repository.clone(), )); 
- let authentication = Arc::new(authentication::Facade::new(&keys_handler)); - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization, &authentication)); + let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); let config = &cfg.udp_trackers.as_ref().unwrap().first().unwrap(); let bind_to = config.bind_address; diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 3e8fedd0e..f014df36f 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -8,6 +8,7 @@ use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; use torrust_tracker_configuration::{Configuration, HttpApi}; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; +use torrust_tracker_lib::core::authentication::handler::KeysHandler; use torrust_tracker_lib::core::authentication::service::AuthenticationService; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; @@ -24,6 +25,7 @@ where { pub config: Arc, pub tracker: Arc, + pub keys_handler: Arc, pub authentication_service: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, @@ -60,6 +62,7 @@ impl Environment { Self { config, tracker: app_container.tracker.clone(), + keys_handler: app_container.keys_handler.clone(), authentication_service: app_container.authentication_service.clone(), stats_event_sender: app_container.stats_event_sender.clone(), stats_repository: app_container.stats_repository.clone(), @@ -76,6 +79,7 @@ impl Environment { Environment { config: self.config, tracker: self.tracker.clone(), + keys_handler: self.keys_handler.clone(), authentication_service: self.authentication_service.clone(), stats_event_sender: self.stats_event_sender.clone(), stats_repository: self.stats_repository.clone(), @@ -86,6 +90,7 @@ impl 
Environment { .server .start( self.tracker, + self.keys_handler, self.whitelist_manager, self.stats_event_sender, self.stats_repository, @@ -108,6 +113,7 @@ impl Environment { Environment { config: self.config, tracker: self.tracker, + keys_handler: self.keys_handler, authentication_service: self.authentication_service, stats_event_sender: self.stats_event_sender, stats_repository: self.stats_repository, diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs index 6a270f894..73860c9c2 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -158,8 +158,7 @@ async fn should_allow_deleting_an_auth_key() { let seconds_valid = 60; let auth_key = env - .tracker - .authentication + .keys_handler .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -293,8 +292,7 @@ async fn should_fail_when_the_auth_key_cannot_be_deleted() { let seconds_valid = 60; let auth_key = env - .tracker - .authentication + .keys_handler .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -327,8 +325,7 @@ async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { // Generate new auth key let auth_key = env - .tracker - .authentication + .keys_handler .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -348,8 +345,7 @@ async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { // Generate new auth key let auth_key = env - .tracker - .authentication + .keys_handler .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -377,8 +373,7 @@ async fn should_allow_reloading_keys() { let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; - env.tracker - .authentication + env.keys_handler .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -403,8 +398,7 @@ async fn 
should_fail_when_keys_cannot_be_reloaded() { let request_id = Uuid::new_v4(); let seconds_valid = 60; - env.tracker - .authentication + env.keys_handler .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -432,8 +426,7 @@ async fn should_not_allow_reloading_keys_for_unauthenticated_users() { let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; - env.tracker - .authentication + env.keys_handler .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 85921cd37..81b6a12e2 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -5,6 +5,7 @@ use futures::executor::block_on; use torrust_tracker_configuration::{Configuration, HttpTracker}; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; +use torrust_tracker_lib::core::authentication::handler::KeysHandler; use torrust_tracker_lib::core::authentication::service::AuthenticationService; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; @@ -17,6 +18,7 @@ use torrust_tracker_primitives::peer; pub struct Environment { pub config: Arc, pub tracker: Arc, + pub keys_handler: Arc, pub authentication_service: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, @@ -56,6 +58,7 @@ impl Environment { Self { config, tracker: app_container.tracker.clone(), + keys_handler: app_container.keys_handler.clone(), authentication_service: app_container.authentication_service.clone(), stats_event_sender: app_container.stats_event_sender.clone(), stats_repository: app_container.stats_repository.clone(), @@ -71,6 +74,7 @@ impl Environment { Environment { config: self.config, tracker: self.tracker.clone(), + keys_handler: 
self.keys_handler.clone(), authentication_service: self.authentication_service.clone(), whitelist_authorization: self.whitelist_authorization.clone(), stats_event_sender: self.stats_event_sender.clone(), @@ -101,6 +105,7 @@ impl Environment { Environment { config: self.config, tracker: self.tracker, + keys_handler: self.keys_handler, authentication_service: self.authentication_service, whitelist_authorization: self.whitelist_authorization, stats_event_sender: self.stats_event_sender, diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index d8b1c92c2..0aafbd213 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -1397,8 +1397,7 @@ mod configured_as_private { let env = Started::new(&configuration::ephemeral_private().into()).await; let expiring_key = env - .tracker - .authentication + .keys_handler .generate_auth_key(Some(Duration::from_secs(60))) .await .unwrap(); @@ -1547,8 +1546,7 @@ mod configured_as_private { ); let expiring_key = env - .tracker - .authentication + .keys_handler .generate_auth_key(Some(Duration::from_secs(60))) .await .unwrap(); From 718e960f50ee4a332513e80d968eaee532e16cb0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 22 Jan 2025 15:54:49 +0000 Subject: [PATCH 139/802] chore(deps): udpate dependencies ```output cargo update Updating crates.io index Locking 21 packages to latest compatible versions Removing ahash v0.8.11 Updating borsh v1.5.4 -> v1.5.5 Updating borsh-derive v1.5.4 -> v1.5.5 Updating brotli-decompressor v4.0.1 -> v4.0.2 Updating cc v1.2.9 -> v1.2.10 Updating clap v4.5.26 -> v4.5.27 Updating clap_builder v4.5.26 -> v4.5.27 Updating crunchy v0.2.2 -> v0.2.3 Updating derive_utils v0.14.2 -> v0.15.0 Updating hashlink v0.9.1 -> v0.10.0 Updating indexmap v2.7.0 -> v2.7.1 Updating io-enum v1.1.3 -> v1.2.0 Updating ipnet v2.10.1 -> v2.11.0 Updating is-terminal v0.4.13 -> v0.4.15 Updating libsqlite3-sys v0.30.1 -> v0.31.0 Updating r2d2_sqlite v0.25.0 -> v0.26.0 
Updating rusqlite v0.32.1 -> v0.33.0 Updating rustix v0.38.43 -> v0.38.44 Updating semver v1.0.24 -> v1.0.25 Updating serde_json v1.0.135 -> v1.0.137 Updating uuid v1.12.0 -> v1.12.1 Updating valuable v0.1.0 -> v0.1.1 ``` --- Cargo.lock | 115 +++++++++++++++++++++++------------------------------ 1 file changed, 50 insertions(+), 65 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7ab861d2b..355457721 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -28,18 +28,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "ahash" -version = "0.8.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" -dependencies = [ - "cfg-if", - "once_cell", - "version_check", - "zerocopy", -] - [[package]] name = "aho-corasick" version = "1.1.3" @@ -658,9 +646,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.5.4" +version = "1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb65153674e51d3a42c8f27b05b9508cea85edfaade8aa46bc8fc18cecdfef3" +checksum = "5430e3be710b68d984d1391c854eb431a9d548640711faa54eecb1df93db91cc" dependencies = [ "borsh-derive", "cfg_aliases", @@ -668,9 +656,9 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.5.4" +version = "1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a396e17ad94059c650db3d253bb6e25927f1eb462eede7e7a153bb6e75dce0a7" +checksum = "f8b668d39970baad5356d7c83a86fee3a539e6f93bf6764c97368243e17a0487" dependencies = [ "once_cell", "proc-macro-crate", @@ -692,9 +680,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "4.0.1" +version = "4.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a45bd2e4095a8b518033b128020dd4a55aab1c0a381ba4404a472630f4bc362" +checksum = "74fa05ad7d803d413eb8380983b092cbbaf9a85f151b871360e7b00cd7060b37" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -787,9 +775,9 
@@ dependencies = [ [[package]] name = "cc" -version = "1.2.9" +version = "1.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8293772165d9345bdaaa39b45b2109591e63fe5e6fbc23c6ff930a048aa310b" +checksum = "13208fcbb66eaeffe09b99fffbe1af420f00a7b35aa99ad683dfc1aa76145229" dependencies = [ "jobserver", "libc", @@ -880,9 +868,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.26" +version = "4.5.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8eb5e908ef3a6efbe1ed62520fb7287959888c88485abe072543190ecc66783" +checksum = "769b0145982b4b48713e01ec42d61614425f27b7058bda7180a3a41f30104796" dependencies = [ "clap_builder", "clap_derive", @@ -890,9 +878,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.26" +version = "4.5.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b01801b5fc6a0a232407abc821660c9c6d25a1cafc0d4f85f29fb8d9afc121" +checksum = "1b26884eb4b57140e4d2d93652abfa49498b938b3c9179f9fc487b0acc3edad7" dependencies = [ "anstream", "anstyle", @@ -1095,9 +1083,9 @@ checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" +checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" [[package]] name = "crypto-common" @@ -1191,9 +1179,9 @@ dependencies = [ [[package]] name = "derive_utils" -version = "0.14.2" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65f152f4b8559c4da5d574bafc7af85454d706b4c5fe8b530d508cacbb6807ea" +checksum = "ccfae181bab5ab6c5478b2ccb69e4c68a02f8c3ec72f6616bfec9dbc599d2ee0" dependencies = [ "proc-macro2", "quote", @@ -1619,7 +1607,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.7.0", + 
"indexmap 2.7.1", "slab", "tokio", "tokio-util", @@ -1642,7 +1630,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash 0.7.8", + "ahash", ] [[package]] @@ -1650,9 +1638,6 @@ name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" -dependencies = [ - "ahash 0.8.11", -] [[package]] name = "hashbrown" @@ -1667,11 +1652,11 @@ dependencies = [ [[package]] name = "hashlink" -version = "0.9.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" dependencies = [ - "hashbrown 0.14.5", + "hashbrown 0.15.2", ] [[package]] @@ -1998,9 +1983,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.7.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" +checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -2024,28 +2009,28 @@ dependencies = [ [[package]] name = "io-enum" -version = "1.1.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53b53d712d99a73eec59ee5e4fe6057f8052142d38eeafbbffcb06b36d738a6e" +checksum = "d197db2f7ebf90507296df3aebaf65d69f5dce8559d8dbd82776a6cadab61bbf" dependencies = [ "derive_utils", ] [[package]] name = "ipnet" -version = "2.10.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" +checksum = 
"469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "is-terminal" -version = "0.4.13" +version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b" +checksum = "e19b23d53f35ce9f56aebc7d1bb4e6ac1e9c0db7ac85c8d1760c04379edced37" dependencies = [ "hermit-abi", "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2136,9 +2121,9 @@ checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" [[package]] name = "libsqlite3-sys" -version = "0.30.1" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" +checksum = "ad8935b44e7c13394a179a438e0cebba0fe08fe01b54f152e29a93b5cf993fd4" dependencies = [ "cc", "pkg-config", @@ -2912,9 +2897,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.25.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb14dba8247a6a15b7fdbc7d389e2e6f03ee9f184f87117706d509c092dfe846" +checksum = "ee025287c0188d75ae2563bcb91c9b0d1843cfc56e4bd3ab867597971b5cc256" dependencies = [ "r2d2", "rusqlite", @@ -3160,9 +3145,9 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.32.1" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7753b721174eb8ff87a9a0e799e2d7bc3749323e773db92e0984debb00019d6e" +checksum = "1c6d5e5acb6f6129fe3f7ba0a7fc77bca1942cb568535e18e7bc40262baf3110" dependencies = [ "bitflags", "fallible-iterator", @@ -3211,9 +3196,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.43" +version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies 
= [ "bitflags", "errno", @@ -3343,9 +3328,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.24" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" +checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" [[package]] name = "serde" @@ -3393,7 +3378,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4" dependencies = [ "form_urlencoded", - "indexmap 2.7.0", + "indexmap 2.7.1", "itoa", "ryu", "serde", @@ -3401,11 +3386,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.135" +version = "1.0.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b0d7ba2887406110130a978386c4e1befb98c674b4fba677954e4db976630d9" +checksum = "930cfb6e6abf99298aaad7d29abbef7a9999a9a8806a40088f55f0dcec03146b" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.7.1", "itoa", "memchr", "ryu", @@ -3464,7 +3449,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.7.0", + "indexmap 2.7.1", "serde", "serde_derive", "serde_json", @@ -3926,7 +3911,7 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.7.1", "serde", "serde_spanned", "toml_datetime", @@ -4338,9 +4323,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.12.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744018581f9a3454a9e15beb8a33b017183f1e7c0cd170232a2d1453b23a51c4" +checksum = "b3758f5e68192bb96cc8f9b7e2c2cfdabb435499a28499a42f8f984092adad4b" dependencies = [ "getrandom", "rand", @@ -4348,9 +4333,9 @@ dependencies = [ [[package]] name = 
"valuable" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "value-bag" From df6ed93e4eb1589dee88bd5248d2644382ec5696 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 22 Jan 2025 17:10:43 +0000 Subject: [PATCH 140/802] refactor: [#1198] remove duplicate code in app init in tests --- src/bootstrap/jobs/http_tracker.rs | 36 +++------------- src/bootstrap/jobs/tracker_apis.rs | 46 ++++---------------- src/core/authentication/handler.rs | 1 - src/core/authentication/mod.rs | 2 - src/servers/apis/server.rs | 46 ++++---------------- src/servers/http/server.rs | 37 +++------------- src/servers/udp/server/mod.rs | 69 +++++------------------------- 7 files changed, 39 insertions(+), 198 deletions(-) diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 4df669675..92a255c9e 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -108,15 +108,8 @@ mod tests { use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::initialize_global_services; + use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; use crate::bootstrap::jobs::http_tracker::start_job; - use crate::core::authentication::handler::KeysHandler; - use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; - use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; - use crate::core::authentication::service; - use crate::core::services::{initialize_database, initialize_tracker, statistics}; - use crate::core::whitelist; - use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::servers::http::Version; use crate::servers::registar::Registar; @@ -126,35 +119,18 @@ mod tests { 
let http_tracker = cfg.http_trackers.clone().expect("missing HTTP tracker configuration"); let config = &http_tracker[0]; - let (stats_event_sender, _stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); - let stats_event_sender = Arc::new(stats_event_sender); - initialize_global_services(&cfg); - let database = initialize_database(&cfg); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( - &cfg.core, - &in_memory_whitelist.clone(), - )); - let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); - let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); - let _keys_handler = Arc::new(KeysHandler::new( - &db_key_repository.clone(), - &in_memory_key_repository.clone(), - )); - - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); + let app_container = initialize_app_container(&cfg); let version = Version::V1; start_job( config, - tracker, - authentication_service, - whitelist_authorization, - stats_event_sender, + app_container.tracker, + app_container.authentication_service, + app_container.whitelist_authorization, + app_container.stats_event_sender, Registar::default().give_form(), version, ) diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 9bb7e6d45..1047fa418 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -159,62 +159,32 @@ async fn start_v1( mod tests { use std::sync::Arc; - use tokio::sync::RwLock; use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::initialize_global_services; + use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; use crate::bootstrap::jobs::tracker_apis::start_job; - use 
crate::core::authentication::handler::KeysHandler; - use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; - use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; - use crate::core::authentication::service; - use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; - use crate::core::whitelist; - use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::servers::apis::Version; use crate::servers::registar::Registar; - use crate::servers::udp::server::banning::BanService; - use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; #[tokio::test] async fn it_should_start_http_tracker() { let cfg = Arc::new(ephemeral_public()); let config = &cfg.http_api.clone().unwrap(); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let (stats_event_sender, stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); - let stats_event_sender = Arc::new(stats_event_sender); - let stats_repository = Arc::new(stats_repository); - initialize_global_services(&cfg); - let database = initialize_database(&cfg); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( - &cfg.core, - &in_memory_whitelist.clone(), - )); - let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); - let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let _authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); - let keys_handler = Arc::new(KeysHandler::new( - &db_key_repository.clone(), - &in_memory_key_repository.clone(), - )); - - let tracker = Arc::new(initialize_tracker(&cfg, &database, 
&whitelist_authorization)); + let app_container = initialize_app_container(&cfg); let version = Version::V1; start_job( config, - tracker, - keys_handler, - whitelist_manager, - ban_service, - stats_event_sender, - stats_repository, + app_container.tracker, + app_container.keys_handler, + app_container.whitelist_manager, + app_container.ban_service, + app_container.stats_event_sender, + app_container.stats_repository, Registar::default().give_form(), version, ) diff --git a/src/core/authentication/handler.rs b/src/core/authentication/handler.rs index 17033aefc..5ec9a11b4 100644 --- a/src/core/authentication/handler.rs +++ b/src/core/authentication/handler.rs @@ -267,7 +267,6 @@ mod tests { fn instantiate_keys_handler_with_configuration(config: &Configuration) -> KeysHandler { let database = initialize_database(config); - let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index 799263752..0180b3a1e 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -50,10 +50,8 @@ mod tests { config: &Configuration, ) -> (Arc, Arc) { let database = initialize_database(config); - let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(service::AuthenticationService::new(&config.core, &in_memory_key_repository)); let keys_handler = Arc::new(KeysHandler::new( &db_key_repository.clone(), diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index f219ca023..e65d6643d 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -343,51 +343,21 @@ impl Launcher { mod tests { use std::sync::Arc; - use tokio::sync::RwLock; use torrust_tracker_test_helpers::configuration::ephemeral_public; - use 
crate::bootstrap::app::initialize_global_services; + use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; use crate::bootstrap::jobs::make_rust_tls; - use crate::core::authentication::handler::KeysHandler; - use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; - use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; - use crate::core::authentication::service; - use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; - use crate::core::whitelist; - use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::servers::apis::server::{ApiServer, Launcher}; use crate::servers::registar::Registar; - use crate::servers::udp::server::banning::BanService; - use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; #[tokio::test] async fn it_should_be_able_to_start_and_stop() { let cfg = Arc::new(ephemeral_public()); let config = &cfg.http_api.clone().unwrap(); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let (stats_event_sender, stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); - let stats_event_sender = Arc::new(stats_event_sender); - let stats_repository = Arc::new(stats_repository); - initialize_global_services(&cfg); - let database = initialize_database(&cfg); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( - &cfg.core, - &in_memory_whitelist.clone(), - )); - let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); - let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let _authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, 
&in_memory_key_repository)); - let keys_handler = Arc::new(KeysHandler::new( - &db_key_repository.clone(), - &in_memory_key_repository.clone(), - )); - - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); + let app_container = initialize_app_container(&cfg); let bind_to = config.bind_address; @@ -403,12 +373,12 @@ mod tests { let started = stopped .start( - tracker, - keys_handler, - whitelist_manager, - stats_event_sender, - stats_repository, - ban_service, + app_container.tracker, + app_container.keys_handler, + app_container.whitelist_manager, + app_container.stats_event_sender, + app_container.stats_repository, + app_container.ban_service, register.give_form(), access_tokens, ) diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 5f62a2013..e7a3a92ec 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -268,15 +268,8 @@ mod tests { use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::initialize_global_services; + use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; use crate::bootstrap::jobs::make_rust_tls; - use crate::core::authentication::handler::KeysHandler; - use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; - use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; - use crate::core::authentication::service; - use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; - use crate::core::whitelist; - use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::servers::http::server::{HttpServer, Launcher}; use crate::servers::registar::Registar; @@ -284,27 +277,9 @@ mod tests { async fn it_should_be_able_to_start_and_stop() { let cfg = Arc::new(ephemeral_public()); - let (stats_event_sender, _stats_repository) = 
statistics::setup::factory(cfg.core.tracker_usage_statistics); - let stats_event_sender = Arc::new(stats_event_sender); - initialize_global_services(&cfg); - let database = initialize_database(&cfg); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( - &cfg.core, - &in_memory_whitelist.clone(), - )); - let _whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); - let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); - let _keys_handler = Arc::new(KeysHandler::new( - &db_key_repository.clone(), - &in_memory_key_repository.clone(), - )); - - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); + let app_container = initialize_app_container(&cfg); let http_trackers = cfg.http_trackers.clone().expect("missing HTTP trackers configuration"); let config = &http_trackers[0]; @@ -320,10 +295,10 @@ mod tests { let stopped = HttpServer::new(Launcher::new(bind_to, tls)); let started = stopped .start( - tracker, - authentication_service, - whitelist_authorization, - stats_event_sender, + app_container.tracker, + app_container.authentication_service, + app_container.whitelist_authorization, + app_container.stats_event_sender, register.give_form(), ) .await diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index 844c18678..af51b7fb7 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -58,49 +58,20 @@ mod tests { use std::sync::Arc; use std::time::Duration; - use tokio::sync::RwLock; use torrust_tracker_test_helpers::configuration::ephemeral_public; use super::spawner::Spawner; use super::Server; - use 
crate::bootstrap::app::initialize_global_services; - use crate::core::authentication::handler::KeysHandler; - use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; - use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; - use crate::core::authentication::service; - use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; - use crate::core::whitelist; - use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; + use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; use crate::servers::registar::Registar; - use crate::servers::udp::server::banning::BanService; - use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; #[tokio::test] async fn it_should_be_able_to_start_and_stop() { let cfg = Arc::new(ephemeral_public()); - let (stats_event_sender, _stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); - let stats_event_sender = Arc::new(stats_event_sender); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - initialize_global_services(&cfg); - let database = initialize_database(&cfg); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( - &cfg.core, - &in_memory_whitelist.clone(), - )); - let _whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); - let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let _authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); - let _keys_handler = Arc::new(KeysHandler::new( - &db_key_repository.clone(), - &in_memory_key_repository.clone(), - )); - - let tracker = Arc::new(initialize_tracker(&cfg, 
&database, &whitelist_authorization)); + let app_container = initialize_app_container(&cfg); let udp_trackers = cfg.udp_trackers.clone().expect("missing UDP trackers configuration"); let config = &udp_trackers[0]; @@ -111,10 +82,10 @@ mod tests { let started = stopped .start( - tracker, - whitelist_authorization, - stats_event_sender, - ban_service, + app_container.tracker, + app_container.whitelist_authorization, + app_container.stats_event_sender, + app_container.ban_service, register.give_form(), config.cookie_lifetime, ) @@ -132,27 +103,9 @@ mod tests { async fn it_should_be_able_to_start_and_stop_with_wait() { let cfg = Arc::new(ephemeral_public()); - let (stats_event_sender, _stats_repository) = statistics::setup::factory(cfg.core.tracker_usage_statistics); - let stats_event_sender = Arc::new(stats_event_sender); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - initialize_global_services(&cfg); - let database = initialize_database(&cfg); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( - &cfg.core, - &in_memory_whitelist.clone(), - )); - let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); - let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let _authentication_service = Arc::new(service::AuthenticationService::new(&cfg.core, &in_memory_key_repository)); - let _keys_handler = Arc::new(KeysHandler::new( - &db_key_repository.clone(), - &in_memory_key_repository.clone(), - )); - - let tracker = Arc::new(initialize_tracker(&cfg, &database, &whitelist_authorization)); + let app_container = initialize_app_container(&cfg); let config = &cfg.udp_trackers.as_ref().unwrap().first().unwrap(); let bind_to = config.bind_address; @@ -162,10 +115,10 @@ mod tests { let started = stopped .start( - tracker, - whitelist_authorization, - stats_event_sender, - ban_service, + 
app_container.tracker, + app_container.whitelist_authorization, + app_container.stats_event_sender, + app_container.ban_service, register.give_form(), config.cookie_lifetime, ) From 03ef7f6178ae9c25f4fca925741c9d48bf809886 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 23 Jan 2025 16:23:28 +0000 Subject: [PATCH 141/802] refactor: [1201] extract InMemoryTorrentRepository --- src/core/mod.rs | 33 +++----- src/core/services/torrent.rs | 1 - src/core/torrent/mod.rs | 2 + src/core/torrent/repository/in_memory.rs | 103 +++++++++++++++++++++++ src/core/torrent/repository/mod.rs | 1 + 5 files changed, 116 insertions(+), 24 deletions(-) create mode 100644 src/core/torrent/repository/in_memory.rs create mode 100644 src/core/torrent/repository/mod.rs diff --git a/src/core/mod.rs b/src/core/mod.rs index 26ef69bfa..fd25a0506 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -449,22 +449,19 @@ pub mod whitelist; pub mod peer_tests; -use std::cmp::max; use std::net::IpAddr; use std::sync::Arc; use std::time::Duration; use bittorrent_primitives::info_hash::InfoHash; +use torrent::repository::in_memory::InMemoryTorrentRepository; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::core::{AnnounceData, ScrapeData}; use torrust_tracker_primitives::peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use torrust_tracker_torrent_repository::entry::EntrySync; -use torrust_tracker_torrent_repository::repository::Repository; -use self::torrent::Torrents; use crate::core::databases::Database; use crate::CurrentClock; @@ -489,7 +486,7 @@ pub struct Tracker { pub whitelist_authorization: Arc, /// The in-memory torrents repository. - torrents: Arc, + torrents: Arc, } /// How many peers the peer announcing wants in the announce response. 
@@ -549,7 +546,7 @@ impl Tracker { config: config.clone(), database: database.clone(), whitelist_authorization: whitelist_authorization.clone(), - torrents: Arc::default(), + torrents: Arc::new(InMemoryTorrentRepository::default()), }) } @@ -656,10 +653,7 @@ impl Tracker { /// It returns the data for a `scrape` response. fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { - match self.torrents.get(info_hash) { - Some(torrent_entry) => torrent_entry.get_swarm_metadata(), - None => SwarmMetadata::default(), - } + self.torrents.get_swarm_metadata(info_hash) } /// It loads the torrents from database into memory. It only loads the torrent entry list with the number of seeders for each torrent. @@ -684,10 +678,7 @@ impl Tracker { /// /// It filters out the client making the request. fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { - match self.torrents.get(info_hash) { - None => vec![], - Some(entry) => entry.get_peers_for_client(&peer.peer_addr, Some(max(limit, TORRENT_PEERS_LIMIT))), - } + self.torrents.get_peers_for(info_hash, peer, limit) } /// # Context: Tracker @@ -695,10 +686,7 @@ impl Tracker { /// Get torrent peers for a given torrent. 
#[must_use] pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { - match self.torrents.get(info_hash) { - None => vec![], - Some(entry) => entry.get_peers(Some(TORRENT_PEERS_LIMIT)), - } + self.torrents.get_torrent_peers(info_hash) } /// It updates the torrent entry in memory, it also stores in the database @@ -708,14 +696,14 @@ impl Tracker { /// # Context: Tracker #[must_use] pub fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { - let swarm_metadata_before = match self.torrents.get_swarm_metadata(info_hash) { + let swarm_metadata_before = match self.torrents.get_opt_swarm_metadata(info_hash) { Some(swarm_metadata) => swarm_metadata, None => SwarmMetadata::zeroed(), }; self.torrents.upsert_peer(info_hash, peer); - let swarm_metadata_after = match self.torrents.get_swarm_metadata(info_hash) { + let swarm_metadata_after = match self.torrents.get_opt_swarm_metadata(info_hash) { Some(swarm_metadata) => swarm_metadata, None => SwarmMetadata::zeroed(), }; @@ -748,7 +736,7 @@ impl Tracker { /// Panics if unable to get the torrent metrics. #[must_use] pub fn get_torrents_metrics(&self) -> TorrentsMetrics { - self.torrents.get_metrics() + self.torrents.get_torrents_metrics() } /// Remove inactive peers and (optionally) peerless torrents. 
@@ -1492,7 +1480,6 @@ mod tests { use aquatic_udp_protocol::AnnounceEvent; use torrust_tracker_torrent_repository::entry::EntrySync; - use torrust_tracker_torrent_repository::repository::Repository; use crate::core::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database}; @@ -1513,7 +1500,7 @@ mod tests { assert_eq!(swarm_stats.downloaded, 1); // Remove the newly updated torrent from memory - tracker.torrents.remove(&info_hash); + let _unused = tracker.torrents.remove(&info_hash); tracker.load_torrents_from_database().unwrap(); diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 9b7254098..7f99451eb 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -10,7 +10,6 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::peer; use torrust_tracker_torrent_repository::entry::EntrySync; -use torrust_tracker_torrent_repository::repository::Repository; use crate::core::Tracker; diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index 38311864b..3e3e065f2 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -25,6 +25,8 @@ //! - The number of peers that have NOT completed downloading the torrent and are still active, that means they are actively participating in the network. //! Peer that don not have a full copy of the torrent data are called "leechers". //! 
+pub mod repository; + use torrust_tracker_torrent_repository::TorrentsSkipMapMutexStd; pub type Torrents = TorrentsSkipMapMutexStd; // Currently Used diff --git a/src/core/torrent/repository/in_memory.rs b/src/core/torrent/repository/in_memory.rs new file mode 100644 index 000000000..6b1902d95 --- /dev/null +++ b/src/core/torrent/repository/in_memory.rs @@ -0,0 +1,103 @@ +use std::cmp::max; +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; +use torrust_tracker_torrent_repository::entry::EntrySync; +use torrust_tracker_torrent_repository::repository::Repository; +use torrust_tracker_torrent_repository::EntryMutexStd; + +use crate::core::torrent::Torrents; + +/// The in-memory torrents repository. +/// +/// There are many implementations of the repository trait. We tried with +/// different types of data structures, but the best performance was with +/// the one we use for production. We kept the other implementations for +/// reference. +#[derive(Debug, Default)] +pub struct InMemoryTorrentRepository { + /// The in-memory torrents repository implementation. + torrents: Arc, +} + +impl InMemoryTorrentRepository { + /// It inserts (or updates if it's already in the list) the peer in the + /// torrent entry. 
+ pub fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + self.torrents.upsert_peer(info_hash, peer); + } + + #[must_use] + pub fn remove(&self, key: &InfoHash) -> Option { + self.torrents.remove(key) + } + + pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + self.torrents.remove_inactive_peers(current_cutoff); + } + + pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + self.torrents.remove_peerless_torrents(policy); + } + + #[must_use] + pub fn get(&self, key: &InfoHash) -> Option { + self.torrents.get(key) + } + + #[must_use] + pub fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + self.torrents.get_paginated(pagination) + } + + /// It returns the data for a `scrape` response or empty if the torrent is + /// not found. + #[must_use] + pub fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { + match self.torrents.get(info_hash) { + Some(torrent_entry) => torrent_entry.get_swarm_metadata(), + None => SwarmMetadata::default(), + } + } + + /// It returns the data for a `scrape` response if the torrent is found. + #[must_use] + pub fn get_opt_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.torrents.get_swarm_metadata(info_hash) + } + + /// Get torrent peers for a given torrent and client. + /// + /// It filters out the client making the request. + #[must_use] + pub fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { + match self.torrents.get(info_hash) { + None => vec![], + Some(entry) => entry.get_peers_for_client(&peer.peer_addr, Some(max(limit, TORRENT_PEERS_LIMIT))), + } + } + + /// Get torrent peers for a given torrent. + #[must_use] + pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { + match self.torrents.get(info_hash) { + None => vec![], + Some(entry) => entry.get_peers(Some(TORRENT_PEERS_LIMIT)), + } + } + + /// It calculates and returns the general [`TorrentsMetrics`]. 
+ #[must_use] + pub fn get_torrents_metrics(&self) -> TorrentsMetrics { + self.torrents.get_metrics() + } + + pub fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + self.torrents.import_persistent(persistent_torrents); + } +} diff --git a/src/core/torrent/repository/mod.rs b/src/core/torrent/repository/mod.rs new file mode 100644 index 000000000..fa2e12699 --- /dev/null +++ b/src/core/torrent/repository/mod.rs @@ -0,0 +1 @@ +pub mod in_memory; From 9b5f776c28483e17d5dfa39f4026faacf7aa6546 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 23 Jan 2025 16:50:56 +0000 Subject: [PATCH 142/802] refactor: [#1201] exatrct DatabasePersistentTorrentRepository --- src/core/mod.rs | 11 ++++-- src/core/torrent/repository/mod.rs | 1 + src/core/torrent/repository/persisted.rs | 44 ++++++++++++++++++++++++ 3 files changed, 53 insertions(+), 3 deletions(-) create mode 100644 src/core/torrent/repository/persisted.rs diff --git a/src/core/mod.rs b/src/core/mod.rs index fd25a0506..4cb4a04ef 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -455,6 +455,7 @@ use std::time::Duration; use bittorrent_primitives::info_hash::InfoHash; use torrent::repository::in_memory::InMemoryTorrentRepository; +use torrent::repository::persisted::DatabasePersistentTorrentRepository; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::core::{AnnounceData, ScrapeData}; @@ -487,6 +488,9 @@ pub struct Tracker { /// The in-memory torrents repository. torrents: Arc, + + /// The persistent torrents repository. + db_torrent_repository: Arc, } /// How many peers the peer announcing wants in the announce response. 
@@ -547,6 +551,7 @@ impl Tracker { database: database.clone(), whitelist_authorization: whitelist_authorization.clone(), torrents: Arc::new(InMemoryTorrentRepository::default()), + db_torrent_repository: Arc::new(DatabasePersistentTorrentRepository::new(database)), }) } @@ -665,7 +670,7 @@ impl Tracker { /// /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. pub fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { - let persistent_torrents = self.database.load_persistent_torrents()?; + let persistent_torrents = self.db_torrent_repository.load_all()?; self.torrents.import_persistent(&persistent_torrents); @@ -723,7 +728,7 @@ impl Tracker { let completed = swarm_metadata.downloaded; let info_hash = *info_hash; - drop(self.database.save_persistent_torrent(&info_hash, completed)); + drop(self.db_torrent_repository.save(&info_hash, completed)); } } @@ -759,7 +764,7 @@ impl Tracker { /// /// Will return `Err` if unable to drop tables. pub fn drop_database_tables(&self) -> Result<(), databases::error::Error> { - // todo: this is only used for testing. WE have to pass the database + // todo: this is only used for testing. We have to pass the database // reference directly to the tests instead of via the tracker. 
self.database.drop_database_tables() } diff --git a/src/core/torrent/repository/mod.rs b/src/core/torrent/repository/mod.rs index fa2e12699..51723b68d 100644 --- a/src/core/torrent/repository/mod.rs +++ b/src/core/torrent/repository/mod.rs @@ -1 +1,2 @@ pub mod in_memory; +pub mod persisted; diff --git a/src/core/torrent/repository/persisted.rs b/src/core/torrent/repository/persisted.rs new file mode 100644 index 000000000..86a3db0e3 --- /dev/null +++ b/src/core/torrent/repository/persisted.rs @@ -0,0 +1,44 @@ +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::PersistentTorrents; + +use crate::core::databases::error::Error; +use crate::core::databases::Database; + +/// Torrent repository implementation that persists the torrents in a database. +/// +/// Not all the torrent in-memory data is persisted. For now only some of the +/// torrent metrics are persisted. +pub struct DatabasePersistentTorrentRepository { + /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) + /// or [`MySQL`](crate::core::databases::mysql) + database: Arc>, +} + +impl DatabasePersistentTorrentRepository { + #[must_use] + pub fn new(database: &Arc>) -> DatabasePersistentTorrentRepository { + Self { + database: database.clone(), + } + } + + /// It loads the persistent torrents from the database. + /// + /// # Errors + /// + /// Will return a database `Err` if unable to load. + pub fn load_all(&self) -> Result { + self.database.load_persistent_torrents() + } + + /// It saves the persistent torrent into the database. + /// + /// # Errors + /// + /// Will return a database `Err` if unable to save. 
+ pub fn save(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error> { + self.database.save_persistent_torrent(info_hash, downloaded) + } +} From f4dcb51f1f5eaa6e2a34c441d2b4d816807f56bd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 23 Jan 2025 16:52:55 +0000 Subject: [PATCH 143/802] refactor: [#1201] rename tracker field --- src/core/mod.rs | 32 ++++++++++++++++++-------------- src/core/services/torrent.rs | 10 +++++++--- 2 files changed, 25 insertions(+), 17 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 4cb4a04ef..c9a7e9dc3 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -487,7 +487,7 @@ pub struct Tracker { pub whitelist_authorization: Arc, /// The in-memory torrents repository. - torrents: Arc, + in_memory_torrent_repository: Arc, /// The persistent torrents repository. db_torrent_repository: Arc, @@ -550,7 +550,7 @@ impl Tracker { config: config.clone(), database: database.clone(), whitelist_authorization: whitelist_authorization.clone(), - torrents: Arc::new(InMemoryTorrentRepository::default()), + in_memory_torrent_repository: Arc::new(InMemoryTorrentRepository::default()), db_torrent_repository: Arc::new(DatabasePersistentTorrentRepository::new(database)), }) } @@ -658,7 +658,7 @@ impl Tracker { /// It returns the data for a `scrape` response. fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { - self.torrents.get_swarm_metadata(info_hash) + self.in_memory_torrent_repository.get_swarm_metadata(info_hash) } /// It loads the torrents from database into memory. It only loads the torrent entry list with the number of seeders for each torrent. 
@@ -672,7 +672,7 @@ impl Tracker { pub fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.db_torrent_repository.load_all()?; - self.torrents.import_persistent(&persistent_torrents); + self.in_memory_torrent_repository.import_persistent(&persistent_torrents); Ok(()) } @@ -683,7 +683,7 @@ impl Tracker { /// /// It filters out the client making the request. fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { - self.torrents.get_peers_for(info_hash, peer, limit) + self.in_memory_torrent_repository.get_peers_for(info_hash, peer, limit) } /// # Context: Tracker @@ -691,7 +691,7 @@ impl Tracker { /// Get torrent peers for a given torrent. #[must_use] pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { - self.torrents.get_torrent_peers(info_hash) + self.in_memory_torrent_repository.get_torrent_peers(info_hash) } /// It updates the torrent entry in memory, it also stores in the database @@ -701,14 +701,14 @@ impl Tracker { /// # Context: Tracker #[must_use] pub fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { - let swarm_metadata_before = match self.torrents.get_opt_swarm_metadata(info_hash) { + let swarm_metadata_before = match self.in_memory_torrent_repository.get_opt_swarm_metadata(info_hash) { Some(swarm_metadata) => swarm_metadata, None => SwarmMetadata::zeroed(), }; - self.torrents.upsert_peer(info_hash, peer); + self.in_memory_torrent_repository.upsert_peer(info_hash, peer); - let swarm_metadata_after = match self.torrents.get_opt_swarm_metadata(info_hash) { + let swarm_metadata_after = match self.in_memory_torrent_repository.get_opt_swarm_metadata(info_hash) { Some(swarm_metadata) => swarm_metadata, None => SwarmMetadata::zeroed(), }; @@ -741,7 +741,7 @@ impl Tracker { /// Panics if unable to get the torrent metrics. 
#[must_use] pub fn get_torrents_metrics(&self) -> TorrentsMetrics { - self.torrents.get_torrents_metrics() + self.in_memory_torrent_repository.get_torrents_metrics() } /// Remove inactive peers and (optionally) peerless torrents. @@ -751,10 +751,11 @@ impl Tracker { let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))) .unwrap_or_default(); - self.torrents.remove_inactive_peers(current_cutoff); + self.in_memory_torrent_repository.remove_inactive_peers(current_cutoff); if self.config.tracker_policy.remove_peerless_torrents { - self.torrents.remove_peerless_torrents(&self.config.tracker_policy); + self.in_memory_torrent_repository + .remove_peerless_torrents(&self.config.tracker_policy); } } @@ -1505,11 +1506,14 @@ mod tests { assert_eq!(swarm_stats.downloaded, 1); // Remove the newly updated torrent from memory - let _unused = tracker.torrents.remove(&info_hash); + let _unused = tracker.in_memory_torrent_repository.remove(&info_hash); tracker.load_torrents_from_database().unwrap(); - let torrent_entry = tracker.torrents.get(&info_hash).expect("it should be able to get entry"); + let torrent_entry = tracker + .in_memory_torrent_repository + .get(&info_hash) + .expect("it should be able to get entry"); // It persists the number of completed peers. assert_eq!(torrent_entry.get_swarm_metadata().downloaded, 1); diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 7f99451eb..032b526dd 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -45,7 +45,7 @@ pub struct BasicInfo { /// It returns all the information the tracker has about one torrent in a [Info] struct. 
pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Option { - let torrent_entry_option = tracker.torrents.get(info_hash); + let torrent_entry_option = tracker.in_memory_torrent_repository.get(info_hash); let torrent_entry = torrent_entry_option?; @@ -68,7 +68,7 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op pub async fn get_torrents_page(tracker: Arc, pagination: Option<&Pagination>) -> Vec { let mut basic_infos: Vec = vec![]; - for (info_hash, torrent_entry) in tracker.torrents.get_paginated(pagination) { + for (info_hash, torrent_entry) in tracker.in_memory_torrent_repository.get_paginated(pagination) { let stats = torrent_entry.get_swarm_metadata(); basic_infos.push(BasicInfo { @@ -87,7 +87,11 @@ pub async fn get_torrents(tracker: Arc, info_hashes: &[InfoHash]) -> Ve let mut basic_infos: Vec = vec![]; for info_hash in info_hashes { - if let Some(stats) = tracker.torrents.get(info_hash).map(|t| t.get_swarm_metadata()) { + if let Some(stats) = tracker + .in_memory_torrent_repository + .get(info_hash) + .map(|t| t.get_swarm_metadata()) + { basic_infos.push(BasicInfo { info_hash: *info_hash, seeders: u64::from(stats.complete), From 6332261af8a6a5e5d6de235283187d3440325133 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 23 Jan 2025 17:12:26 +0000 Subject: [PATCH 144/802] refactor: [#1201] extract TorrentsManager --- src/core/mod.rs | 35 ++++++++++---------- src/core/torrent/manager.rs | 64 +++++++++++++++++++++++++++++++++++++ src/core/torrent/mod.rs | 1 + 3 files changed, 81 insertions(+), 19 deletions(-) create mode 100644 src/core/torrent/manager.rs diff --git a/src/core/mod.rs b/src/core/mod.rs index c9a7e9dc3..61f194f3d 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -451,12 +451,11 @@ pub mod peer_tests; use std::net::IpAddr; use std::sync::Arc; -use std::time::Duration; use bittorrent_primitives::info_hash::InfoHash; +use torrent::manager::TorrentsManager; use 
torrent::repository::in_memory::InMemoryTorrentRepository; use torrent::repository::persisted::DatabasePersistentTorrentRepository; -use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::core::{AnnounceData, ScrapeData}; use torrust_tracker_primitives::peer; @@ -464,7 +463,6 @@ use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use crate::core::databases::Database; -use crate::CurrentClock; /// The domain layer tracker service. /// @@ -491,6 +489,9 @@ pub struct Tracker { /// The persistent torrents repository. db_torrent_repository: Arc, + + /// The service to run torrents tasks. + torrents_manager: Arc, } /// How many peers the peer announcing wants in the announce response. @@ -546,12 +547,20 @@ impl Tracker { database: &Arc>, whitelist_authorization: &Arc, ) -> Result { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(database)); + Ok(Tracker { config: config.clone(), database: database.clone(), whitelist_authorization: whitelist_authorization.clone(), - in_memory_torrent_repository: Arc::new(InMemoryTorrentRepository::default()), - db_torrent_repository: Arc::new(DatabasePersistentTorrentRepository::new(database)), + in_memory_torrent_repository: in_memory_torrent_repository.clone(), + db_torrent_repository: db_torrent_repository.clone(), + torrents_manager: Arc::new(TorrentsManager::new( + config, + &in_memory_torrent_repository, + &db_torrent_repository, + )), }) } @@ -670,11 +679,7 @@ impl Tracker { /// /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. 
pub fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { - let persistent_torrents = self.db_torrent_repository.load_all()?; - - self.in_memory_torrent_repository.import_persistent(&persistent_torrents); - - Ok(()) + self.torrents_manager.load_torrents_from_database() } /// # Context: Tracker @@ -748,15 +753,7 @@ impl Tracker { /// /// # Context: Tracker pub fn cleanup_torrents(&self) { - let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))) - .unwrap_or_default(); - - self.in_memory_torrent_repository.remove_inactive_peers(current_cutoff); - - if self.config.tracker_policy.remove_peerless_torrents { - self.in_memory_torrent_repository - .remove_peerless_torrents(&self.config.tracker_policy); - } + self.torrents_manager.cleanup_torrents(); } /// It drops the database tables. diff --git a/src/core/torrent/manager.rs b/src/core/torrent/manager.rs new file mode 100644 index 000000000..261376755 --- /dev/null +++ b/src/core/torrent/manager.rs @@ -0,0 +1,64 @@ +use std::sync::Arc; +use std::time::Duration; + +use torrust_tracker_clock::clock::Time; +use torrust_tracker_configuration::Core; + +use super::repository::in_memory::InMemoryTorrentRepository; +use super::repository::persisted::DatabasePersistentTorrentRepository; +use crate::core::databases; +use crate::CurrentClock; + +pub struct TorrentsManager { + /// The tracker configuration. + config: Core, + + /// The in-memory torrents repository. + in_memory_torrent_repository: Arc, + + /// The persistent torrents repository. 
+ db_torrent_repository: Arc, +} + +impl TorrentsManager { + #[must_use] + pub fn new( + config: &Core, + in_memory_torrent_repository: &Arc, + db_torrent_repository: &Arc, + ) -> Self { + Self { + config: config.clone(), + in_memory_torrent_repository: in_memory_torrent_repository.clone(), + db_torrent_repository: db_torrent_repository.clone(), + } + } + + /// It loads the torrents from database into memory. It only loads the + /// torrent entry list with the number of seeders for each torrent. Peers + /// data is not persisted. + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. + pub fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { + let persistent_torrents = self.db_torrent_repository.load_all()?; + + self.in_memory_torrent_repository.import_persistent(&persistent_torrents); + + Ok(()) + } + + /// Remove inactive peers and (optionally) peerless torrents. + pub fn cleanup_torrents(&self) { + let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))) + .unwrap_or_default(); + + self.in_memory_torrent_repository.remove_inactive_peers(current_cutoff); + + if self.config.tracker_policy.remove_peerless_torrents { + self.in_memory_torrent_repository + .remove_peerless_torrents(&self.config.tracker_policy); + } + } +} diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index 3e3e065f2..95a5ff1eb 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -25,6 +25,7 @@ //! - The number of peers that have NOT completed downloading the torrent and are still active, that means they are actively participating in the network. //! Peer that don not have a full copy of the torrent data are called "leechers". //! 
+pub mod manager; pub mod repository; use torrust_tracker_torrent_repository::TorrentsSkipMapMutexStd; From 4e3dbae05b8e7999f08bf8035575499c300bd496 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 23 Jan 2025 17:34:13 +0000 Subject: [PATCH 145/802] refactor: [#1201] inject new extracted sercvies in core tracker --- src/app_test.rs | 23 +++- src/bootstrap/app.rs | 19 ++- src/core/mod.rs | 76 ++++++++--- src/core/services/mod.rs | 15 ++- src/core/services/statistics/mod.rs | 21 +++- src/core/services/torrent.rs | 154 ++++++++++++++++++----- src/servers/http/v1/handlers/announce.rs | 20 ++- src/servers/http/v1/handlers/scrape.rs | 80 ++++++++++-- src/servers/http/v1/services/announce.rs | 43 +++++-- src/servers/http/v1/services/scrape.rs | 45 +++++-- src/servers/udp/handlers.rs | 68 ++++++++-- 11 files changed, 471 insertions(+), 93 deletions(-) diff --git a/src/app_test.rs b/src/app_test.rs index 929a23418..5f189f391 100644 --- a/src/app_test.rs +++ b/src/app_test.rs @@ -9,6 +9,9 @@ use crate::core::authentication::key::repository::persisted::DatabaseKeyReposito use crate::core::authentication::service::{self, AuthenticationService}; use crate::core::databases::Database; use crate::core::services::initialize_database; +use crate::core::torrent::manager::TorrentsManager; +use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; +use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use crate::core::whitelist; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; @@ -22,6 +25,9 @@ pub fn initialize_tracker_dependencies( Arc, Arc, Arc, + Arc, + Arc, + Arc, ) { let database = initialize_database(config); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); @@ -36,6 +42,21 @@ pub fn initialize_tracker_dependencies( &db_key_repository.clone(), &in_memory_key_repository.clone(), )); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let 
db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let torrents_manager = Arc::new(TorrentsManager::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); - (database, in_memory_whitelist, whitelist_authorization, authentication_service) + ( + database, + in_memory_whitelist, + whitelist_authorization, + authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) } diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index a0c7887cf..ea7d7f030 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -27,6 +27,9 @@ use crate::core::authentication::key::repository::in_memory::InMemoryKeyReposito use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::core::authentication::service; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; +use crate::core::torrent::manager::TorrentsManager; +use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; +use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use crate::core::whitelist; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::servers::udp::server::banning::BanService; @@ -103,8 +106,22 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { &db_key_repository.clone(), &in_memory_key_repository.clone(), )); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let torrents_manager = Arc::new(TorrentsManager::new( + &configuration.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); - let tracker = Arc::new(initialize_tracker(configuration, &database, &whitelist_authorization)); + let tracker = Arc::new(initialize_tracker( + configuration, + &database, + 
&whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + )); AppContainer { tracker, diff --git a/src/core/mod.rs b/src/core/mod.rs index 61f194f3d..161607857 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -546,21 +546,17 @@ impl Tracker { config: &Core, database: &Arc>, whitelist_authorization: &Arc, + in_memory_torrent_repository: &Arc, + db_torrent_repository: &Arc, + torrents_manager: &Arc, ) -> Result { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(database)); - Ok(Tracker { config: config.clone(), database: database.clone(), whitelist_authorization: whitelist_authorization.clone(), in_memory_torrent_repository: in_memory_torrent_repository.clone(), db_torrent_repository: db_torrent_repository.clone(), - torrents_manager: Arc::new(TorrentsManager::new( - config, - &in_memory_torrent_repository, - &db_torrent_repository, - )), + torrents_manager: torrents_manager.clone(), }) } @@ -802,21 +798,49 @@ mod tests { fn public_tracker() -> Tracker { let config = configuration::ephemeral_public(); - let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = - initialize_tracker_dependencies(&config); - - initialize_tracker(&config, &database, &whitelist_authorization) + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); + + initialize_tracker( + &config, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + ) } fn whitelisted_tracker() -> (Tracker, Arc, Arc) { let config = configuration::ephemeral_listed(); - let (database, in_memory_whitelist, whitelist_authorization, _authentication_service) = - 
initialize_tracker_dependencies(&config); + let ( + database, + in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let tracker = initialize_tracker(&config, &database, &whitelist_authorization); + let tracker = initialize_tracker( + &config, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + ); (tracker, whitelist_authorization, whitelist_manager) } @@ -825,10 +849,24 @@ mod tests { let mut config = configuration::ephemeral_listed(); config.core.tracker_policy.persistent_torrent_completed_stat = true; - let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = - initialize_tracker_dependencies(&config); - - initialize_tracker(&config, &database, &whitelist_authorization) + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); + + initialize_tracker( + &config, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + ) } fn sample_info_hash() -> InfoHash { diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index 611ea24d2..f5d9bd375 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -14,6 +14,9 @@ use torrust_tracker_configuration::v2_0_0::database; use torrust_tracker_configuration::Configuration; use super::databases::{self, Database}; +use super::torrent::manager::TorrentsManager; +use super::torrent::repository::in_memory::InMemoryTorrentRepository; +use super::torrent::repository::persisted::DatabasePersistentTorrentRepository; use super::whitelist; 
use super::whitelist::manager::WhiteListManager; use super::whitelist::repository::in_memory::InMemoryWhitelist; @@ -30,8 +33,18 @@ pub fn initialize_tracker( config: &Configuration, database: &Arc>, whitelist_authorization: &Arc, + in_memory_torrent_repository: &Arc, + db_torrent_repository: &Arc, + torrents_manager: &Arc, ) -> Tracker { - match Tracker::new(&Arc::new(config).core, database, whitelist_authorization) { + match Tracker::new( + &Arc::new(config).core, + database, + whitelist_authorization, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) { Ok(tracker) => tracker, Err(error) => { panic!("{}", error) diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index cc59bcf12..9e4696f48 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -132,12 +132,27 @@ mod tests { async fn the_statistics_service_should_return_the_tracker_metrics() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = - initialize_tracker_dependencies(&config); + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); + let (_stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_repository = Arc::new(stats_repository); - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); + let tracker = Arc::new(initialize_tracker( + &config, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + )); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 032b526dd..1e3f67eba 100644 --- 
a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -145,10 +145,24 @@ mod tests { async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = - initialize_tracker_dependencies(&config); - - let tracker = initialize_tracker(&config, &database, &whitelist_authorization); + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); + + let tracker = initialize_tracker( + &config, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + ); let tracker = Arc::new(tracker); @@ -165,10 +179,24 @@ mod tests { async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = - initialize_tracker_dependencies(&config); - - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); + + let tracker = Arc::new(initialize_tracker( + &config, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + )); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -211,10 +239,24 @@ mod tests { async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, 
_authentication_service) = - initialize_tracker_dependencies(&config); - - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); + + let tracker = Arc::new(initialize_tracker( + &config, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + )); let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; @@ -225,10 +267,24 @@ mod tests { async fn should_return_a_summarized_info_for_all_torrents() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = - initialize_tracker_dependencies(&config); - - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); + + let tracker = Arc::new(initialize_tracker( + &config, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + )); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -252,10 +308,24 @@ mod tests { async fn should_allow_limiting_the_number_of_torrents_in_the_result() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = - initialize_tracker_dependencies(&config); - - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + 
_authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); + + let tracker = Arc::new(initialize_tracker( + &config, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + )); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -277,10 +347,24 @@ mod tests { async fn should_allow_using_pagination_in_the_result() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = - initialize_tracker_dependencies(&config); - - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); + + let tracker = Arc::new(initialize_tracker( + &config, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + )); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -311,10 +395,24 @@ mod tests { async fn should_return_torrents_ordered_by_info_hash() { let config = tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = - initialize_tracker_dependencies(&config); - - let tracker = Arc::new(initialize_tracker(&config, &database, &whitelist_authorization)); + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); + + let tracker = Arc::new(initialize_tracker( + &config, + &database, + 
&whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + )); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index c42981d4c..b18671422 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -274,12 +274,26 @@ mod tests { /// Initialize tracker's dependencies and tracker. fn initialize_tracker_and_deps(config: &Configuration) -> TrackerAndDeps { - let (database, _in_memory_whitelist, whitelist_authorization, authentication_service) = - initialize_tracker_dependencies(config); + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); - let tracker = Arc::new(initialize_tracker(config, &database, &whitelist_authorization)); + let tracker = Arc::new(initialize_tracker( + config, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + )); (tracker, stats_event_sender, whitelist_authorization, authentication_service) } diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index de4610a61..e619ba120 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -151,13 +151,27 @@ mod tests { ) { let config = configuration::ephemeral_private(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication_service) = - initialize_tracker_dependencies(&config); + let ( + database, + _in_memory_whitelist, + 
whitelist_authorization, + authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); ( - initialize_tracker(&config, &database, &whitelist_authorization), + initialize_tracker( + &config, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + ), stats_event_sender, authentication_service, ) @@ -170,13 +184,27 @@ mod tests { ) { let config = configuration::ephemeral_listed(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication_service) = - initialize_tracker_dependencies(&config); + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); ( - initialize_tracker(&config, &database, &whitelist_authorization), + initialize_tracker( + &config, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + ), stats_event_sender, authentication_service, ) @@ -189,13 +217,27 @@ mod tests { ) { let config = configuration::ephemeral_with_reverse_proxy(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication_service) = - initialize_tracker_dependencies(&config); + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); ( - initialize_tracker(&config, 
&database, &whitelist_authorization), + initialize_tracker( + &config, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + ), stats_event_sender, authentication_service, ) @@ -208,13 +250,27 @@ mod tests { ) { let config = configuration::ephemeral_without_reverse_proxy(); - let (database, _in_memory_whitelist, whitelist_authorization, authentication_service) = - initialize_tracker_dependencies(&config); + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); ( - initialize_tracker(&config, &database, &whitelist_authorization), + initialize_tracker( + &config, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + ), stats_event_sender, authentication_service, ) diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 018348d7e..99724f728 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -73,12 +73,26 @@ mod tests { fn public_tracker() -> (Tracker, Arc>>) { let config = configuration::ephemeral_public(); - let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = - initialize_tracker_dependencies(&config); + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); - let tracker = 
initialize_tracker(&config, &database, &whitelist_authorization); + let tracker = initialize_tracker( + &config, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + ); (tracker, stats_event_sender) } @@ -132,10 +146,25 @@ mod tests { fn test_tracker_factory() -> Tracker { let config = configuration::ephemeral(); - let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = - initialize_tracker_dependencies(&config); - - Tracker::new(&config.core, &database, &whitelist_authorization).unwrap() + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); + + Tracker::new( + &config.core, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + ) + .unwrap() } #[tokio::test] diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 9ad741234..c9e657d11 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -87,10 +87,24 @@ mod tests { fn public_tracker() -> Tracker { let config = configuration::ephemeral_public(); - let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = - initialize_tracker_dependencies(&config); - - initialize_tracker(&config, &database, &whitelist_authorization) + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); + + initialize_tracker( + &config, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + ) } fn sample_info_hashes() -> Vec { @@ -116,10 +130,25 @@ mod tests { fn 
test_tracker_factory() -> Tracker { let config = configuration::ephemeral(); - let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = - initialize_tracker_dependencies(&config); - - Tracker::new(&config.core, &database, &whitelist_authorization).unwrap() + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); + + Tracker::new( + &config.core, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + ) + .unwrap() } mod with_real_data { diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index feeca4e40..840b789a1 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -516,13 +516,27 @@ mod tests { } fn initialize_tracker_and_deps(config: &Configuration) -> TrackerAndDeps { - let (database, in_memory_whitelist, whitelist_authorization, _authentication_service) = - initialize_tracker_dependencies(config); + let ( + database, + in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let tracker = Arc::new(initialize_tracker(config, &database, &whitelist_authorization)); + let tracker = Arc::new(initialize_tracker( + config, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + )); ( tracker, @@ -630,10 +644,27 @@ mod tests { fn test_tracker_factory() -> (Arc, Arc) { let config = 
tracker_configuration(); - let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = - initialize_tracker_dependencies(&config); - - let tracker = Arc::new(Tracker::new(&config.core, &database, &whitelist_authorization).unwrap()); + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); + + let tracker = Arc::new( + Tracker::new( + &config.core, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + ) + .unwrap(), + ); (tracker, whitelist_authorization) } @@ -1378,8 +1409,15 @@ mod tests { async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let config = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let (database, _in_memory_whitelist, whitelist_authorization, _authentication_service) = - initialize_tracker_dependencies(&config); + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); stats_event_sender_mock @@ -1390,7 +1428,17 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let tracker = Arc::new(core::Tracker::new(&config.core, &database, &whitelist_authorization).unwrap()); + let tracker = Arc::new( + core::Tracker::new( + &config.core, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + ) + .unwrap(), + ); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); From 6fb632ee841dc2a10421a20ba935585c17099d85 
Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 23 Jan 2025 17:48:17 +0000 Subject: [PATCH 146/802] refactor: [#1201] remove duplicate code --- src/core/services/torrent.rs | 148 ++++++----------------- src/servers/http/v1/handlers/announce.rs | 1 + src/servers/udp/handlers.rs | 1 + 3 files changed, 37 insertions(+), 113 deletions(-) diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 1e3f67eba..f8da88d6f 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -107,10 +107,37 @@ pub async fn get_torrents(tracker: Arc, info_hashes: &[InfoHash]) -> Ve #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; + use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + use crate::app_test::initialize_tracker_dependencies; + use crate::core::services::initialize_tracker; + use crate::core::Tracker; + + fn initialize_tracker_and_deps(config: &Configuration) -> Arc { + let ( + database, + _in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(config); + + Arc::new(initialize_tracker( + config, + &database, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + &torrents_manager, + )) + } + fn sample_peer() -> peer::Peer { peer::Peer { peer_id: PeerId(*b"-qB00000000000000000"), @@ -134,7 +161,7 @@ mod tests { use crate::app_test::initialize_tracker_dependencies; use crate::core::services::initialize_tracker; - use crate::core::services::torrent::tests::sample_peer; + use crate::core::services::torrent::tests::{initialize_tracker_and_deps, sample_peer}; use crate::core::services::torrent::{get_torrent_info, Info}; pub fn tracker_configuration() -> Configuration { @@ -179,24 +206,7 @@ mod tests { 
async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { let config = tracker_configuration(); - let ( - database, - _in_memory_whitelist, - whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - torrents_manager, - ) = initialize_tracker_dependencies(&config); - - let tracker = Arc::new(initialize_tracker( - &config, - &database, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - &torrents_manager, - )); + let tracker = initialize_tracker_and_deps(&config); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -220,15 +230,12 @@ mod tests { mod searching_for_torrents { use std::str::FromStr; - use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; - use crate::app_test::initialize_tracker_dependencies; - use crate::core::services::initialize_tracker; - use crate::core::services::torrent::tests::sample_peer; + use crate::core::services::torrent::tests::{initialize_tracker_and_deps, sample_peer}; use crate::core::services::torrent::{get_torrents_page, BasicInfo, Pagination}; pub fn tracker_configuration() -> Configuration { @@ -239,24 +246,7 @@ mod tests { async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let config = tracker_configuration(); - let ( - database, - _in_memory_whitelist, - whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - torrents_manager, - ) = initialize_tracker_dependencies(&config); - - let tracker = Arc::new(initialize_tracker( - &config, - &database, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - &torrents_manager, - )); + let tracker = initialize_tracker_and_deps(&config); let torrents = get_torrents_page(tracker.clone(), 
Some(&Pagination::default())).await; @@ -267,24 +257,7 @@ mod tests { async fn should_return_a_summarized_info_for_all_torrents() { let config = tracker_configuration(); - let ( - database, - _in_memory_whitelist, - whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - torrents_manager, - ) = initialize_tracker_dependencies(&config); - - let tracker = Arc::new(initialize_tracker( - &config, - &database, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - &torrents_manager, - )); + let tracker = initialize_tracker_and_deps(&config); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -308,24 +281,7 @@ mod tests { async fn should_allow_limiting_the_number_of_torrents_in_the_result() { let config = tracker_configuration(); - let ( - database, - _in_memory_whitelist, - whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - torrents_manager, - ) = initialize_tracker_dependencies(&config); - - let tracker = Arc::new(initialize_tracker( - &config, - &database, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - &torrents_manager, - )); + let tracker = initialize_tracker_and_deps(&config); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -347,24 +303,7 @@ mod tests { async fn should_allow_using_pagination_in_the_result() { let config = tracker_configuration(); - let ( - database, - _in_memory_whitelist, - whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - torrents_manager, - ) = initialize_tracker_dependencies(&config); - - let tracker = Arc::new(initialize_tracker( - &config, - &database, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - 
&torrents_manager, - )); + let tracker = initialize_tracker_and_deps(&config); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -395,24 +334,7 @@ mod tests { async fn should_return_torrents_ordered_by_info_hash() { let config = tracker_configuration(); - let ( - database, - _in_memory_whitelist, - whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - torrents_manager, - ) = initialize_tracker_dependencies(&config); - - let tracker = Arc::new(initialize_tracker( - &config, - &database, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - &torrents_manager, - )); + let tracker = initialize_tracker_and_deps(&config); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index b18671422..b0b54fa0d 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -283,6 +283,7 @@ mod tests { db_torrent_repository, torrents_manager, ) = initialize_tracker_dependencies(config); + let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 840b789a1..6abbd95c6 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -525,6 +525,7 @@ mod tests { db_torrent_repository, torrents_manager, ) = initialize_tracker_dependencies(config); + let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); From 
a4a2d678bb5a2cd3dfa87f05cf9f748e9efbc1bc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 23 Jan 2025 17:54:18 +0000 Subject: [PATCH 147/802] refactor: [#1201] remove pub fn from tracker --- src/core/mod.rs | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 161607857..a1b496c56 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -666,18 +666,6 @@ impl Tracker { self.in_memory_torrent_repository.get_swarm_metadata(info_hash) } - /// It loads the torrents from database into memory. It only loads the torrent entry list with the number of seeders for each torrent. - /// Peers data is not persisted. - /// - /// # Context: Tracker - /// - /// # Errors - /// - /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. - pub fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { - self.torrents_manager.load_torrents_from_database() - } - /// # Context: Tracker /// /// Get torrent peers for a given torrent and client. 
@@ -1543,7 +1531,7 @@ mod tests { // Remove the newly updated torrent from memory let _unused = tracker.in_memory_torrent_repository.remove(&info_hash); - tracker.load_torrents_from_database().unwrap(); + tracker.torrents_manager.load_torrents_from_database().unwrap(); let torrent_entry = tracker .in_memory_torrent_repository From bdc3f22a8c5e67a7a67a2589ced713373d227697 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 23 Jan 2025 18:01:09 +0000 Subject: [PATCH 148/802] refactor: [#1201] add database to app container y environments --- src/bootstrap/app.rs | 1 + src/container.rs | 2 ++ tests/servers/api/environment.rs | 5 +++++ tests/servers/http/environment.rs | 5 +++++ tests/servers/udp/environment.rs | 5 +++++ 5 files changed, 18 insertions(+) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index ea7d7f030..ea5dc41b6 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -124,6 +124,7 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { )); AppContainer { + database, tracker, keys_handler, authentication_service, diff --git a/src/container.rs b/src/container.rs index 14c4b5d7b..d8c95c42b 100644 --- a/src/container.rs +++ b/src/container.rs @@ -4,6 +4,7 @@ use tokio::sync::RwLock; use crate::core::authentication::handler::KeysHandler; use crate::core::authentication::service::AuthenticationService; +use crate::core::databases::Database; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; use crate::core::whitelist::manager::WhiteListManager; @@ -11,6 +12,7 @@ use crate::core::{whitelist, Tracker}; use crate::servers::udp::server::banning::BanService; pub struct AppContainer { + pub database: Arc>, pub tracker: Arc, pub keys_handler: Arc, pub authentication_service: Arc, diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index f014df36f..8967ff830 100644 --- a/tests/servers/api/environment.rs +++ 
b/tests/servers/api/environment.rs @@ -10,6 +10,7 @@ use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_g use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; use torrust_tracker_lib::core::authentication::handler::KeysHandler; use torrust_tracker_lib::core::authentication::service::AuthenticationService; +use torrust_tracker_lib::core::databases::Database; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::whitelist::manager::WhiteListManager; @@ -24,6 +25,7 @@ where S: std::fmt::Debug + std::fmt::Display, { pub config: Arc, + pub database: Arc>, pub tracker: Arc, pub keys_handler: Arc, pub authentication_service: Arc, @@ -61,6 +63,7 @@ impl Environment { Self { config, + database: app_container.database.clone(), tracker: app_container.tracker.clone(), keys_handler: app_container.keys_handler.clone(), authentication_service: app_container.authentication_service.clone(), @@ -78,6 +81,7 @@ impl Environment { Environment { config: self.config, + database: self.database.clone(), tracker: self.tracker.clone(), keys_handler: self.keys_handler.clone(), authentication_service: self.authentication_service.clone(), @@ -112,6 +116,7 @@ impl Environment { pub async fn stop(self) -> Environment { Environment { config: self.config, + database: self.database, tracker: self.tracker, keys_handler: self.keys_handler, authentication_service: self.authentication_service, diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 81b6a12e2..80c042a21 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -7,6 +7,7 @@ use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_g use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; use torrust_tracker_lib::core::authentication::handler::KeysHandler; use 
torrust_tracker_lib::core::authentication::service::AuthenticationService; +use torrust_tracker_lib::core::databases::Database; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::whitelist::manager::WhiteListManager; @@ -17,6 +18,7 @@ use torrust_tracker_primitives::peer; pub struct Environment { pub config: Arc, + pub database: Arc>, pub tracker: Arc, pub keys_handler: Arc, pub authentication_service: Arc, @@ -57,6 +59,7 @@ impl Environment { Self { config, + database: app_container.database.clone(), tracker: app_container.tracker.clone(), keys_handler: app_container.keys_handler.clone(), authentication_service: app_container.authentication_service.clone(), @@ -73,6 +76,7 @@ impl Environment { pub async fn start(self) -> Environment { Environment { config: self.config, + database: self.database.clone(), tracker: self.tracker.clone(), keys_handler: self.keys_handler.clone(), authentication_service: self.authentication_service.clone(), @@ -104,6 +108,7 @@ impl Environment { pub async fn stop(self) -> Environment { Environment { config: self.config, + database: self.database, tracker: self.tracker, keys_handler: self.keys_handler, authentication_service: self.authentication_service, diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index b728509c0..c02e35e6e 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -5,6 +5,7 @@ use bittorrent_primitives::info_hash::InfoHash; use tokio::sync::RwLock; use torrust_tracker_configuration::{Configuration, UdpTracker, DEFAULT_TIMEOUT}; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; +use torrust_tracker_lib::core::databases::Database; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use 
torrust_tracker_lib::core::{whitelist, Tracker}; @@ -20,6 +21,7 @@ where S: std::fmt::Debug + std::fmt::Display, { pub config: Arc, + pub database: Arc>, pub tracker: Arc, pub whitelist_authorization: Arc, pub stats_event_sender: Arc>>, @@ -57,6 +59,7 @@ impl Environment { Self { config, + database: app_container.database.clone(), tracker: app_container.tracker.clone(), whitelist_authorization: app_container.whitelist_authorization.clone(), stats_event_sender: app_container.stats_event_sender.clone(), @@ -72,6 +75,7 @@ impl Environment { let cookie_lifetime = self.config.cookie_lifetime; Environment { config: self.config, + database: self.database.clone(), tracker: self.tracker.clone(), whitelist_authorization: self.whitelist_authorization.clone(), stats_event_sender: self.stats_event_sender.clone(), @@ -109,6 +113,7 @@ impl Environment { Environment { config: self.config, + database: self.database, tracker: self.tracker, whitelist_authorization: self.whitelist_authorization, stats_event_sender: self.stats_event_sender, From b3fcdb4c156feb610ad719afca90ead8912f29db Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 23 Jan 2025 18:11:47 +0000 Subject: [PATCH 149/802] refactor: [#1201] remove direct dependency on database from tracker --- src/bootstrap/app.rs | 1 - src/core/mod.rs | 26 ++----------------- src/core/services/mod.rs | 2 -- src/core/services/statistics/mod.rs | 3 +-- src/core/services/torrent.rs | 6 ++--- src/servers/http/v1/handlers/announce.rs | 5 ++-- src/servers/http/v1/handlers/scrape.rs | 12 +++------ src/servers/http/v1/services/announce.rs | 6 ++--- src/servers/http/v1/services/scrape.rs | 6 ++--- src/servers/udp/handlers.rs | 9 +++---- tests/servers/api/mod.rs | 11 +++++--- .../api/v1/contract/context/auth_key.rs | 8 +++--- .../api/v1/contract/context/whitelist.rs | 6 ++--- 13 files changed, 32 insertions(+), 69 deletions(-) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index ea5dc41b6..2340eb0ac 100644 --- a/src/bootstrap/app.rs 
+++ b/src/bootstrap/app.rs @@ -116,7 +116,6 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { let tracker = Arc::new(initialize_tracker( configuration, - &database, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, diff --git a/src/core/mod.rs b/src/core/mod.rs index a1b496c56..18c99d3b9 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -462,8 +462,6 @@ use torrust_tracker_primitives::peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use crate::core::databases::Database; - /// The domain layer tracker service. /// /// Its main responsibility is to handle the `announce` and `scrape` requests. @@ -477,10 +475,6 @@ pub struct Tracker { /// The tracker configuration. config: Core, - /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) - /// or [`MySQL`](crate::core::databases::mysql) - database: Arc>, - /// The service to check is a torrent is whitelisted. pub whitelist_authorization: Arc, @@ -544,7 +538,6 @@ impl Tracker { /// Will return a `databases::error::Error` if unable to connect to database. The `Tracker` is responsible for the persistence. pub fn new( config: &Core, - database: &Arc>, whitelist_authorization: &Arc, in_memory_torrent_repository: &Arc, db_torrent_repository: &Arc, @@ -552,7 +545,6 @@ impl Tracker { ) -> Result { Ok(Tracker { config: config.clone(), - database: database.clone(), whitelist_authorization: whitelist_authorization.clone(), in_memory_torrent_repository: in_memory_torrent_repository.clone(), db_torrent_repository: db_torrent_repository.clone(), @@ -739,17 +731,6 @@ impl Tracker { pub fn cleanup_torrents(&self) { self.torrents_manager.cleanup_torrents(); } - - /// It drops the database tables. - /// - /// # Errors - /// - /// Will return `Err` if unable to drop tables. 
- pub fn drop_database_tables(&self) -> Result<(), databases::error::Error> { - // todo: this is only used for testing. We have to pass the database - // reference directly to the tests instead of via the tracker. - self.database.drop_database_tables() - } } #[must_use] @@ -787,7 +768,7 @@ mod tests { let config = configuration::ephemeral_public(); let ( - database, + _database, _in_memory_whitelist, whitelist_authorization, _authentication_service, @@ -798,7 +779,6 @@ mod tests { initialize_tracker( &config, - &database, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, @@ -823,7 +803,6 @@ mod tests { let tracker = initialize_tracker( &config, - &database, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, @@ -838,7 +817,7 @@ mod tests { config.core.tracker_policy.persistent_torrent_completed_stat = true; let ( - database, + _database, _in_memory_whitelist, whitelist_authorization, _authentication_service, @@ -849,7 +828,6 @@ mod tests { initialize_tracker( &config, - &database, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index f5d9bd375..3a684ac8f 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -31,7 +31,6 @@ use crate::core::Tracker; #[must_use] pub fn initialize_tracker( config: &Configuration, - database: &Arc>, whitelist_authorization: &Arc, in_memory_torrent_repository: &Arc, db_torrent_repository: &Arc, @@ -39,7 +38,6 @@ pub fn initialize_tracker( ) -> Tracker { match Tracker::new( &Arc::new(config).core, - database, whitelist_authorization, in_memory_torrent_repository, db_torrent_repository, diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index 9e4696f48..01e49df71 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -133,7 +133,7 @@ mod tests { let config = tracker_configuration(); let ( - 
database, + _database, _in_memory_whitelist, whitelist_authorization, _authentication_service, @@ -147,7 +147,6 @@ mod tests { let tracker = Arc::new(initialize_tracker( &config, - &database, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index f8da88d6f..dc07405ee 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -119,7 +119,7 @@ mod tests { fn initialize_tracker_and_deps(config: &Configuration) -> Arc { let ( - database, + _database, _in_memory_whitelist, whitelist_authorization, _authentication_service, @@ -130,7 +130,6 @@ mod tests { Arc::new(initialize_tracker( config, - &database, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, @@ -173,7 +172,7 @@ mod tests { let config = tracker_configuration(); let ( - database, + _database, _in_memory_whitelist, whitelist_authorization, _authentication_service, @@ -184,7 +183,6 @@ mod tests { let tracker = initialize_tracker( &config, - &database, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index b0b54fa0d..4088ab73c 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -275,7 +275,7 @@ mod tests { /// Initialize tracker's dependencies and tracker. 
fn initialize_tracker_and_deps(config: &Configuration) -> TrackerAndDeps { let ( - database, + _database, _in_memory_whitelist, whitelist_authorization, authentication_service, @@ -283,13 +283,12 @@ mod tests { db_torrent_repository, torrents_manager, ) = initialize_tracker_dependencies(config); - + let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); let tracker = Arc::new(initialize_tracker( config, - &database, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index e619ba120..24b1c783d 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -152,7 +152,7 @@ mod tests { let config = configuration::ephemeral_private(); let ( - database, + _database, _in_memory_whitelist, whitelist_authorization, authentication_service, @@ -166,7 +166,6 @@ mod tests { ( initialize_tracker( &config, - &database, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, @@ -185,7 +184,7 @@ mod tests { let config = configuration::ephemeral_listed(); let ( - database, + _database, _in_memory_whitelist, whitelist_authorization, authentication_service, @@ -199,7 +198,6 @@ mod tests { ( initialize_tracker( &config, - &database, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, @@ -218,7 +216,7 @@ mod tests { let config = configuration::ephemeral_with_reverse_proxy(); let ( - database, + _database, _in_memory_whitelist, whitelist_authorization, authentication_service, @@ -232,7 +230,6 @@ mod tests { ( initialize_tracker( &config, - &database, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, @@ -251,7 +248,7 @@ mod tests { let config = configuration::ephemeral_without_reverse_proxy(); let ( - database, + _database, 
_in_memory_whitelist, whitelist_authorization, authentication_service, @@ -265,7 +262,6 @@ mod tests { ( initialize_tracker( &config, - &database, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 99724f728..56b2dd1e3 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -74,7 +74,7 @@ mod tests { let config = configuration::ephemeral_public(); let ( - database, + _database, _in_memory_whitelist, whitelist_authorization, _authentication_service, @@ -87,7 +87,6 @@ mod tests { let tracker = initialize_tracker( &config, - &database, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, @@ -147,7 +146,7 @@ mod tests { let config = configuration::ephemeral(); let ( - database, + _database, _in_memory_whitelist, whitelist_authorization, _authentication_service, @@ -158,7 +157,6 @@ mod tests { Tracker::new( &config.core, - &database, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index c9e657d11..ea4cb2702 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -88,7 +88,7 @@ mod tests { let config = configuration::ephemeral_public(); let ( - database, + _database, _in_memory_whitelist, whitelist_authorization, _authentication_service, @@ -99,7 +99,6 @@ mod tests { initialize_tracker( &config, - &database, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, @@ -131,7 +130,7 @@ mod tests { let config = configuration::ephemeral(); let ( - database, + _database, _in_memory_whitelist, whitelist_authorization, _authentication_service, @@ -142,7 +141,6 @@ mod tests { Tracker::new( &config.core, - &database, &whitelist_authorization, &in_memory_torrent_repository, 
&db_torrent_repository, diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 6abbd95c6..c85efc1fa 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -525,14 +525,13 @@ mod tests { db_torrent_repository, torrents_manager, ) = initialize_tracker_dependencies(config); - + let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); let tracker = Arc::new(initialize_tracker( config, - &database, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, @@ -646,7 +645,7 @@ mod tests { let config = tracker_configuration(); let ( - database, + _database, _in_memory_whitelist, whitelist_authorization, _authentication_service, @@ -658,7 +657,6 @@ mod tests { let tracker = Arc::new( Tracker::new( &config.core, - &database, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, @@ -1411,7 +1409,7 @@ mod tests { let config = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); let ( - database, + _database, _in_memory_whitelist, whitelist_authorization, _authentication_service, @@ -1432,7 +1430,6 @@ mod tests { let tracker = Arc::new( core::Tracker::new( &config.core, - &database, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, diff --git a/tests/servers/api/mod.rs b/tests/servers/api/mod.rs index 278fd869d..92bc19a5f 100644 --- a/tests/servers/api/mod.rs +++ b/tests/servers/api/mod.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use torrust_tracker_lib::core::Tracker; +use torrust_tracker_lib::core::databases::Database; use torrust_tracker_lib::servers::apis::server; pub mod connection_info; @@ -9,12 +9,15 @@ pub mod v1; pub type Started = environment::Environment; -/// It forces a database error by dropping all 
tables. -/// That makes any query fail. +/// It forces a database error by dropping all tables. That makes all queries +/// fail. +/// /// code-review: +/// /// Alternatively we could: +/// /// - Inject a database mock in the future. /// - Inject directly the database reference passed to the Tracker type. -pub fn force_database_error(tracker: &Arc) { +pub fn force_database_error(tracker: &Arc>) { tracker.drop_database_tables().unwrap(); } diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs index 73860c9c2..3b7d2d6ba 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -126,7 +126,7 @@ async fn should_fail_when_the_auth_key_cannot_be_generated() { let env = Started::new(&configuration::ephemeral().into()).await; - force_database_error(&env.tracker); + force_database_error(&env.database); let request_id = Uuid::new_v4(); @@ -297,7 +297,7 @@ async fn should_fail_when_the_auth_key_cannot_be_deleted() { .await .unwrap(); - force_database_error(&env.tracker); + force_database_error(&env.database); let request_id = Uuid::new_v4(); @@ -403,7 +403,7 @@ async fn should_fail_when_keys_cannot_be_reloaded() { .await .unwrap(); - force_database_error(&env.tracker); + force_database_error(&env.database); let response = Client::new(env.get_connection_info()) .reload_keys(Some(headers_with_request_id(request_id))) @@ -556,7 +556,7 @@ mod deprecated_generate_key_endpoint { let env = Started::new(&configuration::ephemeral().into()).await; - force_database_error(&env.tracker); + force_database_error(&env.database); let request_id = Uuid::new_v4(); let seconds_valid = 60; diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs index aef1db4f1..78850d3bf 100644 --- a/tests/servers/api/v1/contract/context/whitelist.rs +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -111,7 +111,7 @@ 
async fn should_fail_when_the_torrent_cannot_be_whitelisted() { let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - force_database_error(&env.tracker); + force_database_error(&env.database); let request_id = Uuid::new_v4(); @@ -239,7 +239,7 @@ async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { let info_hash = InfoHash::from_str(&hash).unwrap(); env.whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); - force_database_error(&env.tracker); + force_database_error(&env.database); let request_id = Uuid::new_v4(); @@ -340,7 +340,7 @@ async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { let info_hash = InfoHash::from_str(&hash).unwrap(); env.whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); - force_database_error(&env.tracker); + force_database_error(&env.database); let request_id = Uuid::new_v4(); From 3a2e8f0d4c3a1d06d77fd83b7f2d065f250e8390 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 23 Jan 2025 18:17:37 +0000 Subject: [PATCH 150/802] refactor: [#1201] reorganize methods in tracker Grouping similar methods. --- src/core/mod.rs | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 18c99d3b9..d50d2b545 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -653,28 +653,6 @@ impl Tracker { scrape_data } - /// It returns the data for a `scrape` response. - fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { - self.in_memory_torrent_repository.get_swarm_metadata(info_hash) - } - - /// # Context: Tracker - /// - /// Get torrent peers for a given torrent and client. - /// - /// It filters out the client making the request. 
- fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { - self.in_memory_torrent_repository.get_peers_for(info_hash, peer, limit) - } - - /// # Context: Tracker - /// - /// Get torrent peers for a given torrent. - #[must_use] - pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { - self.in_memory_torrent_repository.get_torrent_peers(info_hash) - } - /// It updates the torrent entry in memory, it also stores in the database /// the torrent info data which is persistent, and finally return the data /// needed for a `announce` request response. @@ -713,6 +691,28 @@ impl Tracker { } } + /// It returns the data for a `scrape` response. + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { + self.in_memory_torrent_repository.get_swarm_metadata(info_hash) + } + + /// # Context: Tracker + /// + /// Get torrent peers for a given torrent and client. + /// + /// It filters out the client making the request. + fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { + self.in_memory_torrent_repository.get_peers_for(info_hash, peer, limit) + } + + /// # Context: Tracker + /// + /// Get torrent peers for a given torrent. 
+ #[must_use] + pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { + self.in_memory_torrent_repository.get_torrent_peers(info_hash) + } + /// It calculates and returns the general `Tracker` /// [`TorrentsMetrics`] /// From 612f7293d5a2190e710852dfd51330248e50c61a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 23 Jan 2025 18:28:16 +0000 Subject: [PATCH 151/802] refactor: [#1201] remove tracker dependency on TorrentsManager --- src/app.rs | 2 +- src/bootstrap/app.rs | 4 ++- src/bootstrap/jobs/torrent_cleanup.rs | 12 ++++----- src/container.rs | 6 +++++ src/core/mod.rs | 33 +++++++----------------- src/core/services/mod.rs | 3 --- src/core/services/statistics/mod.rs | 3 +-- src/core/services/torrent.rs | 6 ++--- src/servers/http/v1/handlers/announce.rs | 3 +-- src/servers/http/v1/handlers/scrape.rs | 12 +++------ src/servers/http/v1/services/announce.rs | 6 ++--- src/servers/http/v1/services/scrape.rs | 6 ++--- src/servers/udp/handlers.rs | 9 +++---- 13 files changed, 41 insertions(+), 64 deletions(-) diff --git a/src/app.rs b/src/app.rs index e41f227e7..c71237443 100644 --- a/src/app.rs +++ b/src/app.rs @@ -137,7 +137,7 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< // Start runners to remove torrents without peers, every interval if config.core.inactive_peer_cleanup_interval > 0 { - jobs.push(torrent_cleanup::start_job(&config.core, &app_container.tracker)); + jobs.push(torrent_cleanup::start_job(&config.core, &app_container.torrents_manager)); } // Start Health Check API diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 2340eb0ac..294b2ca73 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -119,7 +119,6 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - &torrents_manager, )); AppContainer { @@ -132,6 +131,9 @@ pub fn initialize_app_container(configuration: 
&Configuration) -> AppContainer { stats_event_sender, stats_repository, whitelist_manager, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, } } diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index 6abb4f26b..45e6e9e68 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -17,7 +17,7 @@ use tokio::task::JoinHandle; use torrust_tracker_configuration::Core; use tracing::instrument; -use crate::core; +use crate::core::torrent::manager::TorrentsManager; /// It starts a jobs for cleaning up the torrent data in the tracker. /// @@ -25,9 +25,9 @@ use crate::core; /// /// Refer to [`torrust-tracker-configuration documentation`](https://docs.rs/torrust-tracker-configuration) for more info about that option. #[must_use] -#[instrument(skip(config, tracker))] -pub fn start_job(config: &Core, tracker: &Arc) -> JoinHandle<()> { - let weak_tracker = std::sync::Arc::downgrade(tracker); +#[instrument(skip(config, torrents_manager))] +pub fn start_job(config: &Core, torrents_manager: &Arc) -> JoinHandle<()> { + let weak_torrents_manager = std::sync::Arc::downgrade(torrents_manager); let interval = config.inactive_peer_cleanup_interval; tokio::spawn(async move { @@ -42,10 +42,10 @@ pub fn start_job(config: &Core, tracker: &Arc) -> JoinHandle<()> break; } _ = interval.tick() => { - if let Some(tracker) = weak_tracker.upgrade() { + if let Some(torrents_manager) = weak_torrents_manager.upgrade() { let start_time = Utc::now().time(); tracing::info!("Cleaning up torrents.."); - tracker.cleanup_torrents(); + torrents_manager.cleanup_torrents(); tracing::info!("Cleaned up torrents in: {}ms", (Utc::now().time() - start_time).num_milliseconds()); } else { break; diff --git a/src/container.rs b/src/container.rs index d8c95c42b..8407d0b69 100644 --- a/src/container.rs +++ b/src/container.rs @@ -7,6 +7,9 @@ use crate::core::authentication::service::AuthenticationService; use 
crate::core::databases::Database; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; +use crate::core::torrent::manager::TorrentsManager; +use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; +use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use crate::core::whitelist::manager::WhiteListManager; use crate::core::{whitelist, Tracker}; use crate::servers::udp::server::banning::BanService; @@ -21,4 +24,7 @@ pub struct AppContainer { pub stats_event_sender: Arc>>, pub stats_repository: Arc, pub whitelist_manager: Arc, + pub in_memory_torrent_repository: Arc, + pub db_torrent_repository: Arc, + pub torrents_manager: Arc, } diff --git a/src/core/mod.rs b/src/core/mod.rs index d50d2b545..f2483b21e 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -453,7 +453,6 @@ use std::net::IpAddr; use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; -use torrent::manager::TorrentsManager; use torrent::repository::in_memory::InMemoryTorrentRepository; use torrent::repository::persisted::DatabasePersistentTorrentRepository; use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT}; @@ -483,9 +482,6 @@ pub struct Tracker { /// The persistent torrents repository. db_torrent_repository: Arc, - - /// The service to run torrents tasks. - torrents_manager: Arc, } /// How many peers the peer announcing wants in the announce response. 
@@ -541,14 +537,12 @@ impl Tracker { whitelist_authorization: &Arc, in_memory_torrent_repository: &Arc, db_torrent_repository: &Arc, - torrents_manager: &Arc, ) -> Result { Ok(Tracker { config: config.clone(), whitelist_authorization: whitelist_authorization.clone(), in_memory_torrent_repository: in_memory_torrent_repository.clone(), db_torrent_repository: db_torrent_repository.clone(), - torrents_manager: torrents_manager.clone(), }) } @@ -724,13 +718,6 @@ impl Tracker { pub fn get_torrents_metrics(&self) -> TorrentsMetrics { self.in_memory_torrent_repository.get_torrents_metrics() } - - /// Remove inactive peers and (optionally) peerless torrents. - /// - /// # Context: Tracker - pub fn cleanup_torrents(&self) { - self.torrents_manager.cleanup_torrents(); - } } #[must_use] @@ -761,6 +748,7 @@ mod tests { use crate::app_test::initialize_tracker_dependencies; use crate::core::peer::Peer; use crate::core::services::{initialize_tracker, initialize_whitelist_manager}; + use crate::core::torrent::manager::TorrentsManager; use crate::core::whitelist::manager::WhiteListManager; use crate::core::{whitelist, TorrentsMetrics, Tracker}; @@ -774,7 +762,7 @@ mod tests { _authentication_service, in_memory_torrent_repository, db_torrent_repository, - torrents_manager, + _torrents_manager, ) = initialize_tracker_dependencies(&config); initialize_tracker( @@ -782,7 +770,6 @@ mod tests { &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - &torrents_manager, ) } @@ -796,7 +783,7 @@ mod tests { _authentication_service, in_memory_torrent_repository, db_torrent_repository, - torrents_manager, + _torrents_manager, ) = initialize_tracker_dependencies(&config); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); @@ -806,13 +793,12 @@ mod tests { &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - &torrents_manager, ); (tracker, whitelist_authorization, whitelist_manager) } - pub 
fn tracker_persisting_torrents_in_database() -> Tracker { + pub fn tracker_persisting_torrents_in_database() -> (Tracker, Arc) { let mut config = configuration::ephemeral_listed(); config.core.tracker_policy.persistent_torrent_completed_stat = true; @@ -826,13 +812,14 @@ mod tests { torrents_manager, ) = initialize_tracker_dependencies(&config); - initialize_tracker( + let tracker = initialize_tracker( &config, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - &torrents_manager, - ) + ); + + (tracker, torrents_manager) } fn sample_info_hash() -> InfoHash { @@ -1492,7 +1479,7 @@ mod tests { #[tokio::test] async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { - let tracker = tracker_persisting_torrents_in_database(); + let (tracker, torrents_manager) = tracker_persisting_torrents_in_database(); let info_hash = sample_info_hash(); @@ -1509,7 +1496,7 @@ mod tests { // Remove the newly updated torrent from memory let _unused = tracker.in_memory_torrent_repository.remove(&info_hash); - tracker.torrents_manager.load_torrents_from_database().unwrap(); + torrents_manager.load_torrents_from_database().unwrap(); let torrent_entry = tracker .in_memory_torrent_repository diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index 3a684ac8f..a9bca2df7 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -14,7 +14,6 @@ use torrust_tracker_configuration::v2_0_0::database; use torrust_tracker_configuration::Configuration; use super::databases::{self, Database}; -use super::torrent::manager::TorrentsManager; use super::torrent::repository::in_memory::InMemoryTorrentRepository; use super::torrent::repository::persisted::DatabasePersistentTorrentRepository; use super::whitelist; @@ -34,14 +33,12 @@ pub fn initialize_tracker( whitelist_authorization: &Arc, in_memory_torrent_repository: &Arc, db_torrent_repository: &Arc, - torrents_manager: &Arc, ) -> Tracker { match Tracker::new( 
&Arc::new(config).core, whitelist_authorization, in_memory_torrent_repository, db_torrent_repository, - torrents_manager, ) { Ok(tracker) => tracker, Err(error) => { diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index 01e49df71..7c2233efd 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -139,7 +139,7 @@ mod tests { _authentication_service, in_memory_torrent_repository, db_torrent_repository, - torrents_manager, + _torrents_manager, ) = initialize_tracker_dependencies(&config); let (_stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); @@ -150,7 +150,6 @@ mod tests { &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - &torrents_manager, )); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index dc07405ee..c2ffa05aa 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -125,7 +125,7 @@ mod tests { _authentication_service, in_memory_torrent_repository, db_torrent_repository, - torrents_manager, + _torrents_manager, ) = initialize_tracker_dependencies(config); Arc::new(initialize_tracker( @@ -133,7 +133,6 @@ mod tests { &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - &torrents_manager, )) } @@ -178,7 +177,7 @@ mod tests { _authentication_service, in_memory_torrent_repository, db_torrent_repository, - torrents_manager, + _torrents_manager, ) = initialize_tracker_dependencies(&config); let tracker = initialize_tracker( @@ -186,7 +185,6 @@ mod tests { &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - &torrents_manager, ); let tracker = Arc::new(tracker); diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 4088ab73c..a9567fb81 100644 --- 
a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -281,7 +281,7 @@ mod tests { authentication_service, in_memory_torrent_repository, db_torrent_repository, - torrents_manager, + _torrents_manager, ) = initialize_tracker_dependencies(config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); @@ -292,7 +292,6 @@ mod tests { &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - &torrents_manager, )); (tracker, stats_event_sender, whitelist_authorization, authentication_service) diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 24b1c783d..116d717a1 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -158,7 +158,7 @@ mod tests { authentication_service, in_memory_torrent_repository, db_torrent_repository, - torrents_manager, + _torrents_manager, ) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); @@ -169,7 +169,6 @@ mod tests { &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - &torrents_manager, ), stats_event_sender, authentication_service, @@ -190,7 +189,7 @@ mod tests { authentication_service, in_memory_torrent_repository, db_torrent_repository, - torrents_manager, + _torrents_manager, ) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); @@ -201,7 +200,6 @@ mod tests { &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - &torrents_manager, ), stats_event_sender, authentication_service, @@ -222,7 +220,7 @@ mod tests { authentication_service, in_memory_torrent_repository, db_torrent_repository, - torrents_manager, + _torrents_manager, ) = 
initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); @@ -233,7 +231,6 @@ mod tests { &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - &torrents_manager, ), stats_event_sender, authentication_service, @@ -254,7 +251,7 @@ mod tests { authentication_service, in_memory_torrent_repository, db_torrent_repository, - torrents_manager, + _torrents_manager, ) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); @@ -265,7 +262,6 @@ mod tests { &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - &torrents_manager, ), stats_event_sender, authentication_service, diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 56b2dd1e3..322bc80eb 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -80,7 +80,7 @@ mod tests { _authentication_service, in_memory_torrent_repository, db_torrent_repository, - torrents_manager, + _torrents_manager, ) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); @@ -90,7 +90,6 @@ mod tests { &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - &torrents_manager, ); (tracker, stats_event_sender) @@ -152,7 +151,7 @@ mod tests { _authentication_service, in_memory_torrent_repository, db_torrent_repository, - torrents_manager, + _torrents_manager, ) = initialize_tracker_dependencies(&config); Tracker::new( @@ -160,7 +159,6 @@ mod tests { &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - &torrents_manager, ) .unwrap() } diff --git a/src/servers/http/v1/services/scrape.rs 
b/src/servers/http/v1/services/scrape.rs index ea4cb2702..299938f84 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -94,7 +94,7 @@ mod tests { _authentication_service, in_memory_torrent_repository, db_torrent_repository, - torrents_manager, + _torrents_manager, ) = initialize_tracker_dependencies(&config); initialize_tracker( @@ -102,7 +102,6 @@ mod tests { &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - &torrents_manager, ) } @@ -136,7 +135,7 @@ mod tests { _authentication_service, in_memory_torrent_repository, db_torrent_repository, - torrents_manager, + _torrents_manager, ) = initialize_tracker_dependencies(&config); Tracker::new( @@ -144,7 +143,6 @@ mod tests { &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - &torrents_manager, ) .unwrap() } diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index c85efc1fa..5584c167b 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -523,7 +523,7 @@ mod tests { _authentication_service, in_memory_torrent_repository, db_torrent_repository, - torrents_manager, + _torrents_manager, ) = initialize_tracker_dependencies(config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); @@ -535,7 +535,6 @@ mod tests { &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - &torrents_manager, )); ( @@ -651,7 +650,7 @@ mod tests { _authentication_service, in_memory_torrent_repository, db_torrent_repository, - torrents_manager, + _torrents_manager, ) = initialize_tracker_dependencies(&config); let tracker = Arc::new( @@ -660,7 +659,6 @@ mod tests { &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - &torrents_manager, ) .unwrap(), ); @@ -1415,7 +1413,7 @@ mod tests { _authentication_service, in_memory_torrent_repository, db_torrent_repository, - 
torrents_manager, + _torrents_manager, ) = initialize_tracker_dependencies(&config); let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); @@ -1433,7 +1431,6 @@ mod tests { &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - &torrents_manager, ) .unwrap(), ); From 94673d677c59d75b41c4846b5014f98f073e8bbd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 24 Jan 2025 08:34:07 +0000 Subject: [PATCH 152/802] refactor: [#1203] inline methods in core tracker For the InMemoryTorrentRepository. --- src/core/mod.rs | 64 ++++++++--------------------- src/core/services/statistics/mod.rs | 2 +- src/servers/udp/handlers.rs | 12 +++--- tests/servers/http/v1/contract.rs | 8 ++-- 4 files changed, 29 insertions(+), 57 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index f2483b21e..d00db88a8 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -459,7 +459,6 @@ use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::core::{AnnounceData, ScrapeData}; use torrust_tracker_primitives::peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; /// The domain layer tracker service. /// @@ -478,7 +477,7 @@ pub struct Tracker { pub whitelist_authorization: Arc, /// The in-memory torrents repository. - in_memory_torrent_repository: Arc, + pub in_memory_torrent_repository: Arc, /// The persistent torrents repository. 
db_torrent_repository: Arc, @@ -619,7 +618,9 @@ impl Tracker { let stats = self.upsert_peer_and_get_stats(info_hash, peer); - let peers = self.get_peers_for(info_hash, peer, peers_wanted.limit()); + let peers = self + .in_memory_torrent_repository + .get_peers_for(info_hash, peer, peers_wanted.limit()); AnnounceData { peers, @@ -638,7 +639,7 @@ impl Tracker { for info_hash in info_hashes { let swarm_metadata = match self.whitelist_authorization.authorize(info_hash).await { - Ok(()) => self.get_swarm_metadata(info_hash), + Ok(()) => self.in_memory_torrent_repository.get_swarm_metadata(info_hash), Err(_) => SwarmMetadata::zeroed(), }; scrape_data.add_file(info_hash, swarm_metadata); @@ -684,40 +685,6 @@ impl Tracker { drop(self.db_torrent_repository.save(&info_hash, completed)); } } - - /// It returns the data for a `scrape` response. - fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { - self.in_memory_torrent_repository.get_swarm_metadata(info_hash) - } - - /// # Context: Tracker - /// - /// Get torrent peers for a given torrent and client. - /// - /// It filters out the client making the request. - fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { - self.in_memory_torrent_repository.get_peers_for(info_hash, peer, limit) - } - - /// # Context: Tracker - /// - /// Get torrent peers for a given torrent. - #[must_use] - pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { - self.in_memory_torrent_repository.get_torrent_peers(info_hash) - } - - /// It calculates and returns the general `Tracker` - /// [`TorrentsMetrics`] - /// - /// # Context: Tracker - /// - /// # Panics - /// Panics if unable to get the torrent metrics. 
- #[must_use] - pub fn get_torrents_metrics(&self) -> TorrentsMetrics { - self.in_memory_torrent_repository.get_torrents_metrics() - } } #[must_use] @@ -742,6 +709,7 @@ mod tests { use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration; @@ -750,7 +718,7 @@ mod tests { use crate::core::services::{initialize_tracker, initialize_whitelist_manager}; use crate::core::torrent::manager::TorrentsManager; use crate::core::whitelist::manager::WhiteListManager; - use crate::core::{whitelist, TorrentsMetrics, Tracker}; + use crate::core::{whitelist, Tracker}; fn public_tracker() -> Tracker { let config = configuration::ephemeral_public(); @@ -910,7 +878,7 @@ mod tests { async fn should_collect_torrent_metrics() { let tracker = public_tracker(); - let torrents_metrics = tracker.get_torrents_metrics(); + let torrents_metrics = tracker.in_memory_torrent_repository.get_torrents_metrics(); assert_eq!( torrents_metrics, @@ -932,7 +900,7 @@ mod tests { let _ = tracker.upsert_peer_and_get_stats(&info_hash, &peer); - let peers = tracker.get_torrent_peers(&info_hash); + let peers = tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash); assert_eq!(peers, vec![Arc::new(peer)]); } @@ -975,7 +943,7 @@ mod tests { let _ = tracker.upsert_peer_and_get_stats(&info_hash, &peer); } - let peers = tracker.get_torrent_peers(&info_hash); + let peers = tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash); assert_eq!(peers.len(), 74); } @@ -989,7 +957,9 @@ mod tests { let _ = tracker.upsert_peer_and_get_stats(&info_hash, &peer); - let peers = tracker.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); + let peers = tracker + .in_memory_torrent_repository + .get_peers_for(&info_hash, &peer, 
TORRENT_PEERS_LIMIT); assert_eq!(peers, vec![]); } @@ -1019,7 +989,9 @@ mod tests { let _ = tracker.upsert_peer_and_get_stats(&info_hash, &peer); } - let peers = tracker.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); + let peers = tracker + .in_memory_torrent_repository + .get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); assert_eq!(peers.len(), 74); } @@ -1030,7 +1002,7 @@ mod tests { let _ = tracker.upsert_peer_and_get_stats(&sample_info_hash(), &leecher()); - let torrent_metrics = tracker.get_torrents_metrics(); + let torrent_metrics = tracker.in_memory_torrent_repository.get_torrents_metrics(); assert_eq!( torrent_metrics, @@ -1054,7 +1026,7 @@ mod tests { let result_a = start_time.elapsed(); let start_time = std::time::Instant::now(); - let torrent_metrics = tracker.get_torrents_metrics(); + let torrent_metrics = tracker.in_memory_torrent_repository.get_torrents_metrics(); let result_b = start_time.elapsed(); assert_eq!( diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index 7c2233efd..fefe17933 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -68,7 +68,7 @@ pub async fn get_metrics( ban_service: Arc>, stats_repository: Arc, ) -> TrackerMetrics { - let torrents_metrics = tracker.get_torrents_metrics(); + let torrents_metrics = tracker.in_memory_torrent_repository.get_torrents_metrics(); let stats = stats_repository.get_stats().await; let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 5584c167b..fd2a37683 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -916,7 +916,7 @@ mod tests { .await .unwrap(); - let peers = tracker.get_torrent_peers(&info_hash.0.into()); + let peers = tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer_id) @@ 
-1001,7 +1001,7 @@ mod tests { .await .unwrap(); - let peers = tracker.get_torrent_peers(&info_hash.0.into()); + let peers = tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); } @@ -1133,7 +1133,7 @@ mod tests { .await .unwrap(); - let peers = tracker.get_torrent_peers(&info_hash.0.into()); + let peers = tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap(); @@ -1200,7 +1200,7 @@ mod tests { .await .unwrap(); - let peers = tracker.get_torrent_peers(&info_hash.0.into()); + let peers = tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer_id) @@ -1288,7 +1288,7 @@ mod tests { .await .unwrap(); - let peers = tracker.get_torrent_peers(&info_hash.0.into()); + let peers = tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); // When using IPv6 the tracker converts the remote client ip into a IPv4 address assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); @@ -1466,7 +1466,7 @@ mod tests { .await .unwrap(); - let peers = tracker.get_torrent_peers(&info_hash.0.into()); + let peers = tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap(); diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 0aafbd213..31aae9b50 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -831,7 +831,7 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = env.tracker.get_torrent_peers(&info_hash); + let peers = env.tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; 
assert_eq!(peer_addr.ip(), client_ip); @@ -869,7 +869,7 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = env.tracker.get_torrent_peers(&info_hash); + let peers = env.tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), env.tracker.get_maybe_external_ip().unwrap()); @@ -911,7 +911,7 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = env.tracker.get_torrent_peers(&info_hash); + let peers = env.tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), env.tracker.get_maybe_external_ip().unwrap()); @@ -951,7 +951,7 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = env.tracker.get_torrent_peers(&info_hash); + let peers = env.tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); From 2ac68f6935966ff8ecfed6b75a2a2f0d7a2cf197 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 24 Jan 2025 09:28:35 +0000 Subject: [PATCH 153/802] refactor: [#1203] move test --- src/core/mod.rs | 17 ---------------- src/core/torrent/repository/in_memory.rs | 26 ++++++++++++++++++++++++ 2 files changed, 26 insertions(+), 17 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index d00db88a8..ff5b03e09 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -874,23 +874,6 @@ mod tests { } } - #[tokio::test] - async fn should_collect_torrent_metrics() { - let tracker = public_tracker(); - - let torrents_metrics = tracker.in_memory_torrent_repository.get_torrents_metrics(); - - assert_eq!( - torrents_metrics, - TorrentsMetrics { - complete: 0, - downloaded: 0, - incomplete: 0, - torrents: 0 - } - ); - } - #[tokio::test] async fn it_should_return_the_peers_for_a_given_torrent() { let tracker = public_tracker(); diff --git 
a/src/core/torrent/repository/in_memory.rs b/src/core/torrent/repository/in_memory.rs index 6b1902d95..7d469a0f5 100644 --- a/src/core/torrent/repository/in_memory.rs +++ b/src/core/torrent/repository/in_memory.rs @@ -101,3 +101,29 @@ impl InMemoryTorrentRepository { self.torrents.import_persistent(persistent_torrents); } } + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + + use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; + + #[tokio::test] + async fn should_collect_torrent_metrics() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); + + assert_eq!( + torrents_metrics, + TorrentsMetrics { + complete: 0, + downloaded: 0, + incomplete: 0, + torrents: 0 + } + ); + } +} From 0f1b2fb9ce551088daca799fc007733889fe2424 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 24 Jan 2025 09:53:40 +0000 Subject: [PATCH 154/802] refactor: [#1203] use InMemoryTorrentRepository directly in core tracker tests --- src/core/mod.rs | 48 ++++++++++++++++++++++++++++++++++++------------ 1 file changed, 36 insertions(+), 12 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index ff5b03e09..d6d079cf6 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -717,6 +717,7 @@ mod tests { use crate::core::peer::Peer; use crate::core::services::{initialize_tracker, initialize_whitelist_manager}; use crate::core::torrent::manager::TorrentsManager; + use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::whitelist::manager::WhiteListManager; use crate::core::{whitelist, Tracker}; @@ -741,6 +742,29 @@ mod tests { ) } + fn public_tracker_and_in_memory_torrents_repository() -> (Arc, Arc) { + let config = configuration::ephemeral_public(); + + let ( + _database, + _in_memory_whitelist, + whitelist_authorization, + _authentication_service, + 
in_memory_torrent_repository, + db_torrent_repository, + _torrents_manager, + ) = initialize_tracker_dependencies(&config); + + let tracker = Arc::new(initialize_tracker( + &config, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + (tracker, in_memory_torrent_repository) + } + fn whitelisted_tracker() -> (Tracker, Arc, Arc) { let config = configuration::ephemeral_listed(); @@ -766,7 +790,7 @@ mod tests { (tracker, whitelist_authorization, whitelist_manager) } - pub fn tracker_persisting_torrents_in_database() -> (Tracker, Arc) { + pub fn tracker_persisting_torrents_in_database() -> (Tracker, Arc, Arc) { let mut config = configuration::ephemeral_listed(); config.core.tracker_policy.persistent_torrent_completed_stat = true; @@ -787,7 +811,7 @@ mod tests { &db_torrent_repository, ); - (tracker, torrents_manager) + (tracker, torrents_manager, in_memory_torrent_repository) } fn sample_info_hash() -> InfoHash { @@ -876,14 +900,14 @@ mod tests { #[tokio::test] async fn it_should_return_the_peers_for_a_given_torrent() { - let tracker = public_tracker(); + let (tracker, in_memory_torrent_repository) = public_tracker_and_in_memory_torrents_repository(); let info_hash = sample_info_hash(); let peer = sample_peer(); let _ = tracker.upsert_peer_and_get_stats(&info_hash, &peer); - let peers = tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash); + let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); assert_eq!(peers, vec![Arc::new(peer)]); } @@ -908,7 +932,7 @@ mod tests { #[tokio::test] async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { - let tracker = public_tracker(); + let (tracker, in_memory_torrent_repository) = public_tracker_and_in_memory_torrents_repository(); let info_hash = sample_info_hash(); @@ -926,7 +950,7 @@ mod tests { let _ = tracker.upsert_peer_and_get_stats(&info_hash, &peer); } - let peers = tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash); + 
let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); assert_eq!(peers.len(), 74); } @@ -981,11 +1005,11 @@ mod tests { #[tokio::test] async fn it_should_return_the_torrent_metrics() { - let tracker = public_tracker(); + let (tracker, in_memory_torrent_repository) = public_tracker_and_in_memory_torrents_repository(); let _ = tracker.upsert_peer_and_get_stats(&sample_info_hash(), &leecher()); - let torrent_metrics = tracker.in_memory_torrent_repository.get_torrents_metrics(); + let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); assert_eq!( torrent_metrics, @@ -1000,7 +1024,7 @@ mod tests { #[tokio::test] async fn it_should_get_many_the_torrent_metrics() { - let tracker = public_tracker(); + let (tracker, in_memory_torrent_repository) = public_tracker_and_in_memory_torrents_repository(); let start_time = std::time::Instant::now(); for i in 0..1_000_000 { @@ -1009,7 +1033,7 @@ mod tests { let result_a = start_time.elapsed(); let start_time = std::time::Instant::now(); - let torrent_metrics = tracker.in_memory_torrent_repository.get_torrents_metrics(); + let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); let result_b = start_time.elapsed(); assert_eq!( @@ -1434,7 +1458,7 @@ mod tests { #[tokio::test] async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { - let (tracker, torrents_manager) = tracker_persisting_torrents_in_database(); + let (tracker, torrents_manager, in_memory_torrent_repository) = tracker_persisting_torrents_in_database(); let info_hash = sample_info_hash(); @@ -1449,7 +1473,7 @@ mod tests { assert_eq!(swarm_stats.downloaded, 1); // Remove the newly updated torrent from memory - let _unused = tracker.in_memory_torrent_repository.remove(&info_hash); + let _unused = in_memory_torrent_repository.remove(&info_hash); torrents_manager.load_torrents_from_database().unwrap(); From 046578d87706137bb9e9fbc3d07c734b89c4ba80 Mon Sep 17 00:00:00 2001 From: Jose 
Celano Date: Fri, 24 Jan 2025 10:34:41 +0000 Subject: [PATCH 155/802] refactor: [#1203] use directly the InMemoryTorrentRepository --- src/app.rs | 2 +- src/bootstrap/jobs/tracker_apis.rs | 14 +- src/core/mod.rs | 12 +- src/core/services/statistics/mod.rs | 15 +- src/core/services/torrent.rs | 92 +++++------ src/servers/apis/routes.rs | 7 +- src/servers/apis/server.rs | 16 +- src/servers/apis/v1/context/stats/handlers.rs | 4 +- src/servers/apis/v1/context/stats/routes.rs | 6 +- .../apis/v1/context/torrent/handlers.rs | 20 ++- src/servers/apis/v1/context/torrent/routes.rs | 11 +- src/servers/apis/v1/routes.rs | 8 +- src/servers/udp/handlers.rs | 145 ++++++++++++++---- tests/servers/api/environment.rs | 7 +- tests/servers/http/environment.rs | 5 + tests/servers/http/v1/contract.rs | 8 +- 16 files changed, 233 insertions(+), 139 deletions(-) diff --git a/src/app.rs b/src/app.rs index c71237443..67a319549 100644 --- a/src/app.rs +++ b/src/app.rs @@ -118,7 +118,7 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< if let Some(http_api_config) = &config.http_api { if let Some(job) = tracker_apis::start_job( http_api_config, - app_container.tracker.clone(), + app_container.in_memory_torrent_repository.clone(), app_container.keys_handler.clone(), app_container.whitelist_manager.clone(), app_container.ban_service.clone(), diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 1047fa418..f735bc4d7 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -33,8 +33,8 @@ use super::make_rust_tls; use crate::core::authentication::handler::KeysHandler; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; +use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::whitelist::manager::WhiteListManager; -use crate::core::{self}; use crate::servers::apis::server::{ApiServer, Launcher}; use 
crate::servers::apis::Version; use crate::servers::registar::ServiceRegistrationForm; @@ -63,7 +63,6 @@ pub struct ApiServerJobStarted(); #[allow(clippy::too_many_arguments)] #[instrument(skip( config, - tracker, keys_handler, whitelist_manager, ban_service, @@ -73,7 +72,7 @@ pub struct ApiServerJobStarted(); ))] pub async fn start_job( config: &HttpApi, - tracker: Arc, + in_memory_torrent_repository: Arc, keys_handler: Arc, whitelist_manager: Arc, ban_service: Arc>, @@ -95,7 +94,7 @@ pub async fn start_job( start_v1( bind_to, tls, - tracker.clone(), + in_memory_torrent_repository.clone(), keys_handler.clone(), whitelist_manager.clone(), ban_service.clone(), @@ -114,7 +113,6 @@ pub async fn start_job( #[instrument(skip( socket, tls, - tracker, keys_handler, whitelist_manager, ban_service, @@ -126,7 +124,7 @@ pub async fn start_job( async fn start_v1( socket: SocketAddr, tls: Option, - tracker: Arc, + in_memory_torrent_repository: Arc, keys_handler: Arc, whitelist_manager: Arc, ban_service: Arc>, @@ -137,7 +135,7 @@ async fn start_v1( ) -> JoinHandle<()> { let server = ApiServer::new(Launcher::new(socket, tls)) .start( - tracker, + in_memory_torrent_repository, keys_handler, whitelist_manager, stats_event_sender, @@ -179,7 +177,7 @@ mod tests { start_job( config, - app_container.tracker, + app_container.in_memory_torrent_repository, app_container.keys_handler, app_container.whitelist_manager, app_container.ban_service, diff --git a/src/core/mod.rs b/src/core/mod.rs index d6d079cf6..6ad48289f 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -474,10 +474,10 @@ pub struct Tracker { config: Core, /// The service to check is a torrent is whitelisted. - pub whitelist_authorization: Arc, + whitelist_authorization: Arc, /// The in-memory torrents repository. - pub in_memory_torrent_repository: Arc, + in_memory_torrent_repository: Arc, /// The persistent torrents repository. 
db_torrent_repository: Arc, @@ -1325,24 +1325,24 @@ mod tests { #[tokio::test] async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { - let (tracker, _whitelist_authorization, whitelist_manager) = whitelisted_tracker(); + let (_tracker, whitelist_authorization, whitelist_manager) = whitelisted_tracker(); let info_hash = sample_info_hash(); let result = whitelist_manager.add_torrent_to_whitelist(&info_hash).await; assert!(result.is_ok()); - let result = tracker.whitelist_authorization.authorize(&info_hash).await; + let result = whitelist_authorization.authorize(&info_hash).await; assert!(result.is_ok()); } #[tokio::test] async fn it_should_not_authorize_the_announce_and_scrape_actions_on_not_whitelisted_torrents() { - let (tracker, _whitelist_authorization, _whitelist_manager) = whitelisted_tracker(); + let (_tracker, whitelist_authorization, _whitelist_manager) = whitelisted_tracker(); let info_hash = sample_info_hash(); - let result = tracker.whitelist_authorization.authorize(&info_hash).await; + let result = whitelist_authorization.authorize(&info_hash).await; assert!(result.is_err()); } } diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index fefe17933..ea7ebe994 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -45,7 +45,7 @@ use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use crate::core::statistics::metrics::Metrics; use crate::core::statistics::repository::Repository; -use crate::core::Tracker; +use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::servers::udp::server::banning::BanService; /// All the metrics collected by the tracker. 
@@ -64,11 +64,11 @@ pub struct TrackerMetrics { /// It returns all the [`TrackerMetrics`] pub async fn get_metrics( - tracker: Arc, + in_memory_torrent_repository: Arc, ban_service: Arc>, stats_repository: Arc, ) -> TrackerMetrics { - let torrents_metrics = tracker.in_memory_torrent_repository.get_torrents_metrics(); + let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); let stats = stats_repository.get_stats().await; let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); @@ -145,7 +145,7 @@ mod tests { let (_stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_repository = Arc::new(stats_repository); - let tracker = Arc::new(initialize_tracker( + let _tracker = Arc::new(initialize_tracker( &config, &whitelist_authorization, &in_memory_torrent_repository, @@ -154,7 +154,12 @@ mod tests { let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let tracker_metrics = get_metrics(tracker.clone(), ban_service.clone(), stats_repository.clone()).await; + let tracker_metrics = get_metrics( + in_memory_torrent_repository.clone(), + ban_service.clone(), + stats_repository.clone(), + ) + .await; assert_eq!( tracker_metrics, diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index c2ffa05aa..dae619d62 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -11,7 +11,7 @@ use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::peer; use torrust_tracker_torrent_repository::entry::EntrySync; -use crate::core::Tracker; +use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; /// It contains all the information the tracker has about a torrent #[derive(Debug, PartialEq)] @@ -44,8 +44,11 @@ pub struct BasicInfo { } /// It returns all the information the tracker has about one torrent in a [Info] struct. 
-pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Option { - let torrent_entry_option = tracker.in_memory_torrent_repository.get(info_hash); +pub async fn get_torrent_info( + in_memory_torrent_repository: Arc, + info_hash: &InfoHash, +) -> Option { + let torrent_entry_option = in_memory_torrent_repository.get(info_hash); let torrent_entry = torrent_entry_option?; @@ -65,10 +68,13 @@ pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Op } /// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list. -pub async fn get_torrents_page(tracker: Arc, pagination: Option<&Pagination>) -> Vec { +pub async fn get_torrents_page( + in_memory_torrent_repository: Arc, + pagination: Option<&Pagination>, +) -> Vec { let mut basic_infos: Vec = vec![]; - for (info_hash, torrent_entry) in tracker.in_memory_torrent_repository.get_paginated(pagination) { + for (info_hash, torrent_entry) in in_memory_torrent_repository.get_paginated(pagination) { let stats = torrent_entry.get_swarm_metadata(); basic_infos.push(BasicInfo { @@ -83,15 +89,14 @@ pub async fn get_torrents_page(tracker: Arc, pagination: Option<&Pagina } /// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list. 
-pub async fn get_torrents(tracker: Arc, info_hashes: &[InfoHash]) -> Vec { +pub async fn get_torrents( + in_memory_torrent_repository: Arc, + info_hashes: &[InfoHash], +) -> Vec { let mut basic_infos: Vec = vec![]; for info_hash in info_hashes { - if let Some(stats) = tracker - .in_memory_torrent_repository - .get(info_hash) - .map(|t| t.get_swarm_metadata()) - { + if let Some(stats) = in_memory_torrent_repository.get(info_hash).map(|t| t.get_swarm_metadata()) { basic_infos.push(BasicInfo { info_hash: *info_hash, seeders: u64::from(stats.complete), @@ -115,9 +120,10 @@ mod tests { use crate::app_test::initialize_tracker_dependencies; use crate::core::services::initialize_tracker; + use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::Tracker; - fn initialize_tracker_and_deps(config: &Configuration) -> Arc { + fn initialize_tracker_and_deps(config: &Configuration) -> (Arc, Arc) { let ( _database, _in_memory_whitelist, @@ -128,12 +134,14 @@ mod tests { _torrents_manager, ) = initialize_tracker_dependencies(config); - Arc::new(initialize_tracker( + let tracker = Arc::new(initialize_tracker( config, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - )) + )); + + (tracker, in_memory_torrent_repository) } fn sample_peer() -> peer::Peer { @@ -157,10 +165,9 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; - use crate::app_test::initialize_tracker_dependencies; - use crate::core::services::initialize_tracker; use crate::core::services::torrent::tests::{initialize_tracker_and_deps, sample_peer}; use crate::core::services::torrent::{get_torrent_info, Info}; + use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() @@ -168,29 +175,10 @@ mod tests { #[tokio::test] async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { - let 
config = tracker_configuration(); - - let ( - _database, - _in_memory_whitelist, - whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(&config); - - let tracker = initialize_tracker( - &config, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - ); - - let tracker = Arc::new(tracker); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let torrent_info = get_torrent_info( - tracker.clone(), + in_memory_torrent_repository.clone(), &InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(), ) .await; @@ -202,13 +190,15 @@ mod tests { async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { let config = tracker_configuration(); - let tracker = initialize_tracker_and_deps(&config); + let (tracker, in_memory_torrent_repository) = initialize_tracker_and_deps(&config); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); let _ = tracker.upsert_peer_and_get_stats(&info_hash, &sample_peer()); - let torrent_info = get_torrent_info(tracker.clone(), &info_hash).await.unwrap(); + let torrent_info = get_torrent_info(in_memory_torrent_repository.clone(), &info_hash) + .await + .unwrap(); assert_eq!( torrent_info, @@ -226,6 +216,7 @@ mod tests { mod searching_for_torrents { use std::str::FromStr; + use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::Configuration; @@ -233,6 +224,7 @@ mod tests { use crate::core::services::torrent::tests::{initialize_tracker_and_deps, sample_peer}; use crate::core::services::torrent::{get_torrents_page, BasicInfo, Pagination}; + use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() @@ -240,11 +232,9 @@ mod tests { 
#[tokio::test] async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { - let config = tracker_configuration(); - - let tracker = initialize_tracker_and_deps(&config); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; + let torrents = get_torrents_page(in_memory_torrent_repository.clone(), Some(&Pagination::default())).await; assert_eq!(torrents, vec![]); } @@ -253,14 +243,14 @@ mod tests { async fn should_return_a_summarized_info_for_all_torrents() { let config = tracker_configuration(); - let tracker = initialize_tracker_and_deps(&config); + let (tracker, in_memory_torrent_repository) = initialize_tracker_and_deps(&config); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); let _ = tracker.upsert_peer_and_get_stats(&info_hash, &sample_peer()); - let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; + let torrents = get_torrents_page(in_memory_torrent_repository.clone(), Some(&Pagination::default())).await; assert_eq!( torrents, @@ -277,7 +267,7 @@ mod tests { async fn should_allow_limiting_the_number_of_torrents_in_the_result() { let config = tracker_configuration(); - let tracker = initialize_tracker_and_deps(&config); + let (tracker, in_memory_torrent_repository) = initialize_tracker_and_deps(&config); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -290,7 +280,7 @@ mod tests { let offset = 0; let limit = 1; - let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::new(offset, limit))).await; + let torrents = get_torrents_page(in_memory_torrent_repository.clone(), Some(&Pagination::new(offset, limit))).await; assert_eq!(torrents.len(), 1); } @@ -299,7 +289,7 @@ mod tests { async fn should_allow_using_pagination_in_the_result() { 
let config = tracker_configuration(); - let tracker = initialize_tracker_and_deps(&config); + let (tracker, in_memory_torrent_repository) = initialize_tracker_and_deps(&config); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -312,7 +302,7 @@ mod tests { let offset = 1; let limit = 4000; - let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::new(offset, limit))).await; + let torrents = get_torrents_page(in_memory_torrent_repository.clone(), Some(&Pagination::new(offset, limit))).await; assert_eq!(torrents.len(), 1); assert_eq!( @@ -330,7 +320,7 @@ mod tests { async fn should_return_torrents_ordered_by_info_hash() { let config = tracker_configuration(); - let tracker = initialize_tracker_and_deps(&config); + let (tracker, in_memory_torrent_repository) = initialize_tracker_and_deps(&config); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -340,7 +330,7 @@ mod tests { let info_hash2 = InfoHash::from_str(&hash2).unwrap(); let _ = tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()); - let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; + let torrents = get_torrents_page(in_memory_torrent_repository.clone(), Some(&Pagination::default())).await; assert_eq!( torrents, diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index 4a005393d..c27b5f906 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -33,8 +33,8 @@ use super::v1::middlewares::auth::State; use crate::core::authentication::handler::KeysHandler; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; +use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::whitelist::manager::WhiteListManager; -use crate::core::Tracker; use crate::servers::apis::API_LOG_TARGET; use 
crate::servers::logging::Latency; use crate::servers::udp::server::banning::BanService; @@ -43,7 +43,6 @@ use crate::servers::udp::server::banning::BanService; #[allow(clippy::too_many_arguments)] #[allow(clippy::needless_pass_by_value)] #[instrument(skip( - tracker, keys_handler, whitelist_manager, ban_service, @@ -52,7 +51,7 @@ use crate::servers::udp::server::banning::BanService; access_tokens ))] pub fn router( - tracker: Arc, + in_memory_torrent_repository: Arc, keys_handler: Arc, whitelist_manager: Arc, ban_service: Arc>, @@ -68,7 +67,7 @@ pub fn router( let router = v1::routes::add( api_url_prefix, router, - tracker.clone(), + &in_memory_torrent_repository.clone(), &keys_handler.clone(), &whitelist_manager.clone(), ban_service.clone(), diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index e65d6643d..b37f71d5b 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -40,9 +40,10 @@ use tracing::{instrument, Level}; use super::routes::router; use crate::bootstrap::jobs::Started; use crate::core::authentication::handler::KeysHandler; +use crate::core::statistics; use crate::core::statistics::repository::Repository; +use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::whitelist::manager::WhiteListManager; -use crate::core::{statistics, Tracker}; use crate::servers::apis::API_LOG_TARGET; use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; use crate::servers::logging::STARTED_ON; @@ -128,10 +129,10 @@ impl ApiServer { /// /// It would panic if the bound socket address cannot be sent back to this starter. 
#[allow(clippy::too_many_arguments)] - #[instrument(skip(self, tracker, keys_handler, whitelist_manager, stats_event_sender, ban_service, stats_repository, form, access_tokens), err, ret(Display, level = Level::INFO))] + #[instrument(skip(self, in_memory_torrent_repository, keys_handler, whitelist_manager, stats_event_sender, ban_service, stats_repository, form, access_tokens), err, ret(Display, level = Level::INFO))] pub async fn start( self, - tracker: Arc, + in_memory_torrent_repository: Arc, keys_handler: Arc, whitelist_manager: Arc, stats_event_sender: Arc>>, @@ -150,7 +151,7 @@ impl ApiServer { let _task = launcher .start( - tracker, + in_memory_torrent_repository, keys_handler, whitelist_manager, ban_service, @@ -261,7 +262,6 @@ impl Launcher { #[allow(clippy::too_many_arguments)] #[instrument(skip( self, - tracker, keys_handler, whitelist_manager, ban_service, @@ -273,7 +273,7 @@ impl Launcher { ))] pub fn start( &self, - tracker: Arc, + in_memory_torrent_repository: Arc, keys_handler: Arc, whitelist_manager: Arc, ban_service: Arc>, @@ -287,7 +287,7 @@ impl Launcher { let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); let router = router( - tracker, + in_memory_torrent_repository, keys_handler, whitelist_manager, ban_service, @@ -373,7 +373,7 @@ mod tests { let started = stopped .start( - app_container.tracker, + app_container.in_memory_torrent_repository, app_container.keys_handler, app_container.whitelist_manager, app_container.stats_event_sender, diff --git a/src/servers/apis/v1/context/stats/handlers.rs b/src/servers/apis/v1/context/stats/handlers.rs index af7e1c239..da87696fc 100644 --- a/src/servers/apis/v1/context/stats/handlers.rs +++ b/src/servers/apis/v1/context/stats/handlers.rs @@ -11,7 +11,7 @@ use tokio::sync::RwLock; use super::responses::{metrics_response, stats_response}; use crate::core::services::statistics::get_metrics; use crate::core::statistics::repository::Repository; -use crate::core::Tracker; 
+use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::servers::udp::server::banning::BanService; #[derive(Deserialize, Debug, Default)] @@ -40,7 +40,7 @@ pub struct QueryParams { /// for more information about this endpoint. #[allow(clippy::type_complexity)] pub async fn get_stats_handler( - State(state): State<(Arc, Arc>, Arc)>, + State(state): State<(Arc, Arc>, Arc)>, params: Query, ) -> Response { let metrics = get_metrics(state.0.clone(), state.1.clone(), state.2.clone()).await; diff --git a/src/servers/apis/v1/context/stats/routes.rs b/src/servers/apis/v1/context/stats/routes.rs index b5df32963..083c72b10 100644 --- a/src/servers/apis/v1/context/stats/routes.rs +++ b/src/servers/apis/v1/context/stats/routes.rs @@ -12,20 +12,20 @@ use tokio::sync::RwLock; use super::handlers::get_stats_handler; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; -use crate::core::Tracker; +use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::servers::udp::server::banning::BanService; /// It adds the routes to the router for the [`stats`](crate::servers::apis::v1::context::stats) API context. 
pub fn add( prefix: &str, router: Router, - tracker: Arc, + in_memory_torrent_repository: Arc, ban_service: Arc>, _stats_event_sender: Arc>>, stats_repository: Arc, ) -> Router { router.route( &format!("{prefix}/stats"), - get(get_stats_handler).with_state((tracker, ban_service, stats_repository)), + get(get_stats_handler).with_state((in_memory_torrent_repository, ban_service, stats_repository)), ) } diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs index 0ba713f62..8fe20ab80 100644 --- a/src/servers/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -14,7 +14,7 @@ use torrust_tracker_primitives::pagination::Pagination; use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; use crate::core::services::torrent::{get_torrent_info, get_torrents, get_torrents_page}; -use crate::core::Tracker; +use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::servers::apis::v1::responses::invalid_info_hash_param_response; use crate::servers::apis::InfoHashParam; @@ -27,10 +27,13 @@ use crate::servers::apis::InfoHashParam; /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::torrent#get-a-torrent) /// for more information about this endpoint. 
-pub async fn get_torrent_handler(State(tracker): State>, Path(info_hash): Path) -> Response { +pub async fn get_torrent_handler( + State(in_memory_torrent_repository): State>, + Path(info_hash): Path, +) -> Response { match InfoHash::from_str(&info_hash.0) { Err(_) => invalid_info_hash_param_response(&info_hash.0), - Ok(info_hash) => match get_torrent_info(tracker.clone(), &info_hash).await { + Ok(info_hash) => match get_torrent_info(in_memory_torrent_repository.clone(), &info_hash).await { Some(info) => torrent_info_response(info).into_response(), None => torrent_not_known_response(), }, @@ -75,13 +78,16 @@ pub struct QueryParams { /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::torrent#list-torrents) /// for more information about this endpoint. -pub async fn get_torrents_handler(State(tracker): State>, pagination: Query) -> Response { +pub async fn get_torrents_handler( + State(in_memory_torrent_repository): State>, + pagination: Query, +) -> Response { tracing::debug!("pagination: {:?}", pagination); if pagination.0.info_hashes.is_empty() { torrent_list_response( &get_torrents_page( - tracker.clone(), + in_memory_torrent_repository.clone(), Some(&Pagination::new_with_options(pagination.0.offset, pagination.0.limit)), ) .await, @@ -89,7 +95,9 @@ pub async fn get_torrents_handler(State(tracker): State>, paginatio .into_response() } else { match parse_info_hashes(pagination.0.info_hashes) { - Ok(info_hashes) => torrent_list_response(&get_torrents(tracker.clone(), &info_hashes).await).into_response(), + Ok(info_hashes) => { + torrent_list_response(&get_torrents(in_memory_torrent_repository.clone(), &info_hashes).await).into_response() + } Err(err) => match err { QueryParamError::InvalidInfoHash { info_hash } => invalid_info_hash_param_response(&info_hash), }, diff --git a/src/servers/apis/v1/context/torrent/routes.rs b/src/servers/apis/v1/context/torrent/routes.rs index bca594e3d..dc66a1753 100644 --- 
a/src/servers/apis/v1/context/torrent/routes.rs +++ b/src/servers/apis/v1/context/torrent/routes.rs @@ -10,15 +10,18 @@ use axum::routing::get; use axum::Router; use super::handlers::{get_torrent_handler, get_torrents_handler}; -use crate::core::Tracker; +use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; /// It adds the routes to the router for the [`torrent`](crate::servers::apis::v1::context::torrent) API context. -pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { +pub fn add(prefix: &str, router: Router, in_memory_torrent_repository: Arc) -> Router { // Torrents router .route( &format!("{prefix}/torrent/{{info_hash}}"), - get(get_torrent_handler).with_state(tracker.clone()), + get(get_torrent_handler).with_state(in_memory_torrent_repository.clone()), + ) + .route( + &format!("{prefix}/torrents"), + get(get_torrents_handler).with_state(in_memory_torrent_repository), ) - .route(&format!("{prefix}/torrents"), get(get_torrents_handler).with_state(tracker)) } diff --git a/src/servers/apis/v1/routes.rs b/src/servers/apis/v1/routes.rs index c26ce4f3d..8fac453b8 100644 --- a/src/servers/apis/v1/routes.rs +++ b/src/servers/apis/v1/routes.rs @@ -8,8 +8,8 @@ use super::context::{auth_key, stats, torrent, whitelist}; use crate::core::authentication::handler::KeysHandler; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; +use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::whitelist::manager::WhiteListManager; -use crate::core::Tracker; use crate::servers::udp::server::banning::BanService; /// Add the routes for the v1 API. 
@@ -17,7 +17,7 @@ use crate::servers::udp::server::banning::BanService; pub fn add( prefix: &str, router: Router, - tracker: Arc, + in_memory_torrent_repository: &Arc, keys_handler: &Arc, whitelist_manager: &Arc, ban_service: Arc>, @@ -30,12 +30,12 @@ pub fn add( let router = stats::routes::add( &v1_prefix, router, - tracker.clone(), + in_memory_torrent_repository.clone(), ban_service, stats_event_sender, stats_repository, ); let router = whitelist::routes::add(&v1_prefix, router, whitelist_manager); - torrent::routes::add(&v1_prefix, router, tracker) + torrent::routes::add(&v1_prefix, router, in_memory_torrent_repository.clone()) } diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index fd2a37683..c88f6fdc9 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -486,6 +486,7 @@ mod tests { use crate::app_test::initialize_tracker_dependencies; use crate::core::services::{initialize_tracker, initialize_whitelist_manager, statistics}; use crate::core::statistics::event::sender::Sender; + use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::whitelist::manager::WhiteListManager; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::core::{whitelist, Tracker}; @@ -493,6 +494,7 @@ mod tests { type TrackerAndDeps = ( Arc, + Arc, Arc>>, Arc, Arc, @@ -539,6 +541,7 @@ mod tests { ( tracker, + in_memory_torrent_repository, stats_event_sender, in_memory_whitelist, whitelist_manager, @@ -887,8 +890,14 @@ mod tests { #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { - let (tracker, stats_event_sender, _in_memory_whitelist, _whitelist_manager, whitelist_authorization) = - public_tracker(); + let ( + tracker, + in_memory_torrent_repository, + stats_event_sender, + _in_memory_whitelist, + _whitelist_manager, + whitelist_authorization, + ) = public_tracker(); let client_ip = Ipv4Addr::new(126, 0, 0, 1); let client_port = 8080; @@ -916,7 +925,7 @@ 
mod tests { .await .unwrap(); - let peers = tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); + let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer_id) @@ -928,8 +937,14 @@ mod tests { #[tokio::test] async fn the_announced_peer_should_not_be_included_in_the_response() { - let (tracker, stats_event_sender, _in_memory_whitelist, _whitelist_manager, whitelist_authorization) = - public_tracker(); + let ( + tracker, + _in_memory_torrent_repository, + stats_event_sender, + _in_memory_whitelist, + _whitelist_manager, + whitelist_authorization, + ) = public_tracker(); let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); @@ -969,8 +984,14 @@ mod tests { // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): // "Do note that most trackers will only honor the IP address field under limited circumstances." - let (tracker, stats_event_sender, _in_memory_whitelist, _whitelist_manager, whitelist_authorization) = - public_tracker(); + let ( + tracker, + in_memory_torrent_repository, + stats_event_sender, + _in_memory_whitelist, + _whitelist_manager, + whitelist_authorization, + ) = public_tracker(); let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); @@ -1001,7 +1022,7 @@ mod tests { .await .unwrap(); - let peers = tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); + let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); } @@ -1048,8 +1069,14 @@ mod tests { #[tokio::test] async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { - let (tracker, _stats_event_sender, _in_memory_whitelist, _whitelist_manager, whitelist_authorization) = - public_tracker(); + let ( + tracker, + 
_in_memory_torrent_repository, + _stats_event_sender, + _in_memory_whitelist, + _whitelist_manager, + whitelist_authorization, + ) = public_tracker(); add_a_torrent_peer_using_ipv6(&tracker); @@ -1104,8 +1131,14 @@ mod tests { #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { - let (tracker, stats_event_sender, _in_memory_whitelist, _whitelist_manager, whitelist_authorization) = - public_tracker(); + let ( + tracker, + in_memory_torrent_repository, + stats_event_sender, + _in_memory_whitelist, + _whitelist_manager, + whitelist_authorization, + ) = public_tracker(); let client_ip = Ipv4Addr::new(127, 0, 0, 1); let client_port = 8080; @@ -1133,7 +1166,7 @@ mod tests { .await .unwrap(); - let peers = tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); + let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap(); @@ -1170,8 +1203,14 @@ mod tests { #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { - let (tracker, stats_event_sender, _in_memory_whitelist, _whitelist_manager, whitelist_authorization) = - public_tracker(); + let ( + tracker, + in_memory_torrent_repository, + stats_event_sender, + _in_memory_whitelist, + _whitelist_manager, + whitelist_authorization, + ) = public_tracker(); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); @@ -1200,7 +1239,7 @@ mod tests { .await .unwrap(); - let peers = tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); + let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer_id) @@ -1212,8 +1251,14 @@ mod tests { #[tokio::test] async fn the_announced_peer_should_not_be_included_in_the_response() { - let (tracker, stats_event_sender, 
_in_memory_whitelist, _whitelist_manager, whitelist_authorization) = - public_tracker(); + let ( + tracker, + _in_memory_torrent_repository, + stats_event_sender, + _in_memory_whitelist, + _whitelist_manager, + whitelist_authorization, + ) = public_tracker(); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); @@ -1256,8 +1301,14 @@ mod tests { // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): // "Do note that most trackers will only honor the IP address field under limited circumstances." - let (tracker, stats_event_sender, _in_memory_whitelist, _whitelist_manager, whitelist_authorization) = - public_tracker(); + let ( + tracker, + in_memory_torrent_repository, + stats_event_sender, + _in_memory_whitelist, + _whitelist_manager, + whitelist_authorization, + ) = public_tracker(); let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); @@ -1288,7 +1339,7 @@ mod tests { .await .unwrap(); - let peers = tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); + let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); // When using IPv6 the tracker converts the remote client ip into a IPv4 address assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); @@ -1338,8 +1389,14 @@ mod tests { #[tokio::test] async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { - let (tracker, _stats_event_sender, _in_memory_whitelist, _whitelist_manager, whitelist_authorization) = - public_tracker(); + let ( + tracker, + _in_memory_torrent_repository, + _stats_event_sender, + _in_memory_whitelist, + _whitelist_manager, + whitelist_authorization, + ) = public_tracker(); add_a_torrent_peer_using_ipv4(&tracker); @@ -1466,7 +1523,7 @@ mod tests { .await .unwrap(); - let peers = tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); + let 
peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap(); @@ -1511,8 +1568,14 @@ mod tests { #[tokio::test] async fn should_return_no_stats_when_the_tracker_does_not_have_any_torrent() { - let (tracker, stats_event_sender, _in_memory_whitelist, _whitelist_manager, _whitelist_authorization) = - public_tracker(); + let ( + tracker, + _in_memory_torrent_repository, + stats_event_sender, + _in_memory_whitelist, + _whitelist_manager, + _whitelist_authorization, + ) = public_tracker(); let remote_addr = sample_ipv4_remote_addr(); @@ -1605,8 +1668,14 @@ mod tests { #[tokio::test] async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { - let (tracker, _stats_event_sender, _in_memory_whitelist, _whitelist_manager, _whitelist_authorization) = - public_tracker(); + let ( + tracker, + _in_memory_torrent_repository, + _stats_event_sender, + _in_memory_whitelist, + _whitelist_manager, + _whitelist_authorization, + ) = public_tracker(); let torrent_stats = match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone()).await); @@ -1631,8 +1700,14 @@ mod tests { #[tokio::test] async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { - let (tracker, stats_event_sender, in_memory_whitelist, _whitelist_manager, _whitelist_authorization) = - whitelisted_tracker(); + let ( + tracker, + _in_memory_torrent_repository, + stats_event_sender, + in_memory_whitelist, + _whitelist_manager, + _whitelist_authorization, + ) = whitelisted_tracker(); let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); @@ -1667,8 +1742,14 @@ mod tests { #[tokio::test] async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { - let (tracker, stats_event_sender, _in_memory_whitelist, _whitelist_manager, _whitelist_authorization) = - whitelisted_tracker(); + let ( + tracker, + 
_in_memory_torrent_repository, + stats_event_sender, + _in_memory_whitelist, + _whitelist_manager, + _whitelist_authorization, + ) = whitelisted_tracker(); let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 8967ff830..70f071bf4 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -13,6 +13,7 @@ use torrust_tracker_lib::core::authentication::service::AuthenticationService; use torrust_tracker_lib::core::databases::Database; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; +use torrust_tracker_lib::core::torrent::repository::in_memory::InMemoryTorrentRepository; use torrust_tracker_lib::core::whitelist::manager::WhiteListManager; use torrust_tracker_lib::core::Tracker; use torrust_tracker_lib::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; @@ -27,6 +28,7 @@ where pub config: Arc, pub database: Arc>, pub tracker: Arc, + pub in_memory_torrent_repository: Arc, pub keys_handler: Arc, pub authentication_service: Arc, pub stats_event_sender: Arc>>, @@ -65,6 +67,7 @@ impl Environment { config, database: app_container.database.clone(), tracker: app_container.tracker.clone(), + in_memory_torrent_repository: app_container.in_memory_torrent_repository.clone(), keys_handler: app_container.keys_handler.clone(), authentication_service: app_container.authentication_service.clone(), stats_event_sender: app_container.stats_event_sender.clone(), @@ -83,6 +86,7 @@ impl Environment { config: self.config, database: self.database.clone(), tracker: self.tracker.clone(), + in_memory_torrent_repository: self.in_memory_torrent_repository.clone(), keys_handler: self.keys_handler.clone(), authentication_service: self.authentication_service.clone(), stats_event_sender: self.stats_event_sender.clone(), @@ -93,7 +97,7 @@ impl Environment { server: 
self .server .start( - self.tracker, + self.in_memory_torrent_repository, self.keys_handler, self.whitelist_manager, self.stats_event_sender, @@ -118,6 +122,7 @@ impl Environment { config: self.config, database: self.database, tracker: self.tracker, + in_memory_torrent_repository: self.in_memory_torrent_repository, keys_handler: self.keys_handler, authentication_service: self.authentication_service, stats_event_sender: self.stats_event_sender, diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 80c042a21..c0de4efbe 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -10,6 +10,7 @@ use torrust_tracker_lib::core::authentication::service::AuthenticationService; use torrust_tracker_lib::core::databases::Database; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; +use torrust_tracker_lib::core::torrent::repository::in_memory::InMemoryTorrentRepository; use torrust_tracker_lib::core::whitelist::manager::WhiteListManager; use torrust_tracker_lib::core::{whitelist, Tracker}; use torrust_tracker_lib::servers::http::server::{HttpServer, Launcher, Running, Stopped}; @@ -20,6 +21,7 @@ pub struct Environment { pub config: Arc, pub database: Arc>, pub tracker: Arc, + pub in_memory_torrent_repository: Arc, pub keys_handler: Arc, pub authentication_service: Arc, pub stats_event_sender: Arc>>, @@ -61,6 +63,7 @@ impl Environment { config, database: app_container.database.clone(), tracker: app_container.tracker.clone(), + in_memory_torrent_repository: app_container.in_memory_torrent_repository.clone(), keys_handler: app_container.keys_handler.clone(), authentication_service: app_container.authentication_service.clone(), stats_event_sender: app_container.stats_event_sender.clone(), @@ -78,6 +81,7 @@ impl Environment { config: self.config, database: self.database.clone(), tracker: self.tracker.clone(), + 
in_memory_torrent_repository: self.in_memory_torrent_repository.clone(), keys_handler: self.keys_handler.clone(), authentication_service: self.authentication_service.clone(), whitelist_authorization: self.whitelist_authorization.clone(), @@ -110,6 +114,7 @@ impl Environment { config: self.config, database: self.database, tracker: self.tracker, + in_memory_torrent_repository: self.in_memory_torrent_repository, keys_handler: self.keys_handler, authentication_service: self.authentication_service, whitelist_authorization: self.whitelist_authorization, diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 31aae9b50..8a65d941a 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -831,7 +831,7 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = env.tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash); + let peers = env.in_memory_torrent_repository.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), client_ip); @@ -869,7 +869,7 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = env.tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash); + let peers = env.in_memory_torrent_repository.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), env.tracker.get_maybe_external_ip().unwrap()); @@ -911,7 +911,7 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = env.tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash); + let peers = env.in_memory_torrent_repository.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), env.tracker.get_maybe_external_ip().unwrap()); @@ -951,7 +951,7 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = env.tracker.in_memory_torrent_repository.get_torrent_peers(&info_hash); + let peers = 
env.in_memory_torrent_repository.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); From 3867bbbca3917a8e8d28a1f8e10681e0ba4a7497 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 27 Jan 2025 10:58:15 +0000 Subject: [PATCH 156/802] refactor: [#1205] extract ScrapeHandler --- src/app.rs | 2 + src/bootstrap/app.rs | 5 +- src/bootstrap/jobs/http_tracker.rs | 21 ++- src/bootstrap/jobs/udp_tracker.rs | 13 +- src/container.rs | 2 + src/core/mod.rs | 145 ++++++------------ src/core/scrape_handler.rs | 108 +++++++++++++ src/core/services/mod.rs | 9 +- src/core/services/statistics/mod.rs | 3 +- src/core/services/torrent.rs | 3 +- src/servers/http/server.rs | 8 + src/servers/http/v1/handlers/announce.rs | 1 - src/servers/http/v1/handlers/scrape.rs | 183 ++++++++++++++--------- src/servers/http/v1/routes.rs | 5 + src/servers/http/v1/services/announce.rs | 19 +-- src/servers/http/v1/services/scrape.rs | 59 ++++---- src/servers/udp/handlers.rs | 87 ++++++----- src/servers/udp/server/launcher.rs | 15 +- src/servers/udp/server/mod.rs | 2 + src/servers/udp/server/processor.rs | 5 + src/servers/udp/server/spawner.rs | 3 + src/servers/udp/server/states.rs | 7 +- tests/servers/http/environment.rs | 6 + tests/servers/udp/environment.rs | 6 + 24 files changed, 442 insertions(+), 275 deletions(-) create mode 100644 src/core/scrape_handler.rs diff --git a/src/app.rs b/src/app.rs index 67a319549..3f0e8d399 100644 --- a/src/app.rs +++ b/src/app.rs @@ -80,6 +80,7 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< udp_tracker::start_job( udp_tracker_config, app_container.tracker.clone(), + app_container.scrape_handler.clone(), app_container.whitelist_authorization.clone(), app_container.stats_event_sender.clone(), app_container.ban_service.clone(), @@ -99,6 +100,7 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< if let Some(job) = 
http_tracker::start_job( http_tracker_config, app_container.tracker.clone(), + app_container.scrape_handler.clone(), app_container.authentication_service.clone(), app_container.whitelist_authorization.clone(), app_container.stats_event_sender.clone(), diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 294b2ca73..a0b6df3ca 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -26,6 +26,7 @@ use crate::core::authentication::handler::KeysHandler; use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::core::authentication::service; +use crate::core::scrape_handler::ScrapeHandler; use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; use crate::core::torrent::manager::TorrentsManager; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -116,14 +117,16 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { let tracker = Arc::new(initialize_tracker( configuration, - &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, )); + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + AppContainer { database, tracker, + scrape_handler, keys_handler, authentication_service, whitelist_authorization, diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 92a255c9e..2e76e2f31 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -20,6 +20,7 @@ use tracing::instrument; use super::make_rust_tls; use crate::core::authentication::service::AuthenticationService; +use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; use crate::core::{self, statistics, whitelist}; use crate::servers::http::server::{HttpServer, Launcher}; @@ -34,11 
+35,20 @@ use crate::servers::registar::ServiceRegistrationForm; /// # Panics /// /// It would panic if the `config::HttpTracker` struct would contain inappropriate values. -/// -#[instrument(skip(config, tracker, authentication_service, whitelist_authorization, stats_event_sender, form))] +#[allow(clippy::too_many_arguments)] +#[instrument(skip( + config, + tracker, + scrape_handler, + authentication_service, + whitelist_authorization, + stats_event_sender, + form +))] pub async fn start_job( config: &HttpTracker, tracker: Arc, + scrape_handler: Arc, authentication_service: Arc, whitelist_authorization: Arc, stats_event_sender: Arc>>, @@ -57,6 +67,7 @@ pub async fn start_job( socket, tls, tracker.clone(), + scrape_handler.clone(), authentication_service.clone(), whitelist_authorization.clone(), stats_event_sender.clone(), @@ -67,12 +78,14 @@ pub async fn start_job( } } +#[allow(clippy::too_many_arguments)] #[allow(clippy::async_yields_async)] -#[instrument(skip(socket, tls, tracker, whitelist_authorization, stats_event_sender, form))] +#[instrument(skip(socket, tls, tracker, scrape_handler, whitelist_authorization, stats_event_sender, form))] async fn start_v1( socket: SocketAddr, tls: Option, tracker: Arc, + scrape_handler: Arc, authentication_service: Arc, whitelist_authorization: Arc, stats_event_sender: Arc>>, @@ -81,6 +94,7 @@ async fn start_v1( let server = HttpServer::new(Launcher::new(socket, tls)) .start( tracker, + scrape_handler, authentication_service, whitelist_authorization, stats_event_sender, @@ -128,6 +142,7 @@ mod tests { start_job( config, app_container.tracker, + app_container.scrape_handler, app_container.authentication_service, app_container.whitelist_authorization, app_container.stats_event_sender, diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 724e2043e..dd55e4b8b 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -13,6 +13,7 @@ use tokio::task::JoinHandle; use 
torrust_tracker_configuration::UdpTracker; use tracing::instrument; +use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; use crate::core::{self, whitelist}; use crate::servers::registar::ServiceRegistrationForm; @@ -32,10 +33,19 @@ use crate::servers::udp::UDP_TRACKER_LOG_TARGET; /// It will panic if the task did not finish successfully. #[must_use] #[allow(clippy::async_yields_async)] -#[instrument(skip(config, tracker, whitelist_authorization, stats_event_sender, ban_service, form))] +#[instrument(skip( + config, + tracker, + scrape_handler, + whitelist_authorization, + stats_event_sender, + ban_service, + form +))] pub async fn start_job( config: &UdpTracker, tracker: Arc, + scrape_handler: Arc, whitelist_authorization: Arc, stats_event_sender: Arc>>, ban_service: Arc>, @@ -47,6 +57,7 @@ pub async fn start_job( let server = Server::new(Spawner::new(bind_to)) .start( tracker, + scrape_handler, whitelist_authorization, stats_event_sender, ban_service, diff --git a/src/container.rs b/src/container.rs index 8407d0b69..a73862006 100644 --- a/src/container.rs +++ b/src/container.rs @@ -5,6 +5,7 @@ use tokio::sync::RwLock; use crate::core::authentication::handler::KeysHandler; use crate::core::authentication::service::AuthenticationService; use crate::core::databases::Database; +use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; use crate::core::torrent::manager::TorrentsManager; @@ -17,6 +18,7 @@ use crate::servers::udp::server::banning::BanService; pub struct AppContainer { pub database: Arc>, pub tracker: Arc, + pub scrape_handler: Arc, pub keys_handler: Arc, pub authentication_service: Arc, pub whitelist_authorization: Arc, diff --git a/src/core/mod.rs b/src/core/mod.rs index 6ad48289f..064e8eb3e 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -442,6 +442,7 @@ pub mod authentication; pub mod databases; pub mod 
error; +pub mod scrape_handler; pub mod services; pub mod statistics; pub mod torrent; @@ -456,7 +457,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrent::repository::in_memory::InMemoryTorrentRepository; use torrent::repository::persisted::DatabasePersistentTorrentRepository; use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT}; -use torrust_tracker_primitives::core::{AnnounceData, ScrapeData}; +use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; @@ -473,9 +474,6 @@ pub struct Tracker { /// The tracker configuration. config: Core, - /// The service to check is a torrent is whitelisted. - whitelist_authorization: Arc, - /// The in-memory torrents repository. in_memory_torrent_repository: Arc, @@ -533,13 +531,11 @@ impl Tracker { /// Will return a `databases::error::Error` if unable to connect to database. The `Tracker` is responsible for the persistence. pub fn new( config: &Core, - whitelist_authorization: &Arc, in_memory_torrent_repository: &Arc, db_torrent_repository: &Arc, ) -> Result { Ok(Tracker { config: config.clone(), - whitelist_authorization: whitelist_authorization.clone(), in_memory_torrent_repository: in_memory_torrent_repository.clone(), db_torrent_repository: db_torrent_repository.clone(), }) @@ -629,25 +625,6 @@ impl Tracker { } } - /// It handles a scrape request. - /// - /// # Context: Tracker - /// - /// BEP 48: [Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). 
- pub async fn scrape(&self, info_hashes: &Vec) -> ScrapeData { - let mut scrape_data = ScrapeData::empty(); - - for info_hash in info_hashes { - let swarm_metadata = match self.whitelist_authorization.authorize(info_hash).await { - Ok(()) => self.in_memory_torrent_repository.get_swarm_metadata(info_hash), - Err(_) => SwarmMetadata::zeroed(), - }; - scrape_data.add_file(info_hash, swarm_metadata); - } - - scrape_data - } - /// It updates the torrent entry in memory, it also stores in the database /// the torrent info data which is persistent, and finally return the data /// needed for a `announce` request response. @@ -715,13 +692,14 @@ mod tests { use crate::app_test::initialize_tracker_dependencies; use crate::core::peer::Peer; + use crate::core::scrape_handler::ScrapeHandler; use crate::core::services::{initialize_tracker, initialize_whitelist_manager}; use crate::core::torrent::manager::TorrentsManager; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::whitelist::manager::WhiteListManager; use crate::core::{whitelist, Tracker}; - fn public_tracker() -> Tracker { + fn public_tracker() -> (Arc, Arc) { let config = configuration::ephemeral_public(); let ( @@ -734,12 +712,15 @@ mod tests { _torrents_manager, ) = initialize_tracker_dependencies(&config); - initialize_tracker( + let tracker = Arc::new(initialize_tracker( &config, - &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - ) + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + (tracker, scrape_handler) } fn public_tracker_and_in_memory_torrents_repository() -> (Arc, Arc) { @@ -748,7 +729,7 @@ mod tests { let ( _database, _in_memory_whitelist, - whitelist_authorization, + _whitelist_authorization, _authentication_service, in_memory_torrent_repository, db_torrent_repository, @@ -757,7 +738,6 @@ mod tests { let tracker = Arc::new(initialize_tracker( &config, - 
&whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, )); @@ -765,7 +745,12 @@ mod tests { (tracker, in_memory_torrent_repository) } - fn whitelisted_tracker() -> (Tracker, Arc, Arc) { + fn whitelisted_tracker() -> ( + Tracker, + Arc, + Arc, + Arc, + ) { let config = configuration::ephemeral_listed(); let ( @@ -780,14 +765,11 @@ mod tests { let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let tracker = initialize_tracker( - &config, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - ); + let tracker = initialize_tracker(&config, &in_memory_torrent_repository, &db_torrent_repository); - (tracker, whitelist_authorization, whitelist_manager) + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + (tracker, whitelist_authorization, whitelist_manager, scrape_handler) } pub fn tracker_persisting_torrents_in_database() -> (Tracker, Arc, Arc) { @@ -797,19 +779,14 @@ mod tests { let ( _database, _in_memory_whitelist, - whitelist_authorization, + _whitelist_authorization, _authentication_service, in_memory_torrent_repository, db_torrent_repository, torrents_manager, ) = initialize_tracker_dependencies(&config); - let tracker = initialize_tracker( - &config, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - ); + let tracker = initialize_tracker(&config, &in_memory_torrent_repository, &db_torrent_repository); (tracker, torrents_manager, in_memory_torrent_repository) } @@ -957,7 +934,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { - let tracker = public_tracker(); + let (tracker, _scrape_handler) = public_tracker(); let info_hash = sample_info_hash(); let peer = sample_peer(); @@ -973,7 +950,7 @@ mod tests { #[tokio::test] async fn 
it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { - let tracker = public_tracker(); + let (tracker, _scrape_handler) = public_tracker(); let info_hash = sample_info_hash(); @@ -1159,7 +1136,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_announce_data_with_an_empty_peer_list_when_it_is_the_first_announced_peer() { - let tracker = public_tracker(); + let (tracker, _scrape_handler) = public_tracker(); let mut peer = sample_peer(); @@ -1170,7 +1147,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_announce_data_with_the_previously_announced_peers() { - let tracker = public_tracker(); + let (tracker, _scrape_handler) = public_tracker(); let mut previously_announced_peer = sample_peer_1(); tracker.announce( @@ -1195,7 +1172,7 @@ mod tests { #[tokio::test] async fn when_the_peer_is_a_seeder() { - let tracker = public_tracker(); + let (tracker, _scrape_handler) = public_tracker(); let mut peer = seeder(); @@ -1206,7 +1183,7 @@ mod tests { #[tokio::test] async fn when_the_peer_is_a_leecher() { - let tracker = public_tracker(); + let (tracker, _scrape_handler) = public_tracker(); let mut peer = leecher(); @@ -1217,7 +1194,7 @@ mod tests { #[tokio::test] async fn when_a_previously_announced_started_peer_has_completed_downloading() { - let tracker = public_tracker(); + let (tracker, _scrape_handler) = public_tracker(); // We have to announce with "started" event because peer does not count if peer was not previously known let mut started_peer = started_peer(); @@ -1237,31 +1214,16 @@ mod tests { use std::net::{IpAddr, Ipv4Addr}; use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::core::ScrapeData; use crate::core::tests::the_tracker::{complete_peer, incomplete_peer, public_tracker}; - use crate::core::{PeersWanted, ScrapeData, SwarmMetadata}; - - #[tokio::test] - async fn it_should_return_a_zeroed_swarm_metadata_for_the_requested_file_if_the_tracker_does_not_have_that_torrent( 
- ) { - let tracker = public_tracker(); - - let info_hashes = vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()]; - - let scrape_data = tracker.scrape(&info_hashes).await; - - let mut expected_scrape_data = ScrapeData::empty(); - - expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[0]); - - assert_eq!(scrape_data, expected_scrape_data); - } + use crate::core::{PeersWanted, SwarmMetadata}; #[tokio::test] async fn it_should_return_the_swarm_metadata_for_the_requested_file_if_the_tracker_has_that_torrent() { - let tracker = public_tracker(); + let (tracker, scrape_handler) = public_tracker(); - let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); + let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // # DevSkim: ignore DS173237 // Announce a "complete" peer for the torrent let mut complete_peer = complete_peer(); @@ -1282,7 +1244,7 @@ mod tests { ); // Scrape - let scrape_data = tracker.scrape(&vec![info_hash]).await; + let scrape_data = scrape_handler.scrape(&vec![info_hash]).await; // The expected swarm metadata for the file let mut expected_scrape_data = ScrapeData::empty(); @@ -1297,24 +1259,6 @@ mod tests { assert_eq!(scrape_data, expected_scrape_data); } - - #[tokio::test] - async fn it_should_allow_scraping_for_multiple_torrents() { - let tracker = public_tracker(); - - let info_hashes = vec![ - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), - "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1".parse::().unwrap(), - ]; - - let scrape_data = tracker.scrape(&info_hashes).await; - - let mut expected_scrape_data = ScrapeData::empty(); - expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[0]); - expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[1]); - - assert_eq!(scrape_data, expected_scrape_data); - } } } @@ -1325,7 +1269,7 @@ mod tests { #[tokio::test] async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { - let 
(_tracker, whitelist_authorization, whitelist_manager) = whitelisted_tracker(); + let (_tracker, whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -1338,7 +1282,7 @@ mod tests { #[tokio::test] async fn it_should_not_authorize_the_announce_and_scrape_actions_on_not_whitelisted_torrents() { - let (_tracker, whitelist_authorization, _whitelist_manager) = whitelisted_tracker(); + let (_tracker, whitelist_authorization, _whitelist_manager, _scrape_handler) = whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -1357,7 +1301,7 @@ mod tests { #[tokio::test] async fn it_should_add_a_torrent_to_the_whitelist() { - let (_tracker, _whitelist_authorization, whitelist_manager) = whitelisted_tracker(); + let (_tracker, _whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -1368,7 +1312,7 @@ mod tests { #[tokio::test] async fn it_should_remove_a_torrent_from_the_whitelist() { - let (_tracker, _whitelist_authorization, whitelist_manager) = whitelisted_tracker(); + let (_tracker, _whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -1384,7 +1328,7 @@ mod tests { #[tokio::test] async fn it_should_load_the_whitelist_from_the_database() { - let (_tracker, _whitelist_authorization, whitelist_manager) = whitelisted_tracker(); + let (_tracker, _whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -1406,12 +1350,13 @@ mod tests { mod handling_an_scrape_request { use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use crate::core::tests::the_tracker::{ complete_peer, incomplete_peer, peer_ip, sample_info_hash, whitelisted_tracker, }; - use crate::core::{PeersWanted, ScrapeData}; + use 
crate::core::PeersWanted; #[test] fn it_should_be_able_to_build_a_zeroed_scrape_data_for_a_list_of_info_hashes() { @@ -1427,9 +1372,9 @@ mod tests { #[tokio::test] async fn it_should_return_the_zeroed_swarm_metadata_for_the_requested_file_if_it_is_not_whitelisted() { - let (tracker, _whitelist_authorization, _whitelist_manager) = whitelisted_tracker(); + let (tracker, _whitelist_authorization, _whitelist_manager, scrape_handler) = whitelisted_tracker(); - let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); + let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // # DevSkim: ignore DS173237 let mut peer = incomplete_peer(); tracker.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); @@ -1438,7 +1383,7 @@ mod tests { let mut peer = complete_peer(); tracker.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); - let scrape_data = tracker.scrape(&vec![info_hash]).await; + let scrape_data = scrape_handler.scrape(&vec![info_hash]).await; // The expected zeroed swarm metadata for the file let mut expected_scrape_data = ScrapeData::empty(); diff --git a/src/core/scrape_handler.rs b/src/core/scrape_handler.rs new file mode 100644 index 000000000..47049ed71 --- /dev/null +++ b/src/core/scrape_handler.rs @@ -0,0 +1,108 @@ +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::core::ScrapeData; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + +use super::torrent::repository::in_memory::InMemoryTorrentRepository; +use super::whitelist; + +pub struct ScrapeHandler { + /// The service to check is a torrent is whitelisted. + whitelist_authorization: Arc, + + /// The in-memory torrents repository. 
+ in_memory_torrent_repository: Arc, +} + +impl ScrapeHandler { + #[must_use] + pub fn new( + whitelist_authorization: &Arc, + in_memory_torrent_repository: &Arc, + ) -> Self { + Self { + whitelist_authorization: whitelist_authorization.clone(), + in_memory_torrent_repository: in_memory_torrent_repository.clone(), + } + } + + /// It handles a scrape request. + /// + /// # Context: Tracker + /// + /// BEP 48: [Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). + pub async fn scrape(&self, info_hashes: &Vec) -> ScrapeData { + let mut scrape_data = ScrapeData::empty(); + + for info_hash in info_hashes { + let swarm_metadata = match self.whitelist_authorization.authorize(info_hash).await { + Ok(()) => self.in_memory_torrent_repository.get_swarm_metadata(info_hash), + Err(_) => SwarmMetadata::zeroed(), + }; + scrape_data.add_file(info_hash, swarm_metadata); + } + + scrape_data + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::core::ScrapeData; + use torrust_tracker_test_helpers::configuration; + + use super::ScrapeHandler; + use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; + use crate::core::whitelist::{self}; + + fn scrape_handler() -> ScrapeHandler { + let config = configuration::ephemeral_public(); + + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( + &config.core, + &in_memory_whitelist.clone(), + )); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository) + } + + #[tokio::test] + async fn it_should_return_a_zeroed_swarm_metadata_for_the_requested_file_if_the_tracker_does_not_have_that_torrent() { + let scrape_handler = 
scrape_handler(); + + let info_hashes = vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()]; // # DevSkim: ignore DS173237 + + let scrape_data = scrape_handler.scrape(&info_hashes).await; + + let mut expected_scrape_data = ScrapeData::empty(); + + expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[0]); + + assert_eq!(scrape_data, expected_scrape_data); + } + + #[tokio::test] + async fn it_should_allow_scraping_for_multiple_torrents() { + let scrape_handler = scrape_handler(); + + let info_hashes = vec![ + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), // # DevSkim: ignore DS173237 + "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1".parse::().unwrap(), // # DevSkim: ignore DS173237 + ]; + + let scrape_data = scrape_handler.scrape(&info_hashes).await; + + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[0]); + expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[1]); + + assert_eq!(scrape_data, expected_scrape_data); + } +} diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index a9bca2df7..a6cf54d60 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -16,7 +16,6 @@ use torrust_tracker_configuration::Configuration; use super::databases::{self, Database}; use super::torrent::repository::in_memory::InMemoryTorrentRepository; use super::torrent::repository::persisted::DatabasePersistentTorrentRepository; -use super::whitelist; use super::whitelist::manager::WhiteListManager; use super::whitelist::repository::in_memory::InMemoryWhitelist; use super::whitelist::repository::persisted::DatabaseWhitelist; @@ -30,16 +29,10 @@ use crate::core::Tracker; #[must_use] pub fn initialize_tracker( config: &Configuration, - whitelist_authorization: &Arc, in_memory_torrent_repository: &Arc, db_torrent_repository: &Arc, ) -> Tracker { - match Tracker::new( - &Arc::new(config).core, - whitelist_authorization, - 
in_memory_torrent_repository, - db_torrent_repository, - ) { + match Tracker::new(&Arc::new(config).core, in_memory_torrent_repository, db_torrent_repository) { Ok(tracker) => tracker, Err(error) => { panic!("{}", error) diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index ea7ebe994..680504607 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -135,7 +135,7 @@ mod tests { let ( _database, _in_memory_whitelist, - whitelist_authorization, + _whitelist_authorization, _authentication_service, in_memory_torrent_repository, db_torrent_repository, @@ -147,7 +147,6 @@ mod tests { let _tracker = Arc::new(initialize_tracker( &config, - &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, )); diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index dae619d62..8677096cc 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -127,7 +127,7 @@ mod tests { let ( _database, _in_memory_whitelist, - whitelist_authorization, + _whitelist_authorization, _authentication_service, in_memory_torrent_repository, db_torrent_repository, @@ -136,7 +136,6 @@ mod tests { let tracker = Arc::new(initialize_tracker( config, - &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, )); diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index e7a3a92ec..573337ba9 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -12,6 +12,7 @@ use tracing::instrument; use super::v1::routes::router; use crate::bootstrap::jobs::Started; use crate::core::authentication::service::AuthenticationService; +use crate::core::scrape_handler::ScrapeHandler; use crate::core::{statistics, whitelist, Tracker}; use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; @@ -43,9 +44,11 @@ pub struct Launcher { } impl Launcher { + 
#[allow(clippy::too_many_arguments)] #[instrument(skip( self, tracker, + scrape_handler, authentication_service, whitelist_authorization, stats_event_sender, @@ -55,6 +58,7 @@ impl Launcher { fn start( &self, tracker: Arc, + scrape_handler: Arc, authentication_service: Arc, whitelist_authorization: Arc, stats_event_sender: Arc>>, @@ -79,6 +83,7 @@ impl Launcher { let app = router( tracker, + scrape_handler, authentication_service, whitelist_authorization, stats_event_sender, @@ -179,6 +184,7 @@ impl HttpServer { pub async fn start( self, tracker: Arc, + scrape_handler: Arc, authentication_service: Arc, whitelist_authorization: Arc, stats_event_sender: Arc>>, @@ -192,6 +198,7 @@ impl HttpServer { let task = tokio::spawn(async move { let server = launcher.start( tracker, + scrape_handler, authentication_service, whitelist_authorization, stats_event_sender, @@ -296,6 +303,7 @@ mod tests { let started = stopped .start( app_container.tracker, + app_container.scrape_handler, app_container.authentication_service, app_container.whitelist_authorization, app_container.stats_event_sender, diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index a9567fb81..8b57ce543 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -289,7 +289,6 @@ mod tests { let tracker = Arc::new(initialize_tracker( config, - &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, )); diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 116d717a1..3c19fe324 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -17,6 +17,7 @@ use torrust_tracker_primitives::core::ScrapeData; use crate::core::authentication::service::AuthenticationService; use crate::core::authentication::Key; +use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; use 
crate::core::Tracker; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; @@ -29,13 +30,27 @@ use crate::servers::http::v1::services; #[allow(clippy::unused_async)] #[allow(clippy::type_complexity)] pub async fn handle_without_key( - State(state): State<(Arc, Arc, Arc>>)>, + State(state): State<( + Arc, + Arc, + Arc, + Arc>>, + )>, ExtractRequest(scrape_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ) -> Response { tracing::debug!("http scrape request: {:#?}", &scrape_request); - handle(&state.0, &state.1, &state.2, &scrape_request, &client_ip_sources, None).await + handle( + &state.0, + &state.1, + &state.2, + &state.3, + &scrape_request, + &client_ip_sources, + None, + ) + .await } /// It handles the `scrape` request when the HTTP tracker is configured @@ -45,18 +60,33 @@ pub async fn handle_without_key( #[allow(clippy::unused_async)] #[allow(clippy::type_complexity)] pub async fn handle_with_key( - State(state): State<(Arc, Arc, Arc>>)>, + State(state): State<( + Arc, + Arc, + Arc, + Arc>>, + )>, ExtractRequest(scrape_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ExtractKey(key): ExtractKey, ) -> Response { tracing::debug!("http scrape request: {:#?}", &scrape_request); - handle(&state.0, &state.1, &state.2, &scrape_request, &client_ip_sources, Some(key)).await + handle( + &state.0, + &state.1, + &state.2, + &state.3, + &scrape_request, + &client_ip_sources, + Some(key), + ) + .await } async fn handle( tracker: &Arc, + scrape_handler: &Arc, authentication_service: &Arc, stats_event_sender: &Arc>>, scrape_request: &Scrape, @@ -65,6 +95,7 @@ async fn handle( ) -> Response { let scrape_data = match handle_scrape( tracker, + scrape_handler, authentication_service, stats_event_sender, scrape_request, @@ -87,6 +118,7 @@ async fn handle( async fn handle_scrape( tracker: &Arc, + scrape_handler: &Arc, authentication_service: &Arc, 
opt_stats_event_sender: &Arc>>, scrape_request: &Scrape, @@ -115,7 +147,7 @@ async fn handle_scrape( }; if return_real_scrape_data { - Ok(services::scrape::invoke(tracker, opt_stats_event_sender, &scrape_request.info_hashes, &peer_ip).await) + Ok(services::scrape::invoke(scrape_handler, opt_stats_event_sender, &scrape_request.info_hashes, &peer_ip).await) } else { Ok(services::scrape::fake(opt_stats_event_sender, &scrape_request.info_hashes, &peer_ip).await) } @@ -141,12 +173,15 @@ mod tests { use crate::app_test::initialize_tracker_dependencies; use crate::core::authentication::service::AuthenticationService; + use crate::core::scrape_handler::ScrapeHandler; use crate::core::services::{initialize_tracker, statistics}; use crate::core::Tracker; + #[allow(clippy::type_complexity)] fn private_tracker() -> ( - Tracker, - Option>, + Arc, + Arc, + Arc>>, Arc, ) { let config = configuration::ephemeral_private(); @@ -163,21 +198,24 @@ mod tests { let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - ( - initialize_tracker( - &config, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - ), - stats_event_sender, - authentication_service, - ) + let stats_event_sender = Arc::new(stats_event_sender); + + let tracker = Arc::new(initialize_tracker( + &config, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + (tracker, scrape_handler, stats_event_sender, authentication_service) } + #[allow(clippy::type_complexity)] fn whitelisted_tracker() -> ( - Tracker, - Option>, + Arc, + Arc, + Arc>>, Arc, ) { let config = configuration::ephemeral_listed(); @@ -194,21 +232,24 @@ mod tests { let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - ( - initialize_tracker( - &config, - &whitelist_authorization, - 
&in_memory_torrent_repository, - &db_torrent_repository, - ), - stats_event_sender, - authentication_service, - ) + let stats_event_sender = Arc::new(stats_event_sender); + + let tracker = Arc::new(initialize_tracker( + &config, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + (tracker, scrape_handler, stats_event_sender, authentication_service) } + #[allow(clippy::type_complexity)] fn tracker_on_reverse_proxy() -> ( - Tracker, - Option>, + Arc, + Arc, + Arc>>, Arc, ) { let config = configuration::ephemeral_with_reverse_proxy(); @@ -225,21 +266,24 @@ mod tests { let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - ( - initialize_tracker( - &config, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - ), - stats_event_sender, - authentication_service, - ) + let stats_event_sender = Arc::new(stats_event_sender); + + let tracker = Arc::new(initialize_tracker( + &config, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + (tracker, scrape_handler, stats_event_sender, authentication_service) } + #[allow(clippy::type_complexity)] fn tracker_not_on_reverse_proxy() -> ( - Tracker, - Option>, + Arc, + Arc, + Arc>>, Arc, ) { let config = configuration::ephemeral_without_reverse_proxy(); @@ -256,21 +300,22 @@ mod tests { let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - ( - initialize_tracker( - &config, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - ), - stats_event_sender, - authentication_service, - ) + let stats_event_sender = Arc::new(stats_event_sender); + + let tracker = Arc::new(initialize_tracker( + &config, + 
&in_memory_torrent_repository, + &db_torrent_repository, + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + (tracker, scrape_handler, stats_event_sender, authentication_service) } fn sample_scrape_request() -> Scrape { Scrape { - info_hashes: vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()], + info_hashes: vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()], // # DevSkim: ignore DS173237 } } @@ -290,7 +335,6 @@ mod tests { mod with_tracker_in_private_mode { use std::str::FromStr; - use std::sync::Arc; use torrust_tracker_primitives::core::ScrapeData; @@ -300,15 +344,14 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_missing() { - let (tracker, stats_event_sender, authentication_service) = private_tracker(); - let tracker = Arc::new(tracker); - let stats_event_sender = Arc::new(stats_event_sender); + let (tracker, scrape_handler, stats_event_sender, authentication_service) = private_tracker(); let scrape_request = sample_scrape_request(); let maybe_key = None; let scrape_data = handle_scrape( &tracker, + &scrape_handler, &authentication_service, &stats_event_sender, &scrape_request, @@ -325,9 +368,7 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_invalid() { - let (tracker, stats_event_sender, authentication_service) = private_tracker(); - let tracker = Arc::new(tracker); - let stats_event_sender = Arc::new(stats_event_sender); + let (tracker, scrape_handler, stats_event_sender, authentication_service) = private_tracker(); let scrape_request = sample_scrape_request(); let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); @@ -335,6 +376,7 @@ mod tests { let scrape_data = handle_scrape( &tracker, + &scrape_handler, &authentication_service, &stats_event_sender, &scrape_request, @@ -352,8 +394,6 @@ mod 
tests { mod with_tracker_in_listed_mode { - use std::sync::Arc; - use torrust_tracker_primitives::core::ScrapeData; use super::{sample_client_ip_sources, sample_scrape_request, whitelisted_tracker}; @@ -361,14 +401,13 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_torrent_is_not_whitelisted() { - let (tracker, stats_event_sender, authentication_service) = whitelisted_tracker(); - let tracker: Arc = Arc::new(tracker); - let stats_event_sender = Arc::new(stats_event_sender); + let (tracker, scrape_handler, stats_event_sender, authentication_service) = whitelisted_tracker(); let scrape_request = sample_scrape_request(); let scrape_data = handle_scrape( &tracker, + &scrape_handler, &authentication_service, &stats_event_sender, &scrape_request, @@ -385,7 +424,6 @@ mod tests { } mod with_tracker_on_reverse_proxy { - use std::sync::Arc; use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; @@ -395,9 +433,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { - let (tracker, stats_event_sender, authentication_service) = tracker_on_reverse_proxy(); - let tracker: Arc = Arc::new(tracker); - let stats_event_sender = Arc::new(stats_event_sender); + let (tracker, scrape_handler, stats_event_sender, authentication_service) = tracker_on_reverse_proxy(); let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, @@ -406,6 +442,7 @@ mod tests { let response = handle_scrape( &tracker, + &scrape_handler, &authentication_service, &stats_event_sender, &sample_scrape_request(), @@ -423,7 +460,6 @@ mod tests { } mod with_tracker_not_on_reverse_proxy { - use std::sync::Arc; use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; @@ -433,9 +469,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { - let (tracker, stats_event_sender, 
authentication_service) = tracker_not_on_reverse_proxy(); - let tracker: Arc = Arc::new(tracker); - let stats_event_sender = Arc::new(stats_event_sender); + let (tracker, scrape_handler, stats_event_sender, authentication_service) = tracker_not_on_reverse_proxy(); let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, @@ -444,6 +478,7 @@ mod tests { let response = handle_scrape( &tracker, + &scrape_handler, &authentication_service, &stats_event_sender, &sample_scrape_request(), diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index 7a1465500..0c0be5bd5 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -23,6 +23,7 @@ use tracing::{instrument, Level, Span}; use super::handlers::{announce, health_check, scrape}; use crate::core::authentication::service::AuthenticationService; +use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; use crate::core::{whitelist, Tracker}; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; @@ -35,6 +36,7 @@ use crate::servers::logging::Latency; #[allow(clippy::needless_pass_by_value)] #[instrument(skip( tracker, + scrape_handler, authentication_service, whitelist_authorization, stats_event_sender, @@ -42,6 +44,7 @@ use crate::servers::logging::Latency; ))] pub fn router( tracker: Arc, + scrape_handler: Arc, authentication_service: Arc, whitelist_authorization: Arc, stats_event_sender: Arc>>, @@ -74,6 +77,7 @@ pub fn router( "/scrape", get(scrape::handle_without_key).with_state(( tracker.clone(), + scrape_handler.clone(), authentication_service.clone(), stats_event_sender.clone(), )), @@ -82,6 +86,7 @@ pub fn router( "/scrape/{key}", get(scrape::handle_with_key).with_state(( tracker.clone(), + scrape_handler.clone(), authentication_service.clone(), stats_event_sender.clone(), )), diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 322bc80eb..9e381d8b2 100644 --- 
a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -76,7 +76,7 @@ mod tests { let ( _database, _in_memory_whitelist, - whitelist_authorization, + _whitelist_authorization, _authentication_service, in_memory_torrent_repository, db_torrent_repository, @@ -85,12 +85,7 @@ mod tests { let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); - let tracker = initialize_tracker( - &config, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - ); + let tracker = initialize_tracker(&config, &in_memory_torrent_repository, &db_torrent_repository); (tracker, stats_event_sender) } @@ -147,20 +142,14 @@ mod tests { let ( _database, _in_memory_whitelist, - whitelist_authorization, + _whitelist_authorization, _authentication_service, in_memory_torrent_repository, db_torrent_repository, _torrents_manager, ) = initialize_tracker_dependencies(&config); - Tracker::new( - &config.core, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - ) - .unwrap() + Tracker::new(&config.core, &in_memory_torrent_repository, &db_torrent_repository).unwrap() } #[tokio::test] diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 299938f84..e3ee6560f 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -2,9 +2,8 @@ //! //! The service is responsible for handling the `scrape` requests. //! -//! It delegates the `scrape` logic to the [`Tracker`](crate::core::Tracker::scrape) -//! and it returns the [`ScrapeData`] returned -//! by the [`Tracker`]. +//! It delegates the `scrape` logic to the [`ScrapeHandler`] and it returns the +//! [`ScrapeData`]. //! //! It also sends an [`statistics::event::Event`] //! because events are specific for the HTTP tracker. 
@@ -14,9 +13,9 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::core::ScrapeData; +use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::{self}; -use crate::core::Tracker; /// The HTTP tracker `scrape` service. /// @@ -29,12 +28,12 @@ use crate::core::Tracker; /// > like the UDP tracker, the number of TCP connections is incremented for /// > each `scrape` request. pub async fn invoke( - tracker: &Arc, + scrape_handler: &Arc, opt_stats_event_sender: &Arc>>, info_hashes: &Vec, original_peer_ip: &IpAddr, ) -> ScrapeData { - let scrape_data = tracker.scrape(info_hashes).await; + let scrape_data = scrape_handler.scrape(info_hashes).await; send_scrape_event(original_peer_ip, opt_stats_event_sender).await; @@ -74,6 +73,7 @@ async fn send_scrape_event(original_peer_ip: &IpAddr, opt_stats_event_sender: &A mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; @@ -81,10 +81,11 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; + use crate::core::scrape_handler::ScrapeHandler; use crate::core::services::initialize_tracker; use crate::core::Tracker; - fn public_tracker() -> Tracker { + fn public_tracker_and_scrape_handler() -> (Arc, Arc) { let config = configuration::ephemeral_public(); let ( @@ -97,12 +98,15 @@ mod tests { _torrents_manager, ) = initialize_tracker_dependencies(&config); - initialize_tracker( + let tracker = Arc::new(initialize_tracker( &config, - &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, - ) + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + (tracker, scrape_handler) } fn sample_info_hashes() -> Vec { @@ -110,7 +114,7 @@ 
mod tests { } fn sample_info_hash() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() // # DevSkim: ignore DS173237 } fn sample_peer() -> peer::Peer { @@ -125,7 +129,7 @@ mod tests { } } - fn test_tracker_factory() -> Tracker { + fn test_tracker_factory() -> (Arc, Arc) { let config = configuration::ephemeral(); let ( @@ -138,13 +142,11 @@ mod tests { _torrents_manager, ) = initialize_tracker_dependencies(&config); - Tracker::new( - &config.core, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - ) - .unwrap() + let tracker = Arc::new(Tracker::new(&config.core, &in_memory_torrent_repository, &db_torrent_repository).unwrap()); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + (tracker, scrape_handler) } mod with_real_data { @@ -160,7 +162,7 @@ mod tests { use crate::core::{statistics, PeersWanted}; use crate::servers::http::v1::services::scrape::invoke; use crate::servers::http::v1::services::scrape::tests::{ - public_tracker, sample_info_hash, sample_info_hashes, sample_peer, test_tracker_factory, + public_tracker_and_scrape_handler, sample_info_hash, sample_info_hashes, sample_peer, test_tracker_factory, }; #[tokio::test] @@ -168,7 +170,7 @@ mod tests { let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); - let tracker = Arc::new(public_tracker()); + let (tracker, scrape_handler) = public_tracker_and_scrape_handler(); let info_hash = sample_info_hash(); let info_hashes = vec![info_hash]; @@ -178,7 +180,7 @@ mod tests { let original_peer_ip = peer.ip(); tracker.announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::All); - let scrape_data = invoke(&tracker, &stats_event_sender, &info_hashes, &original_peer_ip).await; + let scrape_data = invoke(&scrape_handler, 
&stats_event_sender, &info_hashes, &original_peer_ip).await; let mut expected_scrape_data = ScrapeData::empty(); expected_scrape_data.add_file( @@ -204,11 +206,11 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let tracker = Arc::new(test_tracker_factory()); + let (_tracker, scrape_handler) = test_tracker_factory(); let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); - invoke(&tracker, &stats_event_sender, &sample_info_hashes(), &peer_ip).await; + invoke(&scrape_handler, &stats_event_sender, &sample_info_hashes(), &peer_ip).await; } #[tokio::test] @@ -222,11 +224,11 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let tracker = Arc::new(test_tracker_factory()); + let (_tracker, scrape_handler) = test_tracker_factory(); let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); - invoke(&tracker, &stats_event_sender, &sample_info_hashes(), &peer_ip).await; + invoke(&scrape_handler, &stats_event_sender, &sample_info_hashes(), &peer_ip).await; } } @@ -242,14 +244,15 @@ mod tests { use crate::core::{statistics, PeersWanted}; use crate::servers::http::v1::services::scrape::fake; use crate::servers::http::v1::services::scrape::tests::{ - public_tracker, sample_info_hash, sample_info_hashes, sample_peer, + public_tracker_and_scrape_handler, sample_info_hash, sample_info_hashes, sample_peer, }; #[tokio::test] async fn it_should_always_return_the_zeroed_scrape_data_for_a_torrent() { let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); - let tracker = Arc::new(public_tracker()); + + let (tracker, _scrape_handler) = public_tracker_and_scrape_handler(); let info_hash = sample_info_hash(); let info_hashes = vec![info_hash]; diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index c88f6fdc9..f8f57aea9 
100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -20,6 +20,7 @@ use zerocopy::network_endian::I32; use super::connection_cookie::{check, make}; use super::server::banning::BanService; use super::RawRequest; +use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; use crate::core::{statistics, whitelist, PeersWanted, Tracker}; use crate::servers::udp::error::Error; @@ -54,10 +55,12 @@ impl CookieTimeValues { /// - Delegating the request to the correct handler depending on the request type. /// /// It will return an `Error` response if the request is invalid. -#[instrument(fields(request_id), skip(udp_request, tracker, whitelist_authorization, opt_stats_event_sender, cookie_time_values, ban_service), ret(level = Level::TRACE))] +#[allow(clippy::too_many_arguments)] +#[instrument(fields(request_id), skip(udp_request, tracker, scrape_handler, whitelist_authorization, opt_stats_event_sender, cookie_time_values, ban_service), ret(level = Level::TRACE))] pub(crate) async fn handle_packet( udp_request: RawRequest, tracker: &Tracker, + scrape_handler: &Arc, whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, local_addr: SocketAddr, @@ -77,6 +80,7 @@ pub(crate) async fn handle_packet( request, udp_request.from, tracker, + scrape_handler, whitelist_authorization, opt_stats_event_sender, cookie_time_values.clone(), @@ -137,6 +141,7 @@ pub(crate) async fn handle_packet( request, remote_addr, tracker, + scrape_handler, whitelist_authorization, opt_stats_event_sender, cookie_time_values @@ -145,6 +150,7 @@ pub async fn handle_request( request: Request, remote_addr: SocketAddr, tracker: &Tracker, + scrape_handler: &Arc, whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, cookie_time_values: CookieTimeValues, @@ -174,7 +180,7 @@ pub async fn handle_request( handle_scrape( remote_addr, &scrape_request, - tracker, + scrape_handler, opt_stats_event_sender, cookie_time_values.valid_range, ) 
@@ -338,11 +344,11 @@ pub async fn handle_announce( /// # Errors /// /// This function does not ever return an error. -#[instrument(fields(transaction_id, connection_id), skip(tracker, opt_stats_event_sender), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id, connection_id), skip(scrape_handler, opt_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_scrape( remote_addr: SocketAddr, request: &ScrapeRequest, - tracker: &Tracker, + scrape_handler: &Arc, opt_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { @@ -365,7 +371,7 @@ pub async fn handle_scrape( info_hashes.push((*info_hash).into()); } - let scrape_data = tracker.scrape(&info_hashes).await; + let scrape_data = scrape_handler.scrape(&info_hashes).await; let mut torrent_stats: Vec = Vec::new(); @@ -484,6 +490,7 @@ mod tests { use super::gen_remote_fingerprint; use crate::app_test::initialize_tracker_dependencies; + use crate::core::scrape_handler::ScrapeHandler; use crate::core::services::{initialize_tracker, initialize_whitelist_manager, statistics}; use crate::core::statistics::event::sender::Sender; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -494,6 +501,7 @@ mod tests { type TrackerAndDeps = ( Arc, + Arc, Arc, Arc>>, Arc, @@ -534,13 +542,15 @@ mod tests { let tracker = Arc::new(initialize_tracker( config, - &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, )); + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + ( tracker, + scrape_handler, in_memory_torrent_repository, stats_event_sender, in_memory_whitelist, @@ -643,7 +653,7 @@ mod tests { } } - fn test_tracker_factory() -> (Arc, Arc) { + fn test_tracker_factory() -> (Arc, Arc, Arc) { let config = tracker_configuration(); let ( @@ -656,17 +666,11 @@ mod tests { _torrents_manager, ) = initialize_tracker_dependencies(&config); - let tracker = Arc::new( - Tracker::new( - &config.core, 
- &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - ) - .unwrap(), - ); + let tracker = Arc::new(Tracker::new(&config.core, &in_memory_torrent_repository, &db_torrent_repository).unwrap()); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - (tracker, whitelist_authorization) + (tracker, scrape_handler, whitelist_authorization) } mod connect_request { @@ -892,6 +896,7 @@ mod tests { async fn an_announced_peer_should_be_added_to_the_tracker() { let ( tracker, + _scrape_handler, in_memory_torrent_repository, stats_event_sender, _in_memory_whitelist, @@ -939,6 +944,7 @@ mod tests { async fn the_announced_peer_should_not_be_included_in_the_response() { let ( tracker, + _scrape_handler, _in_memory_torrent_repository, stats_event_sender, _in_memory_whitelist, @@ -986,6 +992,7 @@ mod tests { let ( tracker, + _scrape_handler, in_memory_torrent_repository, stats_event_sender, _in_memory_whitelist, @@ -1071,6 +1078,7 @@ mod tests { async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { let ( tracker, + _scrape_handler, _in_memory_torrent_repository, _stats_event_sender, _in_memory_whitelist, @@ -1102,7 +1110,7 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let (tracker, whitelist_authorization) = test_tracker_factory(); + let (tracker, _scrape_handler, whitelist_authorization) = test_tracker_factory(); handle_announce( sample_ipv4_socket_address(), @@ -1133,6 +1141,7 @@ mod tests { async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { let ( tracker, + _scrape_handler, in_memory_torrent_repository, stats_event_sender, _in_memory_whitelist, @@ -1205,6 +1214,7 @@ mod tests { async fn an_announced_peer_should_be_added_to_the_tracker() { let ( tracker, + _scrape_handler, in_memory_torrent_repository, stats_event_sender, 
_in_memory_whitelist, @@ -1253,6 +1263,7 @@ mod tests { async fn the_announced_peer_should_not_be_included_in_the_response() { let ( tracker, + _scrape_handler, _in_memory_torrent_repository, stats_event_sender, _in_memory_whitelist, @@ -1303,6 +1314,7 @@ mod tests { let ( tracker, + _scrape_handler, in_memory_torrent_repository, stats_event_sender, _in_memory_whitelist, @@ -1391,6 +1403,7 @@ mod tests { async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { let ( tracker, + _scrape_handler, _in_memory_torrent_repository, _stats_event_sender, _in_memory_whitelist, @@ -1422,7 +1435,7 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let (tracker, whitelist_authorization) = test_tracker_factory(); + let (tracker, _scrape_handler, whitelist_authorization) = test_tracker_factory(); let remote_addr = sample_ipv6_remote_addr(); @@ -1483,13 +1496,7 @@ mod tests { Arc::new(Some(Box::new(stats_event_sender_mock))); let tracker = Arc::new( - core::Tracker::new( - &config.core, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - ) - .unwrap(), + core::Tracker::new(&config.core, &in_memory_torrent_repository, &db_torrent_repository).unwrap(), ); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); @@ -1550,6 +1557,7 @@ mod tests { }; use super::{gen_remote_fingerprint, TorrentPeerBuilder}; + use crate::core::scrape_handler::ScrapeHandler; use crate::core::services::statistics; use crate::core::{self}; use crate::servers::udp::connection_cookie::make; @@ -1569,7 +1577,8 @@ mod tests { #[tokio::test] async fn should_return_no_stats_when_the_tracker_does_not_have_any_torrent() { let ( - tracker, + _tracker, + scrape_handler, _in_memory_torrent_repository, stats_event_sender, _in_memory_whitelist, @@ -1591,7 +1600,7 @@ mod tests { let response = handle_scrape( remote_addr, &request, - &tracker, + &scrape_handler, &stats_event_sender, 
sample_cookie_valid_range(), ) @@ -1631,7 +1640,7 @@ mod tests { } } - async fn add_a_sample_seeder_and_scrape(tracker: Arc) -> Response { + async fn add_a_sample_seeder_and_scrape(tracker: Arc, scrape_handler: Arc) -> Response { let (stats_event_sender, _stats_repository) = statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); @@ -1645,7 +1654,7 @@ mod tests { handle_scrape( remote_addr, &request, - &tracker, + &scrape_handler, &stats_event_sender, sample_cookie_valid_range(), ) @@ -1670,6 +1679,7 @@ mod tests { async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { let ( tracker, + scrape_handler, _in_memory_torrent_repository, _stats_event_sender, _in_memory_whitelist, @@ -1677,7 +1687,8 @@ mod tests { _whitelist_authorization, ) = public_tracker(); - let torrent_stats = match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone()).await); + let torrent_stats = + match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone(), scrape_handler.clone()).await); let expected_torrent_stats = vec![TorrentScrapeStatistics { seeders: NumberOfPeers(1.into()), @@ -1702,6 +1713,7 @@ mod tests { async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { let ( tracker, + scrape_handler, _in_memory_torrent_repository, stats_event_sender, in_memory_whitelist, @@ -1722,7 +1734,7 @@ mod tests { handle_scrape( remote_addr, &request, - &tracker, + &scrape_handler, &stats_event_sender, sample_cookie_valid_range(), ) @@ -1744,6 +1756,7 @@ mod tests { async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { let ( tracker, + scrape_handler, _in_memory_torrent_repository, stats_event_sender, _in_memory_whitelist, @@ -1762,7 +1775,7 @@ mod tests { handle_scrape( remote_addr, &request, - &tracker, + &scrape_handler, &stats_event_sender, sample_cookie_valid_range(), ) @@ -1814,12 +1827,12 @@ mod tests { let remote_addr = 
sample_ipv4_remote_addr(); - let (tracker, _whitelist_authorization) = test_tracker_factory(); + let (_tracker, scrape_handler, _whitelist_authorization) = test_tracker_factory(); handle_scrape( remote_addr, &sample_scrape_request(&remote_addr), - &tracker, + &scrape_handler, &stats_event_sender, sample_cookie_valid_range(), ) @@ -1854,12 +1867,12 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); - let (tracker, _whitelist_authorization) = test_tracker_factory(); + let (_tracker, scrape_handler, _whitelist_authorization) = test_tracker_factory(); handle_scrape( remote_addr, &sample_scrape_request(&remote_addr), - &tracker, + &scrape_handler, &stats_event_sender, sample_cookie_valid_range(), ) diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index bb5c30d44..d6bc230e1 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -13,6 +13,7 @@ use tracing::instrument; use super::banning::BanService; use super::request_buffer::ActiveRequests; use crate::bootstrap::jobs::Started; +use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; use crate::core::{statistics, whitelist, Tracker}; use crate::servers::logging::STARTED_ON; @@ -43,6 +44,7 @@ impl Launcher { #[allow(clippy::too_many_arguments)] #[instrument(skip( tracker, + scrape_handler, whitelist_authorization, opt_stats_event_sender, ban_service, @@ -52,6 +54,7 @@ impl Launcher { ))] pub async fn run_with_graceful_shutdown( tracker: Arc, + scrape_handler: Arc, whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, ban_service: Arc>, @@ -95,6 +98,7 @@ impl Launcher { let () = Self::run_udp_server_main( receiver, tracker.clone(), + scrape_handler.clone(), whitelist_authorization.clone(), opt_stats_event_sender.clone(), ban_service.clone(), @@ -137,10 +141,18 @@ impl Launcher { ServiceHealthCheckJob::new(binding, info, job) } - #[instrument(skip(receiver, tracker, whitelist_authorization, 
opt_stats_event_sender, ban_service))] + #[instrument(skip( + receiver, + tracker, + scrape_handler, + whitelist_authorization, + opt_stats_event_sender, + ban_service + ))] async fn run_udp_server_main( mut receiver: Receiver, tracker: Arc, + scrape_handler: Arc, whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, ban_service: Arc>, @@ -212,6 +224,7 @@ impl Launcher { let processor = Processor::new( receiver.socket.clone(), tracker.clone(), + scrape_handler.clone(), whitelist_authorization.clone(), opt_stats_event_sender.clone(), cookie_lifetime, diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index af51b7fb7..668265752 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -83,6 +83,7 @@ mod tests { let started = stopped .start( app_container.tracker, + app_container.scrape_handler, app_container.whitelist_authorization, app_container.stats_event_sender, app_container.ban_service, @@ -116,6 +117,7 @@ mod tests { let started = stopped .start( app_container.tracker, + app_container.scrape_handler, app_container.whitelist_authorization, app_container.stats_event_sender, app_container.ban_service, diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index fe3666c1d..889a2a913 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -10,6 +10,7 @@ use tracing::{instrument, Level}; use super::banning::BanService; use super::bound_socket::BoundSocket; +use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::event::UdpResponseKind; use crate::core::{statistics, whitelist, Tracker}; @@ -19,6 +20,7 @@ use crate::servers::udp::{handlers, RawRequest}; pub struct Processor { socket: Arc, tracker: Arc, + scrape_handler: Arc, whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, cookie_lifetime: f64, @@ -28,6 +30,7 @@ impl Processor { pub fn new( socket: Arc, tracker: 
Arc, + scrape_handler: Arc, whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, cookie_lifetime: f64, @@ -35,6 +38,7 @@ impl Processor { Self { socket, tracker, + scrape_handler, whitelist_authorization, opt_stats_event_sender, cookie_lifetime, @@ -50,6 +54,7 @@ impl Processor { let response = handlers::handle_packet( request, &self.tracker, + &self.scrape_handler, &self.whitelist_authorization, &self.opt_stats_event_sender, self.socket.address(), diff --git a/src/servers/udp/server/spawner.rs b/src/servers/udp/server/spawner.rs index aecba39ec..82fd808c4 100644 --- a/src/servers/udp/server/spawner.rs +++ b/src/servers/udp/server/spawner.rs @@ -11,6 +11,7 @@ use tokio::task::JoinHandle; use super::banning::BanService; use super::launcher::Launcher; use crate::bootstrap::jobs::Started; +use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; use crate::core::{whitelist, Tracker}; use crate::servers::signals::Halted; @@ -31,6 +32,7 @@ impl Spawner { pub fn spawn_launcher( &self, tracker: Arc, + scrape_handler: Arc, whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, ban_service: Arc>, @@ -43,6 +45,7 @@ impl Spawner { tokio::spawn(async move { Launcher::run_with_graceful_shutdown( tracker, + scrape_handler, whitelist_authorization, opt_stats_event_sender, ban_service, diff --git a/src/servers/udp/server/states.rs b/src/servers/udp/server/states.rs index 9a01b5c6d..d2c91b03d 100644 --- a/src/servers/udp/server/states.rs +++ b/src/servers/udp/server/states.rs @@ -13,6 +13,7 @@ use super::banning::BanService; use super::spawner::Spawner; use super::{Server, UdpError}; use crate::bootstrap::jobs::Started; +use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; use crate::core::{whitelist, Tracker}; use crate::servers::registar::{ServiceRegistration, ServiceRegistrationForm}; @@ -64,11 +65,12 @@ impl Server { /// # Panics /// /// It panics if unable to receive the 
bound socket address from service. - /// - #[instrument(skip(self, tracker, whitelist_authorization, opt_stats_event_sender, ban_service, form), err, ret(Display, level = Level::INFO))] + #[allow(clippy::too_many_arguments)] + #[instrument(skip(self, tracker, scrape_handler, whitelist_authorization, opt_stats_event_sender, ban_service, form), err, ret(Display, level = Level::INFO))] pub async fn start( self, tracker: Arc, + scrape_handler: Arc, whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, ban_service: Arc>, @@ -83,6 +85,7 @@ impl Server { // May need to wrap in a task to about a tokio bug. let task = self.state.spawner.spawn_launcher( tracker, + scrape_handler, whitelist_authorization, opt_stats_event_sender, ban_service, diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index c0de4efbe..63d372880 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -8,6 +8,7 @@ use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; use torrust_tracker_lib::core::authentication::handler::KeysHandler; use torrust_tracker_lib::core::authentication::service::AuthenticationService; use torrust_tracker_lib::core::databases::Database; +use torrust_tracker_lib::core::scrape_handler::ScrapeHandler; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -21,6 +22,7 @@ pub struct Environment { pub config: Arc, pub database: Arc>, pub tracker: Arc, + pub scrape_handler: Arc, pub in_memory_torrent_repository: Arc, pub keys_handler: Arc, pub authentication_service: Arc, @@ -63,6 +65,7 @@ impl Environment { config, database: app_container.database.clone(), tracker: app_container.tracker.clone(), + scrape_handler: app_container.scrape_handler.clone(), in_memory_torrent_repository: app_container.in_memory_torrent_repository.clone(), 
keys_handler: app_container.keys_handler.clone(), authentication_service: app_container.authentication_service.clone(), @@ -81,6 +84,7 @@ impl Environment { config: self.config, database: self.database.clone(), tracker: self.tracker.clone(), + scrape_handler: self.scrape_handler.clone(), in_memory_torrent_repository: self.in_memory_torrent_repository.clone(), keys_handler: self.keys_handler.clone(), authentication_service: self.authentication_service.clone(), @@ -93,6 +97,7 @@ impl Environment { .server .start( self.tracker, + self.scrape_handler, self.authentication_service, self.whitelist_authorization, self.stats_event_sender, @@ -114,6 +119,7 @@ impl Environment { config: self.config, database: self.database, tracker: self.tracker, + scrape_handler: self.scrape_handler, in_memory_torrent_repository: self.in_memory_torrent_repository, keys_handler: self.keys_handler, authentication_service: self.authentication_service, diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index c02e35e6e..16719c317 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -6,6 +6,7 @@ use tokio::sync::RwLock; use torrust_tracker_configuration::{Configuration, UdpTracker, DEFAULT_TIMEOUT}; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::core::databases::Database; +use torrust_tracker_lib::core::scrape_handler::ScrapeHandler; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::{whitelist, Tracker}; @@ -23,6 +24,7 @@ where pub config: Arc, pub database: Arc>, pub tracker: Arc, + pub scrape_handler: Arc, pub whitelist_authorization: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, @@ -61,6 +63,7 @@ impl Environment { config, database: app_container.database.clone(), tracker: app_container.tracker.clone(), + scrape_handler: 
app_container.scrape_handler.clone(), whitelist_authorization: app_container.whitelist_authorization.clone(), stats_event_sender: app_container.stats_event_sender.clone(), stats_repository: app_container.stats_repository.clone(), @@ -77,6 +80,7 @@ impl Environment { config: self.config, database: self.database.clone(), tracker: self.tracker.clone(), + scrape_handler: self.scrape_handler.clone(), whitelist_authorization: self.whitelist_authorization.clone(), stats_event_sender: self.stats_event_sender.clone(), stats_repository: self.stats_repository.clone(), @@ -86,6 +90,7 @@ impl Environment { .server .start( self.tracker, + self.scrape_handler, self.whitelist_authorization, self.stats_event_sender, self.ban_service, @@ -115,6 +120,7 @@ impl Environment { config: self.config, database: self.database, tracker: self.tracker, + scrape_handler: self.scrape_handler, whitelist_authorization: self.whitelist_authorization, stats_event_sender: self.stats_event_sender, stats_repository: self.stats_repository, From f23a3fc2a974f483a7a42f1d9270ea885f9a239f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 27 Jan 2025 12:04:23 +0000 Subject: [PATCH 157/802] chore: [#1207] remove deprecated comment --- src/core/mod.rs | 6 ------ src/core/scrape_handler.rs | 2 -- 2 files changed, 8 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 064e8eb3e..1d969068a 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -583,8 +583,6 @@ impl Tracker { /// It handles an announce request. /// - /// # Context: Tracker - /// /// BEP 03: [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html). pub fn announce( &self, @@ -628,8 +626,6 @@ impl Tracker { /// It updates the torrent entry in memory, it also stores in the database /// the torrent info data which is persistent, and finally return the data /// needed for a `announce` request response. 
- /// - /// # Context: Tracker #[must_use] pub fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { let swarm_metadata_before = match self.in_memory_torrent_repository.get_opt_swarm_metadata(info_hash) { @@ -652,8 +648,6 @@ impl Tracker { } /// It stores the torrents stats into the database (if persistency is enabled). - /// - /// # Context: Tracker fn persist_stats(&self, info_hash: &InfoHash, swarm_metadata: &SwarmMetadata) { if self.config.tracker_policy.persistent_torrent_completed_stat { let completed = swarm_metadata.downloaded; diff --git a/src/core/scrape_handler.rs b/src/core/scrape_handler.rs index 47049ed71..1d513a5a9 100644 --- a/src/core/scrape_handler.rs +++ b/src/core/scrape_handler.rs @@ -29,8 +29,6 @@ impl ScrapeHandler { /// It handles a scrape request. /// - /// # Context: Tracker - /// /// BEP 48: [Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). pub async fn scrape(&self, info_hashes: &Vec) -> ScrapeData { let mut scrape_data = ScrapeData::empty(); From f741d06db2281f44451e8097dcc0ce4e8b4972aa Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 27 Jan 2025 12:38:34 +0000 Subject: [PATCH 158/802] refactor: [#1207] use InMemoryTorrentRepository in tests to add torrents --- src/core/mod.rs | 42 +++++++++++------------ src/core/services/torrent.rs | 28 +++++++++------- src/servers/udp/handlers.rs | 56 ++++++++++++++++++------------- tests/servers/api/environment.rs | 2 +- tests/servers/http/environment.rs | 2 +- tests/servers/udp/environment.rs | 7 +++- 6 files changed, 77 insertions(+), 60 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 1d969068a..5d501b003 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -693,7 +693,7 @@ mod tests { use crate::core::whitelist::manager::WhiteListManager; use crate::core::{whitelist, Tracker}; - fn public_tracker() -> (Arc, Arc) { + fn public_tracker() -> (Arc, Arc, Arc) { let config = 
configuration::ephemeral_public(); let ( @@ -714,7 +714,7 @@ mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - (tracker, scrape_handler) + (tracker, in_memory_torrent_repository, scrape_handler) } fn public_tracker_and_in_memory_torrents_repository() -> (Arc, Arc) { @@ -871,12 +871,12 @@ mod tests { #[tokio::test] async fn it_should_return_the_peers_for_a_given_torrent() { - let (tracker, in_memory_torrent_repository) = public_tracker_and_in_memory_torrents_repository(); + let (_tracker, in_memory_torrent_repository) = public_tracker_and_in_memory_torrents_repository(); let info_hash = sample_info_hash(); let peer = sample_peer(); - let _ = tracker.upsert_peer_and_get_stats(&info_hash, &peer); + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); @@ -903,7 +903,7 @@ mod tests { #[tokio::test] async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { - let (tracker, in_memory_torrent_repository) = public_tracker_and_in_memory_torrents_repository(); + let (_tracker, in_memory_torrent_repository) = public_tracker_and_in_memory_torrents_repository(); let info_hash = sample_info_hash(); @@ -918,7 +918,7 @@ mod tests { event: AnnounceEvent::Completed, }; - let _ = tracker.upsert_peer_and_get_stats(&info_hash, &peer); + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); } let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); @@ -928,12 +928,12 @@ mod tests { #[tokio::test] async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { - let (tracker, _scrape_handler) = public_tracker(); + let (tracker, in_memory_torrent_repository, _scrape_handler) = public_tracker(); let info_hash = sample_info_hash(); let peer = sample_peer(); - let _ = tracker.upsert_peer_and_get_stats(&info_hash, &peer); + let () = 
in_memory_torrent_repository.upsert_peer(&info_hash, &peer); let peers = tracker .in_memory_torrent_repository @@ -944,13 +944,13 @@ mod tests { #[tokio::test] async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { - let (tracker, _scrape_handler) = public_tracker(); + let (tracker, in_memory_torrent_repository, _scrape_handler) = public_tracker(); let info_hash = sample_info_hash(); let excluded_peer = sample_peer(); - let _ = tracker.upsert_peer_and_get_stats(&info_hash, &excluded_peer); + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &excluded_peer); // Add 74 peers for idx in 2..=75 { @@ -964,7 +964,7 @@ mod tests { event: AnnounceEvent::Completed, }; - let _ = tracker.upsert_peer_and_get_stats(&info_hash, &peer); + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); } let peers = tracker @@ -976,9 +976,9 @@ mod tests { #[tokio::test] async fn it_should_return_the_torrent_metrics() { - let (tracker, in_memory_torrent_repository) = public_tracker_and_in_memory_torrents_repository(); + let (_tracker, in_memory_torrent_repository) = public_tracker_and_in_memory_torrents_repository(); - let _ = tracker.upsert_peer_and_get_stats(&sample_info_hash(), &leecher()); + let () = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &leecher()); let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); @@ -995,11 +995,11 @@ mod tests { #[tokio::test] async fn it_should_get_many_the_torrent_metrics() { - let (tracker, in_memory_torrent_repository) = public_tracker_and_in_memory_torrents_repository(); + let (_tracker, in_memory_torrent_repository) = public_tracker_and_in_memory_torrents_repository(); let start_time = std::time::Instant::now(); for i in 0..1_000_000 { - let _ = tracker.upsert_peer_and_get_stats(&gen_seeded_infohash(&i), &leecher()); + let () = in_memory_torrent_repository.upsert_peer(&gen_seeded_infohash(&i), &leecher()); } let result_a = 
start_time.elapsed(); @@ -1130,7 +1130,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_announce_data_with_an_empty_peer_list_when_it_is_the_first_announced_peer() { - let (tracker, _scrape_handler) = public_tracker(); + let (tracker, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); let mut peer = sample_peer(); @@ -1141,7 +1141,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_announce_data_with_the_previously_announced_peers() { - let (tracker, _scrape_handler) = public_tracker(); + let (tracker, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); let mut previously_announced_peer = sample_peer_1(); tracker.announce( @@ -1166,7 +1166,7 @@ mod tests { #[tokio::test] async fn when_the_peer_is_a_seeder() { - let (tracker, _scrape_handler) = public_tracker(); + let (tracker, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); let mut peer = seeder(); @@ -1177,7 +1177,7 @@ mod tests { #[tokio::test] async fn when_the_peer_is_a_leecher() { - let (tracker, _scrape_handler) = public_tracker(); + let (tracker, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); let mut peer = leecher(); @@ -1188,7 +1188,7 @@ mod tests { #[tokio::test] async fn when_a_previously_announced_started_peer_has_completed_downloading() { - let (tracker, _scrape_handler) = public_tracker(); + let (tracker, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); // We have to announce with "started" event because peer does not count if peer was not previously known let mut started_peer = started_peer(); @@ -1215,7 +1215,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_swarm_metadata_for_the_requested_file_if_the_tracker_has_that_torrent() { - let (tracker, scrape_handler) = public_tracker(); + let (tracker, _in_memory_torrent_repository, scrape_handler) = public_tracker(); let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // # DevSkim: ignore DS173237 
diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 8677096cc..5faaef1d1 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -189,11 +189,11 @@ mod tests { async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { let config = tracker_configuration(); - let (tracker, in_memory_torrent_repository) = initialize_tracker_and_deps(&config); + let (_tracker, in_memory_torrent_repository) = initialize_tracker_and_deps(&config); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - let _ = tracker.upsert_peer_and_get_stats(&info_hash, &sample_peer()); + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); let torrent_info = get_torrent_info(in_memory_torrent_repository.clone(), &info_hash) .await @@ -242,12 +242,12 @@ mod tests { async fn should_return_a_summarized_info_for_all_torrents() { let config = tracker_configuration(); - let (tracker, in_memory_torrent_repository) = initialize_tracker_and_deps(&config); + let (_tracker, in_memory_torrent_repository) = initialize_tracker_and_deps(&config); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - let _ = tracker.upsert_peer_and_get_stats(&info_hash, &sample_peer()); + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); let torrents = get_torrents_page(in_memory_torrent_repository.clone(), Some(&Pagination::default())).await; @@ -266,15 +266,16 @@ mod tests { async fn should_allow_limiting_the_number_of_torrents_in_the_result() { let config = tracker_configuration(); - let (tracker, in_memory_torrent_repository) = initialize_tracker_and_deps(&config); + let (_tracker, in_memory_torrent_repository) = initialize_tracker_and_deps(&config); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); + let hash2 
= "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _ = tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()); - let _ = tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()); + let () = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); + let () = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer()); let offset = 0; let limit = 1; @@ -288,15 +289,16 @@ mod tests { async fn should_allow_using_pagination_in_the_result() { let config = tracker_configuration(); - let (tracker, in_memory_torrent_repository) = initialize_tracker_and_deps(&config); + let (_tracker, in_memory_torrent_repository) = initialize_tracker_and_deps(&config); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); + let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _ = tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()); - let _ = tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()); + let () = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); + let () = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer()); let offset = 1; let limit = 4000; @@ -319,15 +321,15 @@ mod tests { async fn should_return_torrents_ordered_by_info_hash() { let config = tracker_configuration(); - let (tracker, in_memory_torrent_repository) = initialize_tracker_and_deps(&config); + let (_tracker, in_memory_torrent_repository) = initialize_tracker_and_deps(&config); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); - let _ = tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()); + let () = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); let 
info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _ = tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()); + let () = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer()); let torrents = get_torrents_page(in_memory_torrent_repository.clone(), Some(&Pagination::default())).await; diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index f8f57aea9..d6073d2e8 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -883,6 +883,7 @@ mod tests { }; use mockall::predicate::eq; + use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::{self, statistics, whitelist}; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -1034,7 +1035,7 @@ mod tests { assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); } - fn add_a_torrent_peer_using_ipv6(tracker: &Arc) { + fn add_a_torrent_peer_using_ipv6(in_memory_torrent_repository: &Arc) { let info_hash = AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -1047,7 +1048,7 @@ mod tests { .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); - let _ = tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv6); + let () = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv6); } async fn announce_a_new_peer_using_ipv4( @@ -1079,14 +1080,14 @@ mod tests { let ( tracker, _scrape_handler, - _in_memory_torrent_repository, + in_memory_torrent_repository, _stats_event_sender, _in_memory_whitelist, _whitelist_manager, whitelist_authorization, ) = public_tracker(); - add_a_torrent_peer_using_ipv6(&tracker); + add_a_torrent_peer_using_ipv6(&in_memory_torrent_repository); let response = announce_a_new_peer_using_ipv4(tracker.clone(), whitelist_authorization).await; @@ -1201,6 +1202,7 @@ mod tests { }; use 
mockall::predicate::eq; + use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::{self, statistics, whitelist}; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -1357,7 +1359,7 @@ mod tests { assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); } - fn add_a_torrent_peer_using_ipv4(tracker: &Arc) { + fn add_a_torrent_peer_using_ipv4(in_memory_torrent_repository: &Arc) { let info_hash = AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -1369,7 +1371,7 @@ mod tests { .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); - let _ = tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv4); + let () = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv4); } async fn announce_a_new_peer_using_ipv6( @@ -1404,14 +1406,14 @@ mod tests { let ( tracker, _scrape_handler, - _in_memory_torrent_repository, + in_memory_torrent_repository, _stats_event_sender, _in_memory_whitelist, _whitelist_manager, whitelist_authorization, ) = public_tracker(); - add_a_torrent_peer_using_ipv4(&tracker); + add_a_torrent_peer_using_ipv4(&in_memory_torrent_repository); let response = announce_a_new_peer_using_ipv6(tracker.clone(), whitelist_authorization).await; @@ -1559,7 +1561,7 @@ mod tests { use super::{gen_remote_fingerprint, TorrentPeerBuilder}; use crate::core::scrape_handler::ScrapeHandler; use crate::core::services::statistics; - use crate::core::{self}; + use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{ @@ -1618,7 +1620,11 @@ mod tests { ); } - async fn add_a_seeder(tracker: Arc, remote_addr: &SocketAddr, info_hash: &InfoHash) { + async fn add_a_seeder( + 
in_memory_torrent_repository: Arc, + remote_addr: &SocketAddr, + info_hash: &InfoHash, + ) { let peer_id = PeerId([255u8; 20]); let peer = TorrentPeerBuilder::new() @@ -1627,7 +1633,7 @@ mod tests { .with_number_of_bytes_left(0) .into(); - let _ = tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer); + let () = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer); } fn build_scrape_request(remote_addr: &SocketAddr, info_hash: &InfoHash) -> ScrapeRequest { @@ -1640,14 +1646,17 @@ mod tests { } } - async fn add_a_sample_seeder_and_scrape(tracker: Arc, scrape_handler: Arc) -> Response { + async fn add_a_sample_seeder_and_scrape( + in_memory_torrent_repository: Arc, + scrape_handler: Arc, + ) -> Response { let (stats_event_sender, _stats_repository) = statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); - add_a_seeder(tracker.clone(), &remote_addr, &info_hash).await; + add_a_seeder(in_memory_torrent_repository.clone(), &remote_addr, &info_hash).await; let request = build_scrape_request(&remote_addr, &info_hash); @@ -1678,17 +1687,18 @@ mod tests { #[tokio::test] async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { let ( - tracker, + _tracker, scrape_handler, - _in_memory_torrent_repository, + in_memory_torrent_repository, _stats_event_sender, _in_memory_whitelist, _whitelist_manager, _whitelist_authorization, ) = public_tracker(); - let torrent_stats = - match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone(), scrape_handler.clone()).await); + let torrent_stats = match_scrape_response( + add_a_sample_seeder_and_scrape(in_memory_torrent_repository.clone(), scrape_handler.clone()).await, + ); let expected_torrent_stats = vec![TorrentScrapeStatistics { seeders: NumberOfPeers(1.into()), @@ -1712,9 +1722,9 @@ mod tests { #[tokio::test] async fn 
should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { let ( - tracker, + _tracker, scrape_handler, - _in_memory_torrent_repository, + in_memory_torrent_repository, stats_event_sender, in_memory_whitelist, _whitelist_manager, @@ -1724,7 +1734,7 @@ mod tests { let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); - add_a_seeder(tracker.clone(), &remote_addr, &info_hash).await; + add_a_seeder(in_memory_torrent_repository.clone(), &remote_addr, &info_hash).await; in_memory_whitelist.add(&info_hash.0.into()).await; @@ -1755,9 +1765,9 @@ mod tests { #[tokio::test] async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { let ( - tracker, + _tracker, scrape_handler, - _in_memory_torrent_repository, + in_memory_torrent_repository, stats_event_sender, _in_memory_whitelist, _whitelist_manager, @@ -1767,7 +1777,7 @@ mod tests { let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); - add_a_seeder(tracker.clone(), &remote_addr, &info_hash).await; + add_a_seeder(in_memory_torrent_repository.clone(), &remote_addr, &info_hash).await; let request = build_scrape_request(&remote_addr, &info_hash); diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 70f071bf4..927f76efe 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -45,7 +45,7 @@ where { /// Add a torrent to the tracker pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let _ = self.tracker.upsert_peer_and_get_stats(info_hash, peer); + let () = self.in_memory_torrent_repository.upsert_peer(info_hash, peer); } } diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 63d372880..beaf2d38c 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -37,7 +37,7 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker pub fn 
add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let _ = self.tracker.upsert_peer_and_get_stats(info_hash, peer); + let () = self.in_memory_torrent_repository.upsert_peer(info_hash, peer); } } diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 16719c317..09714146d 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -9,6 +9,7 @@ use torrust_tracker_lib::core::databases::Database; use torrust_tracker_lib::core::scrape_handler::ScrapeHandler; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; +use torrust_tracker_lib::core::torrent::repository::in_memory::InMemoryTorrentRepository; use torrust_tracker_lib::core::{whitelist, Tracker}; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_lib::servers::udp::server::banning::BanService; @@ -24,6 +25,7 @@ where pub config: Arc, pub database: Arc>, pub tracker: Arc, + pub in_memory_torrent_repository: Arc, pub scrape_handler: Arc, pub whitelist_authorization: Arc, pub stats_event_sender: Arc>>, @@ -40,7 +42,7 @@ where /// Add a torrent to the tracker #[allow(dead_code)] pub fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let _ = self.tracker.upsert_peer_and_get_stats(info_hash, peer); + let () = self.in_memory_torrent_repository.upsert_peer(info_hash, peer); } } @@ -63,6 +65,7 @@ impl Environment { config, database: app_container.database.clone(), tracker: app_container.tracker.clone(), + in_memory_torrent_repository: app_container.in_memory_torrent_repository.clone(), scrape_handler: app_container.scrape_handler.clone(), whitelist_authorization: app_container.whitelist_authorization.clone(), stats_event_sender: app_container.stats_event_sender.clone(), @@ -80,6 +83,7 @@ impl Environment { config: self.config, database: self.database.clone(), tracker: self.tracker.clone(), + in_memory_torrent_repository: 
self.in_memory_torrent_repository.clone(), scrape_handler: self.scrape_handler.clone(), whitelist_authorization: self.whitelist_authorization.clone(), stats_event_sender: self.stats_event_sender.clone(), @@ -120,6 +124,7 @@ impl Environment { config: self.config, database: self.database, tracker: self.tracker, + in_memory_torrent_repository: self.in_memory_torrent_repository, scrape_handler: self.scrape_handler, whitelist_authorization: self.whitelist_authorization, stats_event_sender: self.stats_event_sender, From 401c228b4e066a85af98af8a301e63ccaa366b3b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 27 Jan 2025 12:45:53 +0000 Subject: [PATCH 159/802] refactor: [#1207] make method upsert_peer_and_get_stats private --- src/core/mod.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 5d501b003..5e5c20699 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -627,7 +627,7 @@ impl Tracker { /// the torrent info data which is persistent, and finally return the data /// needed for a `announce` request response. 
#[must_use] - pub fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { + fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { let swarm_metadata_before = match self.in_memory_torrent_repository.get_opt_swarm_metadata(info_hash) { Some(swarm_metadata) => swarm_metadata, None => SwarmMetadata::zeroed(), @@ -1393,7 +1393,10 @@ mod tests { use aquatic_udp_protocol::AnnounceEvent; use torrust_tracker_torrent_repository::entry::EntrySync; - use crate::core::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database}; + use crate::core::tests::the_tracker::{ + peer_ip, sample_info_hash, sample_peer, tracker_persisting_torrents_in_database, + }; + use crate::core::PeersWanted; #[tokio::test] async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { @@ -1404,12 +1407,12 @@ mod tests { let mut peer = sample_peer(); peer.event = AnnounceEvent::Started; - let swarm_stats = tracker.upsert_peer_and_get_stats(&info_hash, &peer); - assert_eq!(swarm_stats.downloaded, 0); + let announce_data = tracker.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); + assert_eq!(announce_data.stats.downloaded, 0); peer.event = AnnounceEvent::Completed; - let swarm_stats = tracker.upsert_peer_and_get_stats(&info_hash, &peer); - assert_eq!(swarm_stats.downloaded, 1); + let announce_data = tracker.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); + assert_eq!(announce_data.stats.downloaded, 1); // Remove the newly updated torrent from memory let _unused = in_memory_torrent_repository.remove(&info_hash); From 026f957192072b4b62b53e4f291b3f38904390cd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 27 Jan 2025 13:47:11 +0000 Subject: [PATCH 160/802] refactor: [#1207] extract AnnounceHandler --- src/app.rs | 2 + src/bootstrap/app.rs | 8 + src/bootstrap/jobs/http_tracker.rs | 18 +- src/bootstrap/jobs/udp_tracker.rs | 5 
+ src/container.rs | 2 + src/core/announce_handler.rs | 164 +++++++++++++ src/core/mod.rs | 298 +++++++++-------------- src/servers/http/server.rs | 8 + src/servers/http/v1/handlers/announce.rs | 49 +++- src/servers/http/v1/routes.rs | 5 + src/servers/http/v1/services/announce.rs | 79 ++++-- src/servers/http/v1/services/scrape.rs | 30 ++- src/servers/udp/handlers.rs | 90 ++++++- src/servers/udp/server/launcher.rs | 8 + src/servers/udp/server/mod.rs | 2 + src/servers/udp/server/processor.rs | 5 + src/servers/udp/server/spawner.rs | 3 + src/servers/udp/server/states.rs | 5 +- tests/servers/http/environment.rs | 6 + tests/servers/udp/environment.rs | 6 + 20 files changed, 554 insertions(+), 239 deletions(-) create mode 100644 src/core/announce_handler.rs diff --git a/src/app.rs b/src/app.rs index 3f0e8d399..00414bc10 100644 --- a/src/app.rs +++ b/src/app.rs @@ -80,6 +80,7 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< udp_tracker::start_job( udp_tracker_config, app_container.tracker.clone(), + app_container.announce_handler.clone(), app_container.scrape_handler.clone(), app_container.whitelist_authorization.clone(), app_container.stats_event_sender.clone(), @@ -100,6 +101,7 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< if let Some(job) = http_tracker::start_job( http_tracker_config, app_container.tracker.clone(), + app_container.announce_handler.clone(), app_container.scrape_handler.clone(), app_container.authentication_service.clone(), app_container.whitelist_authorization.clone(), diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index a0b6df3ca..fa45998bb 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -22,6 +22,7 @@ use tracing::instrument; use super::config::initialize_configuration; use crate::bootstrap; use crate::container::AppContainer; +use crate::core::announce_handler::AnnounceHandler; use crate::core::authentication::handler::KeysHandler; use 
crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; @@ -121,11 +122,18 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { &db_torrent_repository, )); + let announce_handler = Arc::new(AnnounceHandler::new( + &configuration.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); AppContainer { database, tracker, + announce_handler, scrape_handler, keys_handler, authentication_service, diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 2e76e2f31..5da7da739 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -19,6 +19,7 @@ use torrust_tracker_configuration::HttpTracker; use tracing::instrument; use super::make_rust_tls; +use crate::core::announce_handler::AnnounceHandler; use crate::core::authentication::service::AuthenticationService; use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; @@ -39,6 +40,7 @@ use crate::servers::registar::ServiceRegistrationForm; #[instrument(skip( config, tracker, + announce_handler, scrape_handler, authentication_service, whitelist_authorization, @@ -48,6 +50,7 @@ use crate::servers::registar::ServiceRegistrationForm; pub async fn start_job( config: &HttpTracker, tracker: Arc, + announce_handler: Arc, scrape_handler: Arc, authentication_service: Arc, whitelist_authorization: Arc, @@ -67,6 +70,7 @@ pub async fn start_job( socket, tls, tracker.clone(), + announce_handler.clone(), scrape_handler.clone(), authentication_service.clone(), whitelist_authorization.clone(), @@ -80,11 +84,21 @@ pub async fn start_job( #[allow(clippy::too_many_arguments)] #[allow(clippy::async_yields_async)] -#[instrument(skip(socket, tls, tracker, scrape_handler, 
whitelist_authorization, stats_event_sender, form))] +#[instrument(skip( + socket, + tls, + tracker, + announce_handler, + scrape_handler, + whitelist_authorization, + stats_event_sender, + form +))] async fn start_v1( socket: SocketAddr, tls: Option, tracker: Arc, + announce_handler: Arc, scrape_handler: Arc, authentication_service: Arc, whitelist_authorization: Arc, @@ -94,6 +108,7 @@ async fn start_v1( let server = HttpServer::new(Launcher::new(socket, tls)) .start( tracker, + announce_handler, scrape_handler, authentication_service, whitelist_authorization, @@ -142,6 +157,7 @@ mod tests { start_job( config, app_container.tracker, + app_container.announce_handler, app_container.scrape_handler, app_container.authentication_service, app_container.whitelist_authorization, diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index dd55e4b8b..d43c1c930 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -13,6 +13,7 @@ use tokio::task::JoinHandle; use torrust_tracker_configuration::UdpTracker; use tracing::instrument; +use crate::core::announce_handler::AnnounceHandler; use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; use crate::core::{self, whitelist}; @@ -32,10 +33,12 @@ use crate::servers::udp::UDP_TRACKER_LOG_TARGET; /// It will panic if it is unable to start the UDP service. /// It will panic if the task did not finish successfully. 
#[must_use] +#[allow(clippy::too_many_arguments)] #[allow(clippy::async_yields_async)] #[instrument(skip( config, tracker, + announce_handler, scrape_handler, whitelist_authorization, stats_event_sender, @@ -45,6 +48,7 @@ use crate::servers::udp::UDP_TRACKER_LOG_TARGET; pub async fn start_job( config: &UdpTracker, tracker: Arc, + announce_handler: Arc, scrape_handler: Arc, whitelist_authorization: Arc, stats_event_sender: Arc>>, @@ -57,6 +61,7 @@ pub async fn start_job( let server = Server::new(Spawner::new(bind_to)) .start( tracker, + announce_handler, scrape_handler, whitelist_authorization, stats_event_sender, diff --git a/src/container.rs b/src/container.rs index a73862006..4e958b6ed 100644 --- a/src/container.rs +++ b/src/container.rs @@ -2,6 +2,7 @@ use std::sync::Arc; use tokio::sync::RwLock; +use crate::core::announce_handler::AnnounceHandler; use crate::core::authentication::handler::KeysHandler; use crate::core::authentication::service::AuthenticationService; use crate::core::databases::Database; @@ -18,6 +19,7 @@ use crate::servers::udp::server::banning::BanService; pub struct AppContainer { pub database: Arc>, pub tracker: Arc, + pub announce_handler: Arc, pub scrape_handler: Arc, pub keys_handler: Arc, pub authentication_service: Arc, diff --git a/src/core/announce_handler.rs b/src/core/announce_handler.rs new file mode 100644 index 000000000..a037d33d4 --- /dev/null +++ b/src/core/announce_handler.rs @@ -0,0 +1,164 @@ +use std::net::IpAddr; +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_configuration::{Core, TORRENT_PEERS_LIMIT}; +use torrust_tracker_primitives::core::AnnounceData; +use torrust_tracker_primitives::peer; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + +use super::torrent::repository::in_memory::InMemoryTorrentRepository; +use super::torrent::repository::persisted::DatabasePersistentTorrentRepository; + +pub struct AnnounceHandler { + /// The tracker configuration. 
+ config: Core, + + /// The in-memory torrents repository. + in_memory_torrent_repository: Arc, + + /// The persistent torrents repository. + db_torrent_repository: Arc, +} + +impl AnnounceHandler { + #[must_use] + pub fn new( + config: &Core, + in_memory_torrent_repository: &Arc, + db_torrent_repository: &Arc, + ) -> Self { + Self { + config: config.clone(), + in_memory_torrent_repository: in_memory_torrent_repository.clone(), + db_torrent_repository: db_torrent_repository.clone(), + } + } + + /// It handles an announce request. + /// + /// BEP 03: [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html). + pub fn announce( + &self, + info_hash: &InfoHash, + peer: &mut peer::Peer, + remote_client_ip: &IpAddr, + peers_wanted: &PeersWanted, + ) -> AnnounceData { + // code-review: maybe instead of mutating the peer we could just return + // a tuple with the new peer and the announce data: (Peer, AnnounceData). + // It could even be a different struct: `StoredPeer` or `PublicPeer`. + + // code-review: in the `scrape` function we perform an authorization check. + // We check if the torrent is whitelisted. Should we also check authorization here? + // I think so because the `Tracker` has the responsibility for checking authentication and authorization. + // The `Tracker` has delegated that responsibility to the handlers + // (because we want to return a friendly error response) but that does not mean we should + // double-check authorization at this domain level too. + // I would propose to return a `Result` here. + // Besides, regarding authentication the `Tracker` is also responsible for authentication but + // we are actually handling authentication at the handlers level. So I would extract that + // responsibility into another authentication service. 
+ + tracing::debug!("Before: {peer:?}"); + peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip)); + tracing::debug!("After: {peer:?}"); + + let stats = self.upsert_peer_and_get_stats(info_hash, peer); + + let peers = self + .in_memory_torrent_repository + .get_peers_for(info_hash, peer, peers_wanted.limit()); + + AnnounceData { + peers, + stats, + policy: self.config.announce_policy, + } + } + + /// It updates the torrent entry in memory, it also stores in the database + /// the torrent info data which is persistent, and finally return the data + /// needed for a `announce` request response. + #[must_use] + fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { + let swarm_metadata_before = match self.in_memory_torrent_repository.get_opt_swarm_metadata(info_hash) { + Some(swarm_metadata) => swarm_metadata, + None => SwarmMetadata::zeroed(), + }; + + self.in_memory_torrent_repository.upsert_peer(info_hash, peer); + + let swarm_metadata_after = match self.in_memory_torrent_repository.get_opt_swarm_metadata(info_hash) { + Some(swarm_metadata) => swarm_metadata, + None => SwarmMetadata::zeroed(), + }; + + if swarm_metadata_before != swarm_metadata_after { + self.persist_stats(info_hash, &swarm_metadata_after); + } + + swarm_metadata_after + } + + /// It stores the torrents stats into the database (if persistency is enabled). + fn persist_stats(&self, info_hash: &InfoHash, swarm_metadata: &SwarmMetadata) { + if self.config.tracker_policy.persistent_torrent_completed_stat { + let completed = swarm_metadata.downloaded; + let info_hash = *info_hash; + + drop(self.db_torrent_repository.save(&info_hash, completed)); + } + } +} + +/// How many peers the peer announcing wants in the announce response. +#[derive(Clone, Debug, PartialEq, Default)] +pub enum PeersWanted { + /// The peer wants as many peers as possible in the announce response. 
+ #[default] + All, + /// The peer only wants a certain amount of peers in the announce response. + Only { amount: usize }, +} + +impl PeersWanted { + #[must_use] + pub fn only(limit: u32) -> Self { + let amount: usize = match limit.try_into() { + Ok(amount) => amount, + Err(_) => TORRENT_PEERS_LIMIT, + }; + + Self::Only { amount } + } + + fn limit(&self) -> usize { + match self { + PeersWanted::All => TORRENT_PEERS_LIMIT, + PeersWanted::Only { amount } => *amount, + } + } +} + +impl From for PeersWanted { + fn from(value: i32) -> Self { + if value > 0 { + match value.try_into() { + Ok(peers_wanted) => Self::Only { amount: peers_wanted }, + Err(_) => Self::All, + } + } else { + Self::All + } + } +} + +#[must_use] +pub fn assign_ip_address_to_peer(remote_client_ip: &IpAddr, tracker_external_ip: Option) -> IpAddr { + if let Some(host_ip) = tracker_external_ip.filter(|_| remote_client_ip.is_loopback()) { + host_ip + } else { + *remote_client_ip + } +} diff --git a/src/core/mod.rs b/src/core/mod.rs index 5e5c20699..2151ec1ef 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -439,6 +439,7 @@ //! - Torrent metrics //! //! Refer to [`databases`] module for more information about persistence. +pub mod announce_handler; pub mod authentication; pub mod databases; pub mod error; @@ -453,13 +454,9 @@ pub mod peer_tests; use std::net::IpAddr; use std::sync::Arc; -use bittorrent_primitives::info_hash::InfoHash; use torrent::repository::in_memory::InMemoryTorrentRepository; use torrent::repository::persisted::DatabasePersistentTorrentRepository; -use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT}; -use torrust_tracker_primitives::core::AnnounceData; -use torrust_tracker_primitives::peer; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_configuration::{AnnouncePolicy, Core}; /// The domain layer tracker service. /// @@ -475,52 +472,10 @@ pub struct Tracker { config: Core, /// The in-memory torrents repository. 
- in_memory_torrent_repository: Arc, + _in_memory_torrent_repository: Arc, /// The persistent torrents repository. - db_torrent_repository: Arc, -} - -/// How many peers the peer announcing wants in the announce response. -#[derive(Clone, Debug, PartialEq, Default)] -pub enum PeersWanted { - /// The peer wants as many peers as possible in the announce response. - #[default] - All, - /// The peer only wants a certain amount of peers in the announce response. - Only { amount: usize }, -} - -impl PeersWanted { - #[must_use] - pub fn only(limit: u32) -> Self { - let amount: usize = match limit.try_into() { - Ok(amount) => amount, - Err(_) => TORRENT_PEERS_LIMIT, - }; - - Self::Only { amount } - } - - fn limit(&self) -> usize { - match self { - PeersWanted::All => TORRENT_PEERS_LIMIT, - PeersWanted::Only { amount } => *amount, - } - } -} - -impl From for PeersWanted { - fn from(value: i32) -> Self { - if value > 0 { - match value.try_into() { - Ok(peers_wanted) => Self::Only { amount: peers_wanted }, - Err(_) => Self::All, - } - } else { - Self::All - } - } + _db_torrent_repository: Arc, } impl Tracker { @@ -536,8 +491,8 @@ impl Tracker { ) -> Result { Ok(Tracker { config: config.clone(), - in_memory_torrent_repository: in_memory_torrent_repository.clone(), - db_torrent_repository: db_torrent_repository.clone(), + _in_memory_torrent_repository: in_memory_torrent_repository.clone(), + _db_torrent_repository: db_torrent_repository.clone(), }) } @@ -580,91 +535,6 @@ impl Tracker { pub fn get_maybe_external_ip(&self) -> Option { self.config.net.external_ip } - - /// It handles an announce request. - /// - /// BEP 03: [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html). 
- pub fn announce( - &self, - info_hash: &InfoHash, - peer: &mut peer::Peer, - remote_client_ip: &IpAddr, - peers_wanted: &PeersWanted, - ) -> AnnounceData { - // code-review: maybe instead of mutating the peer we could just return - // a tuple with the new peer and the announce data: (Peer, AnnounceData). - // It could even be a different struct: `StoredPeer` or `PublicPeer`. - - // code-review: in the `scrape` function we perform an authorization check. - // We check if the torrent is whitelisted. Should we also check authorization here? - // I think so because the `Tracker` has the responsibility for checking authentication and authorization. - // The `Tracker` has delegated that responsibility to the handlers - // (because we want to return a friendly error response) but that does not mean we should - // double-check authorization at this domain level too. - // I would propose to return a `Result` here. - // Besides, regarding authentication the `Tracker` is also responsible for authentication but - // we are actually handling authentication at the handlers level. So I would extract that - // responsibility into another authentication service. - - tracing::debug!("Before: {peer:?}"); - peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip)); - tracing::debug!("After: {peer:?}"); - - let stats = self.upsert_peer_and_get_stats(info_hash, peer); - - let peers = self - .in_memory_torrent_repository - .get_peers_for(info_hash, peer, peers_wanted.limit()); - - AnnounceData { - peers, - stats, - policy: self.get_announce_policy(), - } - } - - /// It updates the torrent entry in memory, it also stores in the database - /// the torrent info data which is persistent, and finally return the data - /// needed for a `announce` request response. 
- #[must_use] - fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { - let swarm_metadata_before = match self.in_memory_torrent_repository.get_opt_swarm_metadata(info_hash) { - Some(swarm_metadata) => swarm_metadata, - None => SwarmMetadata::zeroed(), - }; - - self.in_memory_torrent_repository.upsert_peer(info_hash, peer); - - let swarm_metadata_after = match self.in_memory_torrent_repository.get_opt_swarm_metadata(info_hash) { - Some(swarm_metadata) => swarm_metadata, - None => SwarmMetadata::zeroed(), - }; - - if swarm_metadata_before != swarm_metadata_after { - self.persist_stats(info_hash, &swarm_metadata_after); - } - - swarm_metadata_after - } - - /// It stores the torrents stats into the database (if persistency is enabled). - fn persist_stats(&self, info_hash: &InfoHash, swarm_metadata: &SwarmMetadata) { - if self.config.tracker_policy.persistent_torrent_completed_stat { - let completed = swarm_metadata.downloaded; - let info_hash = *info_hash; - - drop(self.db_torrent_repository.save(&info_hash, completed)); - } - } -} - -#[must_use] -fn assign_ip_address_to_peer(remote_client_ip: &IpAddr, tracker_external_ip: Option) -> IpAddr { - if let Some(host_ip) = tracker_external_ip.filter(|_| remote_client_ip.is_loopback()) { - host_ip - } else { - *remote_client_ip - } } #[cfg(test)] @@ -680,12 +550,13 @@ mod tests { use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; + use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; - use crate::core::peer::Peer; + use crate::core::announce_handler::AnnounceHandler; use crate::core::scrape_handler::ScrapeHandler; use 
crate::core::services::{initialize_tracker, initialize_whitelist_manager}; use crate::core::torrent::manager::TorrentsManager; @@ -693,7 +564,12 @@ mod tests { use crate::core::whitelist::manager::WhiteListManager; use crate::core::{whitelist, Tracker}; - fn public_tracker() -> (Arc, Arc, Arc) { + fn public_tracker() -> ( + Arc, + Arc, + Arc, + Arc, + ) { let config = configuration::ephemeral_public(); let ( @@ -712,9 +588,15 @@ mod tests { &db_torrent_repository, )); + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - (tracker, in_memory_torrent_repository, scrape_handler) + (tracker, announce_handler, in_memory_torrent_repository, scrape_handler) } fn public_tracker_and_in_memory_torrents_repository() -> (Arc, Arc) { @@ -739,8 +621,10 @@ mod tests { (tracker, in_memory_torrent_repository) } + #[allow(clippy::type_complexity)] fn whitelisted_tracker() -> ( - Tracker, + Arc, + Arc, Arc, Arc, Arc, @@ -759,14 +643,35 @@ mod tests { let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let tracker = initialize_tracker(&config, &in_memory_torrent_repository, &db_torrent_repository); + let tracker = Arc::new(initialize_tracker( + &config, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - (tracker, whitelist_authorization, whitelist_manager, scrape_handler) + ( + tracker, + announce_handler, + whitelist_authorization, + whitelist_manager, + scrape_handler, + ) } - pub fn tracker_persisting_torrents_in_database() -> (Tracker, Arc, Arc) { + pub fn 
tracker_persisting_torrents_in_database() -> ( + Arc, + Arc, + Arc, + Arc, + ) { let mut config = configuration::ephemeral_listed(); config.core.tracker_policy.persistent_torrent_completed_stat = true; @@ -780,9 +685,19 @@ mod tests { torrents_manager, ) = initialize_tracker_dependencies(&config); - let tracker = initialize_tracker(&config, &in_memory_torrent_repository, &db_torrent_repository); + let tracker = Arc::new(initialize_tracker( + &config, + &in_memory_torrent_repository, + &db_torrent_repository, + )); - (tracker, torrents_manager, in_memory_torrent_repository) + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + (tracker, announce_handler, torrents_manager, in_memory_torrent_repository) } fn sample_info_hash() -> InfoHash { @@ -928,23 +843,21 @@ mod tests { #[tokio::test] async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { - let (tracker, in_memory_torrent_repository, _scrape_handler) = public_tracker(); + let (_tracker, _announce_handler, in_memory_torrent_repository, _scrape_handler) = public_tracker(); let info_hash = sample_info_hash(); let peer = sample_peer(); let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); - let peers = tracker - .in_memory_torrent_repository - .get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); + let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); assert_eq!(peers, vec![]); } #[tokio::test] async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { - let (tracker, in_memory_torrent_repository, _scrape_handler) = public_tracker(); + let (_tracker, _announce_handler, in_memory_torrent_repository, _scrape_handler) = public_tracker(); let info_hash = sample_info_hash(); @@ -967,9 +880,7 @@ mod tests { let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); } - let peers = tracker - 
.in_memory_torrent_repository - .get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); + let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); assert_eq!(peers.len(), 74); } @@ -1025,16 +936,16 @@ mod tests { use std::sync::Arc; + use crate::core::announce_handler::PeersWanted; use crate::core::tests::the_tracker::{ peer_ip, public_tracker, sample_info_hash, sample_peer, sample_peer_1, sample_peer_2, }; - use crate::core::PeersWanted; mod should_assign_the_ip_to_the_peer { use std::net::{IpAddr, Ipv4Addr}; - use crate::core::assign_ip_address_to_peer; + use crate::core::announce_handler::assign_ip_address_to_peer; #[test] fn using_the_source_ip_instead_of_the_ip_in_the_announce_request() { @@ -1050,7 +961,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::str::FromStr; - use crate::core::assign_ip_address_to_peer; + use crate::core::announce_handler::assign_ip_address_to_peer; #[test] fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { @@ -1091,7 +1002,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::str::FromStr; - use crate::core::assign_ip_address_to_peer; + use crate::core::announce_handler::assign_ip_address_to_peer; #[test] fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { @@ -1130,21 +1041,21 @@ mod tests { #[tokio::test] async fn it_should_return_the_announce_data_with_an_empty_peer_list_when_it_is_the_first_announced_peer() { - let (tracker, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); + let (_tracker, announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); let mut peer = sample_peer(); - let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); + let announce_data = announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); 
assert_eq!(announce_data.peers, vec![]); } #[tokio::test] async fn it_should_return_the_announce_data_with_the_previously_announced_peers() { - let (tracker, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); + let (_tracker, announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); let mut previously_announced_peer = sample_peer_1(); - tracker.announce( + announce_handler.announce( &sample_info_hash(), &mut previously_announced_peer, &peer_ip(), @@ -1152,51 +1063,53 @@ mod tests { ); let mut peer = sample_peer_2(); - let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); + let announce_data = announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); assert_eq!(announce_data.peers, vec![Arc::new(previously_announced_peer)]); } mod it_should_update_the_swarm_stats_for_the_torrent { + use crate::core::announce_handler::PeersWanted; use crate::core::tests::the_tracker::{ completed_peer, leecher, peer_ip, public_tracker, sample_info_hash, seeder, started_peer, }; - use crate::core::PeersWanted; #[tokio::test] async fn when_the_peer_is_a_seeder() { - let (tracker, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); + let (_tracker, announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); let mut peer = seeder(); - let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); + let announce_data = + announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); assert_eq!(announce_data.stats.complete, 1); } #[tokio::test] async fn when_the_peer_is_a_leecher() { - let (tracker, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); + let (_tracker, announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); let mut peer = leecher(); - let announce_data = tracker.announce(&sample_info_hash(), &mut peer, 
&peer_ip(), &PeersWanted::All); + let announce_data = + announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); assert_eq!(announce_data.stats.incomplete, 1); } #[tokio::test] async fn when_a_previously_announced_started_peer_has_completed_downloading() { - let (tracker, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); + let (_tracker, announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); // We have to announce with "started" event because peer does not count if peer was not previously known let mut started_peer = started_peer(); - tracker.announce(&sample_info_hash(), &mut started_peer, &peer_ip(), &PeersWanted::All); + announce_handler.announce(&sample_info_hash(), &mut started_peer, &peer_ip(), &PeersWanted::All); let mut completed_peer = completed_peer(); let announce_data = - tracker.announce(&sample_info_hash(), &mut completed_peer, &peer_ip(), &PeersWanted::All); + announce_handler.announce(&sample_info_hash(), &mut completed_peer, &peer_ip(), &PeersWanted::All); assert_eq!(announce_data.stats.downloaded, 1); } @@ -1209,19 +1122,20 @@ mod tests { use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::core::ScrapeData; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + use crate::core::announce_handler::PeersWanted; use crate::core::tests::the_tracker::{complete_peer, incomplete_peer, public_tracker}; - use crate::core::{PeersWanted, SwarmMetadata}; #[tokio::test] async fn it_should_return_the_swarm_metadata_for_the_requested_file_if_the_tracker_has_that_torrent() { - let (tracker, _in_memory_torrent_repository, scrape_handler) = public_tracker(); + let (_tracker, announce_handler, _in_memory_torrent_repository, scrape_handler) = public_tracker(); let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // # DevSkim: ignore DS173237 // Announce a "complete" peer for the torrent let mut complete_peer = 
complete_peer(); - tracker.announce( + announce_handler.announce( &info_hash, &mut complete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10)), @@ -1230,7 +1144,7 @@ mod tests { // Announce an "incomplete" peer for the torrent let mut incomplete_peer = incomplete_peer(); - tracker.announce( + announce_handler.announce( &info_hash, &mut incomplete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11)), @@ -1263,7 +1177,8 @@ mod tests { #[tokio::test] async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { - let (_tracker, whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); + let (_tracker, _announce_handler, whitelist_authorization, whitelist_manager, _scrape_handler) = + whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -1276,7 +1191,8 @@ mod tests { #[tokio::test] async fn it_should_not_authorize_the_announce_and_scrape_actions_on_not_whitelisted_torrents() { - let (_tracker, whitelist_authorization, _whitelist_manager, _scrape_handler) = whitelisted_tracker(); + let (_tracker, _announce_handler, whitelist_authorization, _whitelist_manager, _scrape_handler) = + whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -1295,7 +1211,8 @@ mod tests { #[tokio::test] async fn it_should_add_a_torrent_to_the_whitelist() { - let (_tracker, _whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); + let (_tracker, _announce_handler, _whitelist_authorization, whitelist_manager, _scrape_handler) = + whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -1306,7 +1223,8 @@ mod tests { #[tokio::test] async fn it_should_remove_a_torrent_from_the_whitelist() { - let (_tracker, _whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); + let (_tracker, _announce_handler, _whitelist_authorization, whitelist_manager, _scrape_handler) = + whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -1322,7 +1240,8 @@ mod tests { #[tokio::test] 
async fn it_should_load_the_whitelist_from_the_database() { - let (_tracker, _whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); + let (_tracker, _announce_handler, _whitelist_authorization, whitelist_manager, _scrape_handler) = + whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -1347,10 +1266,10 @@ mod tests { use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + use crate::core::announce_handler::PeersWanted; use crate::core::tests::the_tracker::{ complete_peer, incomplete_peer, peer_ip, sample_info_hash, whitelisted_tracker, }; - use crate::core::PeersWanted; #[test] fn it_should_be_able_to_build_a_zeroed_scrape_data_for_a_list_of_info_hashes() { @@ -1366,16 +1285,17 @@ mod tests { #[tokio::test] async fn it_should_return_the_zeroed_swarm_metadata_for_the_requested_file_if_it_is_not_whitelisted() { - let (tracker, _whitelist_authorization, _whitelist_manager, scrape_handler) = whitelisted_tracker(); + let (_tracker, announce_handler, _whitelist_authorization, _whitelist_manager, scrape_handler) = + whitelisted_tracker(); let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // # DevSkim: ignore DS173237 let mut peer = incomplete_peer(); - tracker.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); + announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); // Announce twice to force non zeroed swarm metadata let mut peer = complete_peer(); - tracker.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); + announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); let scrape_data = scrape_handler.scrape(&vec![info_hash]).await; @@ -1393,25 +1313,26 @@ mod tests { use aquatic_udp_protocol::AnnounceEvent; use torrust_tracker_torrent_repository::entry::EntrySync; + use crate::core::announce_handler::PeersWanted; use crate::core::tests::the_tracker::{ peer_ip, 
sample_info_hash, sample_peer, tracker_persisting_torrents_in_database, }; - use crate::core::PeersWanted; #[tokio::test] async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { - let (tracker, torrents_manager, in_memory_torrent_repository) = tracker_persisting_torrents_in_database(); + let (_tracker, announce_handler, torrents_manager, in_memory_torrent_repository) = + tracker_persisting_torrents_in_database(); let info_hash = sample_info_hash(); let mut peer = sample_peer(); peer.event = AnnounceEvent::Started; - let announce_data = tracker.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); + let announce_data = announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); assert_eq!(announce_data.stats.downloaded, 0); peer.event = AnnounceEvent::Completed; - let announce_data = tracker.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); + let announce_data = announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); assert_eq!(announce_data.stats.downloaded, 1); // Remove the newly updated torrent from memory @@ -1419,8 +1340,7 @@ mod tests { torrents_manager.load_torrents_from_database().unwrap(); - let torrent_entry = tracker - .in_memory_torrent_repository + let torrent_entry = in_memory_torrent_repository .get(&info_hash) .expect("it should be able to get entry"); diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 573337ba9..3bb49c1ad 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -11,6 +11,7 @@ use tracing::instrument; use super::v1::routes::router; use crate::bootstrap::jobs::Started; +use crate::core::announce_handler::AnnounceHandler; use crate::core::authentication::service::AuthenticationService; use crate::core::scrape_handler::ScrapeHandler; use crate::core::{statistics, whitelist, Tracker}; @@ -48,6 +49,7 @@ impl Launcher { #[instrument(skip( self, tracker, + announce_handler, scrape_handler, 
authentication_service, whitelist_authorization, @@ -58,6 +60,7 @@ impl Launcher { fn start( &self, tracker: Arc, + announce_handler: Arc, scrape_handler: Arc, authentication_service: Arc, whitelist_authorization: Arc, @@ -83,6 +86,7 @@ impl Launcher { let app = router( tracker, + announce_handler, scrape_handler, authentication_service, whitelist_authorization, @@ -181,9 +185,11 @@ impl HttpServer { /// /// It would panic spawned HTTP server launcher cannot send the bound `SocketAddr` /// back to the main thread. + #[allow(clippy::too_many_arguments)] pub async fn start( self, tracker: Arc, + announce_handler: Arc, scrape_handler: Arc, authentication_service: Arc, whitelist_authorization: Arc, @@ -198,6 +204,7 @@ impl HttpServer { let task = tokio::spawn(async move { let server = launcher.start( tracker, + announce_handler, scrape_handler, authentication_service, whitelist_authorization, @@ -303,6 +310,7 @@ mod tests { let started = stopped .start( app_container.tracker, + app_container.announce_handler, app_container.scrape_handler, app_container.authentication_service, app_container.whitelist_authorization, diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 8b57ce543..39ddd1710 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -21,10 +21,11 @@ use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; +use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; use crate::core::authentication::service::AuthenticationService; use crate::core::authentication::Key; use crate::core::statistics::event::sender::Sender; -use crate::core::{whitelist, PeersWanted, Tracker}; +use crate::core::{whitelist, Tracker}; use crate::servers::http::v1::extractors::announce_request::ExtractRequest; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use 
crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; @@ -39,6 +40,7 @@ use crate::CurrentClock; pub async fn handle_without_key( State(state): State<( Arc, + Arc, Arc, Arc, Arc>>, @@ -53,6 +55,7 @@ pub async fn handle_without_key( &state.1, &state.2, &state.3, + &state.4, &announce_request, &client_ip_sources, None, @@ -67,6 +70,7 @@ pub async fn handle_without_key( pub async fn handle_with_key( State(state): State<( Arc, + Arc, Arc, Arc, Arc>>, @@ -82,6 +86,7 @@ pub async fn handle_with_key( &state.1, &state.2, &state.3, + &state.4, &announce_request, &client_ip_sources, Some(key), @@ -93,8 +98,10 @@ pub async fn handle_with_key( /// /// Internal implementation that handles both the `authenticated` and /// `unauthenticated` modes. +#[allow(clippy::too_many_arguments)] async fn handle( tracker: &Arc, + announce_handler: &Arc, authentication_service: &Arc, whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, @@ -104,6 +111,7 @@ async fn handle( ) -> Response { let announce_data = match handle_announce( tracker, + announce_handler, authentication_service, whitelist_authorization, opt_stats_event_sender, @@ -125,8 +133,10 @@ async fn handle( See https://github.com/torrust/torrust-tracker/discussions/240. 
*/ +#[allow(clippy::too_many_arguments)] async fn handle_announce( tracker: &Arc, + announce_handler: &Arc, authentication_service: &Arc, whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, @@ -168,6 +178,7 @@ async fn handle_announce( let announce_data = services::announce::invoke( tracker.clone(), + announce_handler.clone(), opt_stats_event_sender.clone(), announce_request.info_hash, &mut peer, @@ -244,6 +255,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; + use crate::core::announce_handler::AnnounceHandler; use crate::core::authentication::service::AuthenticationService; use crate::core::services::{initialize_tracker, statistics}; use crate::core::statistics::event::sender::Sender; @@ -251,6 +263,7 @@ mod tests { type TrackerAndDeps = ( Arc, + Arc, Arc>>, Arc, Arc, @@ -293,7 +306,19 @@ mod tests { &db_torrent_repository, )); - (tracker, stats_event_sender, whitelist_authorization, authentication_service) + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + ( + tracker, + announce_handler, + stats_event_sender, + whitelist_authorization, + authentication_service, + ) } fn sample_announce_request() -> Announce { @@ -336,7 +361,8 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_authentication_key_is_missing() { - let (tracker, stats_event_sender, whitelist_authorization, authentication_service) = private_tracker(); + let (tracker, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = + private_tracker(); let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); @@ -345,6 +371,7 @@ mod tests { let response = handle_announce( &tracker, + &announce_handler, &authentication_service, &whitelist_authorization, &stats_event_sender, @@ -363,7 +390,8 @@ mod tests { #[tokio::test] async fn 
it_should_fail_when_the_authentication_key_is_invalid() { - let (tracker, stats_event_sender, whitelist_authorization, authentication_service) = private_tracker(); + let (tracker, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = + private_tracker(); let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); @@ -374,6 +402,7 @@ mod tests { let response = handle_announce( &tracker, + &announce_handler, &authentication_service, &whitelist_authorization, &stats_event_sender, @@ -398,7 +427,8 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_announced_torrent_is_not_whitelisted() { - let (tracker, stats_event_sender, whitelist_authorization, authentication_service) = whitelisted_tracker(); + let (tracker, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = + whitelisted_tracker(); let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); @@ -407,6 +437,7 @@ mod tests { let response = handle_announce( &tracker, + &announce_handler, &authentication_service, &whitelist_authorization, &stats_event_sender, @@ -439,7 +470,8 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { - let (tracker, stats_event_sender, whitelist_authorization, authentication_service) = tracker_on_reverse_proxy(); + let (tracker, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = + tracker_on_reverse_proxy(); let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); @@ -451,6 +483,7 @@ mod tests { let response = handle_announce( &tracker, + &announce_handler, &authentication_service, &whitelist_authorization, &stats_event_sender, @@ -480,7 +513,8 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { - let (tracker, stats_event_sender, 
whitelist_authorization, authentication_service) = tracker_not_on_reverse_proxy(); + let (tracker, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = + tracker_not_on_reverse_proxy(); let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); @@ -492,6 +526,7 @@ mod tests { let response = handle_announce( &tracker, + &announce_handler, &authentication_service, &whitelist_authorization, &stats_event_sender, diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index 0c0be5bd5..50e1494be 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -22,6 +22,7 @@ use tower_http::LatencyUnit; use tracing::{instrument, Level, Span}; use super::handlers::{announce, health_check, scrape}; +use crate::core::announce_handler::AnnounceHandler; use crate::core::authentication::service::AuthenticationService; use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; @@ -36,6 +37,7 @@ use crate::servers::logging::Latency; #[allow(clippy::needless_pass_by_value)] #[instrument(skip( tracker, + announce_handler, scrape_handler, authentication_service, whitelist_authorization, @@ -44,6 +46,7 @@ use crate::servers::logging::Latency; ))] pub fn router( tracker: Arc, + announce_handler: Arc, scrape_handler: Arc, authentication_service: Arc, whitelist_authorization: Arc, @@ -58,6 +61,7 @@ pub fn router( "/announce", get(announce::handle_without_key).with_state(( tracker.clone(), + announce_handler.clone(), authentication_service.clone(), whitelist_authorization.clone(), stats_event_sender.clone(), @@ -67,6 +71,7 @@ pub fn router( "/announce/{key}", get(announce::handle_with_key).with_state(( tracker.clone(), + announce_handler.clone(), authentication_service.clone(), whitelist_authorization.clone(), stats_event_sender.clone(), diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 
9e381d8b2..2c88ebc60 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -15,9 +15,10 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; +use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::{self}; -use crate::core::{PeersWanted, Tracker}; +use crate::core::Tracker; /// The HTTP tracker `announce` service. /// @@ -30,7 +31,8 @@ use crate::core::{PeersWanted, Tracker}; /// > like the UDP tracker, the number of TCP connections is incremented for /// > each `announce` request. pub async fn invoke( - tracker: Arc, + _tracker: Arc, + announce_handler: Arc, opt_stats_event_sender: Arc>>, info_hash: InfoHash, peer: &mut peer::Peer, @@ -39,7 +41,7 @@ pub async fn invoke( let original_peer_ip = peer.peer_addr.ip(); // The tracker could change the original peer ip - let announce_data = tracker.announce(&info_hash, peer, &original_peer_ip, peers_wanted); + let announce_data = announce_handler.announce(&info_hash, peer, &original_peer_ip, peers_wanted); if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { match original_peer_ip { @@ -66,11 +68,13 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; + use crate::core::announce_handler::AnnounceHandler; use crate::core::services::{initialize_tracker, statistics}; use crate::core::statistics::event::sender::Sender; use crate::core::Tracker; - fn public_tracker() -> (Tracker, Arc>>) { + #[allow(clippy::type_complexity)] + fn public_tracker() -> (Arc, Arc, Arc>>) { let config = configuration::ephemeral_public(); let ( @@ -85,9 +89,19 @@ mod tests { let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); - let 
tracker = initialize_tracker(&config, &in_memory_torrent_repository, &db_torrent_repository); + let tracker = Arc::new(initialize_tracker( + &config, + &in_memory_torrent_repository, + &db_torrent_repository, + )); - (tracker, stats_event_sender) + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + (tracker, announce_handler, stats_event_sender) } fn sample_info_hash() -> InfoHash { @@ -132,11 +146,12 @@ mod tests { use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; use crate::app_test::initialize_tracker_dependencies; - use crate::core::{statistics, PeersWanted, Tracker}; + use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; + use crate::core::{statistics, Tracker}; use crate::servers::http::v1::services::announce::invoke; use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; - fn test_tracker_factory() -> Tracker { + fn initialize_tracker_and_announce_handler() -> (Arc, Arc) { let config = configuration::ephemeral(); let ( @@ -149,19 +164,26 @@ mod tests { _torrents_manager, ) = initialize_tracker_dependencies(&config); - Tracker::new(&config.core, &in_memory_torrent_repository, &db_torrent_repository).unwrap() + let tracker = Arc::new(Tracker::new(&config.core, &in_memory_torrent_repository, &db_torrent_repository).unwrap()); + + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + (tracker, announce_handler) } #[tokio::test] async fn it_should_return_the_announce_data() { - let (tracker, stats_event_sender) = public_tracker(); - - let tracker = Arc::new(tracker); + let (tracker, announce_handler, stats_event_sender) = public_tracker(); let mut peer = sample_peer(); let announce_data = invoke( tracker.clone(), + announce_handler.clone(), stats_event_sender.clone(), sample_info_hash(), &mut peer, @@ -193,20 +215,28 
@@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let tracker = Arc::new(test_tracker_factory()); + let (tracker, announce_handler) = initialize_tracker_and_announce_handler(); let mut peer = sample_peer_using_ipv4(); - let _announce_data = invoke(tracker, stats_event_sender, sample_info_hash(), &mut peer, &PeersWanted::All).await; + let _announce_data = invoke( + tracker, + announce_handler, + stats_event_sender, + sample_info_hash(), + &mut peer, + &PeersWanted::All, + ) + .await; } - fn tracker_with_an_ipv6_external_ip() -> Tracker { + fn tracker_with_an_ipv6_external_ip() -> (Arc, Arc) { let mut configuration = configuration::ephemeral(); configuration.core.net.external_ip = Some(IpAddr::V6(Ipv6Addr::new( 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, ))); - test_tracker_factory() + initialize_tracker_and_announce_handler() } fn peer_with_the_ipv4_loopback_ip() -> peer::Peer { @@ -233,8 +263,11 @@ mod tests { let mut peer = peer_with_the_ipv4_loopback_ip(); + let (tracker, announce_handler) = tracker_with_an_ipv6_external_ip(); + let _announce_data = invoke( - tracker_with_an_ipv6_external_ip().into(), + tracker, + announce_handler, stats_event_sender, sample_info_hash(), &mut peer, @@ -255,11 +288,19 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let tracker = Arc::new(test_tracker_factory()); + let (tracker, announce_handler) = initialize_tracker_and_announce_handler(); let mut peer = sample_peer_using_ipv6(); - let _announce_data = invoke(tracker, stats_event_sender, sample_info_hash(), &mut peer, &PeersWanted::All).await; + let _announce_data = invoke( + tracker, + announce_handler, + stats_event_sender, + sample_info_hash(), + &mut peer, + &PeersWanted::All, + ) + .await; } } } diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index e3ee6560f..6df267d3a 100644 --- 
a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -81,11 +81,12 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; + use crate::core::announce_handler::AnnounceHandler; use crate::core::scrape_handler::ScrapeHandler; use crate::core::services::initialize_tracker; use crate::core::Tracker; - fn public_tracker_and_scrape_handler() -> (Arc, Arc) { + fn public_tracker_and_announce_and_scrape_handlers() -> (Arc, Arc, Arc) { let config = configuration::ephemeral_public(); let ( @@ -104,9 +105,15 @@ mod tests { &db_torrent_repository, )); + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - (tracker, scrape_handler) + (tracker, announce_handler, scrape_handler) } fn sample_info_hashes() -> Vec { @@ -159,10 +166,12 @@ mod tests { use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::core::{statistics, PeersWanted}; + use crate::core::announce_handler::PeersWanted; + use crate::core::statistics; use crate::servers::http::v1::services::scrape::invoke; use crate::servers::http::v1::services::scrape::tests::{ - public_tracker_and_scrape_handler, sample_info_hash, sample_info_hashes, sample_peer, test_tracker_factory, + public_tracker_and_announce_and_scrape_handlers, sample_info_hash, sample_info_hashes, sample_peer, + test_tracker_factory, }; #[tokio::test] @@ -170,7 +179,7 @@ mod tests { let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); - let (tracker, scrape_handler) = public_tracker_and_scrape_handler(); + let (_tracker, announce_handler, scrape_handler) = 
public_tracker_and_announce_and_scrape_handlers(); let info_hash = sample_info_hash(); let info_hashes = vec![info_hash]; @@ -178,7 +187,7 @@ mod tests { // Announce a new peer to force scrape data to contain not zeroed data let mut peer = sample_peer(); let original_peer_ip = peer.ip(); - tracker.announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::All); + announce_handler.announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::All); let scrape_data = invoke(&scrape_handler, &stats_event_sender, &info_hashes, &original_peer_ip).await; @@ -241,10 +250,11 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_primitives::core::ScrapeData; - use crate::core::{statistics, PeersWanted}; + use crate::core::announce_handler::PeersWanted; + use crate::core::statistics; use crate::servers::http::v1::services::scrape::fake; use crate::servers::http::v1::services::scrape::tests::{ - public_tracker_and_scrape_handler, sample_info_hash, sample_info_hashes, sample_peer, + public_tracker_and_announce_and_scrape_handlers, sample_info_hash, sample_info_hashes, sample_peer, }; #[tokio::test] @@ -252,7 +262,7 @@ mod tests { let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); - let (tracker, _scrape_handler) = public_tracker_and_scrape_handler(); + let (_tracker, announce_handler, _scrape_handler) = public_tracker_and_announce_and_scrape_handlers(); let info_hash = sample_info_hash(); let info_hashes = vec![info_hash]; @@ -260,7 +270,7 @@ mod tests { // Announce a new peer to force scrape data to contain not zeroed data let mut peer = sample_peer(); let original_peer_ip = peer.ip(); - tracker.announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::All); + announce_handler.announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::All); let scrape_data = fake(&stats_event_sender, &info_hashes, &original_peer_ip).await; diff --git 
a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index d6073d2e8..03a0248d4 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -20,9 +20,10 @@ use zerocopy::network_endian::I32; use super::connection_cookie::{check, make}; use super::server::banning::BanService; use super::RawRequest; +use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; -use crate::core::{statistics, whitelist, PeersWanted, Tracker}; +use crate::core::{statistics, whitelist, Tracker}; use crate::servers::udp::error::Error; use crate::servers::udp::{peer_builder, UDP_TRACKER_LOG_TARGET}; use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; @@ -56,10 +57,11 @@ impl CookieTimeValues { /// /// It will return an `Error` response if the request is invalid. #[allow(clippy::too_many_arguments)] -#[instrument(fields(request_id), skip(udp_request, tracker, scrape_handler, whitelist_authorization, opt_stats_event_sender, cookie_time_values, ban_service), ret(level = Level::TRACE))] +#[instrument(fields(request_id), skip(udp_request, tracker, announce_handler, scrape_handler, whitelist_authorization, opt_stats_event_sender, cookie_time_values, ban_service), ret(level = Level::TRACE))] pub(crate) async fn handle_packet( udp_request: RawRequest, tracker: &Tracker, + announce_handler: &Arc, scrape_handler: &Arc, whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, @@ -80,6 +82,7 @@ pub(crate) async fn handle_packet( request, udp_request.from, tracker, + announce_handler, scrape_handler, whitelist_authorization, opt_stats_event_sender, @@ -137,10 +140,12 @@ pub(crate) async fn handle_packet( /// # Errors /// /// If a error happens in the `handle_request` function, it will just return the `ServerError`. 
+#[allow(clippy::too_many_arguments)] #[instrument(skip( request, remote_addr, tracker, + announce_handler, scrape_handler, whitelist_authorization, opt_stats_event_sender, @@ -150,6 +155,7 @@ pub async fn handle_request( request: Request, remote_addr: SocketAddr, tracker: &Tracker, + announce_handler: &Arc, scrape_handler: &Arc, whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, @@ -170,6 +176,7 @@ pub async fn handle_request( remote_addr, &announce_request, tracker, + announce_handler, whitelist_authorization, opt_stats_event_sender, cookie_time_values.valid_range, @@ -233,11 +240,12 @@ pub async fn handle_connect( /// # Errors /// /// If a error happens in the `handle_announce` function, it will just return the `ServerError`. -#[instrument(fields(transaction_id, connection_id, info_hash), skip(tracker, whitelist_authorization, opt_stats_event_sender), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id, connection_id, info_hash), skip(tracker, announce_handler, whitelist_authorization, opt_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_announce( remote_addr: SocketAddr, request: &AnnounceRequest, tracker: &Tracker, + announce_handler: &Arc, whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, cookie_valid_range: Range, @@ -271,7 +279,7 @@ pub async fn handle_announce( let mut peer = peer_builder::from_request(request, &remote_client_ip); let peers_wanted: PeersWanted = i32::from(request.peers_wanted.0).into(); - let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted); + let response = announce_handler.announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted); if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { match remote_client_ip { @@ -490,6 +498,7 @@ mod tests { use super::gen_remote_fingerprint; use crate::app_test::initialize_tracker_dependencies; + use crate::core::announce_handler::AnnounceHandler; use 
crate::core::scrape_handler::ScrapeHandler; use crate::core::services::{initialize_tracker, initialize_whitelist_manager, statistics}; use crate::core::statistics::event::sender::Sender; @@ -501,6 +510,7 @@ mod tests { type TrackerAndDeps = ( Arc, + Arc, Arc, Arc, Arc>>, @@ -546,10 +556,17 @@ mod tests { &db_torrent_repository, )); + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); ( tracker, + announce_handler, scrape_handler, in_memory_torrent_repository, stats_event_sender, @@ -653,7 +670,12 @@ mod tests { } } - fn test_tracker_factory() -> (Arc, Arc, Arc) { + fn test_tracker_factory() -> ( + Arc, + Arc, + Arc, + Arc, + ) { let config = tracker_configuration(); let ( @@ -668,9 +690,15 @@ mod tests { let tracker = Arc::new(Tracker::new(&config.core, &in_memory_torrent_repository, &db_torrent_repository).unwrap()); + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - (tracker, scrape_handler, whitelist_authorization) + (tracker, announce_handler, scrape_handler, whitelist_authorization) } mod connect_request { @@ -883,6 +911,7 @@ mod tests { }; use mockall::predicate::eq; + use crate::core::announce_handler::AnnounceHandler; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::{self, statistics, whitelist}; use crate::servers::udp::connection_cookie::make; @@ -897,6 +926,7 @@ mod tests { async fn an_announced_peer_should_be_added_to_the_tracker() { let ( tracker, + announce_handler, _scrape_handler, in_memory_torrent_repository, stats_event_sender, @@ -924,6 +954,7 @@ mod tests { remote_addr, &request, &tracker, + &announce_handler, 
&whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), @@ -945,6 +976,7 @@ mod tests { async fn the_announced_peer_should_not_be_included_in_the_response() { let ( tracker, + announce_handler, _scrape_handler, _in_memory_torrent_repository, stats_event_sender, @@ -963,6 +995,7 @@ mod tests { remote_addr, &request, &tracker, + &announce_handler, &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), @@ -993,6 +1026,7 @@ mod tests { let ( tracker, + announce_handler, _scrape_handler, in_memory_torrent_repository, stats_event_sender, @@ -1023,6 +1057,7 @@ mod tests { remote_addr, &request, &tracker, + &announce_handler, &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), @@ -1053,6 +1088,7 @@ mod tests { async fn announce_a_new_peer_using_ipv4( tracker: Arc, + announce_handler: Arc, whitelist_authorization: Arc, ) -> Response { let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); @@ -1067,6 +1103,7 @@ mod tests { remote_addr, &request, &tracker, + &announce_handler, &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), @@ -1079,6 +1116,7 @@ mod tests { async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { let ( tracker, + announce_handler, _scrape_handler, in_memory_torrent_repository, _stats_event_sender, @@ -1089,7 +1127,8 @@ mod tests { add_a_torrent_peer_using_ipv6(&in_memory_torrent_repository); - let response = announce_a_new_peer_using_ipv4(tracker.clone(), whitelist_authorization).await; + let response = + announce_a_new_peer_using_ipv4(tracker.clone(), announce_handler.clone(), whitelist_authorization).await; // The response should not contain the peer using IPV6 let peers: Option>> = match response { @@ -1111,12 +1150,13 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let (tracker, _scrape_handler, 
whitelist_authorization) = test_tracker_factory(); + let (tracker, announce_handler, _scrape_handler, whitelist_authorization) = test_tracker_factory(); handle_announce( sample_ipv4_socket_address(), &AnnounceRequestBuilder::default().into(), &tracker, + &announce_handler, &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), @@ -1142,6 +1182,7 @@ mod tests { async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { let ( tracker, + announce_handler, _scrape_handler, in_memory_torrent_repository, stats_event_sender, @@ -1169,6 +1210,7 @@ mod tests { remote_addr, &request, &tracker, + &announce_handler, &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), @@ -1202,6 +1244,7 @@ mod tests { }; use mockall::predicate::eq; + use crate::core::announce_handler::AnnounceHandler; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::{self, statistics, whitelist}; use crate::servers::udp::connection_cookie::make; @@ -1216,6 +1259,7 @@ mod tests { async fn an_announced_peer_should_be_added_to_the_tracker() { let ( tracker, + announce_handler, _scrape_handler, in_memory_torrent_repository, stats_event_sender, @@ -1244,6 +1288,7 @@ mod tests { remote_addr, &request, &tracker, + &announce_handler, &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), @@ -1265,6 +1310,7 @@ mod tests { async fn the_announced_peer_should_not_be_included_in_the_response() { let ( tracker, + announce_handler, _scrape_handler, _in_memory_torrent_repository, stats_event_sender, @@ -1286,6 +1332,7 @@ mod tests { remote_addr, &request, &tracker, + &announce_handler, &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), @@ -1316,6 +1363,7 @@ mod tests { let ( tracker, + announce_handler, _scrape_handler, in_memory_torrent_repository, stats_event_sender, @@ -1346,6 +1394,7 @@ mod tests { remote_addr, &request, &tracker, + 
&announce_handler, &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), @@ -1376,6 +1425,7 @@ mod tests { async fn announce_a_new_peer_using_ipv6( tracker: Arc, + announce_handler: Arc, whitelist_authorization: Arc, ) -> Response { let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); @@ -1393,6 +1443,7 @@ mod tests { remote_addr, &request, &tracker, + &announce_handler, &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), @@ -1405,6 +1456,7 @@ mod tests { async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { let ( tracker, + announce_handler, _scrape_handler, in_memory_torrent_repository, _stats_event_sender, @@ -1415,7 +1467,8 @@ mod tests { add_a_torrent_peer_using_ipv4(&in_memory_torrent_repository); - let response = announce_a_new_peer_using_ipv6(tracker.clone(), whitelist_authorization).await; + let response = + announce_a_new_peer_using_ipv6(tracker.clone(), announce_handler.clone(), whitelist_authorization).await; // The response should not contain the peer using IPV4 let peers: Option>> = match response { @@ -1437,7 +1490,7 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let (tracker, _scrape_handler, whitelist_authorization) = test_tracker_factory(); + let (tracker, announce_handler, _scrape_handler, whitelist_authorization) = test_tracker_factory(); let remote_addr = sample_ipv6_remote_addr(); @@ -1449,6 +1502,7 @@ mod tests { remote_addr, &announce_request, &tracker, + &announce_handler, &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), @@ -1466,6 +1520,7 @@ mod tests { use mockall::predicate::eq; use crate::app_test::initialize_tracker_dependencies; + use crate::core::announce_handler::AnnounceHandler; use crate::core::{self, statistics}; use crate::servers::udp::connection_cookie::make; use 
crate::servers::udp::handlers::handle_announce; @@ -1501,6 +1556,12 @@ mod tests { core::Tracker::new(&config.core, &in_memory_torrent_repository, &db_torrent_repository).unwrap(), ); + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); @@ -1525,6 +1586,7 @@ mod tests { remote_addr, &request, &tracker, + &announce_handler, &whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), @@ -1580,6 +1642,7 @@ mod tests { async fn should_return_no_stats_when_the_tracker_does_not_have_any_torrent() { let ( _tracker, + _announce_handler, scrape_handler, _in_memory_torrent_repository, stats_event_sender, @@ -1688,6 +1751,7 @@ mod tests { async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { let ( _tracker, + _announce_handler, scrape_handler, in_memory_torrent_repository, _stats_event_sender, @@ -1723,6 +1787,7 @@ mod tests { async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { let ( _tracker, + _announce_handler, scrape_handler, in_memory_torrent_repository, stats_event_sender, @@ -1766,6 +1831,7 @@ mod tests { async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { let ( _tracker, + _announce_handler, scrape_handler, in_memory_torrent_repository, stats_event_sender, @@ -1837,7 +1903,7 @@ mod tests { let remote_addr = sample_ipv4_remote_addr(); - let (_tracker, scrape_handler, _whitelist_authorization) = test_tracker_factory(); + let (_tracker, _announce_handler, scrape_handler, _whitelist_authorization) = test_tracker_factory(); handle_scrape( remote_addr, @@ -1877,7 +1943,7 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); - let (_tracker, scrape_handler, _whitelist_authorization) = test_tracker_factory(); + let (_tracker, _announce_handler, 
scrape_handler, _whitelist_authorization) = test_tracker_factory(); handle_scrape( remote_addr, diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index d6bc230e1..f1d0e4859 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -13,6 +13,7 @@ use tracing::instrument; use super::banning::BanService; use super::request_buffer::ActiveRequests; use crate::bootstrap::jobs::Started; +use crate::core::announce_handler::AnnounceHandler; use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; use crate::core::{statistics, whitelist, Tracker}; @@ -44,6 +45,7 @@ impl Launcher { #[allow(clippy::too_many_arguments)] #[instrument(skip( tracker, + announce_handler, scrape_handler, whitelist_authorization, opt_stats_event_sender, @@ -54,6 +56,7 @@ impl Launcher { ))] pub async fn run_with_graceful_shutdown( tracker: Arc, + announce_handler: Arc, scrape_handler: Arc, whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, @@ -98,6 +101,7 @@ impl Launcher { let () = Self::run_udp_server_main( receiver, tracker.clone(), + announce_handler.clone(), scrape_handler.clone(), whitelist_authorization.clone(), opt_stats_event_sender.clone(), @@ -141,9 +145,11 @@ impl Launcher { ServiceHealthCheckJob::new(binding, info, job) } + #[allow(clippy::too_many_arguments)] #[instrument(skip( receiver, tracker, + announce_handler, scrape_handler, whitelist_authorization, opt_stats_event_sender, @@ -152,6 +158,7 @@ impl Launcher { async fn run_udp_server_main( mut receiver: Receiver, tracker: Arc, + announce_handler: Arc, scrape_handler: Arc, whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, @@ -224,6 +231,7 @@ impl Launcher { let processor = Processor::new( receiver.socket.clone(), tracker.clone(), + announce_handler.clone(), scrape_handler.clone(), whitelist_authorization.clone(), opt_stats_event_sender.clone(), diff --git a/src/servers/udp/server/mod.rs 
b/src/servers/udp/server/mod.rs index 668265752..53ba588d4 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -83,6 +83,7 @@ mod tests { let started = stopped .start( app_container.tracker, + app_container.announce_handler, app_container.scrape_handler, app_container.whitelist_authorization, app_container.stats_event_sender, @@ -117,6 +118,7 @@ mod tests { let started = stopped .start( app_container.tracker, + app_container.announce_handler, app_container.scrape_handler, app_container.whitelist_authorization, app_container.stats_event_sender, diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index 889a2a913..4cecbc36a 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -10,6 +10,7 @@ use tracing::{instrument, Level}; use super::banning::BanService; use super::bound_socket::BoundSocket; +use crate::core::announce_handler::AnnounceHandler; use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::event::UdpResponseKind; @@ -20,6 +21,7 @@ use crate::servers::udp::{handlers, RawRequest}; pub struct Processor { socket: Arc, tracker: Arc, + announce_handler: Arc, scrape_handler: Arc, whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, @@ -30,6 +32,7 @@ impl Processor { pub fn new( socket: Arc, tracker: Arc, + announce_handler: Arc, scrape_handler: Arc, whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, @@ -38,6 +41,7 @@ impl Processor { Self { socket, tracker, + announce_handler, scrape_handler, whitelist_authorization, opt_stats_event_sender, @@ -54,6 +58,7 @@ impl Processor { let response = handlers::handle_packet( request, &self.tracker, + &self.announce_handler, &self.scrape_handler, &self.whitelist_authorization, &self.opt_stats_event_sender, diff --git a/src/servers/udp/server/spawner.rs b/src/servers/udp/server/spawner.rs index 82fd808c4..ea12b1c0b 100644 --- 
a/src/servers/udp/server/spawner.rs +++ b/src/servers/udp/server/spawner.rs @@ -11,6 +11,7 @@ use tokio::task::JoinHandle; use super::banning::BanService; use super::launcher::Launcher; use crate::bootstrap::jobs::Started; +use crate::core::announce_handler::AnnounceHandler; use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; use crate::core::{whitelist, Tracker}; @@ -32,6 +33,7 @@ impl Spawner { pub fn spawn_launcher( &self, tracker: Arc, + announce_handler: Arc, scrape_handler: Arc, whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, @@ -45,6 +47,7 @@ impl Spawner { tokio::spawn(async move { Launcher::run_with_graceful_shutdown( tracker, + announce_handler, scrape_handler, whitelist_authorization, opt_stats_event_sender, diff --git a/src/servers/udp/server/states.rs b/src/servers/udp/server/states.rs index d2c91b03d..bab04fdcc 100644 --- a/src/servers/udp/server/states.rs +++ b/src/servers/udp/server/states.rs @@ -13,6 +13,7 @@ use super::banning::BanService; use super::spawner::Spawner; use super::{Server, UdpError}; use crate::bootstrap::jobs::Started; +use crate::core::announce_handler::AnnounceHandler; use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; use crate::core::{whitelist, Tracker}; @@ -66,10 +67,11 @@ impl Server { /// /// It panics if unable to receive the bound socket address from service. 
#[allow(clippy::too_many_arguments)] - #[instrument(skip(self, tracker, scrape_handler, whitelist_authorization, opt_stats_event_sender, ban_service, form), err, ret(Display, level = Level::INFO))] + #[instrument(skip(self, tracker, announce_handler, scrape_handler, whitelist_authorization, opt_stats_event_sender, ban_service, form), err, ret(Display, level = Level::INFO))] pub async fn start( self, tracker: Arc, + announce_handler: Arc, scrape_handler: Arc, whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, @@ -85,6 +87,7 @@ impl Server { // May need to wrap in a task to about a tokio bug. let task = self.state.spawner.spawn_launcher( tracker, + announce_handler, scrape_handler, whitelist_authorization, opt_stats_event_sender, diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index beaf2d38c..78051cbbb 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -5,6 +5,7 @@ use futures::executor::block_on; use torrust_tracker_configuration::{Configuration, HttpTracker}; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; +use torrust_tracker_lib::core::announce_handler::AnnounceHandler; use torrust_tracker_lib::core::authentication::handler::KeysHandler; use torrust_tracker_lib::core::authentication::service::AuthenticationService; use torrust_tracker_lib::core::databases::Database; @@ -22,6 +23,7 @@ pub struct Environment { pub config: Arc, pub database: Arc>, pub tracker: Arc, + pub announce_handler: Arc, pub scrape_handler: Arc, pub in_memory_torrent_repository: Arc, pub keys_handler: Arc, @@ -65,6 +67,7 @@ impl Environment { config, database: app_container.database.clone(), tracker: app_container.tracker.clone(), + announce_handler: app_container.announce_handler.clone(), scrape_handler: app_container.scrape_handler.clone(), in_memory_torrent_repository: 
app_container.in_memory_torrent_repository.clone(), keys_handler: app_container.keys_handler.clone(), @@ -84,6 +87,7 @@ impl Environment { config: self.config, database: self.database.clone(), tracker: self.tracker.clone(), + announce_handler: self.announce_handler.clone(), scrape_handler: self.scrape_handler.clone(), in_memory_torrent_repository: self.in_memory_torrent_repository.clone(), keys_handler: self.keys_handler.clone(), @@ -97,6 +101,7 @@ impl Environment { .server .start( self.tracker, + self.announce_handler, self.scrape_handler, self.authentication_service, self.whitelist_authorization, @@ -119,6 +124,7 @@ impl Environment { config: self.config, database: self.database, tracker: self.tracker, + announce_handler: self.announce_handler, scrape_handler: self.scrape_handler, in_memory_torrent_repository: self.in_memory_torrent_repository, keys_handler: self.keys_handler, diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 09714146d..fafb7ef7a 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -5,6 +5,7 @@ use bittorrent_primitives::info_hash::InfoHash; use tokio::sync::RwLock; use torrust_tracker_configuration::{Configuration, UdpTracker, DEFAULT_TIMEOUT}; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; +use torrust_tracker_lib::core::announce_handler::AnnounceHandler; use torrust_tracker_lib::core::databases::Database; use torrust_tracker_lib::core::scrape_handler::ScrapeHandler; use torrust_tracker_lib::core::statistics::event::sender::Sender; @@ -26,6 +27,7 @@ where pub database: Arc>, pub tracker: Arc, pub in_memory_torrent_repository: Arc, + pub announce_handler: Arc, pub scrape_handler: Arc, pub whitelist_authorization: Arc, pub stats_event_sender: Arc>>, @@ -66,6 +68,7 @@ impl Environment { database: app_container.database.clone(), tracker: app_container.tracker.clone(), in_memory_torrent_repository: 
app_container.in_memory_torrent_repository.clone(), + announce_handler: app_container.announce_handler.clone(), scrape_handler: app_container.scrape_handler.clone(), whitelist_authorization: app_container.whitelist_authorization.clone(), stats_event_sender: app_container.stats_event_sender.clone(), @@ -84,6 +87,7 @@ impl Environment { database: self.database.clone(), tracker: self.tracker.clone(), in_memory_torrent_repository: self.in_memory_torrent_repository.clone(), + announce_handler: self.announce_handler.clone(), scrape_handler: self.scrape_handler.clone(), whitelist_authorization: self.whitelist_authorization.clone(), stats_event_sender: self.stats_event_sender.clone(), @@ -94,6 +98,7 @@ impl Environment { .server .start( self.tracker, + self.announce_handler, self.scrape_handler, self.whitelist_authorization, self.stats_event_sender, @@ -125,6 +130,7 @@ impl Environment { database: self.database, tracker: self.tracker, in_memory_torrent_repository: self.in_memory_torrent_repository, + announce_handler: self.announce_handler, scrape_handler: self.scrape_handler, whitelist_authorization: self.whitelist_authorization, stats_event_sender: self.stats_event_sender, From 209b52c0511a9b69e47a1a45f60310f5ae040723 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 27 Jan 2025 13:53:48 +0000 Subject: [PATCH 161/802] refactor: [#1207] inline methods --- src/app.rs | 6 +++--- src/core/mod.rs | 20 +------------------- 2 files changed, 4 insertions(+), 22 deletions(-) diff --git a/src/app.rs b/src/app.rs index 00414bc10..aafae5ebf 100644 --- a/src/app.rs +++ b/src/app.rs @@ -50,7 +50,7 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< let registar = Registar::default(); // Load peer keys - if app_container.tracker.is_private() { + if config.core.private { app_container .keys_handler .load_keys_from_database() @@ -59,7 +59,7 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< } // Load whitelisted 
torrents - if app_container.tracker.is_listed() { + if config.core.listed { app_container .whitelist_manager .load_whitelist_from_database() @@ -70,7 +70,7 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< // Start the UDP blocks if let Some(udp_trackers) = &config.udp_trackers { for udp_tracker_config in udp_trackers { - if app_container.tracker.is_private() { + if config.core.private { tracing::warn!( "Could not start UDP tracker on: {} while in private mode. UDP is not safe for private trackers!", udp_tracker_config.bind_address diff --git a/src/core/mod.rs b/src/core/mod.rs index 2151ec1ef..ae728dc12 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -496,28 +496,10 @@ impl Tracker { }) } - /// Returns `true` is the tracker is in public mode. - #[must_use] - pub fn is_public(&self) -> bool { - !self.config.private - } - - /// Returns `true` is the tracker is in private mode. - #[must_use] - pub fn is_private(&self) -> bool { - self.config.private - } - - /// Returns `true` is the tracker is in whitelisted mode. - #[must_use] - pub fn is_listed(&self) -> bool { - self.config.listed - } - /// Returns `true` if the tracker requires authentication. #[must_use] pub fn requires_authentication(&self) -> bool { - self.is_private() + self.config.private } /// Returns `true` is the tracker is in whitelisted mode. 
From 2f74cafda7ee3cbc5c1030da448aca7664e8972c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 27 Jan 2025 14:08:01 +0000 Subject: [PATCH 162/802] chore: fix linting errors --- packages/configuration/src/lib.rs | 2 +- packages/configuration/src/v2_0_0/mod.rs | 12 ++++++------ packages/http-protocol/src/v1/requests/announce.rs | 4 ++-- packages/http-protocol/src/v1/requests/mod.rs | 3 --- packages/http-protocol/src/v1/responses/announce.rs | 2 +- packages/http-protocol/src/v1/responses/error.rs | 4 ++-- packages/http-protocol/src/v1/responses/mod.rs | 3 --- packages/http-protocol/src/v1/responses/scrape.rs | 2 +- src/core/mod.rs | 6 +++--- src/servers/http/v1/services/announce.rs | 5 ++--- 10 files changed, 18 insertions(+), 25 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 1ab3479fa..7e384297d 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -3,7 +3,7 @@ //! This module contains the configuration data structures for the //! Torrust Tracker, which is a `BitTorrent` tracker server. //! -//! The current version for configuration is [`v2`]. +//! The current version for configuration is [`v2_0_0`]. pub mod v2_0_0; pub mod validator; diff --git a/packages/configuration/src/v2_0_0/mod.rs b/packages/configuration/src/v2_0_0/mod.rs index 5067210bb..fd742d8d2 100644 --- a/packages/configuration/src/v2_0_0/mod.rs +++ b/packages/configuration/src/v2_0_0/mod.rs @@ -39,11 +39,11 @@ //! Please refer to the documentation of each structure for more information //! about each section. //! -//! - [`Core configuration`](crate::v2::Configuration) -//! - [`HTTP API configuration`](crate::v2::tracker_api::HttpApi) -//! - [`HTTP Tracker configuration`](crate::v2::http_tracker::HttpTracker) -//! - [`UDP Tracker configuration`](crate::v2::udp_tracker::UdpTracker) -//! - [`Health Check API configuration`](crate::v2::health_check_api::HealthCheckApi) +//! 
- [`Core configuration`](crate::v2_0_0::Configuration) +//! - [`HTTP API configuration`](crate::v2_0_0::tracker_api::HttpApi) +//! - [`HTTP Tracker configuration`](crate::v2_0_0::http_tracker::HttpTracker) +//! - [`UDP Tracker configuration`](crate::v2_0_0::udp_tracker::UdpTracker) +//! - [`Health Check API configuration`](crate::v2_0_0::health_check_api::HealthCheckApi) //! //! ## Port binding //! @@ -78,7 +78,7 @@ //! //! Alternatively, you could setup a reverse proxy like Nginx or Apache to //! handle the SSL/TLS part and forward the requests to the tracker. If you do -//! that, you should set [`on_reverse_proxy`](crate::v2::network::Network::on_reverse_proxy) +//! that, you should set [`on_reverse_proxy`](crate::v2_0_0::network::Network::on_reverse_proxy) //! to `true` in the configuration file. It's out of scope for this //! documentation to explain in detail how to setup a reverse proxy, but the //! configuration file should be something like this: diff --git a/packages/http-protocol/src/v1/requests/announce.rs b/packages/http-protocol/src/v1/requests/announce.rs index ea76771dd..9bde7ec13 100644 --- a/packages/http-protocol/src/v1/requests/announce.rs +++ b/packages/http-protocol/src/v1/requests/announce.rs @@ -185,8 +185,8 @@ impl fmt::Display for Event { /// Depending on the value of this param, the tracker will return a different /// response: /// -/// - [`Normal`](crate::servers::http::v1::responses::announce::Normal), i.e. a `non-compact` response. -/// - [`Compact`](crate::servers::http::v1::responses::announce::Compact) response. +/// - [`Normal`](crate::v1::responses::announce::Normal), i.e. a `non-compact` response. +/// - [`Compact`](crate::v1::responses::announce::Compact) response. /// /// Refer to [BEP 23. 
Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) #[derive(PartialEq, Debug)] diff --git a/packages/http-protocol/src/v1/requests/mod.rs b/packages/http-protocol/src/v1/requests/mod.rs index ee34ca72a..d19bd78d3 100644 --- a/packages/http-protocol/src/v1/requests/mod.rs +++ b/packages/http-protocol/src/v1/requests/mod.rs @@ -1,6 +1,3 @@ //! HTTP requests for the HTTP tracker. -//! -//! Refer to the generic [HTTP server documentation](crate::servers::http) for -//! more information about the HTTP tracker. pub mod announce; pub mod scrape; diff --git a/packages/http-protocol/src/v1/responses/announce.rs b/packages/http-protocol/src/v1/responses/announce.rs index 3854c9f34..df187fdd1 100644 --- a/packages/http-protocol/src/v1/responses/announce.rs +++ b/packages/http-protocol/src/v1/responses/announce.rs @@ -1,4 +1,4 @@ -//! `Announce` response for the HTTP tracker [`announce`](bittorrent_http_protocol::v1::requests::announce::Announce) request. +//! `Announce` response for the HTTP tracker [`announce`](crate::v1::requests::announce::Announce) request. //! //! Data structures and logic to build the `announce` response. use std::io::Write; diff --git a/packages/http-protocol/src/v1/responses/error.rs b/packages/http-protocol/src/v1/responses/error.rs index 7516cd39e..f939ce298 100644 --- a/packages/http-protocol/src/v1/responses/error.rs +++ b/packages/http-protocol/src/v1/responses/error.rs @@ -1,4 +1,4 @@ -//! `Error` response for the [`HTTP tracker`](crate::servers::http). +//! `Error` response for the HTTP tracker. //! //! Data structures and logic to build the error responses. //! @@ -15,7 +15,7 @@ use serde::Serialize; use crate::v1::services::peer_ip_resolver::PeerIpResolutionError; -/// `Error` response for the [`HTTP tracker`](crate::servers::http). +/// `Error` response for the HTTP tracker. #[derive(Serialize, Debug, PartialEq)] pub struct Error { /// Human readable string which explains why the request failed. 
diff --git a/packages/http-protocol/src/v1/responses/mod.rs b/packages/http-protocol/src/v1/responses/mod.rs index 495b1eb84..e704d8908 100644 --- a/packages/http-protocol/src/v1/responses/mod.rs +++ b/packages/http-protocol/src/v1/responses/mod.rs @@ -1,7 +1,4 @@ //! HTTP responses for the HTTP tracker. -//! -//! Refer to the generic [HTTP server documentation](crate::servers::http) for -//! more information about the HTTP tracker. pub mod announce; pub mod error; pub mod scrape; diff --git a/packages/http-protocol/src/v1/responses/scrape.rs b/packages/http-protocol/src/v1/responses/scrape.rs index ee4c4155b..6b4dcc793 100644 --- a/packages/http-protocol/src/v1/responses/scrape.rs +++ b/packages/http-protocol/src/v1/responses/scrape.rs @@ -1,4 +1,4 @@ -//! `Scrape` response for the HTTP tracker [`scrape`](bittorrent_http_protocol::v1::requests::scrape::Scrape) request. +//! `Scrape` response for the HTTP tracker [`scrape`](crate::v1::requests::scrape::Scrape) request. //! //! Data structures and logic to build the `scrape` response. use std::borrow::Cow; diff --git a/src/core/mod.rs b/src/core/mod.rs index ae728dc12..d30c47c6d 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -52,7 +52,7 @@ //! The tracker responds to the peer with the list of other peers in the swarm so that //! the peer can contact them to start downloading pieces of the file from them. //! -//! Once you have instantiated the `Tracker` you can `announce` a new [`peer::Peer`] with: +//! Once you have instantiated the `AnnounceHandler` you can `announce` a new [`peer::Peer`](torrust_tracker_primitives::peer::Peer) with: //! //! ```rust,no_run //! use std::net::SocketAddr; @@ -81,7 +81,7 @@ //! ``` //! //! ```text -//! let announce_data = tracker.announce(&info_hash, &mut peer, &peer_ip).await; +//! let announce_data = announce_handler.announce(&info_hash, &mut peer, &peer_ip).await; //! ``` //! //! 
The `Tracker` returns the list of peers for the torrent with the infohash `3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0`, @@ -306,7 +306,7 @@ //! `c1277613db1d28709b034a017ab2cae4be07ae10` is the torrent infohash and `completed` contains the number of peers //! that have a full version of the torrent data, also known as seeders. //! -//! Refer to [`peer`] module for more information about peers. +//! Refer to [`peer`](torrust_tracker_primitives::peer) for more information about peers. //! //! # Configuration //! diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 2c88ebc60..1923037b3 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -2,9 +2,8 @@ //! //! The service is responsible for handling the `announce` requests. //! -//! It delegates the `announce` logic to the [`Tracker`](crate::core::Tracker::announce) -//! and it returns the [`AnnounceData`] returned -//! by the [`Tracker`]. +//! It delegates the `announce` logic to the [`AnnounceHandler`] and it returns +//! the [`AnnounceData`]. //! //! It also sends an [`statistics::event::Event`] //! because events are specific for the HTTP tracker. 
From 85a2a2969f29d1287da280f99d6f62b90dcb490e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 27 Jan 2025 16:37:06 +0000 Subject: [PATCH 163/802] refactor: [#1209] inline core Tracker methods --- src/app.rs | 4 + src/bootstrap/jobs/http_tracker.rs | 7 +- src/bootstrap/jobs/udp_tracker.rs | 4 +- src/core/mod.rs | 31 +------- src/servers/http/server.rs | 6 ++ src/servers/http/v1/handlers/announce.rs | 47 ++++++++---- src/servers/http/v1/handlers/scrape.rs | 77 ++++++++++++++++--- src/servers/http/v1/routes.rs | 8 +- src/servers/http/v1/services/announce.rs | 11 ++- src/servers/udp/handlers.rs | 96 ++++++++++++++++++++---- src/servers/udp/server/launcher.rs | 7 +- src/servers/udp/server/mod.rs | 2 + src/servers/udp/server/processor.rs | 6 ++ src/servers/udp/server/spawner.rs | 3 + src/servers/udp/server/states.rs | 3 + tests/servers/http/environment.rs | 15 ++-- tests/servers/http/v1/contract.rs | 10 +-- tests/servers/udp/environment.rs | 7 +- 18 files changed, 255 insertions(+), 89 deletions(-) diff --git a/src/app.rs b/src/app.rs index aafae5ebf..54ccbc60c 100644 --- a/src/app.rs +++ b/src/app.rs @@ -21,6 +21,8 @@ //! - UDP trackers: the user can enable multiple UDP tracker on several ports. //! - HTTP trackers: the user can enable multiple HTTP tracker on several ports. //! - Tracker REST API: the tracker API can be enabled/disabled. 
+use std::sync::Arc; + use tokio::task::JoinHandle; use torrust_tracker_configuration::Configuration; use tracing::instrument; @@ -78,6 +80,7 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< } else { jobs.push( udp_tracker::start_job( + Arc::new(config.core.clone()), udp_tracker_config, app_container.tracker.clone(), app_container.announce_handler.clone(), @@ -100,6 +103,7 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< for http_tracker_config in http_trackers { if let Some(job) = http_tracker::start_job( http_tracker_config, + Arc::new(config.core.clone()), app_container.tracker.clone(), app_container.announce_handler.clone(), app_container.scrape_handler.clone(), diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 5da7da739..5767f30ce 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -15,7 +15,7 @@ use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use tokio::task::JoinHandle; -use torrust_tracker_configuration::HttpTracker; +use torrust_tracker_configuration::{Core, HttpTracker}; use tracing::instrument; use super::make_rust_tls; @@ -49,6 +49,7 @@ use crate::servers::registar::ServiceRegistrationForm; ))] pub async fn start_job( config: &HttpTracker, + core_config: Arc, tracker: Arc, announce_handler: Arc, scrape_handler: Arc, @@ -69,6 +70,7 @@ pub async fn start_job( start_v1( socket, tls, + core_config.clone(), tracker.clone(), announce_handler.clone(), scrape_handler.clone(), @@ -97,6 +99,7 @@ pub async fn start_job( async fn start_v1( socket: SocketAddr, tls: Option, + config: Arc, tracker: Arc, announce_handler: Arc, scrape_handler: Arc, @@ -107,6 +110,7 @@ async fn start_v1( ) -> JoinHandle<()> { let server = HttpServer::new(Launcher::new(socket, tls)) .start( + config, tracker, announce_handler, scrape_handler, @@ -156,6 +160,7 @@ mod tests { start_job( config, + Arc::new(cfg.core.clone()), 
app_container.tracker, app_container.announce_handler, app_container.scrape_handler, diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index d43c1c930..36f3cd7b0 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -10,7 +10,7 @@ use std::sync::Arc; use tokio::sync::RwLock; use tokio::task::JoinHandle; -use torrust_tracker_configuration::UdpTracker; +use torrust_tracker_configuration::{Core, UdpTracker}; use tracing::instrument; use crate::core::announce_handler::AnnounceHandler; @@ -46,6 +46,7 @@ use crate::servers::udp::UDP_TRACKER_LOG_TARGET; form ))] pub async fn start_job( + core_config: Arc, config: &UdpTracker, tracker: Arc, announce_handler: Arc, @@ -60,6 +61,7 @@ pub async fn start_job( let server = Server::new(Spawner::new(bind_to)) .start( + core_config, tracker, announce_handler, scrape_handler, diff --git a/src/core/mod.rs b/src/core/mod.rs index d30c47c6d..43a2aa11d 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -451,12 +451,11 @@ pub mod whitelist; pub mod peer_tests; -use std::net::IpAddr; use std::sync::Arc; use torrent::repository::in_memory::InMemoryTorrentRepository; use torrent::repository::persisted::DatabasePersistentTorrentRepository; -use torrust_tracker_configuration::{AnnouncePolicy, Core}; +use torrust_tracker_configuration::Core; /// The domain layer tracker service. /// @@ -469,7 +468,7 @@ use torrust_tracker_configuration::{AnnouncePolicy, Core}; /// > the network layer. pub struct Tracker { /// The tracker configuration. - config: Core, + _core_config: Core, /// The in-memory torrents repository. _in_memory_torrent_repository: Arc, @@ -485,38 +484,16 @@ impl Tracker { /// /// Will return a `databases::error::Error` if unable to connect to database. The `Tracker` is responsible for the persistence. 
pub fn new( - config: &Core, + core_config: &Core, in_memory_torrent_repository: &Arc, db_torrent_repository: &Arc, ) -> Result { Ok(Tracker { - config: config.clone(), + _core_config: core_config.clone(), _in_memory_torrent_repository: in_memory_torrent_repository.clone(), _db_torrent_repository: db_torrent_repository.clone(), }) } - - /// Returns `true` if the tracker requires authentication. - #[must_use] - pub fn requires_authentication(&self) -> bool { - self.config.private - } - - /// Returns `true` is the tracker is in whitelisted mode. - #[must_use] - pub fn is_behind_reverse_proxy(&self) -> bool { - self.config.net.on_reverse_proxy - } - - #[must_use] - pub fn get_announce_policy(&self) -> AnnouncePolicy { - self.config.announce_policy - } - - #[must_use] - pub fn get_maybe_external_ip(&self) -> Option { - self.config.net.external_ip - } } #[cfg(test)] diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 3bb49c1ad..3817882df 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -7,6 +7,7 @@ use axum_server::Handle; use derive_more::Constructor; use futures::future::BoxFuture; use tokio::sync::oneshot::{Receiver, Sender}; +use torrust_tracker_configuration::Core; use tracing::instrument; use super::v1::routes::router; @@ -59,6 +60,7 @@ impl Launcher { ))] fn start( &self, + config: Arc, tracker: Arc, announce_handler: Arc, scrape_handler: Arc, @@ -85,6 +87,7 @@ impl Launcher { tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting on: {protocol}://{}", address); let app = router( + config, tracker, announce_handler, scrape_handler, @@ -188,6 +191,7 @@ impl HttpServer { #[allow(clippy::too_many_arguments)] pub async fn start( self, + core_config: Arc, tracker: Arc, announce_handler: Arc, scrape_handler: Arc, @@ -203,6 +207,7 @@ impl HttpServer { let task = tokio::spawn(async move { let server = launcher.start( + core_config, tracker, announce_handler, scrape_handler, @@ -309,6 +314,7 @@ mod tests { let stopped = 
HttpServer::new(Launcher::new(bind_to, tls)); let started = stopped .start( + Arc::new(cfg.core.clone()), app_container.tracker, app_container.announce_handler, app_container.scrape_handler, diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 39ddd1710..ebdb717c3 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -18,6 +18,7 @@ use bittorrent_http_protocol::v1::services::peer_ip_resolver; use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; use hyper::StatusCode; use torrust_tracker_clock::clock::Time; +use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; @@ -39,6 +40,7 @@ use crate::CurrentClock; #[allow(clippy::type_complexity)] pub async fn handle_without_key( State(state): State<( + Arc, Arc, Arc, Arc, @@ -56,6 +58,7 @@ pub async fn handle_without_key( &state.2, &state.3, &state.4, + &state.5, &announce_request, &client_ip_sources, None, @@ -69,6 +72,7 @@ pub async fn handle_without_key( #[allow(clippy::type_complexity)] pub async fn handle_with_key( State(state): State<( + Arc, Arc, Arc, Arc, @@ -87,6 +91,7 @@ pub async fn handle_with_key( &state.2, &state.3, &state.4, + &state.5, &announce_request, &client_ip_sources, Some(key), @@ -100,6 +105,7 @@ pub async fn handle_with_key( /// `unauthenticated` modes. 
#[allow(clippy::too_many_arguments)] async fn handle( + config: &Arc, tracker: &Arc, announce_handler: &Arc, authentication_service: &Arc, @@ -110,6 +116,7 @@ async fn handle( maybe_key: Option, ) -> Response { let announce_data = match handle_announce( + config, tracker, announce_handler, authentication_service, @@ -135,6 +142,7 @@ async fn handle( #[allow(clippy::too_many_arguments)] async fn handle_announce( + core_config: &Arc, tracker: &Arc, announce_handler: &Arc, authentication_service: &Arc, @@ -145,7 +153,7 @@ async fn handle_announce( maybe_key: Option, ) -> Result { // Authentication - if tracker.requires_authentication() { + if core_config.private { match maybe_key { Some(key) => match authentication_service.authenticate(&key).await { Ok(()) => (), @@ -165,7 +173,7 @@ async fn handle_announce( Err(error) => return Err(responses::error::Error::from(error)), } - let peer_ip = match peer_ip_resolver::invoke(tracker.is_behind_reverse_proxy(), client_ip_sources) { + let peer_ip = match peer_ip_resolver::invoke(core_config.net.on_reverse_proxy, client_ip_sources) { Ok(peer_ip) => peer_ip, Err(error) => return Err(responses::error::Error::from(error)), }; @@ -251,7 +259,7 @@ mod tests { use bittorrent_http_protocol::v1::responses; use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; use bittorrent_primitives::info_hash::InfoHash; - use torrust_tracker_configuration::Configuration; + use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; @@ -262,6 +270,7 @@ mod tests { use crate::core::{whitelist, Tracker}; type TrackerAndDeps = ( + Arc, Arc, Arc, Arc>>, @@ -270,23 +279,23 @@ mod tests { ); fn private_tracker() -> TrackerAndDeps { - initialize_tracker_and_deps(&configuration::ephemeral_private()) + initialize_tracker_and_deps(configuration::ephemeral_private()) } fn whitelisted_tracker() -> TrackerAndDeps { - 
initialize_tracker_and_deps(&configuration::ephemeral_listed()) + initialize_tracker_and_deps(configuration::ephemeral_listed()) } fn tracker_on_reverse_proxy() -> TrackerAndDeps { - initialize_tracker_and_deps(&configuration::ephemeral_with_reverse_proxy()) + initialize_tracker_and_deps(configuration::ephemeral_with_reverse_proxy()) } fn tracker_not_on_reverse_proxy() -> TrackerAndDeps { - initialize_tracker_and_deps(&configuration::ephemeral_without_reverse_proxy()) + initialize_tracker_and_deps(configuration::ephemeral_without_reverse_proxy()) } /// Initialize tracker's dependencies and tracker. - fn initialize_tracker_and_deps(config: &Configuration) -> TrackerAndDeps { + fn initialize_tracker_and_deps(config: Configuration) -> TrackerAndDeps { let ( _database, _in_memory_whitelist, @@ -295,13 +304,13 @@ mod tests { in_memory_torrent_repository, db_torrent_repository, _torrents_manager, - ) = initialize_tracker_dependencies(config); + ) = initialize_tracker_dependencies(&config); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); let tracker = Arc::new(initialize_tracker( - config, + &config, &in_memory_torrent_repository, &db_torrent_repository, )); @@ -312,7 +321,10 @@ mod tests { &db_torrent_repository, )); + let config = Arc::new(config.core); + ( + config, tracker, announce_handler, stats_event_sender, @@ -361,7 +373,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_authentication_key_is_missing() { - let (tracker, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = + let (config, tracker, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = private_tracker(); let tracker = Arc::new(tracker); @@ -370,6 +382,7 @@ mod tests { let maybe_key = None; let response = handle_announce( + &config, &tracker, &announce_handler, &authentication_service, @@ -390,7 
+403,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_authentication_key_is_invalid() { - let (tracker, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = + let (config, tracker, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = private_tracker(); let tracker = Arc::new(tracker); @@ -401,6 +414,7 @@ mod tests { let maybe_key = Some(unregistered_key); let response = handle_announce( + &config, &tracker, &announce_handler, &authentication_service, @@ -427,7 +441,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_announced_torrent_is_not_whitelisted() { - let (tracker, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = + let (config, tracker, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = whitelisted_tracker(); let tracker = Arc::new(tracker); @@ -436,6 +450,7 @@ mod tests { let announce_request = sample_announce_request(); let response = handle_announce( + &config, &tracker, &announce_handler, &authentication_service, @@ -470,7 +485,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { - let (tracker, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = + let (config, tracker, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = tracker_on_reverse_proxy(); let tracker = Arc::new(tracker); @@ -482,6 +497,7 @@ mod tests { }; let response = handle_announce( + &config, &tracker, &announce_handler, &authentication_service, @@ -513,7 +529,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { - let (tracker, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = + let (config, tracker, announce_handler, stats_event_sender, whitelist_authorization, 
authentication_service) = tracker_not_on_reverse_proxy(); let tracker = Arc::new(tracker); @@ -525,6 +541,7 @@ mod tests { }; let response = handle_announce( + &config, &tracker, &announce_handler, &authentication_service, diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 3c19fe324..4f47a066f 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -13,6 +13,7 @@ use bittorrent_http_protocol::v1::requests::scrape::Scrape; use bittorrent_http_protocol::v1::responses; use bittorrent_http_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources}; use hyper::StatusCode; +use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; use crate::core::authentication::service::AuthenticationService; @@ -31,6 +32,7 @@ use crate::servers::http::v1::services; #[allow(clippy::type_complexity)] pub async fn handle_without_key( State(state): State<( + Arc, Arc, Arc, Arc, @@ -46,6 +48,7 @@ pub async fn handle_without_key( &state.1, &state.2, &state.3, + &state.4, &scrape_request, &client_ip_sources, None, @@ -61,6 +64,7 @@ pub async fn handle_without_key( #[allow(clippy::type_complexity)] pub async fn handle_with_key( State(state): State<( + Arc, Arc, Arc, Arc, @@ -77,6 +81,7 @@ pub async fn handle_with_key( &state.1, &state.2, &state.3, + &state.4, &scrape_request, &client_ip_sources, Some(key), @@ -84,7 +89,9 @@ pub async fn handle_with_key( .await } +#[allow(clippy::too_many_arguments)] async fn handle( + core_config: &Arc, tracker: &Arc, scrape_handler: &Arc, authentication_service: &Arc, @@ -94,6 +101,7 @@ async fn handle( maybe_key: Option, ) -> Response { let scrape_data = match handle_scrape( + core_config, tracker, scrape_handler, authentication_service, @@ -116,8 +124,10 @@ async fn handle( See https://github.com/torrust/torrust-tracker/discussions/240. 
*/ +#[allow(clippy::too_many_arguments)] async fn handle_scrape( - tracker: &Arc, + core_config: &Arc, + _tracker: &Arc, scrape_handler: &Arc, authentication_service: &Arc, opt_stats_event_sender: &Arc>>, @@ -126,7 +136,7 @@ async fn handle_scrape( maybe_key: Option, ) -> Result { // Authentication - let return_real_scrape_data = if tracker.requires_authentication() { + let return_real_scrape_data = if core_config.private { match maybe_key { Some(key) => match authentication_service.authenticate(&key).await { Ok(()) => true, @@ -141,7 +151,7 @@ async fn handle_scrape( // Authorization for scrape requests is handled at the `Tracker` level // for each torrent. - let peer_ip = match peer_ip_resolver::invoke(tracker.is_behind_reverse_proxy(), client_ip_sources) { + let peer_ip = match peer_ip_resolver::invoke(core_config.net.on_reverse_proxy, client_ip_sources) { Ok(peer_ip) => peer_ip, Err(error) => return Err(responses::error::Error::from(error)), }; @@ -169,6 +179,7 @@ mod tests { use bittorrent_http_protocol::v1::responses; use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_configuration::Core; use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; @@ -179,6 +190,7 @@ mod tests { #[allow(clippy::type_complexity)] fn private_tracker() -> ( + Arc, Arc, Arc, Arc>>, @@ -200,6 +212,8 @@ mod tests { let stats_event_sender = Arc::new(stats_event_sender); + let core_config = Arc::new(config.core.clone()); + let tracker = Arc::new(initialize_tracker( &config, &in_memory_torrent_repository, @@ -208,11 +222,18 @@ mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - (tracker, scrape_handler, stats_event_sender, authentication_service) + ( + core_config, + tracker, + scrape_handler, + stats_event_sender, + authentication_service, + ) } 
#[allow(clippy::type_complexity)] fn whitelisted_tracker() -> ( + Arc, Arc, Arc, Arc>>, @@ -234,6 +255,8 @@ mod tests { let stats_event_sender = Arc::new(stats_event_sender); + let core_config = Arc::new(config.core.clone()); + let tracker = Arc::new(initialize_tracker( &config, &in_memory_torrent_repository, @@ -242,11 +265,18 @@ mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - (tracker, scrape_handler, stats_event_sender, authentication_service) + ( + core_config, + tracker, + scrape_handler, + stats_event_sender, + authentication_service, + ) } #[allow(clippy::type_complexity)] fn tracker_on_reverse_proxy() -> ( + Arc, Arc, Arc, Arc>>, @@ -268,6 +298,8 @@ mod tests { let stats_event_sender = Arc::new(stats_event_sender); + let core_config = Arc::new(config.core.clone()); + let tracker = Arc::new(initialize_tracker( &config, &in_memory_torrent_repository, @@ -276,11 +308,18 @@ mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - (tracker, scrape_handler, stats_event_sender, authentication_service) + ( + core_config, + tracker, + scrape_handler, + stats_event_sender, + authentication_service, + ) } #[allow(clippy::type_complexity)] fn tracker_not_on_reverse_proxy() -> ( + Arc, Arc, Arc, Arc>>, @@ -302,6 +341,8 @@ mod tests { let stats_event_sender = Arc::new(stats_event_sender); + let core_config = Arc::new(config.core.clone()); + let tracker = Arc::new(initialize_tracker( &config, &in_memory_torrent_repository, @@ -310,7 +351,13 @@ mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - (tracker, scrape_handler, stats_event_sender, authentication_service) + ( + core_config, + tracker, + scrape_handler, + stats_event_sender, + authentication_service, + ) } fn sample_scrape_request() -> Scrape { @@ -344,12 +391,13 @@ mod tests { #[tokio::test] async fn 
it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_missing() { - let (tracker, scrape_handler, stats_event_sender, authentication_service) = private_tracker(); + let (core_config, tracker, scrape_handler, stats_event_sender, authentication_service) = private_tracker(); let scrape_request = sample_scrape_request(); let maybe_key = None; let scrape_data = handle_scrape( + &core_config, &tracker, &scrape_handler, &authentication_service, @@ -368,13 +416,14 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_invalid() { - let (tracker, scrape_handler, stats_event_sender, authentication_service) = private_tracker(); + let (core_config, tracker, scrape_handler, stats_event_sender, authentication_service) = private_tracker(); let scrape_request = sample_scrape_request(); let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); let maybe_key = Some(unregistered_key); let scrape_data = handle_scrape( + &core_config, &tracker, &scrape_handler, &authentication_service, @@ -401,11 +450,12 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_torrent_is_not_whitelisted() { - let (tracker, scrape_handler, stats_event_sender, authentication_service) = whitelisted_tracker(); + let (core_config, tracker, scrape_handler, stats_event_sender, authentication_service) = whitelisted_tracker(); let scrape_request = sample_scrape_request(); let scrape_data = handle_scrape( + &core_config, &tracker, &scrape_handler, &authentication_service, @@ -433,7 +483,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { - let (tracker, scrape_handler, stats_event_sender, authentication_service) = tracker_on_reverse_proxy(); + let (core_config, tracker, scrape_handler, stats_event_sender, authentication_service) = tracker_on_reverse_proxy(); let client_ip_sources = ClientIpSources { 
right_most_x_forwarded_for: None, @@ -441,6 +491,7 @@ mod tests { }; let response = handle_scrape( + &core_config, &tracker, &scrape_handler, &authentication_service, @@ -469,7 +520,8 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { - let (tracker, scrape_handler, stats_event_sender, authentication_service) = tracker_not_on_reverse_proxy(); + let (core_config, tracker, scrape_handler, stats_event_sender, authentication_service) = + tracker_not_on_reverse_proxy(); let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, @@ -477,6 +529,7 @@ mod tests { }; let response = handle_scrape( + &core_config, &tracker, &scrape_handler, &authentication_service, diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index 50e1494be..85564ca8c 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -10,7 +10,7 @@ use axum::routing::get; use axum::{BoxError, Router}; use axum_client_ip::SecureClientIpSource; use hyper::{Request, StatusCode}; -use torrust_tracker_configuration::DEFAULT_TIMEOUT; +use torrust_tracker_configuration::{Core, DEFAULT_TIMEOUT}; use tower::timeout::TimeoutLayer; use tower::ServiceBuilder; use tower_http::classify::ServerErrorsFailureClass; @@ -34,6 +34,7 @@ use crate::servers::logging::Latency; /// /// > **NOTICE**: it's added a layer to get the client IP from the connection /// > info. The tracker could use the connection info to get the client IP. 
+#[allow(clippy::too_many_arguments)] #[allow(clippy::needless_pass_by_value)] #[instrument(skip( tracker, @@ -45,6 +46,7 @@ use crate::servers::logging::Latency; server_socket_addr ))] pub fn router( + core_config: Arc, tracker: Arc, announce_handler: Arc, scrape_handler: Arc, @@ -60,6 +62,7 @@ pub fn router( .route( "/announce", get(announce::handle_without_key).with_state(( + core_config.clone(), tracker.clone(), announce_handler.clone(), authentication_service.clone(), @@ -70,6 +73,7 @@ pub fn router( .route( "/announce/{key}", get(announce::handle_with_key).with_state(( + core_config.clone(), tracker.clone(), announce_handler.clone(), authentication_service.clone(), @@ -81,6 +85,7 @@ pub fn router( .route( "/scrape", get(scrape::handle_without_key).with_state(( + core_config.clone(), tracker.clone(), scrape_handler.clone(), authentication_service.clone(), @@ -90,6 +95,7 @@ pub fn router( .route( "/scrape/{key}", get(scrape::handle_with_key).with_state(( + core_config.clone(), tracker.clone(), scrape_handler.clone(), authentication_service.clone(), diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 1923037b3..5e2e8f716 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -63,6 +63,7 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_configuration::Core; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; @@ -73,7 +74,7 @@ mod tests { use crate::core::Tracker; #[allow(clippy::type_complexity)] - fn public_tracker() -> (Arc, Arc, Arc>>) { + fn public_tracker() -> (Arc, Arc, Arc, Arc>>) { let config = configuration::ephemeral_public(); let ( @@ -100,7 +101,9 @@ mod tests { &db_torrent_repository, )); - (tracker, announce_handler, stats_event_sender) + let core_config = Arc::new(config.core.clone()); + + 
(core_config, tracker, announce_handler, stats_event_sender) } fn sample_info_hash() -> InfoHash { @@ -176,7 +179,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_announce_data() { - let (tracker, announce_handler, stats_event_sender) = public_tracker(); + let (core_config, tracker, announce_handler, stats_event_sender) = public_tracker(); let mut peer = sample_peer(); @@ -197,7 +200,7 @@ mod tests { complete: 1, incomplete: 0, }, - policy: tracker.get_announce_policy(), + policy: core_config.announce_policy, }; assert_eq!(announce_data, expected_announce_data); diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 03a0248d4..b3ec1cb06 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -13,6 +13,7 @@ use aquatic_udp_protocol::{ use bittorrent_primitives::info_hash::InfoHash; use tokio::sync::RwLock; use torrust_tracker_clock::clock::Time as _; +use torrust_tracker_configuration::Core; use tracing::{instrument, Level}; use uuid::Uuid; use zerocopy::network_endian::I32; @@ -60,6 +61,7 @@ impl CookieTimeValues { #[instrument(fields(request_id), skip(udp_request, tracker, announce_handler, scrape_handler, whitelist_authorization, opt_stats_event_sender, cookie_time_values, ban_service), ret(level = Level::TRACE))] pub(crate) async fn handle_packet( udp_request: RawRequest, + core_config: &Arc, tracker: &Tracker, announce_handler: &Arc, scrape_handler: &Arc, @@ -81,6 +83,7 @@ pub(crate) async fn handle_packet( Ok(request) => match handle_request( request, udp_request.from, + core_config, tracker, announce_handler, scrape_handler, @@ -154,6 +157,7 @@ pub(crate) async fn handle_packet( pub async fn handle_request( request: Request, remote_addr: SocketAddr, + core_config: &Arc, tracker: &Tracker, announce_handler: &Arc, scrape_handler: &Arc, @@ -175,6 +179,7 @@ pub async fn handle_request( handle_announce( remote_addr, &announce_request, + core_config, tracker, announce_handler, whitelist_authorization, @@ 
-240,11 +245,13 @@ pub async fn handle_connect( /// # Errors /// /// If a error happens in the `handle_announce` function, it will just return the `ServerError`. -#[instrument(fields(transaction_id, connection_id, info_hash), skip(tracker, announce_handler, whitelist_authorization, opt_stats_event_sender), ret(level = Level::TRACE))] +#[allow(clippy::too_many_arguments)] +#[instrument(fields(transaction_id, connection_id, info_hash), skip(_tracker, announce_handler, whitelist_authorization, opt_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_announce( remote_addr: SocketAddr, request: &AnnounceRequest, - tracker: &Tracker, + core_config: &Arc, + _tracker: &Tracker, announce_handler: &Arc, whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, @@ -297,7 +304,7 @@ pub async fn handle_announce( let announce_response = AnnounceResponse { fixed: AnnounceResponseFixedData { transaction_id: request.transaction_id, - announce_interval: AnnounceInterval(I32::new(i64::from(tracker.get_announce_policy().interval) as i32)), + announce_interval: AnnounceInterval(I32::new(i64::from(core_config.announce_policy.interval) as i32)), leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), seeders: NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), }, @@ -322,7 +329,7 @@ pub async fn handle_announce( let announce_response = AnnounceResponse { fixed: AnnounceResponseFixedData { transaction_id: request.transaction_id, - announce_interval: AnnounceInterval(I32::new(i64::from(tracker.get_announce_policy().interval) as i32)), + announce_interval: AnnounceInterval(I32::new(i64::from(core_config.announce_policy.interval) as i32)), leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), seeders: NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), }, @@ -492,7 +499,7 @@ mod tests { use aquatic_udp_protocol::{NumberOfBytes, PeerId}; use torrust_tracker_clock::clock::Time; - use 
torrust_tracker_configuration::Configuration; + use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_primitives::peer; use torrust_tracker_test_helpers::configuration; @@ -509,6 +516,7 @@ mod tests { use crate::CurrentClock; type TrackerAndDeps = ( + Arc, Arc, Arc, Arc, @@ -536,6 +544,8 @@ mod tests { } fn initialize_tracker_and_deps(config: &Configuration) -> TrackerAndDeps { + let core_config = Arc::new(config.core.clone()); + let ( database, in_memory_whitelist, @@ -565,6 +575,7 @@ mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); ( + core_config, tracker, announce_handler, scrape_handler, @@ -670,7 +681,9 @@ mod tests { } } + #[allow(clippy::type_complexity)] fn test_tracker_factory() -> ( + Arc, Arc, Arc, Arc, @@ -678,6 +691,8 @@ mod tests { ) { let config = tracker_configuration(); + let core_config = Arc::new(config.core.clone()); + let ( _database, _in_memory_whitelist, @@ -698,7 +713,13 @@ mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - (tracker, announce_handler, scrape_handler, whitelist_authorization) + ( + core_config, + tracker, + announce_handler, + scrape_handler, + whitelist_authorization, + ) } mod connect_request { @@ -910,6 +931,7 @@ mod tests { PeerId as AquaticPeerId, Response, ResponsePeer, }; use mockall::predicate::eq; + use torrust_tracker_configuration::Core; use crate::core::announce_handler::AnnounceHandler; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -925,6 +947,7 @@ mod tests { #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { let ( + core_config, tracker, announce_handler, _scrape_handler, @@ -953,6 +976,7 @@ mod tests { handle_announce( remote_addr, &request, + &core_config, &tracker, &announce_handler, &whitelist_authorization, @@ -975,6 +999,7 @@ mod tests { #[tokio::test] async fn 
the_announced_peer_should_not_be_included_in_the_response() { let ( + core_config, tracker, announce_handler, _scrape_handler, @@ -994,6 +1019,7 @@ mod tests { let response = handle_announce( remote_addr, &request, + &core_config, &tracker, &announce_handler, &whitelist_authorization, @@ -1025,6 +1051,7 @@ mod tests { // "Do note that most trackers will only honor the IP address field under limited circumstances." let ( + core_config, tracker, announce_handler, _scrape_handler, @@ -1056,6 +1083,7 @@ mod tests { handle_announce( remote_addr, &request, + &core_config, &tracker, &announce_handler, &whitelist_authorization, @@ -1087,6 +1115,7 @@ mod tests { } async fn announce_a_new_peer_using_ipv4( + core_config: Arc, tracker: Arc, announce_handler: Arc, whitelist_authorization: Arc, @@ -1102,6 +1131,7 @@ mod tests { handle_announce( remote_addr, &request, + &core_config, &tracker, &announce_handler, &whitelist_authorization, @@ -1115,6 +1145,7 @@ mod tests { #[tokio::test] async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { let ( + core_config, tracker, announce_handler, _scrape_handler, @@ -1127,8 +1158,13 @@ mod tests { add_a_torrent_peer_using_ipv6(&in_memory_torrent_repository); - let response = - announce_a_new_peer_using_ipv4(tracker.clone(), announce_handler.clone(), whitelist_authorization).await; + let response = announce_a_new_peer_using_ipv4( + core_config.clone(), + tracker.clone(), + announce_handler.clone(), + whitelist_authorization, + ) + .await; // The response should not contain the peer using IPV6 let peers: Option>> = match response { @@ -1150,11 +1186,12 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let (tracker, announce_handler, _scrape_handler, whitelist_authorization) = test_tracker_factory(); + let (core_config, tracker, announce_handler, _scrape_handler, whitelist_authorization) = test_tracker_factory(); handle_announce( 
sample_ipv4_socket_address(), &AnnounceRequestBuilder::default().into(), + &core_config, &tracker, &announce_handler, &whitelist_authorization, @@ -1181,6 +1218,7 @@ mod tests { #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { let ( + core_config, tracker, announce_handler, _scrape_handler, @@ -1209,6 +1247,7 @@ mod tests { handle_announce( remote_addr, &request, + &core_config, &tracker, &announce_handler, &whitelist_authorization, @@ -1220,7 +1259,7 @@ mod tests { let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); - let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap(); + let external_ip_in_tracker_configuration = core_config.net.external_ip.unwrap(); let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer_id) @@ -1243,6 +1282,7 @@ mod tests { PeerId as AquaticPeerId, Response, ResponsePeer, }; use mockall::predicate::eq; + use torrust_tracker_configuration::Core; use crate::core::announce_handler::AnnounceHandler; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -1258,6 +1298,7 @@ mod tests { #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { let ( + core_config, tracker, announce_handler, _scrape_handler, @@ -1287,6 +1328,7 @@ mod tests { handle_announce( remote_addr, &request, + &core_config, &tracker, &announce_handler, &whitelist_authorization, @@ -1309,6 +1351,7 @@ mod tests { #[tokio::test] async fn the_announced_peer_should_not_be_included_in_the_response() { let ( + core_config, tracker, announce_handler, _scrape_handler, @@ -1331,6 +1374,7 @@ mod tests { let response = handle_announce( remote_addr, &request, + &core_config, &tracker, &announce_handler, &whitelist_authorization, @@ -1362,6 +1406,7 @@ mod tests { // "Do note that most trackers will only honor the IP address field under limited circumstances." 
let ( + core_config, tracker, announce_handler, _scrape_handler, @@ -1393,6 +1438,7 @@ mod tests { handle_announce( remote_addr, &request, + &core_config, &tracker, &announce_handler, &whitelist_authorization, @@ -1424,6 +1470,7 @@ mod tests { } async fn announce_a_new_peer_using_ipv6( + core_config: Arc, tracker: Arc, announce_handler: Arc, whitelist_authorization: Arc, @@ -1442,6 +1489,7 @@ mod tests { handle_announce( remote_addr, &request, + &core_config, &tracker, &announce_handler, &whitelist_authorization, @@ -1455,6 +1503,7 @@ mod tests { #[tokio::test] async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { let ( + core_config, tracker, announce_handler, _scrape_handler, @@ -1467,8 +1516,13 @@ mod tests { add_a_torrent_peer_using_ipv4(&in_memory_torrent_repository); - let response = - announce_a_new_peer_using_ipv6(tracker.clone(), announce_handler.clone(), whitelist_authorization).await; + let response = announce_a_new_peer_using_ipv6( + core_config.clone(), + tracker.clone(), + announce_handler.clone(), + whitelist_authorization, + ) + .await; // The response should not contain the peer using IPV4 let peers: Option>> = match response { @@ -1490,7 +1544,7 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let (tracker, announce_handler, _scrape_handler, whitelist_authorization) = test_tracker_factory(); + let (core_config, tracker, announce_handler, _scrape_handler, whitelist_authorization) = test_tracker_factory(); let remote_addr = sample_ipv6_remote_addr(); @@ -1501,6 +1555,7 @@ mod tests { handle_announce( remote_addr, &announce_request, + &core_config, &tracker, &announce_handler, &whitelist_authorization, @@ -1582,9 +1637,12 @@ mod tests { .with_port(client_port) .into(); + let core_config = Arc::new(config.core.clone()); + handle_announce( remote_addr, &request, + &core_config, &tracker, &announce_handler, &whitelist_authorization, @@ 
-1596,7 +1654,7 @@ mod tests { let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); - let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap(); + let external_ip_in_tracker_configuration = core_config.net.external_ip.unwrap(); assert!(external_ip_in_tracker_configuration.is_ipv6()); @@ -1641,6 +1699,7 @@ mod tests { #[tokio::test] async fn should_return_no_stats_when_the_tracker_does_not_have_any_torrent() { let ( + _core_config, _tracker, _announce_handler, scrape_handler, @@ -1750,6 +1809,7 @@ mod tests { #[tokio::test] async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { let ( + _core_config, _tracker, _announce_handler, scrape_handler, @@ -1786,6 +1846,7 @@ mod tests { #[tokio::test] async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { let ( + _core_config, _tracker, _announce_handler, scrape_handler, @@ -1830,6 +1891,7 @@ mod tests { #[tokio::test] async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { let ( + _core_config, _tracker, _announce_handler, scrape_handler, @@ -1903,7 +1965,8 @@ mod tests { let remote_addr = sample_ipv4_remote_addr(); - let (_tracker, _announce_handler, scrape_handler, _whitelist_authorization) = test_tracker_factory(); + let (_core_config, _tracker, _announce_handler, scrape_handler, _whitelist_authorization) = + test_tracker_factory(); handle_scrape( remote_addr, @@ -1943,7 +2006,8 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); - let (_tracker, _announce_handler, scrape_handler, _whitelist_authorization) = test_tracker_factory(); + let (_core_config, _tracker, _announce_handler, scrape_handler, _whitelist_authorization) = + test_tracker_factory(); handle_scrape( remote_addr, diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index f1d0e4859..d0ae14029 100644 --- a/src/servers/udp/server/launcher.rs +++ 
b/src/servers/udp/server/launcher.rs @@ -8,6 +8,7 @@ use futures_util::StreamExt; use tokio::select; use tokio::sync::{oneshot, RwLock}; use tokio::time::interval; +use torrust_tracker_configuration::Core; use tracing::instrument; use super::banning::BanService; @@ -55,6 +56,7 @@ impl Launcher { rx_halt ))] pub async fn run_with_graceful_shutdown( + core_config: Arc, tracker: Arc, announce_handler: Arc, scrape_handler: Arc, @@ -68,7 +70,7 @@ impl Launcher { ) { tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting on: {bind_to}"); - if tracker.requires_authentication() { + if core_config.private { tracing::error!("udp services cannot be used for private trackers"); panic!("it should not use udp if using authentication"); } @@ -100,6 +102,7 @@ impl Launcher { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_with_graceful_shutdown::task (listening...)"); let () = Self::run_udp_server_main( receiver, + core_config.clone(), tracker.clone(), announce_handler.clone(), scrape_handler.clone(), @@ -157,6 +160,7 @@ impl Launcher { ))] async fn run_udp_server_main( mut receiver: Receiver, + core_config: Arc, tracker: Arc, announce_handler: Arc, scrape_handler: Arc, @@ -230,6 +234,7 @@ impl Launcher { let processor = Processor::new( receiver.socket.clone(), + core_config.clone(), tracker.clone(), announce_handler.clone(), scrape_handler.clone(), diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index 53ba588d4..f93d84a65 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -82,6 +82,7 @@ mod tests { let started = stopped .start( + Arc::new(cfg.core.clone()), app_container.tracker, app_container.announce_handler, app_container.scrape_handler, @@ -117,6 +118,7 @@ mod tests { let started = stopped .start( + Arc::new(cfg.core.clone()), app_container.tracker, app_container.announce_handler, app_container.scrape_handler, diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs 
index 4cecbc36a..0bb7c92c4 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -6,6 +6,7 @@ use std::time::Duration; use aquatic_udp_protocol::Response; use tokio::sync::RwLock; use tokio::time::Instant; +use torrust_tracker_configuration::Core; use tracing::{instrument, Level}; use super::banning::BanService; @@ -20,6 +21,7 @@ use crate::servers::udp::{handlers, RawRequest}; pub struct Processor { socket: Arc, + core_config: Arc, tracker: Arc, announce_handler: Arc, scrape_handler: Arc, @@ -29,8 +31,10 @@ pub struct Processor { } impl Processor { + #[allow(clippy::too_many_arguments)] pub fn new( socket: Arc, + core_config: Arc, tracker: Arc, announce_handler: Arc, scrape_handler: Arc, @@ -40,6 +44,7 @@ impl Processor { ) -> Self { Self { socket, + core_config, tracker, announce_handler, scrape_handler, @@ -57,6 +62,7 @@ impl Processor { let response = handlers::handle_packet( request, + &self.core_config, &self.tracker, &self.announce_handler, &self.scrape_handler, diff --git a/src/servers/udp/server/spawner.rs b/src/servers/udp/server/spawner.rs index ea12b1c0b..ced5fbf4a 100644 --- a/src/servers/udp/server/spawner.rs +++ b/src/servers/udp/server/spawner.rs @@ -7,6 +7,7 @@ use derive_more::derive::Display; use derive_more::Constructor; use tokio::sync::{oneshot, RwLock}; use tokio::task::JoinHandle; +use torrust_tracker_configuration::Core; use super::banning::BanService; use super::launcher::Launcher; @@ -32,6 +33,7 @@ impl Spawner { #[allow(clippy::too_many_arguments)] pub fn spawn_launcher( &self, + core_config: Arc, tracker: Arc, announce_handler: Arc, scrape_handler: Arc, @@ -46,6 +48,7 @@ impl Spawner { tokio::spawn(async move { Launcher::run_with_graceful_shutdown( + core_config, tracker, announce_handler, scrape_handler, diff --git a/src/servers/udp/server/states.rs b/src/servers/udp/server/states.rs index bab04fdcc..4d63dc0a8 100644 --- a/src/servers/udp/server/states.rs +++ b/src/servers/udp/server/states.rs @@ 
-7,6 +7,7 @@ use derive_more::derive::Display; use derive_more::Constructor; use tokio::sync::RwLock; use tokio::task::JoinHandle; +use torrust_tracker_configuration::Core; use tracing::{instrument, Level}; use super::banning::BanService; @@ -70,6 +71,7 @@ impl Server { #[instrument(skip(self, tracker, announce_handler, scrape_handler, whitelist_authorization, opt_stats_event_sender, ban_service, form), err, ret(Display, level = Level::INFO))] pub async fn start( self, + core_config: Arc, tracker: Arc, announce_handler: Arc, scrape_handler: Arc, @@ -86,6 +88,7 @@ impl Server { // May need to wrap in a task to about a tokio bug. let task = self.state.spawner.spawn_launcher( + core_config, tracker, announce_handler, scrape_handler, diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 78051cbbb..203dc880e 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use futures::executor::block_on; -use torrust_tracker_configuration::{Configuration, HttpTracker}; +use torrust_tracker_configuration::{Configuration, Core, HttpTracker}; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; use torrust_tracker_lib::core::announce_handler::AnnounceHandler; @@ -20,7 +20,8 @@ use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_primitives::peer; pub struct Environment { - pub config: Arc, + pub core_config: Arc, + pub http_tracker_config: Arc, pub database: Arc>, pub tracker: Arc, pub announce_handler: Arc, @@ -64,7 +65,8 @@ impl Environment { let server = HttpServer::new(Launcher::new(bind_to, tls)); Self { - config, + http_tracker_config: config, + core_config: Arc::new(configuration.core.clone()), database: app_container.database.clone(), tracker: app_container.tracker.clone(), announce_handler: 
app_container.announce_handler.clone(), @@ -84,7 +86,8 @@ impl Environment { #[allow(dead_code)] pub async fn start(self) -> Environment { Environment { - config: self.config, + http_tracker_config: self.http_tracker_config, + core_config: self.core_config.clone(), database: self.database.clone(), tracker: self.tracker.clone(), announce_handler: self.announce_handler.clone(), @@ -100,6 +103,7 @@ impl Environment { server: self .server .start( + self.core_config, self.tracker, self.announce_handler, self.scrape_handler, @@ -121,7 +125,8 @@ impl Environment { pub async fn stop(self) -> Environment { Environment { - config: self.config, + http_tracker_config: self.http_tracker_config, + core_config: self.core_config, database: self.database, tracker: self.tracker, announce_handler: self.announce_handler, diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 8a65d941a..33faf8578 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -449,7 +449,7 @@ mod for_all_config_modes { ) .await; - let announce_policy = env.tracker.get_announce_policy(); + let announce_policy = env.core_config.announce_policy; assert_announce_response( response, @@ -490,7 +490,7 @@ mod for_all_config_modes { ) .await; - let announce_policy = env.tracker.get_announce_policy(); + let announce_policy = env.core_config.announce_policy; // It should only contain the previously announced peer assert_announce_response( @@ -543,7 +543,7 @@ mod for_all_config_modes { ) .await; - let announce_policy = env.tracker.get_announce_policy(); + let announce_policy = env.core_config.announce_policy; // The newly announced peer is not included on the response peer list, // but all the previously announced peers should be included regardless the IP version they are using. 
@@ -872,7 +872,7 @@ mod for_all_config_modes { let peers = env.in_memory_torrent_repository.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; - assert_eq!(peer_addr.ip(), env.tracker.get_maybe_external_ip().unwrap()); + assert_eq!(peer_addr.ip(), env.core_config.net.external_ip.unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); env.stop().await; @@ -914,7 +914,7 @@ mod for_all_config_modes { let peers = env.in_memory_torrent_repository.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; - assert_eq!(peer_addr.ip(), env.tracker.get_maybe_external_ip().unwrap()); + assert_eq!(peer_addr.ip(), env.core_config.net.external_ip.unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); env.stop().await; diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index fafb7ef7a..11967aeed 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use tokio::sync::RwLock; -use torrust_tracker_configuration::{Configuration, UdpTracker, DEFAULT_TIMEOUT}; +use torrust_tracker_configuration::{Configuration, Core, UdpTracker, DEFAULT_TIMEOUT}; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::core::announce_handler::AnnounceHandler; use torrust_tracker_lib::core::databases::Database; @@ -23,6 +23,7 @@ pub struct Environment where S: std::fmt::Debug + std::fmt::Display, { + pub core_config: Arc, pub config: Arc, pub database: Arc>, pub tracker: Arc, @@ -64,6 +65,7 @@ impl Environment { let server = Server::new(Spawner::new(bind_to)); Self { + core_config: Arc::new(configuration.core.clone()), config, database: app_container.database.clone(), tracker: app_container.tracker.clone(), @@ -83,6 +85,7 @@ impl Environment { pub async fn start(self) -> Environment { let cookie_lifetime = 
self.config.cookie_lifetime; Environment { + core_config: self.core_config.clone(), config: self.config, database: self.database.clone(), tracker: self.tracker.clone(), @@ -97,6 +100,7 @@ impl Environment { server: self .server .start( + self.core_config, self.tracker, self.announce_handler, self.scrape_handler, @@ -126,6 +130,7 @@ impl Environment { .expect("it should stop the environment within the timeout"); Environment { + core_config: self.core_config, config: self.config, database: self.database, tracker: self.tracker, From 73560a5612ed5a468c1d61bc910dd0eda166d022 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 27 Jan 2025 17:52:10 +0000 Subject: [PATCH 164/802] refactor: [#1209] remove core::Tracker --- src/app.rs | 2 - src/bootstrap/app.rs | 9 +- src/bootstrap/jobs/http_tracker.rs | 9 +- src/bootstrap/jobs/udp_tracker.rs | 5 +- src/container.rs | 3 +- src/core/mod.rs | 151 +++++------------------ src/core/services/mod.rs | 22 ---- src/core/services/statistics/mod.rs | 9 +- src/core/services/torrent.rs | 28 ++--- src/servers/http/server.rs | 8 +- src/servers/http/v1/handlers/announce.rs | 42 ++----- src/servers/http/v1/handlers/scrape.rs | 95 +++----------- src/servers/http/v1/routes.rs | 8 +- src/servers/http/v1/services/announce.rs | 43 ++----- src/servers/http/v1/services/scrape.rs | 34 ++--- src/servers/udp/handlers.rs | 100 +++------------ src/servers/udp/mod.rs | 3 +- src/servers/udp/server/launcher.rs | 8 +- src/servers/udp/server/mod.rs | 2 - src/servers/udp/server/processor.rs | 6 +- src/servers/udp/server/spawner.rs | 4 +- src/servers/udp/server/states.rs | 6 +- tests/servers/api/environment.rs | 5 - tests/servers/http/environment.rs | 7 +- tests/servers/udp/environment.rs | 7 +- 25 files changed, 119 insertions(+), 497 deletions(-) diff --git a/src/app.rs b/src/app.rs index 54ccbc60c..75c2e13bc 100644 --- a/src/app.rs +++ b/src/app.rs @@ -82,7 +82,6 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< 
udp_tracker::start_job( Arc::new(config.core.clone()), udp_tracker_config, - app_container.tracker.clone(), app_container.announce_handler.clone(), app_container.scrape_handler.clone(), app_container.whitelist_authorization.clone(), @@ -104,7 +103,6 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< if let Some(job) = http_tracker::start_job( http_tracker_config, Arc::new(config.core.clone()), - app_container.tracker.clone(), app_container.announce_handler.clone(), app_container.scrape_handler.clone(), app_container.authentication_service.clone(), diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index fa45998bb..da63048e0 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -28,7 +28,7 @@ use crate::core::authentication::key::repository::in_memory::InMemoryKeyReposito use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::core::authentication::service; use crate::core::scrape_handler::ScrapeHandler; -use crate::core::services::{initialize_database, initialize_tracker, initialize_whitelist_manager, statistics}; +use crate::core::services::{initialize_database, initialize_whitelist_manager, statistics}; use crate::core::torrent::manager::TorrentsManager; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; @@ -116,12 +116,6 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { &db_torrent_repository, )); - let tracker = Arc::new(initialize_tracker( - configuration, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let announce_handler = Arc::new(AnnounceHandler::new( &configuration.core, &in_memory_torrent_repository, @@ -132,7 +126,6 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { AppContainer { database, - tracker, announce_handler, scrape_handler, keys_handler, diff --git 
a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 5767f30ce..4a3aa7a9f 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -23,7 +23,7 @@ use crate::core::announce_handler::AnnounceHandler; use crate::core::authentication::service::AuthenticationService; use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; -use crate::core::{self, statistics, whitelist}; +use crate::core::{statistics, whitelist}; use crate::servers::http::server::{HttpServer, Launcher}; use crate::servers::http::Version; use crate::servers::registar::ServiceRegistrationForm; @@ -39,7 +39,6 @@ use crate::servers::registar::ServiceRegistrationForm; #[allow(clippy::too_many_arguments)] #[instrument(skip( config, - tracker, announce_handler, scrape_handler, authentication_service, @@ -50,7 +49,6 @@ use crate::servers::registar::ServiceRegistrationForm; pub async fn start_job( config: &HttpTracker, core_config: Arc, - tracker: Arc, announce_handler: Arc, scrape_handler: Arc, authentication_service: Arc, @@ -71,7 +69,6 @@ pub async fn start_job( socket, tls, core_config.clone(), - tracker.clone(), announce_handler.clone(), scrape_handler.clone(), authentication_service.clone(), @@ -89,7 +86,6 @@ pub async fn start_job( #[instrument(skip( socket, tls, - tracker, announce_handler, scrape_handler, whitelist_authorization, @@ -100,7 +96,6 @@ async fn start_v1( socket: SocketAddr, tls: Option, config: Arc, - tracker: Arc, announce_handler: Arc, scrape_handler: Arc, authentication_service: Arc, @@ -111,7 +106,6 @@ async fn start_v1( let server = HttpServer::new(Launcher::new(socket, tls)) .start( config, - tracker, announce_handler, scrape_handler, authentication_service, @@ -161,7 +155,6 @@ mod tests { start_job( config, Arc::new(cfg.core.clone()), - app_container.tracker, app_container.announce_handler, app_container.scrape_handler, app_container.authentication_service, diff --git 
a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 36f3cd7b0..3679c3195 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -16,7 +16,7 @@ use tracing::instrument; use crate::core::announce_handler::AnnounceHandler; use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; -use crate::core::{self, whitelist}; +use crate::core::whitelist; use crate::servers::registar::ServiceRegistrationForm; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::spawner::Spawner; @@ -37,7 +37,6 @@ use crate::servers::udp::UDP_TRACKER_LOG_TARGET; #[allow(clippy::async_yields_async)] #[instrument(skip( config, - tracker, announce_handler, scrape_handler, whitelist_authorization, @@ -48,7 +47,6 @@ use crate::servers::udp::UDP_TRACKER_LOG_TARGET; pub async fn start_job( core_config: Arc, config: &UdpTracker, - tracker: Arc, announce_handler: Arc, scrape_handler: Arc, whitelist_authorization: Arc, @@ -62,7 +60,6 @@ pub async fn start_job( let server = Server::new(Spawner::new(bind_to)) .start( core_config, - tracker, announce_handler, scrape_handler, whitelist_authorization, diff --git a/src/container.rs b/src/container.rs index 4e958b6ed..544abd02e 100644 --- a/src/container.rs +++ b/src/container.rs @@ -12,13 +12,12 @@ use crate::core::statistics::repository::Repository; use crate::core::torrent::manager::TorrentsManager; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use crate::core::whitelist; use crate::core::whitelist::manager::WhiteListManager; -use crate::core::{whitelist, Tracker}; use crate::servers::udp::server::banning::BanService; pub struct AppContainer { pub database: Arc>, - pub tracker: Arc, pub announce_handler: Arc, pub scrape_handler: Arc, pub keys_handler: Arc, diff --git a/src/core/mod.rs b/src/core/mod.rs index 
43a2aa11d..f09e7d417 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -451,53 +451,9 @@ pub mod whitelist; pub mod peer_tests; -use std::sync::Arc; - -use torrent::repository::in_memory::InMemoryTorrentRepository; -use torrent::repository::persisted::DatabasePersistentTorrentRepository; -use torrust_tracker_configuration::Core; - -/// The domain layer tracker service. -/// -/// Its main responsibility is to handle the `announce` and `scrape` requests. -/// But it's also a container for the `Tracker` configuration, persistence, -/// authentication and other services. -/// -/// > **NOTICE**: the `Tracker` is not responsible for handling the network layer. -/// > Typically, the `Tracker` is used by a higher application service that handles -/// > the network layer. -pub struct Tracker { - /// The tracker configuration. - _core_config: Core, - - /// The in-memory torrents repository. - _in_memory_torrent_repository: Arc, - - /// The persistent torrents repository. - _db_torrent_repository: Arc, -} - -impl Tracker { - /// `Tracker` constructor. - /// - /// # Errors - /// - /// Will return a `databases::error::Error` if unable to connect to database. The `Tracker` is responsible for the persistence. - pub fn new( - core_config: &Core, - in_memory_torrent_repository: &Arc, - db_torrent_repository: &Arc, - ) -> Result { - Ok(Tracker { - _core_config: core_config.clone(), - _in_memory_torrent_repository: in_memory_torrent_repository.clone(), - _db_torrent_repository: db_torrent_repository.clone(), - }) - } -} - #[cfg(test)] mod tests { + // Integration tests for the core module. 
mod the_tracker { @@ -517,18 +473,13 @@ mod tests { use crate::app_test::initialize_tracker_dependencies; use crate::core::announce_handler::AnnounceHandler; use crate::core::scrape_handler::ScrapeHandler; - use crate::core::services::{initialize_tracker, initialize_whitelist_manager}; + use crate::core::services::initialize_whitelist_manager; use crate::core::torrent::manager::TorrentsManager; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::core::whitelist; use crate::core::whitelist::manager::WhiteListManager; - use crate::core::{whitelist, Tracker}; - fn public_tracker() -> ( - Arc, - Arc, - Arc, - Arc, - ) { + fn public_tracker() -> (Arc, Arc, Arc) { let config = configuration::ephemeral_public(); let ( @@ -541,12 +492,6 @@ mod tests { _torrents_manager, ) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker( - &config, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &in_memory_torrent_repository, @@ -555,10 +500,10 @@ mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - (tracker, announce_handler, in_memory_torrent_repository, scrape_handler) + (announce_handler, in_memory_torrent_repository, scrape_handler) } - fn public_tracker_and_in_memory_torrents_repository() -> (Arc, Arc) { + fn initialize_in_memory_torrents_repository() -> Arc { let config = configuration::ephemeral_public(); let ( @@ -567,22 +512,15 @@ mod tests { _whitelist_authorization, _authentication_service, in_memory_torrent_repository, - db_torrent_repository, + _db_torrent_repository, _torrents_manager, ) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker( - &config, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - (tracker, in_memory_torrent_repository) + in_memory_torrent_repository } 
#[allow(clippy::type_complexity)] fn whitelisted_tracker() -> ( - Arc, Arc, Arc, Arc, @@ -602,12 +540,6 @@ mod tests { let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let tracker = Arc::new(initialize_tracker( - &config, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &in_memory_torrent_repository, @@ -616,21 +548,11 @@ mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - ( - tracker, - announce_handler, - whitelist_authorization, - whitelist_manager, - scrape_handler, - ) + (announce_handler, whitelist_authorization, whitelist_manager, scrape_handler) } - pub fn tracker_persisting_torrents_in_database() -> ( - Arc, - Arc, - Arc, - Arc, - ) { + pub fn tracker_persisting_torrents_in_database( + ) -> (Arc, Arc, Arc) { let mut config = configuration::ephemeral_listed(); config.core.tracker_policy.persistent_torrent_completed_stat = true; @@ -644,19 +566,13 @@ mod tests { torrents_manager, ) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(initialize_tracker( - &config, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &in_memory_torrent_repository, &db_torrent_repository, )); - (tracker, announce_handler, torrents_manager, in_memory_torrent_repository) + (announce_handler, torrents_manager, in_memory_torrent_repository) } fn sample_info_hash() -> InfoHash { @@ -745,7 +661,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_peers_for_a_given_torrent() { - let (_tracker, in_memory_torrent_repository) = public_tracker_and_in_memory_torrents_repository(); + let in_memory_torrent_repository = initialize_in_memory_torrents_repository(); let info_hash = sample_info_hash(); let peer = sample_peer(); @@ -777,7 +693,7 @@ mod tests { #[tokio::test] async fn 
it_should_return_74_peers_at_the_most_for_a_given_torrent() { - let (_tracker, in_memory_torrent_repository) = public_tracker_and_in_memory_torrents_repository(); + let in_memory_torrent_repository = initialize_in_memory_torrents_repository(); let info_hash = sample_info_hash(); @@ -802,7 +718,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { - let (_tracker, _announce_handler, in_memory_torrent_repository, _scrape_handler) = public_tracker(); + let (_announce_handler, in_memory_torrent_repository, _scrape_handler) = public_tracker(); let info_hash = sample_info_hash(); let peer = sample_peer(); @@ -816,7 +732,7 @@ mod tests { #[tokio::test] async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { - let (_tracker, _announce_handler, in_memory_torrent_repository, _scrape_handler) = public_tracker(); + let (_announce_handler, in_memory_torrent_repository, _scrape_handler) = public_tracker(); let info_hash = sample_info_hash(); @@ -846,7 +762,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_torrent_metrics() { - let (_tracker, in_memory_torrent_repository) = public_tracker_and_in_memory_torrents_repository(); + let in_memory_torrent_repository = initialize_in_memory_torrents_repository(); let () = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &leecher()); @@ -865,7 +781,7 @@ mod tests { #[tokio::test] async fn it_should_get_many_the_torrent_metrics() { - let (_tracker, in_memory_torrent_repository) = public_tracker_and_in_memory_torrents_repository(); + let in_memory_torrent_repository = initialize_in_memory_torrents_repository(); let start_time = std::time::Instant::now(); for i in 0..1_000_000 { @@ -1000,7 +916,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_announce_data_with_an_empty_peer_list_when_it_is_the_first_announced_peer() { - let (_tracker, announce_handler, _in_memory_torrent_repository, 
_scrape_handler) = public_tracker(); + let (announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); let mut peer = sample_peer(); @@ -1011,7 +927,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_announce_data_with_the_previously_announced_peers() { - let (_tracker, announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); + let (announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); let mut previously_announced_peer = sample_peer_1(); announce_handler.announce( @@ -1036,7 +952,7 @@ mod tests { #[tokio::test] async fn when_the_peer_is_a_seeder() { - let (_tracker, announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); + let (announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); let mut peer = seeder(); @@ -1048,7 +964,7 @@ mod tests { #[tokio::test] async fn when_the_peer_is_a_leecher() { - let (_tracker, announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); + let (announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); let mut peer = leecher(); @@ -1060,7 +976,7 @@ mod tests { #[tokio::test] async fn when_a_previously_announced_started_peer_has_completed_downloading() { - let (_tracker, announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); + let (announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); // We have to announce with "started" event because peer does not count if peer was not previously known let mut started_peer = started_peer(); @@ -1088,7 +1004,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_swarm_metadata_for_the_requested_file_if_the_tracker_has_that_torrent() { - let (_tracker, announce_handler, _in_memory_torrent_repository, scrape_handler) = public_tracker(); + let (announce_handler, _in_memory_torrent_repository, scrape_handler) = 
public_tracker(); let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // # DevSkim: ignore DS173237 @@ -1136,8 +1052,7 @@ mod tests { #[tokio::test] async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { - let (_tracker, _announce_handler, whitelist_authorization, whitelist_manager, _scrape_handler) = - whitelisted_tracker(); + let (_announce_handler, whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -1150,8 +1065,7 @@ mod tests { #[tokio::test] async fn it_should_not_authorize_the_announce_and_scrape_actions_on_not_whitelisted_torrents() { - let (_tracker, _announce_handler, whitelist_authorization, _whitelist_manager, _scrape_handler) = - whitelisted_tracker(); + let (_announce_handler, whitelist_authorization, _whitelist_manager, _scrape_handler) = whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -1170,8 +1084,7 @@ mod tests { #[tokio::test] async fn it_should_add_a_torrent_to_the_whitelist() { - let (_tracker, _announce_handler, _whitelist_authorization, whitelist_manager, _scrape_handler) = - whitelisted_tracker(); + let (_announce_handler, _whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -1182,8 +1095,7 @@ mod tests { #[tokio::test] async fn it_should_remove_a_torrent_from_the_whitelist() { - let (_tracker, _announce_handler, _whitelist_authorization, whitelist_manager, _scrape_handler) = - whitelisted_tracker(); + let (_announce_handler, _whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -1199,7 +1111,7 @@ mod tests { #[tokio::test] async fn it_should_load_the_whitelist_from_the_database() { - let (_tracker, _announce_handler, _whitelist_authorization, whitelist_manager, _scrape_handler) = + let (_announce_handler, _whitelist_authorization, whitelist_manager, 
_scrape_handler) = whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -1244,8 +1156,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_zeroed_swarm_metadata_for_the_requested_file_if_it_is_not_whitelisted() { - let (_tracker, announce_handler, _whitelist_authorization, _whitelist_manager, scrape_handler) = - whitelisted_tracker(); + let (announce_handler, _whitelist_authorization, _whitelist_manager, scrape_handler) = whitelisted_tracker(); let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // # DevSkim: ignore DS173237 @@ -1279,7 +1190,7 @@ mod tests { #[tokio::test] async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { - let (_tracker, announce_handler, torrents_manager, in_memory_torrent_repository) = + let (announce_handler, torrents_manager, in_memory_torrent_repository) = tracker_persisting_torrents_in_database(); let info_hash = sample_info_hash(); diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index a6cf54d60..73328aaeb 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -14,31 +14,9 @@ use torrust_tracker_configuration::v2_0_0::database; use torrust_tracker_configuration::Configuration; use super::databases::{self, Database}; -use super::torrent::repository::in_memory::InMemoryTorrentRepository; -use super::torrent::repository::persisted::DatabasePersistentTorrentRepository; use super::whitelist::manager::WhiteListManager; use super::whitelist::repository::in_memory::InMemoryWhitelist; use super::whitelist::repository::persisted::DatabaseWhitelist; -use crate::core::Tracker; - -/// It returns a new tracker building its dependencies. -/// -/// # Panics -/// -/// Will panic if tracker cannot be instantiated. 
-#[must_use] -pub fn initialize_tracker( - config: &Configuration, - in_memory_torrent_repository: &Arc, - db_torrent_repository: &Arc, -) -> Tracker { - match Tracker::new(&Arc::new(config).core, in_memory_torrent_repository, db_torrent_repository) { - Ok(tracker) => tracker, - Err(error) => { - panic!("{}", error) - } - } -} /// # Panics /// diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index 680504607..18d96605e 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -118,7 +118,6 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; - use crate::core::services::initialize_tracker; use crate::core::services::statistics::{self, get_metrics, TrackerMetrics}; use crate::core::{self}; use crate::servers::udp::server::banning::BanService; @@ -138,19 +137,13 @@ mod tests { _whitelist_authorization, _authentication_service, in_memory_torrent_repository, - db_torrent_repository, + _db_torrent_repository, _torrents_manager, ) = initialize_tracker_dependencies(&config); let (_stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_repository = Arc::new(stats_repository); - let _tracker = Arc::new(initialize_tracker( - &config, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let tracker_metrics = get_metrics( diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 5faaef1d1..6ae2c26a4 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -119,28 +119,20 @@ mod tests { use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use crate::app_test::initialize_tracker_dependencies; - use crate::core::services::initialize_tracker; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - use 
crate::core::Tracker; - fn initialize_tracker_and_deps(config: &Configuration) -> (Arc, Arc) { + fn initialize_in_memory_torrent_repository(config: &Configuration) -> Arc { let ( _database, _in_memory_whitelist, _whitelist_authorization, _authentication_service, in_memory_torrent_repository, - db_torrent_repository, + _db_torrent_repository, _torrents_manager, ) = initialize_tracker_dependencies(config); - let tracker = Arc::new(initialize_tracker( - config, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - (tracker, in_memory_torrent_repository) + in_memory_torrent_repository } fn sample_peer() -> peer::Peer { @@ -164,7 +156,7 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; - use crate::core::services::torrent::tests::{initialize_tracker_and_deps, sample_peer}; + use crate::core::services::torrent::tests::{initialize_in_memory_torrent_repository, sample_peer}; use crate::core::services::torrent::{get_torrent_info, Info}; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -189,7 +181,7 @@ mod tests { async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { let config = tracker_configuration(); - let (_tracker, in_memory_torrent_repository) = initialize_tracker_and_deps(&config); + let in_memory_torrent_repository = initialize_in_memory_torrent_repository(&config); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -221,7 +213,7 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; - use crate::core::services::torrent::tests::{initialize_tracker_and_deps, sample_peer}; + use crate::core::services::torrent::tests::{initialize_in_memory_torrent_repository, sample_peer}; use crate::core::services::torrent::{get_torrents_page, BasicInfo, Pagination}; use 
crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -242,7 +234,7 @@ mod tests { async fn should_return_a_summarized_info_for_all_torrents() { let config = tracker_configuration(); - let (_tracker, in_memory_torrent_repository) = initialize_tracker_and_deps(&config); + let in_memory_torrent_repository = initialize_in_memory_torrent_repository(&config); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -266,7 +258,7 @@ mod tests { async fn should_allow_limiting_the_number_of_torrents_in_the_result() { let config = tracker_configuration(); - let (_tracker, in_memory_torrent_repository) = initialize_tracker_and_deps(&config); + let in_memory_torrent_repository = initialize_in_memory_torrent_repository(&config); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -289,7 +281,7 @@ mod tests { async fn should_allow_using_pagination_in_the_result() { let config = tracker_configuration(); - let (_tracker, in_memory_torrent_repository) = initialize_tracker_and_deps(&config); + let in_memory_torrent_repository = initialize_in_memory_torrent_repository(&config); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -321,7 +313,7 @@ mod tests { async fn should_return_torrents_ordered_by_info_hash() { let config = tracker_configuration(); - let (_tracker, in_memory_torrent_repository) = initialize_tracker_and_deps(&config); + let in_memory_torrent_repository = initialize_in_memory_torrent_repository(&config); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 3817882df..28f407ad3 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -15,7 +15,7 @@ use 
crate::bootstrap::jobs::Started; use crate::core::announce_handler::AnnounceHandler; use crate::core::authentication::service::AuthenticationService; use crate::core::scrape_handler::ScrapeHandler; -use crate::core::{statistics, whitelist, Tracker}; +use crate::core::{statistics, whitelist}; use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; use crate::servers::logging::STARTED_ON; @@ -49,7 +49,6 @@ impl Launcher { #[allow(clippy::too_many_arguments)] #[instrument(skip( self, - tracker, announce_handler, scrape_handler, authentication_service, @@ -61,7 +60,6 @@ impl Launcher { fn start( &self, config: Arc, - tracker: Arc, announce_handler: Arc, scrape_handler: Arc, authentication_service: Arc, @@ -88,7 +86,6 @@ impl Launcher { let app = router( config, - tracker, announce_handler, scrape_handler, authentication_service, @@ -192,7 +189,6 @@ impl HttpServer { pub async fn start( self, core_config: Arc, - tracker: Arc, announce_handler: Arc, scrape_handler: Arc, authentication_service: Arc, @@ -208,7 +204,6 @@ impl HttpServer { let task = tokio::spawn(async move { let server = launcher.start( core_config, - tracker, announce_handler, scrape_handler, authentication_service, @@ -315,7 +310,6 @@ mod tests { let started = stopped .start( Arc::new(cfg.core.clone()), - app_container.tracker, app_container.announce_handler, app_container.scrape_handler, app_container.authentication_service, diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index ebdb717c3..632688763 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -26,7 +26,7 @@ use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; use crate::core::authentication::service::AuthenticationService; use crate::core::authentication::Key; use crate::core::statistics::event::sender::Sender; -use crate::core::{whitelist, Tracker}; +use 
crate::core::whitelist; use crate::servers::http::v1::extractors::announce_request::ExtractRequest; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; @@ -41,7 +41,6 @@ use crate::CurrentClock; pub async fn handle_without_key( State(state): State<( Arc, - Arc, Arc, Arc, Arc, @@ -58,7 +57,6 @@ pub async fn handle_without_key( &state.2, &state.3, &state.4, - &state.5, &announce_request, &client_ip_sources, None, @@ -73,7 +71,6 @@ pub async fn handle_without_key( pub async fn handle_with_key( State(state): State<( Arc, - Arc, Arc, Arc, Arc, @@ -91,7 +88,6 @@ pub async fn handle_with_key( &state.2, &state.3, &state.4, - &state.5, &announce_request, &client_ip_sources, Some(key), @@ -106,7 +102,6 @@ pub async fn handle_with_key( #[allow(clippy::too_many_arguments)] async fn handle( config: &Arc, - tracker: &Arc, announce_handler: &Arc, authentication_service: &Arc, whitelist_authorization: &Arc, @@ -117,7 +112,6 @@ async fn handle( ) -> Response { let announce_data = match handle_announce( config, - tracker, announce_handler, authentication_service, whitelist_authorization, @@ -143,7 +137,6 @@ async fn handle( #[allow(clippy::too_many_arguments)] async fn handle_announce( core_config: &Arc, - tracker: &Arc, announce_handler: &Arc, authentication_service: &Arc, whitelist_authorization: &Arc, @@ -185,7 +178,6 @@ async fn handle_announce( }; let announce_data = services::announce::invoke( - tracker.clone(), announce_handler.clone(), opt_stats_event_sender.clone(), announce_request.info_hash, @@ -265,13 +257,12 @@ mod tests { use crate::app_test::initialize_tracker_dependencies; use crate::core::announce_handler::AnnounceHandler; use crate::core::authentication::service::AuthenticationService; - use crate::core::services::{initialize_tracker, statistics}; + use crate::core::services::statistics; use crate::core::statistics::event::sender::Sender; 
- use crate::core::{whitelist, Tracker}; + use crate::core::whitelist; type TrackerAndDeps = ( Arc, - Arc, Arc, Arc>>, Arc, @@ -309,12 +300,6 @@ mod tests { let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); - let tracker = Arc::new(initialize_tracker( - &config, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &in_memory_torrent_repository, @@ -325,7 +310,6 @@ mod tests { ( config, - tracker, announce_handler, stats_event_sender, whitelist_authorization, @@ -373,17 +357,15 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_authentication_key_is_missing() { - let (config, tracker, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = + let (config, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = private_tracker(); - let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); let maybe_key = None; let response = handle_announce( &config, - &tracker, &announce_handler, &authentication_service, &whitelist_authorization, @@ -403,10 +385,9 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_authentication_key_is_invalid() { - let (config, tracker, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = + let (config, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = private_tracker(); - let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); @@ -415,7 +396,6 @@ mod tests { let response = handle_announce( &config, - &tracker, &announce_handler, &authentication_service, &whitelist_authorization, @@ -441,17 +421,15 @@ mod tests { #[tokio::test] async 
fn it_should_fail_when_the_announced_torrent_is_not_whitelisted() { - let (config, tracker, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = + let (config, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = whitelisted_tracker(); - let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); let announce_request = sample_announce_request(); let response = handle_announce( &config, - &tracker, &announce_handler, &authentication_service, &whitelist_authorization, @@ -485,10 +463,9 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { - let (config, tracker, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = + let (config, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = tracker_on_reverse_proxy(); - let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); let client_ip_sources = ClientIpSources { @@ -498,7 +475,6 @@ mod tests { let response = handle_announce( &config, - &tracker, &announce_handler, &authentication_service, &whitelist_authorization, @@ -529,10 +505,9 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { - let (config, tracker, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = + let (config, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = tracker_not_on_reverse_proxy(); - let tracker = Arc::new(tracker); let stats_event_sender = Arc::new(stats_event_sender); let client_ip_sources = ClientIpSources { @@ -542,7 +517,6 @@ mod tests { let response = handle_announce( &config, - &tracker, &announce_handler, &authentication_service, &whitelist_authorization, diff --git a/src/servers/http/v1/handlers/scrape.rs 
b/src/servers/http/v1/handlers/scrape.rs index 4f47a066f..c4013d8e9 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -20,7 +20,6 @@ use crate::core::authentication::service::AuthenticationService; use crate::core::authentication::Key; use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; -use crate::core::Tracker; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; use crate::servers::http::v1::extractors::scrape_request::ExtractRequest; @@ -33,7 +32,6 @@ use crate::servers::http::v1::services; pub async fn handle_without_key( State(state): State<( Arc, - Arc, Arc, Arc, Arc>>, @@ -48,7 +46,6 @@ pub async fn handle_without_key( &state.1, &state.2, &state.3, - &state.4, &scrape_request, &client_ip_sources, None, @@ -65,7 +62,6 @@ pub async fn handle_without_key( pub async fn handle_with_key( State(state): State<( Arc, - Arc, Arc, Arc, Arc>>, @@ -81,7 +77,6 @@ pub async fn handle_with_key( &state.1, &state.2, &state.3, - &state.4, &scrape_request, &client_ip_sources, Some(key), @@ -92,7 +87,6 @@ pub async fn handle_with_key( #[allow(clippy::too_many_arguments)] async fn handle( core_config: &Arc, - tracker: &Arc, scrape_handler: &Arc, authentication_service: &Arc, stats_event_sender: &Arc>>, @@ -102,7 +96,6 @@ async fn handle( ) -> Response { let scrape_data = match handle_scrape( core_config, - tracker, scrape_handler, authentication_service, stats_event_sender, @@ -127,7 +120,6 @@ async fn handle( #[allow(clippy::too_many_arguments)] async fn handle_scrape( core_config: &Arc, - _tracker: &Arc, scrape_handler: &Arc, authentication_service: &Arc, opt_stats_event_sender: &Arc>>, @@ -185,13 +177,11 @@ mod tests { use crate::app_test::initialize_tracker_dependencies; use crate::core::authentication::service::AuthenticationService; use 
crate::core::scrape_handler::ScrapeHandler; - use crate::core::services::{initialize_tracker, statistics}; - use crate::core::Tracker; + use crate::core::services::statistics; #[allow(clippy::type_complexity)] fn private_tracker() -> ( Arc, - Arc, Arc, Arc>>, Arc, @@ -204,7 +194,7 @@ mod tests { whitelist_authorization, authentication_service, in_memory_torrent_repository, - db_torrent_repository, + _db_torrent_repository, _torrents_manager, ) = initialize_tracker_dependencies(&config); @@ -214,27 +204,14 @@ mod tests { let core_config = Arc::new(config.core.clone()); - let tracker = Arc::new(initialize_tracker( - &config, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - ( - core_config, - tracker, - scrape_handler, - stats_event_sender, - authentication_service, - ) + (core_config, scrape_handler, stats_event_sender, authentication_service) } #[allow(clippy::type_complexity)] fn whitelisted_tracker() -> ( Arc, - Arc, Arc, Arc>>, Arc, @@ -247,7 +224,7 @@ mod tests { whitelist_authorization, authentication_service, in_memory_torrent_repository, - db_torrent_repository, + _db_torrent_repository, _torrents_manager, ) = initialize_tracker_dependencies(&config); @@ -257,27 +234,14 @@ mod tests { let core_config = Arc::new(config.core.clone()); - let tracker = Arc::new(initialize_tracker( - &config, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - ( - core_config, - tracker, - scrape_handler, - stats_event_sender, - authentication_service, - ) + (core_config, scrape_handler, stats_event_sender, authentication_service) } #[allow(clippy::type_complexity)] fn tracker_on_reverse_proxy() -> ( Arc, - Arc, Arc, Arc>>, Arc, @@ -290,7 +254,7 @@ mod tests { whitelist_authorization, authentication_service, 
in_memory_torrent_repository, - db_torrent_repository, + _db_torrent_repository, _torrents_manager, ) = initialize_tracker_dependencies(&config); @@ -300,27 +264,14 @@ mod tests { let core_config = Arc::new(config.core.clone()); - let tracker = Arc::new(initialize_tracker( - &config, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - ( - core_config, - tracker, - scrape_handler, - stats_event_sender, - authentication_service, - ) + (core_config, scrape_handler, stats_event_sender, authentication_service) } #[allow(clippy::type_complexity)] fn tracker_not_on_reverse_proxy() -> ( Arc, - Arc, Arc, Arc>>, Arc, @@ -333,7 +284,7 @@ mod tests { whitelist_authorization, authentication_service, in_memory_torrent_repository, - db_torrent_repository, + _db_torrent_repository, _torrents_manager, ) = initialize_tracker_dependencies(&config); @@ -343,21 +294,9 @@ mod tests { let core_config = Arc::new(config.core.clone()); - let tracker = Arc::new(initialize_tracker( - &config, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - ( - core_config, - tracker, - scrape_handler, - stats_event_sender, - authentication_service, - ) + (core_config, scrape_handler, stats_event_sender, authentication_service) } fn sample_scrape_request() -> Scrape { @@ -391,14 +330,13 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_missing() { - let (core_config, tracker, scrape_handler, stats_event_sender, authentication_service) = private_tracker(); + let (core_config, scrape_handler, stats_event_sender, authentication_service) = private_tracker(); let scrape_request = sample_scrape_request(); let maybe_key = None; let scrape_data = handle_scrape( &core_config, - &tracker, &scrape_handler, 
&authentication_service, &stats_event_sender, @@ -416,7 +354,7 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_invalid() { - let (core_config, tracker, scrape_handler, stats_event_sender, authentication_service) = private_tracker(); + let (core_config, scrape_handler, stats_event_sender, authentication_service) = private_tracker(); let scrape_request = sample_scrape_request(); let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); @@ -424,7 +362,6 @@ mod tests { let scrape_data = handle_scrape( &core_config, - &tracker, &scrape_handler, &authentication_service, &stats_event_sender, @@ -450,13 +387,12 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_torrent_is_not_whitelisted() { - let (core_config, tracker, scrape_handler, stats_event_sender, authentication_service) = whitelisted_tracker(); + let (core_config, scrape_handler, stats_event_sender, authentication_service) = whitelisted_tracker(); let scrape_request = sample_scrape_request(); let scrape_data = handle_scrape( &core_config, - &tracker, &scrape_handler, &authentication_service, &stats_event_sender, @@ -483,7 +419,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { - let (core_config, tracker, scrape_handler, stats_event_sender, authentication_service) = tracker_on_reverse_proxy(); + let (core_config, scrape_handler, stats_event_sender, authentication_service) = tracker_on_reverse_proxy(); let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, @@ -492,7 +428,6 @@ mod tests { let response = handle_scrape( &core_config, - &tracker, &scrape_handler, &authentication_service, &stats_event_sender, @@ -520,8 +455,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { - let (core_config, tracker, scrape_handler, 
stats_event_sender, authentication_service) = - tracker_not_on_reverse_proxy(); + let (core_config, scrape_handler, stats_event_sender, authentication_service) = tracker_not_on_reverse_proxy(); let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, @@ -530,7 +464,6 @@ mod tests { let response = handle_scrape( &core_config, - &tracker, &scrape_handler, &authentication_service, &stats_event_sender, diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index 85564ca8c..757a7d1bd 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -26,7 +26,7 @@ use crate::core::announce_handler::AnnounceHandler; use crate::core::authentication::service::AuthenticationService; use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; -use crate::core::{whitelist, Tracker}; +use crate::core::whitelist; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; use crate::servers::logging::Latency; @@ -37,7 +37,6 @@ use crate::servers::logging::Latency; #[allow(clippy::too_many_arguments)] #[allow(clippy::needless_pass_by_value)] #[instrument(skip( - tracker, announce_handler, scrape_handler, authentication_service, @@ -47,7 +46,6 @@ use crate::servers::logging::Latency; ))] pub fn router( core_config: Arc, - tracker: Arc, announce_handler: Arc, scrape_handler: Arc, authentication_service: Arc, @@ -63,7 +61,6 @@ pub fn router( "/announce", get(announce::handle_without_key).with_state(( core_config.clone(), - tracker.clone(), announce_handler.clone(), authentication_service.clone(), whitelist_authorization.clone(), @@ -74,7 +71,6 @@ pub fn router( "/announce/{key}", get(announce::handle_with_key).with_state(( core_config.clone(), - tracker.clone(), announce_handler.clone(), authentication_service.clone(), whitelist_authorization.clone(), @@ -86,7 +82,6 @@ pub fn router( "/scrape", get(scrape::handle_without_key).with_state(( core_config.clone(), - tracker.clone(), 
scrape_handler.clone(), authentication_service.clone(), stats_event_sender.clone(), @@ -96,7 +91,6 @@ pub fn router( "/scrape/{key}", get(scrape::handle_with_key).with_state(( core_config.clone(), - tracker.clone(), scrape_handler.clone(), authentication_service.clone(), stats_event_sender.clone(), diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 5e2e8f716..e70377fd6 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -17,7 +17,6 @@ use torrust_tracker_primitives::peer; use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::{self}; -use crate::core::Tracker; /// The HTTP tracker `announce` service. /// @@ -30,7 +29,6 @@ use crate::core::Tracker; /// > like the UDP tracker, the number of TCP connections is incremented for /// > each `announce` request. pub async fn invoke( - _tracker: Arc, announce_handler: Arc, opt_stats_event_sender: Arc>>, info_hash: InfoHash, @@ -69,12 +67,11 @@ mod tests { use crate::app_test::initialize_tracker_dependencies; use crate::core::announce_handler::AnnounceHandler; - use crate::core::services::{initialize_tracker, statistics}; + use crate::core::services::statistics; use crate::core::statistics::event::sender::Sender; - use crate::core::Tracker; #[allow(clippy::type_complexity)] - fn public_tracker() -> (Arc, Arc, Arc, Arc>>) { + fn public_tracker() -> (Arc, Arc, Arc>>) { let config = configuration::ephemeral_public(); let ( @@ -89,12 +86,6 @@ mod tests { let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); - let tracker = Arc::new(initialize_tracker( - &config, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &in_memory_torrent_repository, 
@@ -103,7 +94,7 @@ mod tests { let core_config = Arc::new(config.core.clone()); - (core_config, tracker, announce_handler, stats_event_sender) + (core_config, announce_handler, stats_event_sender) } fn sample_info_hash() -> InfoHash { @@ -149,11 +140,11 @@ mod tests { use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; use crate::app_test::initialize_tracker_dependencies; use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; - use crate::core::{statistics, Tracker}; + use crate::core::statistics; use crate::servers::http::v1::services::announce::invoke; use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; - fn initialize_tracker_and_announce_handler() -> (Arc, Arc) { + fn initialize_announce_handler() -> Arc { let config = configuration::ephemeral(); let ( @@ -166,25 +157,20 @@ mod tests { _torrents_manager, ) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(Tracker::new(&config.core, &in_memory_torrent_repository, &db_torrent_repository).unwrap()); - - let announce_handler = Arc::new(AnnounceHandler::new( + Arc::new(AnnounceHandler::new( &config.core, &in_memory_torrent_repository, &db_torrent_repository, - )); - - (tracker, announce_handler) + )) } #[tokio::test] async fn it_should_return_the_announce_data() { - let (core_config, tracker, announce_handler, stats_event_sender) = public_tracker(); + let (core_config, announce_handler, stats_event_sender) = public_tracker(); let mut peer = sample_peer(); let announce_data = invoke( - tracker.clone(), announce_handler.clone(), stats_event_sender.clone(), sample_info_hash(), @@ -217,12 +203,11 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let (tracker, announce_handler) = initialize_tracker_and_announce_handler(); + let announce_handler = initialize_announce_handler(); let mut peer = sample_peer_using_ipv4(); let _announce_data = invoke( - tracker, announce_handler, 
stats_event_sender, sample_info_hash(), @@ -232,13 +217,13 @@ mod tests { .await; } - fn tracker_with_an_ipv6_external_ip() -> (Arc, Arc) { + fn tracker_with_an_ipv6_external_ip() -> Arc { let mut configuration = configuration::ephemeral(); configuration.core.net.external_ip = Some(IpAddr::V6(Ipv6Addr::new( 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, ))); - initialize_tracker_and_announce_handler() + initialize_announce_handler() } fn peer_with_the_ipv4_loopback_ip() -> peer::Peer { @@ -265,10 +250,9 @@ mod tests { let mut peer = peer_with_the_ipv4_loopback_ip(); - let (tracker, announce_handler) = tracker_with_an_ipv6_external_ip(); + let announce_handler = tracker_with_an_ipv6_external_ip(); let _announce_data = invoke( - tracker, announce_handler, stats_event_sender, sample_info_hash(), @@ -290,12 +274,11 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let (tracker, announce_handler) = initialize_tracker_and_announce_handler(); + let announce_handler = initialize_announce_handler(); let mut peer = sample_peer_using_ipv6(); let _announce_data = invoke( - tracker, announce_handler, stats_event_sender, sample_info_hash(), diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 6df267d3a..06c21d945 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -83,10 +83,8 @@ mod tests { use crate::app_test::initialize_tracker_dependencies; use crate::core::announce_handler::AnnounceHandler; use crate::core::scrape_handler::ScrapeHandler; - use crate::core::services::initialize_tracker; - use crate::core::Tracker; - fn public_tracker_and_announce_and_scrape_handlers() -> (Arc, Arc, Arc) { + fn public_tracker_and_announce_and_scrape_handlers() -> (Arc, Arc) { let config = configuration::ephemeral_public(); let ( @@ -99,12 +97,6 @@ mod tests { _torrents_manager, ) = initialize_tracker_dependencies(&config); - let 
tracker = Arc::new(initialize_tracker( - &config, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &in_memory_torrent_repository, @@ -113,7 +105,7 @@ mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - (tracker, announce_handler, scrape_handler) + (announce_handler, scrape_handler) } fn sample_info_hashes() -> Vec { @@ -136,7 +128,7 @@ mod tests { } } - fn test_tracker_factory() -> (Arc, Arc) { + fn initialize_scrape_handler() -> Arc { let config = configuration::ephemeral(); let ( @@ -145,15 +137,11 @@ mod tests { whitelist_authorization, _authentication_service, in_memory_torrent_repository, - db_torrent_repository, + _db_torrent_repository, _torrents_manager, ) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(Tracker::new(&config.core, &in_memory_torrent_repository, &db_torrent_repository).unwrap()); - - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - (tracker, scrape_handler) + Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)) } mod with_real_data { @@ -170,8 +158,8 @@ mod tests { use crate::core::statistics; use crate::servers::http::v1::services::scrape::invoke; use crate::servers::http::v1::services::scrape::tests::{ - public_tracker_and_announce_and_scrape_handlers, sample_info_hash, sample_info_hashes, sample_peer, - test_tracker_factory, + initialize_scrape_handler, public_tracker_and_announce_and_scrape_handlers, sample_info_hash, sample_info_hashes, + sample_peer, }; #[tokio::test] @@ -179,7 +167,7 @@ mod tests { let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); - let (_tracker, announce_handler, scrape_handler) = public_tracker_and_announce_and_scrape_handlers(); + let 
(announce_handler, scrape_handler) = public_tracker_and_announce_and_scrape_handlers(); let info_hash = sample_info_hash(); let info_hashes = vec![info_hash]; @@ -215,7 +203,7 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let (_tracker, scrape_handler) = test_tracker_factory(); + let scrape_handler = initialize_scrape_handler(); let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); @@ -233,7 +221,7 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let (_tracker, scrape_handler) = test_tracker_factory(); + let scrape_handler = initialize_scrape_handler(); let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); @@ -262,7 +250,7 @@ mod tests { let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); - let (_tracker, announce_handler, _scrape_handler) = public_tracker_and_announce_and_scrape_handlers(); + let (announce_handler, _scrape_handler) = public_tracker_and_announce_and_scrape_handlers(); let info_hash = sample_info_hash(); let info_hashes = vec![info_hash]; diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index b3ec1cb06..5589331a7 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -24,7 +24,7 @@ use super::RawRequest; use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; -use crate::core::{statistics, whitelist, Tracker}; +use crate::core::{statistics, whitelist}; use crate::servers::udp::error::Error; use crate::servers::udp::{peer_builder, UDP_TRACKER_LOG_TARGET}; use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; @@ -58,11 +58,10 @@ impl CookieTimeValues { /// /// It will return an `Error` response if the request is invalid. 
#[allow(clippy::too_many_arguments)] -#[instrument(fields(request_id), skip(udp_request, tracker, announce_handler, scrape_handler, whitelist_authorization, opt_stats_event_sender, cookie_time_values, ban_service), ret(level = Level::TRACE))] +#[instrument(fields(request_id), skip(udp_request, announce_handler, scrape_handler, whitelist_authorization, opt_stats_event_sender, cookie_time_values, ban_service), ret(level = Level::TRACE))] pub(crate) async fn handle_packet( udp_request: RawRequest, core_config: &Arc, - tracker: &Tracker, announce_handler: &Arc, scrape_handler: &Arc, whitelist_authorization: &Arc, @@ -84,7 +83,6 @@ pub(crate) async fn handle_packet( request, udp_request.from, core_config, - tracker, announce_handler, scrape_handler, whitelist_authorization, @@ -147,7 +145,6 @@ pub(crate) async fn handle_packet( #[instrument(skip( request, remote_addr, - tracker, announce_handler, scrape_handler, whitelist_authorization, @@ -158,7 +155,6 @@ pub async fn handle_request( request: Request, remote_addr: SocketAddr, core_config: &Arc, - tracker: &Tracker, announce_handler: &Arc, scrape_handler: &Arc, whitelist_authorization: &Arc, @@ -180,7 +176,6 @@ pub async fn handle_request( remote_addr, &announce_request, core_config, - tracker, announce_handler, whitelist_authorization, opt_stats_event_sender, @@ -246,12 +241,11 @@ pub async fn handle_connect( /// /// If a error happens in the `handle_announce` function, it will just return the `ServerError`. 
#[allow(clippy::too_many_arguments)] -#[instrument(fields(transaction_id, connection_id, info_hash), skip(_tracker, announce_handler, whitelist_authorization, opt_stats_event_sender), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id, connection_id, info_hash), skip(announce_handler, whitelist_authorization, opt_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_announce( remote_addr: SocketAddr, request: &AnnounceRequest, core_config: &Arc, - _tracker: &Tracker, announce_handler: &Arc, whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, @@ -507,17 +501,16 @@ mod tests { use crate::app_test::initialize_tracker_dependencies; use crate::core::announce_handler::AnnounceHandler; use crate::core::scrape_handler::ScrapeHandler; - use crate::core::services::{initialize_tracker, initialize_whitelist_manager, statistics}; + use crate::core::services::{initialize_whitelist_manager, statistics}; use crate::core::statistics::event::sender::Sender; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::core::whitelist; use crate::core::whitelist::manager::WhiteListManager; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; - use crate::core::{whitelist, Tracker}; use crate::CurrentClock; type TrackerAndDeps = ( Arc, - Arc, Arc, Arc, Arc, @@ -560,12 +553,6 @@ mod tests { let stats_event_sender = Arc::new(stats_event_sender); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let tracker = Arc::new(initialize_tracker( - config, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &in_memory_torrent_repository, @@ -576,7 +563,6 @@ mod tests { ( core_config, - tracker, announce_handler, scrape_handler, in_memory_torrent_repository, @@ -684,7 +670,6 @@ mod tests { #[allow(clippy::type_complexity)] fn test_tracker_factory() -> ( Arc, - Arc, Arc, Arc, Arc, @@ 
-703,8 +688,6 @@ mod tests { _torrents_manager, ) = initialize_tracker_dependencies(&config); - let tracker = Arc::new(Tracker::new(&config.core, &in_memory_torrent_repository, &db_torrent_repository).unwrap()); - let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &in_memory_torrent_repository, @@ -713,13 +696,7 @@ mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - ( - core_config, - tracker, - announce_handler, - scrape_handler, - whitelist_authorization, - ) + (core_config, announce_handler, scrape_handler, whitelist_authorization) } mod connect_request { @@ -935,7 +912,7 @@ mod tests { use crate::core::announce_handler::AnnounceHandler; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::core::{self, statistics, whitelist}; + use crate::core::{statistics, whitelist}; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ @@ -948,7 +925,6 @@ mod tests { async fn an_announced_peer_should_be_added_to_the_tracker() { let ( core_config, - tracker, announce_handler, _scrape_handler, in_memory_torrent_repository, @@ -977,7 +953,6 @@ mod tests { remote_addr, &request, &core_config, - &tracker, &announce_handler, &whitelist_authorization, &stats_event_sender, @@ -1000,7 +975,6 @@ mod tests { async fn the_announced_peer_should_not_be_included_in_the_response() { let ( core_config, - tracker, announce_handler, _scrape_handler, _in_memory_torrent_repository, @@ -1020,7 +994,6 @@ mod tests { remote_addr, &request, &core_config, - &tracker, &announce_handler, &whitelist_authorization, &stats_event_sender, @@ -1052,7 +1025,6 @@ mod tests { let ( core_config, - tracker, announce_handler, _scrape_handler, in_memory_torrent_repository, @@ -1084,7 +1056,6 @@ mod tests { remote_addr, &request, &core_config, - &tracker, &announce_handler, 
&whitelist_authorization, &stats_event_sender, @@ -1116,7 +1087,6 @@ mod tests { async fn announce_a_new_peer_using_ipv4( core_config: Arc, - tracker: Arc, announce_handler: Arc, whitelist_authorization: Arc, ) -> Response { @@ -1132,7 +1102,6 @@ mod tests { remote_addr, &request, &core_config, - &tracker, &announce_handler, &whitelist_authorization, &stats_event_sender, @@ -1146,7 +1115,6 @@ mod tests { async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { let ( core_config, - tracker, announce_handler, _scrape_handler, in_memory_torrent_repository, @@ -1158,13 +1126,8 @@ mod tests { add_a_torrent_peer_using_ipv6(&in_memory_torrent_repository); - let response = announce_a_new_peer_using_ipv4( - core_config.clone(), - tracker.clone(), - announce_handler.clone(), - whitelist_authorization, - ) - .await; + let response = + announce_a_new_peer_using_ipv4(core_config.clone(), announce_handler.clone(), whitelist_authorization).await; // The response should not contain the peer using IPV6 let peers: Option>> = match response { @@ -1186,13 +1149,12 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let (core_config, tracker, announce_handler, _scrape_handler, whitelist_authorization) = test_tracker_factory(); + let (core_config, announce_handler, _scrape_handler, whitelist_authorization) = test_tracker_factory(); handle_announce( sample_ipv4_socket_address(), &AnnounceRequestBuilder::default().into(), &core_config, - &tracker, &announce_handler, &whitelist_authorization, &stats_event_sender, @@ -1219,7 +1181,6 @@ mod tests { async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { let ( core_config, - tracker, announce_handler, _scrape_handler, in_memory_torrent_repository, @@ -1248,7 +1209,6 @@ mod tests { remote_addr, &request, &core_config, - &tracker, &announce_handler, &whitelist_authorization, 
&stats_event_sender, @@ -1286,7 +1246,7 @@ mod tests { use crate::core::announce_handler::AnnounceHandler; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::core::{self, statistics, whitelist}; + use crate::core::{statistics, whitelist}; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ @@ -1299,7 +1259,6 @@ mod tests { async fn an_announced_peer_should_be_added_to_the_tracker() { let ( core_config, - tracker, announce_handler, _scrape_handler, in_memory_torrent_repository, @@ -1329,7 +1288,6 @@ mod tests { remote_addr, &request, &core_config, - &tracker, &announce_handler, &whitelist_authorization, &stats_event_sender, @@ -1352,7 +1310,6 @@ mod tests { async fn the_announced_peer_should_not_be_included_in_the_response() { let ( core_config, - tracker, announce_handler, _scrape_handler, _in_memory_torrent_repository, @@ -1375,7 +1332,6 @@ mod tests { remote_addr, &request, &core_config, - &tracker, &announce_handler, &whitelist_authorization, &stats_event_sender, @@ -1407,7 +1363,6 @@ mod tests { let ( core_config, - tracker, announce_handler, _scrape_handler, in_memory_torrent_repository, @@ -1439,7 +1394,6 @@ mod tests { remote_addr, &request, &core_config, - &tracker, &announce_handler, &whitelist_authorization, &stats_event_sender, @@ -1471,7 +1425,6 @@ mod tests { async fn announce_a_new_peer_using_ipv6( core_config: Arc, - tracker: Arc, announce_handler: Arc, whitelist_authorization: Arc, ) -> Response { @@ -1490,7 +1443,6 @@ mod tests { remote_addr, &request, &core_config, - &tracker, &announce_handler, &whitelist_authorization, &stats_event_sender, @@ -1504,7 +1456,6 @@ mod tests { async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { let ( core_config, - tracker, announce_handler, _scrape_handler, in_memory_torrent_repository, 
@@ -1516,13 +1467,8 @@ mod tests { add_a_torrent_peer_using_ipv4(&in_memory_torrent_repository); - let response = announce_a_new_peer_using_ipv6( - core_config.clone(), - tracker.clone(), - announce_handler.clone(), - whitelist_authorization, - ) - .await; + let response = + announce_a_new_peer_using_ipv6(core_config.clone(), announce_handler.clone(), whitelist_authorization).await; // The response should not contain the peer using IPV4 let peers: Option>> = match response { @@ -1544,7 +1490,7 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let (core_config, tracker, announce_handler, _scrape_handler, whitelist_authorization) = test_tracker_factory(); + let (core_config, announce_handler, _scrape_handler, whitelist_authorization) = test_tracker_factory(); let remote_addr = sample_ipv6_remote_addr(); @@ -1556,7 +1502,6 @@ mod tests { remote_addr, &announce_request, &core_config, - &tracker, &announce_handler, &whitelist_authorization, &stats_event_sender, @@ -1576,7 +1521,7 @@ mod tests { use crate::app_test::initialize_tracker_dependencies; use crate::core::announce_handler::AnnounceHandler; - use crate::core::{self, statistics}; + use crate::core::statistics; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -1607,10 +1552,6 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let tracker = Arc::new( - core::Tracker::new(&config.core, &in_memory_torrent_repository, &db_torrent_repository).unwrap(), - ); - let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &in_memory_torrent_repository, @@ -1643,7 +1584,6 @@ mod tests { remote_addr, &request, &core_config, - &tracker, &announce_handler, &whitelist_authorization, &stats_event_sender, @@ -1700,7 +1640,6 @@ mod tests { async fn 
should_return_no_stats_when_the_tracker_does_not_have_any_torrent() { let ( _core_config, - _tracker, _announce_handler, scrape_handler, _in_memory_torrent_repository, @@ -1810,7 +1749,6 @@ mod tests { async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { let ( _core_config, - _tracker, _announce_handler, scrape_handler, in_memory_torrent_repository, @@ -1847,7 +1785,6 @@ mod tests { async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { let ( _core_config, - _tracker, _announce_handler, scrape_handler, in_memory_torrent_repository, @@ -1892,7 +1829,6 @@ mod tests { async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { let ( _core_config, - _tracker, _announce_handler, scrape_handler, in_memory_torrent_repository, @@ -1965,8 +1901,7 @@ mod tests { let remote_addr = sample_ipv4_remote_addr(); - let (_core_config, _tracker, _announce_handler, scrape_handler, _whitelist_authorization) = - test_tracker_factory(); + let (_core_config, _announce_handler, scrape_handler, _whitelist_authorization) = test_tracker_factory(); handle_scrape( remote_addr, @@ -2006,8 +1941,7 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); - let (_core_config, _tracker, _announce_handler, scrape_handler, _whitelist_authorization) = - test_tracker_factory(); + let (_core_config, _announce_handler, scrape_handler, _whitelist_authorization) = test_tracker_factory(); handle_scrape( remote_addr, diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index 9b4d90c89..b141cc322 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -52,8 +52,7 @@ //! is designed to be as simple as possible. It uses a single UDP port and //! supports only three types of requests: `Connect`, `Announce` and `Scrape`. //! -//! Request are parsed from UDP packets using the [`aquatic_udp_protocol`](https://crates.io/crates/aquatic_udp_protocol) -//! 
crate and then handled by the [`Tracker`](crate::core::Tracker) struct. +//! Request are parsed from UDP packets using the [`aquatic_udp_protocol`](https://crates.io/crates/aquatic_udp_protocol). //! And then the response is also build using the [`aquatic_udp_protocol`](https://crates.io/crates/aquatic_udp_protocol) //! and converted to a UDP packet. //! diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index d0ae14029..f1b14860d 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -17,7 +17,7 @@ use crate::bootstrap::jobs::Started; use crate::core::announce_handler::AnnounceHandler; use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; -use crate::core::{statistics, whitelist, Tracker}; +use crate::core::{statistics, whitelist}; use crate::servers::logging::STARTED_ON; use crate::servers::registar::ServiceHealthCheckJob; use crate::servers::signals::{shutdown_signal_with_message, Halted}; @@ -45,7 +45,6 @@ impl Launcher { /// It panics if the udp server is loaded when the tracker is private. 
#[allow(clippy::too_many_arguments)] #[instrument(skip( - tracker, announce_handler, scrape_handler, whitelist_authorization, @@ -57,7 +56,6 @@ impl Launcher { ))] pub async fn run_with_graceful_shutdown( core_config: Arc, - tracker: Arc, announce_handler: Arc, scrape_handler: Arc, whitelist_authorization: Arc, @@ -103,7 +101,6 @@ impl Launcher { let () = Self::run_udp_server_main( receiver, core_config.clone(), - tracker.clone(), announce_handler.clone(), scrape_handler.clone(), whitelist_authorization.clone(), @@ -151,7 +148,6 @@ impl Launcher { #[allow(clippy::too_many_arguments)] #[instrument(skip( receiver, - tracker, announce_handler, scrape_handler, whitelist_authorization, @@ -161,7 +157,6 @@ impl Launcher { async fn run_udp_server_main( mut receiver: Receiver, core_config: Arc, - tracker: Arc, announce_handler: Arc, scrape_handler: Arc, whitelist_authorization: Arc, @@ -235,7 +230,6 @@ impl Launcher { let processor = Processor::new( receiver.socket.clone(), core_config.clone(), - tracker.clone(), announce_handler.clone(), scrape_handler.clone(), whitelist_authorization.clone(), diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index f93d84a65..c87728361 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -83,7 +83,6 @@ mod tests { let started = stopped .start( Arc::new(cfg.core.clone()), - app_container.tracker, app_container.announce_handler, app_container.scrape_handler, app_container.whitelist_authorization, @@ -119,7 +118,6 @@ mod tests { let started = stopped .start( Arc::new(cfg.core.clone()), - app_container.tracker, app_container.announce_handler, app_container.scrape_handler, app_container.whitelist_authorization, diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index 0bb7c92c4..475a36b74 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -15,14 +15,13 @@ use crate::core::announce_handler::AnnounceHandler; use 
crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::event::UdpResponseKind; -use crate::core::{statistics, whitelist, Tracker}; +use crate::core::{statistics, whitelist}; use crate::servers::udp::handlers::CookieTimeValues; use crate::servers::udp::{handlers, RawRequest}; pub struct Processor { socket: Arc, core_config: Arc, - tracker: Arc, announce_handler: Arc, scrape_handler: Arc, whitelist_authorization: Arc, @@ -35,7 +34,6 @@ impl Processor { pub fn new( socket: Arc, core_config: Arc, - tracker: Arc, announce_handler: Arc, scrape_handler: Arc, whitelist_authorization: Arc, @@ -45,7 +43,6 @@ impl Processor { Self { socket, core_config, - tracker, announce_handler, scrape_handler, whitelist_authorization, @@ -63,7 +60,6 @@ impl Processor { let response = handlers::handle_packet( request, &self.core_config, - &self.tracker, &self.announce_handler, &self.scrape_handler, &self.whitelist_authorization, diff --git a/src/servers/udp/server/spawner.rs b/src/servers/udp/server/spawner.rs index ced5fbf4a..2415b2631 100644 --- a/src/servers/udp/server/spawner.rs +++ b/src/servers/udp/server/spawner.rs @@ -15,7 +15,7 @@ use crate::bootstrap::jobs::Started; use crate::core::announce_handler::AnnounceHandler; use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; -use crate::core::{whitelist, Tracker}; +use crate::core::whitelist; use crate::servers::signals::Halted; #[derive(Constructor, Copy, Clone, Debug, Display)] @@ -34,7 +34,6 @@ impl Spawner { pub fn spawn_launcher( &self, core_config: Arc, - tracker: Arc, announce_handler: Arc, scrape_handler: Arc, whitelist_authorization: Arc, @@ -49,7 +48,6 @@ impl Spawner { tokio::spawn(async move { Launcher::run_with_graceful_shutdown( core_config, - tracker, announce_handler, scrape_handler, whitelist_authorization, diff --git a/src/servers/udp/server/states.rs b/src/servers/udp/server/states.rs index 
4d63dc0a8..4d18593fe 100644 --- a/src/servers/udp/server/states.rs +++ b/src/servers/udp/server/states.rs @@ -17,7 +17,7 @@ use crate::bootstrap::jobs::Started; use crate::core::announce_handler::AnnounceHandler; use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; -use crate::core::{whitelist, Tracker}; +use crate::core::whitelist; use crate::servers::registar::{ServiceRegistration, ServiceRegistrationForm}; use crate::servers::signals::Halted; use crate::servers::udp::server::launcher::Launcher; @@ -68,11 +68,10 @@ impl Server { /// /// It panics if unable to receive the bound socket address from service. #[allow(clippy::too_many_arguments)] - #[instrument(skip(self, tracker, announce_handler, scrape_handler, whitelist_authorization, opt_stats_event_sender, ban_service, form), err, ret(Display, level = Level::INFO))] + #[instrument(skip(self, announce_handler, scrape_handler, whitelist_authorization, opt_stats_event_sender, ban_service, form), err, ret(Display, level = Level::INFO))] pub async fn start( self, core_config: Arc, - tracker: Arc, announce_handler: Arc, scrape_handler: Arc, whitelist_authorization: Arc, @@ -89,7 +88,6 @@ impl Server { // May need to wrap in a task to about a tokio bug. 
let task = self.state.spawner.spawn_launcher( core_config, - tracker, announce_handler, scrape_handler, whitelist_authorization, diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 927f76efe..3488456e7 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -15,7 +15,6 @@ use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::torrent::repository::in_memory::InMemoryTorrentRepository; use torrust_tracker_lib::core::whitelist::manager::WhiteListManager; -use torrust_tracker_lib::core::Tracker; use torrust_tracker_lib::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_lib::servers::udp::server::banning::BanService; @@ -27,7 +26,6 @@ where { pub config: Arc, pub database: Arc>, - pub tracker: Arc, pub in_memory_torrent_repository: Arc, pub keys_handler: Arc, pub authentication_service: Arc, @@ -66,7 +64,6 @@ impl Environment { Self { config, database: app_container.database.clone(), - tracker: app_container.tracker.clone(), in_memory_torrent_repository: app_container.in_memory_torrent_repository.clone(), keys_handler: app_container.keys_handler.clone(), authentication_service: app_container.authentication_service.clone(), @@ -85,7 +82,6 @@ impl Environment { Environment { config: self.config, database: self.database.clone(), - tracker: self.tracker.clone(), in_memory_torrent_repository: self.in_memory_torrent_repository.clone(), keys_handler: self.keys_handler.clone(), authentication_service: self.authentication_service.clone(), @@ -121,7 +117,6 @@ impl Environment { Environment { config: self.config, database: self.database, - tracker: self.tracker, in_memory_torrent_repository: self.in_memory_torrent_repository, keys_handler: self.keys_handler, authentication_service: self.authentication_service, diff --git 
a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 203dc880e..589430848 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -13,8 +13,8 @@ use torrust_tracker_lib::core::scrape_handler::ScrapeHandler; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::torrent::repository::in_memory::InMemoryTorrentRepository; +use torrust_tracker_lib::core::whitelist; use torrust_tracker_lib::core::whitelist::manager::WhiteListManager; -use torrust_tracker_lib::core::{whitelist, Tracker}; use torrust_tracker_lib::servers::http::server::{HttpServer, Launcher, Running, Stopped}; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_primitives::peer; @@ -23,7 +23,6 @@ pub struct Environment { pub core_config: Arc, pub http_tracker_config: Arc, pub database: Arc>, - pub tracker: Arc, pub announce_handler: Arc, pub scrape_handler: Arc, pub in_memory_torrent_repository: Arc, @@ -68,7 +67,6 @@ impl Environment { http_tracker_config: config, core_config: Arc::new(configuration.core.clone()), database: app_container.database.clone(), - tracker: app_container.tracker.clone(), announce_handler: app_container.announce_handler.clone(), scrape_handler: app_container.scrape_handler.clone(), in_memory_torrent_repository: app_container.in_memory_torrent_repository.clone(), @@ -89,7 +87,6 @@ impl Environment { http_tracker_config: self.http_tracker_config, core_config: self.core_config.clone(), database: self.database.clone(), - tracker: self.tracker.clone(), announce_handler: self.announce_handler.clone(), scrape_handler: self.scrape_handler.clone(), in_memory_torrent_repository: self.in_memory_torrent_repository.clone(), @@ -104,7 +101,6 @@ impl Environment { .server .start( self.core_config, - self.tracker, self.announce_handler, self.scrape_handler, self.authentication_service, @@ -128,7 +124,6 @@ 
impl Environment { http_tracker_config: self.http_tracker_config, core_config: self.core_config, database: self.database, - tracker: self.tracker, announce_handler: self.announce_handler, scrape_handler: self.scrape_handler, in_memory_torrent_repository: self.in_memory_torrent_repository, diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 11967aeed..a6ddd7a83 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -11,7 +11,7 @@ use torrust_tracker_lib::core::scrape_handler::ScrapeHandler; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::torrent::repository::in_memory::InMemoryTorrentRepository; -use torrust_tracker_lib::core::{whitelist, Tracker}; +use torrust_tracker_lib::core::whitelist; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_lib::servers::udp::server::banning::BanService; use torrust_tracker_lib::servers::udp::server::spawner::Spawner; @@ -26,7 +26,6 @@ where pub core_config: Arc, pub config: Arc, pub database: Arc>, - pub tracker: Arc, pub in_memory_torrent_repository: Arc, pub announce_handler: Arc, pub scrape_handler: Arc, @@ -68,7 +67,6 @@ impl Environment { core_config: Arc::new(configuration.core.clone()), config, database: app_container.database.clone(), - tracker: app_container.tracker.clone(), in_memory_torrent_repository: app_container.in_memory_torrent_repository.clone(), announce_handler: app_container.announce_handler.clone(), scrape_handler: app_container.scrape_handler.clone(), @@ -88,7 +86,6 @@ impl Environment { core_config: self.core_config.clone(), config: self.config, database: self.database.clone(), - tracker: self.tracker.clone(), in_memory_torrent_repository: self.in_memory_torrent_repository.clone(), announce_handler: self.announce_handler.clone(), scrape_handler: self.scrape_handler.clone(), @@ -101,7 +98,6 @@ impl 
Environment { .server .start( self.core_config, - self.tracker, self.announce_handler, self.scrape_handler, self.whitelist_authorization, @@ -133,7 +129,6 @@ impl Environment { core_config: self.core_config, config: self.config, database: self.database, - tracker: self.tracker, in_memory_torrent_repository: self.in_memory_torrent_repository, announce_handler: self.announce_handler, scrape_handler: self.scrape_handler, From c3f0bc76a6f5312d58cfaef73216e665606cf062 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 27 Jan 2025 18:52:02 +0000 Subject: [PATCH 165/802] refactor: [#1211] move tracker tests to InMemoryTorrentRepository --- src/core/mod.rs | 165 ------------------- src/core/torrent/repository/in_memory.rs | 196 ++++++++++++++++++++++- 2 files changed, 195 insertions(+), 166 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index f09e7d417..2c22f561b 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -462,11 +462,8 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; - use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; use bittorrent_primitives::info_hash::InfoHash; - use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; use torrust_tracker_primitives::peer::Peer; - use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration; @@ -503,22 +500,6 @@ mod tests { (announce_handler, in_memory_torrent_repository, scrape_handler) } - fn initialize_in_memory_torrents_repository() -> Arc { - let config = configuration::ephemeral_public(); - - let ( - _database, - _in_memory_whitelist, - _whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - _db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(&config); - - in_memory_torrent_repository - } - #[allow(clippy::type_complexity)] fn whitelisted_tracker() -> ( Arc, @@ 
-659,152 +640,6 @@ mod tests { } } - #[tokio::test] - async fn it_should_return_the_peers_for_a_given_torrent() { - let in_memory_torrent_repository = initialize_in_memory_torrents_repository(); - - let info_hash = sample_info_hash(); - let peer = sample_peer(); - - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); - - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); - - assert_eq!(peers, vec![Arc::new(peer)]); - } - - /// It generates a peer id from a number where the number is the last - /// part of the peer ID. For example, for `12` it returns - /// `-qB00000000000000012`. - fn numeric_peer_id(two_digits_value: i32) -> PeerId { - // Format idx as a string with leading zeros, ensuring it has exactly 2 digits - let idx_str = format!("{two_digits_value:02}"); - - // Create the base part of the peer ID. - let base = b"-qB00000000000000000"; - - // Concatenate the base with idx bytes, ensuring the total length is 20 bytes. - let mut peer_id_bytes = [0u8; 20]; - peer_id_bytes[..base.len()].copy_from_slice(base); - peer_id_bytes[base.len() - idx_str.len()..].copy_from_slice(idx_str.as_bytes()); - - PeerId(peer_id_bytes) - } - - #[tokio::test] - async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { - let in_memory_torrent_repository = initialize_in_memory_torrents_repository(); - - let info_hash = sample_info_hash(); - - for idx in 1..=75 { - let peer = Peer { - peer_id: numeric_peer_id(idx), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - }; - - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); - } - - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); - - assert_eq!(peers.len(), 74); - } - - 
#[tokio::test] - async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { - let (_announce_handler, in_memory_torrent_repository, _scrape_handler) = public_tracker(); - - let info_hash = sample_info_hash(); - let peer = sample_peer(); - - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); - - let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); - - assert_eq!(peers, vec![]); - } - - #[tokio::test] - async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { - let (_announce_handler, in_memory_torrent_repository, _scrape_handler) = public_tracker(); - - let info_hash = sample_info_hash(); - - let excluded_peer = sample_peer(); - - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &excluded_peer); - - // Add 74 peers - for idx in 2..=75 { - let peer = Peer { - peer_id: numeric_peer_id(idx), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - }; - - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); - } - - let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); - - assert_eq!(peers.len(), 74); - } - - #[tokio::test] - async fn it_should_return_the_torrent_metrics() { - let in_memory_torrent_repository = initialize_in_memory_torrents_repository(); - - let () = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &leecher()); - - let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); - - assert_eq!( - torrent_metrics, - TorrentsMetrics { - complete: 0, - downloaded: 0, - incomplete: 1, - torrents: 1, - } - ); - } - - #[tokio::test] - async fn 
it_should_get_many_the_torrent_metrics() { - let in_memory_torrent_repository = initialize_in_memory_torrents_repository(); - - let start_time = std::time::Instant::now(); - for i in 0..1_000_000 { - let () = in_memory_torrent_repository.upsert_peer(&gen_seeded_infohash(&i), &leecher()); - } - let result_a = start_time.elapsed(); - - let start_time = std::time::Instant::now(); - let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); - let result_b = start_time.elapsed(); - - assert_eq!( - (torrent_metrics), - (TorrentsMetrics { - complete: 0, - downloaded: 0, - incomplete: 1_000_000, - torrents: 1_000_000, - }), - "{result_a:?} {result_b:?}" - ); - } - mod for_all_config_modes { mod handling_an_announce_request { diff --git a/src/core/torrent/repository/in_memory.rs b/src/core/torrent/repository/in_memory.rs index 7d469a0f5..50858d4f3 100644 --- a/src/core/torrent/repository/in_memory.rs +++ b/src/core/torrent/repository/in_memory.rs @@ -104,14 +104,80 @@ impl InMemoryTorrentRepository { #[cfg(test)] mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; + use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; + use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; + use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; + fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() + } + + /// Sample peer whose state is not relevant for the tests + fn sample_peer() -> Peer { + complete_peer() + } + + fn leecher() -> Peer { + incomplete_peer() + } + + /// A peer that counts as `complete` is swarm metadata + /// IMPORTANT!: it only counts if the it has been announce 
at least once before + /// announcing the `AnnounceEvent::Completed` event. + fn complete_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } + + /// A peer that counts as `incomplete` is swarm metadata + fn incomplete_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(1000), // Still bytes to download + event: AnnounceEvent::Started, + } + } + + /// It generates a peer id from a number where the number is the last + /// part of the peer ID. For example, for `12` it returns + /// `-qB00000000000000012`. + fn numeric_peer_id(two_digits_value: i32) -> PeerId { + // Format idx as a string with leading zeros, ensuring it has exactly 2 digits + let idx_str = format!("{two_digits_value:02}"); + + // Create the base part of the peer ID. + let base = b"-qB00000000000000000"; + + // Concatenate the base with idx bytes, ensuring the total length is 20 bytes. 
+ let mut peer_id_bytes = [0u8; 20]; + peer_id_bytes[..base.len()].copy_from_slice(base); + peer_id_bytes[base.len() - idx_str.len()..].copy_from_slice(idx_str.as_bytes()); + + PeerId(peer_id_bytes) + } + #[tokio::test] - async fn should_collect_torrent_metrics() { + async fn it_should_collect_torrent_metrics() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); @@ -126,4 +192,132 @@ mod tests { } ); } + + #[tokio::test] + async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let info_hash = sample_info_hash(); + + for idx in 1..=75 { + let peer = Peer { + peer_id: numeric_peer_id(idx), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + }; + + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + } + + let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); + + assert_eq!(peers.len(), 74); + } + + #[tokio::test] + async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + + let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); + + assert_eq!(peers, vec![]); + } + + #[tokio::test] + async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { + let in_memory_torrent_repository = 
Arc::new(InMemoryTorrentRepository::default()); + + let info_hash = sample_info_hash(); + + let excluded_peer = sample_peer(); + + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &excluded_peer); + + // Add 74 peers + for idx in 2..=75 { + let peer = Peer { + peer_id: numeric_peer_id(idx), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + }; + + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + } + + let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); + + assert_eq!(peers.len(), 74); + } + + #[tokio::test] + async fn it_should_return_the_torrent_metrics() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let () = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &leecher()); + + let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); + + assert_eq!( + torrent_metrics, + TorrentsMetrics { + complete: 0, + downloaded: 0, + incomplete: 1, + torrents: 1, + } + ); + } + + #[tokio::test] + async fn it_should_get_many_the_torrent_metrics() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let start_time = std::time::Instant::now(); + for i in 0..1_000_000 { + let () = in_memory_torrent_repository.upsert_peer(&gen_seeded_infohash(&i), &leecher()); + } + let result_a = start_time.elapsed(); + + let start_time = std::time::Instant::now(); + let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); + let result_b = start_time.elapsed(); + + assert_eq!( + (torrent_metrics), + (TorrentsMetrics { + complete: 0, + downloaded: 0, + incomplete: 1_000_000, + torrents: 1_000_000, + }), + 
"{result_a:?} {result_b:?}" + ); + } + + #[tokio::test] + async fn it_should_return_the_peers_for_a_given_torrent() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + + let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); + + assert_eq!(peers, vec![Arc::new(peer)]); + } } From c785fd158e228eab61a6aa666b5b7120442b85dc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 28 Jan 2025 07:16:01 +0000 Subject: [PATCH 166/802] refactor: [#1211] move tests to AnnounceHandler --- src/core/announce_handler.rs | 389 ++++++++++++++++++++++++++++++++++- src/core/mod.rs | 302 --------------------------- 2 files changed, 388 insertions(+), 303 deletions(-) diff --git a/src/core/announce_handler.rs b/src/core/announce_handler.rs index a037d33d4..9abf4c509 100644 --- a/src/core/announce_handler.rs +++ b/src/core/announce_handler.rs @@ -155,10 +155,397 @@ impl From for PeersWanted { } #[must_use] -pub fn assign_ip_address_to_peer(remote_client_ip: &IpAddr, tracker_external_ip: Option) -> IpAddr { +fn assign_ip_address_to_peer(remote_client_ip: &IpAddr, tracker_external_ip: Option) -> IpAddr { if let Some(host_ip) = tracker_external_ip.filter(|_| remote_client_ip.is_loopback()) { host_ip } else { *remote_client_ip } } + +#[cfg(test)] +mod tests { + // Integration tests for the core module. 
+ + mod the_announce_handler { + + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::str::FromStr; + use std::sync::Arc; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; + use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer::Peer; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + use torrust_tracker_test_helpers::configuration; + + use crate::app_test::initialize_tracker_dependencies; + use crate::core::announce_handler::AnnounceHandler; + use crate::core::scrape_handler::ScrapeHandler; + use crate::core::torrent::manager::TorrentsManager; + use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; + + fn public_tracker() -> (Arc, Arc, Arc) { + let config = configuration::ephemeral_public(); + + let ( + _database, + _in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + _torrents_manager, + ) = initialize_tracker_dependencies(&config); + + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + (announce_handler, in_memory_torrent_repository, scrape_handler) + } + + pub fn tracker_persisting_torrents_in_database( + ) -> (Arc, Arc, Arc) { + let mut config = configuration::ephemeral_listed(); + config.core.tracker_policy.persistent_torrent_completed_stat = true; + + let ( + _database, + _in_memory_whitelist, + _whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + ) = initialize_tracker_dependencies(&config); + + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + (announce_handler, torrents_manager, in_memory_torrent_repository) + } + + fn 
sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() + } + + // The client peer IP + fn peer_ip() -> IpAddr { + IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) + } + + /// Sample peer whose state is not relevant for the tests + fn sample_peer() -> Peer { + complete_peer() + } + + /// Sample peer when for tests that need more than one peer + fn sample_peer_1() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000001"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), + event: AnnounceEvent::Completed, + } + } + + /// Sample peer when for tests that need more than one peer + fn sample_peer_2() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000002"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8082), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), + event: AnnounceEvent::Completed, + } + } + + fn seeder() -> Peer { + complete_peer() + } + + fn leecher() -> Peer { + incomplete_peer() + } + + fn started_peer() -> Peer { + incomplete_peer() + } + + fn completed_peer() -> Peer { + complete_peer() + } + + /// A peer that counts as `complete` is swarm metadata + /// IMPORTANT!: it only counts if the it has been announce at least once before + /// announcing the `AnnounceEvent::Completed` event. 
+ fn complete_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } + + /// A peer that counts as `incomplete` is swarm metadata + fn incomplete_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(1000), // Still bytes to download + event: AnnounceEvent::Started, + } + } + + mod for_all_tracker_config_modes { + + mod handling_an_announce_request { + + use std::sync::Arc; + + use crate::core::announce_handler::tests::the_announce_handler::{ + peer_ip, public_tracker, sample_info_hash, sample_peer, sample_peer_1, sample_peer_2, + }; + use crate::core::announce_handler::PeersWanted; + + mod should_assign_the_ip_to_the_peer { + + use std::net::{IpAddr, Ipv4Addr}; + + use crate::core::announce_handler::assign_ip_address_to_peer; + + #[test] + fn using_the_source_ip_instead_of_the_ip_in_the_announce_request() { + let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + assert_eq!(peer_ip, remote_ip); + } + + mod and_when_the_client_ip_is_a_ipv4_loopback_ip { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::str::FromStr; + + use crate::core::announce_handler::assign_ip_address_to_peer; + + #[test] + fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + 
assert_eq!(peer_ip, remote_ip); + } + + #[test] + fn it_should_use_the_external_tracker_ip_in_tracker_configuration_if_it_is_defined() { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv6_ip( + ) { + let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); + + let tracker_external_ip = + IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + } + + mod and_when_client_ip_is_a_ipv6_loopback_ip { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::str::FromStr; + + use crate::core::announce_handler::assign_ip_address_to_peer; + + #[test] + fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, None); + + assert_eq!(peer_ip, remote_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_tracker_configuration_if_it_is_defined() { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let tracker_external_ip = + IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); + + let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + + #[test] + fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv4_ip( + ) { + let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); + + let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); + + let peer_ip = 
assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); + + assert_eq!(peer_ip, tracker_external_ip); + } + } + } + + #[tokio::test] + async fn it_should_return_the_announce_data_with_an_empty_peer_list_when_it_is_the_first_announced_peer() { + let (announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); + + let mut peer = sample_peer(); + + let announce_data = announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); + + assert_eq!(announce_data.peers, vec![]); + } + + #[tokio::test] + async fn it_should_return_the_announce_data_with_the_previously_announced_peers() { + let (announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); + + let mut previously_announced_peer = sample_peer_1(); + announce_handler.announce( + &sample_info_hash(), + &mut previously_announced_peer, + &peer_ip(), + &PeersWanted::All, + ); + + let mut peer = sample_peer_2(); + let announce_data = announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); + + assert_eq!(announce_data.peers, vec![Arc::new(previously_announced_peer)]); + } + + mod it_should_update_the_swarm_stats_for_the_torrent { + + use crate::core::announce_handler::tests::the_announce_handler::{ + completed_peer, leecher, peer_ip, public_tracker, sample_info_hash, seeder, started_peer, + }; + use crate::core::announce_handler::PeersWanted; + + #[tokio::test] + async fn when_the_peer_is_a_seeder() { + let (announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); + + let mut peer = seeder(); + + let announce_data = + announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); + + assert_eq!(announce_data.stats.complete, 1); + } + + #[tokio::test] + async fn when_the_peer_is_a_leecher() { + let (announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); + + let mut peer = leecher(); + + let announce_data = + 
announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); + + assert_eq!(announce_data.stats.incomplete, 1); + } + + #[tokio::test] + async fn when_a_previously_announced_started_peer_has_completed_downloading() { + let (announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); + + // We have to announce with "started" event because peer does not count if peer was not previously known + let mut started_peer = started_peer(); + announce_handler.announce(&sample_info_hash(), &mut started_peer, &peer_ip(), &PeersWanted::All); + + let mut completed_peer = completed_peer(); + let announce_data = + announce_handler.announce(&sample_info_hash(), &mut completed_peer, &peer_ip(), &PeersWanted::All); + + assert_eq!(announce_data.stats.downloaded, 1); + } + } + } + } + + mod handling_torrent_persistence { + + use aquatic_udp_protocol::AnnounceEvent; + use torrust_tracker_torrent_repository::entry::EntrySync; + + use crate::core::announce_handler::tests::the_announce_handler::{ + peer_ip, sample_info_hash, sample_peer, tracker_persisting_torrents_in_database, + }; + use crate::core::announce_handler::PeersWanted; + + #[tokio::test] + async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { + let (announce_handler, torrents_manager, in_memory_torrent_repository) = + tracker_persisting_torrents_in_database(); + + let info_hash = sample_info_hash(); + + let mut peer = sample_peer(); + + peer.event = AnnounceEvent::Started; + let announce_data = announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); + assert_eq!(announce_data.stats.downloaded, 0); + + peer.event = AnnounceEvent::Completed; + let announce_data = announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); + assert_eq!(announce_data.stats.downloaded, 1); + + // Remove the newly updated torrent from memory + let _unused = in_memory_torrent_repository.remove(&info_hash); + + 
torrents_manager.load_torrents_from_database().unwrap(); + + let torrent_entry = in_memory_torrent_repository + .get(&info_hash) + .expect("it should be able to get entry"); + + // It persists the number of completed peers. + assert_eq!(torrent_entry.get_swarm_metadata().downloaded, 1); + + // It does not persist the peers + assert!(torrent_entry.peers_is_empty()); + } + } + } +} diff --git a/src/core/mod.rs b/src/core/mod.rs index 2c22f561b..26d5a43df 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -471,7 +471,6 @@ mod tests { use crate::core::announce_handler::AnnounceHandler; use crate::core::scrape_handler::ScrapeHandler; use crate::core::services::initialize_whitelist_manager; - use crate::core::torrent::manager::TorrentsManager; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::whitelist; use crate::core::whitelist::manager::WhiteListManager; @@ -532,30 +531,6 @@ mod tests { (announce_handler, whitelist_authorization, whitelist_manager, scrape_handler) } - pub fn tracker_persisting_torrents_in_database( - ) -> (Arc, Arc, Arc) { - let mut config = configuration::ephemeral_listed(); - config.core.tracker_policy.persistent_torrent_completed_stat = true; - - let ( - _database, - _in_memory_whitelist, - _whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - torrents_manager, - ) = initialize_tracker_dependencies(&config); - - let announce_handler = Arc::new(AnnounceHandler::new( - &config.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - (announce_handler, torrents_manager, in_memory_torrent_repository) - } - fn sample_info_hash() -> InfoHash { "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() } @@ -565,53 +540,6 @@ mod tests { IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) } - /// Sample peer whose state is not relevant for the tests - fn sample_peer() -> Peer { - complete_peer() - } - - /// Sample peer when for 
tests that need more than one peer - fn sample_peer_1() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000001"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), - event: AnnounceEvent::Completed, - } - } - - /// Sample peer when for tests that need more than one peer - fn sample_peer_2() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000002"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8082), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), - event: AnnounceEvent::Completed, - } - } - - fn seeder() -> Peer { - complete_peer() - } - - fn leecher() -> Peer { - incomplete_peer() - } - - fn started_peer() -> Peer { - incomplete_peer() - } - - fn completed_peer() -> Peer { - complete_peer() - } - /// A peer that counts as `complete` is swarm metadata /// IMPORTANT!: it only counts if the it has been announce at least once before /// announcing the `AnnounceEvent::Completed` event. 
@@ -642,190 +570,6 @@ mod tests { mod for_all_config_modes { - mod handling_an_announce_request { - - use std::sync::Arc; - - use crate::core::announce_handler::PeersWanted; - use crate::core::tests::the_tracker::{ - peer_ip, public_tracker, sample_info_hash, sample_peer, sample_peer_1, sample_peer_2, - }; - - mod should_assign_the_ip_to_the_peer { - - use std::net::{IpAddr, Ipv4Addr}; - - use crate::core::announce_handler::assign_ip_address_to_peer; - - #[test] - fn using_the_source_ip_instead_of_the_ip_in_the_announce_request() { - let remote_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)); - - let peer_ip = assign_ip_address_to_peer(&remote_ip, None); - - assert_eq!(peer_ip, remote_ip); - } - - mod and_when_the_client_ip_is_a_ipv4_loopback_ip { - - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - use std::str::FromStr; - - use crate::core::announce_handler::assign_ip_address_to_peer; - - #[test] - fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { - let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); - - let peer_ip = assign_ip_address_to_peer(&remote_ip, None); - - assert_eq!(peer_ip, remote_ip); - } - - #[test] - fn it_should_use_the_external_tracker_ip_in_tracker_configuration_if_it_is_defined() { - let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); - - let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); - - let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); - - assert_eq!(peer_ip, tracker_external_ip); - } - - #[test] - fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv6_ip( - ) { - let remote_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); - - let tracker_external_ip = - IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); - - let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); - - assert_eq!(peer_ip, tracker_external_ip); - } - } - - mod 
and_when_client_ip_is_a_ipv6_loopback_ip { - - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; - use std::str::FromStr; - - use crate::core::announce_handler::assign_ip_address_to_peer; - - #[test] - fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { - let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); - - let peer_ip = assign_ip_address_to_peer(&remote_ip, None); - - assert_eq!(peer_ip, remote_ip); - } - - #[test] - fn it_should_use_the_external_ip_in_tracker_configuration_if_it_is_defined() { - let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); - - let tracker_external_ip = - IpAddr::V6(Ipv6Addr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()); - - let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); - - assert_eq!(peer_ip, tracker_external_ip); - } - - #[test] - fn it_should_use_the_external_ip_in_the_tracker_configuration_if_it_is_defined_even_if_the_external_ip_is_an_ipv4_ip( - ) { - let remote_ip = IpAddr::V6(Ipv6Addr::LOCALHOST); - - let tracker_external_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); - - let peer_ip = assign_ip_address_to_peer(&remote_ip, Some(tracker_external_ip)); - - assert_eq!(peer_ip, tracker_external_ip); - } - } - } - - #[tokio::test] - async fn it_should_return_the_announce_data_with_an_empty_peer_list_when_it_is_the_first_announced_peer() { - let (announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); - - let mut peer = sample_peer(); - - let announce_data = announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); - - assert_eq!(announce_data.peers, vec![]); - } - - #[tokio::test] - async fn it_should_return_the_announce_data_with_the_previously_announced_peers() { - let (announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); - - let mut previously_announced_peer = sample_peer_1(); - announce_handler.announce( - &sample_info_hash(), - &mut 
previously_announced_peer, - &peer_ip(), - &PeersWanted::All, - ); - - let mut peer = sample_peer_2(); - let announce_data = announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); - - assert_eq!(announce_data.peers, vec![Arc::new(previously_announced_peer)]); - } - - mod it_should_update_the_swarm_stats_for_the_torrent { - - use crate::core::announce_handler::PeersWanted; - use crate::core::tests::the_tracker::{ - completed_peer, leecher, peer_ip, public_tracker, sample_info_hash, seeder, started_peer, - }; - - #[tokio::test] - async fn when_the_peer_is_a_seeder() { - let (announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); - - let mut peer = seeder(); - - let announce_data = - announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); - - assert_eq!(announce_data.stats.complete, 1); - } - - #[tokio::test] - async fn when_the_peer_is_a_leecher() { - let (announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); - - let mut peer = leecher(); - - let announce_data = - announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); - - assert_eq!(announce_data.stats.incomplete, 1); - } - - #[tokio::test] - async fn when_a_previously_announced_started_peer_has_completed_downloading() { - let (announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); - - // We have to announce with "started" event because peer does not count if peer was not previously known - let mut started_peer = started_peer(); - announce_handler.announce(&sample_info_hash(), &mut started_peer, &peer_ip(), &PeersWanted::All); - - let mut completed_peer = completed_peer(); - let announce_data = - announce_handler.announce(&sample_info_hash(), &mut completed_peer, &peer_ip(), &PeersWanted::All); - - assert_eq!(announce_data.stats.downloaded, 1); - } - } - } - mod handling_a_scrape_request { use std::net::{IpAddr, Ipv4Addr}; 
@@ -964,8 +708,6 @@ mod tests { } } - mod handling_an_announce_request {} - mod handling_an_scrape_request { use bittorrent_primitives::info_hash::InfoHash; @@ -1012,49 +754,5 @@ mod tests { } } } - - mod handling_torrent_persistence { - - use aquatic_udp_protocol::AnnounceEvent; - use torrust_tracker_torrent_repository::entry::EntrySync; - - use crate::core::announce_handler::PeersWanted; - use crate::core::tests::the_tracker::{ - peer_ip, sample_info_hash, sample_peer, tracker_persisting_torrents_in_database, - }; - - #[tokio::test] - async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { - let (announce_handler, torrents_manager, in_memory_torrent_repository) = - tracker_persisting_torrents_in_database(); - - let info_hash = sample_info_hash(); - - let mut peer = sample_peer(); - - peer.event = AnnounceEvent::Started; - let announce_data = announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); - assert_eq!(announce_data.stats.downloaded, 0); - - peer.event = AnnounceEvent::Completed; - let announce_data = announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); - assert_eq!(announce_data.stats.downloaded, 1); - - // Remove the newly updated torrent from memory - let _unused = in_memory_torrent_repository.remove(&info_hash); - - torrents_manager.load_torrents_from_database().unwrap(); - - let torrent_entry = in_memory_torrent_repository - .get(&info_hash) - .expect("it should be able to get entry"); - - // It persists the number of completed peers. 
- assert_eq!(torrent_entry.get_swarm_metadata().downloaded, 1); - - // It does not persist the peers - assert!(torrent_entry.peers_is_empty()); - } - } } } From e2d573b0d4b855cb1d2d006047899ce236a88440 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 28 Jan 2025 07:33:53 +0000 Subject: [PATCH 167/802] refactor: [#1211] move tests to whitelist module --- src/core/announce_handler.rs | 2 - src/core/mod.rs | 84 --------------------- src/core/whitelist/authorization.rs | 82 +++++++++++++++++++++ src/core/whitelist/manager.rs | 109 ++++++++++++++++++++++++++++ src/core/whitelist/mod.rs | 82 +++++++++++++++++++++ 5 files changed, 273 insertions(+), 86 deletions(-) diff --git a/src/core/announce_handler.rs b/src/core/announce_handler.rs index 9abf4c509..2ebd4daf0 100644 --- a/src/core/announce_handler.rs +++ b/src/core/announce_handler.rs @@ -165,8 +165,6 @@ fn assign_ip_address_to_peer(remote_client_ip: &IpAddr, tracker_external_ip: Opt #[cfg(test)] mod tests { - // Integration tests for the core module. - mod the_announce_handler { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; diff --git a/src/core/mod.rs b/src/core/mod.rs index 26d5a43df..937cc4f78 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -453,8 +453,6 @@ pub mod peer_tests; #[cfg(test)] mod tests { - // Integration tests for the core module. 
- mod the_tracker { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; @@ -626,88 +624,6 @@ mod tests { mod configured_as_whitelisted { - mod handling_authorization { - use crate::core::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; - - #[tokio::test] - async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { - let (_announce_handler, whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); - - let info_hash = sample_info_hash(); - - let result = whitelist_manager.add_torrent_to_whitelist(&info_hash).await; - assert!(result.is_ok()); - - let result = whitelist_authorization.authorize(&info_hash).await; - assert!(result.is_ok()); - } - - #[tokio::test] - async fn it_should_not_authorize_the_announce_and_scrape_actions_on_not_whitelisted_torrents() { - let (_announce_handler, whitelist_authorization, _whitelist_manager, _scrape_handler) = whitelisted_tracker(); - - let info_hash = sample_info_hash(); - - let result = whitelist_authorization.authorize(&info_hash).await; - assert!(result.is_err()); - } - } - - mod handling_the_torrent_whitelist { - use crate::core::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; - - // todo: after extracting the WhitelistManager from the Tracker, - // there is no need to use the tracker to test the whitelist. - // Test not using the `tracker` (`_tracker` variable) should be - // moved to the whitelist module. 
- - #[tokio::test] - async fn it_should_add_a_torrent_to_the_whitelist() { - let (_announce_handler, _whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); - - let info_hash = sample_info_hash(); - - whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); - - assert!(whitelist_manager.is_info_hash_whitelisted(&info_hash).await); - } - - #[tokio::test] - async fn it_should_remove_a_torrent_from_the_whitelist() { - let (_announce_handler, _whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); - - let info_hash = sample_info_hash(); - - whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); - - whitelist_manager.remove_torrent_from_whitelist(&info_hash).await.unwrap(); - - assert!(!whitelist_manager.is_info_hash_whitelisted(&info_hash).await); - } - - mod persistence { - use crate::core::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; - - #[tokio::test] - async fn it_should_load_the_whitelist_from_the_database() { - let (_announce_handler, _whitelist_authorization, whitelist_manager, _scrape_handler) = - whitelisted_tracker(); - - let info_hash = sample_info_hash(); - - whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); - - whitelist_manager.remove_torrent_from_memory_whitelist(&info_hash).await; - - assert!(!whitelist_manager.is_info_hash_whitelisted(&info_hash).await); - - whitelist_manager.load_whitelist_from_database().await.unwrap(); - - assert!(whitelist_manager.is_info_hash_whitelisted(&info_hash).await); - } - } - } - mod handling_an_scrape_request { use bittorrent_primitives::info_hash::InfoHash; diff --git a/src/core/whitelist/authorization.rs b/src/core/whitelist/authorization.rs index 74029495f..55410d934 100644 --- a/src/core/whitelist/authorization.rs +++ b/src/core/whitelist/authorization.rs @@ -57,3 +57,85 @@ impl Authorization { self.in_memory_whitelist.contains(info_hash).await } } + +#[cfg(test)] +mod tests { + + use 
std::sync::Arc; + + use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; + + use crate::app_test::initialize_tracker_dependencies; + use crate::core::announce_handler::AnnounceHandler; + use crate::core::scrape_handler::ScrapeHandler; + use crate::core::services::initialize_whitelist_manager; + use crate::core::whitelist; + use crate::core::whitelist::manager::WhiteListManager; + + #[allow(clippy::type_complexity)] + fn whitelisted_tracker() -> ( + Arc, + Arc, + Arc, + Arc, + ) { + let config = configuration::ephemeral_listed(); + + let ( + database, + in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + _torrents_manager, + ) = initialize_tracker_dependencies(&config); + + let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + (announce_handler, whitelist_authorization, whitelist_manager, scrape_handler) + } + + fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() + } + + mod configured_as_whitelisted { + + mod handling_authorization { + use crate::core::whitelist::authorization::tests::{sample_info_hash, whitelisted_tracker}; + + #[tokio::test] + async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { + let (_announce_handler, whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + let result = whitelist_manager.add_torrent_to_whitelist(&info_hash).await; + assert!(result.is_ok()); + + let result = whitelist_authorization.authorize(&info_hash).await; + assert!(result.is_ok()); + } + + 
#[tokio::test] + async fn it_should_not_authorize_the_announce_and_scrape_actions_on_not_whitelisted_torrents() { + let (_announce_handler, whitelist_authorization, _whitelist_manager, _scrape_handler) = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + let result = whitelist_authorization.authorize(&info_hash).await; + assert!(result.is_err()); + } + } + } +} diff --git a/src/core/whitelist/manager.rs b/src/core/whitelist/manager.rs index 757053f71..23095cfb7 100644 --- a/src/core/whitelist/manager.rs +++ b/src/core/whitelist/manager.rs @@ -89,3 +89,112 @@ impl WhiteListManager { Ok(()) } } + +#[cfg(test)] +mod tests { + + use std::sync::Arc; + + use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; + + use crate::app_test::initialize_tracker_dependencies; + use crate::core::announce_handler::AnnounceHandler; + use crate::core::scrape_handler::ScrapeHandler; + use crate::core::services::initialize_whitelist_manager; + use crate::core::whitelist; + use crate::core::whitelist::manager::WhiteListManager; + + #[allow(clippy::type_complexity)] + fn whitelisted_tracker() -> ( + Arc, + Arc, + Arc, + Arc, + ) { + let config = configuration::ephemeral_listed(); + + let ( + database, + in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + _torrents_manager, + ) = initialize_tracker_dependencies(&config); + + let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + (announce_handler, whitelist_authorization, whitelist_manager, scrape_handler) + } + + fn sample_info_hash() -> InfoHash { + 
"3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() + } + + mod configured_as_whitelisted { + + mod handling_the_torrent_whitelist { + use crate::core::whitelist::manager::tests::{sample_info_hash, whitelisted_tracker}; + + // todo: after extracting the WhitelistManager from the Tracker, + // there is no need to use the tracker to test the whitelist. + // Test not using the `tracker` (`_tracker` variable) should be + // moved to the whitelist module. + + #[tokio::test] + async fn it_should_add_a_torrent_to_the_whitelist() { + let (_announce_handler, _whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + assert!(whitelist_manager.is_info_hash_whitelisted(&info_hash).await); + } + + #[tokio::test] + async fn it_should_remove_a_torrent_from_the_whitelist() { + let (_announce_handler, _whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + whitelist_manager.remove_torrent_from_whitelist(&info_hash).await.unwrap(); + + assert!(!whitelist_manager.is_info_hash_whitelisted(&info_hash).await); + } + + mod persistence { + use crate::core::whitelist::manager::tests::{sample_info_hash, whitelisted_tracker}; + + #[tokio::test] + async fn it_should_load_the_whitelist_from_the_database() { + let (_announce_handler, _whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); + + whitelist_manager.remove_torrent_from_memory_whitelist(&info_hash).await; + + assert!(!whitelist_manager.is_info_hash_whitelisted(&info_hash).await); + + whitelist_manager.load_whitelist_from_database().await.unwrap(); + + 
assert!(whitelist_manager.is_info_hash_whitelisted(&info_hash).await); + } + } + } + } +} diff --git a/src/core/whitelist/mod.rs b/src/core/whitelist/mod.rs index 89c69b761..cd4c238f7 100644 --- a/src/core/whitelist/mod.rs +++ b/src/core/whitelist/mod.rs @@ -1,3 +1,85 @@ pub mod authorization; pub mod manager; pub mod repository; + +#[cfg(test)] +mod tests { + + use std::sync::Arc; + + use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_test_helpers::configuration; + + use crate::app_test::initialize_tracker_dependencies; + use crate::core::announce_handler::AnnounceHandler; + use crate::core::scrape_handler::ScrapeHandler; + use crate::core::services::initialize_whitelist_manager; + use crate::core::whitelist; + use crate::core::whitelist::manager::WhiteListManager; + + #[allow(clippy::type_complexity)] + fn whitelisted_tracker() -> ( + Arc, + Arc, + Arc, + Arc, + ) { + let config = configuration::ephemeral_listed(); + + let ( + database, + in_memory_whitelist, + whitelist_authorization, + _authentication_service, + in_memory_torrent_repository, + db_torrent_repository, + _torrents_manager, + ) = initialize_tracker_dependencies(&config); + + let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + (announce_handler, whitelist_authorization, whitelist_manager, scrape_handler) + } + + fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() + } + + mod configured_as_whitelisted { + + mod handling_authorization { + use crate::core::whitelist::tests::{sample_info_hash, whitelisted_tracker}; + + #[tokio::test] + async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { + let 
(_announce_handler, whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + let result = whitelist_manager.add_torrent_to_whitelist(&info_hash).await; + assert!(result.is_ok()); + + let result = whitelist_authorization.authorize(&info_hash).await; + assert!(result.is_ok()); + } + + #[tokio::test] + async fn it_should_not_authorize_the_announce_and_scrape_actions_on_not_whitelisted_torrents() { + let (_announce_handler, whitelist_authorization, _whitelist_manager, _scrape_handler) = whitelisted_tracker(); + + let info_hash = sample_info_hash(); + + let result = whitelist_authorization.authorize(&info_hash).await; + assert!(result.is_err()); + } + } + } +} From 22320f5ba009d13333d3c6f21e27be682dde2f4d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 28 Jan 2025 07:39:39 +0000 Subject: [PATCH 168/802] refactor: [#1211] move test to torrust_tracker_primitives::core --- packages/primitives/src/core.rs | 23 +++++++++++++++++++++++ src/core/mod.rs | 21 +-------------------- 2 files changed, 24 insertions(+), 20 deletions(-) diff --git a/packages/primitives/src/core.rs b/packages/primitives/src/core.rs index 0c0f68b8b..fe69c8959 100644 --- a/packages/primitives/src/core.rs +++ b/packages/primitives/src/core.rs @@ -56,3 +56,26 @@ impl ScrapeData { self.files.insert(*info_hash, SwarmMetadata::zeroed()); } } + +#[cfg(test)] +mod tests { + use bittorrent_primitives::info_hash::InfoHash; + + use crate::core::ScrapeData; + + fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() + } + + #[test] + fn it_should_be_able_to_build_a_zeroed_scrape_data_for_a_list_of_info_hashes() { + // Zeroed scrape data is used when the authentication for the scrape request fails. 
+ + let sample_info_hash = sample_info_hash(); + + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file_with_zeroed_metadata(&sample_info_hash); + + assert_eq!(ScrapeData::zeroed(&vec![sample_info_hash]), expected_scrape_data); + } +} diff --git a/src/core/mod.rs b/src/core/mod.rs index 937cc4f78..1a3c77555 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -460,7 +460,6 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; - use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration; @@ -529,10 +528,6 @@ mod tests { (announce_handler, whitelist_authorization, whitelist_manager, scrape_handler) } - fn sample_info_hash() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() - } - // The client peer IP fn peer_ip() -> IpAddr { IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) @@ -631,21 +626,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use crate::core::announce_handler::PeersWanted; - use crate::core::tests::the_tracker::{ - complete_peer, incomplete_peer, peer_ip, sample_info_hash, whitelisted_tracker, - }; - - #[test] - fn it_should_be_able_to_build_a_zeroed_scrape_data_for_a_list_of_info_hashes() { - // Zeroed scrape data is used when the authentication for the scrape request fails. 
- - let sample_info_hash = sample_info_hash(); - - let mut expected_scrape_data = ScrapeData::empty(); - expected_scrape_data.add_file_with_zeroed_metadata(&sample_info_hash); - - assert_eq!(ScrapeData::zeroed(&vec![sample_info_hash]), expected_scrape_data); - } + use crate::core::tests::the_tracker::{complete_peer, incomplete_peer, peer_ip, whitelisted_tracker}; #[tokio::test] async fn it_should_return_the_zeroed_swarm_metadata_for_the_requested_file_if_it_is_not_whitelisted() { From e8a2c8b843d5e78eb2c6b0914f947dbdcd67b467 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 28 Jan 2025 07:55:07 +0000 Subject: [PATCH 169/802] refactor: [#1211] clean tests in core mod --- src/core/mod.rs | 74 +++++++++++++++++-------------------------------- 1 file changed, 26 insertions(+), 48 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 1a3c77555..5a7bbc2fb 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -460,62 +460,38 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; + use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration; - use crate::app_test::initialize_tracker_dependencies; use crate::core::announce_handler::AnnounceHandler; use crate::core::scrape_handler::ScrapeHandler; - use crate::core::services::initialize_whitelist_manager; + use crate::core::services::initialize_database; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use crate::core::whitelist; - use crate::core::whitelist::manager::WhiteListManager; + use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; - fn public_tracker() -> (Arc, Arc, Arc) { + fn initialize_handlers_for_public_tracker() -> (Arc, Arc) { let config = configuration::ephemeral_public(); - - let ( 
- _database, - _in_memory_whitelist, - whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(&config); - - let announce_handler = Arc::new(AnnounceHandler::new( - &config.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - (announce_handler, in_memory_torrent_repository, scrape_handler) + initialize_handlers(&config) } - #[allow(clippy::type_complexity)] - fn whitelisted_tracker() -> ( - Arc, - Arc, - Arc, - Arc, - ) { + fn initialize_handlers_for_listed_tracker() -> (Arc, Arc) { let config = configuration::ephemeral_listed(); + initialize_handlers(&config) + } - let ( - database, - in_memory_whitelist, - whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(&config); - - let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + fn initialize_handlers(config: &Configuration) -> (Arc, Arc) { + let database = initialize_database(config); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( + &config.core, + &in_memory_whitelist.clone(), + )); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, @@ -525,7 +501,7 @@ mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - (announce_handler, whitelist_authorization, whitelist_manager, scrape_handler) + (announce_handler, scrape_handler) } // The client peer IP @@ 
-572,11 +548,11 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use crate::core::announce_handler::PeersWanted; - use crate::core::tests::the_tracker::{complete_peer, incomplete_peer, public_tracker}; + use crate::core::tests::the_tracker::{complete_peer, incomplete_peer, initialize_handlers_for_public_tracker}; #[tokio::test] async fn it_should_return_the_swarm_metadata_for_the_requested_file_if_the_tracker_has_that_torrent() { - let (announce_handler, _in_memory_torrent_repository, scrape_handler) = public_tracker(); + let (announce_handler, scrape_handler) = initialize_handlers_for_public_tracker(); let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // # DevSkim: ignore DS173237 @@ -619,18 +595,20 @@ mod tests { mod configured_as_whitelisted { - mod handling_an_scrape_request { + mod handling_a_scrape_request { use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use crate::core::announce_handler::PeersWanted; - use crate::core::tests::the_tracker::{complete_peer, incomplete_peer, peer_ip, whitelisted_tracker}; + use crate::core::tests::the_tracker::{ + complete_peer, incomplete_peer, initialize_handlers_for_listed_tracker, peer_ip, + }; #[tokio::test] async fn it_should_return_the_zeroed_swarm_metadata_for_the_requested_file_if_it_is_not_whitelisted() { - let (announce_handler, _whitelist_authorization, _whitelist_manager, scrape_handler) = whitelisted_tracker(); + let (announce_handler, scrape_handler) = initialize_handlers_for_listed_tracker(); let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // # DevSkim: ignore DS173237 From 55dc8b0177864b8915cb0aa894f6fb7653ada5e7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 28 Jan 2025 10:16:52 +0000 Subject: [PATCH 170/802] refactor: [#1211] clean AnnounceHandler tests --- src/core/announce_handler.rs | 97 
++++++++++++++---------------------- src/core/core_tests.rs | 33 ++++++++++++ src/core/mod.rs | 29 +---------- 3 files changed, 72 insertions(+), 87 deletions(-) create mode 100644 src/core/core_tests.rs diff --git a/src/core/announce_handler.rs b/src/core/announce_handler.rs index 2ebd4daf0..b30b071d3 100644 --- a/src/core/announce_handler.rs +++ b/src/core/announce_handler.rs @@ -177,62 +177,19 @@ mod tests { use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration; - use crate::app_test::initialize_tracker_dependencies; use crate::core::announce_handler::AnnounceHandler; + use crate::core::core_tests::initialize_handlers; use crate::core::scrape_handler::ScrapeHandler; - use crate::core::torrent::manager::TorrentsManager; - use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - fn public_tracker() -> (Arc, Arc, Arc) { + fn public_tracker() -> (Arc, Arc) { let config = configuration::ephemeral_public(); - - let ( - _database, - _in_memory_whitelist, - whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(&config); - - let announce_handler = Arc::new(AnnounceHandler::new( - &config.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - (announce_handler, in_memory_torrent_repository, scrape_handler) - } - - pub fn tracker_persisting_torrents_in_database( - ) -> (Arc, Arc, Arc) { - let mut config = configuration::ephemeral_listed(); - config.core.tracker_policy.persistent_torrent_completed_stat = true; - - let ( - _database, - _in_memory_whitelist, - _whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - torrents_manager, - ) = initialize_tracker_dependencies(&config); - - let announce_handler = 
Arc::new(AnnounceHandler::new( - &config.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - (announce_handler, torrents_manager, in_memory_torrent_repository) + initialize_handlers(&config) } fn sample_info_hash() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") } // The client peer IP @@ -426,7 +383,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_announce_data_with_an_empty_peer_list_when_it_is_the_first_announced_peer() { - let (announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); + let (announce_handler, _scrape_handler) = public_tracker(); let mut peer = sample_peer(); @@ -437,7 +394,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_announce_data_with_the_previously_announced_peers() { - let (announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); + let (announce_handler, _scrape_handler) = public_tracker(); let mut previously_announced_peer = sample_peer_1(); announce_handler.announce( @@ -462,7 +419,7 @@ mod tests { #[tokio::test] async fn when_the_peer_is_a_seeder() { - let (announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); + let (announce_handler, _scrape_handler) = public_tracker(); let mut peer = seeder(); @@ -474,7 +431,7 @@ mod tests { #[tokio::test] async fn when_the_peer_is_a_leecher() { - let (announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); + let (announce_handler, _scrape_handler) = public_tracker(); let mut peer = leecher(); @@ -486,7 +443,7 @@ mod tests { #[tokio::test] async fn when_a_previously_announced_started_peer_has_completed_downloading() { - let (announce_handler, _in_memory_torrent_repository, _scrape_handler) = public_tracker(); + let (announce_handler, _scrape_handler) = public_tracker(); // 
We have to announce with "started" event because peer does not count if peer was not previously known let mut started_peer = started_peer(); @@ -504,18 +461,38 @@ mod tests { mod handling_torrent_persistence { + use std::sync::Arc; + use aquatic_udp_protocol::AnnounceEvent; + use torrust_tracker_test_helpers::configuration; use torrust_tracker_torrent_repository::entry::EntrySync; - use crate::core::announce_handler::tests::the_announce_handler::{ - peer_ip, sample_info_hash, sample_peer, tracker_persisting_torrents_in_database, - }; - use crate::core::announce_handler::PeersWanted; + use crate::core::announce_handler::tests::the_announce_handler::{peer_ip, sample_info_hash, sample_peer}; + use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; + use crate::core::services::initialize_database; + use crate::core::torrent::manager::TorrentsManager; + use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; #[tokio::test] async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { - let (announce_handler, torrents_manager, in_memory_torrent_repository) = - tracker_persisting_torrents_in_database(); + let mut config = configuration::ephemeral_listed(); + + config.core.tracker_policy.persistent_torrent_completed_stat = true; + + let database = initialize_database(&config); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let torrents_manager = Arc::new(TorrentsManager::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); let info_hash = sample_info_hash(); diff --git a/src/core/core_tests.rs b/src/core/core_tests.rs new 
file mode 100644 index 000000000..6b9947700 --- /dev/null +++ b/src/core/core_tests.rs @@ -0,0 +1,33 @@ +use std::sync::Arc; + +use torrust_tracker_configuration::Configuration; + +use super::announce_handler::AnnounceHandler; +use super::scrape_handler::ScrapeHandler; +use super::services::initialize_database; +use super::torrent::repository::in_memory::InMemoryTorrentRepository; +use super::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use super::whitelist::repository::in_memory::InMemoryWhitelist; +use super::whitelist::{self}; + +#[must_use] +pub fn initialize_handlers(config: &Configuration) -> (Arc, Arc) { + let database = initialize_database(config); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( + &config.core, + &in_memory_whitelist.clone(), + )); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + (announce_handler, scrape_handler) +} diff --git a/src/core/mod.rs b/src/core/mod.rs index 5a7bbc2fb..581dd02f6 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -449,6 +449,7 @@ pub mod statistics; pub mod torrent; pub mod whitelist; +pub mod core_tests; pub mod peer_tests; #[cfg(test)] @@ -460,18 +461,13 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; - use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration; use crate::core::announce_handler::AnnounceHandler; + 
use crate::core::core_tests::initialize_handlers; use crate::core::scrape_handler::ScrapeHandler; - use crate::core::services::initialize_database; - use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; - use crate::core::whitelist; - use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; fn initialize_handlers_for_public_tracker() -> (Arc, Arc) { let config = configuration::ephemeral_public(); @@ -483,27 +479,6 @@ mod tests { initialize_handlers(&config) } - fn initialize_handlers(config: &Configuration) -> (Arc, Arc) { - let database = initialize_database(config); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( - &config.core, - &in_memory_whitelist.clone(), - )); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - - let announce_handler = Arc::new(AnnounceHandler::new( - &config.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - (announce_handler, scrape_handler) - } - // The client peer IP fn peer_ip() -> IpAddr { IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) From 65290213144799787544808db0366b47aaf975b5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 28 Jan 2025 10:24:54 +0000 Subject: [PATCH 171/802] refactor: [#1211] remove duplicate function --- packages/primitives/src/core.rs | 11 +++++++++-- src/core/announce_handler.rs | 16 ++++++---------- src/core/core_tests.rs | 11 +++++++++++ src/core/torrent/repository/in_memory.rs | 6 +----- src/core/whitelist/authorization.rs | 8 ++------ src/core/whitelist/manager.rs | 11 ++++------- src/core/whitelist/mod.rs | 8 
++------ src/core/whitelist/repository/in_memory.rs | 6 +----- src/servers/http/v1/services/announce.rs | 8 ++------ src/servers/http/v1/services/scrape.rs | 5 +---- 10 files changed, 39 insertions(+), 51 deletions(-) diff --git a/packages/primitives/src/core.rs b/packages/primitives/src/core.rs index fe69c8959..aa2fe6926 100644 --- a/packages/primitives/src/core.rs +++ b/packages/primitives/src/core.rs @@ -59,12 +59,19 @@ impl ScrapeData { #[cfg(test)] mod tests { + use bittorrent_primitives::info_hash::InfoHash; use crate::core::ScrapeData; - fn sample_info_hash() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. + #[must_use] + pub fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") } #[test] diff --git a/src/core/announce_handler.rs b/src/core/announce_handler.rs index b30b071d3..e19a1798b 100644 --- a/src/core/announce_handler.rs +++ b/src/core/announce_handler.rs @@ -172,7 +172,6 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; - use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration; @@ -186,12 +185,6 @@ mod tests { initialize_handlers(&config) } - fn sample_info_hash() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 - .parse::() - .expect("String should be a valid info hash") - } - // The client peer IP fn peer_ip() -> IpAddr { IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) @@ -279,9 +272,10 @@ mod tests { use std::sync::Arc; use crate::core::announce_handler::tests::the_announce_handler::{ - peer_ip, public_tracker, sample_info_hash, sample_peer, sample_peer_1, 
sample_peer_2, + peer_ip, public_tracker, sample_peer, sample_peer_1, sample_peer_2, }; use crate::core::announce_handler::PeersWanted; + use crate::core::core_tests::sample_info_hash; mod should_assign_the_ip_to_the_peer { @@ -413,9 +407,10 @@ mod tests { mod it_should_update_the_swarm_stats_for_the_torrent { use crate::core::announce_handler::tests::the_announce_handler::{ - completed_peer, leecher, peer_ip, public_tracker, sample_info_hash, seeder, started_peer, + completed_peer, leecher, peer_ip, public_tracker, seeder, started_peer, }; use crate::core::announce_handler::PeersWanted; + use crate::core::core_tests::sample_info_hash; #[tokio::test] async fn when_the_peer_is_a_seeder() { @@ -467,8 +462,9 @@ mod tests { use torrust_tracker_test_helpers::configuration; use torrust_tracker_torrent_repository::entry::EntrySync; - use crate::core::announce_handler::tests::the_announce_handler::{peer_ip, sample_info_hash, sample_peer}; + use crate::core::announce_handler::tests::the_announce_handler::{peer_ip, sample_peer}; use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; + use crate::core::core_tests::sample_info_hash; use crate::core::services::initialize_database; use crate::core::torrent::manager::TorrentsManager; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; diff --git a/src/core/core_tests.rs b/src/core/core_tests.rs index 6b9947700..be93fb1dc 100644 --- a/src/core/core_tests.rs +++ b/src/core/core_tests.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::Configuration; use super::announce_handler::AnnounceHandler; @@ -10,6 +11,16 @@ use super::torrent::repository::persisted::DatabasePersistentTorrentRepository; use super::whitelist::repository::in_memory::InMemoryWhitelist; use super::whitelist::{self}; +/// # Panics +/// +/// Will panic if the string representation of the info hash is not a valid info hash. 
+#[must_use] +pub fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") +} + #[must_use] pub fn initialize_handlers(config: &Configuration) -> (Arc, Arc) { let database = initialize_database(config); diff --git a/src/core/torrent/repository/in_memory.rs b/src/core/torrent/repository/in_memory.rs index 50858d4f3..908abd143 100644 --- a/src/core/torrent/repository/in_memory.rs +++ b/src/core/torrent/repository/in_memory.rs @@ -109,18 +109,14 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; - use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::DurationSinceUnixEpoch; + use crate::core::core_tests::sample_info_hash; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - fn sample_info_hash() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() - } - /// Sample peer whose state is not relevant for the tests fn sample_peer() -> Peer { complete_peer() diff --git a/src/core/whitelist/authorization.rs b/src/core/whitelist/authorization.rs index 55410d934..bd85a8d44 100644 --- a/src/core/whitelist/authorization.rs +++ b/src/core/whitelist/authorization.rs @@ -63,7 +63,6 @@ mod tests { use std::sync::Arc; - use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; @@ -105,14 +104,11 @@ mod tests { (announce_handler, whitelist_authorization, whitelist_manager, scrape_handler) } - fn sample_info_hash() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() - } - mod configured_as_whitelisted { mod 
handling_authorization { - use crate::core::whitelist::authorization::tests::{sample_info_hash, whitelisted_tracker}; + use crate::core::core_tests::sample_info_hash; + use crate::core::whitelist::authorization::tests::whitelisted_tracker; #[tokio::test] async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { diff --git a/src/core/whitelist/manager.rs b/src/core/whitelist/manager.rs index 23095cfb7..9a4568f88 100644 --- a/src/core/whitelist/manager.rs +++ b/src/core/whitelist/manager.rs @@ -95,7 +95,6 @@ mod tests { use std::sync::Arc; - use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; @@ -137,14 +136,11 @@ mod tests { (announce_handler, whitelist_authorization, whitelist_manager, scrape_handler) } - fn sample_info_hash() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() - } - mod configured_as_whitelisted { mod handling_the_torrent_whitelist { - use crate::core::whitelist::manager::tests::{sample_info_hash, whitelisted_tracker}; + use crate::core::core_tests::sample_info_hash; + use crate::core::whitelist::manager::tests::whitelisted_tracker; // todo: after extracting the WhitelistManager from the Tracker, // there is no need to use the tracker to test the whitelist. 
@@ -176,7 +172,8 @@ mod tests { } mod persistence { - use crate::core::whitelist::manager::tests::{sample_info_hash, whitelisted_tracker}; + use crate::core::core_tests::sample_info_hash; + use crate::core::whitelist::manager::tests::whitelisted_tracker; #[tokio::test] async fn it_should_load_the_whitelist_from_the_database() { diff --git a/src/core/whitelist/mod.rs b/src/core/whitelist/mod.rs index cd4c238f7..aa06e20cc 100644 --- a/src/core/whitelist/mod.rs +++ b/src/core/whitelist/mod.rs @@ -7,7 +7,6 @@ mod tests { use std::sync::Arc; - use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::app_test::initialize_tracker_dependencies; @@ -49,14 +48,11 @@ mod tests { (announce_handler, whitelist_authorization, whitelist_manager, scrape_handler) } - fn sample_info_hash() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() - } - mod configured_as_whitelisted { mod handling_authorization { - use crate::core::whitelist::tests::{sample_info_hash, whitelisted_tracker}; + use crate::core::core_tests::sample_info_hash; + use crate::core::whitelist::tests::whitelisted_tracker; #[tokio::test] async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { diff --git a/src/core/whitelist/repository/in_memory.rs b/src/core/whitelist/repository/in_memory.rs index 8d919f1e4..f023c1610 100644 --- a/src/core/whitelist/repository/in_memory.rs +++ b/src/core/whitelist/repository/in_memory.rs @@ -32,14 +32,10 @@ impl InMemoryWhitelist { #[cfg(test)] mod tests { - use bittorrent_primitives::info_hash::InfoHash; + use crate::core::core_tests::sample_info_hash; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; - fn sample_info_hash() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() // # DevSkim: ignore DS173237 - } - #[tokio::test] async fn should_allow_adding_a_new_torrent_to_the_whitelist() { let info_hash = sample_info_hash(); diff 
--git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index e70377fd6..c8c2980c3 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -60,7 +60,6 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; - use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; @@ -97,10 +96,6 @@ mod tests { (core_config, announce_handler, stats_event_sender) } - fn sample_info_hash() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() - } - fn sample_peer_using_ipv4() -> peer::Peer { sample_peer() } @@ -140,9 +135,10 @@ mod tests { use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; use crate::app_test::initialize_tracker_dependencies; use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; + use crate::core::core_tests::sample_info_hash; use crate::core::statistics; use crate::servers::http::v1::services::announce::invoke; - use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; + use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_peer}; fn initialize_announce_handler() -> Arc { let config = configuration::ephemeral(); diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 06c21d945..6cd7213be 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -82,6 +82,7 @@ mod tests { use crate::app_test::initialize_tracker_dependencies; use crate::core::announce_handler::AnnounceHandler; + use crate::core::core_tests::sample_info_hash; use crate::core::scrape_handler::ScrapeHandler; fn public_tracker_and_announce_and_scrape_handlers() -> (Arc, Arc) { @@ -112,10 +113,6 @@ mod tests { 
vec![sample_info_hash()] } - fn sample_info_hash() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap() // # DevSkim: ignore DS173237 - } - fn sample_peer() -> peer::Peer { peer::Peer { peer_id: PeerId(*b"-qB00000000000000000"), From b51018fbade39baeab40aabbecc513b95294150f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 28 Jan 2025 10:56:56 +0000 Subject: [PATCH 172/802] refactor: [#1211] extract duplicate code --- src/core/announce_handler.rs | 63 +++--------------------- src/core/core_tests.rs | 61 +++++++++++++++++++++++ src/core/mod.rs | 42 ++-------------- src/core/torrent/repository/in_memory.rs | 39 +-------------- 4 files changed, 73 insertions(+), 132 deletions(-) diff --git a/src/core/announce_handler.rs b/src/core/announce_handler.rs index e19a1798b..1a5f84d47 100644 --- a/src/core/announce_handler.rs +++ b/src/core/announce_handler.rs @@ -190,11 +190,6 @@ mod tests { IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) } - /// Sample peer whose state is not relevant for the tests - fn sample_peer() -> Peer { - complete_peer() - } - /// Sample peer when for tests that need more than one peer fn sample_peer_1() -> Peer { Peer { @@ -221,50 +216,6 @@ mod tests { } } - fn seeder() -> Peer { - complete_peer() - } - - fn leecher() -> Peer { - incomplete_peer() - } - - fn started_peer() -> Peer { - incomplete_peer() - } - - fn completed_peer() -> Peer { - complete_peer() - } - - /// A peer that counts as `complete` is swarm metadata - /// IMPORTANT!: it only counts if the it has been announce at least once before - /// announcing the `AnnounceEvent::Completed` event. 
- fn complete_peer() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - } - } - - /// A peer that counts as `incomplete` is swarm metadata - fn incomplete_peer() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(1000), // Still bytes to download - event: AnnounceEvent::Started, - } - } - mod for_all_tracker_config_modes { mod handling_an_announce_request { @@ -272,10 +223,10 @@ mod tests { use std::sync::Arc; use crate::core::announce_handler::tests::the_announce_handler::{ - peer_ip, public_tracker, sample_peer, sample_peer_1, sample_peer_2, + peer_ip, public_tracker, sample_peer_1, sample_peer_2, }; use crate::core::announce_handler::PeersWanted; - use crate::core::core_tests::sample_info_hash; + use crate::core::core_tests::{sample_info_hash, sample_peer}; mod should_assign_the_ip_to_the_peer { @@ -406,11 +357,9 @@ mod tests { mod it_should_update_the_swarm_stats_for_the_torrent { - use crate::core::announce_handler::tests::the_announce_handler::{ - completed_peer, leecher, peer_ip, public_tracker, seeder, started_peer, - }; + use crate::core::announce_handler::tests::the_announce_handler::{peer_ip, public_tracker}; use crate::core::announce_handler::PeersWanted; - use crate::core::core_tests::sample_info_hash; + use crate::core::core_tests::{completed_peer, leecher, sample_info_hash, seeder, started_peer}; #[tokio::test] async fn when_the_peer_is_a_seeder() { @@ -462,9 +411,9 @@ mod tests { use 
torrust_tracker_test_helpers::configuration; use torrust_tracker_torrent_repository::entry::EntrySync; - use crate::core::announce_handler::tests::the_announce_handler::{peer_ip, sample_peer}; + use crate::core::announce_handler::tests::the_announce_handler::peer_ip; use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; - use crate::core::core_tests::sample_info_hash; + use crate::core::core_tests::{sample_info_hash, sample_peer}; use crate::core::services::initialize_database; use crate::core::torrent::manager::TorrentsManager; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; diff --git a/src/core/core_tests.rs b/src/core/core_tests.rs index be93fb1dc..037dab5dd 100644 --- a/src/core/core_tests.rs +++ b/src/core/core_tests.rs @@ -1,7 +1,12 @@ +//! Some generic test helpers functions. +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::Configuration; +use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::announce_handler::AnnounceHandler; use super::scrape_handler::ScrapeHandler; @@ -21,6 +26,62 @@ pub fn sample_info_hash() -> InfoHash { .expect("String should be a valid info hash") } +/// Sample peer whose state is not relevant for the tests. +#[must_use] +pub fn sample_peer() -> Peer { + complete_peer() +} + +#[must_use] +pub fn seeder() -> Peer { + complete_peer() +} + +#[must_use] +pub fn leecher() -> Peer { + incomplete_peer() +} + +#[must_use] +pub fn started_peer() -> Peer { + incomplete_peer() +} + +#[must_use] +pub fn completed_peer() -> Peer { + complete_peer() +} + +/// A peer that counts as `complete` is swarm metadata +/// IMPORTANT!: it only counts if the it has been announce at least once before +/// announcing the `AnnounceEvent::Completed` event. 
+#[must_use] +pub fn complete_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } +} + +/// A peer that counts as `incomplete` is swarm metadata +#[must_use] +pub fn incomplete_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(1000), // Still bytes to download + event: AnnounceEvent::Started, + } +} + #[must_use] pub fn initialize_handlers(config: &Configuration) -> (Arc, Arc) { let database = initialize_database(config); diff --git a/src/core/mod.rs b/src/core/mod.rs index 581dd02f6..77d8e1450 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -455,14 +455,10 @@ pub mod peer_tests; #[cfg(test)] mod tests { mod the_tracker { - - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::net::{IpAddr, Ipv4Addr}; use std::str::FromStr; use std::sync::Arc; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; - use torrust_tracker_primitives::peer::Peer; - use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration; use crate::core::announce_handler::AnnounceHandler; @@ -484,34 +480,6 @@ mod tests { IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) } - /// A peer that counts as `complete` is swarm metadata - /// IMPORTANT!: it only counts if the it has been announce at least once before - /// announcing the `AnnounceEvent::Completed` event. 
- fn complete_peer() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - } - } - - /// A peer that counts as `incomplete` is swarm metadata - fn incomplete_peer() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(1000), // Still bytes to download - event: AnnounceEvent::Started, - } - } - mod for_all_config_modes { mod handling_a_scrape_request { @@ -523,7 +491,8 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use crate::core::announce_handler::PeersWanted; - use crate::core::tests::the_tracker::{complete_peer, incomplete_peer, initialize_handlers_for_public_tracker}; + use crate::core::core_tests::{complete_peer, incomplete_peer}; + use crate::core::tests::the_tracker::initialize_handlers_for_public_tracker; #[tokio::test] async fn it_should_return_the_swarm_metadata_for_the_requested_file_if_the_tracker_has_that_torrent() { @@ -577,9 +546,8 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use crate::core::announce_handler::PeersWanted; - use crate::core::tests::the_tracker::{ - complete_peer, incomplete_peer, initialize_handlers_for_listed_tracker, peer_ip, - }; + use crate::core::core_tests::{complete_peer, incomplete_peer}; + use crate::core::tests::the_tracker::{initialize_handlers_for_listed_tracker, peer_ip}; #[tokio::test] async fn it_should_return_the_zeroed_swarm_metadata_for_the_requested_file_if_it_is_not_whitelisted() { diff --git 
a/src/core/torrent/repository/in_memory.rs b/src/core/torrent/repository/in_memory.rs index 908abd143..2e80a2e9b 100644 --- a/src/core/torrent/repository/in_memory.rs +++ b/src/core/torrent/repository/in_memory.rs @@ -114,46 +114,9 @@ mod tests { use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::core::core_tests::sample_info_hash; + use crate::core::core_tests::{leecher, sample_info_hash, sample_peer}; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - /// Sample peer whose state is not relevant for the tests - fn sample_peer() -> Peer { - complete_peer() - } - - fn leecher() -> Peer { - incomplete_peer() - } - - /// A peer that counts as `complete` is swarm metadata - /// IMPORTANT!: it only counts if the it has been announce at least once before - /// announcing the `AnnounceEvent::Completed` event. - fn complete_peer() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - } - } - - /// A peer that counts as `incomplete` is swarm metadata - fn incomplete_peer() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(1000), // Still bytes to download - event: AnnounceEvent::Started, - } - } - /// It generates a peer id from a number where the number is the last /// part of the peer ID. For example, for `12` it returns /// `-qB00000000000000012`. 
From 7fa2b15840875c4e30cebed7cae6ba370c5ad8c7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 28 Jan 2025 11:06:41 +0000 Subject: [PATCH 173/802] refactor: [#1211] clean tests in core::whitelist::authorization --- src/core/whitelist/authorization.rs | 45 +++++++---------------------- 1 file changed, 11 insertions(+), 34 deletions(-) diff --git a/src/core/whitelist/authorization.rs b/src/core/whitelist/authorization.rs index bd85a8d44..9d51601fd 100644 --- a/src/core/whitelist/authorization.rs +++ b/src/core/whitelist/authorization.rs @@ -65,54 +65,31 @@ mod tests { use torrust_tracker_test_helpers::configuration; - use crate::app_test::initialize_tracker_dependencies; - use crate::core::announce_handler::AnnounceHandler; - use crate::core::scrape_handler::ScrapeHandler; - use crate::core::services::initialize_whitelist_manager; - use crate::core::whitelist; + use super::Authorization; + use crate::core::services::{initialize_database, initialize_whitelist_manager}; use crate::core::whitelist::manager::WhiteListManager; + use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; - #[allow(clippy::type_complexity)] - fn whitelisted_tracker() -> ( - Arc, - Arc, - Arc, - Arc, - ) { + fn initialize_whitelist_services() -> (Arc, Arc) { let config = configuration::ephemeral_listed(); - let ( - database, - in_memory_whitelist, - whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(&config); - + let database = initialize_database(&config); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(Authorization::new(&config.core, &in_memory_whitelist.clone())); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let announce_handler = Arc::new(AnnounceHandler::new( - &config.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); 
- - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - (announce_handler, whitelist_authorization, whitelist_manager, scrape_handler) + (whitelist_authorization, whitelist_manager) } mod configured_as_whitelisted { mod handling_authorization { use crate::core::core_tests::sample_info_hash; - use crate::core::whitelist::authorization::tests::whitelisted_tracker; + use crate::core::whitelist::authorization::tests::initialize_whitelist_services; #[tokio::test] async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { - let (_announce_handler, whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); + let (whitelist_authorization, whitelist_manager) = initialize_whitelist_services(); let info_hash = sample_info_hash(); @@ -125,7 +102,7 @@ mod tests { #[tokio::test] async fn it_should_not_authorize_the_announce_and_scrape_actions_on_not_whitelisted_torrents() { - let (_announce_handler, whitelist_authorization, _whitelist_manager, _scrape_handler) = whitelisted_tracker(); + let (whitelist_authorization, _whitelist_manager) = initialize_whitelist_services(); let info_hash = sample_info_hash(); From 69d4505057affbb0995db9d08e168a92980bafd7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 28 Jan 2025 11:08:15 +0000 Subject: [PATCH 174/802] refactor: [#1211] rename type to WhitelistAuthorization --- src/app_test.rs | 4 ++-- src/bootstrap/app.rs | 2 +- src/bootstrap/jobs/http_tracker.rs | 4 ++-- src/bootstrap/jobs/udp_tracker.rs | 2 +- src/container.rs | 2 +- src/core/core_tests.rs | 2 +- src/core/scrape_handler.rs | 6 +++--- src/core/whitelist/authorization.rs | 10 +++++----- src/core/whitelist/manager.rs | 2 +- src/core/whitelist/mod.rs | 2 +- src/servers/http/server.rs | 4 ++-- src/servers/http/v1/handlers/announce.rs | 10 +++++----- src/servers/http/v1/routes.rs | 2 +- src/servers/udp/handlers.rs | 14 +++++++------- 
src/servers/udp/server/launcher.rs | 4 ++-- src/servers/udp/server/processor.rs | 4 ++-- src/servers/udp/server/spawner.rs | 2 +- src/servers/udp/server/states.rs | 2 +- tests/servers/http/environment.rs | 2 +- tests/servers/udp/environment.rs | 2 +- 20 files changed, 41 insertions(+), 41 deletions(-) diff --git a/src/app_test.rs b/src/app_test.rs index 5f189f391..fb1dd01c8 100644 --- a/src/app_test.rs +++ b/src/app_test.rs @@ -23,7 +23,7 @@ pub fn initialize_tracker_dependencies( ) -> ( Arc>, Arc, - Arc, + Arc, Arc, Arc, Arc, @@ -31,7 +31,7 @@ pub fn initialize_tracker_dependencies( ) { let database = initialize_database(config); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( + let whitelist_authorization = Arc::new(whitelist::authorization::WhitelistAuthorization::new( &config.core, &in_memory_whitelist.clone(), )); diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index da63048e0..c69162322 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -93,7 +93,7 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let database = initialize_database(configuration); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( + let whitelist_authorization = Arc::new(whitelist::authorization::WhitelistAuthorization::new( &configuration.core, &in_memory_whitelist.clone(), )); diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 4a3aa7a9f..dc6ed6b60 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -52,7 +52,7 @@ pub async fn start_job( announce_handler: Arc, scrape_handler: Arc, authentication_service: Arc, - whitelist_authorization: Arc, + 
whitelist_authorization: Arc, stats_event_sender: Arc>>, form: ServiceRegistrationForm, version: Version, @@ -99,7 +99,7 @@ async fn start_v1( announce_handler: Arc, scrape_handler: Arc, authentication_service: Arc, - whitelist_authorization: Arc, + whitelist_authorization: Arc, stats_event_sender: Arc>>, form: ServiceRegistrationForm, ) -> JoinHandle<()> { diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 3679c3195..4f54ecb59 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -49,7 +49,7 @@ pub async fn start_job( config: &UdpTracker, announce_handler: Arc, scrape_handler: Arc, - whitelist_authorization: Arc, + whitelist_authorization: Arc, stats_event_sender: Arc>>, ban_service: Arc>, form: ServiceRegistrationForm, diff --git a/src/container.rs b/src/container.rs index 544abd02e..d8fae07e5 100644 --- a/src/container.rs +++ b/src/container.rs @@ -22,7 +22,7 @@ pub struct AppContainer { pub scrape_handler: Arc, pub keys_handler: Arc, pub authentication_service: Arc, - pub whitelist_authorization: Arc, + pub whitelist_authorization: Arc, pub ban_service: Arc>, pub stats_event_sender: Arc>>, pub stats_repository: Arc, diff --git a/src/core/core_tests.rs b/src/core/core_tests.rs index 037dab5dd..45949bae2 100644 --- a/src/core/core_tests.rs +++ b/src/core/core_tests.rs @@ -86,7 +86,7 @@ pub fn incomplete_peer() -> Peer { pub fn initialize_handlers(config: &Configuration) -> (Arc, Arc) { let database = initialize_database(config); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( + let whitelist_authorization = Arc::new(whitelist::authorization::WhitelistAuthorization::new( &config.core, &in_memory_whitelist.clone(), )); diff --git a/src/core/scrape_handler.rs b/src/core/scrape_handler.rs index 1d513a5a9..7de82aa06 100644 --- a/src/core/scrape_handler.rs +++ b/src/core/scrape_handler.rs @@ 
-9,7 +9,7 @@ use super::whitelist; pub struct ScrapeHandler { /// The service to check is a torrent is whitelisted. - whitelist_authorization: Arc, + whitelist_authorization: Arc, /// The in-memory torrents repository. in_memory_torrent_repository: Arc, @@ -18,7 +18,7 @@ pub struct ScrapeHandler { impl ScrapeHandler { #[must_use] pub fn new( - whitelist_authorization: &Arc, + whitelist_authorization: &Arc, in_memory_torrent_repository: &Arc, ) -> Self { Self { @@ -62,7 +62,7 @@ mod tests { let config = configuration::ephemeral_public(); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(whitelist::authorization::Authorization::new( + let whitelist_authorization = Arc::new(whitelist::authorization::WhitelistAuthorization::new( &config.core, &in_memory_whitelist.clone(), )); diff --git a/src/core/whitelist/authorization.rs b/src/core/whitelist/authorization.rs index 9d51601fd..e9450270f 100644 --- a/src/core/whitelist/authorization.rs +++ b/src/core/whitelist/authorization.rs @@ -8,7 +8,7 @@ use tracing::instrument; use super::repository::in_memory::InMemoryWhitelist; use crate::core::error::Error; -pub struct Authorization { +pub struct WhitelistAuthorization { /// Core tracker configuration. config: Core, @@ -16,7 +16,7 @@ pub struct Authorization { in_memory_whitelist: Arc, } -impl Authorization { +impl WhitelistAuthorization { /// Creates a new authorization instance. 
pub fn new(config: &Core, in_memory_whitelist: &Arc) -> Self { Self { @@ -65,17 +65,17 @@ mod tests { use torrust_tracker_test_helpers::configuration; - use super::Authorization; + use super::WhitelistAuthorization; use crate::core::services::{initialize_database, initialize_whitelist_manager}; use crate::core::whitelist::manager::WhiteListManager; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; - fn initialize_whitelist_services() -> (Arc, Arc) { + fn initialize_whitelist_services() -> (Arc, Arc) { let config = configuration::ephemeral_listed(); let database = initialize_database(&config); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(Authorization::new(&config.core, &in_memory_whitelist.clone())); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); (whitelist_authorization, whitelist_manager) diff --git a/src/core/whitelist/manager.rs b/src/core/whitelist/manager.rs index 9a4568f88..289dd6d5b 100644 --- a/src/core/whitelist/manager.rs +++ b/src/core/whitelist/manager.rs @@ -107,7 +107,7 @@ mod tests { #[allow(clippy::type_complexity)] fn whitelisted_tracker() -> ( Arc, - Arc, + Arc, Arc, Arc, ) { diff --git a/src/core/whitelist/mod.rs b/src/core/whitelist/mod.rs index aa06e20cc..bdc09d2b1 100644 --- a/src/core/whitelist/mod.rs +++ b/src/core/whitelist/mod.rs @@ -19,7 +19,7 @@ mod tests { #[allow(clippy::type_complexity)] fn whitelisted_tracker() -> ( Arc, - Arc, + Arc, Arc, Arc, ) { diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 28f407ad3..2792697b3 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -63,7 +63,7 @@ impl Launcher { announce_handler: Arc, scrape_handler: Arc, authentication_service: Arc, - whitelist_authorization: Arc, + whitelist_authorization: Arc, 
stats_event_sender: Arc>>, tx_start: Sender, rx_halt: Receiver, @@ -192,7 +192,7 @@ impl HttpServer { announce_handler: Arc, scrape_handler: Arc, authentication_service: Arc, - whitelist_authorization: Arc, + whitelist_authorization: Arc, stats_event_sender: Arc>>, form: ServiceRegistrationForm, ) -> Result, Error> { diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 632688763..247c6b8c6 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -43,7 +43,7 @@ pub async fn handle_without_key( Arc, Arc, Arc, - Arc, + Arc, Arc>>, )>, ExtractRequest(announce_request): ExtractRequest, @@ -73,7 +73,7 @@ pub async fn handle_with_key( Arc, Arc, Arc, - Arc, + Arc, Arc>>, )>, ExtractRequest(announce_request): ExtractRequest, @@ -104,7 +104,7 @@ async fn handle( config: &Arc, announce_handler: &Arc, authentication_service: &Arc, - whitelist_authorization: &Arc, + whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, announce_request: &Announce, client_ip_sources: &ClientIpSources, @@ -139,7 +139,7 @@ async fn handle_announce( core_config: &Arc, announce_handler: &Arc, authentication_service: &Arc, - whitelist_authorization: &Arc, + whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, announce_request: &Announce, client_ip_sources: &ClientIpSources, @@ -265,7 +265,7 @@ mod tests { Arc, Arc, Arc>>, - Arc, + Arc, Arc, ); diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index 757a7d1bd..f80760955 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -49,7 +49,7 @@ pub fn router( announce_handler: Arc, scrape_handler: Arc, authentication_service: Arc, - whitelist_authorization: Arc, + whitelist_authorization: Arc, stats_event_sender: Arc>>, server_socket_addr: SocketAddr, ) -> Router { diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 5589331a7..b96ecc154 100644 --- 
a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -64,7 +64,7 @@ pub(crate) async fn handle_packet( core_config: &Arc, announce_handler: &Arc, scrape_handler: &Arc, - whitelist_authorization: &Arc, + whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, local_addr: SocketAddr, cookie_time_values: CookieTimeValues, @@ -157,7 +157,7 @@ pub async fn handle_request( core_config: &Arc, announce_handler: &Arc, scrape_handler: &Arc, - whitelist_authorization: &Arc, + whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, cookie_time_values: CookieTimeValues, ) -> Result { @@ -247,7 +247,7 @@ pub async fn handle_announce( request: &AnnounceRequest, core_config: &Arc, announce_handler: &Arc, - whitelist_authorization: &Arc, + whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { @@ -517,7 +517,7 @@ mod tests { Arc>>, Arc, Arc, - Arc, + Arc, ); fn tracker_configuration() -> Configuration { @@ -672,7 +672,7 @@ mod tests { Arc, Arc, Arc, - Arc, + Arc, ) { let config = tracker_configuration(); @@ -1088,7 +1088,7 @@ mod tests { async fn announce_a_new_peer_using_ipv4( core_config: Arc, announce_handler: Arc, - whitelist_authorization: Arc, + whitelist_authorization: Arc, ) -> Response { let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); @@ -1426,7 +1426,7 @@ mod tests { async fn announce_a_new_peer_using_ipv6( core_config: Arc, announce_handler: Arc, - whitelist_authorization: Arc, + whitelist_authorization: Arc, ) -> Response { let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index f1b14860d..4aaf87ae2 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -58,7 
+58,7 @@ impl Launcher { core_config: Arc, announce_handler: Arc, scrape_handler: Arc, - whitelist_authorization: Arc, + whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, ban_service: Arc>, bind_to: SocketAddr, @@ -159,7 +159,7 @@ impl Launcher { core_config: Arc, announce_handler: Arc, scrape_handler: Arc, - whitelist_authorization: Arc, + whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, ban_service: Arc>, cookie_lifetime: Duration, diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index 475a36b74..24a34f98d 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -24,7 +24,7 @@ pub struct Processor { core_config: Arc, announce_handler: Arc, scrape_handler: Arc, - whitelist_authorization: Arc, + whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, cookie_lifetime: f64, } @@ -36,7 +36,7 @@ impl Processor { core_config: Arc, announce_handler: Arc, scrape_handler: Arc, - whitelist_authorization: Arc, + whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, cookie_lifetime: f64, ) -> Self { diff --git a/src/servers/udp/server/spawner.rs b/src/servers/udp/server/spawner.rs index 2415b2631..d5fd5d58e 100644 --- a/src/servers/udp/server/spawner.rs +++ b/src/servers/udp/server/spawner.rs @@ -36,7 +36,7 @@ impl Spawner { core_config: Arc, announce_handler: Arc, scrape_handler: Arc, - whitelist_authorization: Arc, + whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, ban_service: Arc>, cookie_lifetime: Duration, diff --git a/src/servers/udp/server/states.rs b/src/servers/udp/server/states.rs index 4d18593fe..9bcde9003 100644 --- a/src/servers/udp/server/states.rs +++ b/src/servers/udp/server/states.rs @@ -74,7 +74,7 @@ impl Server { core_config: Arc, announce_handler: Arc, scrape_handler: Arc, - whitelist_authorization: Arc, + whitelist_authorization: Arc, opt_stats_event_sender: Arc>>, ban_service: Arc>, form: ServiceRegistrationForm, diff --git 
a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 589430848..6c9f8e4b8 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -30,7 +30,7 @@ pub struct Environment { pub authentication_service: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, - pub whitelist_authorization: Arc, + pub whitelist_authorization: Arc, pub whitelist_manager: Arc, pub registar: Registar, pub server: HttpServer, diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index a6ddd7a83..b3a2670e8 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -29,7 +29,7 @@ where pub in_memory_torrent_repository: Arc, pub announce_handler: Arc, pub scrape_handler: Arc, - pub whitelist_authorization: Arc, + pub whitelist_authorization: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, pub ban_service: Arc>, From 115159d1c011d12aebebfba8cdbc9346ecc34c98 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 28 Jan 2025 11:24:17 +0000 Subject: [PATCH 175/802] refactor: [#1211] clean core::whitelist module tests --- src/core/whitelist/authorization.rs | 26 ++------------ src/core/whitelist/manager.rs | 51 +++++---------------------- src/core/whitelist/mod.rs | 50 +++----------------------- src/core/whitelist/whitelist_tests.rs | 26 ++++++++++++++ 4 files changed, 42 insertions(+), 111 deletions(-) create mode 100644 src/core/whitelist/whitelist_tests.rs diff --git a/src/core/whitelist/authorization.rs b/src/core/whitelist/authorization.rs index e9450270f..1a6d8b758 100644 --- a/src/core/whitelist/authorization.rs +++ b/src/core/whitelist/authorization.rs @@ -61,35 +61,15 @@ impl WhitelistAuthorization { #[cfg(test)] mod tests { - use std::sync::Arc; - - use torrust_tracker_test_helpers::configuration; - - use super::WhitelistAuthorization; - use crate::core::services::{initialize_database, initialize_whitelist_manager}; - use 
crate::core::whitelist::manager::WhiteListManager; - use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; - - fn initialize_whitelist_services() -> (Arc, Arc) { - let config = configuration::ephemeral_listed(); - - let database = initialize_database(&config); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); - let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - - (whitelist_authorization, whitelist_manager) - } - mod configured_as_whitelisted { mod handling_authorization { use crate::core::core_tests::sample_info_hash; - use crate::core::whitelist::authorization::tests::initialize_whitelist_services; + use crate::core::whitelist::whitelist_tests::initialize_whitelist_services_for_listed_tracker; #[tokio::test] async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { - let (whitelist_authorization, whitelist_manager) = initialize_whitelist_services(); + let (whitelist_authorization, whitelist_manager) = initialize_whitelist_services_for_listed_tracker(); let info_hash = sample_info_hash(); @@ -102,7 +82,7 @@ mod tests { #[tokio::test] async fn it_should_not_authorize_the_announce_and_scrape_actions_on_not_whitelisted_torrents() { - let (whitelist_authorization, _whitelist_manager) = initialize_whitelist_services(); + let (whitelist_authorization, _whitelist_manager) = initialize_whitelist_services_for_listed_tracker(); let info_hash = sample_info_hash(); diff --git a/src/core/whitelist/manager.rs b/src/core/whitelist/manager.rs index 289dd6d5b..4f4792443 100644 --- a/src/core/whitelist/manager.rs +++ b/src/core/whitelist/manager.rs @@ -97,59 +97,26 @@ mod tests { use torrust_tracker_test_helpers::configuration; - use crate::app_test::initialize_tracker_dependencies; - use crate::core::announce_handler::AnnounceHandler; - use 
crate::core::scrape_handler::ScrapeHandler; - use crate::core::services::initialize_whitelist_manager; - use crate::core::whitelist; use crate::core::whitelist::manager::WhiteListManager; + use crate::core::whitelist::whitelist_tests::initialize_whitelist_services; - #[allow(clippy::type_complexity)] - fn whitelisted_tracker() -> ( - Arc, - Arc, - Arc, - Arc, - ) { + fn initialize_whitelist_manager_for_whitelisted_tracker() -> Arc { let config = configuration::ephemeral_listed(); - let ( - database, - in_memory_whitelist, - whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(&config); - - let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - - let announce_handler = Arc::new(AnnounceHandler::new( - &config.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); + let (_whitelist_authorization, whitelist_manager) = initialize_whitelist_services(&config); - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - (announce_handler, whitelist_authorization, whitelist_manager, scrape_handler) + whitelist_manager } mod configured_as_whitelisted { mod handling_the_torrent_whitelist { use crate::core::core_tests::sample_info_hash; - use crate::core::whitelist::manager::tests::whitelisted_tracker; - - // todo: after extracting the WhitelistManager from the Tracker, - // there is no need to use the tracker to test the whitelist. - // Test not using the `tracker` (`_tracker` variable) should be - // moved to the whitelist module. 
+ use crate::core::whitelist::manager::tests::initialize_whitelist_manager_for_whitelisted_tracker; #[tokio::test] async fn it_should_add_a_torrent_to_the_whitelist() { - let (_announce_handler, _whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); + let whitelist_manager = initialize_whitelist_manager_for_whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -160,7 +127,7 @@ mod tests { #[tokio::test] async fn it_should_remove_a_torrent_from_the_whitelist() { - let (_announce_handler, _whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); + let whitelist_manager = initialize_whitelist_manager_for_whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -173,11 +140,11 @@ mod tests { mod persistence { use crate::core::core_tests::sample_info_hash; - use crate::core::whitelist::manager::tests::whitelisted_tracker; + use crate::core::whitelist::manager::tests::initialize_whitelist_manager_for_whitelisted_tracker; #[tokio::test] async fn it_should_load_the_whitelist_from_the_database() { - let (_announce_handler, _whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); + let whitelist_manager = initialize_whitelist_manager_for_whitelisted_tracker(); let info_hash = sample_info_hash(); diff --git a/src/core/whitelist/mod.rs b/src/core/whitelist/mod.rs index bdc09d2b1..c23740111 100644 --- a/src/core/whitelist/mod.rs +++ b/src/core/whitelist/mod.rs @@ -1,62 +1,20 @@ pub mod authorization; pub mod manager; pub mod repository; +pub mod whitelist_tests; #[cfg(test)] mod tests { - use std::sync::Arc; - - use torrust_tracker_test_helpers::configuration; - - use crate::app_test::initialize_tracker_dependencies; - use crate::core::announce_handler::AnnounceHandler; - use crate::core::scrape_handler::ScrapeHandler; - use crate::core::services::initialize_whitelist_manager; - use crate::core::whitelist; - use crate::core::whitelist::manager::WhiteListManager; - - 
#[allow(clippy::type_complexity)] - fn whitelisted_tracker() -> ( - Arc, - Arc, - Arc, - Arc, - ) { - let config = configuration::ephemeral_listed(); - - let ( - database, - in_memory_whitelist, - whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(&config); - - let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - - let announce_handler = Arc::new(AnnounceHandler::new( - &config.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - (announce_handler, whitelist_authorization, whitelist_manager, scrape_handler) - } - mod configured_as_whitelisted { mod handling_authorization { use crate::core::core_tests::sample_info_hash; - use crate::core::whitelist::tests::whitelisted_tracker; + use crate::core::whitelist::whitelist_tests::initialize_whitelist_services_for_listed_tracker; #[tokio::test] async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { - let (_announce_handler, whitelist_authorization, whitelist_manager, _scrape_handler) = whitelisted_tracker(); + let (whitelist_authorization, whitelist_manager) = initialize_whitelist_services_for_listed_tracker(); let info_hash = sample_info_hash(); @@ -69,7 +27,7 @@ mod tests { #[tokio::test] async fn it_should_not_authorize_the_announce_and_scrape_actions_on_not_whitelisted_torrents() { - let (_announce_handler, whitelist_authorization, _whitelist_manager, _scrape_handler) = whitelisted_tracker(); + let (whitelist_authorization, _whitelist_manager) = initialize_whitelist_services_for_listed_tracker(); let info_hash = sample_info_hash(); diff --git a/src/core/whitelist/whitelist_tests.rs b/src/core/whitelist/whitelist_tests.rs new file mode 100644 index 000000000..ceb2ab8a0 --- /dev/null +++ 
b/src/core/whitelist/whitelist_tests.rs @@ -0,0 +1,26 @@ +use std::sync::Arc; + +use torrust_tracker_configuration::Configuration; + +use super::authorization::WhitelistAuthorization; +use super::manager::WhiteListManager; +use super::repository::in_memory::InMemoryWhitelist; +use crate::core::services::{initialize_database, initialize_whitelist_manager}; + +#[must_use] +pub fn initialize_whitelist_services(config: &Configuration) -> (Arc, Arc) { + let database = initialize_database(config); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); + let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + + (whitelist_authorization, whitelist_manager) +} + +#[cfg(test)] +#[must_use] +pub fn initialize_whitelist_services_for_listed_tracker() -> (Arc, Arc) { + use torrust_tracker_test_helpers::configuration; + + initialize_whitelist_services(&configuration::ephemeral_listed()) +} From 7ce52f95dc2af02f36603f31c201905dfa923e9d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 28 Jan 2025 11:36:47 +0000 Subject: [PATCH 176/802] refactor: [#1211] rename type to WhitelistManager --- src/bootstrap/jobs/tracker_apis.rs | 6 +++--- src/container.rs | 4 ++-- src/core/services/mod.rs | 6 +++--- src/core/whitelist/manager.rs | 8 ++++---- src/core/whitelist/whitelist_tests.rs | 6 +++--- src/servers/apis/routes.rs | 4 ++-- src/servers/apis/server.rs | 6 +++--- src/servers/apis/v1/context/whitelist/handlers.rs | 8 ++++---- src/servers/apis/v1/context/whitelist/routes.rs | 4 ++-- src/servers/apis/v1/routes.rs | 4 ++-- src/servers/udp/handlers.rs | 4 ++-- tests/servers/api/environment.rs | 4 ++-- tests/servers/http/environment.rs | 4 ++-- 13 files changed, 34 insertions(+), 34 deletions(-) diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index f735bc4d7..ce6f3912c 100644 --- 
a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -34,7 +34,7 @@ use crate::core::authentication::handler::KeysHandler; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; -use crate::core::whitelist::manager::WhiteListManager; +use crate::core::whitelist::manager::WhitelistManager; use crate::servers::apis::server::{ApiServer, Launcher}; use crate::servers::apis::Version; use crate::servers::registar::ServiceRegistrationForm; @@ -74,7 +74,7 @@ pub async fn start_job( config: &HttpApi, in_memory_torrent_repository: Arc, keys_handler: Arc, - whitelist_manager: Arc, + whitelist_manager: Arc, ban_service: Arc>, stats_event_sender: Arc>>, stats_repository: Arc, @@ -126,7 +126,7 @@ async fn start_v1( tls: Option, in_memory_torrent_repository: Arc, keys_handler: Arc, - whitelist_manager: Arc, + whitelist_manager: Arc, ban_service: Arc>, stats_event_sender: Arc>>, stats_repository: Arc, diff --git a/src/container.rs b/src/container.rs index d8fae07e5..192fa62f1 100644 --- a/src/container.rs +++ b/src/container.rs @@ -13,7 +13,7 @@ use crate::core::torrent::manager::TorrentsManager; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use crate::core::whitelist; -use crate::core::whitelist::manager::WhiteListManager; +use crate::core::whitelist::manager::WhitelistManager; use crate::servers::udp::server::banning::BanService; pub struct AppContainer { @@ -26,7 +26,7 @@ pub struct AppContainer { pub ban_service: Arc>, pub stats_event_sender: Arc>>, pub stats_repository: Arc, - pub whitelist_manager: Arc, + pub whitelist_manager: Arc, pub in_memory_torrent_repository: Arc, pub db_torrent_repository: Arc, pub torrents_manager: Arc, diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index 
73328aaeb..f2ee79993 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -14,7 +14,7 @@ use torrust_tracker_configuration::v2_0_0::database; use torrust_tracker_configuration::Configuration; use super::databases::{self, Database}; -use super::whitelist::manager::WhiteListManager; +use super::whitelist::manager::WhitelistManager; use super::whitelist::repository::in_memory::InMemoryWhitelist; use super::whitelist::repository::persisted::DatabaseWhitelist; @@ -35,7 +35,7 @@ pub fn initialize_database(config: &Configuration) -> Arc> { pub fn initialize_whitelist_manager( database: Arc>, in_memory_whitelist: Arc, -) -> Arc { +) -> Arc { let database_whitelist = Arc::new(DatabaseWhitelist::new(database)); - Arc::new(WhiteListManager::new(database_whitelist, in_memory_whitelist)) + Arc::new(WhitelistManager::new(database_whitelist, in_memory_whitelist)) } diff --git a/src/core/whitelist/manager.rs b/src/core/whitelist/manager.rs index 4f4792443..0d9751994 100644 --- a/src/core/whitelist/manager.rs +++ b/src/core/whitelist/manager.rs @@ -7,7 +7,7 @@ use super::repository::persisted::DatabaseWhitelist; use crate::core::databases; /// It handles the list of allowed torrents. Only for listed trackers. -pub struct WhiteListManager { +pub struct WhitelistManager { /// The in-memory list of allowed torrents. 
in_memory_whitelist: Arc, @@ -15,7 +15,7 @@ pub struct WhiteListManager { database_whitelist: Arc, } -impl WhiteListManager { +impl WhitelistManager { #[must_use] pub fn new(database_whitelist: Arc, in_memory_whitelist: Arc) -> Self { Self { @@ -97,10 +97,10 @@ mod tests { use torrust_tracker_test_helpers::configuration; - use crate::core::whitelist::manager::WhiteListManager; + use crate::core::whitelist::manager::WhitelistManager; use crate::core::whitelist::whitelist_tests::initialize_whitelist_services; - fn initialize_whitelist_manager_for_whitelisted_tracker() -> Arc { + fn initialize_whitelist_manager_for_whitelisted_tracker() -> Arc { let config = configuration::ephemeral_listed(); let (_whitelist_authorization, whitelist_manager) = initialize_whitelist_services(&config); diff --git a/src/core/whitelist/whitelist_tests.rs b/src/core/whitelist/whitelist_tests.rs index ceb2ab8a0..aa9c5ca14 100644 --- a/src/core/whitelist/whitelist_tests.rs +++ b/src/core/whitelist/whitelist_tests.rs @@ -3,12 +3,12 @@ use std::sync::Arc; use torrust_tracker_configuration::Configuration; use super::authorization::WhitelistAuthorization; -use super::manager::WhiteListManager; +use super::manager::WhitelistManager; use super::repository::in_memory::InMemoryWhitelist; use crate::core::services::{initialize_database, initialize_whitelist_manager}; #[must_use] -pub fn initialize_whitelist_services(config: &Configuration) -> (Arc, Arc) { +pub fn initialize_whitelist_services(config: &Configuration) -> (Arc, Arc) { let database = initialize_database(config); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); @@ -19,7 +19,7 @@ pub fn initialize_whitelist_services(config: &Configuration) -> (Arc (Arc, Arc) { +pub fn initialize_whitelist_services_for_listed_tracker() -> (Arc, Arc) { use torrust_tracker_test_helpers::configuration; 
initialize_whitelist_services(&configuration::ephemeral_listed()) diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index c27b5f906..92ecb067d 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -34,7 +34,7 @@ use crate::core::authentication::handler::KeysHandler; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; -use crate::core::whitelist::manager::WhiteListManager; +use crate::core::whitelist::manager::WhitelistManager; use crate::servers::apis::API_LOG_TARGET; use crate::servers::logging::Latency; use crate::servers::udp::server::banning::BanService; @@ -53,7 +53,7 @@ use crate::servers::udp::server::banning::BanService; pub fn router( in_memory_torrent_repository: Arc, keys_handler: Arc, - whitelist_manager: Arc, + whitelist_manager: Arc, ban_service: Arc>, stats_event_sender: Arc>>, stats_repository: Arc, diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index b37f71d5b..b3621de0e 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -43,7 +43,7 @@ use crate::core::authentication::handler::KeysHandler; use crate::core::statistics; use crate::core::statistics::repository::Repository; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; -use crate::core::whitelist::manager::WhiteListManager; +use crate::core::whitelist::manager::WhitelistManager; use crate::servers::apis::API_LOG_TARGET; use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; use crate::servers::logging::STARTED_ON; @@ -134,7 +134,7 @@ impl ApiServer { self, in_memory_torrent_repository: Arc, keys_handler: Arc, - whitelist_manager: Arc, + whitelist_manager: Arc, stats_event_sender: Arc>>, stats_repository: Arc, ban_service: Arc>, @@ -275,7 +275,7 @@ impl Launcher { &self, in_memory_torrent_repository: Arc, keys_handler: Arc, - whitelist_manager: Arc, + 
whitelist_manager: Arc, ban_service: Arc>, stats_event_sender: Arc>>, stats_repository: Arc, diff --git a/src/servers/apis/v1/context/whitelist/handlers.rs b/src/servers/apis/v1/context/whitelist/handlers.rs index 473ed56c5..ebe0bb15c 100644 --- a/src/servers/apis/v1/context/whitelist/handlers.rs +++ b/src/servers/apis/v1/context/whitelist/handlers.rs @@ -10,7 +10,7 @@ use bittorrent_primitives::info_hash::InfoHash; use super::responses::{ failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, }; -use crate::core::whitelist::manager::WhiteListManager; +use crate::core::whitelist::manager::WhitelistManager; use crate::servers::apis::v1::responses::{invalid_info_hash_param_response, ok_response}; use crate::servers::apis::InfoHashParam; @@ -24,7 +24,7 @@ use crate::servers::apis::InfoHashParam; /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::whitelist#add-a-torrent-to-the-whitelist) /// for more information about this endpoint. pub async fn add_torrent_to_whitelist_handler( - State(whitelist_manager): State>, + State(whitelist_manager): State>, Path(info_hash): Path, ) -> Response { match InfoHash::from_str(&info_hash.0) { @@ -47,7 +47,7 @@ pub async fn add_torrent_to_whitelist_handler( /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::whitelist#remove-a-torrent-from-the-whitelist) /// for more information about this endpoint. pub async fn remove_torrent_from_whitelist_handler( - State(whitelist_manager): State>, + State(whitelist_manager): State>, Path(info_hash): Path, ) -> Response { match InfoHash::from_str(&info_hash.0) { @@ -69,7 +69,7 @@ pub async fn remove_torrent_from_whitelist_handler( /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::whitelist#reload-the-whitelist) /// for more information about this endpoint. 
-pub async fn reload_whitelist_handler(State(whitelist_manager): State>) -> Response { +pub async fn reload_whitelist_handler(State(whitelist_manager): State>) -> Response { match whitelist_manager.load_whitelist_from_database().await { Ok(()) => ok_response(), Err(e) => failed_to_reload_whitelist_response(e), diff --git a/src/servers/apis/v1/context/whitelist/routes.rs b/src/servers/apis/v1/context/whitelist/routes.rs index 34f1393b8..5069332af 100644 --- a/src/servers/apis/v1/context/whitelist/routes.rs +++ b/src/servers/apis/v1/context/whitelist/routes.rs @@ -11,10 +11,10 @@ use axum::routing::{delete, get, post}; use axum::Router; use super::handlers::{add_torrent_to_whitelist_handler, reload_whitelist_handler, remove_torrent_from_whitelist_handler}; -use crate::core::whitelist::manager::WhiteListManager; +use crate::core::whitelist::manager::WhitelistManager; /// It adds the routes to the router for the [`whitelist`](crate::servers::apis::v1::context::whitelist) API context. -pub fn add(prefix: &str, router: Router, whitelist_manager: &Arc) -> Router { +pub fn add(prefix: &str, router: Router, whitelist_manager: &Arc) -> Router { let prefix = format!("{prefix}/whitelist"); router diff --git a/src/servers/apis/v1/routes.rs b/src/servers/apis/v1/routes.rs index 8fac453b8..87c28de08 100644 --- a/src/servers/apis/v1/routes.rs +++ b/src/servers/apis/v1/routes.rs @@ -9,7 +9,7 @@ use crate::core::authentication::handler::KeysHandler; use crate::core::statistics::event::sender::Sender; use crate::core::statistics::repository::Repository; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; -use crate::core::whitelist::manager::WhiteListManager; +use crate::core::whitelist::manager::WhitelistManager; use crate::servers::udp::server::banning::BanService; /// Add the routes for the v1 API. 
@@ -19,7 +19,7 @@ pub fn add( router: Router, in_memory_torrent_repository: &Arc, keys_handler: &Arc, - whitelist_manager: &Arc, + whitelist_manager: &Arc, ban_service: Arc>, stats_event_sender: Arc>>, stats_repository: Arc, diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index b96ecc154..2e753404d 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -505,7 +505,7 @@ mod tests { use crate::core::statistics::event::sender::Sender; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::whitelist; - use crate::core::whitelist::manager::WhiteListManager; + use crate::core::whitelist::manager::WhitelistManager; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::CurrentClock; @@ -516,7 +516,7 @@ mod tests { Arc, Arc>>, Arc, - Arc, + Arc, Arc, ); diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 3488456e7..66018032e 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -14,7 +14,7 @@ use torrust_tracker_lib::core::databases::Database; use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::torrent::repository::in_memory::InMemoryTorrentRepository; -use torrust_tracker_lib::core::whitelist::manager::WhiteListManager; +use torrust_tracker_lib::core::whitelist::manager::WhitelistManager; use torrust_tracker_lib::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_lib::servers::udp::server::banning::BanService; @@ -31,7 +31,7 @@ where pub authentication_service: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, - pub whitelist_manager: Arc, + pub whitelist_manager: Arc, pub ban_service: Arc>, pub registar: Registar, pub server: ApiServer, diff --git a/tests/servers/http/environment.rs 
b/tests/servers/http/environment.rs index 6c9f8e4b8..5bf1d1c65 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -14,7 +14,7 @@ use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::torrent::repository::in_memory::InMemoryTorrentRepository; use torrust_tracker_lib::core::whitelist; -use torrust_tracker_lib::core::whitelist::manager::WhiteListManager; +use torrust_tracker_lib::core::whitelist::manager::WhitelistManager; use torrust_tracker_lib::servers::http::server::{HttpServer, Launcher, Running, Stopped}; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_primitives::peer; @@ -31,7 +31,7 @@ pub struct Environment { pub stats_event_sender: Arc>>, pub stats_repository: Arc, pub whitelist_authorization: Arc, - pub whitelist_manager: Arc, + pub whitelist_manager: Arc, pub registar: Registar, pub server: HttpServer, } From 8f02fb936b108e1e9389f8a2e0f845e1b348102a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 28 Jan 2025 16:08:50 +0000 Subject: [PATCH 177/802] refactor: [#1215] instantiate only needed services --- src/app_test.rs | 62 ---- src/core/services/statistics/mod.rs | 14 +- src/core/services/torrent.rs | 55 +-- src/lib.rs | 1 - src/servers/http/v1/handlers/announce.rs | 165 ++++----- src/servers/http/v1/handlers/scrape.rs | 203 ++++------ src/servers/http/v1/services/announce.rs | 62 ++-- src/servers/http/v1/services/scrape.rs | 46 +-- src/servers/udp/handlers.rs | 453 +++++++++-------------- 9 files changed, 359 insertions(+), 702 deletions(-) delete mode 100644 src/app_test.rs diff --git a/src/app_test.rs b/src/app_test.rs deleted file mode 100644 index fb1dd01c8..000000000 --- a/src/app_test.rs +++ /dev/null @@ -1,62 +0,0 @@ -//! This file contains only functions used for testing. 
-use std::sync::Arc; - -use torrust_tracker_configuration::Configuration; - -use crate::core::authentication::handler::KeysHandler; -use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; -use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; -use crate::core::authentication::service::{self, AuthenticationService}; -use crate::core::databases::Database; -use crate::core::services::initialize_database; -use crate::core::torrent::manager::TorrentsManager; -use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; -use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; -use crate::core::whitelist; -use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; - -/// Initialize the tracker dependencies. -#[allow(clippy::type_complexity)] -#[must_use] -pub fn initialize_tracker_dependencies( - config: &Configuration, -) -> ( - Arc>, - Arc, - Arc, - Arc, - Arc, - Arc, - Arc, -) { - let database = initialize_database(config); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(whitelist::authorization::WhitelistAuthorization::new( - &config.core, - &in_memory_whitelist.clone(), - )); - let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); - let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(service::AuthenticationService::new(&config.core, &in_memory_key_repository)); - let _keys_handler = Arc::new(KeysHandler::new( - &db_key_repository.clone(), - &in_memory_key_repository.clone(), - )); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - let torrents_manager = Arc::new(TorrentsManager::new( - &config.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - ( - database, - 
in_memory_whitelist, - whitelist_authorization, - authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - torrents_manager, - ) -} diff --git a/src/core/services/statistics/mod.rs b/src/core/services/statistics/mod.rs index 18d96605e..79bc5f268 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -117,8 +117,8 @@ mod tests { use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_test_helpers::configuration; - use crate::app_test::initialize_tracker_dependencies; use crate::core::services::statistics::{self, get_metrics, TrackerMetrics}; + use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::{self}; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; @@ -131,19 +131,9 @@ mod tests { async fn the_statistics_service_should_return_the_tracker_metrics() { let config = tracker_configuration(); - let ( - _database, - _in_memory_whitelist, - _whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - _db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(&config); - + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let (_stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_repository = Arc::new(stats_repository); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let tracker_metrics = get_metrics( diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 6ae2c26a4..d809fc266 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -112,29 +112,10 @@ pub async fn get_torrents( #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, 
PeerId}; - use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; - use crate::app_test::initialize_tracker_dependencies; - use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - - fn initialize_in_memory_torrent_repository(config: &Configuration) -> Arc { - let ( - _database, - _in_memory_whitelist, - _whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - _db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(config); - - in_memory_torrent_repository - } - fn sample_peer() -> peer::Peer { peer::Peer { peer_id: PeerId(*b"-qB00000000000000000"), @@ -153,17 +134,11 @@ mod tests { use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; - use torrust_tracker_configuration::Configuration; - use torrust_tracker_test_helpers::configuration; - use crate::core::services::torrent::tests::{initialize_in_memory_torrent_repository, sample_peer}; + use crate::core::services::torrent::tests::sample_peer; use crate::core::services::torrent::{get_torrent_info, Info}; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - pub fn tracker_configuration() -> Configuration { - configuration::ephemeral() - } - #[tokio::test] async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); @@ -179,9 +154,7 @@ mod tests { #[tokio::test] async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { - let config = tracker_configuration(); - - let in_memory_torrent_repository = initialize_in_memory_torrent_repository(&config); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -210,17 +183,11 @@ mod tests { use std::sync::Arc; use 
bittorrent_primitives::info_hash::InfoHash; - use torrust_tracker_configuration::Configuration; - use torrust_tracker_test_helpers::configuration; - use crate::core::services::torrent::tests::{initialize_in_memory_torrent_repository, sample_peer}; + use crate::core::services::torrent::tests::sample_peer; use crate::core::services::torrent::{get_torrents_page, BasicInfo, Pagination}; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - pub fn tracker_configuration() -> Configuration { - configuration::ephemeral() - } - #[tokio::test] async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); @@ -232,9 +199,7 @@ mod tests { #[tokio::test] async fn should_return_a_summarized_info_for_all_torrents() { - let config = tracker_configuration(); - - let in_memory_torrent_repository = initialize_in_memory_torrent_repository(&config); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); @@ -256,9 +221,7 @@ mod tests { #[tokio::test] async fn should_allow_limiting_the_number_of_torrents_in_the_result() { - let config = tracker_configuration(); - - let in_memory_torrent_repository = initialize_in_memory_torrent_repository(&config); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -279,9 +242,7 @@ mod tests { #[tokio::test] async fn should_allow_using_pagination_in_the_result() { - let config = tracker_configuration(); - - let in_memory_torrent_repository = initialize_in_memory_torrent_repository(&config); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let hash1 = 
"9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); @@ -311,9 +272,7 @@ mod tests { #[tokio::test] async fn should_return_torrents_ordered_by_info_hash() { - let config = tracker_configuration(); - - let in_memory_torrent_repository = initialize_in_memory_torrent_repository(&config); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); diff --git a/src/lib.rs b/src/lib.rs index 8e0e64db0..212430605 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -491,7 +491,6 @@ use torrust_tracker_clock::clock; pub mod app; -pub mod app_test; pub mod bootstrap; pub mod console; pub mod container; diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 247c6b8c6..d6c850327 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -250,76 +250,73 @@ mod tests { use bittorrent_http_protocol::v1::requests::announce::Announce; use bittorrent_http_protocol::v1::responses; use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; - use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_test_helpers::configuration; - use crate::app_test::initialize_tracker_dependencies; use crate::core::announce_handler::AnnounceHandler; + use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::core::authentication::service::AuthenticationService; - use crate::core::services::statistics; + use crate::core::core_tests::sample_info_hash; + use crate::core::services::{initialize_database, statistics}; use crate::core::statistics::event::sender::Sender; - use crate::core::whitelist; - - type TrackerAndDeps = ( - Arc, - Arc, - Arc>>, - Arc, - Arc, - ); - - fn private_tracker() -> 
TrackerAndDeps { - initialize_tracker_and_deps(configuration::ephemeral_private()) + use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use crate::core::whitelist::authorization::WhitelistAuthorization; + use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; + + struct CoreTrackerServices { + pub core_config: Arc, + pub announce_handler: Arc, + pub stats_event_sender: Arc>>, + pub whitelist_authorization: Arc, + pub authentication_service: Arc, } - fn whitelisted_tracker() -> TrackerAndDeps { - initialize_tracker_and_deps(configuration::ephemeral_listed()) + fn initialize_private_tracker() -> CoreTrackerServices { + initialize_core_tracker_services(&configuration::ephemeral_private()) } - fn tracker_on_reverse_proxy() -> TrackerAndDeps { - initialize_tracker_and_deps(configuration::ephemeral_with_reverse_proxy()) + fn initialize_listed_tracker() -> CoreTrackerServices { + initialize_core_tracker_services(&configuration::ephemeral_listed()) } - fn tracker_not_on_reverse_proxy() -> TrackerAndDeps { - initialize_tracker_and_deps(configuration::ephemeral_without_reverse_proxy()) + fn initialize_tracker_on_reverse_proxy() -> CoreTrackerServices { + initialize_core_tracker_services(&configuration::ephemeral_with_reverse_proxy()) } - /// Initialize tracker's dependencies and tracker. 
- fn initialize_tracker_and_deps(config: Configuration) -> TrackerAndDeps { - let ( - _database, - _in_memory_whitelist, - whitelist_authorization, - authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(&config); + fn initialize_tracker_not_on_reverse_proxy() -> CoreTrackerServices { + initialize_core_tracker_services(&configuration::ephemeral_without_reverse_proxy()) + } + fn initialize_core_tracker_services(config: &Configuration) -> CoreTrackerServices { + let core_config = Arc::new(config.core.clone()); + let database = initialize_database(config); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication_service = Arc::new(AuthenticationService::new(&config.core, &in_memory_key_repository)); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); - let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &in_memory_torrent_repository, &db_torrent_repository, )); - let config = Arc::new(config.core); - - ( - config, + CoreTrackerServices { + core_config, announce_handler, stats_event_sender, whitelist_authorization, authentication_service, - ) + } } fn sample_announce_request() -> Announce { Announce { - info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + info_hash: sample_info_hash(), peer_id: PeerId(*b"-qB00000000000000001"), port: 17548, downloaded: None, @@ -348,28 +345,24 @@ mod tests { mod with_tracker_in_private_mode { use 
std::str::FromStr; - use std::sync::Arc; - use super::{private_tracker, sample_announce_request, sample_client_ip_sources}; + use super::{initialize_private_tracker, sample_announce_request, sample_client_ip_sources}; use crate::core::authentication; use crate::servers::http::v1::handlers::announce::handle_announce; use crate::servers::http::v1::handlers::announce::tests::assert_error_response; #[tokio::test] async fn it_should_fail_when_the_authentication_key_is_missing() { - let (config, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = - private_tracker(); - - let stats_event_sender = Arc::new(stats_event_sender); + let core_tracker_services = initialize_private_tracker(); let maybe_key = None; let response = handle_announce( - &config, - &announce_handler, - &authentication_service, - &whitelist_authorization, - &stats_event_sender, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.authentication_service, + &core_tracker_services.whitelist_authorization, + &core_tracker_services.stats_event_sender, &sample_announce_request(), &sample_client_ip_sources(), maybe_key, @@ -385,21 +378,18 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_authentication_key_is_invalid() { - let (config, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = - private_tracker(); - - let stats_event_sender = Arc::new(stats_event_sender); + let core_tracker_services = initialize_private_tracker(); let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); let maybe_key = Some(unregistered_key); let response = handle_announce( - &config, - &announce_handler, - &authentication_service, - &whitelist_authorization, - &stats_event_sender, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.authentication_service, + 
&core_tracker_services.whitelist_authorization, + &core_tracker_services.stats_event_sender, &sample_announce_request(), &sample_client_ip_sources(), maybe_key, @@ -413,27 +403,22 @@ mod tests { mod with_tracker_in_listed_mode { - use std::sync::Arc; - - use super::{sample_announce_request, sample_client_ip_sources, whitelisted_tracker}; + use super::{initialize_listed_tracker, sample_announce_request, sample_client_ip_sources}; use crate::servers::http::v1::handlers::announce::handle_announce; use crate::servers::http::v1::handlers::announce::tests::assert_error_response; #[tokio::test] async fn it_should_fail_when_the_announced_torrent_is_not_whitelisted() { - let (config, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = - whitelisted_tracker(); - - let stats_event_sender = Arc::new(stats_event_sender); + let core_tracker_services = initialize_listed_tracker(); let announce_request = sample_announce_request(); let response = handle_announce( - &config, - &announce_handler, - &authentication_service, - &whitelist_authorization, - &stats_event_sender, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.authentication_service, + &core_tracker_services.whitelist_authorization, + &core_tracker_services.stats_event_sender, &announce_request, &sample_client_ip_sources(), None, @@ -453,20 +438,15 @@ mod tests { mod with_tracker_on_reverse_proxy { - use std::sync::Arc; - use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; - use super::{sample_announce_request, tracker_on_reverse_proxy}; + use super::{initialize_tracker_on_reverse_proxy, sample_announce_request}; use crate::servers::http::v1::handlers::announce::handle_announce; use crate::servers::http::v1::handlers::announce::tests::assert_error_response; #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { - let (config, announce_handler, 
stats_event_sender, whitelist_authorization, authentication_service) = - tracker_on_reverse_proxy(); - - let stats_event_sender = Arc::new(stats_event_sender); + let core_tracker_services = initialize_tracker_on_reverse_proxy(); let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, @@ -474,11 +454,11 @@ mod tests { }; let response = handle_announce( - &config, - &announce_handler, - &authentication_service, - &whitelist_authorization, - &stats_event_sender, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.authentication_service, + &core_tracker_services.whitelist_authorization, + &core_tracker_services.stats_event_sender, &sample_announce_request(), &client_ip_sources, None, @@ -495,20 +475,15 @@ mod tests { mod with_tracker_not_on_reverse_proxy { - use std::sync::Arc; - use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; - use super::{sample_announce_request, tracker_not_on_reverse_proxy}; + use super::{initialize_tracker_not_on_reverse_proxy, sample_announce_request}; use crate::servers::http::v1::handlers::announce::handle_announce; use crate::servers::http::v1::handlers::announce::tests::assert_error_response; #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { - let (config, announce_handler, stats_event_sender, whitelist_authorization, authentication_service) = - tracker_not_on_reverse_proxy(); - - let stats_event_sender = Arc::new(stats_event_sender); + let core_tracker_services = initialize_tracker_not_on_reverse_proxy(); let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, @@ -516,11 +491,11 @@ mod tests { }; let response = handle_announce( - &config, - &announce_handler, - &authentication_service, - &whitelist_authorization, - &stats_event_sender, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.authentication_service, + 
&core_tracker_services.whitelist_authorization, + &core_tracker_services.stats_event_sender, &sample_announce_request(), &client_ip_sources, None, diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index c4013d8e9..a197263e8 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -171,137 +171,62 @@ mod tests { use bittorrent_http_protocol::v1::responses; use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; use bittorrent_primitives::info_hash::InfoHash; - use torrust_tracker_configuration::Core; + use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_test_helpers::configuration; - use crate::app_test::initialize_tracker_dependencies; + use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::core::authentication::service::AuthenticationService; use crate::core::scrape_handler::ScrapeHandler; use crate::core::services::statistics; - - #[allow(clippy::type_complexity)] - fn private_tracker() -> ( - Arc, - Arc, - Arc>>, - Arc, - ) { - let config = configuration::ephemeral_private(); - - let ( - _database, - _in_memory_whitelist, - whitelist_authorization, - authentication_service, - in_memory_torrent_repository, - _db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(&config); - - let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - - let stats_event_sender = Arc::new(stats_event_sender); - - let core_config = Arc::new(config.core.clone()); - - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - (core_config, scrape_handler, stats_event_sender, authentication_service) + use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::core::whitelist::authorization::WhitelistAuthorization; + use 
crate::core::whitelist::repository::in_memory::InMemoryWhitelist; + + struct CoreTrackerServices { + pub core_config: Arc, + pub scrape_handler: Arc, + pub stats_event_sender: Arc>>, + pub authentication_service: Arc, } - #[allow(clippy::type_complexity)] - fn whitelisted_tracker() -> ( - Arc, - Arc, - Arc>>, - Arc, - ) { - let config = configuration::ephemeral_listed(); - - let ( - _database, - _in_memory_whitelist, - whitelist_authorization, - authentication_service, - in_memory_torrent_repository, - _db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(&config); - - let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - - let stats_event_sender = Arc::new(stats_event_sender); - - let core_config = Arc::new(config.core.clone()); - - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - (core_config, scrape_handler, stats_event_sender, authentication_service) + fn initialize_private_tracker() -> CoreTrackerServices { + initialize_core_tracker_services(&configuration::ephemeral_private()) } - #[allow(clippy::type_complexity)] - fn tracker_on_reverse_proxy() -> ( - Arc, - Arc, - Arc>>, - Arc, - ) { - let config = configuration::ephemeral_with_reverse_proxy(); - - let ( - _database, - _in_memory_whitelist, - whitelist_authorization, - authentication_service, - in_memory_torrent_repository, - _db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(&config); - - let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - - let stats_event_sender = Arc::new(stats_event_sender); - - let core_config = Arc::new(config.core.clone()); - - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - (core_config, scrape_handler, stats_event_sender, authentication_service) + fn 
initialize_listed_tracker() -> CoreTrackerServices { + initialize_core_tracker_services(&configuration::ephemeral_listed()) } - #[allow(clippy::type_complexity)] - fn tracker_not_on_reverse_proxy() -> ( - Arc, - Arc, - Arc>>, - Arc, - ) { - let config = configuration::ephemeral_without_reverse_proxy(); + fn initialize_tracker_on_reverse_proxy() -> CoreTrackerServices { + initialize_core_tracker_services(&configuration::ephemeral_with_reverse_proxy()) + } - let ( - _database, - _in_memory_whitelist, - whitelist_authorization, - authentication_service, - in_memory_torrent_repository, - _db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(&config); + fn initialize_tracker_not_on_reverse_proxy() -> CoreTrackerServices { + initialize_core_tracker_services(&configuration::ephemeral_without_reverse_proxy()) + } + fn initialize_core_tracker_services(config: &Configuration) -> CoreTrackerServices { + let core_config = Arc::new(config.core.clone()); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication_service = Arc::new(AuthenticationService::new(&config.core, &in_memory_key_repository)); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - let stats_event_sender = Arc::new(stats_event_sender); - - let core_config = Arc::new(config.core.clone()); - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - (core_config, scrape_handler, stats_event_sender, authentication_service) + CoreTrackerServices { + core_config, + scrape_handler, + stats_event_sender, + authentication_service, + } } fn sample_scrape_request() -> Scrape { 
Scrape { - info_hashes: vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()], // # DevSkim: ignore DS173237 + info_hashes: vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()], // DevSkim: ignore DS173237 } } @@ -324,22 +249,22 @@ mod tests { use torrust_tracker_primitives::core::ScrapeData; - use super::{private_tracker, sample_client_ip_sources, sample_scrape_request}; + use super::{initialize_private_tracker, sample_client_ip_sources, sample_scrape_request}; use crate::core::authentication; use crate::servers::http::v1::handlers::scrape::handle_scrape; #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_missing() { - let (core_config, scrape_handler, stats_event_sender, authentication_service) = private_tracker(); + let core_tracker_services = initialize_private_tracker(); let scrape_request = sample_scrape_request(); let maybe_key = None; let scrape_data = handle_scrape( - &core_config, - &scrape_handler, - &authentication_service, - &stats_event_sender, + &core_tracker_services.core_config, + &core_tracker_services.scrape_handler, + &core_tracker_services.authentication_service, + &core_tracker_services.stats_event_sender, &scrape_request, &sample_client_ip_sources(), maybe_key, @@ -354,17 +279,17 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_invalid() { - let (core_config, scrape_handler, stats_event_sender, authentication_service) = private_tracker(); + let core_tracker_services = initialize_private_tracker(); let scrape_request = sample_scrape_request(); let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); let maybe_key = Some(unregistered_key); let scrape_data = handle_scrape( - &core_config, - &scrape_handler, - &authentication_service, - &stats_event_sender, + &core_tracker_services.core_config, + &core_tracker_services.scrape_handler, + 
&core_tracker_services.authentication_service, + &core_tracker_services.stats_event_sender, &scrape_request, &sample_client_ip_sources(), maybe_key, @@ -382,20 +307,20 @@ mod tests { use torrust_tracker_primitives::core::ScrapeData; - use super::{sample_client_ip_sources, sample_scrape_request, whitelisted_tracker}; + use super::{initialize_listed_tracker, sample_client_ip_sources, sample_scrape_request}; use crate::servers::http::v1::handlers::scrape::handle_scrape; #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_torrent_is_not_whitelisted() { - let (core_config, scrape_handler, stats_event_sender, authentication_service) = whitelisted_tracker(); + let core_tracker_services = initialize_listed_tracker(); let scrape_request = sample_scrape_request(); let scrape_data = handle_scrape( - &core_config, - &scrape_handler, - &authentication_service, - &stats_event_sender, + &core_tracker_services.core_config, + &core_tracker_services.scrape_handler, + &core_tracker_services.authentication_service, + &core_tracker_services.stats_event_sender, &scrape_request, &sample_client_ip_sources(), None, @@ -413,13 +338,13 @@ mod tests { use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; - use super::{sample_scrape_request, tracker_on_reverse_proxy}; + use super::{initialize_tracker_on_reverse_proxy, sample_scrape_request}; use crate::servers::http::v1::handlers::scrape::handle_scrape; use crate::servers::http::v1::handlers::scrape::tests::assert_error_response; #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { - let (core_config, scrape_handler, stats_event_sender, authentication_service) = tracker_on_reverse_proxy(); + let core_tracker_services = initialize_tracker_on_reverse_proxy(); let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, @@ -427,10 +352,10 @@ mod tests { }; let response = handle_scrape( - &core_config, - &scrape_handler, - 
&authentication_service, - &stats_event_sender, + &core_tracker_services.core_config, + &core_tracker_services.scrape_handler, + &core_tracker_services.authentication_service, + &core_tracker_services.stats_event_sender, &sample_scrape_request(), &client_ip_sources, None, @@ -449,13 +374,13 @@ mod tests { use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; - use super::{sample_scrape_request, tracker_not_on_reverse_proxy}; + use super::{initialize_tracker_not_on_reverse_proxy, sample_scrape_request}; use crate::servers::http::v1::handlers::scrape::handle_scrape; use crate::servers::http::v1::handlers::scrape::tests::assert_error_response; #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { - let (core_config, scrape_handler, stats_event_sender, authentication_service) = tracker_not_on_reverse_proxy(); + let core_tracker_services = initialize_tracker_not_on_reverse_proxy(); let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, @@ -463,10 +388,10 @@ mod tests { }; let response = handle_scrape( - &core_config, - &scrape_handler, - &authentication_service, - &stats_event_sender, + &core_tracker_services.core_config, + &core_tracker_services.scrape_handler, + &core_tracker_services.authentication_service, + &core_tracker_services.stats_event_sender, &sample_scrape_request(), &client_ip_sources, None, diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index c8c2980c3..e96face6a 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -64,36 +64,38 @@ mod tests { use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; - use crate::app_test::initialize_tracker_dependencies; use crate::core::announce_handler::AnnounceHandler; - use crate::core::services::statistics; + use crate::core::services::{initialize_database, 
statistics}; use crate::core::statistics::event::sender::Sender; + use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; - #[allow(clippy::type_complexity)] - fn public_tracker() -> (Arc, Arc, Arc>>) { + struct CoreTrackerServices { + pub core_config: Arc, + pub announce_handler: Arc, + pub stats_event_sender: Arc>>, + } + + fn initialize_core_tracker_services() -> CoreTrackerServices { let config = configuration::ephemeral_public(); - let ( - _database, - _in_memory_whitelist, - _whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(&config); + let core_config = Arc::new(config.core.clone()); + let database = initialize_database(&config); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); - let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &in_memory_torrent_repository, &db_torrent_repository, )); - let core_config = Arc::new(config.core.clone()); - - (core_config, announce_handler, stats_event_sender) + CoreTrackerServices { + core_config, + announce_handler, + stats_event_sender, + } } fn sample_peer_using_ipv4() -> peer::Peer { @@ -133,25 +135,21 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; - use crate::app_test::initialize_tracker_dependencies; use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; use crate::core::core_tests::sample_info_hash; + use crate::core::services::initialize_database; use crate::core::statistics; + use 
crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use crate::servers::http::v1::services::announce::invoke; - use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_peer}; + use crate::servers::http::v1::services::announce::tests::{initialize_core_tracker_services, sample_peer}; fn initialize_announce_handler() -> Arc { let config = configuration::ephemeral(); - let ( - _database, - _in_memory_whitelist, - _whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(&config); + let database = initialize_database(&config); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); Arc::new(AnnounceHandler::new( &config.core, @@ -162,13 +160,13 @@ mod tests { #[tokio::test] async fn it_should_return_the_announce_data() { - let (core_config, announce_handler, stats_event_sender) = public_tracker(); + let core_tracker_services = initialize_core_tracker_services(); let mut peer = sample_peer(); let announce_data = invoke( - announce_handler.clone(), - stats_event_sender.clone(), + core_tracker_services.announce_handler.clone(), + core_tracker_services.stats_event_sender.clone(), sample_info_hash(), &mut peer, &PeersWanted::All, @@ -182,7 +180,7 @@ mod tests { complete: 1, incomplete: 0, }, - policy: core_config.announce_policy, + policy: core_tracker_services.core_config.announce_policy, }; assert_eq!(announce_data, expected_announce_data); diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 6cd7213be..7e65b9442 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -80,30 +80,28 @@ mod tests { use 
torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; - use crate::app_test::initialize_tracker_dependencies; use crate::core::announce_handler::AnnounceHandler; use crate::core::core_tests::sample_info_hash; use crate::core::scrape_handler::ScrapeHandler; + use crate::core::services::initialize_database; + use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use crate::core::whitelist::authorization::WhitelistAuthorization; + use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; - fn public_tracker_and_announce_and_scrape_handlers() -> (Arc, Arc) { + fn initialize_announce_and_scrape_handlers_for_public_tracker() -> (Arc, Arc) { let config = configuration::ephemeral_public(); - let ( - _database, - _in_memory_whitelist, - whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(&config); - + let database = initialize_database(&config); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &in_memory_torrent_repository, &db_torrent_repository, )); - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); (announce_handler, scrape_handler) @@ -128,15 +126,9 @@ mod tests { fn initialize_scrape_handler() -> Arc { let config = configuration::ephemeral(); - let ( - _database, - _in_memory_whitelist, - whitelist_authorization, - _authentication_service, - 
in_memory_torrent_repository, - _db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(&config); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)) } @@ -155,8 +147,8 @@ mod tests { use crate::core::statistics; use crate::servers::http::v1::services::scrape::invoke; use crate::servers::http::v1::services::scrape::tests::{ - initialize_scrape_handler, public_tracker_and_announce_and_scrape_handlers, sample_info_hash, sample_info_hashes, - sample_peer, + initialize_announce_and_scrape_handlers_for_public_tracker, initialize_scrape_handler, sample_info_hash, + sample_info_hashes, sample_peer, }; #[tokio::test] @@ -164,7 +156,7 @@ mod tests { let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); - let (announce_handler, scrape_handler) = public_tracker_and_announce_and_scrape_handlers(); + let (announce_handler, scrape_handler) = initialize_announce_and_scrape_handlers_for_public_tracker(); let info_hash = sample_info_hash(); let info_hashes = vec![info_hash]; @@ -239,7 +231,7 @@ mod tests { use crate::core::statistics; use crate::servers::http::v1::services::scrape::fake; use crate::servers::http::v1::services::scrape::tests::{ - public_tracker_and_announce_and_scrape_handlers, sample_info_hash, sample_info_hashes, sample_peer, + initialize_announce_and_scrape_handlers_for_public_tracker, sample_info_hash, sample_info_hashes, sample_peer, }; #[tokio::test] @@ -247,7 +239,7 @@ mod tests { let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); let stats_event_sender = 
Arc::new(stats_event_sender); - let (announce_handler, _scrape_handler) = public_tracker_and_announce_and_scrape_handlers(); + let (announce_handler, _scrape_handler) = initialize_announce_and_scrape_handlers_for_public_tracker(); let info_hash = sample_info_hash(); let info_hashes = vec![info_hash]; diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 2e753404d..43dc69019 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -498,79 +498,68 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::gen_remote_fingerprint; - use crate::app_test::initialize_tracker_dependencies; use crate::core::announce_handler::AnnounceHandler; use crate::core::scrape_handler::ScrapeHandler; - use crate::core::services::{initialize_whitelist_manager, statistics}; + use crate::core::services::{initialize_database, statistics}; use crate::core::statistics::event::sender::Sender; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use crate::core::whitelist; - use crate::core::whitelist::manager::WhitelistManager; + use crate::core::whitelist::authorization::WhitelistAuthorization; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::CurrentClock; - type TrackerAndDeps = ( - Arc, - Arc, - Arc, - Arc, - Arc>>, - Arc, - Arc, - Arc, - ); - - fn tracker_configuration() -> Configuration { - default_testing_tracker_configuration() + struct CoreTrackerServices { + pub core_config: Arc, + pub announce_handler: Arc, + pub scrape_handler: Arc, + pub in_memory_torrent_repository: Arc, + pub stats_event_sender: Arc>>, + pub in_memory_whitelist: Arc, + pub whitelist_authorization: Arc, } fn default_testing_tracker_configuration() -> Configuration { configuration::ephemeral() } - fn public_tracker() -> TrackerAndDeps { - initialize_tracker_and_deps(&configuration::ephemeral_public()) + fn 
initialize_core_tracker_services_for_default_tracker_configuration() -> CoreTrackerServices { + initialize_core_tracker_services(&default_testing_tracker_configuration()) } - fn whitelisted_tracker() -> TrackerAndDeps { - initialize_tracker_and_deps(&configuration::ephemeral_listed()) + fn initialize_core_tracker_services_for_public_tracker() -> CoreTrackerServices { + initialize_core_tracker_services(&configuration::ephemeral_public()) } - fn initialize_tracker_and_deps(config: &Configuration) -> TrackerAndDeps { - let core_config = Arc::new(config.core.clone()); - - let ( - database, - in_memory_whitelist, - whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(config); + fn initialize_core_tracker_services_for_listed_tracker() -> CoreTrackerServices { + initialize_core_tracker_services(&configuration::ephemeral_listed()) + } + fn initialize_core_tracker_services(config: &Configuration) -> CoreTrackerServices { + let core_config = Arc::new(config.core.clone()); + let database = initialize_database(config); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); - let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &in_memory_torrent_repository, &db_torrent_repository, )); - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, 
&in_memory_torrent_repository)); - ( + CoreTrackerServices { core_config, announce_handler, scrape_handler, in_memory_torrent_repository, stats_event_sender, in_memory_whitelist, - whitelist_manager, whitelist_authorization, - ) + } } fn sample_ipv4_remote_addr() -> SocketAddr { @@ -667,38 +656,6 @@ mod tests { } } - #[allow(clippy::type_complexity)] - fn test_tracker_factory() -> ( - Arc, - Arc, - Arc, - Arc, - ) { - let config = tracker_configuration(); - - let core_config = Arc::new(config.core.clone()); - - let ( - _database, - _in_memory_whitelist, - whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(&config); - - let announce_handler = Arc::new(AnnounceHandler::new( - &config.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - (core_config, announce_handler, scrape_handler, whitelist_authorization) - } - mod connect_request { use std::future; @@ -916,23 +873,15 @@ mod tests { use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ - gen_remote_fingerprint, public_tracker, sample_cookie_valid_range, sample_ipv4_socket_address, sample_issue_time, - test_tracker_factory, TorrentPeerBuilder, + gen_remote_fingerprint, initialize_core_tracker_services_for_default_tracker_configuration, + initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_socket_address, + sample_issue_time, TorrentPeerBuilder, }; use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { - let ( - core_config, - announce_handler, - _scrape_handler, - in_memory_torrent_repository, - 
stats_event_sender, - _in_memory_whitelist, - _whitelist_manager, - whitelist_authorization, - ) = public_tracker(); + let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); let client_ip = Ipv4Addr::new(126, 0, 0, 1); let client_port = 8080; @@ -952,16 +901,18 @@ mod tests { handle_announce( remote_addr, &request, - &core_config, - &announce_handler, - &whitelist_authorization, - &stats_event_sender, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_tracker_services.stats_event_sender, sample_cookie_valid_range(), ) .await .unwrap(); - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); + let peers = core_tracker_services + .in_memory_torrent_repository + .get_torrent_peers(&info_hash.0.into()); let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer_id) @@ -973,16 +924,7 @@ mod tests { #[tokio::test] async fn the_announced_peer_should_not_be_included_in_the_response() { - let ( - core_config, - announce_handler, - _scrape_handler, - _in_memory_torrent_repository, - stats_event_sender, - _in_memory_whitelist, - _whitelist_manager, - whitelist_authorization, - ) = public_tracker(); + let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); @@ -993,10 +935,10 @@ mod tests { let response = handle_announce( remote_addr, &request, - &core_config, - &announce_handler, - &whitelist_authorization, - &stats_event_sender, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_tracker_services.stats_event_sender, sample_cookie_valid_range(), ) .await @@ -1023,16 +965,7 @@ mod tests { // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): // "Do note that most trackers will only honor the IP address field under limited 
circumstances." - let ( - core_config, - announce_handler, - _scrape_handler, - in_memory_torrent_repository, - stats_event_sender, - _in_memory_whitelist, - _whitelist_manager, - whitelist_authorization, - ) = public_tracker(); + let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); @@ -1055,16 +988,18 @@ mod tests { handle_announce( remote_addr, &request, - &core_config, - &announce_handler, - &whitelist_authorization, - &stats_event_sender, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_tracker_services.stats_event_sender, sample_cookie_valid_range(), ) .await .unwrap(); - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); + let peers = core_tracker_services + .in_memory_torrent_repository + .get_torrent_peers(&info_hash.0.into()); assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); } @@ -1113,21 +1048,16 @@ mod tests { #[tokio::test] async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { - let ( - core_config, - announce_handler, - _scrape_handler, - in_memory_torrent_repository, - _stats_event_sender, - _in_memory_whitelist, - _whitelist_manager, - whitelist_authorization, - ) = public_tracker(); - - add_a_torrent_peer_using_ipv6(&in_memory_torrent_repository); - - let response = - announce_a_new_peer_using_ipv4(core_config.clone(), announce_handler.clone(), whitelist_authorization).await; + let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); + + add_a_torrent_peer_using_ipv6(&core_tracker_services.in_memory_torrent_repository); + + let response = announce_a_new_peer_using_ipv4( + core_tracker_services.core_config.clone(), + core_tracker_services.announce_handler.clone(), + 
core_tracker_services.whitelist_authorization, + ) + .await; // The response should not contain the peer using IPV6 let peers: Option>> = match response { @@ -1149,14 +1079,14 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let (core_config, announce_handler, _scrape_handler, whitelist_authorization) = test_tracker_factory(); + let core_tracker_services = initialize_core_tracker_services_for_default_tracker_configuration(); handle_announce( sample_ipv4_socket_address(), &AnnounceRequestBuilder::default().into(), - &core_config, - &announce_handler, - &whitelist_authorization, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), ) @@ -1174,21 +1104,13 @@ mod tests { use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ - gen_remote_fingerprint, public_tracker, sample_cookie_valid_range, sample_issue_time, TorrentPeerBuilder, + gen_remote_fingerprint, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, + sample_issue_time, TorrentPeerBuilder, }; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { - let ( - core_config, - announce_handler, - _scrape_handler, - in_memory_torrent_repository, - stats_event_sender, - _in_memory_whitelist, - _whitelist_manager, - whitelist_authorization, - ) = public_tracker(); + let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); let client_ip = Ipv4Addr::new(127, 0, 0, 1); let client_port = 8080; @@ -1208,18 +1130,20 @@ mod tests { handle_announce( remote_addr, &request, - &core_config, - &announce_handler, - &whitelist_authorization, - &stats_event_sender, + &core_tracker_services.core_config, + 
&core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_tracker_services.stats_event_sender, sample_cookie_valid_range(), ) .await .unwrap(); - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); + let peers = core_tracker_services + .in_memory_torrent_repository + .get_torrent_peers(&info_hash.0.into()); - let external_ip_in_tracker_configuration = core_config.net.external_ip.unwrap(); + let external_ip_in_tracker_configuration = core_tracker_services.core_config.net.external_ip.unwrap(); let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer_id) @@ -1250,23 +1174,15 @@ mod tests { use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ - gen_remote_fingerprint, public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, sample_issue_time, - test_tracker_factory, TorrentPeerBuilder, + gen_remote_fingerprint, initialize_core_tracker_services_for_default_tracker_configuration, + initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, + sample_issue_time, TorrentPeerBuilder, }; use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { - let ( - core_config, - announce_handler, - _scrape_handler, - in_memory_torrent_repository, - stats_event_sender, - _in_memory_whitelist, - _whitelist_manager, - whitelist_authorization, - ) = public_tracker(); + let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); @@ -1287,16 +1203,18 @@ mod tests { handle_announce( remote_addr, &request, - &core_config, - &announce_handler, - &whitelist_authorization, - &stats_event_sender, + 
&core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_tracker_services.stats_event_sender, sample_cookie_valid_range(), ) .await .unwrap(); - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); + let peers = core_tracker_services + .in_memory_torrent_repository + .get_torrent_peers(&info_hash.0.into()); let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer_id) @@ -1308,16 +1226,7 @@ mod tests { #[tokio::test] async fn the_announced_peer_should_not_be_included_in_the_response() { - let ( - core_config, - announce_handler, - _scrape_handler, - _in_memory_torrent_repository, - stats_event_sender, - _in_memory_whitelist, - _whitelist_manager, - whitelist_authorization, - ) = public_tracker(); + let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); @@ -1331,10 +1240,10 @@ mod tests { let response = handle_announce( remote_addr, &request, - &core_config, - &announce_handler, - &whitelist_authorization, - &stats_event_sender, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_tracker_services.stats_event_sender, sample_cookie_valid_range(), ) .await @@ -1361,16 +1270,7 @@ mod tests { // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): // "Do note that most trackers will only honor the IP address field under limited circumstances." 
- let ( - core_config, - announce_handler, - _scrape_handler, - in_memory_torrent_repository, - stats_event_sender, - _in_memory_whitelist, - _whitelist_manager, - whitelist_authorization, - ) = public_tracker(); + let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); @@ -1393,16 +1293,18 @@ mod tests { handle_announce( remote_addr, &request, - &core_config, - &announce_handler, - &whitelist_authorization, - &stats_event_sender, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_tracker_services.stats_event_sender, sample_cookie_valid_range(), ) .await .unwrap(); - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); + let peers = core_tracker_services + .in_memory_torrent_repository + .get_torrent_peers(&info_hash.0.into()); // When using IPv6 the tracker converts the remote client ip into a IPv4 address assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); @@ -1454,21 +1356,16 @@ mod tests { #[tokio::test] async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { - let ( - core_config, - announce_handler, - _scrape_handler, - in_memory_torrent_repository, - _stats_event_sender, - _in_memory_whitelist, - _whitelist_manager, - whitelist_authorization, - ) = public_tracker(); - - add_a_torrent_peer_using_ipv4(&in_memory_torrent_repository); - - let response = - announce_a_new_peer_using_ipv6(core_config.clone(), announce_handler.clone(), whitelist_authorization).await; + let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); + + add_a_torrent_peer_using_ipv4(&core_tracker_services.in_memory_torrent_repository); + + let response = announce_a_new_peer_using_ipv6( + core_tracker_services.core_config.clone(), + 
core_tracker_services.announce_handler.clone(), + core_tracker_services.whitelist_authorization, + ) + .await; // The response should not contain the peer using IPV4 let peers: Option>> = match response { @@ -1490,7 +1387,7 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let (core_config, announce_handler, _scrape_handler, whitelist_authorization) = test_tracker_factory(); + let core_tracker_services = initialize_core_tracker_services_for_default_tracker_configuration(); let remote_addr = sample_ipv6_remote_addr(); @@ -1501,9 +1398,9 @@ mod tests { handle_announce( remote_addr, &announce_request, - &core_config, - &announce_handler, - &whitelist_authorization, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, &stats_event_sender, sample_cookie_valid_range(), ) @@ -1519,9 +1416,13 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; use mockall::predicate::eq; - use crate::app_test::initialize_tracker_dependencies; use crate::core::announce_handler::AnnounceHandler; + use crate::core::services::initialize_database; use crate::core::statistics; + use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use crate::core::whitelist::authorization::WhitelistAuthorization; + use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -1533,15 +1434,12 @@ mod tests { async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let config = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let ( - _database, - 
_in_memory_whitelist, - whitelist_authorization, - _authentication_service, - in_memory_torrent_repository, - db_torrent_repository, - _torrents_manager, - ) = initialize_tracker_dependencies(&config); + let database = initialize_database(&config); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = + Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); stats_event_sender_mock @@ -1625,7 +1523,8 @@ mod tests { use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{ - public_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, sample_issue_time, + initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, + sample_issue_time, }; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { @@ -1638,16 +1537,7 @@ mod tests { #[tokio::test] async fn should_return_no_stats_when_the_tracker_does_not_have_any_torrent() { - let ( - _core_config, - _announce_handler, - scrape_handler, - _in_memory_torrent_repository, - stats_event_sender, - _in_memory_whitelist, - _whitelist_manager, - _whitelist_authorization, - ) = public_tracker(); + let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); let remote_addr = sample_ipv4_remote_addr(); @@ -1663,8 +1553,8 @@ mod tests { let response = handle_scrape( remote_addr, &request, - &scrape_handler, - &stats_event_sender, + &core_tracker_services.scrape_handler, + &core_tracker_services.stats_event_sender, sample_cookie_valid_range(), ) .await @@ -1742,24 +1632,19 @@ mod tests { mod with_a_public_tracker { use 
aquatic_udp_protocol::{NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; - use crate::servers::udp::handlers::tests::public_tracker; + use crate::servers::udp::handlers::tests::initialize_core_tracker_services_for_public_tracker; use crate::servers::udp::handlers::tests::scrape_request::{add_a_sample_seeder_and_scrape, match_scrape_response}; #[tokio::test] async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { - let ( - _core_config, - _announce_handler, - scrape_handler, - in_memory_torrent_repository, - _stats_event_sender, - _in_memory_whitelist, - _whitelist_manager, - _whitelist_authorization, - ) = public_tracker(); + let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); let torrent_stats = match_scrape_response( - add_a_sample_seeder_and_scrape(in_memory_torrent_repository.clone(), scrape_handler.clone()).await, + add_a_sample_seeder_and_scrape( + core_tracker_services.in_memory_torrent_repository.clone(), + core_tracker_services.scrape_handler.clone(), + ) + .await, ); let expected_torrent_stats = vec![TorrentScrapeStatistics { @@ -1779,27 +1664,25 @@ mod tests { use crate::servers::udp::handlers::tests::scrape_request::{ add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; - use crate::servers::udp::handlers::tests::{sample_cookie_valid_range, sample_ipv4_remote_addr, whitelisted_tracker}; + use crate::servers::udp::handlers::tests::{ + initialize_core_tracker_services_for_listed_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, + }; #[tokio::test] async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { - let ( - _core_config, - _announce_handler, - scrape_handler, - in_memory_torrent_repository, - stats_event_sender, - in_memory_whitelist, - _whitelist_manager, - _whitelist_authorization, - ) = whitelisted_tracker(); + let core_tracker_services = initialize_core_tracker_services_for_listed_tracker(); 
let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); - add_a_seeder(in_memory_torrent_repository.clone(), &remote_addr, &info_hash).await; + add_a_seeder( + core_tracker_services.in_memory_torrent_repository.clone(), + &remote_addr, + &info_hash, + ) + .await; - in_memory_whitelist.add(&info_hash.0.into()).await; + core_tracker_services.in_memory_whitelist.add(&info_hash.0.into()).await; let request = build_scrape_request(&remote_addr, &info_hash); @@ -1807,8 +1690,8 @@ mod tests { handle_scrape( remote_addr, &request, - &scrape_handler, - &stats_event_sender, + &core_tracker_services.scrape_handler, + &core_tracker_services.stats_event_sender, sample_cookie_valid_range(), ) .await @@ -1827,21 +1710,17 @@ mod tests { #[tokio::test] async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { - let ( - _core_config, - _announce_handler, - scrape_handler, - in_memory_torrent_repository, - stats_event_sender, - _in_memory_whitelist, - _whitelist_manager, - _whitelist_authorization, - ) = whitelisted_tracker(); + let core_tracker_services = initialize_core_tracker_services_for_listed_tracker(); let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); - add_a_seeder(in_memory_torrent_repository.clone(), &remote_addr, &info_hash).await; + add_a_seeder( + core_tracker_services.in_memory_torrent_repository.clone(), + &remote_addr, + &info_hash, + ) + .await; let request = build_scrape_request(&remote_addr, &info_hash); @@ -1849,8 +1728,8 @@ mod tests { handle_scrape( remote_addr, &request, - &scrape_handler, - &stats_event_sender, + &core_tracker_services.scrape_handler, + &core_tracker_services.stats_event_sender, sample_cookie_valid_range(), ) .await @@ -1885,7 +1764,8 @@ mod tests { use crate::core::statistics; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{ - sample_cookie_valid_range, sample_ipv4_remote_addr, test_tracker_factory, + 
initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, + sample_ipv4_remote_addr, }; #[tokio::test] @@ -1901,12 +1781,12 @@ mod tests { let remote_addr = sample_ipv4_remote_addr(); - let (_core_config, _announce_handler, scrape_handler, _whitelist_authorization) = test_tracker_factory(); + let core_tracker_services = initialize_core_tracker_services_for_default_tracker_configuration(); handle_scrape( remote_addr, &sample_scrape_request(&remote_addr), - &scrape_handler, + &core_tracker_services.scrape_handler, &stats_event_sender, sample_cookie_valid_range(), ) @@ -1925,7 +1805,8 @@ mod tests { use crate::core::statistics; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{ - sample_cookie_valid_range, sample_ipv6_remote_addr, test_tracker_factory, + initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, + sample_ipv6_remote_addr, }; #[tokio::test] @@ -1941,12 +1822,12 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); - let (_core_config, _announce_handler, scrape_handler, _whitelist_authorization) = test_tracker_factory(); + let core_tracker_services = initialize_core_tracker_services_for_default_tracker_configuration(); handle_scrape( remote_addr, &sample_scrape_request(&remote_addr), - &scrape_handler, + &core_tracker_services.scrape_handler, &stats_event_sender, sample_cookie_valid_range(), ) From 5342a5d65af9be07ee6d7cf7101b1e13c4d8204e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 28 Jan 2025 18:48:27 +0000 Subject: [PATCH 178/802] refactor: [#1217] extract UdpTrackerContainer --- src/app.rs | 20 ++----- src/bootstrap/app.rs | 9 ++-- src/bootstrap/jobs/udp_tracker.rs | 45 +++------------- src/console/profiling.rs | 3 ++ src/container.rs | 27 ++++++++++ src/main.rs | 4 ++ src/servers/udp/handlers.rs | 56 ++++++------------- src/servers/udp/server/launcher.rs | 83 ++++++----------------------- src/servers/udp/server/mod.rs | 
35 +++++------- src/servers/udp/server/processor.rs | 44 ++++----------- src/servers/udp/server/spawner.rs | 33 +++--------- src/servers/udp/server/states.rs | 33 +++--------- tests/servers/udp/contract.rs | 2 +- tests/servers/udp/environment.rs | 77 ++++++++++---------------- 14 files changed, 145 insertions(+), 326 deletions(-) diff --git a/src/app.rs b/src/app.rs index 75c2e13bc..e0f231611 100644 --- a/src/app.rs +++ b/src/app.rs @@ -28,7 +28,7 @@ use torrust_tracker_configuration::Configuration; use tracing::instrument; use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; -use crate::container::AppContainer; +use crate::container::{AppContainer, UdpTrackerContainer}; use crate::servers; use crate::servers::registar::Registar; @@ -39,7 +39,7 @@ use crate::servers::registar::Registar; /// - Can't retrieve tracker keys from database. /// - Can't load whitelist from database. #[instrument(skip(config, app_container))] -pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec> { +pub async fn start(config: &Configuration, app_container: &Arc) -> Vec> { if config.http_api.is_none() && (config.udp_trackers.is_none() || config.udp_trackers.as_ref().map_or(true, std::vec::Vec::is_empty)) && (config.http_trackers.is_none() || config.http_trackers.as_ref().map_or(true, std::vec::Vec::is_empty)) @@ -78,19 +78,9 @@ pub async fn start(config: &Configuration, app_container: &AppContainer) -> Vec< udp_tracker_config.bind_address ); } else { - jobs.push( - udp_tracker::start_job( - Arc::new(config.core.clone()), - udp_tracker_config, - app_container.announce_handler.clone(), - app_container.scrape_handler.clone(), - app_container.whitelist_authorization.clone(), - app_container.stats_event_sender.clone(), - app_container.ban_service.clone(), - registar.give_form(), - ) - .await, - ); + let udp_tracker_config = Arc::new(udp_tracker_config.clone()); + let udp_tracker_container = 
Arc::new(UdpTrackerContainer::from_app_container(&udp_tracker_config, app_container)); + jobs.push(udp_tracker::start_job(udp_tracker_container, registar.give_form()).await); } } } else { diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index c69162322..71684a7e3 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -32,7 +32,7 @@ use crate::core::services::{initialize_database, initialize_whitelist_manager, s use crate::core::torrent::manager::TorrentsManager; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; -use crate::core::whitelist; +use crate::core::whitelist::authorization::WhitelistAuthorization; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; @@ -87,16 +87,14 @@ pub fn initialize_global_services(configuration: &Configuration) { /// It initializes the IoC Container. 
#[instrument(skip())] pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { + let core_config = Arc::new(configuration.core.clone()); let (stats_event_sender, stats_repository) = statistics::setup::factory(configuration.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); let stats_repository = Arc::new(stats_repository); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let database = initialize_database(configuration); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(whitelist::authorization::WhitelistAuthorization::new( - &configuration.core, - &in_memory_whitelist.clone(), - )); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&configuration.core, &in_memory_whitelist.clone())); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); @@ -125,6 +123,7 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); AppContainer { + core_config, database, announce_handler, scrape_handler, diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 4f54ecb59..387fdd6ae 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -8,17 +8,11 @@ //! > for the configuration options. 
use std::sync::Arc; -use tokio::sync::RwLock; use tokio::task::JoinHandle; -use torrust_tracker_configuration::{Core, UdpTracker}; use tracing::instrument; -use crate::core::announce_handler::AnnounceHandler; -use crate::core::scrape_handler::ScrapeHandler; -use crate::core::statistics::event::sender::Sender; -use crate::core::whitelist; +use crate::container::UdpTrackerContainer; use crate::servers::registar::ServiceRegistrationForm; -use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::spawner::Spawner; use crate::servers::udp::server::Server; use crate::servers::udp::UDP_TRACKER_LOG_TARGET; @@ -33,41 +27,14 @@ use crate::servers::udp::UDP_TRACKER_LOG_TARGET; /// It will panic if it is unable to start the UDP service. /// It will panic if the task did not finish successfully. #[must_use] -#[allow(clippy::too_many_arguments)] #[allow(clippy::async_yields_async)] -#[instrument(skip( - config, - announce_handler, - scrape_handler, - whitelist_authorization, - stats_event_sender, - ban_service, - form -))] -pub async fn start_job( - core_config: Arc, - config: &UdpTracker, - announce_handler: Arc, - scrape_handler: Arc, - whitelist_authorization: Arc, - stats_event_sender: Arc>>, - ban_service: Arc>, - form: ServiceRegistrationForm, -) -> JoinHandle<()> { - let bind_to = config.bind_address; - let cookie_lifetime = config.cookie_lifetime; +#[instrument(skip(udp_tracker_container, form))] +pub async fn start_job(udp_tracker_container: Arc, form: ServiceRegistrationForm) -> JoinHandle<()> { + let bind_to = udp_tracker_container.udp_tracker_config.bind_address; + let cookie_lifetime = udp_tracker_container.udp_tracker_config.cookie_lifetime; let server = Server::new(Spawner::new(bind_to)) - .start( - core_config, - announce_handler, - scrape_handler, - whitelist_authorization, - stats_event_sender, - ban_service, - form, - cookie_lifetime, - ) + .start(udp_tracker_container, form, cookie_lifetime) .await .expect("it should be able to 
start the udp tracker"); diff --git a/src/console/profiling.rs b/src/console/profiling.rs index 318fce1e8..f3829c073 100644 --- a/src/console/profiling.rs +++ b/src/console/profiling.rs @@ -157,6 +157,7 @@ //! kcachegrind callgrind.out //! ``` use std::env; +use std::sync::Arc; use std::time::Duration; use tokio::time::sleep; @@ -181,6 +182,8 @@ pub async fn run() { let (config, app_container) = bootstrap::app::setup(); + let app_container = Arc::new(app_container); + let jobs = app::start(&config, &app_container).await; // Run the tracker for a fixed duration diff --git a/src/container.rs b/src/container.rs index 192fa62f1..1d137680e 100644 --- a/src/container.rs +++ b/src/container.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use tokio::sync::RwLock; +use torrust_tracker_configuration::{Core, UdpTracker}; use crate::core::announce_handler::AnnounceHandler; use crate::core::authentication::handler::KeysHandler; @@ -17,6 +18,7 @@ use crate::core::whitelist::manager::WhitelistManager; use crate::servers::udp::server::banning::BanService; pub struct AppContainer { + pub core_config: Arc, pub database: Arc>, pub announce_handler: Arc, pub scrape_handler: Arc, @@ -31,3 +33,28 @@ pub struct AppContainer { pub db_torrent_repository: Arc, pub torrents_manager: Arc, } + +pub struct UdpTrackerContainer { + pub core_config: Arc, + pub udp_tracker_config: Arc, + pub announce_handler: Arc, + pub scrape_handler: Arc, + pub whitelist_authorization: Arc, + pub stats_event_sender: Arc>>, + pub ban_service: Arc>, +} + +impl UdpTrackerContainer { + #[must_use] + pub fn from_app_container(udp_tracker_config: &Arc, app_container: &Arc) -> Self { + Self { + udp_tracker_config: udp_tracker_config.clone(), + core_config: app_container.core_config.clone(), + announce_handler: app_container.announce_handler.clone(), + scrape_handler: app_container.scrape_handler.clone(), + whitelist_authorization: app_container.whitelist_authorization.clone(), + stats_event_sender: 
app_container.stats_event_sender.clone(), + ban_service: app_container.ban_service.clone(), + } + } +} diff --git a/src/main.rs b/src/main.rs index f05de0327..77f6e32a3 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,9 +1,13 @@ +use std::sync::Arc; + use torrust_tracker_lib::{app, bootstrap}; #[tokio::main] async fn main() { let (config, app_container) = bootstrap::app::setup(); + let app_container = Arc::new(app_container); + let jobs = app::start(&config, &app_container).await; // handle the signals diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 43dc69019..992f27a44 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -11,7 +11,6 @@ use aquatic_udp_protocol::{ ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; use bittorrent_primitives::info_hash::InfoHash; -use tokio::sync::RwLock; use torrust_tracker_clock::clock::Time as _; use torrust_tracker_configuration::Core; use tracing::{instrument, Level}; @@ -19,8 +18,8 @@ use uuid::Uuid; use zerocopy::network_endian::I32; use super::connection_cookie::{check, make}; -use super::server::banning::BanService; use super::RawRequest; +use crate::container::UdpTrackerContainer; use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; use crate::core::scrape_handler::ScrapeHandler; use crate::core::statistics::event::sender::Sender; @@ -57,18 +56,12 @@ impl CookieTimeValues { /// - Delegating the request to the correct handler depending on the request type. /// /// It will return an `Error` response if the request is invalid. 
-#[allow(clippy::too_many_arguments)] -#[instrument(fields(request_id), skip(udp_request, announce_handler, scrape_handler, whitelist_authorization, opt_stats_event_sender, cookie_time_values, ban_service), ret(level = Level::TRACE))] +#[instrument(fields(request_id), skip(udp_request, udp_tracker_container, cookie_time_values), ret(level = Level::TRACE))] pub(crate) async fn handle_packet( udp_request: RawRequest, - core_config: &Arc, - announce_handler: &Arc, - scrape_handler: &Arc, - whitelist_authorization: &Arc, - opt_stats_event_sender: &Arc>>, + udp_tracker_container: Arc, local_addr: SocketAddr, cookie_time_values: CookieTimeValues, - ban_service: Arc>, ) -> Response { let request_id = Uuid::new_v4(); @@ -82,11 +75,7 @@ pub(crate) async fn handle_packet( Ok(request) => match handle_request( request, udp_request.from, - core_config, - announce_handler, - scrape_handler, - whitelist_authorization, - opt_stats_event_sender, + udp_tracker_container.clone(), cookie_time_values.clone(), ) .await @@ -98,7 +87,7 @@ pub(crate) async fn handle_packet( | Error::CookieValueExpired { .. } | Error::CookieValueFromFuture { .. } => { // code-review: should we include `RequestParseError` and `BadRequest`? 
- let mut ban_service = ban_service.write().await; + let mut ban_service = udp_tracker_container.ban_service.write().await; ban_service.increase_counter(&udp_request.from.ip()); } _ => {} @@ -108,7 +97,7 @@ pub(crate) async fn handle_packet( udp_request.from, local_addr, request_id, - opt_stats_event_sender, + &udp_tracker_container.stats_event_sender, cookie_time_values.valid_range.clone(), &e, Some(transaction_id), @@ -121,7 +110,7 @@ pub(crate) async fn handle_packet( udp_request.from, local_addr, request_id, - opt_stats_event_sender, + &udp_tracker_container.stats_event_sender, cookie_time_values.valid_range.clone(), &e, None, @@ -141,24 +130,11 @@ pub(crate) async fn handle_packet( /// # Errors /// /// If a error happens in the `handle_request` function, it will just return the `ServerError`. -#[allow(clippy::too_many_arguments)] -#[instrument(skip( - request, - remote_addr, - announce_handler, - scrape_handler, - whitelist_authorization, - opt_stats_event_sender, - cookie_time_values -))] +#[instrument(skip(request, remote_addr, udp_tracker_container, cookie_time_values))] pub async fn handle_request( request: Request, remote_addr: SocketAddr, - core_config: &Arc, - announce_handler: &Arc, - scrape_handler: &Arc, - whitelist_authorization: &Arc, - opt_stats_event_sender: &Arc>>, + udp_tracker_container: Arc, cookie_time_values: CookieTimeValues, ) -> Result { tracing::trace!("handle request"); @@ -167,7 +143,7 @@ pub async fn handle_request( Request::Connect(connect_request) => Ok(handle_connect( remote_addr, &connect_request, - opt_stats_event_sender, + &udp_tracker_container.stats_event_sender, cookie_time_values.issue_time, ) .await), @@ -175,10 +151,10 @@ pub async fn handle_request( handle_announce( remote_addr, &announce_request, - core_config, - announce_handler, - whitelist_authorization, - opt_stats_event_sender, + &udp_tracker_container.core_config, + &udp_tracker_container.announce_handler, + &udp_tracker_container.whitelist_authorization, + 
&udp_tracker_container.stats_event_sender, cookie_time_values.valid_range, ) .await @@ -187,8 +163,8 @@ pub async fn handle_request( handle_scrape( remote_addr, &scrape_request, - scrape_handler, - opt_stats_event_sender, + &udp_tracker_container.scrape_handler, + &udp_tracker_container.stats_event_sender, cookie_time_values.valid_range, ) .await diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index 4aaf87ae2..e4edadd8f 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -6,18 +6,14 @@ use bittorrent_tracker_client::udp::client::check; use derive_more::Constructor; use futures_util::StreamExt; use tokio::select; -use tokio::sync::{oneshot, RwLock}; +use tokio::sync::oneshot; use tokio::time::interval; -use torrust_tracker_configuration::Core; use tracing::instrument; -use super::banning::BanService; use super::request_buffer::ActiveRequests; use crate::bootstrap::jobs::Started; -use crate::core::announce_handler::AnnounceHandler; -use crate::core::scrape_handler::ScrapeHandler; -use crate::core::statistics::event::sender::Sender; -use crate::core::{statistics, whitelist}; +use crate::container::UdpTrackerContainer; +use crate::core::statistics; use crate::servers::logging::STARTED_ON; use crate::servers::registar::ServiceHealthCheckJob; use crate::servers::signals::{shutdown_signal_with_message, Halted}; @@ -43,24 +39,9 @@ impl Launcher { /// It panics if unable to bind to udp socket, and get the address from the udp socket. /// It panics if unable to send address of socket. /// It panics if the udp server is loaded when the tracker is private. 
- #[allow(clippy::too_many_arguments)] - #[instrument(skip( - announce_handler, - scrape_handler, - whitelist_authorization, - opt_stats_event_sender, - ban_service, - bind_to, - tx_start, - rx_halt - ))] + #[instrument(skip(udp_tracker_container, bind_to, tx_start, rx_halt))] pub async fn run_with_graceful_shutdown( - core_config: Arc, - announce_handler: Arc, - scrape_handler: Arc, - whitelist_authorization: Arc, - opt_stats_event_sender: Arc>>, - ban_service: Arc>, + udp_tracker_container: Arc, bind_to: SocketAddr, cookie_lifetime: Duration, tx_start: oneshot::Sender, @@ -68,7 +49,7 @@ impl Launcher { ) { tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting on: {bind_to}"); - if core_config.private { + if udp_tracker_container.core_config.private { tracing::error!("udp services cannot be used for private trackers"); panic!("it should not use udp if using authentication"); } @@ -98,17 +79,7 @@ impl Launcher { let local_addr = local_udp_url.clone(); tokio::task::spawn(async move { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_with_graceful_shutdown::task (listening...)"); - let () = Self::run_udp_server_main( - receiver, - core_config.clone(), - announce_handler.clone(), - scrape_handler.clone(), - whitelist_authorization.clone(), - opt_stats_event_sender.clone(), - ban_service.clone(), - cookie_lifetime, - ) - .await; + let () = Self::run_udp_server_main(receiver, udp_tracker_container, cookie_lifetime).await; }) }; @@ -145,23 +116,10 @@ impl Launcher { ServiceHealthCheckJob::new(binding, info, job) } - #[allow(clippy::too_many_arguments)] - #[instrument(skip( - receiver, - announce_handler, - scrape_handler, - whitelist_authorization, - opt_stats_event_sender, - ban_service - ))] + #[instrument(skip(receiver, udp_tracker_container))] async fn run_udp_server_main( mut receiver: Receiver, - core_config: Arc, - announce_handler: Arc, - scrape_handler: Arc, - whitelist_authorization: Arc, - opt_stats_event_sender: Arc>>, - ban_service: 
Arc>, + udp_tracker_container: Arc, cookie_lifetime: Duration, ) { let active_requests = &mut ActiveRequests::default(); @@ -172,7 +130,7 @@ impl Launcher { let cookie_lifetime = cookie_lifetime.as_secs_f64(); - let ban_cleaner = ban_service.clone(); + let ban_cleaner = udp_tracker_container.ban_service.clone(); tokio::spawn(async move { let mut cleaner_interval = interval(Duration::from_secs(IP_BANS_RESET_INTERVAL_IN_SECS)); @@ -204,7 +162,7 @@ impl Launcher { } }; - if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { + if let Some(stats_event_sender) = udp_tracker_container.stats_event_sender.as_deref() { match req.from.ip() { IpAddr::V4(_) => { stats_event_sender.send_event(statistics::event::Event::Udp4Request).await; @@ -215,10 +173,10 @@ impl Launcher { } } - if ban_service.read().await.is_banned(&req.from.ip()) { + if udp_tracker_container.ban_service.read().await.is_banned(&req.from.ip()) { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop continue: (banned ip)"); - if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { + if let Some(stats_event_sender) = udp_tracker_container.stats_event_sender.as_deref() { stats_event_sender .send_event(statistics::event::Event::UdpRequestBanned) .await; @@ -227,15 +185,7 @@ impl Launcher { continue; } - let processor = Processor::new( - receiver.socket.clone(), - core_config.clone(), - announce_handler.clone(), - scrape_handler.clone(), - whitelist_authorization.clone(), - opt_stats_event_sender.clone(), - cookie_lifetime, - ); + let processor = Processor::new(receiver.socket.clone(), udp_tracker_container.clone(), cookie_lifetime); /* We spawn the new task even if the active requests buffer is full. This could seem counterintuitive because we are accepting @@ -248,8 +198,7 @@ impl Launcher { only adding and removing tasks without given them the chance to finish. However, the buffer is yielding before aborting one tasks, giving it the chance to finish. 
*/ - let abort_handle: tokio::task::AbortHandle = - tokio::task::spawn(processor.process_request(req, ban_service.clone())).abort_handle(); + let abort_handle: tokio::task::AbortHandle = tokio::task::spawn(processor.process_request(req)).abort_handle(); if abort_handle.is_finished() { continue; @@ -260,7 +209,7 @@ impl Launcher { if old_request_aborted { // Evicted task from active requests buffer was aborted. - if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { + if let Some(stats_event_sender) = udp_tracker_container.stats_event_sender.as_deref() { stats_event_sender .send_event(statistics::event::Event::UdpRequestAborted) .await; diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index c87728361..941f6b5cb 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -63,6 +63,7 @@ mod tests { use super::spawner::Spawner; use super::Server; use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; + use crate::container::UdpTrackerContainer; use crate::servers::registar::Registar; #[tokio::test] @@ -71,7 +72,7 @@ mod tests { initialize_global_services(&cfg); - let app_container = initialize_app_container(&cfg); + let app_container = Arc::new(initialize_app_container(&cfg)); let udp_trackers = cfg.udp_trackers.clone().expect("missing UDP trackers configuration"); let config = &udp_trackers[0]; @@ -80,17 +81,11 @@ mod tests { let stopped = Server::new(Spawner::new(bind_to)); + let udp_tracker_config = Arc::new(config.clone()); + let udp_tracker_container = Arc::new(UdpTrackerContainer::from_app_container(&udp_tracker_config, &app_container)); + let started = stopped - .start( - Arc::new(cfg.core.clone()), - app_container.announce_handler, - app_container.scrape_handler, - app_container.whitelist_authorization, - app_container.stats_event_sender, - app_container.ban_service, - register.give_form(), - config.cookie_lifetime, - ) + .start(udp_tracker_container, 
register.give_form(), config.cookie_lifetime) .await .expect("it should start the server"); @@ -107,25 +102,19 @@ mod tests { initialize_global_services(&cfg); - let app_container = initialize_app_container(&cfg); + let app_container = Arc::new(initialize_app_container(&cfg)); - let config = &cfg.udp_trackers.as_ref().unwrap().first().unwrap(); + let config = cfg.udp_trackers.as_ref().unwrap().first().unwrap(); let bind_to = config.bind_address; let register = &Registar::default(); let stopped = Server::new(Spawner::new(bind_to)); + let udp_tracker_config = Arc::new(config.clone()); + let udp_tracker_container = Arc::new(UdpTrackerContainer::from_app_container(&udp_tracker_config, &app_container)); + let started = stopped - .start( - Arc::new(cfg.core.clone()), - app_container.announce_handler, - app_container.scrape_handler, - app_container.whitelist_authorization, - app_container.stats_event_sender, - app_container.ban_service, - register.give_form(), - config.cookie_lifetime, - ) + .start(udp_tracker_container, register.give_form(), config.cookie_lifetime) .await .expect("it should start the server"); diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index 24a34f98d..86a16d2d4 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -4,69 +4,43 @@ use std::sync::Arc; use std::time::Duration; use aquatic_udp_protocol::Response; -use tokio::sync::RwLock; use tokio::time::Instant; -use torrust_tracker_configuration::Core; use tracing::{instrument, Level}; -use super::banning::BanService; use super::bound_socket::BoundSocket; -use crate::core::announce_handler::AnnounceHandler; -use crate::core::scrape_handler::ScrapeHandler; -use crate::core::statistics::event::sender::Sender; +use crate::container::UdpTrackerContainer; +use crate::core::statistics; use crate::core::statistics::event::UdpResponseKind; -use crate::core::{statistics, whitelist}; use 
crate::servers::udp::handlers::CookieTimeValues; use crate::servers::udp::{handlers, RawRequest}; pub struct Processor { socket: Arc, - core_config: Arc, - announce_handler: Arc, - scrape_handler: Arc, - whitelist_authorization: Arc, - opt_stats_event_sender: Arc>>, + udp_tracker_container: Arc, cookie_lifetime: f64, } impl Processor { #[allow(clippy::too_many_arguments)] - pub fn new( - socket: Arc, - core_config: Arc, - announce_handler: Arc, - scrape_handler: Arc, - whitelist_authorization: Arc, - opt_stats_event_sender: Arc>>, - cookie_lifetime: f64, - ) -> Self { + pub fn new(socket: Arc, udp_tracker_container: Arc, cookie_lifetime: f64) -> Self { Self { socket, - core_config, - announce_handler, - scrape_handler, - whitelist_authorization, - opt_stats_event_sender, + udp_tracker_container, cookie_lifetime, } } - #[instrument(skip(self, request, ban_service))] - pub async fn process_request(self, request: RawRequest, ban_service: Arc>) { + #[instrument(skip(self, request))] + pub async fn process_request(self, request: RawRequest) { let from = request.from; let start_time = Instant::now(); let response = handlers::handle_packet( request, - &self.core_config, - &self.announce_handler, - &self.scrape_handler, - &self.whitelist_authorization, - &self.opt_stats_event_sender, + self.udp_tracker_container.clone(), self.socket.address(), CookieTimeValues::new(self.cookie_lifetime), - ban_service, ) .await; @@ -109,7 +83,7 @@ impl Processor { tracing::debug!(%bytes_count, %sent_bytes, "sent {response_type}"); } - if let Some(stats_event_sender) = self.opt_stats_event_sender.as_deref() { + if let Some(stats_event_sender) = self.udp_tracker_container.stats_event_sender.as_deref() { match target.ip() { IpAddr::V4(_) => { stats_event_sender diff --git a/src/servers/udp/server/spawner.rs b/src/servers/udp/server/spawner.rs index d5fd5d58e..88ce5a245 100644 --- a/src/servers/udp/server/spawner.rs +++ b/src/servers/udp/server/spawner.rs @@ -5,17 +5,12 @@ use 
std::time::Duration; use derive_more::derive::Display; use derive_more::Constructor; -use tokio::sync::{oneshot, RwLock}; +use tokio::sync::oneshot; use tokio::task::JoinHandle; -use torrust_tracker_configuration::Core; -use super::banning::BanService; use super::launcher::Launcher; use crate::bootstrap::jobs::Started; -use crate::core::announce_handler::AnnounceHandler; -use crate::core::scrape_handler::ScrapeHandler; -use crate::core::statistics::event::sender::Sender; -use crate::core::whitelist; +use crate::container::UdpTrackerContainer; use crate::servers::signals::Halted; #[derive(Constructor, Copy, Clone, Debug, Display)] @@ -30,15 +25,10 @@ impl Spawner { /// # Panics /// /// It would panic if unable to resolve the `local_addr` from the supplied ´socket´. - #[allow(clippy::too_many_arguments)] + #[must_use] pub fn spawn_launcher( &self, - core_config: Arc, - announce_handler: Arc, - scrape_handler: Arc, - whitelist_authorization: Arc, - opt_stats_event_sender: Arc>>, - ban_service: Arc>, + udp_tracker_container: Arc, cookie_lifetime: Duration, tx_start: oneshot::Sender, rx_halt: oneshot::Receiver, @@ -46,19 +36,8 @@ impl Spawner { let spawner = Self::new(self.bind_to); tokio::spawn(async move { - Launcher::run_with_graceful_shutdown( - core_config, - announce_handler, - scrape_handler, - whitelist_authorization, - opt_stats_event_sender, - ban_service, - spawner.bind_to, - cookie_lifetime, - tx_start, - rx_halt, - ) - .await; + Launcher::run_with_graceful_shutdown(udp_tracker_container, spawner.bind_to, cookie_lifetime, tx_start, rx_halt) + .await; spawner }) } diff --git a/src/servers/udp/server/states.rs b/src/servers/udp/server/states.rs index 9bcde9003..abce9720a 100644 --- a/src/servers/udp/server/states.rs +++ b/src/servers/udp/server/states.rs @@ -5,19 +5,13 @@ use std::time::Duration; use derive_more::derive::Display; use derive_more::Constructor; -use tokio::sync::RwLock; use tokio::task::JoinHandle; -use torrust_tracker_configuration::Core; use 
tracing::{instrument, Level}; -use super::banning::BanService; use super::spawner::Spawner; use super::{Server, UdpError}; use crate::bootstrap::jobs::Started; -use crate::core::announce_handler::AnnounceHandler; -use crate::core::scrape_handler::ScrapeHandler; -use crate::core::statistics::event::sender::Sender; -use crate::core::whitelist; +use crate::container::UdpTrackerContainer; use crate::servers::registar::{ServiceRegistration, ServiceRegistrationForm}; use crate::servers::signals::Halted; use crate::servers::udp::server::launcher::Launcher; @@ -67,16 +61,10 @@ impl Server { /// # Panics /// /// It panics if unable to receive the bound socket address from service. - #[allow(clippy::too_many_arguments)] - #[instrument(skip(self, announce_handler, scrape_handler, whitelist_authorization, opt_stats_event_sender, ban_service, form), err, ret(Display, level = Level::INFO))] + #[instrument(skip(self, udp_tracker_container, form), err, ret(Display, level = Level::INFO))] pub async fn start( self, - core_config: Arc, - announce_handler: Arc, - scrape_handler: Arc, - whitelist_authorization: Arc, - opt_stats_event_sender: Arc>>, - ban_service: Arc>, + udp_tracker_container: Arc, form: ServiceRegistrationForm, cookie_lifetime: Duration, ) -> Result, std::io::Error> { @@ -86,17 +74,10 @@ impl Server { assert!(!tx_halt.is_closed(), "Halt channel for UDP tracker should be open"); // May need to wrap in a task to about a tokio bug. 
- let task = self.state.spawner.spawn_launcher( - core_config, - announce_handler, - scrape_handler, - whitelist_authorization, - opt_stats_event_sender, - ban_service, - cookie_lifetime, - tx_start, - rx_halt, - ); + let task = self + .state + .spawner + .spawn_launcher(udp_tracker_container, cookie_lifetime, tx_start, rx_halt); let local_addr = rx_start.await.expect("it should be able to start the service").address; diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index 0767d5f07..f6a1feb06 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -229,7 +229,7 @@ mod receiving_an_announce_request { logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; - let ban_service = env.ban_service.clone(); + let ban_service = env.udp_tracker_container.ban_service.clone(); let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { Ok(udp_tracker_client) => udp_tracker_client, diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index b3a2670e8..af0b04e5c 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -2,18 +2,13 @@ use std::net::SocketAddr; use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; -use tokio::sync::RwLock; -use torrust_tracker_configuration::{Configuration, Core, UdpTracker, DEFAULT_TIMEOUT}; +use torrust_tracker_configuration::{Configuration, DEFAULT_TIMEOUT}; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; -use torrust_tracker_lib::core::announce_handler::AnnounceHandler; +use torrust_tracker_lib::container::UdpTrackerContainer; use torrust_tracker_lib::core::databases::Database; -use torrust_tracker_lib::core::scrape_handler::ScrapeHandler; -use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use 
torrust_tracker_lib::core::torrent::repository::in_memory::InMemoryTorrentRepository; -use torrust_tracker_lib::core::whitelist; use torrust_tracker_lib::servers::registar::Registar; -use torrust_tracker_lib::servers::udp::server::banning::BanService; use torrust_tracker_lib::servers::udp::server::spawner::Spawner; use torrust_tracker_lib::servers::udp::server::states::{Running, Stopped}; use torrust_tracker_lib::servers::udp::server::Server; @@ -23,16 +18,12 @@ pub struct Environment where S: std::fmt::Debug + std::fmt::Display, { - pub core_config: Arc, - pub config: Arc, + pub udp_tracker_container: Arc, + pub database: Arc>, pub in_memory_torrent_repository: Arc, - pub announce_handler: Arc, - pub scrape_handler: Arc, - pub whitelist_authorization: Arc, - pub stats_event_sender: Arc>>, pub stats_repository: Arc, - pub ban_service: Arc>, + pub registar: Registar, pub server: Server, } @@ -55,25 +46,31 @@ impl Environment { let app_container = initialize_app_container(configuration); - let udp_tracker = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); + let udp_tracker_configurations = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); - let config = Arc::new(udp_tracker[0].clone()); + let udp_tracker_config = Arc::new(udp_tracker_configurations[0].clone()); - let bind_to = config.bind_address; + let bind_to = udp_tracker_config.bind_address; let server = Server::new(Spawner::new(bind_to)); - Self { - core_config: Arc::new(configuration.core.clone()), - config, - database: app_container.database.clone(), - in_memory_torrent_repository: app_container.in_memory_torrent_repository.clone(), + let udp_tracker_container = Arc::new(UdpTrackerContainer { + udp_tracker_config: udp_tracker_config.clone(), + core_config: app_container.core_config.clone(), announce_handler: app_container.announce_handler.clone(), scrape_handler: app_container.scrape_handler.clone(), whitelist_authorization: 
app_container.whitelist_authorization.clone(), stats_event_sender: app_container.stats_event_sender.clone(), - stats_repository: app_container.stats_repository.clone(), ban_service: app_container.ban_service.clone(), + }); + + Self { + udp_tracker_container, + + database: app_container.database.clone(), + in_memory_torrent_repository: app_container.in_memory_torrent_repository.clone(), + stats_repository: app_container.stats_repository.clone(), + registar: Registar::default(), server, } @@ -81,31 +78,19 @@ impl Environment { #[allow(dead_code)] pub async fn start(self) -> Environment { - let cookie_lifetime = self.config.cookie_lifetime; + let cookie_lifetime = self.udp_tracker_container.udp_tracker_config.cookie_lifetime; + Environment { - core_config: self.core_config.clone(), - config: self.config, + udp_tracker_container: self.udp_tracker_container.clone(), + database: self.database.clone(), in_memory_torrent_repository: self.in_memory_torrent_repository.clone(), - announce_handler: self.announce_handler.clone(), - scrape_handler: self.scrape_handler.clone(), - whitelist_authorization: self.whitelist_authorization.clone(), - stats_event_sender: self.stats_event_sender.clone(), stats_repository: self.stats_repository.clone(), - ban_service: self.ban_service.clone(), + registar: self.registar.clone(), server: self .server - .start( - self.core_config, - self.announce_handler, - self.scrape_handler, - self.whitelist_authorization, - self.stats_event_sender, - self.ban_service, - self.registar.give_form(), - cookie_lifetime, - ) + .start(self.udp_tracker_container, self.registar.give_form(), cookie_lifetime) .await .unwrap(), } @@ -126,16 +111,12 @@ impl Environment { .expect("it should stop the environment within the timeout"); Environment { - core_config: self.core_config, - config: self.config, + udp_tracker_container: self.udp_tracker_container, + database: self.database, in_memory_torrent_repository: self.in_memory_torrent_repository, - announce_handler: 
self.announce_handler, - scrape_handler: self.scrape_handler, - whitelist_authorization: self.whitelist_authorization, - stats_event_sender: self.stats_event_sender, stats_repository: self.stats_repository, - ban_service: self.ban_service, + registar: Registar::default(), server: stopped.expect("it stop the udp tracker service"), } From a2bf1cd88a34deb8c6e5ea6852d6efd34dab4a0d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 29 Jan 2025 10:18:03 +0000 Subject: [PATCH 179/802] refactor: [torrust#1217] extract HttpTrackerContainer --- src/app.rs | 20 +++--- src/bootstrap/jobs/http_tracker.rs | 96 +++++------------------------ src/container.rs | 27 +++++++- src/servers/http/server.rs | 79 +++++------------------- src/servers/http/v1/routes.rs | 65 +++++++------------ src/servers/udp/server/processor.rs | 1 - tests/servers/http/environment.rs | 76 +++++++++-------------- tests/servers/http/v1/contract.rs | 16 +++-- 8 files changed, 127 insertions(+), 253 deletions(-) diff --git a/src/app.rs b/src/app.rs index e0f231611..617d75726 100644 --- a/src/app.rs +++ b/src/app.rs @@ -28,7 +28,7 @@ use torrust_tracker_configuration::Configuration; use tracing::instrument; use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; -use crate::container::{AppContainer, UdpTrackerContainer}; +use crate::container::{AppContainer, HttpTrackerContainer, UdpTrackerContainer}; use crate::servers; use crate::servers::registar::Registar; @@ -80,6 +80,7 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> } else { let udp_tracker_config = Arc::new(udp_tracker_config.clone()); let udp_tracker_container = Arc::new(UdpTrackerContainer::from_app_container(&udp_tracker_config, app_container)); + jobs.push(udp_tracker::start_job(udp_tracker_container, registar.give_form()).await); } } @@ -90,18 +91,11 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> // Start the HTTP blocks if let Some(http_trackers) = 
&config.http_trackers { for http_tracker_config in http_trackers { - if let Some(job) = http_tracker::start_job( - http_tracker_config, - Arc::new(config.core.clone()), - app_container.announce_handler.clone(), - app_container.scrape_handler.clone(), - app_container.authentication_service.clone(), - app_container.whitelist_authorization.clone(), - app_container.stats_event_sender.clone(), - registar.give_form(), - servers::http::Version::V1, - ) - .await + let http_tracker_config = Arc::new(http_tracker_config.clone()); + let http_tracker_container = Arc::new(HttpTrackerContainer::from_app_container(&http_tracker_config, app_container)); + + if let Some(job) = + http_tracker::start_job(http_tracker_container, registar.give_form(), servers::http::Version::V1).await { jobs.push(job); }; diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index dc6ed6b60..83cc0ae02 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -15,15 +15,10 @@ use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use tokio::task::JoinHandle; -use torrust_tracker_configuration::{Core, HttpTracker}; use tracing::instrument; use super::make_rust_tls; -use crate::core::announce_handler::AnnounceHandler; -use crate::core::authentication::service::AuthenticationService; -use crate::core::scrape_handler::ScrapeHandler; -use crate::core::statistics::event::sender::Sender; -use crate::core::{statistics, whitelist}; +use crate::container::HttpTrackerContainer; use crate::servers::http::server::{HttpServer, Launcher}; use crate::servers::http::Version; use crate::servers::registar::ServiceRegistrationForm; @@ -36,83 +31,33 @@ use crate::servers::registar::ServiceRegistrationForm; /// # Panics /// /// It would panic if the `config::HttpTracker` struct would contain inappropriate values. 
-#[allow(clippy::too_many_arguments)] -#[instrument(skip( - config, - announce_handler, - scrape_handler, - authentication_service, - whitelist_authorization, - stats_event_sender, - form -))] +#[instrument(skip(http_tracker_container, form))] pub async fn start_job( - config: &HttpTracker, - core_config: Arc, - announce_handler: Arc, - scrape_handler: Arc, - authentication_service: Arc, - whitelist_authorization: Arc, - stats_event_sender: Arc>>, + http_tracker_container: Arc, form: ServiceRegistrationForm, version: Version, ) -> Option> { - let socket = config.bind_address; + let socket = http_tracker_container.http_tracker_config.bind_address; - let tls = make_rust_tls(&config.tsl_config) + let tls = make_rust_tls(&http_tracker_container.http_tracker_config.tsl_config) .await .map(|tls| tls.expect("it should have a valid http tracker tls configuration")); match version { - Version::V1 => Some( - start_v1( - socket, - tls, - core_config.clone(), - announce_handler.clone(), - scrape_handler.clone(), - authentication_service.clone(), - whitelist_authorization.clone(), - stats_event_sender.clone(), - form, - ) - .await, - ), + Version::V1 => Some(start_v1(socket, tls, http_tracker_container, form).await), } } -#[allow(clippy::too_many_arguments)] #[allow(clippy::async_yields_async)] -#[instrument(skip( - socket, - tls, - announce_handler, - scrape_handler, - whitelist_authorization, - stats_event_sender, - form -))] +#[instrument(skip(socket, tls, http_tracker_container, form))] async fn start_v1( socket: SocketAddr, tls: Option, - config: Arc, - announce_handler: Arc, - scrape_handler: Arc, - authentication_service: Arc, - whitelist_authorization: Arc, - stats_event_sender: Arc>>, + http_tracker_container: Arc, form: ServiceRegistrationForm, ) -> JoinHandle<()> { let server = HttpServer::new(Launcher::new(socket, tls)) - .start( - config, - announce_handler, - scrape_handler, - authentication_service, - whitelist_authorization, - stats_event_sender, - form, - ) + 
.start(http_tracker_container, form) .await .expect("it should be able to start to the http tracker"); @@ -137,6 +82,7 @@ mod tests { use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; use crate::bootstrap::jobs::http_tracker::start_job; + use crate::container::HttpTrackerContainer; use crate::servers::http::Version; use crate::servers::registar::Registar; @@ -144,26 +90,18 @@ mod tests { async fn it_should_start_http_tracker() { let cfg = Arc::new(ephemeral_public()); let http_tracker = cfg.http_trackers.clone().expect("missing HTTP tracker configuration"); - let config = &http_tracker[0]; + let http_tracker_config = Arc::new(http_tracker[0].clone()); initialize_global_services(&cfg); - let app_container = initialize_app_container(&cfg); + let app_container = Arc::new(initialize_app_container(&cfg)); + + let http_tracker_container = Arc::new(HttpTrackerContainer::from_app_container(&http_tracker_config, &app_container)); let version = Version::V1; - start_job( - config, - Arc::new(cfg.core.clone()), - app_container.announce_handler, - app_container.scrape_handler, - app_container.authentication_service, - app_container.whitelist_authorization, - app_container.stats_event_sender, - Registar::default().give_form(), - version, - ) - .await - .expect("it should be able to join to the http tracker start-job"); + start_job(http_tracker_container, Registar::default().give_form(), version) + .await + .expect("it should be able to join to the http tracker start-job"); } } diff --git a/src/container.rs b/src/container.rs index 1d137680e..ad1185d64 100644 --- a/src/container.rs +++ b/src/container.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use tokio::sync::RwLock; -use torrust_tracker_configuration::{Core, UdpTracker}; +use torrust_tracker_configuration::{Core, HttpTracker, UdpTracker}; use crate::core::announce_handler::AnnounceHandler; use crate::core::authentication::handler::KeysHandler; @@ -58,3 +58,28 @@ impl UdpTrackerContainer { } } } + 
+pub struct HttpTrackerContainer { + pub core_config: Arc, + pub http_tracker_config: Arc, + pub announce_handler: Arc, + pub scrape_handler: Arc, + pub whitelist_authorization: Arc, + pub stats_event_sender: Arc>>, + pub authentication_service: Arc, +} + +impl HttpTrackerContainer { + #[must_use] + pub fn from_app_container(http_tracker_config: &Arc, app_container: &Arc) -> Self { + Self { + http_tracker_config: http_tracker_config.clone(), + core_config: app_container.core_config.clone(), + announce_handler: app_container.announce_handler.clone(), + scrape_handler: app_container.scrape_handler.clone(), + whitelist_authorization: app_container.whitelist_authorization.clone(), + stats_event_sender: app_container.stats_event_sender.clone(), + authentication_service: app_container.authentication_service.clone(), + } + } +} diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 2792697b3..2355bedf9 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -7,15 +7,11 @@ use axum_server::Handle; use derive_more::Constructor; use futures::future::BoxFuture; use tokio::sync::oneshot::{Receiver, Sender}; -use torrust_tracker_configuration::Core; use tracing::instrument; use super::v1::routes::router; use crate::bootstrap::jobs::Started; -use crate::core::announce_handler::AnnounceHandler; -use crate::core::authentication::service::AuthenticationService; -use crate::core::scrape_handler::ScrapeHandler; -use crate::core::{statistics, whitelist}; +use crate::container::HttpTrackerContainer; use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; use crate::servers::logging::STARTED_ON; @@ -46,25 +42,10 @@ pub struct Launcher { } impl Launcher { - #[allow(clippy::too_many_arguments)] - #[instrument(skip( - self, - announce_handler, - scrape_handler, - authentication_service, - whitelist_authorization, - stats_event_sender, - tx_start, - rx_halt - ))] + #[instrument(skip(self, 
http_tracker_container, tx_start, rx_halt))] fn start( &self, - config: Arc, - announce_handler: Arc, - scrape_handler: Arc, - authentication_service: Arc, - whitelist_authorization: Arc, - stats_event_sender: Arc>>, + http_tracker_container: Arc, tx_start: Sender, rx_halt: Receiver, ) -> BoxFuture<'static, ()> { @@ -84,15 +65,7 @@ impl Launcher { tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting on: {protocol}://{}", address); - let app = router( - config, - announce_handler, - scrape_handler, - authentication_service, - whitelist_authorization, - stats_event_sender, - address, - ); + let app = router(http_tracker_container, address); let running = Box::pin(async { match tls { @@ -185,15 +158,9 @@ impl HttpServer { /// /// It would panic spawned HTTP server launcher cannot send the bound `SocketAddr` /// back to the main thread. - #[allow(clippy::too_many_arguments)] pub async fn start( self, - core_config: Arc, - announce_handler: Arc, - scrape_handler: Arc, - authentication_service: Arc, - whitelist_authorization: Arc, - stats_event_sender: Arc>>, + http_tracker_container: Arc, form: ServiceRegistrationForm, ) -> Result, Error> { let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); @@ -202,16 +169,7 @@ impl HttpServer { let launcher = self.state.launcher; let task = tokio::spawn(async move { - let server = launcher.start( - core_config, - announce_handler, - scrape_handler, - authentication_service, - whitelist_authorization, - stats_event_sender, - tx_start, - rx_halt, - ); + let server = launcher.start(http_tracker_container, tx_start, rx_halt); server.await; @@ -284,6 +242,7 @@ mod tests { use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; use crate::bootstrap::jobs::make_rust_tls; + use crate::container::HttpTrackerContainer; use crate::servers::http::server::{HttpServer, Launcher}; use crate::servers::registar::Registar; @@ -293,30 +252,24 @@ mod tests { initialize_global_services(&cfg); - let app_container = 
initialize_app_container(&cfg); + let app_container = Arc::new(initialize_app_container(&cfg)); let http_trackers = cfg.http_trackers.clone().expect("missing HTTP trackers configuration"); - let config = &http_trackers[0]; - - let bind_to = config.bind_address; + let http_tracker_config = &http_trackers[0]; + let bind_to = http_tracker_config.bind_address; - let tls = make_rust_tls(&config.tsl_config) + let tls = make_rust_tls(&http_tracker_config.tsl_config) .await .map(|tls| tls.expect("tls config failed")); - let register = &Registar::default(); + let http_tracker_config = Arc::new(http_tracker_config.clone()); + let http_tracker_container = Arc::new(HttpTrackerContainer::from_app_container(&http_tracker_config, &app_container)); + let register = &Registar::default(); let stopped = HttpServer::new(Launcher::new(bind_to, tls)); + let started = stopped - .start( - Arc::new(cfg.core.clone()), - app_container.announce_handler, - app_container.scrape_handler, - app_container.authentication_service, - app_container.whitelist_authorization, - app_container.stats_event_sender, - register.give_form(), - ) + .start(http_tracker_container, register.give_form()) .await .expect("it should start the server"); let stopped = started.stop().await.expect("it should stop the server"); diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index f80760955..ed9aa05e6 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -10,7 +10,7 @@ use axum::routing::get; use axum::{BoxError, Router}; use axum_client_ip::SecureClientIpSource; use hyper::{Request, StatusCode}; -use torrust_tracker_configuration::{Core, DEFAULT_TIMEOUT}; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; use tower::timeout::TimeoutLayer; use tower::ServiceBuilder; use tower_http::classify::ServerErrorsFailureClass; @@ -22,11 +22,7 @@ use tower_http::LatencyUnit; use tracing::{instrument, Level, Span}; use super::handlers::{announce, health_check, scrape}; -use 
crate::core::announce_handler::AnnounceHandler; -use crate::core::authentication::service::AuthenticationService; -use crate::core::scrape_handler::ScrapeHandler; -use crate::core::statistics::event::sender::Sender; -use crate::core::whitelist; +use crate::container::HttpTrackerContainer; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; use crate::servers::logging::Latency; @@ -34,25 +30,8 @@ use crate::servers::logging::Latency; /// /// > **NOTICE**: it's added a layer to get the client IP from the connection /// > info. The tracker could use the connection info to get the client IP. -#[allow(clippy::too_many_arguments)] -#[allow(clippy::needless_pass_by_value)] -#[instrument(skip( - announce_handler, - scrape_handler, - authentication_service, - whitelist_authorization, - stats_event_sender, - server_socket_addr -))] -pub fn router( - core_config: Arc, - announce_handler: Arc, - scrape_handler: Arc, - authentication_service: Arc, - whitelist_authorization: Arc, - stats_event_sender: Arc>>, - server_socket_addr: SocketAddr, -) -> Router { +#[instrument(skip(http_tracker_container, server_socket_addr))] +pub fn router(http_tracker_container: Arc, server_socket_addr: SocketAddr) -> Router { Router::new() // Health check .route("/health_check", get(health_check::handler)) @@ -60,40 +39,40 @@ pub fn router( .route( "/announce", get(announce::handle_without_key).with_state(( - core_config.clone(), - announce_handler.clone(), - authentication_service.clone(), - whitelist_authorization.clone(), - stats_event_sender.clone(), + http_tracker_container.core_config.clone(), + http_tracker_container.announce_handler.clone(), + http_tracker_container.authentication_service.clone(), + http_tracker_container.whitelist_authorization.clone(), + http_tracker_container.stats_event_sender.clone(), )), ) .route( "/announce/{key}", get(announce::handle_with_key).with_state(( - core_config.clone(), - announce_handler.clone(), - authentication_service.clone(), - 
whitelist_authorization.clone(), - stats_event_sender.clone(), + http_tracker_container.core_config.clone(), + http_tracker_container.announce_handler.clone(), + http_tracker_container.authentication_service.clone(), + http_tracker_container.whitelist_authorization.clone(), + http_tracker_container.stats_event_sender.clone(), )), ) // Scrape request .route( "/scrape", get(scrape::handle_without_key).with_state(( - core_config.clone(), - scrape_handler.clone(), - authentication_service.clone(), - stats_event_sender.clone(), + http_tracker_container.core_config.clone(), + http_tracker_container.scrape_handler.clone(), + http_tracker_container.authentication_service.clone(), + http_tracker_container.stats_event_sender.clone(), )), ) .route( "/scrape/{key}", get(scrape::handle_with_key).with_state(( - core_config.clone(), - scrape_handler.clone(), - authentication_service.clone(), - stats_event_sender.clone(), + http_tracker_container.core_config.clone(), + http_tracker_container.scrape_handler.clone(), + http_tracker_container.authentication_service.clone(), + http_tracker_container.stats_event_sender.clone(), )), ) // Add extension to get the client IP from the connection info diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index 86a16d2d4..e2beb2377 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -21,7 +21,6 @@ pub struct Processor { } impl Processor { - #[allow(clippy::too_many_arguments)] pub fn new(socket: Arc, udp_tracker_container: Arc, cookie_lifetime: f64) -> Self { Self { socket, diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 5bf1d1c65..07ff2bc8c 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -2,36 +2,28 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use futures::executor::block_on; -use torrust_tracker_configuration::{Configuration, Core, HttpTracker}; +use 
torrust_tracker_configuration::Configuration; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; -use torrust_tracker_lib::core::announce_handler::AnnounceHandler; +use torrust_tracker_lib::container::HttpTrackerContainer; use torrust_tracker_lib::core::authentication::handler::KeysHandler; -use torrust_tracker_lib::core::authentication::service::AuthenticationService; use torrust_tracker_lib::core::databases::Database; -use torrust_tracker_lib::core::scrape_handler::ScrapeHandler; -use torrust_tracker_lib::core::statistics::event::sender::Sender; use torrust_tracker_lib::core::statistics::repository::Repository; use torrust_tracker_lib::core::torrent::repository::in_memory::InMemoryTorrentRepository; -use torrust_tracker_lib::core::whitelist; use torrust_tracker_lib::core::whitelist::manager::WhitelistManager; use torrust_tracker_lib::servers::http::server::{HttpServer, Launcher, Running, Stopped}; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_primitives::peer; pub struct Environment { - pub core_config: Arc, - pub http_tracker_config: Arc, + pub http_tracker_container: Arc, + pub database: Arc>, - pub announce_handler: Arc, - pub scrape_handler: Arc, pub in_memory_torrent_repository: Arc, pub keys_handler: Arc, - pub authentication_service: Arc, - pub stats_event_sender: Arc>>, pub stats_repository: Arc, - pub whitelist_authorization: Arc, pub whitelist_manager: Arc, + pub registar: Registar, pub server: HttpServer, } @@ -54,28 +46,33 @@ impl Environment { .http_trackers .clone() .expect("missing HTTP tracker configuration"); + let http_tracker_config = Arc::new(http_tracker[0].clone()); - let config = Arc::new(http_tracker[0].clone()); - - let bind_to = config.bind_address; + let bind_to = http_tracker_config.bind_address; - let tls = block_on(make_rust_tls(&config.tsl_config)).map(|tls| tls.expect("tls config failed")); + let tls = 
block_on(make_rust_tls(&http_tracker_config.tsl_config)).map(|tls| tls.expect("tls config failed")); let server = HttpServer::new(Launcher::new(bind_to, tls)); - Self { - http_tracker_config: config, - core_config: Arc::new(configuration.core.clone()), - database: app_container.database.clone(), + let http_tracker_container = Arc::new(HttpTrackerContainer { + core_config: app_container.core_config.clone(), + http_tracker_config: http_tracker_config.clone(), announce_handler: app_container.announce_handler.clone(), scrape_handler: app_container.scrape_handler.clone(), + whitelist_authorization: app_container.whitelist_authorization.clone(), + stats_event_sender: app_container.stats_event_sender.clone(), + authentication_service: app_container.authentication_service.clone(), + }); + + Self { + http_tracker_container, + + database: app_container.database.clone(), in_memory_torrent_repository: app_container.in_memory_torrent_repository.clone(), keys_handler: app_container.keys_handler.clone(), - authentication_service: app_container.authentication_service.clone(), - stats_event_sender: app_container.stats_event_sender.clone(), stats_repository: app_container.stats_repository.clone(), - whitelist_authorization: app_container.whitelist_authorization.clone(), whitelist_manager: app_container.whitelist_manager.clone(), + registar: Registar::default(), server, } @@ -84,30 +81,18 @@ impl Environment { #[allow(dead_code)] pub async fn start(self) -> Environment { Environment { - http_tracker_config: self.http_tracker_config, - core_config: self.core_config.clone(), + http_tracker_container: self.http_tracker_container.clone(), + database: self.database.clone(), - announce_handler: self.announce_handler.clone(), - scrape_handler: self.scrape_handler.clone(), in_memory_torrent_repository: self.in_memory_torrent_repository.clone(), keys_handler: self.keys_handler.clone(), - authentication_service: self.authentication_service.clone(), - whitelist_authorization: 
self.whitelist_authorization.clone(), - stats_event_sender: self.stats_event_sender.clone(), stats_repository: self.stats_repository.clone(), whitelist_manager: self.whitelist_manager.clone(), + registar: self.registar.clone(), server: self .server - .start( - self.core_config, - self.announce_handler, - self.scrape_handler, - self.authentication_service, - self.whitelist_authorization, - self.stats_event_sender, - self.registar.give_form(), - ) + .start(self.http_tracker_container, self.registar.give_form()) .await .unwrap(), } @@ -121,20 +106,15 @@ impl Environment { pub async fn stop(self) -> Environment { Environment { - http_tracker_config: self.http_tracker_config, - core_config: self.core_config, + http_tracker_container: self.http_tracker_container, + database: self.database, - announce_handler: self.announce_handler, - scrape_handler: self.scrape_handler, in_memory_torrent_repository: self.in_memory_torrent_repository, keys_handler: self.keys_handler, - authentication_service: self.authentication_service, - whitelist_authorization: self.whitelist_authorization, - stats_event_sender: self.stats_event_sender, stats_repository: self.stats_repository, whitelist_manager: self.whitelist_manager, - registar: Registar::default(), + registar: Registar::default(), server: self.server.stop().await.unwrap(), } } diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 33faf8578..f434467fc 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -449,7 +449,7 @@ mod for_all_config_modes { ) .await; - let announce_policy = env.core_config.announce_policy; + let announce_policy = env.http_tracker_container.core_config.announce_policy; assert_announce_response( response, @@ -490,7 +490,7 @@ mod for_all_config_modes { ) .await; - let announce_policy = env.core_config.announce_policy; + let announce_policy = env.http_tracker_container.core_config.announce_policy; // It should only contain the previously 
announced peer assert_announce_response( @@ -543,7 +543,7 @@ mod for_all_config_modes { ) .await; - let announce_policy = env.core_config.announce_policy; + let announce_policy = env.http_tracker_container.core_config.announce_policy; // The newly announced peer is not included on the response peer list, // but all the previously announced peers should be included regardless the IP version they are using. @@ -872,7 +872,10 @@ mod for_all_config_modes { let peers = env.in_memory_torrent_repository.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; - assert_eq!(peer_addr.ip(), env.core_config.net.external_ip.unwrap()); + assert_eq!( + peer_addr.ip(), + env.http_tracker_container.core_config.net.external_ip.unwrap() + ); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); env.stop().await; @@ -914,7 +917,10 @@ mod for_all_config_modes { let peers = env.in_memory_torrent_repository.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; - assert_eq!(peer_addr.ip(), env.core_config.net.external_ip.unwrap()); + assert_eq!( + peer_addr.ip(), + env.http_tracker_container.core_config.net.external_ip.unwrap() + ); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); env.stop().await; From 66b2b5601182aa4ac8eafa767679de919bc2e665 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 29 Jan 2025 11:00:07 +0000 Subject: [PATCH 180/802] refactor: [torrust#1217] extract HttpApiContainer --- src/app.rs | 19 +--- src/bootstrap/jobs/tracker_apis.rs | 105 ++++-------------- src/container.rs | 29 ++++- src/servers/apis/routes.rs | 37 +----- src/servers/apis/server.rs | 87 +++------------ .../apis/v1/context/auth_key/routes.rs | 7 +- src/servers/apis/v1/context/stats/routes.rs | 21 ++-- src/servers/apis/v1/context/torrent/routes.rs | 4 +- src/servers/apis/v1/routes.rs | 35 +----- tests/servers/api/environment.rs | 85 ++++++-------- .../api/v1/contract/context/auth_key.rs | 13 ++- .../api/v1/contract/context/whitelist.rs | 46 
++++++-- 12 files changed, 174 insertions(+), 314 deletions(-) diff --git a/src/app.rs b/src/app.rs index 617d75726..13bdc904a 100644 --- a/src/app.rs +++ b/src/app.rs @@ -28,7 +28,7 @@ use torrust_tracker_configuration::Configuration; use tracing::instrument; use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; -use crate::container::{AppContainer, HttpTrackerContainer, UdpTrackerContainer}; +use crate::container::{AppContainer, HttpApiContainer, HttpTrackerContainer, UdpTrackerContainer}; use crate::servers; use crate::servers::registar::Registar; @@ -106,19 +106,10 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> // Start HTTP API if let Some(http_api_config) = &config.http_api { - if let Some(job) = tracker_apis::start_job( - http_api_config, - app_container.in_memory_torrent_repository.clone(), - app_container.keys_handler.clone(), - app_container.whitelist_manager.clone(), - app_container.ban_service.clone(), - app_container.stats_event_sender.clone(), - app_container.stats_repository.clone(), - registar.give_form(), - servers::apis::Version::V1, - ) - .await - { + let http_api_config = Arc::new(http_api_config.clone()); + let http_api_container = Arc::new(HttpApiContainer::from_app_container(&http_api_config, app_container)); + + if let Some(job) = tracker_apis::start_job(http_api_container, registar.give_form(), servers::apis::Version::V1).await { jobs.push(job); }; } else { diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index ce6f3912c..cee6cbae2 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -24,21 +24,15 @@ use std::net::SocketAddr; use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; -use tokio::sync::RwLock; use tokio::task::JoinHandle; -use torrust_tracker_configuration::{AccessTokens, HttpApi}; +use torrust_tracker_configuration::AccessTokens; use tracing::instrument; use 
super::make_rust_tls; -use crate::core::authentication::handler::KeysHandler; -use crate::core::statistics::event::sender::Sender; -use crate::core::statistics::repository::Repository; -use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; -use crate::core::whitelist::manager::WhitelistManager; +use crate::container::HttpApiContainer; use crate::servers::apis::server::{ApiServer, Launcher}; use crate::servers::apis::Version; use crate::servers::registar::ServiceRegistrationForm; -use crate::servers::udp::server::banning::BanService; /// This is the message that the "launcher" spawned task sends to the main /// application process to notify the API server was successfully started. @@ -60,90 +54,36 @@ pub struct ApiServerJobStarted(); /// It would panic if unable to send the `ApiServerJobStarted` notice. /// /// -#[allow(clippy::too_many_arguments)] -#[instrument(skip( - config, - keys_handler, - whitelist_manager, - ban_service, - stats_event_sender, - stats_repository, - form -))] +#[instrument(skip(http_api_container, form))] pub async fn start_job( - config: &HttpApi, - in_memory_torrent_repository: Arc, - keys_handler: Arc, - whitelist_manager: Arc, - ban_service: Arc>, - stats_event_sender: Arc>>, - stats_repository: Arc, + http_api_container: Arc, form: ServiceRegistrationForm, version: Version, ) -> Option> { - let bind_to = config.bind_address; + let bind_to = http_api_container.http_api_config.bind_address; - let tls = make_rust_tls(&config.tsl_config) + let tls = make_rust_tls(&http_api_container.http_api_config.tsl_config) .await .map(|tls| tls.expect("it should have a valid tracker api tls configuration")); - let access_tokens = Arc::new(config.access_tokens.clone()); + let access_tokens = Arc::new(http_api_container.http_api_config.access_tokens.clone()); match version { - Version::V1 => Some( - start_v1( - bind_to, - tls, - in_memory_torrent_repository.clone(), - keys_handler.clone(), - whitelist_manager.clone(), - 
ban_service.clone(), - stats_event_sender.clone(), - stats_repository.clone(), - form, - access_tokens, - ) - .await, - ), + Version::V1 => Some(start_v1(bind_to, tls, http_api_container, form, access_tokens).await), } } #[allow(clippy::async_yields_async)] -#[allow(clippy::too_many_arguments)] -#[instrument(skip( - socket, - tls, - keys_handler, - whitelist_manager, - ban_service, - stats_event_sender, - stats_repository, - form, - access_tokens -))] +#[instrument(skip(socket, tls, http_api_container, form, access_tokens))] async fn start_v1( socket: SocketAddr, tls: Option, - in_memory_torrent_repository: Arc, - keys_handler: Arc, - whitelist_manager: Arc, - ban_service: Arc>, - stats_event_sender: Arc>>, - stats_repository: Arc, + http_api_container: Arc, form: ServiceRegistrationForm, access_tokens: Arc, ) -> JoinHandle<()> { let server = ApiServer::new(Launcher::new(socket, tls)) - .start( - in_memory_torrent_repository, - keys_handler, - whitelist_manager, - stats_event_sender, - stats_repository, - ban_service, - form, - access_tokens, - ) + .start(http_api_container, form, access_tokens) .await .expect("it should be able to start to the tracker api"); @@ -161,32 +101,25 @@ mod tests { use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; use crate::bootstrap::jobs::tracker_apis::start_job; + use crate::container::HttpApiContainer; use crate::servers::apis::Version; use crate::servers::registar::Registar; #[tokio::test] async fn it_should_start_http_tracker() { let cfg = Arc::new(ephemeral_public()); - let config = &cfg.http_api.clone().unwrap(); + let http_api_config = Arc::new(cfg.http_api.clone().unwrap()); initialize_global_services(&cfg); - let app_container = initialize_app_container(&cfg); + let app_container = Arc::new(initialize_app_container(&cfg)); + + let http_api_container = Arc::new(HttpApiContainer::from_app_container(&http_api_config, &app_container)); let version = Version::V1; - start_job( - config, - 
app_container.in_memory_torrent_repository, - app_container.keys_handler, - app_container.whitelist_manager, - app_container.ban_service, - app_container.stats_event_sender, - app_container.stats_repository, - Registar::default().give_form(), - version, - ) - .await - .expect("it should be able to join to the tracker api start-job"); + start_job(http_api_container, Registar::default().give_form(), version) + .await + .expect("it should be able to join to the tracker api start-job"); } } diff --git a/src/container.rs b/src/container.rs index ad1185d64..1a2a029ee 100644 --- a/src/container.rs +++ b/src/container.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use tokio::sync::RwLock; -use torrust_tracker_configuration::{Core, HttpTracker, UdpTracker}; +use torrust_tracker_configuration::{Core, HttpApi, HttpTracker, UdpTracker}; use crate::core::announce_handler::AnnounceHandler; use crate::core::authentication::handler::KeysHandler; @@ -83,3 +83,30 @@ impl HttpTrackerContainer { } } } + +pub struct HttpApiContainer { + pub core_config: Arc, + pub http_api_config: Arc, + pub in_memory_torrent_repository: Arc, + pub keys_handler: Arc, + pub whitelist_manager: Arc, + pub ban_service: Arc>, + pub stats_event_sender: Arc>>, + pub stats_repository: Arc, +} + +impl HttpApiContainer { + #[must_use] + pub fn from_app_container(http_api_config: &Arc, app_container: &Arc) -> Self { + Self { + http_api_config: http_api_config.clone(), + core_config: app_container.core_config.clone(), + in_memory_torrent_repository: app_container.in_memory_torrent_repository.clone(), + keys_handler: app_container.keys_handler.clone(), + whitelist_manager: app_container.whitelist_manager.clone(), + ban_service: app_container.ban_service.clone(), + stats_event_sender: app_container.stats_event_sender.clone(), + stats_repository: app_container.stats_repository.clone(), + } + } +} diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index 92ecb067d..137975259 100644 --- 
a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -15,7 +15,6 @@ use axum::response::Response; use axum::routing::get; use axum::{middleware, BoxError, Router}; use hyper::{Request, StatusCode}; -use tokio::sync::RwLock; use torrust_tracker_configuration::{AccessTokens, DEFAULT_TIMEOUT}; use tower::timeout::TimeoutLayer; use tower::ServiceBuilder; @@ -30,33 +29,14 @@ use tracing::{instrument, Level, Span}; use super::v1; use super::v1::context::health_check::handlers::health_check_handler; use super::v1::middlewares::auth::State; -use crate::core::authentication::handler::KeysHandler; -use crate::core::statistics::event::sender::Sender; -use crate::core::statistics::repository::Repository; -use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; -use crate::core::whitelist::manager::WhitelistManager; +use crate::container::HttpApiContainer; use crate::servers::apis::API_LOG_TARGET; use crate::servers::logging::Latency; -use crate::servers::udp::server::banning::BanService; /// Add all API routes to the router. 
-#[allow(clippy::too_many_arguments)] -#[allow(clippy::needless_pass_by_value)] -#[instrument(skip( - keys_handler, - whitelist_manager, - ban_service, - stats_event_sender, - stats_repository, - access_tokens -))] +#[instrument(skip(http_api_container, access_tokens))] pub fn router( - in_memory_torrent_repository: Arc, - keys_handler: Arc, - whitelist_manager: Arc, - ban_service: Arc>, - stats_event_sender: Arc>>, - stats_repository: Arc, + http_api_container: Arc, access_tokens: Arc, server_socket_addr: SocketAddr, ) -> Router { @@ -64,16 +44,7 @@ pub fn router( let api_url_prefix = "/api"; - let router = v1::routes::add( - api_url_prefix, - router, - &in_memory_torrent_repository.clone(), - &keys_handler.clone(), - &whitelist_manager.clone(), - ban_service.clone(), - stats_event_sender.clone(), - stats_repository.clone(), - ); + let router = v1::routes::add(api_url_prefix, router, &http_api_container); let state = State { access_tokens }; diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index b3621de0e..7388a1851 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -33,23 +33,17 @@ use derive_more::Constructor; use futures::future::BoxFuture; use thiserror::Error; use tokio::sync::oneshot::{Receiver, Sender}; -use tokio::sync::RwLock; use torrust_tracker_configuration::AccessTokens; use tracing::{instrument, Level}; use super::routes::router; use crate::bootstrap::jobs::Started; -use crate::core::authentication::handler::KeysHandler; -use crate::core::statistics; -use crate::core::statistics::repository::Repository; -use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; -use crate::core::whitelist::manager::WhitelistManager; +use crate::container::HttpApiContainer; use crate::servers::apis::API_LOG_TARGET; use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; use crate::servers::logging::STARTED_ON; use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, 
ServiceRegistrationForm}; use crate::servers::signals::{graceful_shutdown, Halted}; -use crate::servers::udp::server::banning::BanService; /// Errors that can occur when starting or stopping the API server. #[derive(Debug, Error)] @@ -128,16 +122,10 @@ impl ApiServer { /// # Panics /// /// It would panic if the bound socket address cannot be sent back to this starter. - #[allow(clippy::too_many_arguments)] - #[instrument(skip(self, in_memory_torrent_repository, keys_handler, whitelist_manager, stats_event_sender, ban_service, stats_repository, form, access_tokens), err, ret(Display, level = Level::INFO))] + #[instrument(skip(self, http_api_container, form, access_tokens), err, ret(Display, level = Level::INFO))] pub async fn start( self, - in_memory_torrent_repository: Arc, - keys_handler: Arc, - whitelist_manager: Arc, - stats_event_sender: Arc>>, - stats_repository: Arc, - ban_service: Arc>, + http_api_container: Arc, form: ServiceRegistrationForm, access_tokens: Arc, ) -> Result, Error> { @@ -149,19 +137,7 @@ impl ApiServer { let task = tokio::spawn(async move { tracing::debug!(target: API_LOG_TARGET, "Starting with launcher in spawned task ..."); - let _task = launcher - .start( - in_memory_torrent_repository, - keys_handler, - whitelist_manager, - ban_service, - stats_event_sender, - stats_repository, - access_tokens, - tx_start, - rx_halt, - ) - .await; + let _task = launcher.start(http_api_container, access_tokens, tx_start, rx_halt).await; tracing::debug!(target: API_LOG_TARGET, "Started with launcher in spawned task"); @@ -259,26 +235,10 @@ impl Launcher { /// /// Will panic if unable to bind to the socket, or unable to get the address of the bound socket. /// Will also panic if unable to send message regarding the bound socket address. 
- #[allow(clippy::too_many_arguments)] - #[instrument(skip( - self, - keys_handler, - whitelist_manager, - ban_service, - stats_event_sender, - stats_repository, - access_tokens, - tx_start, - rx_halt - ))] + #[instrument(skip(self, http_api_container, access_tokens, tx_start, rx_halt))] pub fn start( &self, - in_memory_torrent_repository: Arc, - keys_handler: Arc, - whitelist_manager: Arc, - ban_service: Arc>, - stats_event_sender: Arc>>, - stats_repository: Arc, + http_api_container: Arc, access_tokens: Arc, tx_start: Sender, rx_halt: Receiver, @@ -286,16 +246,7 @@ impl Launcher { let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); - let router = router( - in_memory_torrent_repository, - keys_handler, - whitelist_manager, - ban_service, - stats_event_sender, - stats_repository, - access_tokens, - address, - ); + let router = router(http_api_container, access_tokens, address); let handle = Handle::new(); @@ -347,41 +298,35 @@ mod tests { use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; use crate::bootstrap::jobs::make_rust_tls; + use crate::container::HttpApiContainer; use crate::servers::apis::server::{ApiServer, Launcher}; use crate::servers::registar::Registar; #[tokio::test] async fn it_should_be_able_to_start_and_stop() { let cfg = Arc::new(ephemeral_public()); - let config = &cfg.http_api.clone().unwrap(); + let http_api_config = Arc::new(cfg.http_api.clone().unwrap()); initialize_global_services(&cfg); - let app_container = initialize_app_container(&cfg); + let app_container = Arc::new(initialize_app_container(&cfg)); - let bind_to = config.bind_address; + let bind_to = http_api_config.bind_address; - let tls = make_rust_tls(&config.tsl_config) + let tls = make_rust_tls(&http_api_config.tsl_config) .await .map(|tls| tls.expect("tls config failed")); - let access_tokens = 
Arc::new(config.access_tokens.clone()); + let access_tokens = Arc::new(http_api_config.access_tokens.clone()); let stopped = ApiServer::new(Launcher::new(bind_to, tls)); let register = &Registar::default(); + let http_api_container = Arc::new(HttpApiContainer::from_app_container(&http_api_config, &app_container)); + let started = stopped - .start( - app_container.in_memory_torrent_repository, - app_container.keys_handler, - app_container.whitelist_manager, - app_container.stats_event_sender, - app_container.stats_repository, - app_container.ban_service, - register.give_form(), - access_tokens, - ) + .start(http_api_container, register.give_form(), access_tokens) .await .expect("it should start the server"); let stopped = started.stop().await.expect("it should stop the server"); diff --git a/src/servers/apis/v1/context/auth_key/routes.rs b/src/servers/apis/v1/context/auth_key/routes.rs index 45aeb02ec..ee9f3252c 100644 --- a/src/servers/apis/v1/context/auth_key/routes.rs +++ b/src/servers/apis/v1/context/auth_key/routes.rs @@ -15,7 +15,7 @@ use super::handlers::{add_auth_key_handler, delete_auth_key_handler, generate_au use crate::core::authentication::handler::KeysHandler; /// It adds the routes to the router for the [`auth_key`](crate::servers::apis::v1::context::auth_key) API context. 
-pub fn add(prefix: &str, router: Router, keys_handler: Arc) -> Router { +pub fn add(prefix: &str, router: Router, keys_handler: &Arc) -> Router { // Keys router .route( @@ -38,5 +38,8 @@ pub fn add(prefix: &str, router: Router, keys_handler: Arc) -> Rout &format!("{prefix}/keys/reload"), get(reload_keys_handler).with_state(keys_handler.clone()), ) - .route(&format!("{prefix}/keys"), post(add_auth_key_handler).with_state(keys_handler)) + .route( + &format!("{prefix}/keys"), + post(add_auth_key_handler).with_state(keys_handler.clone()), + ) } diff --git a/src/servers/apis/v1/context/stats/routes.rs b/src/servers/apis/v1/context/stats/routes.rs index 083c72b10..4c80f110d 100644 --- a/src/servers/apis/v1/context/stats/routes.rs +++ b/src/servers/apis/v1/context/stats/routes.rs @@ -7,25 +7,18 @@ use std::sync::Arc; use axum::routing::get; use axum::Router; -use tokio::sync::RwLock; use super::handlers::get_stats_handler; -use crate::core::statistics::event::sender::Sender; -use crate::core::statistics::repository::Repository; -use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; -use crate::servers::udp::server::banning::BanService; +use crate::container::HttpApiContainer; /// It adds the routes to the router for the [`stats`](crate::servers::apis::v1::context::stats) API context. 
-pub fn add( - prefix: &str, - router: Router, - in_memory_torrent_repository: Arc, - ban_service: Arc>, - _stats_event_sender: Arc>>, - stats_repository: Arc, -) -> Router { +pub fn add(prefix: &str, router: Router, http_api_container: &Arc) -> Router { router.route( &format!("{prefix}/stats"), - get(get_stats_handler).with_state((in_memory_torrent_repository, ban_service, stats_repository)), + get(get_stats_handler).with_state(( + http_api_container.in_memory_torrent_repository.clone(), + http_api_container.ban_service.clone(), + http_api_container.stats_repository.clone(), + )), ) } diff --git a/src/servers/apis/v1/context/torrent/routes.rs b/src/servers/apis/v1/context/torrent/routes.rs index dc66a1753..3ea8c639c 100644 --- a/src/servers/apis/v1/context/torrent/routes.rs +++ b/src/servers/apis/v1/context/torrent/routes.rs @@ -13,7 +13,7 @@ use super::handlers::{get_torrent_handler, get_torrents_handler}; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; /// It adds the routes to the router for the [`torrent`](crate::servers::apis::v1::context::torrent) API context. 
-pub fn add(prefix: &str, router: Router, in_memory_torrent_repository: Arc) -> Router { +pub fn add(prefix: &str, router: Router, in_memory_torrent_repository: &Arc) -> Router { // Torrents router .route( @@ -22,6 +22,6 @@ pub fn add(prefix: &str, router: Router, in_memory_torrent_repository: Arc, - keys_handler: &Arc, - whitelist_manager: &Arc, - ban_service: Arc>, - stats_event_sender: Arc>>, - stats_repository: Arc, -) -> Router { +pub fn add(prefix: &str, router: Router, http_api_container: &Arc) -> Router { let v1_prefix = format!("{prefix}/v1"); - let router = auth_key::routes::add(&v1_prefix, router, keys_handler.clone()); - let router = stats::routes::add( - &v1_prefix, - router, - in_memory_torrent_repository.clone(), - ban_service, - stats_event_sender, - stats_repository, - ); - let router = whitelist::routes::add(&v1_prefix, router, whitelist_manager); + let router = auth_key::routes::add(&v1_prefix, router, &http_api_container.keys_handler.clone()); + let router = stats::routes::add(&v1_prefix, router, http_api_container); + let router = whitelist::routes::add(&v1_prefix, router, &http_api_container.whitelist_manager); - torrent::routes::add(&v1_prefix, router, in_memory_torrent_repository.clone()) + torrent::routes::add(&v1_prefix, router, &http_api_container.in_memory_torrent_repository.clone()) } diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 66018032e..297e169d4 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -3,36 +3,26 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use futures::executor::block_on; -use tokio::sync::RwLock; use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; -use torrust_tracker_configuration::{Configuration, HttpApi}; +use torrust_tracker_configuration::Configuration; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use 
torrust_tracker_lib::bootstrap::jobs::make_rust_tls; -use torrust_tracker_lib::core::authentication::handler::KeysHandler; +use torrust_tracker_lib::container::HttpApiContainer; use torrust_tracker_lib::core::authentication::service::AuthenticationService; use torrust_tracker_lib::core::databases::Database; -use torrust_tracker_lib::core::statistics::event::sender::Sender; -use torrust_tracker_lib::core::statistics::repository::Repository; -use torrust_tracker_lib::core::torrent::repository::in_memory::InMemoryTorrentRepository; -use torrust_tracker_lib::core::whitelist::manager::WhitelistManager; use torrust_tracker_lib::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; use torrust_tracker_lib::servers::registar::Registar; -use torrust_tracker_lib::servers::udp::server::banning::BanService; use torrust_tracker_primitives::peer; pub struct Environment where S: std::fmt::Debug + std::fmt::Display, { - pub config: Arc, + pub http_api_container: Arc, + pub database: Arc>, - pub in_memory_torrent_repository: Arc, - pub keys_handler: Arc, pub authentication_service: Arc, - pub stats_event_sender: Arc>>, - pub stats_repository: Arc, - pub whitelist_manager: Arc, - pub ban_service: Arc>, + pub registar: Registar, pub server: ApiServer, } @@ -43,7 +33,10 @@ where { /// Add a torrent to the tracker pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let () = self.in_memory_torrent_repository.upsert_peer(info_hash, peer); + let () = self + .http_api_container + .in_memory_torrent_repository + .upsert_peer(info_hash, peer); } } @@ -53,55 +46,49 @@ impl Environment { let app_container = initialize_app_container(configuration); - let config = Arc::new(configuration.http_api.clone().expect("missing API configuration")); + let http_api_config = Arc::new(configuration.http_api.clone().expect("missing API configuration")); - let bind_to = config.bind_address; + let bind_to = http_api_config.bind_address; - let tls = 
block_on(make_rust_tls(&config.tsl_config)).map(|tls| tls.expect("tls config failed")); + let tls = block_on(make_rust_tls(&http_api_config.tsl_config)).map(|tls| tls.expect("tls config failed")); let server = ApiServer::new(Launcher::new(bind_to, tls)); - Self { - config, - database: app_container.database.clone(), + let http_api_container = Arc::new(HttpApiContainer { + http_api_config: http_api_config.clone(), + core_config: app_container.core_config.clone(), in_memory_torrent_repository: app_container.in_memory_torrent_repository.clone(), keys_handler: app_container.keys_handler.clone(), - authentication_service: app_container.authentication_service.clone(), - stats_event_sender: app_container.stats_event_sender.clone(), - stats_repository: app_container.stats_repository.clone(), whitelist_manager: app_container.whitelist_manager.clone(), ban_service: app_container.ban_service.clone(), + stats_event_sender: app_container.stats_event_sender.clone(), + stats_repository: app_container.stats_repository.clone(), + }); + + Self { + http_api_container, + + database: app_container.database.clone(), + authentication_service: app_container.authentication_service.clone(), + registar: Registar::default(), server, } } pub async fn start(self) -> Environment { - let access_tokens = Arc::new(self.config.access_tokens.clone()); + let access_tokens = Arc::new(self.http_api_container.http_api_config.access_tokens.clone()); Environment { - config: self.config, + http_api_container: self.http_api_container.clone(), + database: self.database.clone(), - in_memory_torrent_repository: self.in_memory_torrent_repository.clone(), - keys_handler: self.keys_handler.clone(), authentication_service: self.authentication_service.clone(), - stats_event_sender: self.stats_event_sender.clone(), - stats_repository: self.stats_repository.clone(), - whitelist_manager: self.whitelist_manager.clone(), - ban_service: self.ban_service.clone(), + registar: self.registar.clone(), server: self .server - 
.start( - self.in_memory_torrent_repository, - self.keys_handler, - self.whitelist_manager, - self.stats_event_sender, - self.stats_repository, - self.ban_service, - self.registar.give_form(), - access_tokens, - ) + .start(self.http_api_container, self.registar.give_form(), access_tokens) .await .unwrap(), } @@ -115,15 +102,11 @@ impl Environment { pub async fn stop(self) -> Environment { Environment { - config: self.config, + http_api_container: self.http_api_container, + database: self.database, - in_memory_torrent_repository: self.in_memory_torrent_repository, - keys_handler: self.keys_handler, authentication_service: self.authentication_service, - stats_event_sender: self.stats_event_sender, - stats_repository: self.stats_repository, - whitelist_manager: self.whitelist_manager, - ban_service: self.ban_service, + registar: Registar::default(), server: self.server.stop().await.unwrap(), } @@ -134,7 +117,7 @@ impl Environment { ConnectionInfo { origin, - api_token: self.config.access_tokens.get("admin").cloned(), + api_token: self.http_api_container.http_api_config.access_tokens.get("admin").cloned(), } } diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs index 3b7d2d6ba..3242c3ccc 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -158,6 +158,7 @@ async fn should_allow_deleting_an_auth_key() { let seconds_valid = 60; let auth_key = env + .http_api_container .keys_handler .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await @@ -292,6 +293,7 @@ async fn should_fail_when_the_auth_key_cannot_be_deleted() { let seconds_valid = 60; let auth_key = env + .http_api_container .keys_handler .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await @@ -325,6 +327,7 @@ async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { // Generate new auth key let auth_key = env + .http_api_container 
.keys_handler .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await @@ -345,6 +348,7 @@ async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { // Generate new auth key let auth_key = env + .http_api_container .keys_handler .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await @@ -373,7 +377,8 @@ async fn should_allow_reloading_keys() { let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; - env.keys_handler + env.http_api_container + .keys_handler .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -398,7 +403,8 @@ async fn should_fail_when_keys_cannot_be_reloaded() { let request_id = Uuid::new_v4(); let seconds_valid = 60; - env.keys_handler + env.http_api_container + .keys_handler .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -426,7 +432,8 @@ async fn should_not_allow_reloading_keys_for_unauthenticated_users() { let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; - env.keys_handler + env.http_api_container + .keys_handler .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs index 78850d3bf..3f8271e40 100644 --- a/tests/servers/api/v1/contract/context/whitelist.rs +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -31,7 +31,8 @@ async fn should_allow_whitelisting_a_torrent() { assert_ok(response).await; assert!( - env.whitelist_manager + env.http_api_container + .whitelist_manager .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) .await ); @@ -167,7 +168,11 @@ async fn should_allow_removing_a_torrent_from_the_whitelist() { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - 
env.whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); + env.http_api_container + .whitelist_manager + .add_torrent_to_whitelist(&info_hash) + .await + .unwrap(); let request_id = Uuid::new_v4(); @@ -176,7 +181,12 @@ async fn should_allow_removing_a_torrent_from_the_whitelist() { .await; assert_ok(response).await; - assert!(!env.whitelist_manager.is_info_hash_whitelisted(&info_hash).await); + assert!( + !env.http_api_container + .whitelist_manager + .is_info_hash_whitelisted(&info_hash) + .await + ); env.stop().await; } @@ -237,7 +247,11 @@ async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - env.whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); + env.http_api_container + .whitelist_manager + .add_torrent_to_whitelist(&info_hash) + .await + .unwrap(); force_database_error(&env.database); @@ -266,7 +280,11 @@ async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthentica let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - env.whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); + env.http_api_container + .whitelist_manager + .add_torrent_to_whitelist(&info_hash) + .await + .unwrap(); let request_id = Uuid::new_v4(); @@ -281,7 +299,11 @@ async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthentica "Expected logs to contain: ERROR ... API ... 
request_id={request_id}" ); - env.whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); + env.http_api_container + .whitelist_manager + .add_torrent_to_whitelist(&info_hash) + .await + .unwrap(); let request_id = Uuid::new_v4(); @@ -307,7 +329,11 @@ async fn should_allow_reload_the_whitelist_from_the_database() { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - env.whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); + env.http_api_container + .whitelist_manager + .add_torrent_to_whitelist(&info_hash) + .await + .unwrap(); let request_id = Uuid::new_v4(); @@ -338,7 +364,11 @@ async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - env.whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); + env.http_api_container + .whitelist_manager + .add_torrent_to_whitelist(&info_hash) + .await + .unwrap(); force_database_error(&env.database); From b38e4af4ca09164133a06602577ba3394dbd5b11 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 29 Jan 2025 11:34:57 +0000 Subject: [PATCH 181/802] chore: add DevSkim ignore DS173237 to avoid IDE warnings for infohashe values in tests. 
--- .../console/clients/checker/checks/http.rs | 4 ++-- .../src/console/clients/checker/checks/udp.rs | 2 +- .../src/http/client/requests/announce.rs | 2 +- .../src/http/client/requests/scrape.rs | 2 +- src/core/mod.rs | 4 ++-- src/core/scrape_handler.rs | 6 ++--- src/core/services/torrent.rs | 12 +++++----- .../v1/context/torrent/resources/torrent.rs | 8 +++---- .../servers/api/v1/contract/context/stats.rs | 2 +- .../api/v1/contract/context/torrent.rs | 24 +++++++++---------- .../api/v1/contract/context/whitelist.rs | 20 ++++++++-------- 11 files changed, 43 insertions(+), 43 deletions(-) diff --git a/console/tracker-client/src/console/clients/checker/checks/http.rs b/console/tracker-client/src/console/clients/checker/checks/http.rs index 0fd37ca48..1a69d9c22 100644 --- a/console/tracker-client/src/console/clients/checker/checks/http.rs +++ b/console/tracker-client/src/console/clients/checker/checks/http.rs @@ -61,7 +61,7 @@ pub async fn run(http_trackers: Vec, timeout: Duration) -> Vec Result { - let info_hash_str = "9c38422213e30bff212b30c360d26f9a02136422".to_string(); // # DevSkim: ignore DS173237 + let info_hash_str = "9c38422213e30bff212b30c360d26f9a02136422".to_string(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&info_hash_str).expect("a valid info-hash is required"); let client = Client::new(url.clone(), timeout).map_err(|err| Error::HttpClientError { err })?; @@ -86,7 +86,7 @@ async fn check_http_announce(url: &Url, timeout: Duration) -> Result Result { - let info_hashes: Vec = vec!["9c38422213e30bff212b30c360d26f9a02136422".to_string()]; // # DevSkim: ignore DS173237 + let info_hashes: Vec = vec!["9c38422213e30bff212b30c360d26f9a02136422".to_string()]; // DevSkim: ignore DS173237 let query = requests::scrape::Query::try_from(info_hashes).expect("a valid array of info-hashes is required"); let client = Client::new(url.clone(), timeout).map_err(|err| Error::HttpClientError { err })?; diff --git 
a/console/tracker-client/src/console/clients/checker/checks/udp.rs b/console/tracker-client/src/console/clients/checker/checks/udp.rs index 21bdcd1b7..b4edb2e2c 100644 --- a/console/tracker-client/src/console/clients/checker/checks/udp.rs +++ b/console/tracker-client/src/console/clients/checker/checks/udp.rs @@ -29,7 +29,7 @@ pub async fn run(udp_trackers: Vec, timeout: Duration) -> Vec QueryBuilder { let default_announce_query = Query { - info_hash: InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0, // # DevSkim: ignore DS173237 + info_hash: InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0, // DevSkim: ignore DS173237 peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), downloaded: 0, uploaded: 0, diff --git a/packages/tracker-client/src/http/client/requests/scrape.rs b/packages/tracker-client/src/http/client/requests/scrape.rs index 1b423390b..b25c3c4c7 100644 --- a/packages/tracker-client/src/http/client/requests/scrape.rs +++ b/packages/tracker-client/src/http/client/requests/scrape.rs @@ -90,7 +90,7 @@ pub struct QueryBuilder { impl Default for QueryBuilder { fn default() -> Self { let default_scrape_query = Query { - info_hash: [InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0].to_vec(), // # DevSkim: ignore DS173237 + info_hash: [InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0].to_vec(), // DevSkim: ignore DS173237 }; Self { scrape_query: default_scrape_query, diff --git a/src/core/mod.rs b/src/core/mod.rs index 77d8e1450..7d5e7d4d6 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -498,7 +498,7 @@ mod tests { async fn it_should_return_the_swarm_metadata_for_the_requested_file_if_the_tracker_has_that_torrent() { let (announce_handler, scrape_handler) = initialize_handlers_for_public_tracker(); - let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // # DevSkim: ignore DS173237 + let info_hash = 
"3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // DevSkim: ignore DS173237 // Announce a "complete" peer for the torrent let mut complete_peer = complete_peer(); @@ -553,7 +553,7 @@ mod tests { async fn it_should_return_the_zeroed_swarm_metadata_for_the_requested_file_if_it_is_not_whitelisted() { let (announce_handler, scrape_handler) = initialize_handlers_for_listed_tracker(); - let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // # DevSkim: ignore DS173237 + let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // DevSkim: ignore DS173237 let mut peer = incomplete_peer(); announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); diff --git a/src/core/scrape_handler.rs b/src/core/scrape_handler.rs index 7de82aa06..33bb6ca6a 100644 --- a/src/core/scrape_handler.rs +++ b/src/core/scrape_handler.rs @@ -75,7 +75,7 @@ mod tests { async fn it_should_return_a_zeroed_swarm_metadata_for_the_requested_file_if_the_tracker_does_not_have_that_torrent() { let scrape_handler = scrape_handler(); - let info_hashes = vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()]; // # DevSkim: ignore DS173237 + let info_hashes = vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()]; // DevSkim: ignore DS173237 let scrape_data = scrape_handler.scrape(&info_hashes).await; @@ -91,8 +91,8 @@ mod tests { let scrape_handler = scrape_handler(); let info_hashes = vec![ - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), // # DevSkim: ignore DS173237 - "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1".parse::().unwrap(), // # DevSkim: ignore DS173237 + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), // DevSkim: ignore DS173237 + "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1".parse::().unwrap(), // DevSkim: ignore DS173237 ]; let scrape_data = scrape_handler.scrape(&info_hashes).await; diff --git a/src/core/services/torrent.rs b/src/core/services/torrent.rs index 
d809fc266..dac93ce16 100644 --- a/src/core/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -145,7 +145,7 @@ mod tests { let torrent_info = get_torrent_info( in_memory_torrent_repository.clone(), - &InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(), + &InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(), // DevSkim: ignore DS173237 ) .await; @@ -156,7 +156,7 @@ mod tests { async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); let () = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); @@ -201,7 +201,7 @@ mod tests { async fn should_return_a_summarized_info_for_all_torrents() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); let () = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); @@ -223,7 +223,7 @@ mod tests { async fn should_allow_limiting_the_number_of_torrents_in_the_result() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash1 = InfoHash::from_str(&hash1).unwrap(); let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); @@ -244,7 +244,7 @@ mod tests { async fn should_allow_using_pagination_in_the_result() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - 
let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash1 = InfoHash::from_str(&hash1).unwrap(); let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); @@ -274,7 +274,7 @@ mod tests { async fn should_return_torrents_ordered_by_info_hash() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash1 = InfoHash::from_str(&hash1).unwrap(); let () = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs index 8fbb89418..237470d88 100644 --- a/src/servers/apis/v1/context/torrent/resources/torrent.rs +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -122,14 +122,14 @@ mod tests { fn torrent_resource_should_be_converted_from_torrent_info() { assert_eq!( Torrent::from(Info { - info_hash: InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + info_hash: InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), // DevSkim: ignore DS173237 seeders: 1, completed: 2, leechers: 3, peers: Some(vec![sample_peer()]), }), Torrent { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), // DevSkim: ignore DS173237 seeders: 1, completed: 2, leechers: 3, @@ -142,13 +142,13 @@ mod tests { fn torrent_resource_list_item_should_be_converted_from_the_basic_torrent_info() { assert_eq!( ListItem::from(BasicInfo { - info_hash: InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + info_hash: InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), // 
DevSkim: ignore DS173237 seeders: 1, completed: 2, leechers: 3, }), ListItem { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), // DevSkim: ignore DS173237 seeders: 1, completed: 2, leechers: 3, diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index 2eda0ed4a..55d3cd869 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -19,7 +19,7 @@ async fn should_allow_getting_tracker_statistics() { let env = Started::new(&configuration::ephemeral().into()).await; env.add_torrent_peer( - &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), // DevSkim: ignore DS173237 &PeerBuilder::default().into(), ); diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index 76646db14..8aa408173 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -26,7 +26,7 @@ async fn should_allow_getting_all_torrents() { let env = Started::new(&configuration::ephemeral().into()).await; - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()); @@ -39,7 +39,7 @@ async fn should_allow_getting_all_torrents() { assert_torrent_list( response, vec![torrent::ListItem { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), // DevSkim: ignore DS173237 seeders: 1, completed: 0, leechers: 0, @@ -57,8 +57,8 @@ async fn should_allow_limiting_the_torrents_in_the_result() { let env = 
Started::new(&configuration::ephemeral().into()).await; // torrents are ordered alphabetically by infohashes - let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); // DevSkim: ignore DS173237 env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); @@ -75,7 +75,7 @@ async fn should_allow_limiting_the_torrents_in_the_result() { assert_torrent_list( response, vec![torrent::ListItem { - info_hash: "0b3aea4adc213ce32295be85d3883a63bca25446".to_string(), + info_hash: "0b3aea4adc213ce32295be85d3883a63bca25446".to_string(), // DevSkim: ignore DS173237 seeders: 1, completed: 0, leechers: 0, @@ -93,8 +93,8 @@ async fn should_allow_the_torrents_result_pagination() { let env = Started::new(&configuration::ephemeral().into()).await; // torrents are ordered alphabetically by infohashes - let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); // DevSkim: ignore DS173237 env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); @@ -111,7 +111,7 @@ async fn should_allow_the_torrents_result_pagination() { assert_torrent_list( response, vec![torrent::ListItem { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + info_hash: 
"9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), // DevSkim: ignore DS173237 seeders: 1, completed: 0, leechers: 0, @@ -296,7 +296,7 @@ async fn should_allow_getting_a_torrent_info() { let env = Started::new(&configuration::ephemeral().into()).await; - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 let peer = PeerBuilder::default().into(); @@ -311,7 +311,7 @@ async fn should_allow_getting_a_torrent_info() { assert_torrent_info( response, Torrent { - info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), // DevSkim: ignore DS173237 seeders: 1, completed: 0, leechers: 0, @@ -330,7 +330,7 @@ async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exis let env = Started::new(&configuration::ephemeral().into()).await; let request_id = Uuid::new_v4(); - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 let response = Client::new(env.get_connection_info()) .get_torrent(&info_hash.to_string(), Some(headers_with_request_id(request_id))) @@ -376,7 +376,7 @@ async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { let env = Started::new(&configuration::ephemeral().into()).await; - let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); + let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()); diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs index 3f8271e40..945cb00b5 100644 --- 
a/tests/servers/api/v1/contract/context/whitelist.rs +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -23,7 +23,7 @@ async fn should_allow_whitelisting_a_torrent() { let env = Started::new(&configuration::ephemeral().into()).await; let request_id = Uuid::new_v4(); - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let response = Client::new(env.get_connection_info()) .whitelist_a_torrent(&info_hash, Some(headers_with_request_id(request_id))) @@ -46,7 +46,7 @@ async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() let env = Started::new(&configuration::ephemeral().into()).await; - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let api_client = Client::new(env.get_connection_info()); @@ -73,7 +73,7 @@ async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { let env = Started::new(&configuration::ephemeral().into()).await; - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let request_id = Uuid::new_v4(); @@ -110,7 +110,7 @@ async fn should_fail_when_the_torrent_cannot_be_whitelisted() { let env = Started::new(&configuration::ephemeral().into()).await; - let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 force_database_error(&env.database); @@ -165,7 +165,7 @@ async fn should_allow_removing_a_torrent_from_the_whitelist() { let env = Started::new(&configuration::ephemeral().into()).await; - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // 
DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); env.http_api_container @@ -197,7 +197,7 @@ async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whi let env = Started::new(&configuration::ephemeral().into()).await; - let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let request_id = Uuid::new_v4(); @@ -245,7 +245,7 @@ async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { let env = Started::new(&configuration::ephemeral().into()).await; - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); env.http_api_container .whitelist_manager @@ -277,7 +277,7 @@ async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthentica let env = Started::new(&configuration::ephemeral().into()).await; - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); env.http_api_container @@ -327,7 +327,7 @@ async fn should_allow_reload_the_whitelist_from_the_database() { let env = Started::new(&configuration::ephemeral().into()).await; - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); env.http_api_container .whitelist_manager @@ -362,7 +362,7 @@ async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { let env = Started::new(&configuration::ephemeral().into()).await; - let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); + let hash = 
"9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); env.http_api_container .whitelist_manager From 948cc8c2bcacc83fed4653145fcd769979c68a00 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 29 Jan 2025 12:46:10 +0000 Subject: [PATCH 182/802] refactor: [#1221] move core statistics mod to statistics context --- src/bootstrap/app.rs | 3 ++- src/core/services/mod.rs | 7 ------- src/core/statistics/mod.rs | 2 ++ .../statistics/mod.rs => statistics/services.rs} | 8 +++----- src/core/{services => }/statistics/setup.rs | 0 src/servers/apis/v1/context/stats/handlers.rs | 2 +- src/servers/apis/v1/context/stats/resources.rs | 4 ++-- src/servers/apis/v1/context/stats/responses.rs | 2 +- src/servers/http/v1/handlers/announce.rs | 3 ++- src/servers/http/v1/handlers/scrape.rs | 2 +- src/servers/http/v1/services/announce.rs | 3 ++- src/servers/http/v1/services/scrape.rs | 4 ++-- src/servers/udp/handlers.rs | 16 ++++++++-------- 13 files changed, 26 insertions(+), 30 deletions(-) rename src/core/{services/statistics/mod.rs => statistics/services.rs} (95%) rename src/core/{services => }/statistics/setup.rs (100%) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 71684a7e3..7661a36ec 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -28,7 +28,8 @@ use crate::core::authentication::key::repository::in_memory::InMemoryKeyReposito use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::core::authentication::service; use crate::core::scrape_handler::ScrapeHandler; -use crate::core::services::{initialize_database, initialize_whitelist_manager, statistics}; +use crate::core::services::{initialize_database, initialize_whitelist_manager}; +use crate::core::statistics; use crate::core::torrent::manager::TorrentsManager; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use 
crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index f2ee79993..30a05a992 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -1,10 +1,3 @@ -//! Tracker domain services. Core and statistics services. -//! -//! There are two types of service: -//! -//! - [Core tracker services](crate::core::services::torrent): related to the tracker main functionalities like getting info about torrents. -//! - [Services for statistics](crate::core::services::statistics): related to tracker metrics. Aggregate data about the tracker server. -pub mod statistics; pub mod torrent; use std::sync::Arc; diff --git a/src/core/statistics/mod.rs b/src/core/statistics/mod.rs index 49a82bea9..2ffbc0c8f 100644 --- a/src/core/statistics/mod.rs +++ b/src/core/statistics/mod.rs @@ -28,3 +28,5 @@ pub mod event; pub mod keeper; pub mod metrics; pub mod repository; +pub mod services; +pub mod setup; diff --git a/src/core/services/statistics/mod.rs b/src/core/statistics/services.rs similarity index 95% rename from src/core/services/statistics/mod.rs rename to src/core/statistics/services.rs index 79bc5f268..337731aea 100644 --- a/src/core/services/statistics/mod.rs +++ b/src/core/statistics/services.rs @@ -2,7 +2,7 @@ //! //! It includes: //! -//! - A [`factory`](crate::core::services::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. +//! - A [`factory`](crate::core::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. //! - A [`get_metrics`] service to get the tracker [`metrics`](crate::core::statistics::metrics::Metrics). //! //! Tracker metrics are collected using a Publisher-Subscribe pattern. @@ -36,8 +36,6 @@ //! // ... //! } //! 
``` -pub mod setup; - use std::sync::Arc; use tokio::sync::RwLock; @@ -117,9 +115,9 @@ mod tests { use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_test_helpers::configuration; - use crate::core::services::statistics::{self, get_metrics, TrackerMetrics}; + use crate::core::statistics::services::{get_metrics, TrackerMetrics}; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::core::{self}; + use crate::core::{self, statistics}; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; diff --git a/src/core/services/statistics/setup.rs b/src/core/statistics/setup.rs similarity index 100% rename from src/core/services/statistics/setup.rs rename to src/core/statistics/setup.rs diff --git a/src/servers/apis/v1/context/stats/handlers.rs b/src/servers/apis/v1/context/stats/handlers.rs index da87696fc..b8e7abd87 100644 --- a/src/servers/apis/v1/context/stats/handlers.rs +++ b/src/servers/apis/v1/context/stats/handlers.rs @@ -9,8 +9,8 @@ use serde::Deserialize; use tokio::sync::RwLock; use super::responses::{metrics_response, stats_response}; -use crate::core::services::statistics::get_metrics; use crate::core::statistics::repository::Repository; +use crate::core::statistics::services::get_metrics; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::servers::udp::server::banning::BanService; diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index c6a526a7d..97ece22fc 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -2,7 +2,7 @@ //! API context. use serde::{Deserialize, Serialize}; -use crate::core::services::statistics::TrackerMetrics; +use crate::core::statistics::services::TrackerMetrics; /// It contains all the statistics generated by the tracker. 
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -121,8 +121,8 @@ mod tests { use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use super::Stats; - use crate::core::services::statistics::TrackerMetrics; use crate::core::statistics::metrics::Metrics; + use crate::core::statistics::services::TrackerMetrics; #[test] fn stats_resource_should_be_converted_from_tracker_metrics() { diff --git a/src/servers/apis/v1/context/stats/responses.rs b/src/servers/apis/v1/context/stats/responses.rs index a67b5328a..6fda43f8c 100644 --- a/src/servers/apis/v1/context/stats/responses.rs +++ b/src/servers/apis/v1/context/stats/responses.rs @@ -3,7 +3,7 @@ use axum::response::{IntoResponse, Json, Response}; use super::resources::Stats; -use crate::core::services::statistics::TrackerMetrics; +use crate::core::statistics::services::TrackerMetrics; /// `200` response that contains the [`Stats`] resource as json. #[must_use] diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index d6c850327..3de17df58 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -257,7 +257,8 @@ mod tests { use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::core::authentication::service::AuthenticationService; use crate::core::core_tests::sample_info_hash; - use crate::core::services::{initialize_database, statistics}; + use crate::core::services::initialize_database; + use crate::core::statistics; use crate::core::statistics::event::sender::Sender; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index a197263e8..35c5b1409 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -177,7 +177,7 @@ mod tests { 
use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::core::authentication::service::AuthenticationService; use crate::core::scrape_handler::ScrapeHandler; - use crate::core::services::statistics; + use crate::core::statistics; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::whitelist::authorization::WhitelistAuthorization; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index e96face6a..6314c0a98 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -65,7 +65,8 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::core::announce_handler::AnnounceHandler; - use crate::core::services::{initialize_database, statistics}; + use crate::core::services::initialize_database; + use crate::core::statistics; use crate::core::statistics::event::sender::Sender; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 7e65b9442..16821e724 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -153,7 +153,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_scrape_data_for_a_torrent() { - let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); + let (stats_event_sender, _stats_repository) = crate::core::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let (announce_handler, scrape_handler) = initialize_announce_and_scrape_handlers_for_public_tracker(); @@ -236,7 +236,7 @@ mod tests { #[tokio::test] async fn it_should_always_return_the_zeroed_scrape_data_for_a_torrent() 
{ - let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); + let (stats_event_sender, _stats_repository) = crate::core::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let (announce_handler, _scrape_handler) = initialize_announce_and_scrape_handlers_for_public_tracker(); diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 992f27a44..f531718db 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -476,13 +476,13 @@ mod tests { use super::gen_remote_fingerprint; use crate::core::announce_handler::AnnounceHandler; use crate::core::scrape_handler::ScrapeHandler; - use crate::core::services::{initialize_database, statistics}; + use crate::core::services::initialize_database; use crate::core::statistics::event::sender::Sender; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; - use crate::core::whitelist; use crate::core::whitelist::authorization::WhitelistAuthorization; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; + use crate::core::{statistics, whitelist}; use crate::CurrentClock; struct CoreTrackerServices { @@ -656,7 +656,7 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { - let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); + let (stats_event_sender, _stats_repository) = crate::core::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let request = ConnectRequest { @@ -676,7 +676,7 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id() { - let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); + let (stats_event_sender, _stats_repository) = 
crate::core::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let request = ConnectRequest { @@ -696,7 +696,7 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { - let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); + let (stats_event_sender, _stats_repository) = crate::core::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let request = ConnectRequest { @@ -1001,7 +1001,7 @@ mod tests { announce_handler: Arc, whitelist_authorization: Arc, ) -> Response { - let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); + let (stats_event_sender, _stats_repository) = crate::core::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); @@ -1306,7 +1306,7 @@ mod tests { announce_handler: Arc, whitelist_authorization: Arc, ) -> Response { - let (stats_event_sender, _stats_repository) = crate::core::services::statistics::setup::factory(false); + let (stats_event_sender, _stats_repository) = crate::core::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -1494,7 +1494,7 @@ mod tests { use super::{gen_remote_fingerprint, TorrentPeerBuilder}; use crate::core::scrape_handler::ScrapeHandler; - use crate::core::services::statistics; + use crate::core::statistics; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_scrape; From d830c78cf865398ae27efbcbd519feb9040a5640 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 29 Jan 2025 13:08:16 +0000 Subject: [PATCH 183/802] refactor: [#1221] move core torrent mod to torrent context --- 
src/core/services/mod.rs | 2 -- src/core/torrent/mod.rs | 1 + src/core/{services/torrent.rs => torrent/services.rs} | 8 ++++---- src/servers/apis/v1/context/torrent/handlers.rs | 2 +- src/servers/apis/v1/context/torrent/resources/torrent.rs | 4 ++-- src/servers/apis/v1/context/torrent/responses.rs | 2 +- 6 files changed, 9 insertions(+), 10 deletions(-) rename src/core/{services/torrent.rs => torrent/services.rs} (97%) diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index 30a05a992..1050e41f8 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -1,5 +1,3 @@ -pub mod torrent; - use std::sync::Arc; use databases::driver::Driver; diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs index 95a5ff1eb..2aa19130e 100644 --- a/src/core/torrent/mod.rs +++ b/src/core/torrent/mod.rs @@ -27,6 +27,7 @@ //! pub mod manager; pub mod repository; +pub mod services; use torrust_tracker_torrent_repository::TorrentsSkipMapMutexStd; diff --git a/src/core/services/torrent.rs b/src/core/torrent/services.rs similarity index 97% rename from src/core/services/torrent.rs rename to src/core/torrent/services.rs index dac93ce16..5a4810412 100644 --- a/src/core/services/torrent.rs +++ b/src/core/torrent/services.rs @@ -135,9 +135,9 @@ mod tests { use bittorrent_primitives::info_hash::InfoHash; - use crate::core::services::torrent::tests::sample_peer; - use crate::core::services::torrent::{get_torrent_info, Info}; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::core::torrent::services::tests::sample_peer; + use crate::core::torrent::services::{get_torrent_info, Info}; #[tokio::test] async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { @@ -184,9 +184,9 @@ mod tests { use bittorrent_primitives::info_hash::InfoHash; - use crate::core::services::torrent::tests::sample_peer; - use crate::core::services::torrent::{get_torrents_page, BasicInfo, Pagination}; use 
crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::core::torrent::services::tests::sample_peer; + use crate::core::torrent::services::{get_torrents_page, BasicInfo, Pagination}; #[tokio::test] async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs index 8fe20ab80..0ec90441d 100644 --- a/src/servers/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -13,8 +13,8 @@ use thiserror::Error; use torrust_tracker_primitives::pagination::Pagination; use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; -use crate::core::services::torrent::{get_torrent_info, get_torrents, get_torrents_page}; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; +use crate::core::torrent::services::{get_torrent_info, get_torrents, get_torrents_page}; use crate::servers::apis::v1::responses::invalid_info_hash_param_response; use crate::servers::apis::InfoHashParam; diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs index 237470d88..c90a2a05f 100644 --- a/src/servers/apis/v1/context/torrent/resources/torrent.rs +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -6,7 +6,7 @@ //! the JSON response. use serde::{Deserialize, Serialize}; -use crate::core::services::torrent::{BasicInfo, Info}; +use crate::core::torrent::services::{BasicInfo, Info}; /// `Torrent` API resource. 
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -102,7 +102,7 @@ mod tests { use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use super::Torrent; - use crate::core::services::torrent::{BasicInfo, Info}; + use crate::core::torrent::services::{BasicInfo, Info}; use crate::servers::apis::v1::context::torrent::resources::peer::Peer; use crate::servers::apis::v1::context::torrent::resources::torrent::ListItem; diff --git a/src/servers/apis/v1/context/torrent/responses.rs b/src/servers/apis/v1/context/torrent/responses.rs index 5daceaf94..5174c9abe 100644 --- a/src/servers/apis/v1/context/torrent/responses.rs +++ b/src/servers/apis/v1/context/torrent/responses.rs @@ -4,7 +4,7 @@ use axum::response::{IntoResponse, Json, Response}; use serde_json::json; use super::resources::torrent::{ListItem, Torrent}; -use crate::core::services::torrent::{BasicInfo, Info}; +use crate::core::torrent::services::{BasicInfo, Info}; /// `200` response that contains an array of /// [`ListItem`] From 716e7b2fe050b03d524a47bc9fed41646a250ff1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 29 Jan 2025 13:13:58 +0000 Subject: [PATCH 184/802] refactor: [#1221] move DB setup to databases context --- src/bootstrap/app.rs | 3 ++- src/core/announce_handler.rs | 2 +- src/core/authentication/handler.rs | 2 +- src/core/authentication/mod.rs | 2 +- src/core/core_tests.rs | 2 +- src/core/databases/mod.rs | 1 + src/core/databases/setup.rs | 20 ++++++++++++++++++++ src/core/services/mod.rs | 19 +------------------ src/core/whitelist/whitelist_tests.rs | 3 ++- src/servers/http/v1/handlers/announce.rs | 2 +- src/servers/http/v1/services/announce.rs | 4 ++-- src/servers/http/v1/services/scrape.rs | 2 +- src/servers/udp/handlers.rs | 4 ++-- 13 files changed, 36 insertions(+), 30 deletions(-) create mode 100644 src/core/databases/setup.rs diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 7661a36ec..44fc4ea00 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs 
@@ -27,8 +27,9 @@ use crate::core::authentication::handler::KeysHandler; use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::core::authentication::service; +use crate::core::databases::setup::initialize_database; use crate::core::scrape_handler::ScrapeHandler; -use crate::core::services::{initialize_database, initialize_whitelist_manager}; +use crate::core::services::initialize_whitelist_manager; use crate::core::statistics; use crate::core::torrent::manager::TorrentsManager; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; diff --git a/src/core/announce_handler.rs b/src/core/announce_handler.rs index 1a5f84d47..816663bf6 100644 --- a/src/core/announce_handler.rs +++ b/src/core/announce_handler.rs @@ -414,7 +414,7 @@ mod tests { use crate::core::announce_handler::tests::the_announce_handler::peer_ip; use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; use crate::core::core_tests::{sample_info_hash, sample_peer}; - use crate::core::services::initialize_database; + use crate::core::databases::setup::initialize_database; use crate::core::torrent::manager::TorrentsManager; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; diff --git a/src/core/authentication/handler.rs b/src/core/authentication/handler.rs index 5ec9a11b4..d6477a948 100644 --- a/src/core/authentication/handler.rs +++ b/src/core/authentication/handler.rs @@ -246,7 +246,7 @@ mod tests { use crate::core::authentication::handler::KeysHandler; use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; - use crate::core::services::initialize_database; + use crate::core::databases::setup::initialize_database; fn 
instantiate_keys_handler() -> KeysHandler { let config = configuration::ephemeral_private(); diff --git a/src/core/authentication/mod.rs b/src/core/authentication/mod.rs index 0180b3a1e..eddcc1ae7 100644 --- a/src/core/authentication/mod.rs +++ b/src/core/authentication/mod.rs @@ -27,7 +27,7 @@ mod tests { use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::core::authentication::service; use crate::core::authentication::service::AuthenticationService; - use crate::core::services::initialize_database; + use crate::core::databases::setup::initialize_database; fn instantiate_keys_manager_and_authentication() -> (Arc, Arc) { let config = configuration::ephemeral_private(); diff --git a/src/core/core_tests.rs b/src/core/core_tests.rs index 45949bae2..35d5fb9b7 100644 --- a/src/core/core_tests.rs +++ b/src/core/core_tests.rs @@ -9,8 +9,8 @@ use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::announce_handler::AnnounceHandler; +use super::databases::setup::initialize_database; use super::scrape_handler::ScrapeHandler; -use super::services::initialize_database; use super::torrent::repository::in_memory::InMemoryTorrentRepository; use super::torrent::repository::persisted::DatabasePersistentTorrentRepository; use super::whitelist::repository::in_memory::InMemoryWhitelist; diff --git a/src/core/databases/mod.rs b/src/core/databases/mod.rs index e0b1b4f1b..dec6b799d 100644 --- a/src/core/databases/mod.rs +++ b/src/core/databases/mod.rs @@ -46,6 +46,7 @@ pub mod driver; pub mod error; pub mod mysql; +pub mod setup; pub mod sqlite; use std::marker::PhantomData; diff --git a/src/core/databases/setup.rs b/src/core/databases/setup.rs new file mode 100644 index 000000000..728913e05 --- /dev/null +++ b/src/core/databases/setup.rs @@ -0,0 +1,20 @@ +use std::sync::Arc; + +use torrust_tracker_configuration::v2_0_0::database; +use torrust_tracker_configuration::Configuration; + +use 
super::driver::{self, Driver}; +use super::Database; + +/// # Panics +/// +/// Will panic if database cannot be initialized. +#[must_use] +pub fn initialize_database(config: &Configuration) -> Arc> { + let driver = match config.core.database.driver { + database::Driver::Sqlite3 => Driver::Sqlite3, + database::Driver::MySQL => Driver::MySQL, + }; + + Arc::new(driver::build(&driver, &config.core.database.path).expect("Database driver build failed.")) +} diff --git a/src/core/services/mod.rs b/src/core/services/mod.rs index 1050e41f8..4d30fa966 100644 --- a/src/core/services/mod.rs +++ b/src/core/services/mod.rs @@ -1,27 +1,10 @@ use std::sync::Arc; -use databases::driver::Driver; -use torrust_tracker_configuration::v2_0_0::database; -use torrust_tracker_configuration::Configuration; - -use super::databases::{self, Database}; +use super::databases::Database; use super::whitelist::manager::WhitelistManager; use super::whitelist::repository::in_memory::InMemoryWhitelist; use super::whitelist::repository::persisted::DatabaseWhitelist; -/// # Panics -/// -/// Will panic if database cannot be initialized. 
-#[must_use] -pub fn initialize_database(config: &Configuration) -> Arc> { - let driver = match config.core.database.driver { - database::Driver::Sqlite3 => Driver::Sqlite3, - database::Driver::MySQL => Driver::MySQL, - }; - - Arc::new(databases::driver::build(&driver, &config.core.database.path).expect("Database driver build failed.")) -} - #[must_use] pub fn initialize_whitelist_manager( database: Arc>, diff --git a/src/core/whitelist/whitelist_tests.rs b/src/core/whitelist/whitelist_tests.rs index aa9c5ca14..cbe1e6488 100644 --- a/src/core/whitelist/whitelist_tests.rs +++ b/src/core/whitelist/whitelist_tests.rs @@ -5,7 +5,8 @@ use torrust_tracker_configuration::Configuration; use super::authorization::WhitelistAuthorization; use super::manager::WhitelistManager; use super::repository::in_memory::InMemoryWhitelist; -use crate::core::services::{initialize_database, initialize_whitelist_manager}; +use crate::core::databases::setup::initialize_database; +use crate::core::services::initialize_whitelist_manager; #[must_use] pub fn initialize_whitelist_services(config: &Configuration) -> (Arc, Arc) { diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 3de17df58..544d706fa 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -257,7 +257,7 @@ mod tests { use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::core::authentication::service::AuthenticationService; use crate::core::core_tests::sample_info_hash; - use crate::core::services::initialize_database; + use crate::core::databases::setup::initialize_database; use crate::core::statistics; use crate::core::statistics::event::sender::Sender; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 6314c0a98..ee682559e 100644 --- 
a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -65,7 +65,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::core::announce_handler::AnnounceHandler; - use crate::core::services::initialize_database; + use crate::core::databases::setup::initialize_database; use crate::core::statistics; use crate::core::statistics::event::sender::Sender; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -138,7 +138,7 @@ mod tests { use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; use crate::core::core_tests::sample_info_hash; - use crate::core::services::initialize_database; + use crate::core::databases::setup::initialize_database; use crate::core::statistics; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 16821e724..b5a858b83 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -82,8 +82,8 @@ mod tests { use crate::core::announce_handler::AnnounceHandler; use crate::core::core_tests::sample_info_hash; + use crate::core::databases::setup::initialize_database; use crate::core::scrape_handler::ScrapeHandler; - use crate::core::services::initialize_database; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use crate::core::whitelist::authorization::WhitelistAuthorization; diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index f531718db..90c32771f 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -475,8 +475,8 @@ mod tests { use super::gen_remote_fingerprint; use 
crate::core::announce_handler::AnnounceHandler; + use crate::core::databases::setup::initialize_database; use crate::core::scrape_handler::ScrapeHandler; - use crate::core::services::initialize_database; use crate::core::statistics::event::sender::Sender; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; @@ -1393,7 +1393,7 @@ mod tests { use mockall::predicate::eq; use crate::core::announce_handler::AnnounceHandler; - use crate::core::services::initialize_database; + use crate::core::databases::setup::initialize_database; use crate::core::statistics; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; From 1db58b16603c225585842c85e9f8a759623e0722 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 29 Jan 2025 13:17:16 +0000 Subject: [PATCH 185/802] refactor: [#1221] move whitelist manager setup to whitelist context --- src/bootstrap/app.rs | 2 +- src/core/mod.rs | 1 - src/core/whitelist/mod.rs | 1 + src/core/{services/mod.rs => whitelist/setup.rs} | 8 ++++---- src/core/whitelist/whitelist_tests.rs | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) rename src/core/{services/mod.rs => whitelist/setup.rs} (61%) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 44fc4ea00..8a084dc7f 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -29,13 +29,13 @@ use crate::core::authentication::key::repository::persisted::DatabaseKeyReposito use crate::core::authentication::service; use crate::core::databases::setup::initialize_database; use crate::core::scrape_handler::ScrapeHandler; -use crate::core::services::initialize_whitelist_manager; use crate::core::statistics; use crate::core::torrent::manager::TorrentsManager; use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use 
crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use crate::core::whitelist::authorization::WhitelistAuthorization; use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; +use crate::core::whitelist::setup::initialize_whitelist_manager; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; use crate::shared::crypto::ephemeral_instance_keys; diff --git a/src/core/mod.rs b/src/core/mod.rs index 7d5e7d4d6..038264446 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -444,7 +444,6 @@ pub mod authentication; pub mod databases; pub mod error; pub mod scrape_handler; -pub mod services; pub mod statistics; pub mod torrent; pub mod whitelist; diff --git a/src/core/whitelist/mod.rs b/src/core/whitelist/mod.rs index c23740111..1f5f87626 100644 --- a/src/core/whitelist/mod.rs +++ b/src/core/whitelist/mod.rs @@ -1,6 +1,7 @@ pub mod authorization; pub mod manager; pub mod repository; +pub mod setup; pub mod whitelist_tests; #[cfg(test)] diff --git a/src/core/services/mod.rs b/src/core/whitelist/setup.rs similarity index 61% rename from src/core/services/mod.rs rename to src/core/whitelist/setup.rs index 4d30fa966..bdd35737c 100644 --- a/src/core/services/mod.rs +++ b/src/core/whitelist/setup.rs @@ -1,9 +1,9 @@ use std::sync::Arc; -use super::databases::Database; -use super::whitelist::manager::WhitelistManager; -use super::whitelist::repository::in_memory::InMemoryWhitelist; -use super::whitelist::repository::persisted::DatabaseWhitelist; +use super::manager::WhitelistManager; +use super::repository::in_memory::InMemoryWhitelist; +use super::repository::persisted::DatabaseWhitelist; +use crate::core::databases::Database; #[must_use] pub fn initialize_whitelist_manager( diff --git a/src/core/whitelist/whitelist_tests.rs b/src/core/whitelist/whitelist_tests.rs index cbe1e6488..38c2bbde3 100644 --- a/src/core/whitelist/whitelist_tests.rs +++ 
b/src/core/whitelist/whitelist_tests.rs @@ -6,7 +6,7 @@ use super::authorization::WhitelistAuthorization; use super::manager::WhitelistManager; use super::repository::in_memory::InMemoryWhitelist; use crate::core::databases::setup::initialize_database; -use crate::core::services::initialize_whitelist_manager; +use crate::core::whitelist::setup::initialize_whitelist_manager; #[must_use] pub fn initialize_whitelist_services(config: &Configuration) -> (Arc, Arc) { From 4921f7b31b73752a0822f9d808c610da4854a31d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 29 Jan 2025 13:30:08 +0000 Subject: [PATCH 186/802] fix: [#1221] docs links --- src/core/mod.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/core/mod.rs b/src/core/mod.rs index 038264446..125a67b5a 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -344,10 +344,10 @@ //! //! # Services //! -//! Services are domain services on top of the core tracker. Right now there are two types of service: +//! Services are domain services on top of the core tracker domain. Right now there are two types of service: //! -//! - For statistics -//! - For torrents +//! - For statistics: [`crate::core::statistics::services`] +//! - For torrents: [`crate::core::torrent::services`] //! //! Services usually format the data inside the tracker to make it easier to consume by other parts. //! They also decouple the internal data structure, used by the tracker, from the way we deliver that data to the consumers. @@ -356,8 +356,6 @@ //! //! Services can include extra features like pagination, for example. //! -//! Refer to [`services`] module for more information about services. -//! //! # Authentication //! //! One of the core `Tracker` responsibilities is to create and keep authentication keys. 
Auth keys are used by HTTP trackers From a5ca24460c5b06ad434f65f2e67f3aac47260b23 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 29 Jan 2025 18:10:18 +0000 Subject: [PATCH 187/802] refactor: [#1223] extract tracker-core workspace package --- .github/workflows/deployment.yaml | 1 + Cargo.lock | 31 + Cargo.toml | 14 +- packages/tracker-core/Cargo.toml | 43 ++ packages/tracker-core/LICENSE | 661 ++++++++++++++++++ packages/tracker-core/README.md | 15 + .../tracker-core/migrations}/README.md | 0 ...3000_torrust_tracker_create_all_tables.sql | 0 ...rust_tracker_keys_valid_until_nullable.sql | 0 ...3000_torrust_tracker_create_all_tables.sql | 0 ...rust_tracker_keys_valid_until_nullable.sql | 0 .../tracker-core/src}/announce_handler.rs | 38 +- .../src}/authentication/handler.rs | 28 +- .../src}/authentication/key/mod.rs | 19 +- .../key/repository/in_memory.rs | 2 +- .../src}/authentication/key/repository/mod.rs | 0 .../key/repository/persisted.rs | 4 +- .../tracker-core/src}/authentication/mod.rs | 30 +- .../src}/authentication/service.rs | 6 +- .../tracker-core/src}/core_tests.rs | 0 .../tracker-core/src}/databases/driver.rs | 8 +- .../tracker-core/src}/databases/error.rs | 0 .../tracker-core/src}/databases/mod.rs | 4 +- .../tracker-core/src}/databases/mysql.rs | 4 +- .../tracker-core/src}/databases/setup.rs | 0 .../tracker-core/src}/databases/sqlite.rs | 2 +- .../tracker-core/src}/error.rs | 0 packages/tracker-core/src/lib.rs | 585 ++++++++++++++++ .../tracker-core/src}/peer_tests.rs | 0 .../tracker-core/src}/scrape_handler.rs | 6 +- .../src}/statistics/event/handler.rs | 10 +- .../src}/statistics/event/listener.rs | 2 +- .../tracker-core/src}/statistics/event/mod.rs | 0 .../src}/statistics/event/sender.rs | 0 .../tracker-core/src}/statistics/keeper.rs | 6 +- .../tracker-core/src}/statistics/metrics.rs | 0 packages/tracker-core/src/statistics/mod.rs | 32 + .../src}/statistics/repository.rs | 0 .../tracker-core/src/statistics/services.rs | 55 ++ 
.../tracker-core/src}/statistics/setup.rs | 2 +- .../tracker-core/src}/torrent/manager.rs | 3 +- .../tracker-core/src}/torrent/mod.rs | 0 .../src}/torrent/repository/in_memory.rs | 6 +- .../src}/torrent/repository/mod.rs | 0 .../src}/torrent/repository/persisted.rs | 4 +- .../tracker-core/src}/torrent/services.rs | 50 +- .../src}/whitelist/authorization.rs | 6 +- .../tracker-core/src}/whitelist/manager.rs | 14 +- .../tracker-core/src}/whitelist/mod.rs | 4 +- .../src}/whitelist/repository/in_memory.rs | 4 +- .../src}/whitelist/repository/mod.rs | 0 .../src}/whitelist/repository/persisted.rs | 2 +- .../tracker-core/src}/whitelist/setup.rs | 2 +- .../src}/whitelist/whitelist_tests.rs | 4 +- src/bootstrap/app.rs | 28 +- src/bootstrap/jobs/torrent_cleanup.rs | 3 +- src/container.rs | 24 +- src/core/mod.rs | 572 --------------- src/core/statistics/mod.rs | 31 - src/core/statistics/services.rs | 39 +- .../apis/v1/context/auth_key/handlers.rs | 10 +- .../apis/v1/context/auth_key/resources.rs | 5 +- .../apis/v1/context/auth_key/routes.rs | 2 +- src/servers/apis/v1/context/stats/handlers.rs | 4 +- .../apis/v1/context/stats/resources.rs | 7 +- .../apis/v1/context/stats/responses.rs | 2 +- .../apis/v1/context/torrent/handlers.rs | 21 +- .../v1/context/torrent/resources/torrent.rs | 5 +- .../apis/v1/context/torrent/responses.rs | 2 +- src/servers/apis/v1/context/torrent/routes.rs | 2 +- .../apis/v1/context/whitelist/handlers.rs | 2 +- .../apis/v1/context/whitelist/routes.rs | 2 +- .../http/v1/extractors/authentication_key.rs | 2 +- src/servers/http/v1/handlers/announce.rs | 39 +- src/servers/http/v1/handlers/common/auth.rs | 15 +- src/servers/http/v1/handlers/scrape.rs | 27 +- src/servers/http/v1/services/announce.rs | 54 +- src/servers/http/v1/services/scrape.rs | 59 +- src/servers/udp/handlers.rs | 111 +-- src/servers/udp/server/launcher.rs | 2 +- src/servers/udp/server/processor.rs | 4 +- src/shared/bit_torrent/common.rs | 6 - tests/servers/api/environment.rs | 4 +- 
tests/servers/api/mod.rs | 2 +- .../api/v1/contract/context/auth_key.rs | 4 +- tests/servers/http/client.rs | 2 +- tests/servers/http/connection_info.rs | 2 +- tests/servers/http/environment.rs | 10 +- tests/servers/http/v1/contract.rs | 4 +- tests/servers/udp/environment.rs | 6 +- 90 files changed, 1830 insertions(+), 991 deletions(-) create mode 100644 packages/tracker-core/Cargo.toml create mode 100644 packages/tracker-core/LICENSE create mode 100644 packages/tracker-core/README.md rename {migrations => packages/tracker-core/migrations}/README.md (100%) rename {migrations => packages/tracker-core/migrations}/mysql/20240730183000_torrust_tracker_create_all_tables.sql (100%) rename {migrations => packages/tracker-core/migrations}/mysql/20240730183500_torrust_tracker_keys_valid_until_nullable.sql (100%) rename {migrations => packages/tracker-core/migrations}/sqlite/20240730183000_torrust_tracker_create_all_tables.sql (100%) rename {migrations => packages/tracker-core/migrations}/sqlite/20240730183500_torrust_tracker_keys_valid_until_nullable.sql (100%) rename {src/core => packages/tracker-core/src}/announce_handler.rs (92%) rename {src/core => packages/tracker-core/src}/authentication/handler.rs (91%) rename {src/core => packages/tracker-core/src}/authentication/key/mod.rs (95%) rename {src/core => packages/tracker-core/src}/authentication/key/repository/in_memory.rs (95%) rename {src/core => packages/tracker-core/src}/authentication/key/repository/mod.rs (100%) rename {src/core => packages/tracker-core/src}/authentication/key/repository/persisted.rs (92%) rename {src/core => packages/tracker-core/src}/authentication/mod.rs (86%) rename {src/core => packages/tracker-core/src}/authentication/service.rs (93%) rename {src/core => packages/tracker-core/src}/core_tests.rs (100%) rename {src/core => packages/tracker-core/src}/databases/driver.rs (90%) rename {src/core => packages/tracker-core/src}/databases/error.rs (100%) rename {src/core => 
packages/tracker-core/src}/databases/mod.rs (98%) rename {src/core => packages/tracker-core/src}/databases/mysql.rs (98%) rename {src/core => packages/tracker-core/src}/databases/setup.rs (100%) rename {src/core => packages/tracker-core/src}/databases/sqlite.rs (99%) rename {src/core => packages/tracker-core/src}/error.rs (100%) create mode 100644 packages/tracker-core/src/lib.rs rename {src/core => packages/tracker-core/src}/peer_tests.rs (100%) rename {src/core => packages/tracker-core/src}/scrape_handler.rs (95%) rename {src/core => packages/tracker-core/src}/statistics/event/handler.rs (96%) rename {src/core => packages/tracker-core/src}/statistics/event/listener.rs (84%) rename {src/core => packages/tracker-core/src}/statistics/event/mod.rs (100%) rename {src/core => packages/tracker-core/src}/statistics/event/sender.rs (100%) rename {src/core => packages/tracker-core/src}/statistics/keeper.rs (93%) rename {src/core => packages/tracker-core/src}/statistics/metrics.rs (100%) create mode 100644 packages/tracker-core/src/statistics/mod.rs rename {src/core => packages/tracker-core/src}/statistics/repository.rs (100%) create mode 100644 packages/tracker-core/src/statistics/services.rs rename {src/core => packages/tracker-core/src}/statistics/setup.rs (98%) rename {src/core => packages/tracker-core/src}/torrent/manager.rs (97%) rename {src/core => packages/tracker-core/src}/torrent/mod.rs (100%) rename {src/core => packages/tracker-core/src}/torrent/repository/in_memory.rs (98%) rename {src/core => packages/tracker-core/src}/torrent/repository/mod.rs (100%) rename {src/core => packages/tracker-core/src}/torrent/repository/persisted.rs (94%) rename {src/core => packages/tracker-core/src}/torrent/services.rs (85%) rename {src/core => packages/tracker-core/src}/whitelist/authorization.rs (93%) rename {src/core => packages/tracker-core/src}/whitelist/manager.rs (91%) rename {src/core => packages/tracker-core/src}/whitelist/mod.rs (88%) rename {src/core => 
packages/tracker-core/src}/whitelist/repository/in_memory.rs (94%) rename {src/core => packages/tracker-core/src}/whitelist/repository/mod.rs (100%) rename {src/core => packages/tracker-core/src}/whitelist/repository/persisted.rs (97%) rename {src/core => packages/tracker-core/src}/whitelist/setup.rs (92%) rename {src/core => packages/tracker-core/src}/whitelist/whitelist_tests.rs (89%) diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index fd4e0fd5c..41b40feaa 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -57,6 +57,7 @@ jobs: run: | cargo publish -p bittorrent-http-protocol cargo publish -p bittorrent-tracker-client + cargo publish -p bittorrent-tracker-core cargo publish -p torrust-tracker cargo publish -p torrust-tracker-api-client cargo publish -p torrust-tracker-client diff --git a/Cargo.lock b/Cargo.lock index 355457721..d0d4d7e8f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -591,6 +591,36 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "bittorrent-tracker-core" +version = "3.0.0-develop" +dependencies = [ + "aquatic_udp_protocol", + "bittorrent-http-protocol", + "bittorrent-primitives", + "chrono", + "derive_more", + "futures", + "local-ip-address", + "mockall", + "r2d2", + "r2d2_mysql", + "r2d2_sqlite", + "rand", + "serde", + "serde_json", + "thiserror 2.0.11", + "tokio", + "torrust-tracker-api-client", + "torrust-tracker-clock", + "torrust-tracker-configuration", + "torrust-tracker-located-error", + "torrust-tracker-primitives", + "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", + "tracing", +] + [[package]] name = "bitvec" version = "1.0.1" @@ -3931,6 +3961,7 @@ dependencies = [ "bittorrent-http-protocol", "bittorrent-primitives", "bittorrent-tracker-client", + "bittorrent-tracker-core", "bloom", "blowfish", "camino", diff --git a/Cargo.toml b/Cargo.toml index 4b4862cca..6c9f7f22d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,6 +42,7 @@ axum-server 
= { version = "0", features = ["tls-rustls-no-provider"] } bittorrent-http-protocol = { version = "3.0.0-develop", path = "packages/http-protocol" } bittorrent-primitives = "0.1.0" bittorrent-tracker-client = { version = "3.0.0-develop", path = "packages/tracker-client" } +bittorrent-tracker-core = { version = "3.0.0-develop", path = "packages/tracker-core" } bloom = "0.3.2" blowfish = "0" camino = { version = "1", features = ["serde", "serde1"] } @@ -90,7 +91,17 @@ uuid = { version = "1", features = ["v4"] } zerocopy = "0.7" [package.metadata.cargo-machete] -ignored = ["crossbeam-skiplist", "dashmap", "figment", "parking_lot", "serde_bytes"] +ignored = [ + "crossbeam-skiplist", + "dashmap", + "figment", + "parking_lot", + "r2d2", + "r2d2_mysql", + "r2d2_sqlite", + "serde_bytes", + "torrust-tracker-torrent-repository", +] [dev-dependencies] local-ip-address = "0" @@ -109,6 +120,7 @@ members = [ "packages/torrent-repository", "packages/tracker-api-client", "packages/tracker-client", + "packages/tracker-core", ] [profile.dev] diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml new file mode 100644 index 000000000..b38f7c90f --- /dev/null +++ b/packages/tracker-core/Cargo.toml @@ -0,0 +1,43 @@ +[package] +description = "A library with the core functionality needed to implement a BitTorrent tracker." 
+keywords = ["api", "bittorrent", "core", "library", "tracker"] +name = "bittorrent-tracker-core" +readme = "README.md" + +authors.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +aquatic_udp_protocol = "0" +bittorrent-http-protocol = { version = "3.0.0-develop", path = "../http-protocol" } +bittorrent-primitives = "0.1.0" +chrono = { version = "0", default-features = false, features = ["clock"] } +derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } +futures = "0" +r2d2 = "0" +r2d2_mysql = "25" +r2d2_sqlite = { version = "0", features = ["bundled"] } +rand = "0" +serde = { version = "1", features = ["derive"] } +serde_json = { version = "1", features = ["preserve_order"] } +thiserror = "2" +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } +torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } +tracing = "0" + +[dev-dependencies] +local-ip-address = "0" +mockall = "0" +torrust-tracker-api-client = { version = "3.0.0-develop", path = "../tracker-api-client" } +torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } diff --git a/packages/tracker-core/LICENSE b/packages/tracker-core/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/tracker-core/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 
2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. 
+ + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. 
Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/packages/tracker-core/README.md b/packages/tracker-core/README.md new file mode 100644 index 000000000..1575cda49 --- /dev/null +++ b/packages/tracker-core/README.md @@ -0,0 +1,15 @@ +# BitTorrent Core Tracker library + +A library with the core functionality needed to implement a BitTorrent tracker. + +You usually don’t need to use this library directly. Instead, you should use the [Torrust Tracker](https://github.com/torrust/torrust-tracker). If you want to build your own tracker, you can use this library as the core functionality. In that case, you should add the delivery layer (HTTP or UDP) on top of this library. + +> **Disclaimer**: This library is actively under development. We’re currently extracting and refining common types from the[Torrust Tracker](https://github.com/torrust/torrust-tracker) to make them available to the BitTorrent community in Rust. While these types are functional, they are not yet ready for use in production or third-party projects. + +## Documentation + +[Crate documentation](https://docs.rs/bittorrent-tracker-core). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). 
diff --git a/migrations/README.md b/packages/tracker-core/migrations/README.md similarity index 100% rename from migrations/README.md rename to packages/tracker-core/migrations/README.md diff --git a/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql b/packages/tracker-core/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql similarity index 100% rename from migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql rename to packages/tracker-core/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql diff --git a/migrations/mysql/20240730183500_torrust_tracker_keys_valid_until_nullable.sql b/packages/tracker-core/migrations/mysql/20240730183500_torrust_tracker_keys_valid_until_nullable.sql similarity index 100% rename from migrations/mysql/20240730183500_torrust_tracker_keys_valid_until_nullable.sql rename to packages/tracker-core/migrations/mysql/20240730183500_torrust_tracker_keys_valid_until_nullable.sql diff --git a/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql b/packages/tracker-core/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql similarity index 100% rename from migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql rename to packages/tracker-core/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql diff --git a/migrations/sqlite/20240730183500_torrust_tracker_keys_valid_until_nullable.sql b/packages/tracker-core/migrations/sqlite/20240730183500_torrust_tracker_keys_valid_until_nullable.sql similarity index 100% rename from migrations/sqlite/20240730183500_torrust_tracker_keys_valid_until_nullable.sql rename to packages/tracker-core/migrations/sqlite/20240730183500_torrust_tracker_keys_valid_until_nullable.sql diff --git a/src/core/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs similarity index 92% rename from src/core/announce_handler.rs rename to packages/tracker-core/src/announce_handler.rs 
index 816663bf6..877555d1c 100644 --- a/src/core/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -176,9 +176,9 @@ mod tests { use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration; - use crate::core::announce_handler::AnnounceHandler; - use crate::core::core_tests::initialize_handlers; - use crate::core::scrape_handler::ScrapeHandler; + use crate::announce_handler::AnnounceHandler; + use crate::core_tests::initialize_handlers; + use crate::scrape_handler::ScrapeHandler; fn public_tracker() -> (Arc, Arc) { let config = configuration::ephemeral_public(); @@ -222,17 +222,17 @@ mod tests { use std::sync::Arc; - use crate::core::announce_handler::tests::the_announce_handler::{ + use crate::announce_handler::tests::the_announce_handler::{ peer_ip, public_tracker, sample_peer_1, sample_peer_2, }; - use crate::core::announce_handler::PeersWanted; - use crate::core::core_tests::{sample_info_hash, sample_peer}; + use crate::announce_handler::PeersWanted; + use crate::core_tests::{sample_info_hash, sample_peer}; mod should_assign_the_ip_to_the_peer { use std::net::{IpAddr, Ipv4Addr}; - use crate::core::announce_handler::assign_ip_address_to_peer; + use crate::announce_handler::assign_ip_address_to_peer; #[test] fn using_the_source_ip_instead_of_the_ip_in_the_announce_request() { @@ -248,7 +248,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::str::FromStr; - use crate::core::announce_handler::assign_ip_address_to_peer; + use crate::announce_handler::assign_ip_address_to_peer; #[test] fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { @@ -289,7 +289,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::str::FromStr; - use crate::core::announce_handler::assign_ip_address_to_peer; + use crate::announce_handler::assign_ip_address_to_peer; #[test] fn 
it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { @@ -357,9 +357,9 @@ mod tests { mod it_should_update_the_swarm_stats_for_the_torrent { - use crate::core::announce_handler::tests::the_announce_handler::{peer_ip, public_tracker}; - use crate::core::announce_handler::PeersWanted; - use crate::core::core_tests::{completed_peer, leecher, sample_info_hash, seeder, started_peer}; + use crate::announce_handler::tests::the_announce_handler::{peer_ip, public_tracker}; + use crate::announce_handler::PeersWanted; + use crate::core_tests::{completed_peer, leecher, sample_info_hash, seeder, started_peer}; #[tokio::test] async fn when_the_peer_is_a_seeder() { @@ -411,13 +411,13 @@ mod tests { use torrust_tracker_test_helpers::configuration; use torrust_tracker_torrent_repository::entry::EntrySync; - use crate::core::announce_handler::tests::the_announce_handler::peer_ip; - use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; - use crate::core::core_tests::{sample_info_hash, sample_peer}; - use crate::core::databases::setup::initialize_database; - use crate::core::torrent::manager::TorrentsManager; - use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use crate::announce_handler::tests::the_announce_handler::peer_ip; + use crate::announce_handler::{AnnounceHandler, PeersWanted}; + use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::databases::setup::initialize_database; + use crate::torrent::manager::TorrentsManager; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; #[tokio::test] async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { diff --git a/src/core/authentication/handler.rs b/packages/tracker-core/src/authentication/handler.rs similarity index 
91% rename from src/core/authentication/handler.rs rename to packages/tracker-core/src/authentication/handler.rs index d6477a948..1d74c7dfa 100644 --- a/src/core/authentication/handler.rs +++ b/packages/tracker-core/src/authentication/handler.rs @@ -8,8 +8,8 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::key::repository::in_memory::InMemoryKeyRepository; use super::key::repository::persisted::DatabaseKeyRepository; use super::{key, CurrentClock, Key, PeerKey}; -use crate::core::databases; -use crate::core::error::PeerKeyError; +use crate::databases; +use crate::error::PeerKeyError; /// This type contains the info needed to add a new tracker key. /// @@ -243,10 +243,10 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; - use crate::core::authentication::handler::KeysHandler; - use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; - use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; - use crate::core::databases::setup::initialize_database; + use crate::authentication::handler::KeysHandler; + use crate::authentication::key::repository::in_memory::InMemoryKeyRepository; + use crate::authentication::key::repository::persisted::DatabaseKeyRepository; + use crate::databases::setup::initialize_database; fn instantiate_keys_handler() -> KeysHandler { let config = configuration::ephemeral_private(); @@ -280,7 +280,7 @@ mod tests { use torrust_tracker_clock::clock::Time; - use crate::core::authentication::handler::tests::the_keys_handler_when_tracker_is_configured_as_private::instantiate_keys_handler; + use crate::authentication::handler::tests::the_keys_handler_when_tracker_is_configured_as_private::instantiate_keys_handler; use crate::CurrentClock; #[tokio::test] @@ -301,9 +301,9 @@ mod tests { use torrust_tracker_clock::clock::Time; - use 
crate::core::authentication::handler::tests::the_keys_handler_when_tracker_is_configured_as_private::instantiate_keys_handler; - use crate::core::authentication::handler::AddKeyRequest; - use crate::core::authentication::Key; + use crate::authentication::handler::tests::the_keys_handler_when_tracker_is_configured_as_private::instantiate_keys_handler; + use crate::authentication::handler::AddKeyRequest; + use crate::authentication::Key; use crate::CurrentClock; #[tokio::test] @@ -329,7 +329,7 @@ mod tests { mod with_permanent_and { mod randomly_generated_keys { - use crate::core::authentication::handler::tests::the_keys_handler_when_tracker_is_configured_as_private::instantiate_keys_handler; + use crate::authentication::handler::tests::the_keys_handler_when_tracker_is_configured_as_private::instantiate_keys_handler; #[tokio::test] async fn it_should_generate_the_key() { @@ -343,9 +343,9 @@ mod tests { mod pre_generated_keys { - use crate::core::authentication::handler::tests::the_keys_handler_when_tracker_is_configured_as_private::instantiate_keys_handler; - use crate::core::authentication::handler::AddKeyRequest; - use crate::core::authentication::Key; + use crate::authentication::handler::tests::the_keys_handler_when_tracker_is_configured_as_private::instantiate_keys_handler; + use crate::authentication::handler::AddKeyRequest; + use crate::authentication::Key; #[tokio::test] async fn it_should_add_a_pre_generated_key() { diff --git a/src/core/authentication/key/mod.rs b/packages/tracker-core/src/authentication/key/mod.rs similarity index 95% rename from src/core/authentication/key/mod.rs rename to packages/tracker-core/src/authentication/key/mod.rs index 49d559e42..37fc4764b 100644 --- a/src/core/authentication/key/mod.rs +++ b/packages/tracker-core/src/authentication/key/mod.rs @@ -12,7 +12,7 @@ //! Keys are stored in this struct: //! //! ```rust,no_run -//! use torrust_tracker_lib::core::authentication::Key; +//! 
use bittorrent_tracker_core::authentication::Key; //! use torrust_tracker_primitives::DurationSinceUnixEpoch; //! //! pub struct PeerKey { @@ -28,7 +28,7 @@ //! You can generate a new key valid for `9999` seconds and `0` nanoseconds from the current time with the following: //! //! ```rust,no_run -//! use torrust_tracker_lib::core::authentication; +//! use bittorrent_tracker_core::authentication; //! use std::time::Duration; //! //! let expiring_key = authentication::key::generate_key(Some(Duration::new(9999, 0))); @@ -54,9 +54,14 @@ use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; use torrust_tracker_located_error::{DynError, LocatedError}; use torrust_tracker_primitives::DurationSinceUnixEpoch; -use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; use crate::CurrentClock; +/// HTTP tracker authentication key length. +/// +/// For more information see function [`generate_key`](crate::authentication::key::generate_key) to generate the +/// [`PeerKey`](crate::authentication::PeerKey). +pub const AUTH_KEY_LENGTH: usize = 32; + /// It generates a new permanent random key [`PeerKey`]. #[must_use] pub fn generate_permanent_key() -> PeerKey { @@ -200,7 +205,7 @@ impl Key { /// Error returned when a key cannot be parsed from a string. /// /// ```text -/// use torrust_tracker_lib::core::authentication::Key; +/// use bittorrent_tracker_core::authentication::Key; /// use std::str::FromStr; /// /// let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; @@ -230,7 +235,7 @@ impl FromStr for Key { } /// Verification error. Error returned when an [`PeerKey`] cannot be -/// verified with the (`crate::core::authentication::verify_key`) function. +/// verified with the (`crate::authentication::verify_key`) function. 
#[derive(Debug, Error)] #[allow(dead_code)] pub enum Error { @@ -261,7 +266,7 @@ mod tests { mod key { use std::str::FromStr; - use crate::core::authentication::Key; + use crate::authentication::Key; #[test] fn should_be_parsed_from_an_string() { @@ -296,7 +301,7 @@ mod tests { use torrust_tracker_clock::clock; use torrust_tracker_clock::clock::stopped::Stopped as _; - use crate::core::authentication; + use crate::authentication; #[test] fn should_be_parsed_from_an_string() { diff --git a/src/core/authentication/key/repository/in_memory.rs b/packages/tracker-core/src/authentication/key/repository/in_memory.rs similarity index 95% rename from src/core/authentication/key/repository/in_memory.rs rename to packages/tracker-core/src/authentication/key/repository/in_memory.rs index a15f9ecfa..41d34604b 100644 --- a/src/core/authentication/key/repository/in_memory.rs +++ b/packages/tracker-core/src/authentication/key/repository/in_memory.rs @@ -1,4 +1,4 @@ -use crate::core::authentication::key::{Key, PeerKey}; +use crate::authentication::key::{Key, PeerKey}; /// In-memory implementation of the authentication key repository. 
#[derive(Debug, Default)] diff --git a/src/core/authentication/key/repository/mod.rs b/packages/tracker-core/src/authentication/key/repository/mod.rs similarity index 100% rename from src/core/authentication/key/repository/mod.rs rename to packages/tracker-core/src/authentication/key/repository/mod.rs diff --git a/src/core/authentication/key/repository/persisted.rs b/packages/tracker-core/src/authentication/key/repository/persisted.rs similarity index 92% rename from src/core/authentication/key/repository/persisted.rs rename to packages/tracker-core/src/authentication/key/repository/persisted.rs index 736a409eb..322ab2913 100644 --- a/src/core/authentication/key/repository/persisted.rs +++ b/packages/tracker-core/src/authentication/key/repository/persisted.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use crate::core::authentication::key::{Key, PeerKey}; -use crate::core::databases::{self, Database}; +use crate::authentication::key::{Key, PeerKey}; +use crate::databases::{self, Database}; /// The database repository for the authentication keys. 
pub struct DatabaseKeyRepository { diff --git a/src/core/authentication/mod.rs b/packages/tracker-core/src/authentication/mod.rs similarity index 86% rename from src/core/authentication/mod.rs rename to packages/tracker-core/src/authentication/mod.rs index eddcc1ae7..9609733da 100644 --- a/src/core/authentication/mod.rs +++ b/packages/tracker-core/src/authentication/mod.rs @@ -22,12 +22,12 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; - use crate::core::authentication::handler::KeysHandler; - use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; - use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; - use crate::core::authentication::service; - use crate::core::authentication::service::AuthenticationService; - use crate::core::databases::setup::initialize_database; + use crate::authentication::handler::KeysHandler; + use crate::authentication::key::repository::in_memory::InMemoryKeyRepository; + use crate::authentication::key::repository::persisted::DatabaseKeyRepository; + use crate::authentication::service; + use crate::authentication::service::AuthenticationService; + use crate::databases::setup::initialize_database; fn instantiate_keys_manager_and_authentication() -> (Arc, Arc) { let config = configuration::ephemeral_private(); @@ -97,11 +97,11 @@ mod tests { mod randomly_generated_keys { use std::time::Duration; - use crate::core::authentication::tests::the_tracker_configured_as_private::{ + use crate::authentication::tests::the_tracker_configured_as_private::{ instantiate_keys_manager_and_authentication, instantiate_keys_manager_and_authentication_with_checking_keys_expiration_disabled, }; - use crate::core::authentication::Key; + use crate::authentication::Key; #[tokio::test] async fn it_should_authenticate_a_peer_with_the_key() { @@ -132,12 +132,12 @@ mod tests { mod pre_generated_keys { - use 
crate::core::authentication::handler::AddKeyRequest; - use crate::core::authentication::tests::the_tracker_configured_as_private::{ + use crate::authentication::handler::AddKeyRequest; + use crate::authentication::tests::the_tracker_configured_as_private::{ instantiate_keys_manager_and_authentication, instantiate_keys_manager_and_authentication_with_checking_keys_expiration_disabled, }; - use crate::core::authentication::Key; + use crate::authentication::Key; #[tokio::test] async fn it_should_authenticate_a_peer_with_the_key() { @@ -177,7 +177,7 @@ mod tests { mod with_permanent_and { mod randomly_generated_keys { - use crate::core::authentication::tests::the_tracker_configured_as_private::instantiate_keys_manager_and_authentication; + use crate::authentication::tests::the_tracker_configured_as_private::instantiate_keys_manager_and_authentication; #[tokio::test] async fn it_should_authenticate_a_peer_with_the_key() { @@ -192,9 +192,9 @@ mod tests { } mod pre_generated_keys { - use crate::core::authentication::handler::AddKeyRequest; - use crate::core::authentication::tests::the_tracker_configured_as_private::instantiate_keys_manager_and_authentication; - use crate::core::authentication::Key; + use crate::authentication::handler::AddKeyRequest; + use crate::authentication::tests::the_tracker_configured_as_private::instantiate_keys_manager_and_authentication; + use crate::authentication::Key; #[tokio::test] async fn it_should_authenticate_a_peer_with_the_key() { diff --git a/src/core/authentication/service.rs b/packages/tracker-core/src/authentication/service.rs similarity index 93% rename from src/core/authentication/service.rs rename to packages/tracker-core/src/authentication/service.rs index d100e3a70..3e32bfbcb 100644 --- a/src/core/authentication/service.rs +++ b/packages/tracker-core/src/authentication/service.rs @@ -79,9 +79,9 @@ mod tests { use torrust_tracker_test_helpers::configuration; - use crate::core::authentication; - use 
crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; - use crate::core::authentication::service::AuthenticationService; + use crate::authentication; + use crate::authentication::key::repository::in_memory::InMemoryKeyRepository; + use crate::authentication::service::AuthenticationService; fn instantiate_authentication() -> AuthenticationService { let config = configuration::ephemeral_private(); diff --git a/src/core/core_tests.rs b/packages/tracker-core/src/core_tests.rs similarity index 100% rename from src/core/core_tests.rs rename to packages/tracker-core/src/core_tests.rs diff --git a/src/core/databases/driver.rs b/packages/tracker-core/src/databases/driver.rs similarity index 90% rename from src/core/databases/driver.rs rename to packages/tracker-core/src/databases/driver.rs index b5cb797aa..7b532f3f0 100644 --- a/src/core/databases/driver.rs +++ b/packages/tracker-core/src/databases/driver.rs @@ -30,8 +30,8 @@ pub enum Driver { /// Example for `SQLite3`: /// /// ```text -/// use torrust_tracker_lib::core::databases; -/// use torrust_tracker_lib::core::databases::driver::Driver; +/// use bittorrent_tracker_core::databases; +/// use bittorrent_tracker_core::databases::driver::Driver; /// /// let db_driver = Driver::Sqlite3; /// let db_path = "./storage/tracker/lib/database/sqlite3.db".to_string(); @@ -41,8 +41,8 @@ pub enum Driver { /// Example for `MySQL`: /// /// ```text -/// use torrust_tracker_lib::core::databases; -/// use torrust_tracker_lib::core::databases::driver::Driver; +/// use bittorrent_tracker_core::databases; +/// use bittorrent_tracker_core::databases::driver::Driver; /// /// let db_driver = Driver::MySQL; /// let db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker".to_string(); diff --git a/src/core/databases/error.rs b/packages/tracker-core/src/databases/error.rs similarity index 100% rename from src/core/databases/error.rs rename to packages/tracker-core/src/databases/error.rs diff --git 
a/src/core/databases/mod.rs b/packages/tracker-core/src/databases/mod.rs similarity index 98% rename from src/core/databases/mod.rs rename to packages/tracker-core/src/databases/mod.rs index dec6b799d..9b9ac8e9e 100644 --- a/src/core/databases/mod.rs +++ b/packages/tracker-core/src/databases/mod.rs @@ -55,7 +55,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::PersistentTorrents; use self::error::Error; -use crate::core::authentication::{self, Key}; +use crate::authentication::{self, Key}; struct Builder where @@ -200,7 +200,7 @@ pub trait Database: Sync + Send { /// It gets an expiring authentication key from the database. /// - /// It returns `Some(PeerKey)` if a [`PeerKey`](crate::core::authentication::PeerKey) + /// It returns `Some(PeerKey)` if a [`PeerKey`](crate::authentication::PeerKey) /// with the input [`Key`] exists, `None` otherwise. /// /// # Context: Authentication Keys diff --git a/src/core/databases/mysql.rs b/packages/tracker-core/src/databases/mysql.rs similarity index 98% rename from src/core/databases/mysql.rs rename to packages/tracker-core/src/databases/mysql.rs index 213f6300a..fb39b781d 100644 --- a/src/core/databases/mysql.rs +++ b/packages/tracker-core/src/databases/mysql.rs @@ -11,8 +11,8 @@ use torrust_tracker_primitives::PersistentTorrents; use super::driver::Driver; use super::{Database, Error}; -use crate::core::authentication::{self, Key}; -use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; +use crate::authentication::key::AUTH_KEY_LENGTH; +use crate::authentication::{self, Key}; const DRIVER: Driver = Driver::MySQL; diff --git a/src/core/databases/setup.rs b/packages/tracker-core/src/databases/setup.rs similarity index 100% rename from src/core/databases/setup.rs rename to packages/tracker-core/src/databases/setup.rs diff --git a/src/core/databases/sqlite.rs b/packages/tracker-core/src/databases/sqlite.rs similarity index 99% rename from src/core/databases/sqlite.rs rename to 
packages/tracker-core/src/databases/sqlite.rs index 6fe9ac599..a7552ec11 100644 --- a/src/core/databases/sqlite.rs +++ b/packages/tracker-core/src/databases/sqlite.rs @@ -11,7 +11,7 @@ use torrust_tracker_primitives::{DurationSinceUnixEpoch, PersistentTorrents}; use super::driver::Driver; use super::{Database, Error}; -use crate::core::authentication::{self, Key}; +use crate::authentication::{self, Key}; const DRIVER: Driver = Driver::Sqlite3; diff --git a/src/core/error.rs b/packages/tracker-core/src/error.rs similarity index 100% rename from src/core/error.rs rename to packages/tracker-core/src/error.rs diff --git a/packages/tracker-core/src/lib.rs b/packages/tracker-core/src/lib.rs new file mode 100644 index 000000000..2fb2d936d --- /dev/null +++ b/packages/tracker-core/src/lib.rs @@ -0,0 +1,585 @@ +//! The core `tracker` module contains the generic `BitTorrent` tracker logic which is independent of the delivery layer. +//! +//! It contains the tracker services and their dependencies. It's a domain layer which does not +//! specify how the end user should connect to the `Tracker`. +//! +//! Typically this module is intended to be used by higher modules like: +//! +//! - A UDP tracker +//! - A HTTP tracker +//! - A tracker REST API +//! +//! ```text +//! Delivery layer Domain layer +//! +//! HTTP tracker | +//! UDP tracker |> Core tracker +//! Tracker REST API | +//! ``` +//! +//! # Table of contents +//! +//! - [Tracker](#tracker) +//! - [Announce request](#announce-request) +//! - [Scrape request](#scrape-request) +//! - [Torrents](#torrents) +//! - [Peers](#peers) +//! - [Configuration](#configuration) +//! - [Services](#services) +//! - [Authentication](#authentication) +//! - [Statistics](#statistics) +//! - [Persistence](#persistence) +//! +//! # Tracker +//! +//! The `Tracker` is the main struct in this module. `The` tracker has some groups of responsibilities: +//! +//! - **Core tracker**: it handles the information about torrents and peers. +//! 
- **Authentication**: it handles authentication keys which are used by HTTP trackers. +//! - **Authorization**: it handles the permission to perform requests. +//! - **Whitelist**: when the tracker runs in `listed` or `private_listed` mode all operations are restricted to whitelisted torrents. +//! - **Statistics**: it keeps and serves the tracker statistics. +//! +//! Refer to [torrust-tracker-configuration](https://docs.rs/torrust-tracker-configuration) crate docs to get more information about the tracker settings. +//! +//! ## Announce request +//! +//! Handling `announce` requests is the most important task for a `BitTorrent` tracker. +//! +//! A `BitTorrent` swarm is a network of peers that are all trying to download the same torrent. +//! When a peer wants to find other peers it announces itself to the swarm via the tracker. +//! The peer sends its data to the tracker so that the tracker can add it to the swarm. +//! The tracker responds to the peer with the list of other peers in the swarm so that +//! the peer can contact them to start downloading pieces of the file from them. +//! +//! Once you have instantiated the `AnnounceHandler` you can `announce` a new [`peer::Peer`](torrust_tracker_primitives::peer::Peer) with: +//! +//! ```rust,no_run +//! use std::net::SocketAddr; +//! use std::net::IpAddr; +//! use std::net::Ipv4Addr; +//! use std::str::FromStr; +//! +//! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +//! use torrust_tracker_primitives::DurationSinceUnixEpoch; +//! use torrust_tracker_primitives::peer; +//! use bittorrent_primitives::info_hash::InfoHash; +//! +//! let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); +//! +//! let peer = peer::Peer { +//! peer_id: PeerId(*b"-qB00000000000000001"), +//! peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), +//! updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), +//! uploaded: NumberOfBytes::new(0), +//! 
downloaded: NumberOfBytes::new(0), +//! left: NumberOfBytes::new(0), +//! event: AnnounceEvent::Completed, +//! }; +//! +//! let peer_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); +//! ``` +//! +//! ```text +//! let announce_data = announce_handler.announce(&info_hash, &mut peer, &peer_ip).await; +//! ``` +//! +//! The `Tracker` returns the list of peers for the torrent with the infohash `3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0`, +//! filtering out the peer that is making the `announce` request. +//! +//! > **NOTICE**: that the peer argument is mutable because the `Tracker` can change the peer IP if the peer is using a loopback IP. +//! +//! The `peer_ip` argument is the resolved peer ip. It's a common practice that trackers ignore the peer ip in the `announce` request params, +//! and resolve the peer ip using the IP of the client making the request. As the tracker is a domain service, the peer IP must be provided +//! for the `Tracker` user, which is usually a higher component with access the the request metadata, for example, connection data, proxy headers, +//! etcetera. +//! +//! The returned struct is: +//! +//! ```rust,no_run +//! use torrust_tracker_primitives::peer; +//! use torrust_tracker_configuration::AnnouncePolicy; +//! +//! pub struct AnnounceData { +//! pub peers: Vec, +//! pub swarm_stats: SwarmMetadata, +//! pub policy: AnnouncePolicy, // the tracker announce policy. +//! } +//! +//! pub struct SwarmMetadata { +//! pub completed: u32, // The number of peers that have ever completed downloading +//! pub seeders: u32, // The number of active peers that have completed downloading (seeders) +//! pub leechers: u32, // The number of active peers that have not completed downloading (leechers) +//! } +//! +//! // Core tracker configuration +//! pub struct AnnounceInterval { +//! // ... +//! pub interval: u32, // Interval in seconds that the client should wait between sending regular announce requests to the tracker +//! 
pub interval_min: u32, // Minimum announce interval. Clients must not reannounce more frequently than this +//! // ... +//! } +//! ``` +//! +//! Refer to `BitTorrent` BEPs and other sites for more information about the `announce` request: +//! +//! - [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +//! - [BEP 23. Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) +//! - [Vuze docs](https://wiki.vuze.com/w/Announce) +//! +//! ## Scrape request +//! +//! The `scrape` request allows clients to query metadata about the swarm in bulk. +//! +//! An `scrape` request includes a list of infohashes whose swarm metadata you want to collect. +//! +//! The returned struct is: +//! +//! ```rust,no_run +//! use bittorrent_primitives::info_hash::InfoHash; +//! use std::collections::HashMap; +//! +//! pub struct ScrapeData { +//! pub files: HashMap, +//! } +//! +//! pub struct SwarmMetadata { +//! pub complete: u32, // The number of active peers that have completed downloading (seeders) +//! pub downloaded: u32, // The number of peers that have ever completed downloading +//! pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) +//! } +//! ``` +//! +//! The JSON representation of a sample `scrape` response would be like the following: +//! +//! ```json +//! { +//! 'files': { +//! 'xxxxxxxxxxxxxxxxxxxx': {'complete': 11, 'downloaded': 13772, 'incomplete': 19}, +//! 'yyyyyyyyyyyyyyyyyyyy': {'complete': 21, 'downloaded': 206, 'incomplete': 20} +//! } +//! } +//! ``` +//! +//! `xxxxxxxxxxxxxxxxxxxx` and `yyyyyyyyyyyyyyyyyyyy` are 20-byte infohash arrays. +//! There are two data structures for infohashes: byte arrays and hex strings: +//! +//! ```rust,no_run +//! use bittorrent_primitives::info_hash::InfoHash; +//! use std::str::FromStr; +//! +//! let info_hash: InfoHash = [255u8; 20].into(); +//! +//! assert_eq!( +//! info_hash, +//! 
InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() +//! ); +//! ``` +//! Refer to `BitTorrent` BEPs and other sites for more information about the `scrape` request: +//! +//! - [BEP 48. Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) +//! - [BEP 15. UDP Tracker Protocol for `BitTorrent`. Scrape section](https://www.bittorrent.org/beps/bep_0015.html) +//! - [Vuze docs](https://wiki.vuze.com/w/Scrape) +//! +//! ## Torrents +//! +//! The [`torrent`] module contains all the data structures stored by the `Tracker` except for peers. +//! +//! We can represent the data stored in memory internally by the `Tracker` with this JSON object: +//! +//! ```json +//! { +//! "c1277613db1d28709b034a017ab2cae4be07ae10": { +//! "completed": 0, +//! "peers": { +//! "-qB00000000000000001": { +//! "peer_id": "-qB00000000000000001", +//! "peer_addr": "2.137.87.41:1754", +//! "updated": 1672419840, +//! "uploaded": 120, +//! "downloaded": 60, +//! "left": 60, +//! "event": "started" +//! }, +//! "-qB00000000000000002": { +//! "peer_id": "-qB00000000000000002", +//! "peer_addr": "23.17.287.141:2345", +//! "updated": 1679415984, +//! "uploaded": 80, +//! "downloaded": 20, +//! "left": 40, +//! "event": "started" +//! } +//! } +//! } +//! } +//! ``` +//! +//! The `Tracker` maintains an indexed-by-info-hash list of torrents. For each torrent, it stores a torrent `Entry`. +//! The torrent entry has two attributes: +//! +//! - `completed`: which is hte number of peers that have completed downloading the torrent file/s. As they have completed downloading, +//! they have a full version of the torrent data, and they can provide the full data to other peers. That's why they are also known as "seeders". +//! - `peers`: an indexed and orderer list of peer for the torrent. Each peer contains the data received from the peer in the `announce` request. +//! +//! 
The [`torrent`] module not only contains the original data obtained from peer via `announce` requests, it also contains +//! aggregate data that can be derived from the original data. For example: +//! +//! ```rust,no_run +//! pub struct SwarmMetadata { +//! pub complete: u32, // The number of active peers that have completed downloading (seeders) +//! pub downloaded: u32, // The number of peers that have ever completed downloading +//! pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) +//! } +//! +//! ``` +//! +//! > **NOTICE**: that `complete` or `completed` peers are the peers that have completed downloading, but only the active ones are considered "seeders". +//! +//! `SwarmMetadata` struct follows name conventions for `scrape` responses. See [BEP 48](https://www.bittorrent.org/beps/bep_0048.html), while `SwarmMetadata` +//! is used for the rest of cases. +//! +//! Refer to [`torrent`] module for more details about these data structures. +//! +//! ## Peers +//! +//! A `Peer` is the struct used by the `Tracker` to keep peers data: +//! +//! ```rust,no_run +//! use std::net::SocketAddr; + +//! use aquatic_udp_protocol::PeerId; +//! use torrust_tracker_primitives::DurationSinceUnixEpoch; +//! use aquatic_udp_protocol::NumberOfBytes; +//! use aquatic_udp_protocol::AnnounceEvent; +//! +//! pub struct Peer { +//! pub peer_id: PeerId, // The peer ID +//! pub peer_addr: SocketAddr, // Peer socket address +//! pub updated: DurationSinceUnixEpoch, // Last time (timestamp) when the peer was updated +//! pub uploaded: NumberOfBytes, // Number of bytes the peer has uploaded so far +//! pub downloaded: NumberOfBytes, // Number of bytes the peer has downloaded so far +//! pub left: NumberOfBytes, // The number of bytes this peer still has to download +//! pub event: AnnounceEvent, // The event the peer has announced: `started`, `completed`, `stopped` +//! } +//! ``` +//! +//! 
Notice that most of the attributes are obtained from the `announce` request. +//! For example, an HTTP announce request would contain the following `GET` parameters: +//! +//! +//! +//! The `Tracker` keeps an in-memory ordered data structure with all the torrents and a list of peers for each torrent, together with some swarm metrics. +//! +//! We can represent the data stored in memory with this JSON object: +//! +//! ```json +//! { +//! "c1277613db1d28709b034a017ab2cae4be07ae10": { +//! "completed": 0, +//! "peers": { +//! "-qB00000000000000001": { +//! "peer_id": "-qB00000000000000001", +//! "peer_addr": "2.137.87.41:1754", +//! "updated": 1672419840, +//! "uploaded": 120, +//! "downloaded": 60, +//! "left": 60, +//! "event": "started" +//! }, +//! "-qB00000000000000002": { +//! "peer_id": "-qB00000000000000002", +//! "peer_addr": "23.17.287.141:2345", +//! "updated": 1679415984, +//! "uploaded": 80, +//! "downloaded": 20, +//! "left": 40, +//! "event": "started" +//! } +//! } +//! } +//! } +//! ``` +//! +//! That JSON object does not exist, it's only a representation of the `Tracker` torrents data. +//! +//! `c1277613db1d28709b034a017ab2cae4be07ae10` is the torrent infohash and `completed` contains the number of peers +//! that have a full version of the torrent data, also known as seeders. +//! +//! Refer to [`peer`](torrust_tracker_primitives::peer) for more information about peers. +//! +//! # Configuration +//! +//! You can control the behavior of this module with the module settings: +//! +//! ```toml +//! [logging] +//! threshold = "debug" +//! +//! [core] +//! inactive_peer_cleanup_interval = 600 +//! listed = false +//! private = false +//! tracker_usage_statistics = true +//! +//! [core.announce_policy] +//! interval = 120 +//! interval_min = 120 +//! +//! [core.database] +//! driver = "sqlite3" +//! path = "./storage/tracker/lib/database/sqlite3.db" +//! +//! [core.net] +//! on_reverse_proxy = false +//! external_ip = "2.137.87.41" +//! +//! 
[core.tracker_policy] +//! max_peer_timeout = 900 +//! persistent_torrent_completed_stat = false +//! remove_peerless_torrents = true +//! ``` +//! +//! Refer to the [`configuration` module documentation](https://docs.rs/torrust-tracker-configuration) to get more information about all options. +//! +//! # Services +//! +//! Services are domain services on top of the core tracker domain. Right now there are two types of service: +//! +//! - For statistics: [`crate::core::statistics::services`] +//! - For torrents: [`crate::core::torrent::services`] +//! +//! Services usually format the data inside the tracker to make it easier to consume by other parts. +//! They also decouple the internal data structure, used by the tracker, from the way we deliver that data to the consumers. +//! The internal data structure is designed for performance or low memory consumption. And it should be changed +//! without affecting the external consumers. +//! +//! Services can include extra features like pagination, for example. +//! +//! # Authentication +//! +//! One of the core `Tracker` responsibilities is to create and keep authentication keys. Auth keys are used by HTTP trackers +//! when the tracker is running in `private` or `private_listed` mode. +//! +//! HTTP tracker's clients need to obtain an auth key before starting requesting the tracker. Once the get one they have to include +//! a `PATH` param with the key in all the HTTP requests. For example, when a peer wants to `announce` itself it has to use the +//! HTTP tracker endpoint `GET /announce/:key`. +//! +//! The common way to obtain the keys is by using the tracker API directly or via other applications like the [Torrust Index](https://github.com/torrust/torrust-index). +//! +//! To learn more about tracker authentication, refer to the following modules : +//! +//! - [`authentication`] module. +//! - [`core`](crate::core) module. +//! - [`http`](crate::servers::http) module. +//! +//! # Statistics +//! +//! 
The `Tracker` keeps metrics for some events: +//! +//! ```rust,no_run +//! pub struct Metrics { +//! // IP version 4 +//! +//! // HTTP tracker +//! pub tcp4_connections_handled: u64, +//! pub tcp4_announces_handled: u64, +//! pub tcp4_scrapes_handled: u64, +//! +//! // UDP tracker +//! pub udp4_connections_handled: u64, +//! pub udp4_announces_handled: u64, +//! pub udp4_scrapes_handled: u64, +//! +//! // IP version 6 +//! +//! // HTTP tracker +//! pub tcp6_connections_handled: u64, +//! pub tcp6_announces_handled: u64, +//! pub tcp6_scrapes_handled: u64, +//! +//! // UDP tracker +//! pub udp6_connections_handled: u64, +//! pub udp6_announces_handled: u64, +//! pub udp6_scrapes_handled: u64, +//! } +//! ``` +//! +//! The metrics maintained by the `Tracker` are: +//! +//! - `connections_handled`: number of connections handled by the tracker +//! - `announces_handled`: number of `announce` requests handled by the tracker +//! - `scrapes_handled`: number of `scrape` handled requests by the tracker +//! +//! > **NOTICE**: as the HTTP tracker does not have an specific `connection` request like the UDP tracker, `connections_handled` are +//! > increased on every `announce` and `scrape` requests. +//! +//! The tracker exposes an event sender API that allows the tracker users to send events. When a higher application service handles a +//! `connection` , `announce` or `scrape` requests, it notifies the `Tracker` by sending statistics events. +//! +//! For example, the HTTP tracker would send an event like the following when it handles an `announce` request received from a peer using IP version 4. +//! +//! ```text +//! stats_event_sender.send_stats_event(statistics::event::Event::Tcp4Announce).await +//! ``` +//! +//! Refer to [`statistics`] module for more information about statistics. +//! +//! # Persistence +//! +//! Right now the `Tracker` is responsible for storing and load data into and +//! from the database, when persistence is enabled. +//! +//! 
There are three types of persistent object: +//! +//! - Authentication keys (only expiring keys) +//! - Torrent whitelist +//! - Torrent metrics +//! +//! Refer to [`databases`] module for more information about persistence. +pub mod announce_handler; +pub mod authentication; +pub mod databases; +pub mod error; +pub mod scrape_handler; +pub mod statistics; +pub mod torrent; +pub mod whitelist; + +pub mod core_tests; +pub mod peer_tests; + +use torrust_tracker_clock::clock; +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; + +#[cfg(test)] +mod tests { + mod the_tracker { + use std::net::{IpAddr, Ipv4Addr}; + use std::str::FromStr; + use std::sync::Arc; + + use torrust_tracker_test_helpers::configuration; + + use crate::announce_handler::AnnounceHandler; + use crate::core_tests::initialize_handlers; + use crate::scrape_handler::ScrapeHandler; + + fn initialize_handlers_for_public_tracker() -> (Arc, Arc) { + let config = configuration::ephemeral_public(); + initialize_handlers(&config) + } + + fn initialize_handlers_for_listed_tracker() -> (Arc, Arc) { + let config = configuration::ephemeral_listed(); + initialize_handlers(&config) + } + + // The client peer IP + fn peer_ip() -> IpAddr { + IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) + } + + mod for_all_config_modes { + + mod handling_a_scrape_request { + + use std::net::{IpAddr, Ipv4Addr}; + + use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::core::ScrapeData; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + + use crate::announce_handler::PeersWanted; + use crate::core_tests::{complete_peer, incomplete_peer}; + use crate::tests::the_tracker::initialize_handlers_for_public_tracker; + + #[tokio::test] + async fn 
it_should_return_the_swarm_metadata_for_the_requested_file_if_the_tracker_has_that_torrent() { + let (announce_handler, scrape_handler) = initialize_handlers_for_public_tracker(); + + let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // DevSkim: ignore DS173237 + + // Announce a "complete" peer for the torrent + let mut complete_peer = complete_peer(); + announce_handler.announce( + &info_hash, + &mut complete_peer, + &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10)), + &PeersWanted::All, + ); + + // Announce an "incomplete" peer for the torrent + let mut incomplete_peer = incomplete_peer(); + announce_handler.announce( + &info_hash, + &mut incomplete_peer, + &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11)), + &PeersWanted::All, + ); + + // Scrape + let scrape_data = scrape_handler.scrape(&vec![info_hash]).await; + + // The expected swarm metadata for the file + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file( + &info_hash, + SwarmMetadata { + complete: 0, // the "complete" peer does not count because it was not previously known + downloaded: 0, + incomplete: 1, // the "incomplete" peer we have just announced + }, + ); + + assert_eq!(scrape_data, expected_scrape_data); + } + } + } + + mod configured_as_whitelisted { + + mod handling_a_scrape_request { + + use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::core::ScrapeData; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + + use crate::announce_handler::PeersWanted; + use crate::core_tests::{complete_peer, incomplete_peer}; + use crate::tests::the_tracker::{initialize_handlers_for_listed_tracker, peer_ip}; + + #[tokio::test] + async fn it_should_return_the_zeroed_swarm_metadata_for_the_requested_file_if_it_is_not_whitelisted() { + let (announce_handler, scrape_handler) = initialize_handlers_for_listed_tracker(); + + let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // DevSkim: ignore 
DS173237 + + let mut peer = incomplete_peer(); + announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); + + // Announce twice to force non zeroed swarm metadata + let mut peer = complete_peer(); + announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); + + let scrape_data = scrape_handler.scrape(&vec![info_hash]).await; + + // The expected zeroed swarm metadata for the file + let mut expected_scrape_data = ScrapeData::empty(); + expected_scrape_data.add_file(&info_hash, SwarmMetadata::zeroed()); + + assert_eq!(scrape_data, expected_scrape_data); + } + } + } + } +} diff --git a/src/core/peer_tests.rs b/packages/tracker-core/src/peer_tests.rs similarity index 100% rename from src/core/peer_tests.rs rename to packages/tracker-core/src/peer_tests.rs diff --git a/src/core/scrape_handler.rs b/packages/tracker-core/src/scrape_handler.rs similarity index 95% rename from src/core/scrape_handler.rs rename to packages/tracker-core/src/scrape_handler.rs index 33bb6ca6a..60d15de71 100644 --- a/src/core/scrape_handler.rs +++ b/packages/tracker-core/src/scrape_handler.rs @@ -54,9 +54,9 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::ScrapeHandler; - use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; - use crate::core::whitelist::{self}; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::whitelist::repository::in_memory::InMemoryWhitelist; + use crate::whitelist::{self}; fn scrape_handler() -> ScrapeHandler { let config = configuration::ephemeral_public(); diff --git a/src/core/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs similarity index 96% rename from src/core/statistics/event/handler.rs rename to packages/tracker-core/src/statistics/event/handler.rs index 3c435145a..93ac05dde 100644 --- a/src/core/statistics/event/handler.rs +++ 
b/packages/tracker-core/src/statistics/event/handler.rs @@ -1,5 +1,5 @@ -use crate::core::statistics::event::{Event, UdpResponseKind}; -use crate::core::statistics::repository::Repository; +use crate::statistics::event::{Event, UdpResponseKind}; +use crate::statistics::repository::Repository; pub async fn handle_event(event: Event, stats_repository: &Repository) { match event { @@ -102,9 +102,9 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { #[cfg(test)] mod tests { - use crate::core::statistics::event::handler::handle_event; - use crate::core::statistics::event::Event; - use crate::core::statistics::repository::Repository; + use crate::statistics::event::handler::handle_event; + use crate::statistics::event::Event; + use crate::statistics::repository::Repository; #[tokio::test] async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { diff --git a/src/core/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs similarity index 84% rename from src/core/statistics/event/listener.rs rename to packages/tracker-core/src/statistics/event/listener.rs index 89ed7b41a..f1a2e25de 100644 --- a/src/core/statistics/event/listener.rs +++ b/packages/tracker-core/src/statistics/event/listener.rs @@ -2,7 +2,7 @@ use tokio::sync::mpsc; use super::handler::handle_event; use super::Event; -use crate::core::statistics::repository::Repository; +use crate::statistics::repository::Repository; pub async fn dispatch_events(mut receiver: mpsc::Receiver, stats_repository: Repository) { while let Some(event) = receiver.recv().await { diff --git a/src/core/statistics/event/mod.rs b/packages/tracker-core/src/statistics/event/mod.rs similarity index 100% rename from src/core/statistics/event/mod.rs rename to packages/tracker-core/src/statistics/event/mod.rs diff --git a/src/core/statistics/event/sender.rs b/packages/tracker-core/src/statistics/event/sender.rs similarity index 100% rename from 
src/core/statistics/event/sender.rs rename to packages/tracker-core/src/statistics/event/sender.rs diff --git a/src/core/statistics/keeper.rs b/packages/tracker-core/src/statistics/keeper.rs similarity index 93% rename from src/core/statistics/keeper.rs rename to packages/tracker-core/src/statistics/keeper.rs index 5427734e1..a3d4542f7 100644 --- a/src/core/statistics/keeper.rs +++ b/packages/tracker-core/src/statistics/keeper.rs @@ -51,9 +51,9 @@ impl Keeper { #[cfg(test)] mod tests { - use crate::core::statistics::event::Event; - use crate::core::statistics::keeper::Keeper; - use crate::core::statistics::metrics::Metrics; + use crate::statistics::event::Event; + use crate::statistics::keeper::Keeper; + use crate::statistics::metrics::Metrics; #[tokio::test] async fn should_contain_the_tracker_statistics() { diff --git a/src/core/statistics/metrics.rs b/packages/tracker-core/src/statistics/metrics.rs similarity index 100% rename from src/core/statistics/metrics.rs rename to packages/tracker-core/src/statistics/metrics.rs diff --git a/packages/tracker-core/src/statistics/mod.rs b/packages/tracker-core/src/statistics/mod.rs new file mode 100644 index 000000000..2ffbc0c8f --- /dev/null +++ b/packages/tracker-core/src/statistics/mod.rs @@ -0,0 +1,32 @@ +//! Structs to collect and keep tracker metrics. +//! +//! The tracker collects metrics such as: +//! +//! - Number of connections handled +//! - Number of `announce` requests handled +//! - Number of `scrape` request handled +//! +//! These metrics are collected for each connection type: UDP and HTTP and +//! also for each IP version used by the peers: IPv4 and IPv6. +//! +//! > Notice: that UDP tracker have an specific `connection` request. For the +//! > `HTTP` metrics the counter counts one connection for each `announce` or +//! > `scrape` request. +//! +//! The data is collected by using an `event-sender -> event listener` model. +//! +//! 
The tracker uses a [`Sender`](crate::core::statistics::event::sender::Sender) +//! instance to send an event. +//! +//! The [`statistics::keeper::Keeper`](crate::core::statistics::keeper::Keeper) listens to new +//! events and uses the [`statistics::repository::Repository`](crate::core::statistics::repository::Repository) to +//! upgrade and store metrics. +//! +//! See the [`statistics::event::Event`](crate::core::statistics::event::Event) enum to check +//! which events are available. +pub mod event; +pub mod keeper; +pub mod metrics; +pub mod repository; +pub mod services; +pub mod setup; diff --git a/src/core/statistics/repository.rs b/packages/tracker-core/src/statistics/repository.rs similarity index 100% rename from src/core/statistics/repository.rs rename to packages/tracker-core/src/statistics/repository.rs diff --git a/packages/tracker-core/src/statistics/services.rs b/packages/tracker-core/src/statistics/services.rs new file mode 100644 index 000000000..196c6b340 --- /dev/null +++ b/packages/tracker-core/src/statistics/services.rs @@ -0,0 +1,55 @@ +//! Statistics services. +//! +//! It includes: +//! +//! - A [`factory`](crate::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. +//! - A [`get_metrics`] service to get the tracker [`metrics`](crate::core::statistics::metrics::Metrics). +//! +//! Tracker metrics are collected using a Publisher-Subscribe pattern. +//! +//! The factory function builds two structs: +//! +//! - An statistics event [`Sender`](crate::core::statistics::event::sender::Sender) +//! - An statistics [`Repository`] +//! +//! ```text +//! let (stats_event_sender, stats_repository) = factory(tracker_usage_statistics); +//! ``` +//! +//! The statistics repository is responsible for storing the metrics in memory. +//! The statistics event sender allows sending events related to metrics. +//! There is an event listener that is receiving all the events and processing them with an event handler. 
+//! Then, the event handler updates the metrics depending on the received event. +//! +//! For example, if you send the event [`Event::Udp4Connect`](crate::core::statistics::event::Event::Udp4Connect): +//! +//! ```text +//! let result = event_sender.send_event(Event::Udp4Connect).await; +//! ``` +//! +//! Eventually the counter for UDP connections from IPv4 peers will be increased. +//! +//! ```rust,no_run +//! pub struct Metrics { +//! // ... +//! pub udp4_connections_handled: u64, // This will be incremented +//! // ... +//! } +//! ``` +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + +use crate::statistics::metrics::Metrics; + +/// All the metrics collected by the tracker. +#[derive(Debug, PartialEq)] +pub struct TrackerMetrics { + /// Domain level metrics. + /// + /// General metrics for all torrents (number of seeders, leechers, etcetera) + pub torrents_metrics: TorrentsMetrics, + + /// Application level metrics. Usage statistics/metrics. + /// + /// Metrics about how the tracker is been used (number of udp announce requests, number of http scrape requests, etcetera) + pub protocol_metrics: Metrics, +} diff --git a/src/core/statistics/setup.rs b/packages/tracker-core/src/statistics/setup.rs similarity index 98% rename from src/core/statistics/setup.rs rename to packages/tracker-core/src/statistics/setup.rs index e440a709c..701392176 100644 --- a/src/core/statistics/setup.rs +++ b/packages/tracker-core/src/statistics/setup.rs @@ -1,7 +1,7 @@ //! Setup for the tracker statistics. //! //! The [`factory`] function builds the structs needed for handling the tracker metrics. -use crate::core::statistics; +use crate::statistics; /// It builds the structs needed for handling the tracker metrics. 
/// diff --git a/src/core/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs similarity index 97% rename from src/core/torrent/manager.rs rename to packages/tracker-core/src/torrent/manager.rs index 261376755..4199e9944 100644 --- a/src/core/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -6,8 +6,7 @@ use torrust_tracker_configuration::Core; use super::repository::in_memory::InMemoryTorrentRepository; use super::repository::persisted::DatabasePersistentTorrentRepository; -use crate::core::databases; -use crate::CurrentClock; +use crate::{databases, CurrentClock}; pub struct TorrentsManager { /// The tracker configuration. diff --git a/src/core/torrent/mod.rs b/packages/tracker-core/src/torrent/mod.rs similarity index 100% rename from src/core/torrent/mod.rs rename to packages/tracker-core/src/torrent/mod.rs diff --git a/src/core/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs similarity index 98% rename from src/core/torrent/repository/in_memory.rs rename to packages/tracker-core/src/torrent/repository/in_memory.rs index 2e80a2e9b..b9979577a 100644 --- a/src/core/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -11,7 +11,7 @@ use torrust_tracker_torrent_repository::entry::EntrySync; use torrust_tracker_torrent_repository::repository::Repository; use torrust_tracker_torrent_repository::EntryMutexStd; -use crate::core::torrent::Torrents; +use crate::torrent::Torrents; /// The in-memory torrents repository. 
/// @@ -114,8 +114,8 @@ mod tests { use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::core::core_tests::{leecher, sample_info_hash, sample_peer}; - use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::core_tests::{leecher, sample_info_hash, sample_peer}; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; /// It generates a peer id from a number where the number is the last /// part of the peer ID. For example, for `12` it returns diff --git a/src/core/torrent/repository/mod.rs b/packages/tracker-core/src/torrent/repository/mod.rs similarity index 100% rename from src/core/torrent/repository/mod.rs rename to packages/tracker-core/src/torrent/repository/mod.rs diff --git a/src/core/torrent/repository/persisted.rs b/packages/tracker-core/src/torrent/repository/persisted.rs similarity index 94% rename from src/core/torrent/repository/persisted.rs rename to packages/tracker-core/src/torrent/repository/persisted.rs index 86a3db0e3..77a9c23eb 100644 --- a/src/core/torrent/repository/persisted.rs +++ b/packages/tracker-core/src/torrent/repository/persisted.rs @@ -3,8 +3,8 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::PersistentTorrents; -use crate::core::databases::error::Error; -use crate::core::databases::Database; +use crate::databases::error::Error; +use crate::databases::Database; /// Torrent repository implementation that persists the torrents in a database. 
/// diff --git a/src/core/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs similarity index 85% rename from src/core/torrent/services.rs rename to packages/tracker-core/src/torrent/services.rs index 5a4810412..2275f20d0 100644 --- a/src/core/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -11,7 +11,7 @@ use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::peer; use torrust_tracker_torrent_repository::entry::EntrySync; -use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; +use crate::torrent::repository::in_memory::InMemoryTorrentRepository; /// It contains all the information the tracker has about a torrent #[derive(Debug, PartialEq)] @@ -44,10 +44,8 @@ pub struct BasicInfo { } /// It returns all the information the tracker has about one torrent in a [Info] struct. -pub async fn get_torrent_info( - in_memory_torrent_repository: Arc, - info_hash: &InfoHash, -) -> Option { +#[must_use] +pub fn get_torrent_info(in_memory_torrent_repository: &Arc, info_hash: &InfoHash) -> Option { let torrent_entry_option = in_memory_torrent_repository.get(info_hash); let torrent_entry = torrent_entry_option?; @@ -68,8 +66,9 @@ pub async fn get_torrent_info( } /// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list. -pub async fn get_torrents_page( - in_memory_torrent_repository: Arc, +#[must_use] +pub fn get_torrents_page( + in_memory_torrent_repository: &Arc, pagination: Option<&Pagination>, ) -> Vec { let mut basic_infos: Vec = vec![]; @@ -89,10 +88,8 @@ pub async fn get_torrents_page( } /// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list. 
-pub async fn get_torrents( - in_memory_torrent_repository: Arc, - info_hashes: &[InfoHash], -) -> Vec { +#[must_use] +pub fn get_torrents(in_memory_torrent_repository: &Arc, info_hashes: &[InfoHash]) -> Vec { let mut basic_infos: Vec = vec![]; for info_hash in info_hashes { @@ -135,19 +132,18 @@ mod tests { use bittorrent_primitives::info_hash::InfoHash; - use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::core::torrent::services::tests::sample_peer; - use crate::core::torrent::services::{get_torrent_info, Info}; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::torrent::services::tests::sample_peer; + use crate::torrent::services::{get_torrent_info, Info}; #[tokio::test] async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let torrent_info = get_torrent_info( - in_memory_torrent_repository.clone(), + &in_memory_torrent_repository, &InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(), // DevSkim: ignore DS173237 - ) - .await; + ); assert!(torrent_info.is_none()); } @@ -160,9 +156,7 @@ mod tests { let info_hash = InfoHash::from_str(&hash).unwrap(); let () = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); - let torrent_info = get_torrent_info(in_memory_torrent_repository.clone(), &info_hash) - .await - .unwrap(); + let torrent_info = get_torrent_info(&in_memory_torrent_repository, &info_hash).unwrap(); assert_eq!( torrent_info, @@ -184,15 +178,15 @@ mod tests { use bittorrent_primitives::info_hash::InfoHash; - use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::core::torrent::services::tests::sample_peer; - use crate::core::torrent::services::{get_torrents_page, BasicInfo, Pagination}; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::torrent::services::tests::sample_peer; + use 
crate::torrent::services::{get_torrents_page, BasicInfo, Pagination}; #[tokio::test] async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let torrents = get_torrents_page(in_memory_torrent_repository.clone(), Some(&Pagination::default())).await; + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); assert_eq!(torrents, vec![]); } @@ -206,7 +200,7 @@ mod tests { let () = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); - let torrents = get_torrents_page(in_memory_torrent_repository.clone(), Some(&Pagination::default())).await; + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); assert_eq!( torrents, @@ -235,7 +229,7 @@ mod tests { let offset = 0; let limit = 1; - let torrents = get_torrents_page(in_memory_torrent_repository.clone(), Some(&Pagination::new(offset, limit))).await; + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::new(offset, limit))); assert_eq!(torrents.len(), 1); } @@ -256,7 +250,7 @@ mod tests { let offset = 1; let limit = 4000; - let torrents = get_torrents_page(in_memory_torrent_repository.clone(), Some(&Pagination::new(offset, limit))).await; + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::new(offset, limit))); assert_eq!(torrents.len(), 1); assert_eq!( @@ -282,7 +276,7 @@ mod tests { let info_hash2 = InfoHash::from_str(&hash2).unwrap(); let () = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer()); - let torrents = get_torrents_page(in_memory_torrent_repository.clone(), Some(&Pagination::default())).await; + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); assert_eq!( torrents, diff --git a/src/core/whitelist/authorization.rs b/packages/tracker-core/src/whitelist/authorization.rs similarity 
index 93% rename from src/core/whitelist/authorization.rs rename to packages/tracker-core/src/whitelist/authorization.rs index 1a6d8b758..285f6613e 100644 --- a/src/core/whitelist/authorization.rs +++ b/packages/tracker-core/src/whitelist/authorization.rs @@ -6,7 +6,7 @@ use torrust_tracker_configuration::Core; use tracing::instrument; use super::repository::in_memory::InMemoryWhitelist; -use crate::core::error::Error; +use crate::error::Error; pub struct WhitelistAuthorization { /// Core tracker configuration. @@ -64,8 +64,8 @@ mod tests { mod configured_as_whitelisted { mod handling_authorization { - use crate::core::core_tests::sample_info_hash; - use crate::core::whitelist::whitelist_tests::initialize_whitelist_services_for_listed_tracker; + use crate::core_tests::sample_info_hash; + use crate::whitelist::whitelist_tests::initialize_whitelist_services_for_listed_tracker; #[tokio::test] async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { diff --git a/src/core/whitelist/manager.rs b/packages/tracker-core/src/whitelist/manager.rs similarity index 91% rename from src/core/whitelist/manager.rs rename to packages/tracker-core/src/whitelist/manager.rs index 0d9751994..c78a59470 100644 --- a/src/core/whitelist/manager.rs +++ b/packages/tracker-core/src/whitelist/manager.rs @@ -4,7 +4,7 @@ use bittorrent_primitives::info_hash::InfoHash; use super::repository::in_memory::InMemoryWhitelist; use super::repository::persisted::DatabaseWhitelist; -use crate::core::databases; +use crate::databases; /// It handles the list of allowed torrents. Only for listed trackers. 
pub struct WhitelistManager { @@ -97,8 +97,8 @@ mod tests { use torrust_tracker_test_helpers::configuration; - use crate::core::whitelist::manager::WhitelistManager; - use crate::core::whitelist::whitelist_tests::initialize_whitelist_services; + use crate::whitelist::manager::WhitelistManager; + use crate::whitelist::whitelist_tests::initialize_whitelist_services; fn initialize_whitelist_manager_for_whitelisted_tracker() -> Arc { let config = configuration::ephemeral_listed(); @@ -111,8 +111,8 @@ mod tests { mod configured_as_whitelisted { mod handling_the_torrent_whitelist { - use crate::core::core_tests::sample_info_hash; - use crate::core::whitelist::manager::tests::initialize_whitelist_manager_for_whitelisted_tracker; + use crate::core_tests::sample_info_hash; + use crate::whitelist::manager::tests::initialize_whitelist_manager_for_whitelisted_tracker; #[tokio::test] async fn it_should_add_a_torrent_to_the_whitelist() { @@ -139,8 +139,8 @@ mod tests { } mod persistence { - use crate::core::core_tests::sample_info_hash; - use crate::core::whitelist::manager::tests::initialize_whitelist_manager_for_whitelisted_tracker; + use crate::core_tests::sample_info_hash; + use crate::whitelist::manager::tests::initialize_whitelist_manager_for_whitelisted_tracker; #[tokio::test] async fn it_should_load_the_whitelist_from_the_database() { diff --git a/src/core/whitelist/mod.rs b/packages/tracker-core/src/whitelist/mod.rs similarity index 88% rename from src/core/whitelist/mod.rs rename to packages/tracker-core/src/whitelist/mod.rs index 1f5f87626..8521485f7 100644 --- a/src/core/whitelist/mod.rs +++ b/packages/tracker-core/src/whitelist/mod.rs @@ -10,8 +10,8 @@ mod tests { mod configured_as_whitelisted { mod handling_authorization { - use crate::core::core_tests::sample_info_hash; - use crate::core::whitelist::whitelist_tests::initialize_whitelist_services_for_listed_tracker; + use crate::core_tests::sample_info_hash; + use 
crate::whitelist::whitelist_tests::initialize_whitelist_services_for_listed_tracker; #[tokio::test] async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { diff --git a/src/core/whitelist/repository/in_memory.rs b/packages/tracker-core/src/whitelist/repository/in_memory.rs similarity index 94% rename from src/core/whitelist/repository/in_memory.rs rename to packages/tracker-core/src/whitelist/repository/in_memory.rs index f023c1610..befd6fed6 100644 --- a/src/core/whitelist/repository/in_memory.rs +++ b/packages/tracker-core/src/whitelist/repository/in_memory.rs @@ -33,8 +33,8 @@ impl InMemoryWhitelist { #[cfg(test)] mod tests { - use crate::core::core_tests::sample_info_hash; - use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; + use crate::core_tests::sample_info_hash; + use crate::whitelist::repository::in_memory::InMemoryWhitelist; #[tokio::test] async fn should_allow_adding_a_new_torrent_to_the_whitelist() { diff --git a/src/core/whitelist/repository/mod.rs b/packages/tracker-core/src/whitelist/repository/mod.rs similarity index 100% rename from src/core/whitelist/repository/mod.rs rename to packages/tracker-core/src/whitelist/repository/mod.rs diff --git a/src/core/whitelist/repository/persisted.rs b/packages/tracker-core/src/whitelist/repository/persisted.rs similarity index 97% rename from src/core/whitelist/repository/persisted.rs rename to packages/tracker-core/src/whitelist/repository/persisted.rs index fd56d56b5..c3c4a2601 100644 --- a/src/core/whitelist/repository/persisted.rs +++ b/packages/tracker-core/src/whitelist/repository/persisted.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; -use crate::core::databases::{self, Database}; +use crate::databases::{self, Database}; /// The persisted list of allowed torrents. 
pub struct DatabaseWhitelist { diff --git a/src/core/whitelist/setup.rs b/packages/tracker-core/src/whitelist/setup.rs similarity index 92% rename from src/core/whitelist/setup.rs rename to packages/tracker-core/src/whitelist/setup.rs index bdd35737c..5b2a5de40 100644 --- a/src/core/whitelist/setup.rs +++ b/packages/tracker-core/src/whitelist/setup.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use super::manager::WhitelistManager; use super::repository::in_memory::InMemoryWhitelist; use super::repository::persisted::DatabaseWhitelist; -use crate::core::databases::Database; +use crate::databases::Database; #[must_use] pub fn initialize_whitelist_manager( diff --git a/src/core/whitelist/whitelist_tests.rs b/packages/tracker-core/src/whitelist/whitelist_tests.rs similarity index 89% rename from src/core/whitelist/whitelist_tests.rs rename to packages/tracker-core/src/whitelist/whitelist_tests.rs index 38c2bbde3..33f5a97f7 100644 --- a/src/core/whitelist/whitelist_tests.rs +++ b/packages/tracker-core/src/whitelist/whitelist_tests.rs @@ -5,8 +5,8 @@ use torrust_tracker_configuration::Configuration; use super::authorization::WhitelistAuthorization; use super::manager::WhitelistManager; use super::repository::in_memory::InMemoryWhitelist; -use crate::core::databases::setup::initialize_database; -use crate::core::whitelist::setup::initialize_whitelist_manager; +use crate::databases::setup::initialize_database; +use crate::whitelist::setup::initialize_whitelist_manager; #[must_use] pub fn initialize_whitelist_services(config: &Configuration) -> (Arc, Arc) { diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 8a084dc7f..f7506800e 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -13,6 +13,20 @@ //! 4. Initialize the domain tracker. 
use std::sync::Arc; +use bittorrent_tracker_core::announce_handler::AnnounceHandler; +use bittorrent_tracker_core::authentication::handler::KeysHandler; +use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; +use bittorrent_tracker_core::authentication::key::repository::persisted::DatabaseKeyRepository; +use bittorrent_tracker_core::authentication::service; +use bittorrent_tracker_core::databases::setup::initialize_database; +use bittorrent_tracker_core::scrape_handler::ScrapeHandler; +use bittorrent_tracker_core::statistics; +use bittorrent_tracker_core::torrent::manager::TorrentsManager; +use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; +use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; +use bittorrent_tracker_core::whitelist::setup::initialize_whitelist_manager; use tokio::sync::RwLock; use torrust_tracker_clock::static_time; use torrust_tracker_configuration::validator::Validator; @@ -22,20 +36,6 @@ use tracing::instrument; use super::config::initialize_configuration; use crate::bootstrap; use crate::container::AppContainer; -use crate::core::announce_handler::AnnounceHandler; -use crate::core::authentication::handler::KeysHandler; -use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; -use crate::core::authentication::key::repository::persisted::DatabaseKeyRepository; -use crate::core::authentication::service; -use crate::core::databases::setup::initialize_database; -use crate::core::scrape_handler::ScrapeHandler; -use crate::core::statistics; -use crate::core::torrent::manager::TorrentsManager; -use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; -use 
crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; -use crate::core::whitelist::authorization::WhitelistAuthorization; -use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; -use crate::core::whitelist::setup::initialize_whitelist_manager; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; use crate::shared::crypto::ephemeral_instance_keys; diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index 45e6e9e68..7085aa7e2 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -12,13 +12,12 @@ use std::sync::Arc; +use bittorrent_tracker_core::torrent::manager::TorrentsManager; use chrono::Utc; use tokio::task::JoinHandle; use torrust_tracker_configuration::Core; use tracing::instrument; -use crate::core::torrent::manager::TorrentsManager; - /// It starts a jobs for cleaning up the torrent data in the tracker. /// /// The cleaning task is executed on an `inactive_peer_cleanup_interval`. 
diff --git a/src/container.rs b/src/container.rs index 1a2a029ee..cae2d07ce 100644 --- a/src/container.rs +++ b/src/container.rs @@ -1,20 +1,20 @@ use std::sync::Arc; +use bittorrent_tracker_core::announce_handler::AnnounceHandler; +use bittorrent_tracker_core::authentication::handler::KeysHandler; +use bittorrent_tracker_core::authentication::service::AuthenticationService; +use bittorrent_tracker_core::databases::Database; +use bittorrent_tracker_core::scrape_handler::ScrapeHandler; +use bittorrent_tracker_core::statistics::event::sender::Sender; +use bittorrent_tracker_core::statistics::repository::Repository; +use bittorrent_tracker_core::torrent::manager::TorrentsManager; +use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use bittorrent_tracker_core::whitelist; +use bittorrent_tracker_core::whitelist::manager::WhitelistManager; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, HttpApi, HttpTracker, UdpTracker}; -use crate::core::announce_handler::AnnounceHandler; -use crate::core::authentication::handler::KeysHandler; -use crate::core::authentication::service::AuthenticationService; -use crate::core::databases::Database; -use crate::core::scrape_handler::ScrapeHandler; -use crate::core::statistics::event::sender::Sender; -use crate::core::statistics::repository::Repository; -use crate::core::torrent::manager::TorrentsManager; -use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; -use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; -use crate::core::whitelist; -use crate::core::whitelist::manager::WhitelistManager; use crate::servers::udp::server::banning::BanService; pub struct AppContainer { diff --git a/src/core/mod.rs b/src/core/mod.rs index 125a67b5a..3449ec7b4 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -1,573 +1 @@ -//! 
The core `tracker` module contains the generic `BitTorrent` tracker logic which is independent of the delivery layer. -//! -//! It contains the tracker services and their dependencies. It's a domain layer which does not -//! specify how the end user should connect to the `Tracker`. -//! -//! Typically this module is intended to be used by higher modules like: -//! -//! - A UDP tracker -//! - A HTTP tracker -//! - A tracker REST API -//! -//! ```text -//! Delivery layer Domain layer -//! -//! HTTP tracker | -//! UDP tracker |> Core tracker -//! Tracker REST API | -//! ``` -//! -//! # Table of contents -//! -//! - [Tracker](#tracker) -//! - [Announce request](#announce-request) -//! - [Scrape request](#scrape-request) -//! - [Torrents](#torrents) -//! - [Peers](#peers) -//! - [Configuration](#configuration) -//! - [Services](#services) -//! - [Authentication](#authentication) -//! - [Statistics](#statistics) -//! - [Persistence](#persistence) -//! -//! # Tracker -//! -//! The `Tracker` is the main struct in this module. `The` tracker has some groups of responsibilities: -//! -//! - **Core tracker**: it handles the information about torrents and peers. -//! - **Authentication**: it handles authentication keys which are used by HTTP trackers. -//! - **Authorization**: it handles the permission to perform requests. -//! - **Whitelist**: when the tracker runs in `listed` or `private_listed` mode all operations are restricted to whitelisted torrents. -//! - **Statistics**: it keeps and serves the tracker statistics. -//! -//! Refer to [torrust-tracker-configuration](https://docs.rs/torrust-tracker-configuration) crate docs to get more information about the tracker settings. -//! -//! ## Announce request -//! -//! Handling `announce` requests is the most important task for a `BitTorrent` tracker. -//! -//! A `BitTorrent` swarm is a network of peers that are all trying to download the same torrent. -//! 
When a peer wants to find other peers it announces itself to the swarm via the tracker. -//! The peer sends its data to the tracker so that the tracker can add it to the swarm. -//! The tracker responds to the peer with the list of other peers in the swarm so that -//! the peer can contact them to start downloading pieces of the file from them. -//! -//! Once you have instantiated the `AnnounceHandler` you can `announce` a new [`peer::Peer`](torrust_tracker_primitives::peer::Peer) with: -//! -//! ```rust,no_run -//! use std::net::SocketAddr; -//! use std::net::IpAddr; -//! use std::net::Ipv4Addr; -//! use std::str::FromStr; -//! -//! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; -//! use torrust_tracker_primitives::DurationSinceUnixEpoch; -//! use torrust_tracker_primitives::peer; -//! use bittorrent_primitives::info_hash::InfoHash; -//! -//! let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); -//! -//! let peer = peer::Peer { -//! peer_id: PeerId(*b"-qB00000000000000001"), -//! peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), -//! updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), -//! uploaded: NumberOfBytes::new(0), -//! downloaded: NumberOfBytes::new(0), -//! left: NumberOfBytes::new(0), -//! event: AnnounceEvent::Completed, -//! }; -//! -//! let peer_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); -//! ``` -//! -//! ```text -//! let announce_data = announce_handler.announce(&info_hash, &mut peer, &peer_ip).await; -//! ``` -//! -//! The `Tracker` returns the list of peers for the torrent with the infohash `3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0`, -//! filtering out the peer that is making the `announce` request. -//! -//! > **NOTICE**: that the peer argument is mutable because the `Tracker` can change the peer IP if the peer is using a loopback IP. -//! -//! The `peer_ip` argument is the resolved peer ip. 
It's a common practice that trackers ignore the peer ip in the `announce` request params, -//! and resolve the peer ip using the IP of the client making the request. As the tracker is a domain service, the peer IP must be provided -//! for the `Tracker` user, which is usually a higher component with access the the request metadata, for example, connection data, proxy headers, -//! etcetera. -//! -//! The returned struct is: -//! -//! ```rust,no_run -//! use torrust_tracker_primitives::peer; -//! use torrust_tracker_configuration::AnnouncePolicy; -//! -//! pub struct AnnounceData { -//! pub peers: Vec, -//! pub swarm_stats: SwarmMetadata, -//! pub policy: AnnouncePolicy, // the tracker announce policy. -//! } -//! -//! pub struct SwarmMetadata { -//! pub completed: u32, // The number of peers that have ever completed downloading -//! pub seeders: u32, // The number of active peers that have completed downloading (seeders) -//! pub leechers: u32, // The number of active peers that have not completed downloading (leechers) -//! } -//! -//! // Core tracker configuration -//! pub struct AnnounceInterval { -//! // ... -//! pub interval: u32, // Interval in seconds that the client should wait between sending regular announce requests to the tracker -//! pub interval_min: u32, // Minimum announce interval. Clients must not reannounce more frequently than this -//! // ... -//! } -//! ``` -//! -//! Refer to `BitTorrent` BEPs and other sites for more information about the `announce` request: -//! -//! - [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) -//! - [BEP 23. Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) -//! - [Vuze docs](https://wiki.vuze.com/w/Announce) -//! -//! ## Scrape request -//! -//! The `scrape` request allows clients to query metadata about the swarm in bulk. -//! -//! An `scrape` request includes a list of infohashes whose swarm metadata you want to collect. -//! -//! 
The returned struct is: -//! -//! ```rust,no_run -//! use bittorrent_primitives::info_hash::InfoHash; -//! use std::collections::HashMap; -//! -//! pub struct ScrapeData { -//! pub files: HashMap, -//! } -//! -//! pub struct SwarmMetadata { -//! pub complete: u32, // The number of active peers that have completed downloading (seeders) -//! pub downloaded: u32, // The number of peers that have ever completed downloading -//! pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) -//! } -//! ``` -//! -//! The JSON representation of a sample `scrape` response would be like the following: -//! -//! ```json -//! { -//! 'files': { -//! 'xxxxxxxxxxxxxxxxxxxx': {'complete': 11, 'downloaded': 13772, 'incomplete': 19}, -//! 'yyyyyyyyyyyyyyyyyyyy': {'complete': 21, 'downloaded': 206, 'incomplete': 20} -//! } -//! } -//! ``` -//! -//! `xxxxxxxxxxxxxxxxxxxx` and `yyyyyyyyyyyyyyyyyyyy` are 20-byte infohash arrays. -//! There are two data structures for infohashes: byte arrays and hex strings: -//! -//! ```rust,no_run -//! use bittorrent_primitives::info_hash::InfoHash; -//! use std::str::FromStr; -//! -//! let info_hash: InfoHash = [255u8; 20].into(); -//! -//! assert_eq!( -//! info_hash, -//! InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() -//! ); -//! ``` -//! Refer to `BitTorrent` BEPs and other sites for more information about the `scrape` request: -//! -//! - [BEP 48. Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) -//! - [BEP 15. UDP Tracker Protocol for `BitTorrent`. Scrape section](https://www.bittorrent.org/beps/bep_0015.html) -//! - [Vuze docs](https://wiki.vuze.com/w/Scrape) -//! -//! ## Torrents -//! -//! The [`torrent`] module contains all the data structures stored by the `Tracker` except for peers. -//! -//! We can represent the data stored in memory internally by the `Tracker` with this JSON object: -//! -//! ```json -//! { -//! 
"c1277613db1d28709b034a017ab2cae4be07ae10": { -//! "completed": 0, -//! "peers": { -//! "-qB00000000000000001": { -//! "peer_id": "-qB00000000000000001", -//! "peer_addr": "2.137.87.41:1754", -//! "updated": 1672419840, -//! "uploaded": 120, -//! "downloaded": 60, -//! "left": 60, -//! "event": "started" -//! }, -//! "-qB00000000000000002": { -//! "peer_id": "-qB00000000000000002", -//! "peer_addr": "23.17.287.141:2345", -//! "updated": 1679415984, -//! "uploaded": 80, -//! "downloaded": 20, -//! "left": 40, -//! "event": "started" -//! } -//! } -//! } -//! } -//! ``` -//! -//! The `Tracker` maintains an indexed-by-info-hash list of torrents. For each torrent, it stores a torrent `Entry`. -//! The torrent entry has two attributes: -//! -//! - `completed`: which is hte number of peers that have completed downloading the torrent file/s. As they have completed downloading, -//! they have a full version of the torrent data, and they can provide the full data to other peers. That's why they are also known as "seeders". -//! - `peers`: an indexed and orderer list of peer for the torrent. Each peer contains the data received from the peer in the `announce` request. -//! -//! The [`torrent`] module not only contains the original data obtained from peer via `announce` requests, it also contains -//! aggregate data that can be derived from the original data. For example: -//! -//! ```rust,no_run -//! pub struct SwarmMetadata { -//! pub complete: u32, // The number of active peers that have completed downloading (seeders) -//! pub downloaded: u32, // The number of peers that have ever completed downloading -//! pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) -//! } -//! -//! ``` -//! -//! > **NOTICE**: that `complete` or `completed` peers are the peers that have completed downloading, but only the active ones are considered "seeders". -//! -//! `SwarmMetadata` struct follows name conventions for `scrape` responses. 
See [BEP 48](https://www.bittorrent.org/beps/bep_0048.html), while `SwarmMetadata` -//! is used for the rest of cases. -//! -//! Refer to [`torrent`] module for more details about these data structures. -//! -//! ## Peers -//! -//! A `Peer` is the struct used by the `Tracker` to keep peers data: -//! -//! ```rust,no_run -//! use std::net::SocketAddr; - -//! use aquatic_udp_protocol::PeerId; -//! use torrust_tracker_primitives::DurationSinceUnixEpoch; -//! use aquatic_udp_protocol::NumberOfBytes; -//! use aquatic_udp_protocol::AnnounceEvent; -//! -//! pub struct Peer { -//! pub peer_id: PeerId, // The peer ID -//! pub peer_addr: SocketAddr, // Peer socket address -//! pub updated: DurationSinceUnixEpoch, // Last time (timestamp) when the peer was updated -//! pub uploaded: NumberOfBytes, // Number of bytes the peer has uploaded so far -//! pub downloaded: NumberOfBytes, // Number of bytes the peer has downloaded so far -//! pub left: NumberOfBytes, // The number of bytes this peer still has to download -//! pub event: AnnounceEvent, // The event the peer has announced: `started`, `completed`, `stopped` -//! } -//! ``` -//! -//! Notice that most of the attributes are obtained from the `announce` request. -//! For example, an HTTP announce request would contain the following `GET` parameters: -//! -//! -//! -//! The `Tracker` keeps an in-memory ordered data structure with all the torrents and a list of peers for each torrent, together with some swarm metrics. -//! -//! We can represent the data stored in memory with this JSON object: -//! -//! ```json -//! { -//! "c1277613db1d28709b034a017ab2cae4be07ae10": { -//! "completed": 0, -//! "peers": { -//! "-qB00000000000000001": { -//! "peer_id": "-qB00000000000000001", -//! "peer_addr": "2.137.87.41:1754", -//! "updated": 1672419840, -//! "uploaded": 120, -//! "downloaded": 60, -//! "left": 60, -//! "event": "started" -//! }, -//! "-qB00000000000000002": { -//! "peer_id": "-qB00000000000000002", -//! 
"peer_addr": "23.17.287.141:2345", -//! "updated": 1679415984, -//! "uploaded": 80, -//! "downloaded": 20, -//! "left": 40, -//! "event": "started" -//! } -//! } -//! } -//! } -//! ``` -//! -//! That JSON object does not exist, it's only a representation of the `Tracker` torrents data. -//! -//! `c1277613db1d28709b034a017ab2cae4be07ae10` is the torrent infohash and `completed` contains the number of peers -//! that have a full version of the torrent data, also known as seeders. -//! -//! Refer to [`peer`](torrust_tracker_primitives::peer) for more information about peers. -//! -//! # Configuration -//! -//! You can control the behavior of this module with the module settings: -//! -//! ```toml -//! [logging] -//! threshold = "debug" -//! -//! [core] -//! inactive_peer_cleanup_interval = 600 -//! listed = false -//! private = false -//! tracker_usage_statistics = true -//! -//! [core.announce_policy] -//! interval = 120 -//! interval_min = 120 -//! -//! [core.database] -//! driver = "sqlite3" -//! path = "./storage/tracker/lib/database/sqlite3.db" -//! -//! [core.net] -//! on_reverse_proxy = false -//! external_ip = "2.137.87.41" -//! -//! [core.tracker_policy] -//! max_peer_timeout = 900 -//! persistent_torrent_completed_stat = false -//! remove_peerless_torrents = true -//! ``` -//! -//! Refer to the [`configuration` module documentation](https://docs.rs/torrust-tracker-configuration) to get more information about all options. -//! -//! # Services -//! -//! Services are domain services on top of the core tracker domain. Right now there are two types of service: -//! -//! - For statistics: [`crate::core::statistics::services`] -//! - For torrents: [`crate::core::torrent::services`] -//! -//! Services usually format the data inside the tracker to make it easier to consume by other parts. -//! They also decouple the internal data structure, used by the tracker, from the way we deliver that data to the consumers. -//! 
The internal data structure is designed for performance or low memory consumption. And it should be changed -//! without affecting the external consumers. -//! -//! Services can include extra features like pagination, for example. -//! -//! # Authentication -//! -//! One of the core `Tracker` responsibilities is to create and keep authentication keys. Auth keys are used by HTTP trackers -//! when the tracker is running in `private` or `private_listed` mode. -//! -//! HTTP tracker's clients need to obtain an auth key before starting requesting the tracker. Once the get one they have to include -//! a `PATH` param with the key in all the HTTP requests. For example, when a peer wants to `announce` itself it has to use the -//! HTTP tracker endpoint `GET /announce/:key`. -//! -//! The common way to obtain the keys is by using the tracker API directly or via other applications like the [Torrust Index](https://github.com/torrust/torrust-index). -//! -//! To learn more about tracker authentication, refer to the following modules : -//! -//! - [`authentication`] module. -//! - [`core`](crate::core) module. -//! - [`http`](crate::servers::http) module. -//! -//! # Statistics -//! -//! The `Tracker` keeps metrics for some events: -//! -//! ```rust,no_run -//! pub struct Metrics { -//! // IP version 4 -//! -//! // HTTP tracker -//! pub tcp4_connections_handled: u64, -//! pub tcp4_announces_handled: u64, -//! pub tcp4_scrapes_handled: u64, -//! -//! // UDP tracker -//! pub udp4_connections_handled: u64, -//! pub udp4_announces_handled: u64, -//! pub udp4_scrapes_handled: u64, -//! -//! // IP version 6 -//! -//! // HTTP tracker -//! pub tcp6_connections_handled: u64, -//! pub tcp6_announces_handled: u64, -//! pub tcp6_scrapes_handled: u64, -//! -//! // UDP tracker -//! pub udp6_connections_handled: u64, -//! pub udp6_announces_handled: u64, -//! pub udp6_scrapes_handled: u64, -//! } -//! ``` -//! -//! The metrics maintained by the `Tracker` are: -//! -//! 
- `connections_handled`: number of connections handled by the tracker -//! - `announces_handled`: number of `announce` requests handled by the tracker -//! - `scrapes_handled`: number of `scrape` handled requests by the tracker -//! -//! > **NOTICE**: as the HTTP tracker does not have an specific `connection` request like the UDP tracker, `connections_handled` are -//! > increased on every `announce` and `scrape` requests. -//! -//! The tracker exposes an event sender API that allows the tracker users to send events. When a higher application service handles a -//! `connection` , `announce` or `scrape` requests, it notifies the `Tracker` by sending statistics events. -//! -//! For example, the HTTP tracker would send an event like the following when it handles an `announce` request received from a peer using IP version 4. -//! -//! ```text -//! stats_event_sender.send_stats_event(statistics::event::Event::Tcp4Announce).await -//! ``` -//! -//! Refer to [`statistics`] module for more information about statistics. -//! -//! # Persistence -//! -//! Right now the `Tracker` is responsible for storing and load data into and -//! from the database, when persistence is enabled. -//! -//! There are three types of persistent object: -//! -//! - Authentication keys (only expiring keys) -//! - Torrent whitelist -//! - Torrent metrics -//! -//! Refer to [`databases`] module for more information about persistence. 
-pub mod announce_handler; -pub mod authentication; -pub mod databases; -pub mod error; -pub mod scrape_handler; pub mod statistics; -pub mod torrent; -pub mod whitelist; - -pub mod core_tests; -pub mod peer_tests; - -#[cfg(test)] -mod tests { - mod the_tracker { - use std::net::{IpAddr, Ipv4Addr}; - use std::str::FromStr; - use std::sync::Arc; - - use torrust_tracker_test_helpers::configuration; - - use crate::core::announce_handler::AnnounceHandler; - use crate::core::core_tests::initialize_handlers; - use crate::core::scrape_handler::ScrapeHandler; - - fn initialize_handlers_for_public_tracker() -> (Arc, Arc) { - let config = configuration::ephemeral_public(); - initialize_handlers(&config) - } - - fn initialize_handlers_for_listed_tracker() -> (Arc, Arc) { - let config = configuration::ephemeral_listed(); - initialize_handlers(&config) - } - - // The client peer IP - fn peer_ip() -> IpAddr { - IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) - } - - mod for_all_config_modes { - - mod handling_a_scrape_request { - - use std::net::{IpAddr, Ipv4Addr}; - - use bittorrent_primitives::info_hash::InfoHash; - use torrust_tracker_primitives::core::ScrapeData; - use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - - use crate::core::announce_handler::PeersWanted; - use crate::core::core_tests::{complete_peer, incomplete_peer}; - use crate::core::tests::the_tracker::initialize_handlers_for_public_tracker; - - #[tokio::test] - async fn it_should_return_the_swarm_metadata_for_the_requested_file_if_the_tracker_has_that_torrent() { - let (announce_handler, scrape_handler) = initialize_handlers_for_public_tracker(); - - let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // DevSkim: ignore DS173237 - - // Announce a "complete" peer for the torrent - let mut complete_peer = complete_peer(); - announce_handler.announce( - &info_hash, - &mut complete_peer, - &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10)), - &PeersWanted::All, - ); - - // 
Announce an "incomplete" peer for the torrent - let mut incomplete_peer = incomplete_peer(); - announce_handler.announce( - &info_hash, - &mut incomplete_peer, - &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11)), - &PeersWanted::All, - ); - - // Scrape - let scrape_data = scrape_handler.scrape(&vec![info_hash]).await; - - // The expected swarm metadata for the file - let mut expected_scrape_data = ScrapeData::empty(); - expected_scrape_data.add_file( - &info_hash, - SwarmMetadata { - complete: 0, // the "complete" peer does not count because it was not previously known - downloaded: 0, - incomplete: 1, // the "incomplete" peer we have just announced - }, - ); - - assert_eq!(scrape_data, expected_scrape_data); - } - } - } - - mod configured_as_whitelisted { - - mod handling_a_scrape_request { - - use bittorrent_primitives::info_hash::InfoHash; - use torrust_tracker_primitives::core::ScrapeData; - use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - - use crate::core::announce_handler::PeersWanted; - use crate::core::core_tests::{complete_peer, incomplete_peer}; - use crate::core::tests::the_tracker::{initialize_handlers_for_listed_tracker, peer_ip}; - - #[tokio::test] - async fn it_should_return_the_zeroed_swarm_metadata_for_the_requested_file_if_it_is_not_whitelisted() { - let (announce_handler, scrape_handler) = initialize_handlers_for_listed_tracker(); - - let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // DevSkim: ignore DS173237 - - let mut peer = incomplete_peer(); - announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); - - // Announce twice to force non zeroed swarm metadata - let mut peer = complete_peer(); - announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); - - let scrape_data = scrape_handler.scrape(&vec![info_hash]).await; - - // The expected zeroed swarm metadata for the file - let mut expected_scrape_data = ScrapeData::empty(); - 
expected_scrape_data.add_file(&info_hash, SwarmMetadata::zeroed()); - - assert_eq!(scrape_data, expected_scrape_data); - } - } - } - } -} diff --git a/src/core/statistics/mod.rs b/src/core/statistics/mod.rs index 2ffbc0c8f..4e379ae78 100644 --- a/src/core/statistics/mod.rs +++ b/src/core/statistics/mod.rs @@ -1,32 +1 @@ -//! Structs to collect and keep tracker metrics. -//! -//! The tracker collects metrics such as: -//! -//! - Number of connections handled -//! - Number of `announce` requests handled -//! - Number of `scrape` request handled -//! -//! These metrics are collected for each connection type: UDP and HTTP and -//! also for each IP version used by the peers: IPv4 and IPv6. -//! -//! > Notice: that UDP tracker have an specific `connection` request. For the -//! > `HTTP` metrics the counter counts one connection for each `announce` or -//! > `scrape` request. -//! -//! The data is collected by using an `event-sender -> event listener` model. -//! -//! The tracker uses a [`Sender`](crate::core::statistics::event::sender::Sender) -//! instance to send an event. -//! -//! The [`statistics::keeper::Keeper`](crate::core::statistics::keeper::Keeper) listens to new -//! events and uses the [`statistics::repository::Repository`](crate::core::statistics::repository::Repository) to -//! upgrade and store metrics. -//! -//! See the [`statistics::event::Event`](crate::core::statistics::event::Event) enum to check -//! which events are available. -pub mod event; -pub mod keeper; -pub mod metrics; -pub mod repository; pub mod services; -pub mod setup; diff --git a/src/core/statistics/services.rs b/src/core/statistics/services.rs index 337731aea..a4bcc411e 100644 --- a/src/core/statistics/services.rs +++ b/src/core/statistics/services.rs @@ -2,14 +2,14 @@ //! //! It includes: //! -//! - A [`factory`](crate::core::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. -//! 
- A [`get_metrics`] service to get the tracker [`metrics`](crate::core::statistics::metrics::Metrics). +//! - A [`factory`](bittorrent_tracker_core::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. +//! - A [`get_metrics`] service to get the tracker [`metrics`](bittorrent_tracker_core::statistics::metrics::Metrics). //! //! Tracker metrics are collected using a Publisher-Subscribe pattern. //! //! The factory function builds two structs: //! -//! - An statistics event [`Sender`](crate::core::statistics::event::sender::Sender) +//! - An statistics event [`Sender`](bittorrent_tracker_core::statistics::event::sender::Sender) //! - An statistics [`Repository`] //! //! ```text @@ -21,7 +21,7 @@ //! There is an event listener that is receiving all the events and processing them with an event handler. //! Then, the event handler updates the metrics depending on the received event. //! -//! For example, if you send the event [`Event::Udp4Connect`](crate::core::statistics::event::Event::Udp4Connect): +//! For example, if you send the event [`Event::Udp4Connect`](bittorrent_tracker_core::statistics::event::Event::Udp4Connect): //! //! ```text //! let result = event_sender.send_event(Event::Udp4Connect).await; @@ -38,28 +38,14 @@ //! ``` use std::sync::Arc; +use bittorrent_tracker_core::statistics::metrics::Metrics; +use bittorrent_tracker_core::statistics::repository::Repository; +use bittorrent_tracker_core::statistics::services::TrackerMetrics; +use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use tokio::sync::RwLock; -use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use crate::core::statistics::metrics::Metrics; -use crate::core::statistics::repository::Repository; -use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::servers::udp::server::banning::BanService; -/// All the metrics collected by the tracker. 
-#[derive(Debug, PartialEq)] -pub struct TrackerMetrics { - /// Domain level metrics. - /// - /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: TorrentsMetrics, - - /// Application level metrics. Usage statistics/metrics. - /// - /// Metrics about how the tracker is been used (number of udp announce requests, number of http scrape requests, etcetera) - pub protocol_metrics: Metrics, -} - /// It returns all the [`TrackerMetrics`] pub async fn get_metrics( in_memory_torrent_repository: Arc, @@ -110,14 +96,15 @@ pub async fn get_metrics( mod tests { use std::sync::Arc; + use bittorrent_tracker_core::statistics::services::TrackerMetrics; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::{self, statistics}; use tokio::sync::RwLock; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_test_helpers::configuration; - use crate::core::statistics::services::{get_metrics, TrackerMetrics}; - use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::core::{self, statistics}; + use crate::core::statistics::services::get_metrics; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; @@ -145,7 +132,7 @@ mod tests { tracker_metrics, TrackerMetrics { torrents_metrics: TorrentsMetrics::default(), - protocol_metrics: core::statistics::metrics::Metrics::default(), + protocol_metrics: statistics::metrics::Metrics::default(), } ); } diff --git a/src/servers/apis/v1/context/auth_key/handlers.rs b/src/servers/apis/v1/context/auth_key/handlers.rs index 045a9d211..7024ffeba 100644 --- a/src/servers/apis/v1/context/auth_key/handlers.rs +++ b/src/servers/apis/v1/context/auth_key/handlers.rs @@ -5,6 +5,8 @@ use std::time::Duration; use axum::extract::{self, Path, State}; use 
axum::response::Response; +use bittorrent_tracker_core::authentication::handler::{AddKeyRequest, KeysHandler}; +use bittorrent_tracker_core::authentication::Key; use serde::Deserialize; use super::forms::AddKeyForm; @@ -12,8 +14,6 @@ use super::responses::{ auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, failed_to_reload_keys_response, invalid_auth_key_duration_response, invalid_auth_key_response, }; -use crate::core::authentication::handler::{AddKeyRequest, KeysHandler}; -use crate::core::authentication::Key; use crate::servers::apis::v1::context::auth_key::resources::AuthKey; use crate::servers::apis::v1::responses::{invalid_auth_key_param_response, ok_response}; @@ -43,11 +43,11 @@ pub async fn add_auth_key_handler( { Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), Err(err) => match err { - crate::core::error::PeerKeyError::DurationOverflow { seconds_valid } => { + bittorrent_tracker_core::error::PeerKeyError::DurationOverflow { seconds_valid } => { invalid_auth_key_duration_response(seconds_valid) } - crate::core::error::PeerKeyError::InvalidKey { key, source } => invalid_auth_key_response(&key, source), - crate::core::error::PeerKeyError::DatabaseError { source } => failed_to_generate_key_response(source), + bittorrent_tracker_core::error::PeerKeyError::InvalidKey { key, source } => invalid_auth_key_response(&key, source), + bittorrent_tracker_core::error::PeerKeyError::DatabaseError { source } => failed_to_generate_key_response(source), }, } } diff --git a/src/servers/apis/v1/context/auth_key/resources.rs b/src/servers/apis/v1/context/auth_key/resources.rs index a65eb2ab2..8f5b4d309 100644 --- a/src/servers/apis/v1/context/auth_key/resources.rs +++ b/src/servers/apis/v1/context/auth_key/resources.rs @@ -1,10 +1,9 @@ //! API resources for the [`auth_key`](crate::servers::apis::v1::context::auth_key) API context. 
+use bittorrent_tracker_core::authentication::{self, Key}; use serde::{Deserialize, Serialize}; use torrust_tracker_clock::conv::convert_from_iso_8601_to_timestamp; -use crate::core::authentication::{self, Key}; - /// A resource that represents an authentication key. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct AuthKey { @@ -50,11 +49,11 @@ impl From for AuthKey { mod tests { use std::time::Duration; + use bittorrent_tracker_core::authentication::{self, Key}; use torrust_tracker_clock::clock::stopped::Stopped as _; use torrust_tracker_clock::clock::{self, Time}; use super::AuthKey; - use crate::core::authentication::{self, Key}; use crate::CurrentClock; struct TestTime { diff --git a/src/servers/apis/v1/context/auth_key/routes.rs b/src/servers/apis/v1/context/auth_key/routes.rs index ee9f3252c..623fb3459 100644 --- a/src/servers/apis/v1/context/auth_key/routes.rs +++ b/src/servers/apis/v1/context/auth_key/routes.rs @@ -10,9 +10,9 @@ use std::sync::Arc; use axum::routing::{get, post}; use axum::Router; +use bittorrent_tracker_core::authentication::handler::KeysHandler; use super::handlers::{add_auth_key_handler, delete_auth_key_handler, generate_auth_key_handler, reload_keys_handler}; -use crate::core::authentication::handler::KeysHandler; /// It adds the routes to the router for the [`auth_key`](crate::servers::apis::v1::context::auth_key) API context. 
pub fn add(prefix: &str, router: Router, keys_handler: &Arc) -> Router { diff --git a/src/servers/apis/v1/context/stats/handlers.rs b/src/servers/apis/v1/context/stats/handlers.rs index b8e7abd87..b4ead78ea 100644 --- a/src/servers/apis/v1/context/stats/handlers.rs +++ b/src/servers/apis/v1/context/stats/handlers.rs @@ -5,13 +5,13 @@ use std::sync::Arc; use axum::extract::State; use axum::response::Response; use axum_extra::extract::Query; +use bittorrent_tracker_core::statistics::repository::Repository; +use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use serde::Deserialize; use tokio::sync::RwLock; use super::responses::{metrics_response, stats_response}; -use crate::core::statistics::repository::Repository; use crate::core::statistics::services::get_metrics; -use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::servers::udp::server::banning::BanService; #[derive(Deserialize, Debug, Default)] diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index 97ece22fc..d4a0ec7ec 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -1,9 +1,8 @@ //! API resources for the [`stats`](crate::servers::apis::v1::context::stats) //! API context. +use bittorrent_tracker_core::statistics::services::TrackerMetrics; use serde::{Deserialize, Serialize}; -use crate::core::statistics::services::TrackerMetrics; - /// It contains all the statistics generated by the tracker. 
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct Stats { @@ -118,11 +117,11 @@ impl From for Stats { #[cfg(test)] mod tests { + use bittorrent_tracker_core::statistics::metrics::Metrics; + use bittorrent_tracker_core::statistics::services::TrackerMetrics; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use super::Stats; - use crate::core::statistics::metrics::Metrics; - use crate::core::statistics::services::TrackerMetrics; #[test] fn stats_resource_should_be_converted_from_tracker_metrics() { diff --git a/src/servers/apis/v1/context/stats/responses.rs b/src/servers/apis/v1/context/stats/responses.rs index 6fda43f8c..fc74b5f8d 100644 --- a/src/servers/apis/v1/context/stats/responses.rs +++ b/src/servers/apis/v1/context/stats/responses.rs @@ -1,9 +1,9 @@ //! API responses for the [`stats`](crate::servers::apis::v1::context::stats) //! API context. use axum::response::{IntoResponse, Json, Response}; +use bittorrent_tracker_core::statistics::services::TrackerMetrics; use super::resources::Stats; -use crate::core::statistics::services::TrackerMetrics; /// `200` response that contains the [`Stats`] resource as json. 
#[must_use] diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs index 0ec90441d..ce80d8fee 100644 --- a/src/servers/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -8,13 +8,13 @@ use axum::extract::{Path, State}; use axum::response::{IntoResponse, Response}; use axum_extra::extract::Query; use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use bittorrent_tracker_core::torrent::services::{get_torrent_info, get_torrents, get_torrents_page}; use serde::{de, Deserialize, Deserializer}; use thiserror::Error; use torrust_tracker_primitives::pagination::Pagination; use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; -use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; -use crate::core::torrent::services::{get_torrent_info, get_torrents, get_torrents_page}; use crate::servers::apis::v1::responses::invalid_info_hash_param_response; use crate::servers::apis::InfoHashParam; @@ -33,7 +33,7 @@ pub async fn get_torrent_handler( ) -> Response { match InfoHash::from_str(&info_hash.0) { Err(_) => invalid_info_hash_param_response(&info_hash.0), - Ok(info_hash) => match get_torrent_info(in_memory_torrent_repository.clone(), &info_hash).await { + Ok(info_hash) => match get_torrent_info(&in_memory_torrent_repository, &info_hash) { Some(info) => torrent_info_response(info).into_response(), None => torrent_not_known_response(), }, @@ -85,19 +85,14 @@ pub async fn get_torrents_handler( tracing::debug!("pagination: {:?}", pagination); if pagination.0.info_hashes.is_empty() { - torrent_list_response( - &get_torrents_page( - in_memory_torrent_repository.clone(), - Some(&Pagination::new_with_options(pagination.0.offset, pagination.0.limit)), - ) - .await, - ) + torrent_list_response(&get_torrents_page( + &in_memory_torrent_repository, + 
Some(&Pagination::new_with_options(pagination.0.offset, pagination.0.limit)), + )) .into_response() } else { match parse_info_hashes(pagination.0.info_hashes) { - Ok(info_hashes) => { - torrent_list_response(&get_torrents(in_memory_torrent_repository.clone(), &info_hashes).await).into_response() - } + Ok(info_hashes) => torrent_list_response(&get_torrents(&in_memory_torrent_repository, &info_hashes)).into_response(), Err(err) => match err { QueryParamError::InvalidInfoHash { info_hash } => invalid_info_hash_param_response(&info_hash), }, diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs index c90a2a05f..5e4da5c16 100644 --- a/src/servers/apis/v1/context/torrent/resources/torrent.rs +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -4,10 +4,9 @@ //! - `ListItem` is a list item resource on a torrent list. `ListItem` does //! include a `peers` field but it is always `None` in the struct and `null` in //! the JSON response. +use bittorrent_tracker_core::torrent::services::{BasicInfo, Info}; use serde::{Deserialize, Serialize}; -use crate::core::torrent::services::{BasicInfo, Info}; - /// `Torrent` API resource. 
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct Torrent { @@ -99,10 +98,10 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; + use bittorrent_tracker_core::torrent::services::{BasicInfo, Info}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use super::Torrent; - use crate::core::torrent::services::{BasicInfo, Info}; use crate::servers::apis::v1::context::torrent::resources::peer::Peer; use crate::servers::apis::v1::context::torrent::resources::torrent::ListItem; diff --git a/src/servers/apis/v1/context/torrent/responses.rs b/src/servers/apis/v1/context/torrent/responses.rs index 5174c9abe..cd359247b 100644 --- a/src/servers/apis/v1/context/torrent/responses.rs +++ b/src/servers/apis/v1/context/torrent/responses.rs @@ -1,10 +1,10 @@ //! API responses for the [`torrent`](crate::servers::apis::v1::context::torrent) //! API context. use axum::response::{IntoResponse, Json, Response}; +use bittorrent_tracker_core::torrent::services::{BasicInfo, Info}; use serde_json::json; use super::resources::torrent::{ListItem, Torrent}; -use crate::core::torrent::services::{BasicInfo, Info}; /// `200` response that contains an array of /// [`ListItem`] diff --git a/src/servers/apis/v1/context/torrent/routes.rs b/src/servers/apis/v1/context/torrent/routes.rs index 3ea8c639c..615bd8d51 100644 --- a/src/servers/apis/v1/context/torrent/routes.rs +++ b/src/servers/apis/v1/context/torrent/routes.rs @@ -8,9 +8,9 @@ use std::sync::Arc; use axum::routing::get; use axum::Router; +use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use super::handlers::{get_torrent_handler, get_torrents_handler}; -use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; /// It adds the routes to the router for the [`torrent`](crate::servers::apis::v1::context::torrent) API context. 
pub fn add(prefix: &str, router: Router, in_memory_torrent_repository: &Arc) -> Router { diff --git a/src/servers/apis/v1/context/whitelist/handlers.rs b/src/servers/apis/v1/context/whitelist/handlers.rs index ebe0bb15c..e33a215f2 100644 --- a/src/servers/apis/v1/context/whitelist/handlers.rs +++ b/src/servers/apis/v1/context/whitelist/handlers.rs @@ -6,11 +6,11 @@ use std::sync::Arc; use axum::extract::{Path, State}; use axum::response::Response; use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::whitelist::manager::WhitelistManager; use super::responses::{ failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, }; -use crate::core::whitelist::manager::WhitelistManager; use crate::servers::apis::v1::responses::{invalid_info_hash_param_response, ok_response}; use crate::servers::apis::InfoHashParam; diff --git a/src/servers/apis/v1/context/whitelist/routes.rs b/src/servers/apis/v1/context/whitelist/routes.rs index 5069332af..316193cd6 100644 --- a/src/servers/apis/v1/context/whitelist/routes.rs +++ b/src/servers/apis/v1/context/whitelist/routes.rs @@ -9,9 +9,9 @@ use std::sync::Arc; use axum::routing::{delete, get, post}; use axum::Router; +use bittorrent_tracker_core::whitelist::manager::WhitelistManager; use super::handlers::{add_torrent_to_whitelist_handler, reload_whitelist_handler, remove_torrent_from_whitelist_handler}; -use crate::core::whitelist::manager::WhitelistManager; /// It adds the routes to the router for the [`whitelist`](crate::servers::apis::v1::context::whitelist) API context. 
pub fn add(prefix: &str, router: Router, whitelist_manager: &Arc) -> Router { diff --git a/src/servers/http/v1/extractors/authentication_key.rs b/src/servers/http/v1/extractors/authentication_key.rs index d3b77c31a..0e46b75dd 100644 --- a/src/servers/http/v1/extractors/authentication_key.rs +++ b/src/servers/http/v1/extractors/authentication_key.rs @@ -50,10 +50,10 @@ use axum::extract::{FromRequestParts, Path}; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; use bittorrent_http_protocol::v1::responses; +use bittorrent_tracker_core::authentication::Key; use hyper::StatusCode; use serde::Deserialize; -use crate::core::authentication::Key; use crate::servers::http::v1::handlers::common::auth; /// Extractor for the [`Key`] struct. diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 544d706fa..40462c31d 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -16,17 +16,18 @@ use bittorrent_http_protocol::v1::requests::announce::{Announce, Compact, Event} use bittorrent_http_protocol::v1::responses::{self}; use bittorrent_http_protocol::v1::services::peer_ip_resolver; use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; +use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; +use bittorrent_tracker_core::authentication::service::AuthenticationService; +use bittorrent_tracker_core::authentication::Key; +use bittorrent_tracker_core::statistics::event::sender::Sender; +use bittorrent_tracker_core::whitelist; use hyper::StatusCode; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; -use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; -use crate::core::authentication::service::AuthenticationService; -use crate::core::authentication::Key; -use 
crate::core::statistics::event::sender::Sender; -use crate::core::whitelist; +use super::common::auth::map_auth_error_to_error_response; use crate::servers::http::v1::extractors::announce_request::ExtractRequest; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; @@ -150,7 +151,7 @@ async fn handle_announce( match maybe_key { Some(key) => match authentication_service.authenticate(&key).await { Ok(()) => (), - Err(error) => return Err(responses::error::Error::from(error)), + Err(error) => return Err(map_auth_error_to_error_response(&error)), }, None => { return Err(responses::error::Error::from(auth::Error::MissingAuthKey { @@ -250,21 +251,20 @@ mod tests { use bittorrent_http_protocol::v1::requests::announce::Announce; use bittorrent_http_protocol::v1::responses; use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; + use bittorrent_tracker_core::announce_handler::AnnounceHandler; + use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; + use bittorrent_tracker_core::authentication::service::AuthenticationService; + use bittorrent_tracker_core::core_tests::sample_info_hash; + use bittorrent_tracker_core::databases::setup::initialize_database; + use bittorrent_tracker_core::statistics; + use bittorrent_tracker_core::statistics::event::sender::Sender; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; + use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_test_helpers::configuration; - use crate::core::announce_handler::AnnounceHandler; - use 
crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; - use crate::core::authentication::service::AuthenticationService; - use crate::core::core_tests::sample_info_hash; - use crate::core::databases::setup::initialize_database; - use crate::core::statistics; - use crate::core::statistics::event::sender::Sender; - use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; - use crate::core::whitelist::authorization::WhitelistAuthorization; - use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; - struct CoreTrackerServices { pub core_config: Arc, pub announce_handler: Arc, @@ -347,8 +347,9 @@ mod tests { use std::str::FromStr; + use bittorrent_tracker_core::authentication; + use super::{initialize_private_tracker, sample_announce_request, sample_client_ip_sources}; - use crate::core::authentication; use crate::servers::http::v1::handlers::announce::handle_announce; use crate::servers::http::v1::handlers::announce::tests::assert_error_response; diff --git a/src/servers/http/v1/handlers/common/auth.rs b/src/servers/http/v1/handlers/common/auth.rs index 5497427d8..c8625d03a 100644 --- a/src/servers/http/v1/handlers/common/auth.rs +++ b/src/servers/http/v1/handlers/common/auth.rs @@ -4,10 +4,9 @@ use std::panic::Location; use bittorrent_http_protocol::v1::responses; +use bittorrent_tracker_core::authentication; use thiserror::Error; -use crate::core::authentication; - /// Authentication error. 
/// /// When the tracker is private, the authentication key is required in the URL @@ -31,10 +30,12 @@ impl From for responses::error::Error { } } -impl From for responses::error::Error { - fn from(err: authentication::Error) -> Self { - responses::error::Error { - failure_reason: format!("Authentication error: {err}"), - } +#[must_use] +pub fn map_auth_error_to_error_response(err: &authentication::Error) -> responses::error::Error { + // code_review: this could not been implemented with the trait: + // impl From for responses::error::Error + // Consider moving the trait implementation to the http-protocol package. + responses::error::Error { + failure_reason: format!("Authentication error: {err}"), } } diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 35c5b1409..1b9196e25 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -12,14 +12,14 @@ use axum::response::{IntoResponse, Response}; use bittorrent_http_protocol::v1::requests::scrape::Scrape; use bittorrent_http_protocol::v1::responses; use bittorrent_http_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources}; +use bittorrent_tracker_core::authentication::service::AuthenticationService; +use bittorrent_tracker_core::authentication::Key; +use bittorrent_tracker_core::scrape_handler::ScrapeHandler; +use bittorrent_tracker_core::statistics::event::sender::Sender; use hyper::StatusCode; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; -use crate::core::authentication::service::AuthenticationService; -use crate::core::authentication::Key; -use crate::core::scrape_handler::ScrapeHandler; -use crate::core::statistics::event::sender::Sender; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; use 
crate::servers::http::v1::extractors::scrape_request::ExtractRequest; @@ -171,21 +171,20 @@ mod tests { use bittorrent_http_protocol::v1::responses; use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; use bittorrent_primitives::info_hash::InfoHash; + use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; + use bittorrent_tracker_core::authentication::service::AuthenticationService; + use bittorrent_tracker_core::scrape_handler::ScrapeHandler; + use bittorrent_tracker_core::statistics; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; + use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_test_helpers::configuration; - use crate::core::authentication::key::repository::in_memory::InMemoryKeyRepository; - use crate::core::authentication::service::AuthenticationService; - use crate::core::scrape_handler::ScrapeHandler; - use crate::core::statistics; - use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::core::whitelist::authorization::WhitelistAuthorization; - use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; - struct CoreTrackerServices { pub core_config: Arc, pub scrape_handler: Arc, - pub stats_event_sender: Arc>>, + pub stats_event_sender: Arc>>, pub authentication_service: Arc, } @@ -247,10 +246,10 @@ mod tests { mod with_tracker_in_private_mode { use std::str::FromStr; + use bittorrent_tracker_core::authentication; use torrust_tracker_primitives::core::ScrapeData; use super::{initialize_private_tracker, sample_client_ip_sources, sample_scrape_request}; - use crate::core::authentication; use crate::servers::http::v1::handlers::scrape::handle_scrape; #[tokio::test] diff --git a/src/servers/http/v1/services/announce.rs 
b/src/servers/http/v1/services/announce.rs index ee682559e..9e74ab8a5 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -11,13 +11,12 @@ use std::net::IpAddr; use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; +use bittorrent_tracker_core::statistics; +use bittorrent_tracker_core::statistics::event::sender::Sender; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; -use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; -use crate::core::statistics::event::sender::Sender; -use crate::core::statistics::{self}; - /// The HTTP tracker `announce` service. /// /// The service sends an statistics event that increments: @@ -60,17 +59,16 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; + use bittorrent_tracker_core::announce_handler::AnnounceHandler; + use bittorrent_tracker_core::databases::setup::initialize_database; + use bittorrent_tracker_core::statistics; + use bittorrent_tracker_core::statistics::event::sender::Sender; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; - use crate::core::announce_handler::AnnounceHandler; - use crate::core::databases::setup::initialize_database; - use crate::core::statistics; - use crate::core::statistics::event::sender::Sender; - use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; - struct CoreTrackerServices { pub core_config: Arc, pub announce_handler: Arc, @@ -124,11 +122,29 @@ mod 
tests { } } + use bittorrent_tracker_core::statistics::event::Event; + use futures::future::BoxFuture; + use mockall::mock; + use tokio::sync::mpsc::error::SendError; + + mock! { + StatsEventSender {} + impl Sender for StatsEventSender { + fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; + } + } + mod with_tracker_in_any_mode { use std::future; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; + use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; + use bittorrent_tracker_core::core_tests::sample_info_hash; + use bittorrent_tracker_core::databases::setup::initialize_database; + use bittorrent_tracker_core::statistics; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use mockall::predicate::eq; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; @@ -136,14 +152,10 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; - use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; - use crate::core::core_tests::sample_info_hash; - use crate::core::databases::setup::initialize_database; - use crate::core::statistics; - use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use crate::servers::http::v1::services::announce::invoke; - use crate::servers::http::v1::services::announce::tests::{initialize_core_tracker_services, sample_peer}; + use crate::servers::http::v1::services::announce::tests::{ + initialize_core_tracker_services, sample_peer, MockStatsEventSender, + }; fn initialize_announce_handler() -> Arc { let config = configuration::ephemeral(); @@ -189,7 +201,7 @@ mod tests { #[tokio::test] async fn 
it_should_send_the_tcp_4_announce_event_when_the_peer_uses_ipv4() { - let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); + let mut stats_event_sender_mock = MockStatsEventSender::new(); stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::Tcp4Announce)) @@ -234,7 +246,7 @@ mod tests { // Tracker changes the peer IP to the tracker external IP when the peer is using the loopback IP. // Assert that the event sent is a TCP4 event - let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); + let mut stats_event_sender_mock = MockStatsEventSender::new(); stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::Tcp4Announce)) @@ -260,7 +272,7 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_6_announce_event_when_the_peer_uses_ipv6_even_if_the_tracker_changes_the_peer_ip_to_ipv4() { - let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); + let mut stats_event_sender_mock = MockStatsEventSender::new(); stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::Tcp6Announce)) diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index b5a858b83..59a7d34c7 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -11,12 +11,11 @@ use std::net::IpAddr; use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::scrape_handler::ScrapeHandler; +use bittorrent_tracker_core::statistics::event::sender::Sender; +use bittorrent_tracker_core::statistics::{self}; use torrust_tracker_primitives::core::ScrapeData; -use crate::core::scrape_handler::ScrapeHandler; -use crate::core::statistics::event::sender::Sender; -use crate::core::statistics::{self}; - /// The HTTP tracker `scrape` service. 
/// /// The service sends an statistics event that increments: @@ -77,18 +76,22 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; + use bittorrent_tracker_core::announce_handler::AnnounceHandler; + use bittorrent_tracker_core::core_tests::sample_info_hash; + use bittorrent_tracker_core::databases::setup::initialize_database; + use bittorrent_tracker_core::scrape_handler::ScrapeHandler; + use bittorrent_tracker_core::statistics::event::sender::Sender; + use bittorrent_tracker_core::statistics::event::Event; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; + use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use futures::future::BoxFuture; + use mockall::mock; + use tokio::sync::mpsc::error::SendError; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; - use crate::core::announce_handler::AnnounceHandler; - use crate::core::core_tests::sample_info_hash; - use crate::core::databases::setup::initialize_database; - use crate::core::scrape_handler::ScrapeHandler; - use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; - use crate::core::whitelist::authorization::WhitelistAuthorization; - use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; - fn initialize_announce_and_scrape_handlers_for_public_tracker() -> (Arc, Arc) { let config = configuration::ephemeral_public(); @@ -133,27 +136,34 @@ mod tests { Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)) } + mock! 
{ + StatsEventSender {} + impl Sender for StatsEventSender { + fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; + } + } + mod with_real_data { use std::future; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::sync::Arc; + use bittorrent_tracker_core::announce_handler::PeersWanted; + use bittorrent_tracker_core::statistics; use mockall::predicate::eq; use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::core::announce_handler::PeersWanted; - use crate::core::statistics; use crate::servers::http::v1::services::scrape::invoke; use crate::servers::http::v1::services::scrape::tests::{ initialize_announce_and_scrape_handlers_for_public_tracker, initialize_scrape_handler, sample_info_hash, - sample_info_hashes, sample_peer, + sample_info_hashes, sample_peer, MockStatsEventSender, }; #[tokio::test] async fn it_should_return_the_scrape_data_for_a_torrent() { - let (stats_event_sender, _stats_repository) = crate::core::statistics::setup::factory(false); + let (stats_event_sender, _stats_repository) = bittorrent_tracker_core::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let (announce_handler, scrape_handler) = initialize_announce_and_scrape_handlers_for_public_tracker(); @@ -183,7 +193,7 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_4_scrape_event_when_the_peer_uses_ipv4() { - let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); + let mut stats_event_sender_mock = MockStatsEventSender::new(); stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::Tcp4Scrape)) @@ -201,7 +211,7 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { - let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); + let mut stats_event_sender_mock = MockStatsEventSender::new(); stats_event_sender_mock .expect_send_event() 
.with(eq(statistics::event::Event::Tcp6Scrape)) @@ -224,19 +234,20 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::sync::Arc; + use bittorrent_tracker_core::announce_handler::PeersWanted; + use bittorrent_tracker_core::statistics; use mockall::predicate::eq; use torrust_tracker_primitives::core::ScrapeData; - use crate::core::announce_handler::PeersWanted; - use crate::core::statistics; use crate::servers::http::v1::services::scrape::fake; use crate::servers::http::v1::services::scrape::tests::{ initialize_announce_and_scrape_handlers_for_public_tracker, sample_info_hash, sample_info_hashes, sample_peer, + MockStatsEventSender, }; #[tokio::test] async fn it_should_always_return_the_zeroed_scrape_data_for_a_torrent() { - let (stats_event_sender, _stats_repository) = crate::core::statistics::setup::factory(false); + let (stats_event_sender, _stats_repository) = bittorrent_tracker_core::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let (announce_handler, _scrape_handler) = initialize_announce_and_scrape_handlers_for_public_tracker(); @@ -258,7 +269,7 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_4_scrape_event_when_the_peer_uses_ipv4() { - let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); + let mut stats_event_sender_mock = MockStatsEventSender::new(); stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::Tcp4Scrape)) @@ -274,7 +285,7 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { - let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); + let mut stats_event_sender_mock = MockStatsEventSender::new(); stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::Tcp6Scrape)) diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 90c32771f..84b2f1db2 100644 --- a/src/servers/udp/handlers.rs +++ 
b/src/servers/udp/handlers.rs @@ -11,6 +11,10 @@ use aquatic_udp_protocol::{ ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; +use bittorrent_tracker_core::scrape_handler::ScrapeHandler; +use bittorrent_tracker_core::statistics::event::sender::Sender; +use bittorrent_tracker_core::{statistics, whitelist}; use torrust_tracker_clock::clock::Time as _; use torrust_tracker_configuration::Core; use tracing::{instrument, Level}; @@ -20,10 +24,6 @@ use zerocopy::network_endian::I32; use super::connection_cookie::{check, make}; use super::RawRequest; use crate::container::UdpTrackerContainer; -use crate::core::announce_handler::{AnnounceHandler, PeersWanted}; -use crate::core::scrape_handler::ScrapeHandler; -use crate::core::statistics::event::sender::Sender; -use crate::core::{statistics, whitelist}; use crate::servers::udp::error::Error; use crate::servers::udp::{peer_builder, UDP_TRACKER_LOG_TARGET}; use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; @@ -468,21 +468,25 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{NumberOfBytes, PeerId}; + use bittorrent_tracker_core::announce_handler::AnnounceHandler; + use bittorrent_tracker_core::databases::setup::initialize_database; + use bittorrent_tracker_core::scrape_handler::ScrapeHandler; + use bittorrent_tracker_core::statistics::event::sender::Sender; + use bittorrent_tracker_core::statistics::event::Event; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; + use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use bittorrent_tracker_core::{statistics, whitelist}; + use 
futures::future::BoxFuture; + use mockall::mock; + use tokio::sync::mpsc::error::SendError; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_primitives::peer; use torrust_tracker_test_helpers::configuration; use super::gen_remote_fingerprint; - use crate::core::announce_handler::AnnounceHandler; - use crate::core::databases::setup::initialize_database; - use crate::core::scrape_handler::ScrapeHandler; - use crate::core::statistics::event::sender::Sender; - use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; - use crate::core::whitelist::authorization::WhitelistAuthorization; - use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; - use crate::core::{statistics, whitelist}; use crate::CurrentClock; struct CoreTrackerServices { @@ -632,20 +636,28 @@ mod tests { } } + mock! { + StatsEventSender {} + impl Sender for StatsEventSender { + fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; + } + } + mod connect_request { use std::future; use std::sync::Arc; use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; + use bittorrent_tracker_core::statistics; use mockall::predicate::eq; use super::{sample_ipv4_socket_address, sample_ipv6_remote_addr}; - use crate::core::statistics; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_connect; use crate::servers::udp::handlers::tests::{ sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv6_remote_addr_fingerprint, sample_issue_time, + MockStatsEventSender, }; fn sample_connect_request() -> ConnectRequest { @@ -656,7 +668,7 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { - let (stats_event_sender, _stats_repository) = 
crate::core::statistics::setup::factory(false); + let (stats_event_sender, _stats_repository) = bittorrent_tracker_core::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let request = ConnectRequest { @@ -676,7 +688,7 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id() { - let (stats_event_sender, _stats_repository) = crate::core::statistics::setup::factory(false); + let (stats_event_sender, _stats_repository) = bittorrent_tracker_core::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let request = ConnectRequest { @@ -696,7 +708,7 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { - let (stats_event_sender, _stats_repository) = crate::core::statistics::setup::factory(false); + let (stats_event_sender, _stats_repository) = bittorrent_tracker_core::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let request = ConnectRequest { @@ -716,7 +728,7 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { - let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); + let mut stats_event_sender_mock = MockStatsEventSender::new(); stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::Udp4Connect)) @@ -738,7 +750,7 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { - let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); + let mut stats_event_sender_mock = MockStatsEventSender::new(); stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::Udp6Connect)) @@ -840,18 +852,18 @@ mod tests { AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfPeers, PeerId 
as AquaticPeerId, Response, ResponsePeer, }; + use bittorrent_tracker_core::announce_handler::AnnounceHandler; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::{statistics, whitelist}; use mockall::predicate::eq; use torrust_tracker_configuration::Core; - use crate::core::announce_handler::AnnounceHandler; - use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::core::{statistics, whitelist}; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ gen_remote_fingerprint, initialize_core_tracker_services_for_default_tracker_configuration, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_socket_address, - sample_issue_time, TorrentPeerBuilder, + sample_issue_time, MockStatsEventSender, TorrentPeerBuilder, }; use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; @@ -1001,7 +1013,7 @@ mod tests { announce_handler: Arc, whitelist_authorization: Arc, ) -> Response { - let (stats_event_sender, _stats_repository) = crate::core::statistics::setup::factory(false); + let (stats_event_sender, _stats_repository) = bittorrent_tracker_core::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); @@ -1046,7 +1058,7 @@ mod tests { #[tokio::test] async fn should_send_the_upd4_announce_event() { - let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); + let mut stats_event_sender_mock = MockStatsEventSender::new(); stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::Udp4Announce)) @@ -1141,18 +1153,18 @@ mod tests { AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfPeers, 
PeerId as AquaticPeerId, Response, ResponsePeer, }; + use bittorrent_tracker_core::announce_handler::AnnounceHandler; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::{statistics, whitelist}; use mockall::predicate::eq; use torrust_tracker_configuration::Core; - use crate::core::announce_handler::AnnounceHandler; - use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::core::{statistics, whitelist}; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ gen_remote_fingerprint, initialize_core_tracker_services_for_default_tracker_configuration, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, - sample_issue_time, TorrentPeerBuilder, + sample_issue_time, MockStatsEventSender, TorrentPeerBuilder, }; use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; @@ -1306,7 +1318,7 @@ mod tests { announce_handler: Arc, whitelist_authorization: Arc, ) -> Response { - let (stats_event_sender, _stats_repository) = crate::core::statistics::setup::factory(false); + let (stats_event_sender, _stats_repository) = bittorrent_tracker_core::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -1354,7 +1366,7 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_announce_event() { - let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); + let mut stats_event_sender_mock = MockStatsEventSender::new(); stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::Udp6Announce)) @@ -1390,20 +1402,21 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use 
bittorrent_tracker_core::announce_handler::AnnounceHandler; + use bittorrent_tracker_core::databases::setup::initialize_database; + use bittorrent_tracker_core::statistics; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; + use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use mockall::predicate::eq; - use crate::core::announce_handler::AnnounceHandler; - use crate::core::databases::setup::initialize_database; - use crate::core::statistics; - use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::core::torrent::repository::persisted::DatabasePersistentTorrentRepository; - use crate::core::whitelist::authorization::WhitelistAuthorization; - use crate::core::whitelist::repository::in_memory::InMemoryWhitelist; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ - gen_remote_fingerprint, sample_cookie_valid_range, sample_issue_time, TrackerConfigurationBuilder, + gen_remote_fingerprint, sample_cookie_valid_range, sample_issue_time, MockStatsEventSender, + TrackerConfigurationBuilder, }; #[tokio::test] @@ -1417,7 +1430,7 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); + let mut stats_event_sender_mock = MockStatsEventSender::new(); stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::Udp6Announce)) @@ -1491,11 +1504,11 @@ mod tests { InfoHash, NumberOfDownloads, 
NumberOfPeers, PeerId, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; + use bittorrent_tracker_core::scrape_handler::ScrapeHandler; + use bittorrent_tracker_core::statistics; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use super::{gen_remote_fingerprint, TorrentPeerBuilder}; - use crate::core::scrape_handler::ScrapeHandler; - use crate::core::statistics; - use crate::core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{ @@ -1734,19 +1747,19 @@ mod tests { use std::future; use std::sync::Arc; + use bittorrent_tracker_core::statistics; use mockall::predicate::eq; use super::sample_scrape_request; - use crate::core::statistics; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, - sample_ipv4_remote_addr, + sample_ipv4_remote_addr, MockStatsEventSender, }; #[tokio::test] async fn should_send_the_upd4_scrape_event() { - let mut stats_event_sender_mock = statistics::event::sender::MockSender::new(); + let mut stats_event_sender_mock = MockStatsEventSender::new(); stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::Udp4Scrape)) @@ -1775,19 +1788,19 @@ mod tests { use std::future; use std::sync::Arc; + use bittorrent_tracker_core::statistics; use mockall::predicate::eq; use super::sample_scrape_request; - use crate::core::statistics; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, - sample_ipv6_remote_addr, + sample_ipv6_remote_addr, MockStatsEventSender, }; #[tokio::test] async fn should_send_the_upd6_scrape_event() { - let mut 
stats_event_sender_mock = statistics::event::sender::MockSender::new(); + let mut stats_event_sender_mock = MockStatsEventSender::new(); stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::Udp6Scrape)) diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index e4edadd8f..89b9b54d9 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use std::time::Duration; use bittorrent_tracker_client::udp::client::check; +use bittorrent_tracker_core::statistics; use derive_more::Constructor; use futures_util::StreamExt; use tokio::select; @@ -13,7 +14,6 @@ use tracing::instrument; use super::request_buffer::ActiveRequests; use crate::bootstrap::jobs::Started; use crate::container::UdpTrackerContainer; -use crate::core::statistics; use crate::servers::logging::STARTED_ON; use crate::servers::registar::ServiceHealthCheckJob; use crate::servers::signals::{shutdown_signal_with_message, Halted}; diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index e2beb2377..db444a04c 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -4,13 +4,13 @@ use std::sync::Arc; use std::time::Duration; use aquatic_udp_protocol::Response; +use bittorrent_tracker_core::statistics; +use bittorrent_tracker_core::statistics::event::UdpResponseKind; use tokio::time::Instant; use tracing::{instrument, Level}; use super::bound_socket::BoundSocket; use crate::container::UdpTrackerContainer; -use crate::core::statistics; -use crate::core::statistics::event::UdpResponseKind; use crate::servers::udp::handlers::CookieTimeValues; use crate::servers::udp::{handlers, RawRequest}; diff --git a/src/shared/bit_torrent/common.rs b/src/shared/bit_torrent/common.rs index 2f93b5a08..0364071c6 100644 --- a/src/shared/bit_torrent/common.rs +++ b/src/shared/bit_torrent/common.rs @@ -14,9 +14,3 @@ /// does not specifically 
mention this limit, but the limit is being used for /// both the UDP and HTTP trackers since it's applied at the domain level. pub const MAX_SCRAPE_TORRENTS: u8 = 74; - -/// HTTP tracker authentication key length. -/// -/// For more information see function [`generate_key`](crate::core::authentication::key::generate_key) to generate the -/// [`PeerKey`](crate::core::authentication::PeerKey). -pub const AUTH_KEY_LENGTH: usize = 32; diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 297e169d4..219c28b6e 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -2,14 +2,14 @@ use std::net::SocketAddr; use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::authentication::service::AuthenticationService; +use bittorrent_tracker_core::databases::Database; use futures::executor::block_on; use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; use torrust_tracker_configuration::Configuration; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; use torrust_tracker_lib::container::HttpApiContainer; -use torrust_tracker_lib::core::authentication::service::AuthenticationService; -use torrust_tracker_lib::core::databases::Database; use torrust_tracker_lib::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_primitives::peer; diff --git a/tests/servers/api/mod.rs b/tests/servers/api/mod.rs index 92bc19a5f..8f5f6d016 100644 --- a/tests/servers/api/mod.rs +++ b/tests/servers/api/mod.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use torrust_tracker_lib::core::databases::Database; +use bittorrent_tracker_core::databases::Database; use torrust_tracker_lib::servers::apis::server; pub mod connection_info; diff --git a/tests/servers/api/v1/contract/context/auth_key.rs 
b/tests/servers/api/v1/contract/context/auth_key.rs index 3242c3ccc..47cf0ecd2 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -1,8 +1,8 @@ use std::time::Duration; +use bittorrent_tracker_core::authentication::Key; use serde::Serialize; use torrust_tracker_api_client::v1::client::{headers_with_request_id, AddKeyForm, Client}; -use torrust_tracker_lib::core::authentication::Key; use torrust_tracker_test_helpers::configuration; use uuid::Uuid; @@ -469,8 +469,8 @@ async fn should_not_allow_reloading_keys_for_unauthenticated_users() { mod deprecated_generate_key_endpoint { + use bittorrent_tracker_core::authentication::Key; use torrust_tracker_api_client::v1::client::{headers_with_request_id, Client}; - use torrust_tracker_lib::core::authentication::Key; use torrust_tracker_test_helpers::configuration; use uuid::Uuid; diff --git a/tests/servers/http/client.rs b/tests/servers/http/client.rs index 9fc278536..ca9703858 100644 --- a/tests/servers/http/client.rs +++ b/tests/servers/http/client.rs @@ -1,7 +1,7 @@ use std::net::IpAddr; +use bittorrent_tracker_core::authentication::Key; use reqwest::{Client as ReqwestClient, Response}; -use torrust_tracker_lib::core::authentication::Key; use super::requests::announce::{self, Query}; use super::requests::scrape; diff --git a/tests/servers/http/connection_info.rs b/tests/servers/http/connection_info.rs index 327bc0073..91486a3a7 100644 --- a/tests/servers/http/connection_info.rs +++ b/tests/servers/http/connection_info.rs @@ -1,4 +1,4 @@ -use torrust_tracker_lib::core::authentication::Key; +use bittorrent_tracker_core::authentication::Key; #[derive(Clone, Debug)] pub struct ConnectionInfo { diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 07ff2bc8c..c91be1544 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -1,16 +1,16 @@ use std::sync::Arc; use 
bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::authentication::handler::KeysHandler; +use bittorrent_tracker_core::databases::Database; +use bittorrent_tracker_core::statistics::repository::Repository; +use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use bittorrent_tracker_core::whitelist::manager::WhitelistManager; use futures::executor::block_on; use torrust_tracker_configuration::Configuration; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; use torrust_tracker_lib::container::HttpTrackerContainer; -use torrust_tracker_lib::core::authentication::handler::KeysHandler; -use torrust_tracker_lib::core::databases::Database; -use torrust_tracker_lib::core::statistics::repository::Repository; -use torrust_tracker_lib::core::torrent::repository::in_memory::InMemoryTorrentRepository; -use torrust_tracker_lib::core::whitelist::manager::WhitelistManager; use torrust_tracker_lib::servers::http::server::{HttpServer, Launcher, Running, Stopped}; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_primitives::peer; diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index f434467fc..be603161a 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -1387,7 +1387,7 @@ mod configured_as_private { use std::time::Duration; use bittorrent_primitives::info_hash::InfoHash; - use torrust_tracker_lib::core::authentication::Key; + use bittorrent_tracker_core::authentication::Key; use torrust_tracker_test_helpers::configuration; use crate::common::logging; @@ -1477,7 +1477,7 @@ mod configured_as_private { use aquatic_udp_protocol::PeerId; use bittorrent_primitives::info_hash::InfoHash; - use torrust_tracker_lib::core::authentication::Key; + use bittorrent_tracker_core::authentication::Key; use 
torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index af0b04e5c..1483e1e5f 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -2,12 +2,12 @@ use std::net::SocketAddr; use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::databases::Database; +use bittorrent_tracker_core::statistics::repository::Repository; +use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use torrust_tracker_configuration::{Configuration, DEFAULT_TIMEOUT}; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::container::UdpTrackerContainer; -use torrust_tracker_lib::core::databases::Database; -use torrust_tracker_lib::core::statistics::repository::Repository; -use torrust_tracker_lib::core::torrent::repository::in_memory::InMemoryTorrentRepository; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_lib::servers::udp::server::spawner::Spawner; use torrust_tracker_lib::servers::udp::server::states::{Running, Stopped}; From 8bb376de40bd0284290a94b706b79a83cbb03996 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 30 Jan 2025 07:53:33 +0000 Subject: [PATCH 188/802] chore(deps): update dependencies ``` cargo update Updating crates.io index Locking 22 packages to latest compatible versions Updating bumpalo v3.16.0 -> v3.17.0 Updating cmake v0.1.52 -> v0.1.53 Updating cpufeatures v0.2.16 -> v0.2.17 Adding getrandom v0.3.1 Updating httparse v1.9.5 -> v1.10.0 Updating hyper v1.5.2 -> v1.6.0 Updating native-tls v0.2.12 -> v0.2.13 Updating openssl v0.10.68 -> v0.10.69 Updating openssl-probe v0.1.5 -> v0.1.6 Adding rand v0.9.0 Adding rand_chacha v0.9.0 Adding rand_core v0.9.0 Updating rustls-pki-types v1.10.1 -> v1.11.0 Updating ryu v1.0.18 -> v1.0.19 Updating 
serde_json v1.0.137 -> v1.0.138 Updating tempfile v3.15.0 -> v3.16.0 Updating unicode-ident v1.0.14 -> v1.0.16 Adding wasi v0.13.3+wasi-0.2.2 Updating winnow v0.6.24 -> v0.6.25 Adding wit-bindgen-rt v0.33.0 Adding zerocopy v0.8.14 Adding zerocopy-derive v0.8.14 ``` --- Cargo.lock | 193 +++++++++++++----- packages/test-helpers/src/random.rs | 6 +- .../src/authentication/key/mod.rs | 6 +- src/console/ci/e2e/tracker_container.rs | 8 +- src/shared/crypto/ephemeral_instance_keys.rs | 4 +- tests/common/fixtures.rs | 8 +- 6 files changed, 151 insertions(+), 74 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d0d4d7e8f..bbf225018 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -23,7 +23,7 @@ version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ - "getrandom", + "getrandom 0.2.15", "once_cell", "version_check", ] @@ -146,7 +146,7 @@ dependencies = [ "quickcheck", "regex", "serde", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -158,7 +158,7 @@ dependencies = [ "aquatic_peer_id", "byteorder", "either", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -565,7 +565,7 @@ dependencies = [ "serde", "serde_json", "thiserror 1.0.69", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -588,7 +588,7 @@ dependencies = [ "torrust-tracker-located-error", "torrust-tracker-primitives", "tracing", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -606,7 +606,7 @@ dependencies = [ "r2d2", "r2d2_mysql", "r2d2_sqlite", - "rand", + "rand 0.9.0", "serde", "serde_json", "thiserror 2.0.11", @@ -735,9 +735,9 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.16.0" +version = "3.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" +checksum = 
"1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" [[package]] name = "bytecheck" @@ -938,9 +938,9 @@ checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "cmake" -version = "0.1.52" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c682c223677e0e5b6b7f63a64b9351844c3f1b1678a68b7ee617e30fb082620e" +checksum = "e24a03c8b52922d68a1589ad61032f2c1aa5a8158d2aa0d93c6e9534944bbad6" dependencies = [ "cc", ] @@ -991,9 +991,9 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" dependencies = [ "libc", ] @@ -1598,7 +1598,19 @@ checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.13.3+wasi-0.2.2", + "windows-targets", ] [[package]] @@ -1749,9 +1761,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.5" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" +checksum = "f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a" [[package]] name = "httpdate" @@ -1761,9 +1773,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "1.5.2" +version = "1.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" dependencies = [ "bytes", "futures-channel", @@ -2263,7 +2275,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.52.0", ] @@ -2368,7 +2380,7 @@ dependencies = [ "mysql-common-derive", "num-bigint", "num-traits", - "rand", + "rand 0.8.5", "regex", "rust_decimal", "saturating", @@ -2395,9 +2407,9 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" +checksum = "0dab59f8e050d5df8e4dd87d9206fb6f65a483e20ac9fda365ade4fab353196c" dependencies = [ "libc", "log", @@ -2518,9 +2530,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "openssl" -version = "0.10.68" +version = "0.10.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6174bc48f102d208783c2c84bf931bb75927a617866870de8a4ea85597f871f5" +checksum = "f5e534d133a060a3c19daec1eb3e98ec6f4685978834f2dbadfe2ec215bab64e" dependencies = [ "bitflags", "cfg-if", @@ -2544,9 +2556,9 @@ dependencies = [ [[package]] name = "openssl-probe" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-sys" @@ -2660,7 +2672,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ "phf_shared", - "rand", + "rand 0.8.5", ] [[package]] @@ -2782,7 +2794,7 @@ version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" dependencies = [ - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -2892,7 +2904,7 @@ checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ "env_logger", "log", - "rand", + "rand 0.8.5", ] [[package]] @@ -2949,8 +2961,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.0", + "zerocopy 0.8.14", ] [[package]] @@ -2960,7 +2983,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.0", ] [[package]] @@ -2969,7 +3002,17 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.15", +] + +[[package]] +name = "rand_core" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "b08f3c9802962f7e1b25113931d94f43ed9725bebc59db9d0c3e9a23b67e15ff" +dependencies = [ + "getrandom 0.3.1", + "zerocopy 0.8.14", ] [[package]] @@ -3097,7 +3140,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", - "getrandom", + "getrandom 0.2.15", "libc", "spin", "untrusted", @@ -3197,7 +3240,7 @@ dependencies = [ "borsh", "bytes", "num-traits", - "rand", + "rand 0.8.5", "rkyv", "serde", "serde_json", @@ -3261,9 +3304,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.10.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2bf47e6ff922db3825eb750c4e2ff784c6ff8fb9e13046ef6a1d1c5401b0b37" +checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" [[package]] name = "rustls-webpki" @@ -3284,9 +3327,9 @@ checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" [[package]] name = "ryu" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" +checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" [[package]] name = "same-file" @@ -3416,9 +3459,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.137" +version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "930cfb6e6abf99298aaad7d29abbef7a9999a9a8806a40088f55f0dcec03146b" +checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" dependencies = [ "indexmap 2.7.1", "itoa", @@ -3710,13 +3753,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.15.0" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a8a559c81686f576e8cd0290cd2a24a2a9ad80c98b3478856500fcbd7acd704" +checksum = "38c246215d7d24f48ae091a2902398798e05d978b24315d6efbc00ede9a8bb91" dependencies 
= [ "cfg-if", "fastrand", - "getrandom", + "getrandom 0.3.1", "once_cell", "rustix", "windows-sys 0.59.0", @@ -3986,7 +4029,7 @@ dependencies = [ "r2d2", "r2d2_mysql", "r2d2_sqlite", - "rand", + "rand 0.9.0", "regex", "reqwest", "ringbuf", @@ -4011,7 +4054,7 @@ dependencies = [ "tracing-subscriber", "url", "uuid", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -4106,14 +4149,14 @@ dependencies = [ "tdyne-peer-id-registry", "thiserror 2.0.11", "torrust-tracker-configuration", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] name = "torrust-tracker-test-helpers" version = "3.0.0-develop" dependencies = [ - "rand", + "rand 0.9.0", "torrust-tracker-configuration", ] @@ -4134,7 +4177,7 @@ dependencies = [ "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-primitives", - "zerocopy", + "zerocopy 0.7.35", ] [[package]] @@ -4285,7 +4328,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", - "rand", + "rand 0.8.5", "static_assertions", ] @@ -4306,9 +4349,9 @@ dependencies = [ [[package]] name = "unicode-ident" -version = "1.0.14" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" +checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" [[package]] name = "unicode-xid" @@ -4358,8 +4401,8 @@ version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3758f5e68192bb96cc8f9b7e2c2cfdabb435499a28499a42f8f984092adad4b" dependencies = [ - "getrandom", - "rand", + "getrandom 0.2.15", + "rand 0.8.5", ] [[package]] @@ -4411,6 +4454,15 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasi" +version = 
"0.13.3+wasi-0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] + [[package]] name = "wasm-bindgen" version = "0.2.100" @@ -4646,13 +4698,22 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.24" +version = "0.6.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8d71a593cc5c42ad7876e2c1fda56f314f3754c084128833e64f1345ff8a03a" +checksum = "ad699df48212c6cc6eb4435f35500ac6fd3b9913324f938aea302022ce19d310" dependencies = [ "memchr", ] +[[package]] +name = "wit-bindgen-rt" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags", +] + [[package]] name = "write16" version = "1.0.0" @@ -4711,7 +4772,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ "byteorder", - "zerocopy-derive", + "zerocopy-derive 0.7.35", +] + +[[package]] +name = "zerocopy" +version = "0.8.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a367f292d93d4eab890745e75a778da40909cab4d6ff8173693812f79c4a2468" +dependencies = [ + "zerocopy-derive 0.8.14", ] [[package]] @@ -4725,6 +4795,17 @@ dependencies = [ "syn 2.0.96", ] +[[package]] +name = "zerocopy-derive" +version = "0.8.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3931cb58c62c13adec22e38686b559c86a30565e16ad6e8510a337cedc611e1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "zerofrom" version = "0.1.5" diff --git a/packages/test-helpers/src/random.rs b/packages/test-helpers/src/random.rs index 2133dcd29..f096d695c 100644 --- 
a/packages/test-helpers/src/random.rs +++ b/packages/test-helpers/src/random.rs @@ -1,10 +1,10 @@ //! Random data generators for testing. -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; +use rand::distr::Alphanumeric; +use rand::{rng, Rng}; /// Returns a random alphanumeric string of a certain size. /// /// It is useful for generating random names, IDs, etc for testing. pub fn string(size: usize) -> String { - thread_rng().sample_iter(&Alphanumeric).take(size).map(char::from).collect() + rng().sample_iter(&Alphanumeric).take(size).map(char::from).collect() } diff --git a/packages/tracker-core/src/authentication/key/mod.rs b/packages/tracker-core/src/authentication/key/mod.rs index 37fc4764b..e3e7fc018 100644 --- a/packages/tracker-core/src/authentication/key/mod.rs +++ b/packages/tracker-core/src/authentication/key/mod.rs @@ -45,8 +45,8 @@ use std::sync::Arc; use std::time::Duration; use derive_more::Display; -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; +use rand::distr::Alphanumeric; +use rand::{rng, Rng}; use serde::{Deserialize, Serialize}; use thiserror::Error; use torrust_tracker_clock::clock::Time; @@ -81,7 +81,7 @@ pub fn generate_permanent_key() -> PeerKey { /// * `lifetime`: if `None` the key will be permanent. 
#[must_use] pub fn generate_key(lifetime: Option) -> PeerKey { - let random_id: String = thread_rng() + let random_id: String = rng() .sample_iter(&Alphanumeric) .take(AUTH_KEY_LENGTH) .map(char::from) diff --git a/src/console/ci/e2e/tracker_container.rs b/src/console/ci/e2e/tracker_container.rs index 0d15035a8..a3845c103 100644 --- a/src/console/ci/e2e/tracker_container.rs +++ b/src/console/ci/e2e/tracker_container.rs @@ -1,6 +1,6 @@ use std::time::Duration; -use rand::distributions::Alphanumeric; +use rand::distr::Alphanumeric; use rand::Rng; use super::docker::{RunOptions, RunningContainer}; @@ -113,11 +113,7 @@ impl TrackerContainer { } fn generate_random_container_name(prefix: &str) -> String { - let rand_string: String = rand::thread_rng() - .sample_iter(&Alphanumeric) - .take(20) - .map(char::from) - .collect(); + let rand_string: String = rand::rng().sample_iter(&Alphanumeric).take(20).map(char::from).collect(); format!("{prefix}{rand_string}") } diff --git a/src/shared/crypto/ephemeral_instance_keys.rs b/src/shared/crypto/ephemeral_instance_keys.rs index d214b6e6a..df560c3f5 100644 --- a/src/shared/crypto/ephemeral_instance_keys.rs +++ b/src/shared/crypto/ephemeral_instance_keys.rs @@ -15,10 +15,10 @@ pub type CipherArrayBlowfish = GenericArray(&mut ThreadRng::default())).expect("it could not generate key"); + pub static ref RANDOM_CIPHER_BLOWFISH: CipherBlowfish = CipherBlowfish::new_from_slice(&Rng::random::(&mut ThreadRng::default())).expect("it could not generate key"); /// The constant cipher for testing. 
pub static ref ZEROED_TEST_CIPHER_BLOWFISH: CipherBlowfish = CipherBlowfish::new_from_slice(&[0u8; 32]).expect("it could not generate key"); diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index f96b03dd1..1dd85ba2d 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -8,7 +8,7 @@ pub fn invalid_info_hashes() -> Vec { "-1".to_string(), "1.1".to_string(), "INVALID INFOHASH".to_string(), - "9c38422213e30bff212b30c360d26f9a0213642".to_string(), // 39-char length instead of 40 + "9c38422213e30bff212b30c360d26f9a0213642".to_string(), // 39-char length instead of 40. DevSkim: ignore DS173237 "9c38422213e30bff212b30c360d26f9a0213642&".to_string(), // Invalid char ] .to_vec() @@ -16,14 +16,14 @@ pub fn invalid_info_hashes() -> Vec { /// Returns a random info hash. pub fn random_info_hash() -> InfoHash { - let mut rng = rand::thread_rng(); - let random_bytes: [u8; 20] = rand::Rng::gen(&mut rng); + let mut rng = rand::rng(); + let random_bytes: [u8; 20] = rand::Rng::random(&mut rng); InfoHash::from_bytes(&random_bytes) } /// Returns a random transaction id. 
pub fn random_transaction_id() -> TransactionId { - let random_value = rand::Rng::gen::(&mut rand::thread_rng()); + let random_value = rand::Rng::random::(&mut rand::rng()); TransactionId::new(random_value) } From 9fa7b6172e04d453ac19c2741af403e0bac98d4f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 30 Jan 2025 16:26:48 +0000 Subject: [PATCH 189/802] fix: clippy errors on nightly nightly-x86_64-unknown-linux-gnu (default) rustc 1.86.0-nightly (ae5de6c75 2025-01-29) --- contrib/bencode/src/mutable/bencode_mut.rs | 5 +---- contrib/bencode/src/reference/bencode_ref.rs | 5 +---- contrib/bencode/src/reference/decode.rs | 3 ++- packages/test-helpers/src/configuration.rs | 2 +- .../torrent-repository/tests/common/repo.rs | 2 +- packages/tracker-api-client/src/v1/client.rs | 2 +- packages/tracker-client/src/udp/client.rs | 2 +- src/app.rs | 4 ++-- src/console/ci/e2e/logs_parser.rs | 2 +- .../apis/v1/context/auth_key/handlers.rs | 18 +++++++++--------- src/servers/udp/server/launcher.rs | 2 +- tests/servers/udp/contract.rs | 12 ++++++------ 12 files changed, 27 insertions(+), 32 deletions(-) diff --git a/contrib/bencode/src/mutable/bencode_mut.rs b/contrib/bencode/src/mutable/bencode_mut.rs index a3f95dbbf..21e00f7b0 100644 --- a/contrib/bencode/src/mutable/bencode_mut.rs +++ b/contrib/bencode/src/mutable/bencode_mut.rs @@ -82,10 +82,7 @@ impl<'a> BRefAccess for BencodeMut<'a> { fn str(&self) -> Option<&str> { let bytes = self.bytes()?; - match str::from_utf8(bytes) { - Ok(n) => Some(n), - Err(_) => None, - } + str::from_utf8(bytes).ok() } fn int(&self) -> Option { diff --git a/contrib/bencode/src/reference/bencode_ref.rs b/contrib/bencode/src/reference/bencode_ref.rs index 73aaad039..20d102cb4 100644 --- a/contrib/bencode/src/reference/bencode_ref.rs +++ b/contrib/bencode/src/reference/bencode_ref.rs @@ -107,10 +107,7 @@ impl<'a> BRefAccessExt<'a> for BencodeRef<'a> { fn str_ext(&self) -> Option<&'a str> { let bytes = self.bytes_ext()?; - match 
str::from_utf8(bytes) { - Ok(n) => Some(n), - Err(_) => None, - } + str::from_utf8(bytes).ok() } fn bytes_ext(&self) -> Option<&'a [u8]> { diff --git a/contrib/bencode/src/reference/decode.rs b/contrib/bencode/src/reference/decode.rs index 97c5cf1ff..37ca22549 100644 --- a/contrib/bencode/src/reference/decode.rs +++ b/contrib/bencode/src/reference/decode.rs @@ -129,7 +129,8 @@ fn decode_dict( }) } _ => (), - }; + } + curr_pos = next_pos; let (value, next_pos) = decode(bytes, curr_pos, opts, depth + 1)?; diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index e5de53fc2..678f4283a 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -153,7 +153,7 @@ pub fn ephemeral_ipv6() -> Configuration { if let Some(ref mut http_api) = cfg.http_api { http_api.bind_address.clone_from(&ipv6); - }; + } if let Some(ref mut http_trackers) = cfg.http_trackers { http_trackers[0].bind_address.clone_from(&ipv6); diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs index ebd829f3c..c8412952c 100644 --- a/packages/torrent-repository/tests/common/repo.rs +++ b/packages/torrent-repository/tests/common/repo.rs @@ -232,7 +232,7 @@ impl Repo { Repo::DashMapMutexStd(repo) => { repo.torrents.insert(*info_hash, torrent.into()); } - }; + } self.get(info_hash).await } } diff --git a/packages/tracker-api-client/src/v1/client.rs b/packages/tracker-api-client/src/v1/client.rs index d48d4c008..54daa3289 100644 --- a/packages/tracker-api-client/src/v1/client.rs +++ b/packages/tracker-api-client/src/v1/client.rs @@ -67,7 +67,7 @@ impl Client { if let Some(token) = &self.connection_info.api_token { query.add_param(QueryParam::new("token", token)); - }; + } self.get_request_with_query(path, query, headers).await } diff --git a/packages/tracker-client/src/udp/client.rs b/packages/tracker-client/src/udp/client.rs index facdfac38..89a33726d 100644 
--- a/packages/tracker-client/src/udp/client.rs +++ b/packages/tracker-client/src/udp/client.rs @@ -243,7 +243,7 @@ pub async fn check(remote_addr: &SocketAddr) -> Result { match client.send(connect_request.into()).await { Ok(_) => (), Err(e) => tracing::debug!("Error: {e:?}."), - }; + } let process = move |response| { if matches!(response, Response::Connect(_connect_response)) { diff --git a/src/app.rs b/src/app.rs index 13bdc904a..d69874eb0 100644 --- a/src/app.rs +++ b/src/app.rs @@ -98,7 +98,7 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> http_tracker::start_job(http_tracker_container, registar.give_form(), servers::http::Version::V1).await { jobs.push(job); - }; + } } } else { tracing::info!("No HTTP blocks in configuration"); @@ -111,7 +111,7 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> if let Some(job) = tracker_apis::start_job(http_api_container, registar.give_form(), servers::apis::Version::V1).await { jobs.push(job); - }; + } } else { tracing::info!("No API block in configuration"); } diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs index 95648a2b5..b39143c8f 100644 --- a/src/console/ci/e2e/logs_parser.rs +++ b/src/console/ci/e2e/logs_parser.rs @@ -76,7 +76,7 @@ impl RunningServices { if !line.contains(INFO_THRESHOLD) { continue; - }; + } if line.contains(UDP_TRACKER_LOG_TARGET) { if let Some(captures) = udp_re.captures(&clean_line) { diff --git a/src/servers/apis/v1/context/auth_key/handlers.rs b/src/servers/apis/v1/context/auth_key/handlers.rs index 7024ffeba..ca38ade37 100644 --- a/src/servers/apis/v1/context/auth_key/handlers.rs +++ b/src/servers/apis/v1/context/auth_key/handlers.rs @@ -22,11 +22,11 @@ use crate::servers::apis::v1::responses::{invalid_auth_key_param_response, ok_re /// It returns these types of responses: /// /// - `200` with a json [`AuthKey`] -/// resource. If the key was generated successfully. +/// resource. If the key was generated successfully. 
/// - `400` with an error if the key couldn't been added because of an invalid -/// request. +/// request. /// - `500` with serialized error in debug format. If the key couldn't be -/// generated. +/// generated. /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#generate-a-new-authentication-key) /// for more information about this endpoint. @@ -57,9 +57,9 @@ pub async fn add_auth_key_handler( /// It returns two types of responses: /// /// - `200` with an json [`AuthKey`] -/// resource. If the key was generated successfully. +/// resource. If the key was generated successfully. /// - `500` with serialized error in debug format. If the key couldn't be -/// generated. +/// generated. /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#generate-a-new-authentication-key) /// for more information about this endpoint. @@ -99,9 +99,9 @@ pub struct KeyParam(String); /// It returns two types of responses: /// /// - `200` with an json [`ActionStatus::Ok`](crate::servers::apis::v1::responses::ActionStatus::Ok) -/// response. If the key was deleted successfully. +/// response. If the key was deleted successfully. /// - `500` with serialized error in debug format. If the key couldn't be -/// deleted. +/// deleted. /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#delete-an-authentication-key) /// for more information about this endpoint. @@ -124,9 +124,9 @@ pub async fn delete_auth_key_handler( /// It returns two types of responses: /// /// - `200` with an json [`ActionStatus::Ok`](crate::servers::apis::v1::responses::ActionStatus::Ok) -/// response. If the keys were successfully reloaded. +/// response. If the keys were successfully reloaded. /// - `500` with serialized error in debug format. If the they couldn't be -/// reloaded. +/// reloaded. 
/// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#reload-authentication-keys) /// for more information about this endpoint. diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index 89b9b54d9..bbf2718ff 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -213,7 +213,7 @@ impl Launcher { stats_event_sender .send_event(statistics::event::Event::UdpRequestAborted) .await; - }; + } } } else { tokio::task::yield_now().await; diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index f6a1feb06..d38356ef4 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -25,7 +25,7 @@ async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrac match client.send(connect_request.into()).await { Ok(_) => (), Err(err) => panic!("{err}"), - }; + } let response = match client.receive().await { Ok(response) => response, @@ -52,7 +52,7 @@ async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_req match client.client.send(&empty_udp_request()).await { Ok(_) => (), Err(err) => panic!("{err}"), - }; + } let response = match client.client.receive().await { Ok(response) => response, @@ -94,7 +94,7 @@ mod receiving_a_connection_request { match client.send(connect_request.into()).await { Ok(_) => (), Err(err) => panic!("{err}"), - }; + } let response = match client.receive().await { Ok(response) => response, @@ -146,7 +146,7 @@ mod receiving_an_announce_request { match client.send(announce_request.into()).await { Ok(_) => (), Err(err) => panic!("{err}"), - }; + } match client.receive().await { Ok(response) => response, @@ -276,7 +276,7 @@ mod receiving_an_announce_request { match client.send(announce_request.into()).await { Ok(_) => (), Err(err) => panic!("{err}"), - }; + } assert!(client.receive().await.is_err()); @@ -333,7 +333,7 @@ mod receiving_an_scrape_request { match 
client.send(scrape_request.into()).await { Ok(_) => (), Err(err) => panic!("{err}"), - }; + } let response = match client.receive().await { Ok(response) => response, From 0ad88b620c715a9abcdbe5ad29528069716779b7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 31 Jan 2025 09:56:04 +0000 Subject: [PATCH 190/802] refactor: [#1228] move type from tracker-core to main lib This is the frist step in a bigger refactor. We will move statistics out of the tracker-core package into new packages. Statistics are not related to the tracker-core or enven handled there. That logic belongs to upper layers. --- packages/tracker-core/src/statistics/mod.rs | 1 - .../tracker-core/src/statistics/services.rs | 55 ------------------- src/core/statistics/services.rs | 27 ++++++--- .../apis/v1/context/stats/resources.rs | 5 +- .../apis/v1/context/stats/responses.rs | 2 +- 5 files changed, 24 insertions(+), 66 deletions(-) delete mode 100644 packages/tracker-core/src/statistics/services.rs diff --git a/packages/tracker-core/src/statistics/mod.rs b/packages/tracker-core/src/statistics/mod.rs index 2ffbc0c8f..7517bab6e 100644 --- a/packages/tracker-core/src/statistics/mod.rs +++ b/packages/tracker-core/src/statistics/mod.rs @@ -28,5 +28,4 @@ pub mod event; pub mod keeper; pub mod metrics; pub mod repository; -pub mod services; pub mod setup; diff --git a/packages/tracker-core/src/statistics/services.rs b/packages/tracker-core/src/statistics/services.rs deleted file mode 100644 index 196c6b340..000000000 --- a/packages/tracker-core/src/statistics/services.rs +++ /dev/null @@ -1,55 +0,0 @@ -//! Statistics services. -//! -//! It includes: -//! -//! - A [`factory`](crate::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. -//! - A [`get_metrics`] service to get the tracker [`metrics`](crate::core::statistics::metrics::Metrics). -//! -//! Tracker metrics are collected using a Publisher-Subscribe pattern. -//! -//! 
The factory function builds two structs: -//! -//! - An statistics event [`Sender`](crate::core::statistics::event::sender::Sender) -//! - An statistics [`Repository`] -//! -//! ```text -//! let (stats_event_sender, stats_repository) = factory(tracker_usage_statistics); -//! ``` -//! -//! The statistics repository is responsible for storing the metrics in memory. -//! The statistics event sender allows sending events related to metrics. -//! There is an event listener that is receiving all the events and processing them with an event handler. -//! Then, the event handler updates the metrics depending on the received event. -//! -//! For example, if you send the event [`Event::Udp4Connect`](crate::core::statistics::event::Event::Udp4Connect): -//! -//! ```text -//! let result = event_sender.send_event(Event::Udp4Connect).await; -//! ``` -//! -//! Eventually the counter for UDP connections from IPv4 peers will be increased. -//! -//! ```rust,no_run -//! pub struct Metrics { -//! // ... -//! pub udp4_connections_handled: u64, // This will be incremented -//! // ... -//! } -//! ``` -use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; - -use crate::statistics::metrics::Metrics; - -/// All the metrics collected by the tracker. -#[derive(Debug, PartialEq)] -pub struct TrackerMetrics { - /// Domain level metrics. - /// - /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: TorrentsMetrics, - - /// Application level metrics. Usage statistics/metrics. - /// - /// Metrics about how the tracker is been used (number of udp announce requests, number of http scrape requests, etcetera) - pub protocol_metrics: Metrics, -} diff --git a/src/core/statistics/services.rs b/src/core/statistics/services.rs index a4bcc411e..514f126f9 100644 --- a/src/core/statistics/services.rs +++ b/src/core/statistics/services.rs @@ -2,14 +2,14 @@ //! //! It includes: //! -//! 
- A [`factory`](bittorrent_tracker_core::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. -//! - A [`get_metrics`] service to get the tracker [`metrics`](bittorrent_tracker_core::statistics::metrics::Metrics). +//! - A [`factory`](crate::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. +//! - A [`get_metrics`] service to get the tracker [`metrics`](crate::core::statistics::metrics::Metrics). //! //! Tracker metrics are collected using a Publisher-Subscribe pattern. //! //! The factory function builds two structs: //! -//! - An statistics event [`Sender`](bittorrent_tracker_core::statistics::event::sender::Sender) +//! - An statistics event [`Sender`](crate::core::statistics::event::sender::Sender) //! - An statistics [`Repository`] //! //! ```text @@ -21,7 +21,7 @@ //! There is an event listener that is receiving all the events and processing them with an event handler. //! Then, the event handler updates the metrics depending on the received event. //! -//! For example, if you send the event [`Event::Udp4Connect`](bittorrent_tracker_core::statistics::event::Event::Udp4Connect): +//! For example, if you send the event [`Event::Udp4Connect`](crate::core::statistics::event::Event::Udp4Connect): //! //! ```text //! let result = event_sender.send_event(Event::Udp4Connect).await; @@ -40,12 +40,26 @@ use std::sync::Arc; use bittorrent_tracker_core::statistics::metrics::Metrics; use bittorrent_tracker_core::statistics::repository::Repository; -use bittorrent_tracker_core::statistics::services::TrackerMetrics; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use tokio::sync::RwLock; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use crate::servers::udp::server::banning::BanService; +/// All the metrics collected by the tracker. +#[derive(Debug, PartialEq)] +pub struct TrackerMetrics { + /// Domain level metrics. 
+ /// + /// General metrics for all torrents (number of seeders, leechers, etcetera) + pub torrents_metrics: TorrentsMetrics, + + /// Application level metrics. Usage statistics/metrics. + /// + /// Metrics about how the tracker is been used (number of udp announce requests, number of http scrape requests, etcetera) + pub protocol_metrics: Metrics, +} + /// It returns all the [`TrackerMetrics`] pub async fn get_metrics( in_memory_torrent_repository: Arc, @@ -96,7 +110,6 @@ pub async fn get_metrics( mod tests { use std::sync::Arc; - use bittorrent_tracker_core::statistics::services::TrackerMetrics; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::{self, statistics}; use tokio::sync::RwLock; @@ -104,7 +117,7 @@ mod tests { use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_test_helpers::configuration; - use crate::core::statistics::services::get_metrics; + use crate::core::statistics::services::{get_metrics, TrackerMetrics}; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index d4a0ec7ec..7b2242d40 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -1,8 +1,9 @@ //! API resources for the [`stats`](crate::servers::apis::v1::context::stats) //! API context. -use bittorrent_tracker_core::statistics::services::TrackerMetrics; use serde::{Deserialize, Serialize}; +use crate::core::statistics::services::TrackerMetrics; + /// It contains all the statistics generated by the tracker. 
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct Stats { @@ -118,10 +119,10 @@ impl From for Stats { #[cfg(test)] mod tests { use bittorrent_tracker_core::statistics::metrics::Metrics; - use bittorrent_tracker_core::statistics::services::TrackerMetrics; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use super::Stats; + use crate::core::statistics::services::TrackerMetrics; #[test] fn stats_resource_should_be_converted_from_tracker_metrics() { diff --git a/src/servers/apis/v1/context/stats/responses.rs b/src/servers/apis/v1/context/stats/responses.rs index fc74b5f8d..6fda43f8c 100644 --- a/src/servers/apis/v1/context/stats/responses.rs +++ b/src/servers/apis/v1/context/stats/responses.rs @@ -1,9 +1,9 @@ //! API responses for the [`stats`](crate::servers::apis::v1::context::stats) //! API context. use axum::response::{IntoResponse, Json, Response}; -use bittorrent_tracker_core::statistics::services::TrackerMetrics; use super::resources::Stats; +use crate::core::statistics::services::TrackerMetrics; /// `200` response that contains the [`Stats`] resource as json. #[must_use] From 9318842a2745921e3dc4a05ed1f9b632768aaf7d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 31 Jan 2025 10:23:30 +0000 Subject: [PATCH 191/802] refactor: [#1228] move statistics back from tracker-core to main lib The statistics are only used at the higher levels: UDP and HTTP tracker. We will move them to new packages. 
--- Cargo.lock | 1 - packages/tracker-core/Cargo.toml | 1 - packages/tracker-core/src/lib.rs | 3 +- packages/tracker-core/src/statistics/mod.rs | 31 ------------ src/bootstrap/app.rs | 4 +- src/container.rs | 5 +- src/core/statistics/mod.rs | 1 - src/lib.rs | 2 +- src/{core => packages}/mod.rs | 0 .../packages}/statistics/event/handler.rs | 10 ++-- .../packages}/statistics/event/listener.rs | 2 +- .../packages}/statistics/event/mod.rs | 0 .../packages}/statistics/event/sender.rs | 4 +- .../src => src/packages}/statistics/keeper.rs | 6 +-- .../packages}/statistics/metrics.rs | 0 src/packages/statistics/mod.rs | 6 +++ .../packages}/statistics/repository.rs | 0 src/{core => packages}/statistics/services.rs | 18 +++---- .../src => src/packages}/statistics/setup.rs | 6 +-- src/servers/apis/v1/context/stats/handlers.rs | 5 +- .../apis/v1/context/stats/resources.rs | 7 +-- .../apis/v1/context/stats/responses.rs | 2 +- src/servers/http/v1/handlers/announce.rs | 10 ++-- src/servers/http/v1/handlers/scrape.rs | 7 ++- src/servers/http/v1/services/announce.rs | 17 ++++--- src/servers/http/v1/services/scrape.rs | 22 +++++---- src/servers/udp/handlers.rs | 47 +++++++++++-------- src/servers/udp/server/launcher.rs | 3 +- src/servers/udp/server/processor.rs | 5 +- tests/servers/http/environment.rs | 3 +- tests/servers/udp/environment.rs | 3 +- 31 files changed, 118 insertions(+), 113 deletions(-) delete mode 100644 packages/tracker-core/src/statistics/mod.rs delete mode 100644 src/core/statistics/mod.rs rename src/{core => packages}/mod.rs (100%) rename {packages/tracker-core/src => src/packages}/statistics/event/handler.rs (96%) rename {packages/tracker-core/src => src/packages}/statistics/event/listener.rs (83%) rename {packages/tracker-core/src => src/packages}/statistics/event/mod.rs (100%) rename {packages/tracker-core/src => src/packages}/statistics/event/sender.rs (82%) rename {packages/tracker-core/src => src/packages}/statistics/keeper.rs (92%) rename 
{packages/tracker-core/src => src/packages}/statistics/metrics.rs (100%) create mode 100644 src/packages/statistics/mod.rs rename {packages/tracker-core/src => src/packages}/statistics/repository.rs (100%) rename src/{core => packages}/statistics/services.rs (90%) rename {packages/tracker-core/src => src/packages}/statistics/setup.rs (82%) diff --git a/Cargo.lock b/Cargo.lock index bbf225018..d868f7452 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -600,7 +600,6 @@ dependencies = [ "bittorrent-primitives", "chrono", "derive_more", - "futures", "local-ip-address", "mockall", "r2d2", diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml index b38f7c90f..7b5b1f2c2 100644 --- a/packages/tracker-core/Cargo.toml +++ b/packages/tracker-core/Cargo.toml @@ -20,7 +20,6 @@ bittorrent-http-protocol = { version = "3.0.0-develop", path = "../http-protocol bittorrent-primitives = "0.1.0" chrono = { version = "0", default-features = false, features = ["clock"] } derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } -futures = "0" r2d2 = "0" r2d2_mysql = "25" r2d2_sqlite = { version = "0", features = ["bundled"] } diff --git a/packages/tracker-core/src/lib.rs b/packages/tracker-core/src/lib.rs index 2fb2d936d..ec4371322 100644 --- a/packages/tracker-core/src/lib.rs +++ b/packages/tracker-core/src/lib.rs @@ -346,7 +346,7 @@ //! //! Services are domain services on top of the core tracker domain. Right now there are two types of service: //! -//! - For statistics: [`crate::core::statistics::services`] +//! - For statistics: [`crate::packages::statistics::services`] //! - For torrents: [`crate::core::torrent::services`] //! //! Services usually format the data inside the tracker to make it easier to consume by other parts. 
@@ -442,7 +442,6 @@ pub mod authentication; pub mod databases; pub mod error; pub mod scrape_handler; -pub mod statistics; pub mod torrent; pub mod whitelist; diff --git a/packages/tracker-core/src/statistics/mod.rs b/packages/tracker-core/src/statistics/mod.rs deleted file mode 100644 index 7517bab6e..000000000 --- a/packages/tracker-core/src/statistics/mod.rs +++ /dev/null @@ -1,31 +0,0 @@ -//! Structs to collect and keep tracker metrics. -//! -//! The tracker collects metrics such as: -//! -//! - Number of connections handled -//! - Number of `announce` requests handled -//! - Number of `scrape` request handled -//! -//! These metrics are collected for each connection type: UDP and HTTP and -//! also for each IP version used by the peers: IPv4 and IPv6. -//! -//! > Notice: that UDP tracker have an specific `connection` request. For the -//! > `HTTP` metrics the counter counts one connection for each `announce` or -//! > `scrape` request. -//! -//! The data is collected by using an `event-sender -> event listener` model. -//! -//! The tracker uses a [`Sender`](crate::core::statistics::event::sender::Sender) -//! instance to send an event. -//! -//! The [`statistics::keeper::Keeper`](crate::core::statistics::keeper::Keeper) listens to new -//! events and uses the [`statistics::repository::Repository`](crate::core::statistics::repository::Repository) to -//! upgrade and store metrics. -//! -//! See the [`statistics::event::Event`](crate::core::statistics::event::Event) enum to check -//! which events are available. 
-pub mod event; -pub mod keeper; -pub mod metrics; -pub mod repository; -pub mod setup; diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index f7506800e..7313b2808 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -20,13 +20,13 @@ use bittorrent_tracker_core::authentication::key::repository::persisted::Databas use bittorrent_tracker_core::authentication::service; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_tracker_core::statistics; use bittorrent_tracker_core::torrent::manager::TorrentsManager; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_tracker_core::whitelist::setup::initialize_whitelist_manager; +use packages::statistics; use tokio::sync::RwLock; use torrust_tracker_clock::static_time; use torrust_tracker_configuration::validator::Validator; @@ -34,12 +34,12 @@ use torrust_tracker_configuration::Configuration; use tracing::instrument; use super::config::initialize_configuration; -use crate::bootstrap; use crate::container::AppContainer; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; use crate::shared::crypto::ephemeral_instance_keys; use crate::shared::crypto::keys::{self, Keeper as _}; +use crate::{bootstrap, packages}; /// It loads the configuration from the environment and builds app container. 
/// diff --git a/src/container.rs b/src/container.rs index cae2d07ce..965dbfa2a 100644 --- a/src/container.rs +++ b/src/container.rs @@ -5,16 +5,17 @@ use bittorrent_tracker_core::authentication::handler::KeysHandler; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::Database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_tracker_core::statistics::event::sender::Sender; -use bittorrent_tracker_core::statistics::repository::Repository; use bittorrent_tracker_core::torrent::manager::TorrentsManager; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist; use bittorrent_tracker_core::whitelist::manager::WhitelistManager; +use packages::statistics::event::sender::Sender; +use packages::statistics::repository::Repository; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, HttpApi, HttpTracker, UdpTracker}; +use crate::packages; use crate::servers::udp::server::banning::BanService; pub struct AppContainer { diff --git a/src/core/statistics/mod.rs b/src/core/statistics/mod.rs deleted file mode 100644 index 4e379ae78..000000000 --- a/src/core/statistics/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod services; diff --git a/src/lib.rs b/src/lib.rs index 212430605..b9ab402ab 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -494,7 +494,7 @@ pub mod app; pub mod bootstrap; pub mod console; pub mod container; -pub mod core; +pub mod packages; pub mod servers; pub mod shared; diff --git a/src/core/mod.rs b/src/packages/mod.rs similarity index 100% rename from src/core/mod.rs rename to src/packages/mod.rs diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/src/packages/statistics/event/handler.rs similarity index 96% rename from packages/tracker-core/src/statistics/event/handler.rs rename to 
src/packages/statistics/event/handler.rs index 93ac05dde..99339041a 100644 --- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/src/packages/statistics/event/handler.rs @@ -1,5 +1,5 @@ -use crate::statistics::event::{Event, UdpResponseKind}; -use crate::statistics::repository::Repository; +use crate::packages::statistics::event::{Event, UdpResponseKind}; +use crate::packages::statistics::repository::Repository; pub async fn handle_event(event: Event, stats_repository: &Repository) { match event { @@ -102,9 +102,9 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { #[cfg(test)] mod tests { - use crate::statistics::event::handler::handle_event; - use crate::statistics::event::Event; - use crate::statistics::repository::Repository; + use crate::packages::statistics::event::handler::handle_event; + use crate::packages::statistics::event::Event; + use crate::packages::statistics::repository::Repository; #[tokio::test] async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/src/packages/statistics/event/listener.rs similarity index 83% rename from packages/tracker-core/src/statistics/event/listener.rs rename to src/packages/statistics/event/listener.rs index f1a2e25de..009784fba 100644 --- a/packages/tracker-core/src/statistics/event/listener.rs +++ b/src/packages/statistics/event/listener.rs @@ -2,7 +2,7 @@ use tokio::sync::mpsc; use super::handler::handle_event; use super::Event; -use crate::statistics::repository::Repository; +use crate::packages::statistics::repository::Repository; pub async fn dispatch_events(mut receiver: mpsc::Receiver, stats_repository: Repository) { while let Some(event) = receiver.recv().await { diff --git a/packages/tracker-core/src/statistics/event/mod.rs b/src/packages/statistics/event/mod.rs similarity index 100% rename from packages/tracker-core/src/statistics/event/mod.rs rename to 
src/packages/statistics/event/mod.rs diff --git a/packages/tracker-core/src/statistics/event/sender.rs b/src/packages/statistics/event/sender.rs similarity index 82% rename from packages/tracker-core/src/statistics/event/sender.rs rename to src/packages/statistics/event/sender.rs index 1b663b5d1..b9b989053 100644 --- a/packages/tracker-core/src/statistics/event/sender.rs +++ b/src/packages/statistics/event/sender.rs @@ -13,10 +13,10 @@ pub trait Sender: Sync + Send { fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; } -/// An [`statistics::EventSender`](crate::core::statistics::event::sender::Sender) implementation. +/// An [`statistics::EventSender`](crate::packages::statistics::event::sender::Sender) implementation. /// /// It uses a channel sender to send the statistic events. The channel is created by a -/// [`statistics::Keeper`](crate::core::statistics::keeper::Keeper) +/// [`statistics::Keeper`](crate::packages::statistics::keeper::Keeper) #[allow(clippy::module_name_repetitions)] pub struct ChannelSender { pub(crate) sender: mpsc::Sender, diff --git a/packages/tracker-core/src/statistics/keeper.rs b/src/packages/statistics/keeper.rs similarity index 92% rename from packages/tracker-core/src/statistics/keeper.rs rename to src/packages/statistics/keeper.rs index a3d4542f7..493e61cb2 100644 --- a/packages/tracker-core/src/statistics/keeper.rs +++ b/src/packages/statistics/keeper.rs @@ -51,9 +51,9 @@ impl Keeper { #[cfg(test)] mod tests { - use crate::statistics::event::Event; - use crate::statistics::keeper::Keeper; - use crate::statistics::metrics::Metrics; + use crate::packages::statistics::event::Event; + use crate::packages::statistics::keeper::Keeper; + use crate::packages::statistics::metrics::Metrics; #[tokio::test] async fn should_contain_the_tracker_statistics() { diff --git a/packages/tracker-core/src/statistics/metrics.rs b/src/packages/statistics/metrics.rs similarity index 100% rename from 
packages/tracker-core/src/statistics/metrics.rs rename to src/packages/statistics/metrics.rs diff --git a/src/packages/statistics/mod.rs b/src/packages/statistics/mod.rs new file mode 100644 index 000000000..939a41061 --- /dev/null +++ b/src/packages/statistics/mod.rs @@ -0,0 +1,6 @@ +pub mod event; +pub mod keeper; +pub mod metrics; +pub mod repository; +pub mod services; +pub mod setup; diff --git a/packages/tracker-core/src/statistics/repository.rs b/src/packages/statistics/repository.rs similarity index 100% rename from packages/tracker-core/src/statistics/repository.rs rename to src/packages/statistics/repository.rs diff --git a/src/core/statistics/services.rs b/src/packages/statistics/services.rs similarity index 90% rename from src/core/statistics/services.rs rename to src/packages/statistics/services.rs index 514f126f9..444ba533c 100644 --- a/src/core/statistics/services.rs +++ b/src/packages/statistics/services.rs @@ -2,14 +2,14 @@ //! //! It includes: //! -//! - A [`factory`](crate::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. -//! - A [`get_metrics`] service to get the tracker [`metrics`](crate::core::statistics::metrics::Metrics). +//! - A [`factory`](crate::packages::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. +//! - A [`get_metrics`] service to get the tracker [`metrics`](crate::packages::statistics::metrics::Metrics). //! //! Tracker metrics are collected using a Publisher-Subscribe pattern. //! //! The factory function builds two structs: //! -//! - An statistics event [`Sender`](crate::core::statistics::event::sender::Sender) +//! - An statistics event [`Sender`](crate::packages::statistics::event::sender::Sender) //! - An statistics [`Repository`] //! //! ```text @@ -21,7 +21,7 @@ //! There is an event listener that is receiving all the events and processing them with an event handler. //! 
Then, the event handler updates the metrics depending on the received event. //! -//! For example, if you send the event [`Event::Udp4Connect`](crate::core::statistics::event::Event::Udp4Connect): +//! For example, if you send the event [`Event::Udp4Connect`](crate::packages::statistics::event::Event::Udp4Connect): //! //! ```text //! let result = event_sender.send_event(Event::Udp4Connect).await; @@ -38,12 +38,13 @@ //! ``` use std::sync::Arc; -use bittorrent_tracker_core::statistics::metrics::Metrics; -use bittorrent_tracker_core::statistics::repository::Repository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use packages::statistics::metrics::Metrics; +use packages::statistics::repository::Repository; use tokio::sync::RwLock; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use crate::packages; use crate::servers::udp::server::banning::BanService; /// All the metrics collected by the tracker. @@ -111,13 +112,14 @@ mod tests { use std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::{self, statistics}; + use bittorrent_tracker_core::{self}; use tokio::sync::RwLock; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_test_helpers::configuration; - use crate::core::statistics::services::{get_metrics, TrackerMetrics}; + use crate::packages::statistics; + use crate::packages::statistics::services::{get_metrics, TrackerMetrics}; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; diff --git a/packages/tracker-core/src/statistics/setup.rs b/src/packages/statistics/setup.rs similarity index 82% rename from packages/tracker-core/src/statistics/setup.rs rename to src/packages/statistics/setup.rs index 701392176..2a187dcf0 100644 --- 
a/packages/tracker-core/src/statistics/setup.rs +++ b/src/packages/statistics/setup.rs @@ -1,14 +1,14 @@ //! Setup for the tracker statistics. //! //! The [`factory`] function builds the structs needed for handling the tracker metrics. -use crate::statistics; +use crate::packages::statistics; /// It builds the structs needed for handling the tracker metrics. /// /// It returns: /// -/// - An statistics event [`Sender`](crate::core::statistics::event::sender::Sender) that allows you to send events related to statistics. -/// - An statistics [`Repository`](crate::core::statistics::repository::Repository) which is an in-memory repository for the tracker metrics. +/// - An statistics event [`Sender`](crate::packages::statistics::event::sender::Sender) that allows you to send events related to statistics. +/// - An statistics [`Repository`](crate::packages::statistics::repository::Repository) which is an in-memory repository for the tracker metrics. /// /// When the input argument `tracker_usage_statistics`is false the setup does not run the event listeners, consequently the statistics /// events are sent are received but not dispatched to the handler. 
diff --git a/src/servers/apis/v1/context/stats/handlers.rs b/src/servers/apis/v1/context/stats/handlers.rs index b4ead78ea..ffd4f1787 100644 --- a/src/servers/apis/v1/context/stats/handlers.rs +++ b/src/servers/apis/v1/context/stats/handlers.rs @@ -5,13 +5,14 @@ use std::sync::Arc; use axum::extract::State; use axum::response::Response; use axum_extra::extract::Query; -use bittorrent_tracker_core::statistics::repository::Repository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use packages::statistics::repository::Repository; use serde::Deserialize; use tokio::sync::RwLock; use super::responses::{metrics_response, stats_response}; -use crate::core::statistics::services::get_metrics; +use crate::packages; +use crate::packages::statistics::services::get_metrics; use crate::servers::udp::server::banning::BanService; #[derive(Deserialize, Debug, Default)] diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index 7b2242d40..5900e293a 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -2,7 +2,7 @@ //! API context. use serde::{Deserialize, Serialize}; -use crate::core::statistics::services::TrackerMetrics; +use crate::packages::statistics::services::TrackerMetrics; /// It contains all the statistics generated by the tracker. 
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -118,11 +118,12 @@ impl From for Stats { #[cfg(test)] mod tests { - use bittorrent_tracker_core::statistics::metrics::Metrics; + use packages::statistics::metrics::Metrics; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use super::Stats; - use crate::core::statistics::services::TrackerMetrics; + use crate::packages::statistics::services::TrackerMetrics; + use crate::packages::{self}; #[test] fn stats_resource_should_be_converted_from_tracker_metrics() { diff --git a/src/servers/apis/v1/context/stats/responses.rs b/src/servers/apis/v1/context/stats/responses.rs index 6fda43f8c..e3b45a66b 100644 --- a/src/servers/apis/v1/context/stats/responses.rs +++ b/src/servers/apis/v1/context/stats/responses.rs @@ -3,7 +3,7 @@ use axum::response::{IntoResponse, Json, Response}; use super::resources::Stats; -use crate::core::statistics::services::TrackerMetrics; +use crate::packages::statistics::services::TrackerMetrics; /// `200` response that contains the [`Stats`] resource as json. 
#[must_use] diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 40462c31d..d3225ee29 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -19,9 +19,9 @@ use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::authentication::Key; -use bittorrent_tracker_core::statistics::event::sender::Sender; use bittorrent_tracker_core::whitelist; use hyper::StatusCode; +use packages::statistics::event::sender::Sender; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; @@ -33,7 +33,7 @@ use crate::servers::http::v1::extractors::authentication_key::Extract as Extract use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; use crate::servers::http::v1::handlers::common::auth; use crate::servers::http::v1::services::{self}; -use crate::CurrentClock; +use crate::{packages, CurrentClock}; /// It handles the `announce` request when the HTTP tracker does not require /// authentication (no PATH `key` parameter required). 
@@ -256,15 +256,17 @@ mod tests { use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::core_tests::sample_info_hash; use bittorrent_tracker_core::databases::setup::initialize_database; - use bittorrent_tracker_core::statistics; - use bittorrent_tracker_core::statistics::event::sender::Sender; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use packages::statistics; + use packages::statistics::event::sender::Sender; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_test_helpers::configuration; + use crate::packages; + struct CoreTrackerServices { pub core_config: Arc, pub announce_handler: Arc, diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 1b9196e25..141cf4c45 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -15,11 +15,12 @@ use bittorrent_http_protocol::v1::services::peer_ip_resolver::{self, ClientIpSou use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::authentication::Key; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_tracker_core::statistics::event::sender::Sender; use hyper::StatusCode; +use packages::statistics::event::sender::Sender; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; +use crate::packages; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; use crate::servers::http::v1::extractors::scrape_request::ExtractRequest; @@ 
-174,13 +175,15 @@ mod tests { use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; - use bittorrent_tracker_core::statistics; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use packages::statistics; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_test_helpers::configuration; + use crate::packages; + struct CoreTrackerServices { pub core_config: Arc, pub scrape_handler: Arc, diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 9e74ab8a5..61bbd93c6 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -12,11 +12,13 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; -use bittorrent_tracker_core::statistics; -use bittorrent_tracker_core::statistics::event::sender::Sender; +use packages::statistics; +use packages::statistics::event::sender::Sender; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; +use crate::packages; + /// The HTTP tracker `announce` service. 
/// /// The service sends an statistics event that increments: @@ -61,10 +63,10 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::databases::setup::initialize_database; - use bittorrent_tracker_core::statistics; - use bittorrent_tracker_core::statistics::event::sender::Sender; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use packages::statistics; + use packages::statistics::event::sender::Sender; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; @@ -122,11 +124,13 @@ mod tests { } } - use bittorrent_tracker_core::statistics::event::Event; use futures::future::BoxFuture; use mockall::mock; + use packages::statistics::event::Event; use tokio::sync::mpsc::error::SendError; + use crate::packages; + mock! 
{ StatsEventSender {} impl Sender for StatsEventSender { @@ -142,16 +146,17 @@ mod tests { use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; use bittorrent_tracker_core::core_tests::sample_info_hash; use bittorrent_tracker_core::databases::setup::initialize_database; - use bittorrent_tracker_core::statistics; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use mockall::predicate::eq; + use packages::statistics; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; + use crate::packages; use crate::servers::http::v1::services::announce::invoke; use crate::servers::http::v1::services::announce::tests::{ initialize_core_tracker_services, sample_peer, MockStatsEventSender, diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 59a7d34c7..1ac42ff10 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -12,10 +12,12 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_tracker_core::statistics::event::sender::Sender; -use bittorrent_tracker_core::statistics::{self}; +use packages::statistics::event::sender::Sender; +use packages::statistics::{self}; use torrust_tracker_primitives::core::ScrapeData; +use crate::packages; + /// The HTTP tracker `scrape` service. 
/// /// The service sends an statistics event that increments: @@ -80,18 +82,20 @@ mod tests { use bittorrent_tracker_core::core_tests::sample_info_hash; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; - use bittorrent_tracker_core::statistics::event::sender::Sender; - use bittorrent_tracker_core::statistics::event::Event; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; use mockall::mock; + use packages::statistics::event::sender::Sender; + use packages::statistics::event::Event; use tokio::sync::mpsc::error::SendError; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; + use crate::packages; + fn initialize_announce_and_scrape_handlers_for_public_tracker() -> (Arc, Arc) { let config = configuration::ephemeral_public(); @@ -150,11 +154,12 @@ mod tests { use std::sync::Arc; use bittorrent_tracker_core::announce_handler::PeersWanted; - use bittorrent_tracker_core::statistics; use mockall::predicate::eq; + use packages::statistics; use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + use crate::packages; use crate::servers::http::v1::services::scrape::invoke; use crate::servers::http::v1::services::scrape::tests::{ initialize_announce_and_scrape_handlers_for_public_tracker, initialize_scrape_handler, sample_info_hash, @@ -163,7 +168,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_scrape_data_for_a_torrent() { - let (stats_event_sender, _stats_repository) = bittorrent_tracker_core::statistics::setup::factory(false); + let 
(stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let (announce_handler, scrape_handler) = initialize_announce_and_scrape_handlers_for_public_tracker(); @@ -235,10 +240,11 @@ mod tests { use std::sync::Arc; use bittorrent_tracker_core::announce_handler::PeersWanted; - use bittorrent_tracker_core::statistics; use mockall::predicate::eq; + use packages::statistics; use torrust_tracker_primitives::core::ScrapeData; + use crate::packages; use crate::servers::http::v1::services::scrape::fake; use crate::servers::http::v1::services::scrape::tests::{ initialize_announce_and_scrape_handlers_for_public_tracker, sample_info_hash, sample_info_hashes, sample_peer, @@ -247,7 +253,7 @@ mod tests { #[tokio::test] async fn it_should_always_return_the_zeroed_scrape_data_for_a_torrent() { - let (stats_event_sender, _stats_repository) = bittorrent_tracker_core::statistics::setup::factory(false); + let (stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let (announce_handler, _scrape_handler) = initialize_announce_and_scrape_handlers_for_public_tracker(); diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 84b2f1db2..9f2562713 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -13,8 +13,8 @@ use aquatic_udp_protocol::{ use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_tracker_core::statistics::event::sender::Sender; -use bittorrent_tracker_core::{statistics, whitelist}; +use bittorrent_tracker_core::whitelist; +use packages::statistics::event::sender::Sender; use torrust_tracker_clock::clock::Time as _; use torrust_tracker_configuration::Core; use tracing::{instrument, Level}; @@ -24,10 +24,11 @@ use 
zerocopy::network_endian::I32; use super::connection_cookie::{check, make}; use super::RawRequest; use crate::container::UdpTrackerContainer; +use crate::packages::statistics; use crate::servers::udp::error::Error; use crate::servers::udp::{peer_builder, UDP_TRACKER_LOG_TARGET}; use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; -use crate::CurrentClock; +use crate::{packages, CurrentClock}; #[derive(Debug, Clone, PartialEq)] pub(super) struct CookieTimeValues { @@ -471,15 +472,15 @@ mod tests { use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; - use bittorrent_tracker_core::statistics::event::sender::Sender; - use bittorrent_tracker_core::statistics::event::Event; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::whitelist; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; - use bittorrent_tracker_core::{statistics, whitelist}; use futures::future::BoxFuture; use mockall::mock; + use packages::statistics::event::sender::Sender; + use packages::statistics::event::Event; use tokio::sync::mpsc::error::SendError; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{Configuration, Core}; @@ -487,7 +488,8 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::gen_remote_fingerprint; - use crate::CurrentClock; + use crate::packages::statistics; + use crate::{packages, CurrentClock}; struct CoreTrackerServices { pub core_config: Arc, @@ -649,10 +651,11 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; - use 
bittorrent_tracker_core::statistics; use mockall::predicate::eq; + use packages::statistics; use super::{sample_ipv4_socket_address, sample_ipv6_remote_addr}; + use crate::packages; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_connect; use crate::servers::udp::handlers::tests::{ @@ -668,7 +671,7 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { - let (stats_event_sender, _stats_repository) = bittorrent_tracker_core::statistics::setup::factory(false); + let (stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let request = ConnectRequest { @@ -688,7 +691,7 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id() { - let (stats_event_sender, _stats_repository) = bittorrent_tracker_core::statistics::setup::factory(false); + let (stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let request = ConnectRequest { @@ -708,7 +711,7 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { - let (stats_event_sender, _stats_repository) = bittorrent_tracker_core::statistics::setup::factory(false); + let (stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let request = ConnectRequest { @@ -854,10 +857,11 @@ mod tests { }; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::{statistics, whitelist}; + use bittorrent_tracker_core::whitelist; use mockall::predicate::eq; use torrust_tracker_configuration::Core; + use crate::packages::{self, statistics}; use 
crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ @@ -1013,7 +1017,7 @@ mod tests { announce_handler: Arc, whitelist_authorization: Arc, ) -> Response { - let (stats_event_sender, _stats_repository) = bittorrent_tracker_core::statistics::setup::factory(false); + let (stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); @@ -1155,10 +1159,11 @@ mod tests { }; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::{statistics, whitelist}; + use bittorrent_tracker_core::whitelist; use mockall::predicate::eq; use torrust_tracker_configuration::Core; + use crate::packages::{self, statistics}; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ @@ -1318,7 +1323,7 @@ mod tests { announce_handler: Arc, whitelist_authorization: Arc, ) -> Response { - let (stats_event_sender, _stats_repository) = bittorrent_tracker_core::statistics::setup::factory(false); + let (stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -1404,13 +1409,14 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::databases::setup::initialize_database; - use bittorrent_tracker_core::statistics; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use 
bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use mockall::predicate::eq; + use packages::statistics; + use crate::packages; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; @@ -1505,10 +1511,11 @@ mod tests { TransactionId, }; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; - use bittorrent_tracker_core::statistics; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use packages::statistics; use super::{gen_remote_fingerprint, TorrentPeerBuilder}; + use crate::packages; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{ @@ -1747,10 +1754,11 @@ mod tests { use std::future; use std::sync::Arc; - use bittorrent_tracker_core::statistics; use mockall::predicate::eq; + use packages::statistics; use super::sample_scrape_request; + use crate::packages; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, @@ -1788,10 +1796,11 @@ mod tests { use std::future; use std::sync::Arc; - use bittorrent_tracker_core::statistics; use mockall::predicate::eq; + use packages::statistics; use super::sample_scrape_request; + use crate::packages; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index bbf2718ff..24872771a 100644 --- 
a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -3,9 +3,9 @@ use std::sync::Arc; use std::time::Duration; use bittorrent_tracker_client::udp::client::check; -use bittorrent_tracker_core::statistics; use derive_more::Constructor; use futures_util::StreamExt; +use packages::statistics; use tokio::select; use tokio::sync::oneshot; use tokio::time::interval; @@ -14,6 +14,7 @@ use tracing::instrument; use super::request_buffer::ActiveRequests; use crate::bootstrap::jobs::Started; use crate::container::UdpTrackerContainer; +use crate::packages; use crate::servers::logging::STARTED_ON; use crate::servers::registar::ServiceHealthCheckJob; use crate::servers::signals::{shutdown_signal_with_message, Halted}; diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index db444a04c..8a1ca64e3 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -4,13 +4,14 @@ use std::sync::Arc; use std::time::Duration; use aquatic_udp_protocol::Response; -use bittorrent_tracker_core::statistics; -use bittorrent_tracker_core::statistics::event::UdpResponseKind; +use packages::statistics; +use packages::statistics::event::UdpResponseKind; use tokio::time::Instant; use tracing::{instrument, Level}; use super::bound_socket::BoundSocket; use crate::container::UdpTrackerContainer; +use crate::packages; use crate::servers::udp::handlers::CookieTimeValues; use crate::servers::udp::{handlers, RawRequest}; diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index c91be1544..2828982f7 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -3,14 +3,15 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::authentication::handler::KeysHandler; use bittorrent_tracker_core::databases::Database; -use bittorrent_tracker_core::statistics::repository::Repository; use 
bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist::manager::WhitelistManager; use futures::executor::block_on; +use packages::statistics::repository::Repository; use torrust_tracker_configuration::Configuration; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; use torrust_tracker_lib::container::HttpTrackerContainer; +use torrust_tracker_lib::packages; use torrust_tracker_lib::servers::http::server::{HttpServer, Launcher, Running, Stopped}; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_primitives::peer; diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 1483e1e5f..8e2e31f07 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -3,11 +3,12 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::databases::Database; -use bittorrent_tracker_core::statistics::repository::Repository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use packages::statistics::repository::Repository; use torrust_tracker_configuration::{Configuration, DEFAULT_TIMEOUT}; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::container::UdpTrackerContainer; +use torrust_tracker_lib::packages; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_lib::servers::udp::server::spawner::Spawner; use torrust_tracker_lib::servers::udp::server::states::{Running, Stopped}; From f99534a89194cbe91bbe6ddac52dfa69ec5a6f7e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 31 Jan 2025 11:01:33 +0000 Subject: [PATCH 192/802] refactor: [#1228] split statistics mod into UDO and HTTP statistics --- src/packages/http_tracker_core/mod.rs | 1 + 
.../statistics/event/handler.rs | 123 +++++++++++++ .../statistics/event/listener.rs | 11 ++ .../http_tracker_core/statistics/event/mod.rs | 21 +++ .../statistics/event/sender.rs | 29 +++ .../http_tracker_core/statistics/keeper.rs | 77 ++++++++ .../http_tracker_core/statistics/metrics.rs | 30 +++ .../http_tracker_core/statistics/mod.rs | 6 + .../statistics/repository.rs | 66 +++++++ .../http_tracker_core/statistics/services.rs | 104 +++++++++++ .../http_tracker_core/statistics/setup.rs | 54 ++++++ src/packages/mod.rs | 5 + src/packages/udp_tracker_core/mod.rs | 1 + .../statistics/event/handler.rs | 154 ++++++++++++++++ .../statistics/event/listener.rs | 11 ++ .../udp_tracker_core/statistics/event/mod.rs | 47 +++++ .../statistics/event/sender.rs | 29 +++ .../udp_tracker_core/statistics/keeper.rs | 77 ++++++++ .../udp_tracker_core/statistics/metrics.rs | 67 +++++++ .../udp_tracker_core/statistics/mod.rs | 6 + .../udp_tracker_core/statistics/repository.rs | 173 ++++++++++++++++++ .../udp_tracker_core/statistics/services.rs | 146 +++++++++++++++ .../udp_tracker_core/statistics/setup.rs | 54 ++++++ 23 files changed, 1292 insertions(+) create mode 100644 src/packages/http_tracker_core/mod.rs create mode 100644 src/packages/http_tracker_core/statistics/event/handler.rs create mode 100644 src/packages/http_tracker_core/statistics/event/listener.rs create mode 100644 src/packages/http_tracker_core/statistics/event/mod.rs create mode 100644 src/packages/http_tracker_core/statistics/event/sender.rs create mode 100644 src/packages/http_tracker_core/statistics/keeper.rs create mode 100644 src/packages/http_tracker_core/statistics/metrics.rs create mode 100644 src/packages/http_tracker_core/statistics/mod.rs create mode 100644 src/packages/http_tracker_core/statistics/repository.rs create mode 100644 src/packages/http_tracker_core/statistics/services.rs create mode 100644 src/packages/http_tracker_core/statistics/setup.rs create mode 100644 src/packages/udp_tracker_core/mod.rs 
create mode 100644 src/packages/udp_tracker_core/statistics/event/handler.rs create mode 100644 src/packages/udp_tracker_core/statistics/event/listener.rs create mode 100644 src/packages/udp_tracker_core/statistics/event/mod.rs create mode 100644 src/packages/udp_tracker_core/statistics/event/sender.rs create mode 100644 src/packages/udp_tracker_core/statistics/keeper.rs create mode 100644 src/packages/udp_tracker_core/statistics/metrics.rs create mode 100644 src/packages/udp_tracker_core/statistics/mod.rs create mode 100644 src/packages/udp_tracker_core/statistics/repository.rs create mode 100644 src/packages/udp_tracker_core/statistics/services.rs create mode 100644 src/packages/udp_tracker_core/statistics/setup.rs diff --git a/src/packages/http_tracker_core/mod.rs b/src/packages/http_tracker_core/mod.rs new file mode 100644 index 000000000..3449ec7b4 --- /dev/null +++ b/src/packages/http_tracker_core/mod.rs @@ -0,0 +1 @@ +pub mod statistics; diff --git a/src/packages/http_tracker_core/statistics/event/handler.rs b/src/packages/http_tracker_core/statistics/event/handler.rs new file mode 100644 index 000000000..caaf5d375 --- /dev/null +++ b/src/packages/http_tracker_core/statistics/event/handler.rs @@ -0,0 +1,123 @@ +use crate::packages::http_tracker_core::statistics::event::Event; +use crate::packages::http_tracker_core::statistics::repository::Repository; + +pub async fn handle_event(event: Event, stats_repository: &Repository) { + match event { + // TCP4 + Event::Tcp4Announce => { + stats_repository.increase_tcp4_announces().await; + stats_repository.increase_tcp4_connections().await; + } + Event::Tcp4Scrape => { + stats_repository.increase_tcp4_scrapes().await; + stats_repository.increase_tcp4_connections().await; + } + + // TCP6 + Event::Tcp6Announce => { + stats_repository.increase_tcp6_announces().await; + stats_repository.increase_tcp6_connections().await; + } + Event::Tcp6Scrape => { + stats_repository.increase_tcp6_scrapes().await; + 
stats_repository.increase_tcp6_connections().await; + } + } + + tracing::debug!("stats: {:?}", stats_repository.get_stats().await); +} + +#[cfg(test)] +mod tests { + use crate::packages::http_tracker_core::statistics::event::handler::handle_event; + use crate::packages::http_tracker_core::statistics::event::Event; + use crate::packages::http_tracker_core::statistics::repository::Repository; + + #[tokio::test] + async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Tcp4Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_announce_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Tcp4Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp4_scrapes_counter_when_it_receives_a_tcp4_scrape_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Tcp4Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_scrapes_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_scrape_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Tcp4Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_announces_counter_when_it_receives_a_tcp6_announce_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Tcp6Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + 
assert_eq!(stats.tcp6_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_announce_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Tcp6Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_scrapes_counter_when_it_receives_a_tcp6_scrape_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Tcp6Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_scrapes_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_scrape_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Tcp6Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.tcp6_connections_handled, 1); + } +} diff --git a/src/packages/http_tracker_core/statistics/event/listener.rs b/src/packages/http_tracker_core/statistics/event/listener.rs new file mode 100644 index 000000000..ed574a36b --- /dev/null +++ b/src/packages/http_tracker_core/statistics/event/listener.rs @@ -0,0 +1,11 @@ +use tokio::sync::mpsc; + +use super::handler::handle_event; +use super::Event; +use crate::packages::http_tracker_core::statistics::repository::Repository; + +pub async fn dispatch_events(mut receiver: mpsc::Receiver, stats_repository: Repository) { + while let Some(event) = receiver.recv().await { + handle_event(event, &stats_repository).await; + } +} diff --git a/src/packages/http_tracker_core/statistics/event/mod.rs b/src/packages/http_tracker_core/statistics/event/mod.rs new file mode 100644 index 000000000..e25148666 --- /dev/null +++ b/src/packages/http_tracker_core/statistics/event/mod.rs @@ -0,0 +1,21 @@ +pub mod handler; +pub mod listener; +pub mod sender; + +/// An 
statistics event. It is used to collect tracker metrics. +/// +/// - `Tcp` prefix means the event was triggered by the HTTP tracker +/// - `Udp` prefix means the event was triggered by the UDP tracker +/// - `4` or `6` prefixes means the IP version used by the peer +/// - Finally the event suffix is the type of request: `announce`, `scrape` or `connection` +/// +/// > NOTE: HTTP trackers do not use `connection` requests. +#[derive(Debug, PartialEq, Eq)] +pub enum Event { + // code-review: consider one single event for request type with data: Event::Announce { scheme: HTTPorUDP, ip_version: V4orV6 } + // Attributes are enums too. + Tcp4Announce, + Tcp4Scrape, + Tcp6Announce, + Tcp6Scrape, +} diff --git a/src/packages/http_tracker_core/statistics/event/sender.rs b/src/packages/http_tracker_core/statistics/event/sender.rs new file mode 100644 index 000000000..279d50962 --- /dev/null +++ b/src/packages/http_tracker_core/statistics/event/sender.rs @@ -0,0 +1,29 @@ +use futures::future::BoxFuture; +use futures::FutureExt; +#[cfg(test)] +use mockall::{automock, predicate::str}; +use tokio::sync::mpsc; +use tokio::sync::mpsc::error::SendError; + +use super::Event; + +/// A trait to allow sending statistics events +#[cfg_attr(test, automock)] +pub trait Sender: Sync + Send { + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; +} + +/// An [`statistics::EventSender`](crate::packages::http_tracker_core::statistics::event::sender::Sender) implementation. +/// +/// It uses a channel sender to send the statistic events. 
The channel is created by a +/// [`statistics::Keeper`](crate::packages::http_tracker_core::statistics::keeper::Keeper) +#[allow(clippy::module_name_repetitions)] +pub struct ChannelSender { + pub(crate) sender: mpsc::Sender, +} + +impl Sender for ChannelSender { + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { + async move { Some(self.sender.send(event).await) }.boxed() + } +} diff --git a/src/packages/http_tracker_core/statistics/keeper.rs b/src/packages/http_tracker_core/statistics/keeper.rs new file mode 100644 index 000000000..01ae5e6b3 --- /dev/null +++ b/src/packages/http_tracker_core/statistics/keeper.rs @@ -0,0 +1,77 @@ +use tokio::sync::mpsc; + +use super::event::listener::dispatch_events; +use super::event::sender::{ChannelSender, Sender}; +use super::event::Event; +use super::repository::Repository; + +const CHANNEL_BUFFER_SIZE: usize = 65_535; + +/// The service responsible for keeping tracker metrics (listening to statistics events and handle them). +/// +/// It actively listen to new statistics events. When it receives a new event +/// it accordingly increases the counters. 
+pub struct Keeper { + pub repository: Repository, +} + +impl Default for Keeper { + fn default() -> Self { + Self::new() + } +} + +impl Keeper { + #[must_use] + pub fn new() -> Self { + Self { + repository: Repository::new(), + } + } + + #[must_use] + pub fn new_active_instance() -> (Box, Repository) { + let mut stats_tracker = Self::new(); + + let stats_event_sender = stats_tracker.run_event_listener(); + + (stats_event_sender, stats_tracker.repository) + } + + pub fn run_event_listener(&mut self) -> Box { + let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); + + let stats_repository = self.repository.clone(); + + tokio::spawn(async move { dispatch_events(receiver, stats_repository).await }); + + Box::new(ChannelSender { sender }) + } +} + +#[cfg(test)] +mod tests { + use crate::packages::http_tracker_core::statistics::event::Event; + use crate::packages::http_tracker_core::statistics::keeper::Keeper; + use crate::packages::http_tracker_core::statistics::metrics::Metrics; + + #[tokio::test] + async fn should_contain_the_tracker_statistics() { + let stats_tracker = Keeper::new(); + + let stats = stats_tracker.repository.get_stats().await; + + assert_eq!(stats.tcp4_announces_handled, Metrics::default().tcp4_announces_handled); + } + + #[tokio::test] + async fn should_create_an_event_sender_to_send_statistical_events() { + let mut stats_tracker = Keeper::new(); + + let event_sender = stats_tracker.run_event_listener(); + + let result = event_sender.send_event(Event::Tcp4Announce).await; + + assert!(result.is_some()); + } +} diff --git a/src/packages/http_tracker_core/statistics/metrics.rs b/src/packages/http_tracker_core/statistics/metrics.rs new file mode 100644 index 000000000..ae4db9704 --- /dev/null +++ b/src/packages/http_tracker_core/statistics/metrics.rs @@ -0,0 +1,30 @@ +/// Metrics collected by the tracker. 
+/// +/// - Number of connections handled +/// - Number of `announce` requests handled +/// - Number of `scrape` requests handled +/// +/// These metrics are collected for each connection type: UDP and HTTP +/// and also for each IP version used by the peers: IPv4 and IPv6. +#[derive(Debug, PartialEq, Default)] +pub struct Metrics { + /// Total number of TCP (HTTP tracker) connections from IPv4 peers. + /// Since the HTTP tracker spec does not require a handshake, this metric + /// increases for every HTTP request. + pub tcp4_connections_handled: u64, + + /// Total number of TCP (HTTP tracker) `announce` requests from IPv4 peers. + pub tcp4_announces_handled: u64, + + /// Total number of TCP (HTTP tracker) `scrape` requests from IPv4 peers. + pub tcp4_scrapes_handled: u64, + + /// Total number of TCP (HTTP tracker) connections from IPv6 peers. + pub tcp6_connections_handled: u64, + + /// Total number of TCP (HTTP tracker) `announce` requests from IPv6 peers. + pub tcp6_announces_handled: u64, + + /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. + pub tcp6_scrapes_handled: u64, +} diff --git a/src/packages/http_tracker_core/statistics/mod.rs b/src/packages/http_tracker_core/statistics/mod.rs new file mode 100644 index 000000000..939a41061 --- /dev/null +++ b/src/packages/http_tracker_core/statistics/mod.rs @@ -0,0 +1,6 @@ +pub mod event; +pub mod keeper; +pub mod metrics; +pub mod repository; +pub mod services; +pub mod setup; diff --git a/src/packages/http_tracker_core/statistics/repository.rs b/src/packages/http_tracker_core/statistics/repository.rs new file mode 100644 index 000000000..41f048e29 --- /dev/null +++ b/src/packages/http_tracker_core/statistics/repository.rs @@ -0,0 +1,66 @@ +use std::sync::Arc; + +use tokio::sync::{RwLock, RwLockReadGuard}; + +use super::metrics::Metrics; + +/// A repository for the tracker metrics.
+#[derive(Clone)] +pub struct Repository { + pub stats: Arc>, +} + +impl Default for Repository { + fn default() -> Self { + Self::new() + } +} + +impl Repository { + #[must_use] + pub fn new() -> Self { + Self { + stats: Arc::new(RwLock::new(Metrics::default())), + } + } + + pub async fn get_stats(&self) -> RwLockReadGuard<'_, Metrics> { + self.stats.read().await + } + + pub async fn increase_tcp4_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp4_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp4_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp4_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp4_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp4_scrapes_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp6_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp6_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp6_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp6_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_tcp6_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.tcp6_scrapes_handled += 1; + drop(stats_lock); + } +} diff --git a/src/packages/http_tracker_core/statistics/services.rs b/src/packages/http_tracker_core/statistics/services.rs new file mode 100644 index 000000000..11e3a70c4 --- /dev/null +++ b/src/packages/http_tracker_core/statistics/services.rs @@ -0,0 +1,104 @@ +//! Statistics services. +//! +//! It includes: +//! +//! - A [`factory`](crate::packages::http_tracker_core::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. +//! - A [`get_metrics`] service to get the tracker [`metrics`](crate::packages::http_tracker_core::statistics::metrics::Metrics). +//! +//! 
Tracker metrics are collected using a Publisher-Subscribe pattern. +//! +//! The factory function builds two structs: +//! +//! - An statistics event [`Sender`](crate::packages::http_tracker_core::statistics::event::sender::Sender) +//! - An statistics [`Repository`] +//! +//! ```text +//! let (stats_event_sender, stats_repository) = factory(tracker_usage_statistics); +//! ``` +//! +//! The statistics repository is responsible for storing the metrics in memory. +//! The statistics event sender allows sending events related to metrics. +//! There is an event listener that is receiving all the events and processing them with an event handler. +//! Then, the event handler updates the metrics depending on the received event. +use std::sync::Arc; + +use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use packages::http_tracker_core::statistics::metrics::Metrics; +use packages::http_tracker_core::statistics::repository::Repository; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + +use crate::packages; + +/// All the metrics collected by the tracker. +#[derive(Debug, PartialEq)] +pub struct TrackerMetrics { + /// Domain level metrics. + /// + /// General metrics for all torrents (number of seeders, leechers, etcetera) + pub torrents_metrics: TorrentsMetrics, + + /// Application level metrics. Usage statistics/metrics. 
+ /// + /// Metrics about how the tracker is been used (number of number of http scrape requests, etcetera) + pub protocol_metrics: Metrics, +} + +/// It returns all the [`TrackerMetrics`] +pub async fn get_metrics( + in_memory_torrent_repository: Arc, + stats_repository: Arc, +) -> TrackerMetrics { + let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); + let stats = stats_repository.get_stats().await; + + TrackerMetrics { + torrents_metrics, + protocol_metrics: Metrics { + // TCPv4 + tcp4_connections_handled: stats.tcp4_connections_handled, + tcp4_announces_handled: stats.tcp4_announces_handled, + tcp4_scrapes_handled: stats.tcp4_scrapes_handled, + // TCPv6 + tcp6_connections_handled: stats.tcp6_connections_handled, + tcp6_announces_handled: stats.tcp6_announces_handled, + tcp6_scrapes_handled: stats.tcp6_scrapes_handled, + }, + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::{self}; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + use torrust_tracker_test_helpers::configuration; + + use crate::packages::http_tracker_core::statistics; + use crate::packages::http_tracker_core::statistics::services::{get_metrics, TrackerMetrics}; + + pub fn tracker_configuration() -> Configuration { + configuration::ephemeral() + } + + #[tokio::test] + async fn the_statistics_service_should_return_the_tracker_metrics() { + let config = tracker_configuration(); + + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let (_stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + let stats_repository = Arc::new(stats_repository); + + let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), stats_repository.clone()).await; + + assert_eq!( + tracker_metrics, + 
TrackerMetrics { + torrents_metrics: TorrentsMetrics::default(), + protocol_metrics: statistics::metrics::Metrics::default(), + } + ); + } +} diff --git a/src/packages/http_tracker_core/statistics/setup.rs b/src/packages/http_tracker_core/statistics/setup.rs new file mode 100644 index 000000000..009f157d5 --- /dev/null +++ b/src/packages/http_tracker_core/statistics/setup.rs @@ -0,0 +1,54 @@ +//! Setup for the tracker statistics. +//! +//! The [`factory`] function builds the structs needed for handling the tracker metrics. +use crate::packages::http_tracker_core::statistics; + +/// It builds the structs needed for handling the tracker metrics. +/// +/// It returns: +/// +/// - A statistics event [`Sender`](crate::packages::http_tracker_core::statistics::event::sender::Sender) that allows you to send events related to statistics. +/// - A statistics [`Repository`](crate::packages::http_tracker_core::statistics::repository::Repository) which is an in-memory repository for the tracker metrics. +/// +/// When the input argument `tracker_usage_statistics` is false the setup does not run the event listeners, consequently the statistics +/// events sent are received but not dispatched to the handler.
+#[must_use] +pub fn factory( + tracker_usage_statistics: bool, +) -> ( + Option>, + statistics::repository::Repository, +) { + let mut stats_event_sender = None; + + let mut stats_tracker = statistics::keeper::Keeper::new(); + + if tracker_usage_statistics { + stats_event_sender = Some(stats_tracker.run_event_listener()); + } + + (stats_event_sender, stats_tracker.repository) +} + +#[cfg(test)] +mod test { + use super::factory; + + #[tokio::test] + async fn should_not_send_any_event_when_statistics_are_disabled() { + let tracker_usage_statistics = false; + + let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); + + assert!(stats_event_sender.is_none()); + } + + #[tokio::test] + async fn should_send_events_when_statistics_are_enabled() { + let tracker_usage_statistics = true; + + let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); + + assert!(stats_event_sender.is_some()); + } +} diff --git a/src/packages/mod.rs b/src/packages/mod.rs index 3449ec7b4..9e0bbec90 100644 --- a/src/packages/mod.rs +++ b/src/packages/mod.rs @@ -1 +1,6 @@ +//! This module contains logic pending to be extracted into workspace packages. +//! +//! It will be moved to the directory `packages`. 
+pub mod http_tracker_core; pub mod statistics; +pub mod udp_tracker_core; diff --git a/src/packages/udp_tracker_core/mod.rs b/src/packages/udp_tracker_core/mod.rs new file mode 100644 index 000000000..3449ec7b4 --- /dev/null +++ b/src/packages/udp_tracker_core/mod.rs @@ -0,0 +1 @@ +pub mod statistics; diff --git a/src/packages/udp_tracker_core/statistics/event/handler.rs b/src/packages/udp_tracker_core/statistics/event/handler.rs new file mode 100644 index 000000000..d696951d3 --- /dev/null +++ b/src/packages/udp_tracker_core/statistics/event/handler.rs @@ -0,0 +1,154 @@ +use crate::packages::udp_tracker_core::statistics::event::{Event, UdpResponseKind}; +use crate::packages::udp_tracker_core::statistics::repository::Repository; + +pub async fn handle_event(event: Event, stats_repository: &Repository) { + match event { + // UDP + Event::UdpRequestAborted => { + stats_repository.increase_udp_requests_aborted().await; + } + Event::UdpRequestBanned => { + stats_repository.increase_udp_requests_banned().await; + } + + // UDP4 + Event::Udp4Request => { + stats_repository.increase_udp4_requests().await; + } + Event::Udp4Connect => { + stats_repository.increase_udp4_connections().await; + } + Event::Udp4Announce => { + stats_repository.increase_udp4_announces().await; + } + Event::Udp4Scrape => { + stats_repository.increase_udp4_scrapes().await; + } + Event::Udp4Response { + kind, + req_processing_time, + } => { + stats_repository.increase_udp4_responses().await; + + match kind { + UdpResponseKind::Connect => { + stats_repository + .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) + .await; + } + UdpResponseKind::Announce => { + stats_repository + .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) + .await; + } + UdpResponseKind::Scrape => { + stats_repository + .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) + .await; + } + UdpResponseKind::Error => {} + } + } + Event::Udp4Error => { + 
stats_repository.increase_udp4_errors().await; + } + + // UDP6 + Event::Udp6Request => { + stats_repository.increase_udp6_requests().await; + } + Event::Udp6Connect => { + stats_repository.increase_udp6_connections().await; + } + Event::Udp6Announce => { + stats_repository.increase_udp6_announces().await; + } + Event::Udp6Scrape => { + stats_repository.increase_udp6_scrapes().await; + } + Event::Udp6Response { + kind: _, + req_processing_time: _, + } => { + stats_repository.increase_udp6_responses().await; + } + Event::Udp6Error => { + stats_repository.increase_udp6_errors().await; + } + } + + tracing::debug!("stats: {:?}", stats_repository.get_stats().await); +} + +#[cfg(test)] +mod tests { + use crate::packages::udp_tracker_core::statistics::event::handler::handle_event; + use crate::packages::udp_tracker_core::statistics::event::Event; + use crate::packages::udp_tracker_core::statistics::repository::Repository; + + #[tokio::test] + async fn should_increase_the_udp4_connections_counter_when_it_receives_a_udp4_connect_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Udp4Connect, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_announces_counter_when_it_receives_a_udp4_announce_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Udp4Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_scrapes_counter_when_it_receives_a_udp4_scrape_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Udp4Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_scrapes_handled, 1); + } + + #[tokio::test] + async fn 
should_increase_the_udp6_connections_counter_when_it_receives_a_udp6_connect_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Udp6Connect, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_announces_counter_when_it_receives_a_udp6_announce_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Udp6Announce, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_scrapes_counter_when_it_receives_a_udp6_scrape_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Udp6Scrape, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_scrapes_handled, 1); + } +} diff --git a/src/packages/udp_tracker_core/statistics/event/listener.rs b/src/packages/udp_tracker_core/statistics/event/listener.rs new file mode 100644 index 000000000..6a84fbaa5 --- /dev/null +++ b/src/packages/udp_tracker_core/statistics/event/listener.rs @@ -0,0 +1,11 @@ +use tokio::sync::mpsc; + +use super::handler::handle_event; +use super::Event; +use crate::packages::udp_tracker_core::statistics::repository::Repository; + +pub async fn dispatch_events(mut receiver: mpsc::Receiver, stats_repository: Repository) { + while let Some(event) = receiver.recv().await { + handle_event(event, &stats_repository).await; + } +} diff --git a/src/packages/udp_tracker_core/statistics/event/mod.rs b/src/packages/udp_tracker_core/statistics/event/mod.rs new file mode 100644 index 000000000..6a5343933 --- /dev/null +++ b/src/packages/udp_tracker_core/statistics/event/mod.rs @@ -0,0 +1,47 @@ +use std::time::Duration; + +pub mod handler; +pub mod listener; +pub mod sender; + +/// An statistics event. It is used to collect tracker metrics. 
+/// +/// - `Tcp` prefix means the event was triggered by the HTTP tracker +/// - `Udp` prefix means the event was triggered by the UDP tracker +/// - `4` or `6` prefixes means the IP version used by the peer +/// - Finally the event suffix is the type of request: `announce`, `scrape` or `connection` +/// +/// > NOTE: HTTP trackers do not use `connection` requests. +#[derive(Debug, PartialEq, Eq)] +pub enum Event { + // code-review: consider one single event for request type with data: Event::Announce { scheme: HTTPorUDP, ip_version: V4orV6 } + // Attributes are enums too. + UdpRequestAborted, + UdpRequestBanned, + Udp4Request, + Udp4Connect, + Udp4Announce, + Udp4Scrape, + Udp4Response { + kind: UdpResponseKind, + req_processing_time: Duration, + }, + Udp4Error, + Udp6Request, + Udp6Connect, + Udp6Announce, + Udp6Scrape, + Udp6Response { + kind: UdpResponseKind, + req_processing_time: Duration, + }, + Udp6Error, +} + +#[derive(Debug, PartialEq, Eq)] +pub enum UdpResponseKind { + Connect, + Announce, + Scrape, + Error, +} diff --git a/src/packages/udp_tracker_core/statistics/event/sender.rs b/src/packages/udp_tracker_core/statistics/event/sender.rs new file mode 100644 index 000000000..68e197eca --- /dev/null +++ b/src/packages/udp_tracker_core/statistics/event/sender.rs @@ -0,0 +1,29 @@ +use futures::future::BoxFuture; +use futures::FutureExt; +#[cfg(test)] +use mockall::{automock, predicate::str}; +use tokio::sync::mpsc; +use tokio::sync::mpsc::error::SendError; + +use super::Event; + +/// A trait to allow sending statistics events +#[cfg_attr(test, automock)] +pub trait Sender: Sync + Send { + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; +} + +/// An [`statistics::EventSender`](crate::packages::udp_tracker_core::statistics::event::sender::Sender) implementation. +/// +/// It uses a channel sender to send the statistic events. 
The channel is created by a +/// [`statistics::Keeper`](crate::packages::udp_tracker_core::statistics::keeper::Keeper) +#[allow(clippy::module_name_repetitions)] +pub struct ChannelSender { + pub(crate) sender: mpsc::Sender, +} + +impl Sender for ChannelSender { + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { + async move { Some(self.sender.send(event).await) }.boxed() + } +} diff --git a/src/packages/udp_tracker_core/statistics/keeper.rs b/src/packages/udp_tracker_core/statistics/keeper.rs new file mode 100644 index 000000000..9bd290145 --- /dev/null +++ b/src/packages/udp_tracker_core/statistics/keeper.rs @@ -0,0 +1,77 @@ +use tokio::sync::mpsc; + +use super::event::listener::dispatch_events; +use super::event::sender::{ChannelSender, Sender}; +use super::event::Event; +use super::repository::Repository; + +const CHANNEL_BUFFER_SIZE: usize = 65_535; + +/// The service responsible for keeping tracker metrics (listening to statistics events and handle them). +/// +/// It actively listen to new statistics events. When it receives a new event +/// it accordingly increases the counters. 
+pub struct Keeper { + pub repository: Repository, +} + +impl Default for Keeper { + fn default() -> Self { + Self::new() + } +} + +impl Keeper { + #[must_use] + pub fn new() -> Self { + Self { + repository: Repository::new(), + } + } + + #[must_use] + pub fn new_active_instance() -> (Box, Repository) { + let mut stats_tracker = Self::new(); + + let stats_event_sender = stats_tracker.run_event_listener(); + + (stats_event_sender, stats_tracker.repository) + } + + pub fn run_event_listener(&mut self) -> Box { + let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); + + let stats_repository = self.repository.clone(); + + tokio::spawn(async move { dispatch_events(receiver, stats_repository).await }); + + Box::new(ChannelSender { sender }) + } +} + +#[cfg(test)] +mod tests { + use crate::packages::udp_tracker_core::statistics::event::Event; + use crate::packages::udp_tracker_core::statistics::keeper::Keeper; + use crate::packages::udp_tracker_core::statistics::metrics::Metrics; + + #[tokio::test] + async fn should_contain_the_tracker_statistics() { + let stats_tracker = Keeper::new(); + + let stats = stats_tracker.repository.get_stats().await; + + assert_eq!(stats.udp4_announces_handled, Metrics::default().udp4_announces_handled); + } + + #[tokio::test] + async fn should_create_an_event_sender_to_send_statistical_events() { + let mut stats_tracker = Keeper::new(); + + let event_sender = stats_tracker.run_event_listener(); + + let result = event_sender.send_event(Event::Udp4Connect).await; + + assert!(result.is_some()); + } +} diff --git a/src/packages/udp_tracker_core/statistics/metrics.rs b/src/packages/udp_tracker_core/statistics/metrics.rs new file mode 100644 index 000000000..23357aab6 --- /dev/null +++ b/src/packages/udp_tracker_core/statistics/metrics.rs @@ -0,0 +1,67 @@ +/// Metrics collected by the tracker. 
+/// +/// - Number of connections handled +/// - Number of `announce` requests handled +/// - Number of `scrape` request handled +/// +/// These metrics are collected for each connection type: UDP and HTTP +/// and also for each IP version used by the peers: IPv4 and IPv6. +#[derive(Debug, PartialEq, Default)] +pub struct Metrics { + // UDP + /// Total number of UDP (UDP tracker) requests aborted. + pub udp_requests_aborted: u64, + + /// Total number of UDP (UDP tracker) requests banned. + pub udp_requests_banned: u64, + + /// Total number of banned IPs. + pub udp_banned_ips_total: u64, + + /// Average rounded time spent processing UDP connect requests. + pub udp_avg_connect_processing_time_ns: u64, + + /// Average rounded time spent processing UDP announce requests. + pub udp_avg_announce_processing_time_ns: u64, + + /// Average rounded time spent processing UDP scrape requests. + pub udp_avg_scrape_processing_time_ns: u64, + + // UDPv4 + /// Total number of UDP (UDP tracker) requests from IPv4 peers. + pub udp4_requests: u64, + + /// Total number of UDP (UDP tracker) connections from IPv4 peers. + pub udp4_connections_handled: u64, + + /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. + pub udp4_announces_handled: u64, + + /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. + pub udp4_scrapes_handled: u64, + + /// Total number of UDP (UDP tracker) responses from IPv4 peers. + pub udp4_responses: u64, + + /// Total number of UDP (UDP tracker) `error` requests from IPv4 peers. + pub udp4_errors_handled: u64, + + // UDPv6 + /// Total number of UDP (UDP tracker) requests from IPv6 peers. + pub udp6_requests: u64, + + /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. + pub udp6_connections_handled: u64, + + /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. + pub udp6_announces_handled: u64, + + /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. 
+ pub udp6_scrapes_handled: u64, + + /// Total number of UDP (UDP tracker) responses from IPv6 peers. + pub udp6_responses: u64, + + /// Total number of UDP (UDP tracker) `error` requests from IPv6 peers. + pub udp6_errors_handled: u64, +} diff --git a/src/packages/udp_tracker_core/statistics/mod.rs b/src/packages/udp_tracker_core/statistics/mod.rs new file mode 100644 index 000000000..939a41061 --- /dev/null +++ b/src/packages/udp_tracker_core/statistics/mod.rs @@ -0,0 +1,6 @@ +pub mod event; +pub mod keeper; +pub mod metrics; +pub mod repository; +pub mod services; +pub mod setup; diff --git a/src/packages/udp_tracker_core/statistics/repository.rs b/src/packages/udp_tracker_core/statistics/repository.rs new file mode 100644 index 000000000..22e793036 --- /dev/null +++ b/src/packages/udp_tracker_core/statistics/repository.rs @@ -0,0 +1,173 @@ +use std::sync::Arc; +use std::time::Duration; + +use tokio::sync::{RwLock, RwLockReadGuard}; + +use super::metrics::Metrics; + +/// A repository for the tracker metrics. 
+#[derive(Clone)] +pub struct Repository { + pub stats: Arc>, +} + +impl Default for Repository { + fn default() -> Self { + Self::new() + } +} + +impl Repository { + #[must_use] + pub fn new() -> Self { + Self { + stats: Arc::new(RwLock::new(Metrics::default())), + } + } + + pub async fn get_stats(&self) -> RwLockReadGuard<'_, Metrics> { + self.stats.read().await + } + + pub async fn increase_udp_requests_aborted(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp_requests_aborted += 1; + drop(stats_lock); + } + + pub async fn increase_udp_requests_banned(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp_requests_banned += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_requests(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_requests += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_scrapes_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_responses(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_responses += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_errors(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_errors_handled += 1; + drop(stats_lock); + } + + #[allow(clippy::cast_precision_loss)] + #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_sign_loss)] + pub async fn recalculate_udp_avg_connect_processing_time_ns(&self, req_processing_time: Duration) { + let mut stats_lock = self.stats.write().await; + + let req_processing_time = 
req_processing_time.as_nanos() as f64; + let udp_connections_handled = (stats_lock.udp4_connections_handled + stats_lock.udp6_connections_handled) as f64; + + let previous_avg = stats_lock.udp_avg_connect_processing_time_ns; + + // Moving average: https://en.wikipedia.org/wiki/Moving_average + let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_connections_handled; + + stats_lock.udp_avg_connect_processing_time_ns = new_avg.ceil() as u64; + + drop(stats_lock); + } + + #[allow(clippy::cast_precision_loss)] + #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_sign_loss)] + pub async fn recalculate_udp_avg_announce_processing_time_ns(&self, req_processing_time: Duration) { + let mut stats_lock = self.stats.write().await; + + let req_processing_time = req_processing_time.as_nanos() as f64; + + let udp_announces_handled = (stats_lock.udp4_announces_handled + stats_lock.udp6_announces_handled) as f64; + + let previous_avg = stats_lock.udp_avg_announce_processing_time_ns; + + // Moving average: https://en.wikipedia.org/wiki/Moving_average + let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_announces_handled; + + stats_lock.udp_avg_announce_processing_time_ns = new_avg.ceil() as u64; + + drop(stats_lock); + } + + #[allow(clippy::cast_precision_loss)] + #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_sign_loss)] + pub async fn recalculate_udp_avg_scrape_processing_time_ns(&self, req_processing_time: Duration) { + let mut stats_lock = self.stats.write().await; + + let req_processing_time = req_processing_time.as_nanos() as f64; + let udp_scrapes_handled = (stats_lock.udp4_scrapes_handled + stats_lock.udp6_scrapes_handled) as f64; + + let previous_avg = stats_lock.udp_avg_scrape_processing_time_ns; + + // Moving average: https://en.wikipedia.org/wiki/Moving_average + let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_scrapes_handled; + + 
stats_lock.udp_avg_scrape_processing_time_ns = new_avg.ceil() as u64; + + drop(stats_lock); + } + + pub async fn increase_udp6_requests(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_requests += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_scrapes_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_responses(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_responses += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_errors(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_errors_handled += 1; + drop(stats_lock); + } +} diff --git a/src/packages/udp_tracker_core/statistics/services.rs b/src/packages/udp_tracker_core/statistics/services.rs new file mode 100644 index 000000000..85ca08e54 --- /dev/null +++ b/src/packages/udp_tracker_core/statistics/services.rs @@ -0,0 +1,146 @@ +//! Statistics services. +//! +//! It includes: +//! +//! - A [`factory`](crate::packages::udp_tracker_core::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. +//! - A [`get_metrics`] service to get the tracker [`metrics`](crate::packages::udp_tracker_core::statistics::metrics::Metrics). +//! +//! Tracker metrics are collected using a Publisher-Subscribe pattern. +//! +//! The factory function builds two structs: +//! +//! - An statistics event [`Sender`](crate::packages::udp_tracker_core::statistics::event::sender::Sender) +//! - An statistics [`Repository`] +//! +//! ```text +//! 
let (stats_event_sender, stats_repository) = factory(tracker_usage_statistics); +//! ``` +//! +//! The statistics repository is responsible for storing the metrics in memory. +//! The statistics event sender allows sending events related to metrics. +//! There is an event listener that is receiving all the events and processing them with an event handler. +//! Then, the event handler updates the metrics depending on the received event. +//! +//! For example, if you send the event [`Event::Udp4Connect`](crate::packages::udp_tracker_core::statistics::event::Event::Udp4Connect): +//! +//! ```text +//! let result = event_sender.send_event(Event::Udp4Connect).await; +//! ``` +//! +//! Eventually the counter for UDP connections from IPv4 peers will be increased. +//! +//! ```rust,no_run +//! pub struct Metrics { +//! // ... +//! pub udp4_connections_handled: u64, // This will be incremented +//! // ... +//! } +//! ``` +use std::sync::Arc; + +use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use packages::udp_tracker_core::statistics::metrics::Metrics; +use packages::udp_tracker_core::statistics::repository::Repository; +use tokio::sync::RwLock; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + +use crate::packages; +use crate::servers::udp::server::banning::BanService; + +/// All the metrics collected by the tracker. +#[derive(Debug, PartialEq)] +pub struct TrackerMetrics { + /// Domain level metrics. + /// + /// General metrics for all torrents (number of seeders, leechers, etcetera) + pub torrents_metrics: TorrentsMetrics, + + /// Application level metrics. Usage statistics/metrics. 
+ /// + /// Metrics about how the tracker is been used (number of udp announce requests, etcetera) + pub protocol_metrics: Metrics, +} + +/// It returns all the [`TrackerMetrics`] +pub async fn get_metrics( + in_memory_torrent_repository: Arc, + ban_service: Arc>, + stats_repository: Arc, +) -> TrackerMetrics { + let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); + let stats = stats_repository.get_stats().await; + let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); + + TrackerMetrics { + torrents_metrics, + protocol_metrics: Metrics { + // UDP + udp_requests_aborted: stats.udp_requests_aborted, + udp_requests_banned: stats.udp_requests_banned, + udp_banned_ips_total: udp_banned_ips_total as u64, + udp_avg_connect_processing_time_ns: stats.udp_avg_connect_processing_time_ns, + udp_avg_announce_processing_time_ns: stats.udp_avg_announce_processing_time_ns, + udp_avg_scrape_processing_time_ns: stats.udp_avg_scrape_processing_time_ns, + // UDPv4 + udp4_requests: stats.udp4_requests, + udp4_connections_handled: stats.udp4_connections_handled, + udp4_announces_handled: stats.udp4_announces_handled, + udp4_scrapes_handled: stats.udp4_scrapes_handled, + udp4_responses: stats.udp4_responses, + udp4_errors_handled: stats.udp4_errors_handled, + // UDPv6 + udp6_requests: stats.udp6_requests, + udp6_connections_handled: stats.udp6_connections_handled, + udp6_announces_handled: stats.udp6_announces_handled, + udp6_scrapes_handled: stats.udp6_scrapes_handled, + udp6_responses: stats.udp6_responses, + udp6_errors_handled: stats.udp6_errors_handled, + }, + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::{self}; + use tokio::sync::RwLock; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + use torrust_tracker_test_helpers::configuration; 
+ + use crate::packages::udp_tracker_core::statistics; + use crate::packages::udp_tracker_core::statistics::services::{get_metrics, TrackerMetrics}; + use crate::servers::udp::server::banning::BanService; + use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; + + pub fn tracker_configuration() -> Configuration { + configuration::ephemeral() + } + + #[tokio::test] + async fn the_statistics_service_should_return_the_tracker_metrics() { + let config = tracker_configuration(); + + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let (_stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + let stats_repository = Arc::new(stats_repository); + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + + let tracker_metrics = get_metrics( + in_memory_torrent_repository.clone(), + ban_service.clone(), + stats_repository.clone(), + ) + .await; + + assert_eq!( + tracker_metrics, + TrackerMetrics { + torrents_metrics: TorrentsMetrics::default(), + protocol_metrics: statistics::metrics::Metrics::default(), + } + ); + } +} diff --git a/src/packages/udp_tracker_core/statistics/setup.rs b/src/packages/udp_tracker_core/statistics/setup.rs new file mode 100644 index 000000000..c85c715a2 --- /dev/null +++ b/src/packages/udp_tracker_core/statistics/setup.rs @@ -0,0 +1,54 @@ +//! Setup for the tracker statistics. +//! +//! The [`factory`] function builds the structs needed for handling the tracker metrics. +use crate::packages::udp_tracker_core::statistics; + +/// It builds the structs needed for handling the tracker metrics. +/// +/// It returns: +/// +/// - An statistics event [`Sender`](crate::packages::udp_tracker_core::statistics::event::sender::Sender) that allows you to send events related to statistics. 
+/// - A statistics [`Repository`](crate::packages::udp_tracker_core::statistics::repository::Repository) which is an in-memory repository for the tracker metrics. +/// +/// When the input argument `tracker_usage_statistics` is false the setup does not run the event listeners, consequently the statistics +/// events are sent and received but not dispatched to the handler. +#[must_use] +pub fn factory( + tracker_usage_statistics: bool, +) -> ( + Option>, + statistics::repository::Repository, +) { + let mut stats_event_sender = None; + + let mut stats_tracker = statistics::keeper::Keeper::new(); + + if tracker_usage_statistics { + stats_event_sender = Some(stats_tracker.run_event_listener()); + } + + (stats_event_sender, stats_tracker.repository) +} + +#[cfg(test)] +mod test { + use super::factory; + + #[tokio::test] + async fn should_not_send_any_event_when_statistics_are_disabled() { + let tracker_usage_statistics = false; + + let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); + + assert!(stats_event_sender.is_none()); + } + + #[tokio::test] + async fn should_send_events_when_statistics_are_enabled() { + let tracker_usage_statistics = true; + + let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); + + assert!(stats_event_sender.is_some()); + } +} From 700c912dd8ae50ca17595d11d0051fe4d74979f7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 31 Jan 2025 11:13:26 +0000 Subject: [PATCH 193/802] docs: update tracker core docs Statistics are not in the package anymore. --- packages/tracker-core/src/lib.rs | 54 -------------------------------- 1 file changed, 54 deletions(-) diff --git a/packages/tracker-core/src/lib.rs b/packages/tracker-core/src/lib.rs index ec4371322..68bc48552 100644 --- a/packages/tracker-core/src/lib.rs +++ b/packages/tracker-core/src/lib.rs @@ -370,60 +370,6 @@ //! To learn more about tracker authentication, refer to the following modules : //! //! - [`authentication`] module. -//! 
- [`core`](crate::core) module. -//! - [`http`](crate::servers::http) module. -//! -//! # Statistics -//! -//! The `Tracker` keeps metrics for some events: -//! -//! ```rust,no_run -//! pub struct Metrics { -//! // IP version 4 -//! -//! // HTTP tracker -//! pub tcp4_connections_handled: u64, -//! pub tcp4_announces_handled: u64, -//! pub tcp4_scrapes_handled: u64, -//! -//! // UDP tracker -//! pub udp4_connections_handled: u64, -//! pub udp4_announces_handled: u64, -//! pub udp4_scrapes_handled: u64, -//! -//! // IP version 6 -//! -//! // HTTP tracker -//! pub tcp6_connections_handled: u64, -//! pub tcp6_announces_handled: u64, -//! pub tcp6_scrapes_handled: u64, -//! -//! // UDP tracker -//! pub udp6_connections_handled: u64, -//! pub udp6_announces_handled: u64, -//! pub udp6_scrapes_handled: u64, -//! } -//! ``` -//! -//! The metrics maintained by the `Tracker` are: -//! -//! - `connections_handled`: number of connections handled by the tracker -//! - `announces_handled`: number of `announce` requests handled by the tracker -//! - `scrapes_handled`: number of `scrape` handled requests by the tracker -//! -//! > **NOTICE**: as the HTTP tracker does not have an specific `connection` request like the UDP tracker, `connections_handled` are -//! > increased on every `announce` and `scrape` requests. -//! -//! The tracker exposes an event sender API that allows the tracker users to send events. When a higher application service handles a -//! `connection` , `announce` or `scrape` requests, it notifies the `Tracker` by sending statistics events. -//! -//! For example, the HTTP tracker would send an event like the following when it handles an `announce` request received from a peer using IP version 4. -//! -//! ```text -//! stats_event_sender.send_stats_event(statistics::event::Event::Tcp4Announce).await -//! ``` -//! -//! Refer to [`statistics`] module for more information about statistics. //! //! # Persistence //! 
From 39cbeda28e0928a7e655152b191ee81647eca520 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 31 Jan 2025 11:30:19 +0000 Subject: [PATCH 194/802] refactor: [#1228] add new UDP and HTTP stats services to AppContainer --- src/bootstrap/app.rs | 19 +++++++++++++++++++ src/container.rs | 6 +++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 7313b2808..550eb44f3 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -35,6 +35,7 @@ use tracing::instrument; use super::config::initialize_configuration; use crate::container::AppContainer; +use crate::packages::{http_tracker_core, udp_tracker_core}; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; use crate::shared::crypto::ephemeral_instance_keys; @@ -90,9 +91,23 @@ pub fn initialize_global_services(configuration: &Configuration) { #[instrument(skip())] pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { let core_config = Arc::new(configuration.core.clone()); + let (stats_event_sender, stats_repository) = statistics::setup::factory(configuration.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); let stats_repository = Arc::new(stats_repository); + + // HTTP stats + let (http_stats_event_sender, http_stats_repository) = + http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); + let http_stats_event_sender = Arc::new(http_stats_event_sender); + let http_stats_repository = Arc::new(http_stats_repository); + + // UDP stats + let (udp_stats_event_sender, udp_stats_repository) = + udp_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + let udp_stats_repository = Arc::new(udp_stats_repository); + let ban_service = 
Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let database = initialize_database(configuration); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); @@ -134,7 +149,11 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { whitelist_authorization, ban_service, stats_event_sender, + http_stats_event_sender, + udp_stats_event_sender, stats_repository, + http_stats_repository, + udp_stats_repository, whitelist_manager, in_memory_torrent_repository, db_torrent_repository, diff --git a/src/container.rs b/src/container.rs index 965dbfa2a..f1996decb 100644 --- a/src/container.rs +++ b/src/container.rs @@ -15,7 +15,7 @@ use packages::statistics::repository::Repository; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, HttpApi, HttpTracker, UdpTracker}; -use crate::packages; +use crate::packages::{self, http_tracker_core, udp_tracker_core}; use crate::servers::udp::server::banning::BanService; pub struct AppContainer { @@ -28,7 +28,11 @@ pub struct AppContainer { pub whitelist_authorization: Arc, pub ban_service: Arc>, pub stats_event_sender: Arc>>, + pub http_stats_event_sender: Arc>>, + pub udp_stats_event_sender: Arc>>, pub stats_repository: Arc, + pub http_stats_repository: Arc, + pub udp_stats_repository: Arc, pub whitelist_manager: Arc, pub in_memory_torrent_repository: Arc, pub db_torrent_repository: Arc, From 5f08b2ed509d3787c926bf55b5f58c85227af543 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 31 Jan 2025 13:41:43 +0000 Subject: [PATCH 195/802] refactor: [#1228] start using the http tracker stats Stats have been split into HTTP and UDP stats. Parallel change, step 1: 1. [x] Start using HTTP Tracker Core Stats 2. [ ] Start using UDP Tracker Core Stats 3. [ ] Get metrics from HTTP and UDP Tracker Core Stats 4. [ ] Remove deprecated unified HTTP and UDP stats. 
--- src/container.rs | 2 + src/servers/http/v1/handlers/announce.rs | 68 +++++++++---- src/servers/http/v1/handlers/scrape.rs | 80 +++++++++++---- src/servers/http/v1/routes.rs | 4 + src/servers/http/v1/services/announce.rs | 97 +++++++++++++++--- src/servers/http/v1/services/scrape.rs | 123 ++++++++++++++++++++--- tests/servers/http/environment.rs | 1 + 7 files changed, 306 insertions(+), 69 deletions(-) diff --git a/src/container.rs b/src/container.rs index f1996decb..71c60a517 100644 --- a/src/container.rs +++ b/src/container.rs @@ -71,6 +71,7 @@ pub struct HttpTrackerContainer { pub scrape_handler: Arc, pub whitelist_authorization: Arc, pub stats_event_sender: Arc>>, + pub http_stats_event_sender: Arc>>, pub authentication_service: Arc, } @@ -84,6 +85,7 @@ impl HttpTrackerContainer { scrape_handler: app_container.scrape_handler.clone(), whitelist_authorization: app_container.whitelist_authorization.clone(), stats_event_sender: app_container.stats_event_sender.clone(), + http_stats_event_sender: app_container.http_stats_event_sender.clone(), authentication_service: app_container.authentication_service.clone(), } } diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index d3225ee29..594a11ea1 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -28,6 +28,7 @@ use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; use super::common::auth::map_auth_error_to_error_response; +use crate::packages::http_tracker_core; use crate::servers::http::v1::extractors::announce_request::ExtractRequest; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; @@ -46,6 +47,7 @@ pub async fn handle_without_key( Arc, Arc, Arc>>, + Arc>>, )>, ExtractRequest(announce_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): 
ExtractClientIpSources, @@ -58,6 +60,7 @@ pub async fn handle_without_key( &state.2, &state.3, &state.4, + &state.5, &announce_request, &client_ip_sources, None, @@ -76,6 +79,7 @@ pub async fn handle_with_key( Arc, Arc, Arc>>, + Arc>>, )>, ExtractRequest(announce_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, @@ -89,6 +93,7 @@ pub async fn handle_with_key( &state.2, &state.3, &state.4, + &state.5, &announce_request, &client_ip_sources, Some(key), @@ -107,6 +112,7 @@ async fn handle( authentication_service: &Arc, whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, + opt_http_stats_event_sender: &Arc>>, announce_request: &Announce, client_ip_sources: &ClientIpSources, maybe_key: Option, @@ -117,6 +123,7 @@ async fn handle( authentication_service, whitelist_authorization, opt_stats_event_sender, + opt_http_stats_event_sender, announce_request, client_ip_sources, maybe_key, @@ -142,6 +149,7 @@ async fn handle_announce( authentication_service: &Arc, whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, + opt_http_stats_event_sender: &Arc>>, announce_request: &Announce, client_ip_sources: &ClientIpSources, maybe_key: Option, @@ -181,6 +189,7 @@ async fn handle_announce( let announce_data = services::announce::invoke( announce_handler.clone(), opt_stats_event_sender.clone(), + opt_http_stats_event_sender.clone(), announce_request.info_hash, &mut peer, &peers_wanted, @@ -265,7 +274,7 @@ mod tests { use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_test_helpers::configuration; - use crate::packages; + use crate::packages::{self, http_tracker_core}; struct CoreTrackerServices { pub core_config: Arc, @@ -275,23 +284,27 @@ mod tests { pub authentication_service: Arc, } - fn initialize_private_tracker() -> CoreTrackerServices { + struct CoreHttpTrackerServices { + pub http_stats_event_sender: Arc>>, + } + + fn initialize_private_tracker() -> (CoreTrackerServices, 
CoreHttpTrackerServices) { initialize_core_tracker_services(&configuration::ephemeral_private()) } - fn initialize_listed_tracker() -> CoreTrackerServices { + fn initialize_listed_tracker() -> (CoreTrackerServices, CoreHttpTrackerServices) { initialize_core_tracker_services(&configuration::ephemeral_listed()) } - fn initialize_tracker_on_reverse_proxy() -> CoreTrackerServices { + fn initialize_tracker_on_reverse_proxy() -> (CoreTrackerServices, CoreHttpTrackerServices) { initialize_core_tracker_services(&configuration::ephemeral_with_reverse_proxy()) } - fn initialize_tracker_not_on_reverse_proxy() -> CoreTrackerServices { + fn initialize_tracker_not_on_reverse_proxy() -> (CoreTrackerServices, CoreHttpTrackerServices) { initialize_core_tracker_services(&configuration::ephemeral_without_reverse_proxy()) } - fn initialize_core_tracker_services(config: &Configuration) -> CoreTrackerServices { + fn initialize_core_tracker_services(config: &Configuration) -> (CoreTrackerServices, CoreHttpTrackerServices) { let core_config = Arc::new(config.core.clone()); let database = initialize_database(config); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); @@ -300,21 +313,31 @@ mod tests { let authentication_service = Arc::new(AuthenticationService::new(&config.core, &in_memory_key_repository)); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - let stats_event_sender = Arc::new(stats_event_sender); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &in_memory_torrent_repository, &db_torrent_repository, )); - CoreTrackerServices { - core_config, - announce_handler, - stats_event_sender, - whitelist_authorization, - authentication_service, - } + let (stats_event_sender, _stats_repository) = 
statistics::setup::factory(config.core.tracker_usage_statistics); + let stats_event_sender = Arc::new(stats_event_sender); + + // HTTP stats + let (http_stats_event_sender, http_stats_repository) = + http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let http_stats_event_sender = Arc::new(http_stats_event_sender); + let _http_stats_repository = Arc::new(http_stats_repository); + + ( + CoreTrackerServices { + core_config, + announce_handler, + stats_event_sender, + whitelist_authorization, + authentication_service, + }, + CoreHttpTrackerServices { http_stats_event_sender }, + ) } fn sample_announce_request() -> Announce { @@ -357,7 +380,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_authentication_key_is_missing() { - let core_tracker_services = initialize_private_tracker(); + let (core_tracker_services, http_core_tracker_services) = initialize_private_tracker(); let maybe_key = None; @@ -367,6 +390,7 @@ mod tests { &core_tracker_services.authentication_service, &core_tracker_services.whitelist_authorization, &core_tracker_services.stats_event_sender, + &http_core_tracker_services.http_stats_event_sender, &sample_announce_request(), &sample_client_ip_sources(), maybe_key, @@ -382,7 +406,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_authentication_key_is_invalid() { - let core_tracker_services = initialize_private_tracker(); + let (core_tracker_services, http_core_tracker_services) = initialize_private_tracker(); let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); @@ -394,6 +418,7 @@ mod tests { &core_tracker_services.authentication_service, &core_tracker_services.whitelist_authorization, &core_tracker_services.stats_event_sender, + &http_core_tracker_services.http_stats_event_sender, &sample_announce_request(), &sample_client_ip_sources(), maybe_key, @@ -413,7 +438,7 @@ mod tests { #[tokio::test] async fn 
it_should_fail_when_the_announced_torrent_is_not_whitelisted() { - let core_tracker_services = initialize_listed_tracker(); + let (core_tracker_services, http_core_tracker_services) = initialize_listed_tracker(); let announce_request = sample_announce_request(); @@ -423,6 +448,7 @@ mod tests { &core_tracker_services.authentication_service, &core_tracker_services.whitelist_authorization, &core_tracker_services.stats_event_sender, + &http_core_tracker_services.http_stats_event_sender, &announce_request, &sample_client_ip_sources(), None, @@ -450,7 +476,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { - let core_tracker_services = initialize_tracker_on_reverse_proxy(); + let (core_tracker_services, http_core_tracker_services) = initialize_tracker_on_reverse_proxy(); let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, @@ -463,6 +489,7 @@ mod tests { &core_tracker_services.authentication_service, &core_tracker_services.whitelist_authorization, &core_tracker_services.stats_event_sender, + &http_core_tracker_services.http_stats_event_sender, &sample_announce_request(), &client_ip_sources, None, @@ -487,7 +514,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { - let core_tracker_services = initialize_tracker_not_on_reverse_proxy(); + let (core_tracker_services, http_core_tracker_services) = initialize_tracker_not_on_reverse_proxy(); let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, @@ -500,6 +527,7 @@ mod tests { &core_tracker_services.authentication_service, &core_tracker_services.whitelist_authorization, &core_tracker_services.stats_event_sender, + &http_core_tracker_services.http_stats_event_sender, &sample_announce_request(), &client_ip_sources, None, diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 141cf4c45..d41a3742f 100644 
--- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -20,7 +20,7 @@ use packages::statistics::event::sender::Sender; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; -use crate::packages; +use crate::packages::{self, http_tracker_core}; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; use crate::servers::http::v1::extractors::scrape_request::ExtractRequest; @@ -36,6 +36,7 @@ pub async fn handle_without_key( Arc, Arc, Arc>>, + Arc>>, )>, ExtractRequest(scrape_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, @@ -47,6 +48,7 @@ pub async fn handle_without_key( &state.1, &state.2, &state.3, + &state.4, &scrape_request, &client_ip_sources, None, @@ -66,6 +68,7 @@ pub async fn handle_with_key( Arc, Arc, Arc>>, + Arc>>, )>, ExtractRequest(scrape_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, @@ -78,6 +81,7 @@ pub async fn handle_with_key( &state.1, &state.2, &state.3, + &state.4, &scrape_request, &client_ip_sources, Some(key), @@ -91,6 +95,7 @@ async fn handle( scrape_handler: &Arc, authentication_service: &Arc, stats_event_sender: &Arc>>, + http_stats_event_sender: &Arc>>, scrape_request: &Scrape, client_ip_sources: &ClientIpSources, maybe_key: Option, @@ -100,6 +105,7 @@ async fn handle( scrape_handler, authentication_service, stats_event_sender, + http_stats_event_sender, scrape_request, client_ip_sources, maybe_key, @@ -124,6 +130,7 @@ async fn handle_scrape( scrape_handler: &Arc, authentication_service: &Arc, opt_stats_event_sender: &Arc>>, + opt_http_stats_event_sender: &Arc>>, scrape_request: &Scrape, client_ip_sources: &ClientIpSources, maybe_key: Option, @@ -150,9 +157,22 @@ async fn handle_scrape( }; if return_real_scrape_data { - 
Ok(services::scrape::invoke(scrape_handler, opt_stats_event_sender, &scrape_request.info_hashes, &peer_ip).await) + Ok(services::scrape::invoke( + scrape_handler, + opt_stats_event_sender, + opt_http_stats_event_sender, + &scrape_request.info_hashes, + &peer_ip, + ) + .await) } else { - Ok(services::scrape::fake(opt_stats_event_sender, &scrape_request.info_hashes, &peer_ip).await) + Ok(services::scrape::fake( + opt_stats_event_sender, + opt_http_stats_event_sender, + &scrape_request.info_hashes, + &peer_ip, + ) + .await) } } @@ -182,7 +202,7 @@ mod tests { use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_test_helpers::configuration; - use crate::packages; + use crate::packages::{self, http_tracker_core}; struct CoreTrackerServices { pub core_config: Arc, @@ -191,39 +211,52 @@ mod tests { pub authentication_service: Arc, } - fn initialize_private_tracker() -> CoreTrackerServices { + struct CoreHttpTrackerServices { + pub http_stats_event_sender: Arc>>, + } + + fn initialize_private_tracker() -> (CoreTrackerServices, CoreHttpTrackerServices) { initialize_core_tracker_services(&configuration::ephemeral_private()) } - fn initialize_listed_tracker() -> CoreTrackerServices { + fn initialize_listed_tracker() -> (CoreTrackerServices, CoreHttpTrackerServices) { initialize_core_tracker_services(&configuration::ephemeral_listed()) } - fn initialize_tracker_on_reverse_proxy() -> CoreTrackerServices { + fn initialize_tracker_on_reverse_proxy() -> (CoreTrackerServices, CoreHttpTrackerServices) { initialize_core_tracker_services(&configuration::ephemeral_with_reverse_proxy()) } - fn initialize_tracker_not_on_reverse_proxy() -> CoreTrackerServices { + fn initialize_tracker_not_on_reverse_proxy() -> (CoreTrackerServices, CoreHttpTrackerServices) { initialize_core_tracker_services(&configuration::ephemeral_without_reverse_proxy()) } - fn initialize_core_tracker_services(config: &Configuration) -> CoreTrackerServices { + fn 
initialize_core_tracker_services(config: &Configuration) -> (CoreTrackerServices, CoreHttpTrackerServices) { let core_config = Arc::new(config.core.clone()); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); let authentication_service = Arc::new(AuthenticationService::new(&config.core, &in_memory_key_repository)); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let stats_event_sender = Arc::new(stats_event_sender); - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - CoreTrackerServices { - core_config, - scrape_handler, - stats_event_sender, - authentication_service, - } + // HTTP stats + let (http_stats_event_sender, _http_stats_repository) = + http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let http_stats_event_sender = Arc::new(http_stats_event_sender); + + ( + CoreTrackerServices { + core_config, + scrape_handler, + stats_event_sender, + authentication_service, + }, + CoreHttpTrackerServices { http_stats_event_sender }, + ) } fn sample_scrape_request() -> Scrape { @@ -257,7 +290,7 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_missing() { - let core_tracker_services = initialize_private_tracker(); + let (core_tracker_services, core_http_tracker_services) = initialize_private_tracker(); let scrape_request = sample_scrape_request(); let maybe_key = None; @@ -267,6 +300,7 @@ mod tests { &core_tracker_services.scrape_handler, 
&core_tracker_services.authentication_service, &core_tracker_services.stats_event_sender, + &core_http_tracker_services.http_stats_event_sender, &scrape_request, &sample_client_ip_sources(), maybe_key, @@ -281,7 +315,7 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_invalid() { - let core_tracker_services = initialize_private_tracker(); + let (core_tracker_services, core_http_tracker_services) = initialize_private_tracker(); let scrape_request = sample_scrape_request(); let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); @@ -292,6 +326,7 @@ mod tests { &core_tracker_services.scrape_handler, &core_tracker_services.authentication_service, &core_tracker_services.stats_event_sender, + &core_http_tracker_services.http_stats_event_sender, &scrape_request, &sample_client_ip_sources(), maybe_key, @@ -314,7 +349,7 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_torrent_is_not_whitelisted() { - let core_tracker_services = initialize_listed_tracker(); + let (core_tracker_services, core_http_tracker_services) = initialize_listed_tracker(); let scrape_request = sample_scrape_request(); @@ -323,6 +358,7 @@ mod tests { &core_tracker_services.scrape_handler, &core_tracker_services.authentication_service, &core_tracker_services.stats_event_sender, + &core_http_tracker_services.http_stats_event_sender, &scrape_request, &sample_client_ip_sources(), None, @@ -346,7 +382,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { - let core_tracker_services = initialize_tracker_on_reverse_proxy(); + let (core_tracker_services, core_http_tracker_services) = initialize_tracker_on_reverse_proxy(); let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, @@ -358,6 +394,7 @@ mod tests { &core_tracker_services.scrape_handler, 
&core_tracker_services.authentication_service, &core_tracker_services.stats_event_sender, + &core_http_tracker_services.http_stats_event_sender, &sample_scrape_request(), &client_ip_sources, None, @@ -382,7 +419,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { - let core_tracker_services = initialize_tracker_not_on_reverse_proxy(); + let (core_tracker_services, core_http_tracker_services) = initialize_tracker_not_on_reverse_proxy(); let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, @@ -394,6 +431,7 @@ mod tests { &core_tracker_services.scrape_handler, &core_tracker_services.authentication_service, &core_tracker_services.stats_event_sender, + &core_http_tracker_services.http_stats_event_sender, &sample_scrape_request(), &client_ip_sources, None, diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index ed9aa05e6..7caccb673 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -44,6 +44,7 @@ pub fn router(http_tracker_container: Arc, server_socket_a http_tracker_container.authentication_service.clone(), http_tracker_container.whitelist_authorization.clone(), http_tracker_container.stats_event_sender.clone(), + http_tracker_container.http_stats_event_sender.clone(), )), ) .route( @@ -54,6 +55,7 @@ pub fn router(http_tracker_container: Arc, server_socket_a http_tracker_container.authentication_service.clone(), http_tracker_container.whitelist_authorization.clone(), http_tracker_container.stats_event_sender.clone(), + http_tracker_container.http_stats_event_sender.clone(), )), ) // Scrape request @@ -64,6 +66,7 @@ pub fn router(http_tracker_container: Arc, server_socket_a http_tracker_container.scrape_handler.clone(), http_tracker_container.authentication_service.clone(), http_tracker_container.stats_event_sender.clone(), + http_tracker_container.http_stats_event_sender.clone(), )), ) .route( @@ -73,6 +76,7 @@ pub fn 
router(http_tracker_container: Arc, server_socket_a http_tracker_container.scrape_handler.clone(), http_tracker_container.authentication_service.clone(), http_tracker_container.stats_event_sender.clone(), + http_tracker_container.http_stats_event_sender.clone(), )), ) // Add extension to get the client IP from the connection info diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 61bbd93c6..e7170c7e1 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -17,7 +17,7 @@ use packages::statistics::event::sender::Sender; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; -use crate::packages; +use crate::packages::{self, http_tracker_core}; /// The HTTP tracker `announce` service. /// @@ -32,6 +32,7 @@ use crate::packages; pub async fn invoke( announce_handler: Arc, opt_stats_event_sender: Arc>>, + opt_http_stats_event_sender: Arc>>, info_hash: InfoHash, peer: &mut peer::Peer, peers_wanted: &PeersWanted, @@ -52,6 +53,21 @@ pub async fn invoke( } } + if let Some(http_stats_event_sender) = opt_http_stats_event_sender.as_deref() { + match original_peer_ip { + IpAddr::V4(_) => { + http_stats_event_sender + .send_event(http_tracker_core::statistics::event::Event::Tcp4Announce) + .await; + } + IpAddr::V6(_) => { + http_stats_event_sender + .send_event(http_tracker_core::statistics::event::Event::Tcp6Announce) + .await; + } + } + } + announce_data } @@ -77,26 +93,41 @@ mod tests { pub stats_event_sender: Arc>>, } - fn initialize_core_tracker_services() -> CoreTrackerServices { + struct CoreHttpTrackerServices { + pub http_stats_event_sender: Arc>>, + } + + fn initialize_core_tracker_services() -> (CoreTrackerServices, CoreHttpTrackerServices) { let config = configuration::ephemeral_public(); let core_config = Arc::new(config.core.clone()); let database = initialize_database(&config); let in_memory_torrent_repository = 
Arc::new(InMemoryTorrentRepository::default()); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - let stats_event_sender = Arc::new(stats_event_sender); + let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &in_memory_torrent_repository, &db_torrent_repository, )); - CoreTrackerServices { - core_config, - announce_handler, - stats_event_sender, - } + let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + let stats_event_sender = Arc::new(stats_event_sender); + + // HTTP stats + let (http_stats_event_sender, http_stats_repository) = + http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let http_stats_event_sender = Arc::new(http_stats_event_sender); + let _http_stats_repository = Arc::new(http_stats_repository); + + ( + CoreTrackerServices { + core_config, + announce_handler, + stats_event_sender, + }, + CoreHttpTrackerServices { http_stats_event_sender }, + ) } fn sample_peer_using_ipv4() -> peer::Peer { @@ -129,7 +160,7 @@ mod tests { use packages::statistics::event::Event; use tokio::sync::mpsc::error::SendError; - use crate::packages; + use crate::packages::{self, http_tracker_core}; mock! { StatsEventSender {} @@ -138,6 +169,13 @@ mod tests { } } + mock! 
{ + HttpStatsEventSender {} + impl http_tracker_core::statistics::event::sender::Sender for HttpStatsEventSender { + fn send_event(&self, event: http_tracker_core::statistics::event::Event) -> BoxFuture<'static,Option > > > ; + } + } + mod with_tracker_in_any_mode { use std::future; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; @@ -156,10 +194,10 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; - use crate::packages; + use crate::packages::{self, http_tracker_core}; use crate::servers::http::v1::services::announce::invoke; use crate::servers::http::v1::services::announce::tests::{ - initialize_core_tracker_services, sample_peer, MockStatsEventSender, + initialize_core_tracker_services, sample_peer, MockHttpStatsEventSender, MockStatsEventSender, }; fn initialize_announce_handler() -> Arc { @@ -178,13 +216,14 @@ mod tests { #[tokio::test] async fn it_should_return_the_announce_data() { - let core_tracker_services = initialize_core_tracker_services(); + let (core_tracker_services, core_http_tracker_services) = initialize_core_tracker_services(); let mut peer = sample_peer(); let announce_data = invoke( core_tracker_services.announce_handler.clone(), core_tracker_services.stats_event_sender.clone(), + core_http_tracker_services.http_stats_event_sender.clone(), sample_info_hash(), &mut peer, &PeersWanted::All, @@ -215,6 +254,15 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); + let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); + http_stats_event_sender_mock + .expect_send_event() + .with(eq(http_tracker_core::statistics::event::Event::Tcp4Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let http_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(http_stats_event_sender_mock))); + let announce_handler = initialize_announce_handler(); let mut peer = sample_peer_using_ipv4(); @@ 
-222,6 +270,7 @@ mod tests { let _announce_data = invoke( announce_handler, stats_event_sender, + http_stats_event_sender, sample_info_hash(), &mut peer, &PeersWanted::All, @@ -260,6 +309,16 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); + // Assert that the event sent is a TCP4 event + let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); + http_stats_event_sender_mock + .expect_send_event() + .with(eq(http_tracker_core::statistics::event::Event::Tcp4Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let http_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(http_stats_event_sender_mock))); + let mut peer = peer_with_the_ipv4_loopback_ip(); let announce_handler = tracker_with_an_ipv6_external_ip(); @@ -267,6 +326,7 @@ mod tests { let _announce_data = invoke( announce_handler, stats_event_sender, + http_stats_event_sender, sample_info_hash(), &mut peer, &PeersWanted::All, @@ -286,6 +346,16 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); + // Assert that the event sent is a TCP4 event + let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); + http_stats_event_sender_mock + .expect_send_event() + .with(eq(http_tracker_core::statistics::event::Event::Tcp6Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let http_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(http_stats_event_sender_mock))); + let announce_handler = initialize_announce_handler(); let mut peer = sample_peer_using_ipv6(); @@ -293,6 +363,7 @@ mod tests { let _announce_data = invoke( announce_handler, stats_event_sender, + http_stats_event_sender, sample_info_hash(), &mut peer, &PeersWanted::All, diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 1ac42ff10..e745609aa 100644 --- a/src/servers/http/v1/services/scrape.rs +++ 
b/src/servers/http/v1/services/scrape.rs @@ -16,7 +16,7 @@ use packages::statistics::event::sender::Sender; use packages::statistics::{self}; use torrust_tracker_primitives::core::ScrapeData; -use crate::packages; +use crate::packages::{self, http_tracker_core}; /// The HTTP tracker `scrape` service. /// @@ -31,12 +31,13 @@ use crate::packages; pub async fn invoke( scrape_handler: &Arc, opt_stats_event_sender: &Arc>>, + opt_http_stats_event_sender: &Arc>>, info_hashes: &Vec, original_peer_ip: &IpAddr, ) -> ScrapeData { let scrape_data = scrape_handler.scrape(info_hashes).await; - send_scrape_event(original_peer_ip, opt_stats_event_sender).await; + send_scrape_event(original_peer_ip, opt_stats_event_sender, opt_http_stats_event_sender).await; scrape_data } @@ -49,15 +50,20 @@ pub async fn invoke( /// > **NOTICE**: tracker statistics are not updated in this case. pub async fn fake( opt_stats_event_sender: &Arc>>, + opt_http_stats_event_sender: &Arc>>, info_hashes: &Vec, original_peer_ip: &IpAddr, ) -> ScrapeData { - send_scrape_event(original_peer_ip, opt_stats_event_sender).await; + send_scrape_event(original_peer_ip, opt_stats_event_sender, opt_http_stats_event_sender).await; ScrapeData::zeroed(info_hashes) } -async fn send_scrape_event(original_peer_ip: &IpAddr, opt_stats_event_sender: &Arc>>) { +async fn send_scrape_event( + original_peer_ip: &IpAddr, + opt_stats_event_sender: &Arc>>, + opt_http_stats_event_sender: &Arc>>, +) { if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { match original_peer_ip { IpAddr::V4(_) => { @@ -68,6 +74,21 @@ async fn send_scrape_event(original_peer_ip: &IpAddr, opt_stats_event_sender: &A } } } + + if let Some(http_stats_event_sender) = opt_http_stats_event_sender.as_deref() { + match original_peer_ip { + IpAddr::V4(_) => { + http_stats_event_sender + .send_event(http_tracker_core::statistics::event::Event::Tcp4Scrape) + .await; + } + IpAddr::V6(_) => { + http_stats_event_sender + 
.send_event(http_tracker_core::statistics::event::Event::Tcp6Scrape) + .await; + } + } + } } #[cfg(test)] @@ -94,7 +115,7 @@ mod tests { use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; - use crate::packages; + use crate::packages::{self, http_tracker_core}; fn initialize_announce_and_scrape_handlers_for_public_tracker() -> (Arc, Arc) { let config = configuration::ephemeral_public(); @@ -147,6 +168,13 @@ mod tests { } } + mock! { + HttpStatsEventSender {} + impl http_tracker_core::statistics::event::sender::Sender for HttpStatsEventSender { + fn send_event(&self, event: http_tracker_core::statistics::event::Event) -> BoxFuture<'static,Option > > > ; + } + } + mod with_real_data { use std::future; @@ -159,11 +187,11 @@ mod tests { use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::packages; + use crate::packages::{self, http_tracker_core}; use crate::servers::http::v1::services::scrape::invoke; use crate::servers::http::v1::services::scrape::tests::{ initialize_announce_and_scrape_handlers_for_public_tracker, initialize_scrape_handler, sample_info_hash, - sample_info_hashes, sample_peer, MockStatsEventSender, + sample_info_hashes, sample_peer, MockHttpStatsEventSender, MockStatsEventSender, }; #[tokio::test] @@ -171,6 +199,10 @@ mod tests { let (stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); + let (http_stats_event_sender, _http_stats_repository) = + packages::http_tracker_core::statistics::setup::factory(false); + let http_stats_event_sender = Arc::new(http_stats_event_sender); + let (announce_handler, scrape_handler) = initialize_announce_and_scrape_handlers_for_public_tracker(); let info_hash = sample_info_hash(); @@ -181,7 +213,14 @@ mod tests { let original_peer_ip = peer.ip(); announce_handler.announce(&info_hash, &mut peer, 
&original_peer_ip, &PeersWanted::All); - let scrape_data = invoke(&scrape_handler, &stats_event_sender, &info_hashes, &original_peer_ip).await; + let scrape_data = invoke( + &scrape_handler, + &stats_event_sender, + &http_stats_event_sender, + &info_hashes, + &original_peer_ip, + ) + .await; let mut expected_scrape_data = ScrapeData::empty(); expected_scrape_data.add_file( @@ -207,11 +246,27 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); + let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); + http_stats_event_sender_mock + .expect_send_event() + .with(eq(http_tracker_core::statistics::event::Event::Tcp4Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let http_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(http_stats_event_sender_mock))); + let scrape_handler = initialize_scrape_handler(); let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); - invoke(&scrape_handler, &stats_event_sender, &sample_info_hashes(), &peer_ip).await; + invoke( + &scrape_handler, + &stats_event_sender, + &http_stats_event_sender, + &sample_info_hashes(), + &peer_ip, + ) + .await; } #[tokio::test] @@ -225,11 +280,27 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); + let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); + http_stats_event_sender_mock + .expect_send_event() + .with(eq(http_tracker_core::statistics::event::Event::Tcp6Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let http_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(http_stats_event_sender_mock))); + let scrape_handler = initialize_scrape_handler(); let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); - invoke(&scrape_handler, &stats_event_sender, &sample_info_hashes(), &peer_ip).await; + invoke( + &scrape_handler, + &stats_event_sender, + 
&http_stats_event_sender, + &sample_info_hashes(), + &peer_ip, + ) + .await; } } @@ -244,11 +315,11 @@ mod tests { use packages::statistics; use torrust_tracker_primitives::core::ScrapeData; - use crate::packages; + use crate::packages::{self, http_tracker_core}; use crate::servers::http::v1::services::scrape::fake; use crate::servers::http::v1::services::scrape::tests::{ initialize_announce_and_scrape_handlers_for_public_tracker, sample_info_hash, sample_info_hashes, sample_peer, - MockStatsEventSender, + MockHttpStatsEventSender, MockStatsEventSender, }; #[tokio::test] @@ -256,6 +327,10 @@ mod tests { let (stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); + let (http_stats_event_sender, _http_stats_repository) = + packages::http_tracker_core::statistics::setup::factory(false); + let http_stats_event_sender = Arc::new(http_stats_event_sender); + let (announce_handler, _scrape_handler) = initialize_announce_and_scrape_handlers_for_public_tracker(); let info_hash = sample_info_hash(); @@ -266,7 +341,7 @@ mod tests { let original_peer_ip = peer.ip(); announce_handler.announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::All); - let scrape_data = fake(&stats_event_sender, &info_hashes, &original_peer_ip).await; + let scrape_data = fake(&stats_event_sender, &http_stats_event_sender, &info_hashes, &original_peer_ip).await; let expected_scrape_data = ScrapeData::zeroed(&info_hashes); @@ -284,9 +359,18 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); + let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); + http_stats_event_sender_mock + .expect_send_event() + .with(eq(http_tracker_core::statistics::event::Event::Tcp4Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let http_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(http_stats_event_sender_mock))); + let peer_ip = 
IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); - fake(&stats_event_sender, &sample_info_hashes(), &peer_ip).await; + fake(&stats_event_sender, &http_stats_event_sender, &sample_info_hashes(), &peer_ip).await; } #[tokio::test] @@ -300,9 +384,18 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); + let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); + http_stats_event_sender_mock + .expect_send_event() + .with(eq(http_tracker_core::statistics::event::Event::Tcp6Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let http_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(http_stats_event_sender_mock))); + let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); - fake(&stats_event_sender, &sample_info_hashes(), &peer_ip).await; + fake(&stats_event_sender, &http_stats_event_sender, &sample_info_hashes(), &peer_ip).await; } } } diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 2828982f7..17013250a 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -62,6 +62,7 @@ impl Environment { scrape_handler: app_container.scrape_handler.clone(), whitelist_authorization: app_container.whitelist_authorization.clone(), stats_event_sender: app_container.stats_event_sender.clone(), + http_stats_event_sender: app_container.http_stats_event_sender.clone(), authentication_service: app_container.authentication_service.clone(), }); From f33665dc60fd98aa7c2d0081c83456ca41d6b3d7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 31 Jan 2025 15:55:35 +0000 Subject: [PATCH 196/802] refactor: [#1228] start using the udp tracker stats Parallel change, step 2: 1. [x] Start using HTTP Tracker Core Stats 2. [x] Start using UDP Tracker Core Stats 3. [ ] Get metrics from HTTP and UDP Tracker Core Stats 4. [ ] Remove deprecate unified HTTP and UDP stats. 
--- src/container.rs | 2 + src/servers/udp/handlers.rs | 323 +++++++++++++++++++++++----- src/servers/udp/server/launcher.rs | 29 ++- src/servers/udp/server/processor.rs | 32 ++- tests/servers/udp/environment.rs | 1 + 5 files changed, 332 insertions(+), 55 deletions(-) diff --git a/src/container.rs b/src/container.rs index 71c60a517..7b44bc834 100644 --- a/src/container.rs +++ b/src/container.rs @@ -46,6 +46,7 @@ pub struct UdpTrackerContainer { pub scrape_handler: Arc, pub whitelist_authorization: Arc, pub stats_event_sender: Arc>>, + pub udp_stats_event_sender: Arc>>, pub ban_service: Arc>, } @@ -59,6 +60,7 @@ impl UdpTrackerContainer { scrape_handler: app_container.scrape_handler.clone(), whitelist_authorization: app_container.whitelist_authorization.clone(), stats_event_sender: app_container.stats_event_sender.clone(), + udp_stats_event_sender: app_container.udp_stats_event_sender.clone(), ban_service: app_container.ban_service.clone(), } } diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 9f2562713..4c943516e 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -24,7 +24,7 @@ use zerocopy::network_endian::I32; use super::connection_cookie::{check, make}; use super::RawRequest; use crate::container::UdpTrackerContainer; -use crate::packages::statistics; +use crate::packages::{statistics, udp_tracker_core}; use crate::servers::udp::error::Error; use crate::servers::udp::{peer_builder, UDP_TRACKER_LOG_TARGET}; use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; @@ -99,6 +99,7 @@ pub(crate) async fn handle_packet( local_addr, request_id, &udp_tracker_container.stats_event_sender, + &udp_tracker_container.udp_stats_event_sender, cookie_time_values.valid_range.clone(), &e, Some(transaction_id), @@ -112,6 +113,7 @@ pub(crate) async fn handle_packet( local_addr, request_id, &udp_tracker_container.stats_event_sender, + &udp_tracker_container.udp_stats_event_sender, cookie_time_values.valid_range.clone(), &e, 
None, @@ -145,6 +147,7 @@ pub async fn handle_request( remote_addr, &connect_request, &udp_tracker_container.stats_event_sender, + &udp_tracker_container.udp_stats_event_sender, cookie_time_values.issue_time, ) .await), @@ -156,6 +159,7 @@ pub async fn handle_request( &udp_tracker_container.announce_handler, &udp_tracker_container.whitelist_authorization, &udp_tracker_container.stats_event_sender, + &udp_tracker_container.udp_stats_event_sender, cookie_time_values.valid_range, ) .await @@ -166,6 +170,7 @@ pub async fn handle_request( &scrape_request, &udp_tracker_container.scrape_handler, &udp_tracker_container.stats_event_sender, + &udp_tracker_container.udp_stats_event_sender, cookie_time_values.valid_range, ) .await @@ -179,11 +184,12 @@ pub async fn handle_request( /// # Errors /// /// This function does not ever return an error. -#[instrument(fields(transaction_id), skip(opt_stats_event_sender), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id), skip(opt_stats_event_sender, opt_udp_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_connect( remote_addr: SocketAddr, request: &ConnectRequest, opt_stats_event_sender: &Arc>>, + opt_udp_stats_event_sender: &Arc>>, cookie_issue_time: f64, ) -> Response { tracing::Span::current().record("transaction_id", request.transaction_id.0.to_string()); @@ -208,6 +214,21 @@ pub async fn handle_connect( } } + if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { + match remote_addr { + SocketAddr::V4(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp4Connect) + .await; + } + SocketAddr::V6(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp6Connect) + .await; + } + } + } + Response::from(response) } @@ -218,7 +239,7 @@ pub async fn handle_connect( /// /// If a error happens in the `handle_announce` function, it will just return the `ServerError`. 
#[allow(clippy::too_many_arguments)] -#[instrument(fields(transaction_id, connection_id, info_hash), skip(announce_handler, whitelist_authorization, opt_stats_event_sender), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id, connection_id, info_hash), skip(announce_handler, whitelist_authorization, opt_stats_event_sender, opt_udp_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_announce( remote_addr: SocketAddr, request: &AnnounceRequest, @@ -226,6 +247,7 @@ pub async fn handle_announce( announce_handler: &Arc, whitelist_authorization: &Arc, opt_stats_event_sender: &Arc>>, + opt_udp_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { tracing::Span::current() @@ -270,6 +292,21 @@ pub async fn handle_announce( } } + if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { + match remote_client_ip { + IpAddr::V4(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp4Announce) + .await; + } + IpAddr::V6(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp6Announce) + .await; + } + } + } + #[allow(clippy::cast_possible_truncation)] if remote_addr.is_ipv4() { let announce_response = AnnounceResponse { @@ -330,12 +367,13 @@ pub async fn handle_announce( /// # Errors /// /// This function does not ever return an error. 
-#[instrument(fields(transaction_id, connection_id), skip(scrape_handler, opt_stats_event_sender), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id, connection_id), skip(scrape_handler, opt_stats_event_sender, opt_udp_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_scrape( remote_addr: SocketAddr, request: &ScrapeRequest, scrape_handler: &Arc, opt_stats_event_sender: &Arc>>, + opt_udp_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { tracing::Span::current() @@ -387,6 +425,21 @@ pub async fn handle_scrape( } } + if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { + match remote_addr { + SocketAddr::V4(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp4Scrape) + .await; + } + SocketAddr::V6(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp6Scrape) + .await; + } + } + } + let response = ScrapeResponse { transaction_id: request.transaction_id, torrent_stats, @@ -395,12 +448,14 @@ pub async fn handle_scrape( Ok(Response::from(response)) } -#[instrument(fields(transaction_id), skip(opt_stats_event_sender), ret(level = Level::TRACE))] +#[allow(clippy::too_many_arguments)] +#[instrument(fields(transaction_id), skip(opt_stats_event_sender, opt_udp_stats_event_sender), ret(level = Level::TRACE))] async fn handle_error( remote_addr: SocketAddr, local_addr: SocketAddr, request_id: Uuid, opt_stats_event_sender: &Arc>>, + opt_udp_stats_event_sender: &Arc>>, cookie_valid_range: Range, e: &Error, transaction_id: Option, @@ -447,6 +502,21 @@ async fn handle_error( } } } + + if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { + match remote_addr { + SocketAddr::V4(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp4Error) + .await; + } + SocketAddr::V6(_) => { + udp_stats_event_sender + 
.send_event(udp_tracker_core::statistics::event::Event::Udp6Error) + .await; + } + } + } } Response::from(ErrorResponse { @@ -488,7 +558,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::gen_remote_fingerprint; - use crate::packages::statistics; + use crate::packages::{statistics, udp_tracker_core}; use crate::{packages, CurrentClock}; struct CoreTrackerServices { @@ -501,31 +571,33 @@ mod tests { pub whitelist_authorization: Arc, } + struct CoreUdpTrackerServices { + pub udp_stats_event_sender: Arc>>, + } + fn default_testing_tracker_configuration() -> Configuration { configuration::ephemeral() } - fn initialize_core_tracker_services_for_default_tracker_configuration() -> CoreTrackerServices { + fn initialize_core_tracker_services_for_default_tracker_configuration() -> (CoreTrackerServices, CoreUdpTrackerServices) { initialize_core_tracker_services(&default_testing_tracker_configuration()) } - fn initialize_core_tracker_services_for_public_tracker() -> CoreTrackerServices { + fn initialize_core_tracker_services_for_public_tracker() -> (CoreTrackerServices, CoreUdpTrackerServices) { initialize_core_tracker_services(&configuration::ephemeral_public()) } - fn initialize_core_tracker_services_for_listed_tracker() -> CoreTrackerServices { + fn initialize_core_tracker_services_for_listed_tracker() -> (CoreTrackerServices, CoreUdpTrackerServices) { initialize_core_tracker_services(&configuration::ephemeral_listed()) } - fn initialize_core_tracker_services(config: &Configuration) -> CoreTrackerServices { + fn initialize_core_tracker_services(config: &Configuration) -> (CoreTrackerServices, CoreUdpTrackerServices) { let core_config = Arc::new(config.core.clone()); let database = initialize_database(config); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_torrent_repository = 
Arc::new(InMemoryTorrentRepository::default()); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - let stats_event_sender = Arc::new(stats_event_sender); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &in_memory_torrent_repository, @@ -533,15 +605,24 @@ mod tests { )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - CoreTrackerServices { - core_config, - announce_handler, - scrape_handler, - in_memory_torrent_repository, - stats_event_sender, - in_memory_whitelist, - whitelist_authorization, - } + let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + let stats_event_sender = Arc::new(stats_event_sender); + + let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + ( + CoreTrackerServices { + core_config, + announce_handler, + scrape_handler, + in_memory_torrent_repository, + stats_event_sender, + in_memory_whitelist, + whitelist_authorization, + }, + CoreUdpTrackerServices { udp_stats_event_sender }, + ) } fn sample_ipv4_remote_addr() -> SocketAddr { @@ -645,6 +726,13 @@ mod tests { } } + mock! 
{ + UdpStatsEventSender {} + impl udp_tracker_core::statistics::event::sender::Sender for UdpStatsEventSender { + fn send_event(&self, event: udp_tracker_core::statistics::event::Event) -> BoxFuture<'static,Option > > > ; + } + } + mod connect_request { use std::future; @@ -655,12 +743,12 @@ mod tests { use packages::statistics; use super::{sample_ipv4_socket_address, sample_ipv6_remote_addr}; - use crate::packages; + use crate::packages::{self, udp_tracker_core}; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_connect; use crate::servers::udp::handlers::tests::{ sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv6_remote_addr_fingerprint, sample_issue_time, - MockStatsEventSender, + MockStatsEventSender, MockUdpStatsEventSender, }; fn sample_connect_request() -> ConnectRequest { @@ -674,11 +762,21 @@ mod tests { let (stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); + let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + let request = ConnectRequest { transaction_id: TransactionId(0i32.into()), }; - let response = handle_connect(sample_ipv4_remote_addr(), &request, &stats_event_sender, sample_issue_time()).await; + let response = handle_connect( + sample_ipv4_remote_addr(), + &request, + &stats_event_sender, + &udp_stats_event_sender, + sample_issue_time(), + ) + .await; assert_eq!( response, @@ -694,11 +792,21 @@ mod tests { let (stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); + let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + let request = ConnectRequest { 
transaction_id: TransactionId(0i32.into()), }; - let response = handle_connect(sample_ipv4_remote_addr(), &request, &stats_event_sender, sample_issue_time()).await; + let response = handle_connect( + sample_ipv4_remote_addr(), + &request, + &stats_event_sender, + &udp_stats_event_sender, + sample_issue_time(), + ) + .await; assert_eq!( response, @@ -714,11 +822,21 @@ mod tests { let (stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); + let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + let request = ConnectRequest { transaction_id: TransactionId(0i32.into()), }; - let response = handle_connect(sample_ipv6_remote_addr(), &request, &stats_event_sender, sample_issue_time()).await; + let response = handle_connect( + sample_ipv6_remote_addr(), + &request, + &stats_event_sender, + &udp_stats_event_sender, + sample_issue_time(), + ) + .await; assert_eq!( response, @@ -740,12 +858,22 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp4Connect)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + let client_socket_address = sample_ipv4_socket_address(); handle_connect( client_socket_address, &sample_connect_request(), &stats_event_sender, + &udp_stats_event_sender, sample_issue_time(), ) .await; @@ -762,10 +890,20 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + 
.expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp6Connect)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + handle_connect( sample_ipv6_remote_addr(), &sample_connect_request(), &stats_event_sender, + &udp_stats_event_sender, sample_issue_time(), ) .await; @@ -861,19 +999,19 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_configuration::Core; - use crate::packages::{self, statistics}; + use crate::packages::{self, statistics, udp_tracker_core}; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ gen_remote_fingerprint, initialize_core_tracker_services_for_default_tracker_configuration, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_socket_address, - sample_issue_time, MockStatsEventSender, TorrentPeerBuilder, + sample_issue_time, MockStatsEventSender, MockUdpStatsEventSender, TorrentPeerBuilder, }; use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { - let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); let client_ip = Ipv4Addr::new(126, 0, 0, 1); let client_port = 8080; @@ -897,6 +1035,7 @@ mod tests { &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, &core_tracker_services.stats_event_sender, + &core_udp_tracker_services.udp_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -916,7 +1055,7 @@ mod tests { #[tokio::test] async fn the_announced_peer_should_not_be_included_in_the_response() { - let core_tracker_services = 
initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); @@ -931,6 +1070,7 @@ mod tests { &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, &core_tracker_services.stats_event_sender, + &core_udp_tracker_services.udp_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -957,7 +1097,7 @@ mod tests { // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): // "Do note that most trackers will only honor the IP address field under limited circumstances." - let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); @@ -984,6 +1124,7 @@ mod tests { &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, &core_tracker_services.stats_event_sender, + &core_udp_tracker_services.udp_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -1020,6 +1161,10 @@ mod tests { let (stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); + let (udp_stats_event_sender, _udp_stats_repository) = + packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) @@ -1032,6 +1177,7 @@ mod tests { &announce_handler, &whitelist_authorization, &stats_event_sender, + &udp_stats_event_sender, sample_cookie_valid_range(), ) 
.await @@ -1040,7 +1186,7 @@ mod tests { #[tokio::test] async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { - let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, _core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); add_a_torrent_peer_using_ipv6(&core_tracker_services.in_memory_torrent_repository); @@ -1071,7 +1217,17 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let core_tracker_services = initialize_core_tracker_services_for_default_tracker_configuration(); + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp4Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + + let (core_tracker_services, _core_udp_tracker_services) = + initialize_core_tracker_services_for_default_tracker_configuration(); handle_announce( sample_ipv4_socket_address(), @@ -1080,6 +1236,7 @@ mod tests { &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, &stats_event_sender, + &udp_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -1102,7 +1259,8 @@ mod tests { #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { - let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, core_udp_tracker_services) = + initialize_core_tracker_services_for_public_tracker(); let client_ip = Ipv4Addr::new(127, 0, 0, 1); let client_port = 8080; @@ -1126,6 +1284,7 @@ mod tests { &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, 
&core_tracker_services.stats_event_sender, + &core_udp_tracker_services.udp_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -1163,19 +1322,19 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_configuration::Core; - use crate::packages::{self, statistics}; + use crate::packages::{self, statistics, udp_tracker_core}; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ gen_remote_fingerprint, initialize_core_tracker_services_for_default_tracker_configuration, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, - sample_issue_time, MockStatsEventSender, TorrentPeerBuilder, + sample_issue_time, MockStatsEventSender, MockUdpStatsEventSender, TorrentPeerBuilder, }; use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { - let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); @@ -1200,6 +1359,7 @@ mod tests { &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, &core_tracker_services.stats_event_sender, + &core_udp_tracker_services.udp_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -1219,7 +1379,7 @@ mod tests { #[tokio::test] async fn the_announced_peer_should_not_be_included_in_the_response() { - let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = 
client_ip_v4.to_ipv6_compatible(); @@ -1237,6 +1397,7 @@ mod tests { &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, &core_tracker_services.stats_event_sender, + &core_udp_tracker_services.udp_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -1263,7 +1424,7 @@ mod tests { // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): // "Do note that most trackers will only honor the IP address field under limited circumstances." - let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); @@ -1290,6 +1451,7 @@ mod tests { &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, &core_tracker_services.stats_event_sender, + &core_udp_tracker_services.udp_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -1326,6 +1488,10 @@ mod tests { let (stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); + let (udp_stats_event_sender, _udp_stats_repository) = + packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); let client_port = 8080; @@ -1341,6 +1507,7 @@ mod tests { &announce_handler, &whitelist_authorization, &stats_event_sender, + &udp_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -1349,7 +1516,7 @@ mod tests { #[tokio::test] async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { - let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, 
_core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); add_a_torrent_peer_using_ipv4(&core_tracker_services.in_memory_torrent_repository); @@ -1380,7 +1547,17 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); - let core_tracker_services = initialize_core_tracker_services_for_default_tracker_configuration(); + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp6Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + + let (core_tracker_services, _core_udp_tracker_services) = + initialize_core_tracker_services_for_default_tracker_configuration(); let remote_addr = sample_ipv6_remote_addr(); @@ -1395,6 +1572,7 @@ mod tests { &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, &stats_event_sender, + &udp_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -1416,13 +1594,13 @@ mod tests { use mockall::predicate::eq; use packages::statistics; - use crate::packages; + use crate::packages::{self, udp_tracker_core}; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ gen_remote_fingerprint, sample_cookie_valid_range, sample_issue_time, MockStatsEventSender, - TrackerConfigurationBuilder, + MockUdpStatsEventSender, TrackerConfigurationBuilder, }; #[tokio::test] @@ -1445,6 +1623,15 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + 
.with(eq(udp_tracker_core::statistics::event::Event::Udp6Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &in_memory_torrent_repository, @@ -1480,6 +1667,7 @@ mod tests { &announce_handler, &whitelist_authorization, &stats_event_sender, + &udp_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -1533,7 +1721,7 @@ mod tests { #[tokio::test] async fn should_return_no_stats_when_the_tracker_does_not_have_any_torrent() { - let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); let remote_addr = sample_ipv4_remote_addr(); @@ -1551,6 +1739,7 @@ mod tests { &request, &core_tracker_services.scrape_handler, &core_tracker_services.stats_event_sender, + &core_udp_tracker_services.udp_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -1600,6 +1789,9 @@ mod tests { let (stats_event_sender, _stats_repository) = statistics::setup::factory(false); let stats_event_sender = Arc::new(stats_event_sender); + let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); @@ -1612,6 +1804,7 @@ mod tests { &request, &scrape_handler, &stats_event_sender, + &udp_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -1633,7 +1826,7 @@ mod tests { #[tokio::test] async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { - let core_tracker_services = initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, _core_udp_tracker_services) = 
initialize_core_tracker_services_for_public_tracker(); let torrent_stats = match_scrape_response( add_a_sample_seeder_and_scrape( @@ -1666,7 +1859,7 @@ mod tests { #[tokio::test] async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { - let core_tracker_services = initialize_core_tracker_services_for_listed_tracker(); + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_listed_tracker(); let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); @@ -1688,6 +1881,7 @@ mod tests { &request, &core_tracker_services.scrape_handler, &core_tracker_services.stats_event_sender, + &core_udp_tracker_services.udp_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -1706,7 +1900,7 @@ mod tests { #[tokio::test] async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { - let core_tracker_services = initialize_core_tracker_services_for_listed_tracker(); + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_listed_tracker(); let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); @@ -1726,6 +1920,7 @@ mod tests { &request, &core_tracker_services.scrape_handler, &core_tracker_services.stats_event_sender, + &core_udp_tracker_services.udp_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -1758,11 +1953,11 @@ mod tests { use packages::statistics; use super::sample_scrape_request; - use crate::packages; + use crate::packages::{self, udp_tracker_core}; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, - sample_ipv4_remote_addr, MockStatsEventSender, + sample_ipv4_remote_addr, MockStatsEventSender, MockUdpStatsEventSender, }; #[tokio::test] @@ -1776,15 +1971,26 @@ mod tests { let stats_event_sender: Arc>> = 
Arc::new(Some(Box::new(stats_event_sender_mock))); + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp4Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + let remote_addr = sample_ipv4_remote_addr(); - let core_tracker_services = initialize_core_tracker_services_for_default_tracker_configuration(); + let (core_tracker_services, _core_udp_tracker_services) = + initialize_core_tracker_services_for_default_tracker_configuration(); handle_scrape( remote_addr, &sample_scrape_request(&remote_addr), &core_tracker_services.scrape_handler, &stats_event_sender, + &udp_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -1800,11 +2006,11 @@ mod tests { use packages::statistics; use super::sample_scrape_request; - use crate::packages; + use crate::packages::{self, udp_tracker_core}; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, - sample_ipv6_remote_addr, MockStatsEventSender, + sample_ipv6_remote_addr, MockStatsEventSender, MockUdpStatsEventSender, }; #[tokio::test] @@ -1818,15 +2024,26 @@ mod tests { let stats_event_sender: Arc>> = Arc::new(Some(Box::new(stats_event_sender_mock))); + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp6Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + let remote_addr = sample_ipv6_remote_addr(); - let core_tracker_services = initialize_core_tracker_services_for_default_tracker_configuration(); 
+ let (core_tracker_services, _core_udp_tracker_services) = + initialize_core_tracker_services_for_default_tracker_configuration(); handle_scrape( remote_addr, &sample_scrape_request(&remote_addr), &core_tracker_services.scrape_handler, &stats_event_sender, + &udp_stats_event_sender, sample_cookie_valid_range(), ) .await diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index 24872771a..863f82e18 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -14,7 +14,7 @@ use tracing::instrument; use super::request_buffer::ActiveRequests; use crate::bootstrap::jobs::Started; use crate::container::UdpTrackerContainer; -use crate::packages; +use crate::packages::{self, udp_tracker_core}; use crate::servers::logging::STARTED_ON; use crate::servers::registar::ServiceHealthCheckJob; use crate::servers::signals::{shutdown_signal_with_message, Halted}; @@ -174,6 +174,21 @@ impl Launcher { } } + if let Some(udp_stats_event_sender) = udp_tracker_container.udp_stats_event_sender.as_deref() { + match req.from.ip() { + IpAddr::V4(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp4Request) + .await; + } + IpAddr::V6(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp6Request) + .await; + } + } + } + if udp_tracker_container.ban_service.read().await.is_banned(&req.from.ip()) { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop continue: (banned ip)"); @@ -183,6 +198,12 @@ impl Launcher { .await; } + if let Some(udp_stats_event_sender) = udp_tracker_container.udp_stats_event_sender.as_deref() { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::UdpRequestBanned) + .await; + } + continue; } @@ -215,6 +236,12 @@ impl Launcher { .send_event(statistics::event::Event::UdpRequestAborted) .await; } + + if let Some(udp_stats_event_sender) = 
udp_tracker_container.udp_stats_event_sender.as_deref() { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::UdpRequestAborted) + .await; + } } } else { tokio::task::yield_now().await; diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index 8a1ca64e3..bbf64dfb9 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -11,7 +11,7 @@ use tracing::{instrument, Level}; use super::bound_socket::BoundSocket; use crate::container::UdpTrackerContainer; -use crate::packages; +use crate::packages::{self, udp_tracker_core}; use crate::servers::udp::handlers::CookieTimeValues; use crate::servers::udp::{handlers, RawRequest}; @@ -68,6 +68,15 @@ impl Processor { Response::Error(_e) => UdpResponseKind::Error, }; + let udp_response_kind = match &response { + Response::Connect(_) => udp_tracker_core::statistics::event::UdpResponseKind::Connect, + Response::AnnounceIpv4(_) | Response::AnnounceIpv6(_) => { + udp_tracker_core::statistics::event::UdpResponseKind::Announce + } + Response::Scrape(_) => udp_tracker_core::statistics::event::UdpResponseKind::Scrape, + Response::Error(_e) => udp_tracker_core::statistics::event::UdpResponseKind::Error, + }; + let mut writer = Cursor::new(Vec::with_capacity(200)); match response.write_bytes(&mut writer) { @@ -103,6 +112,27 @@ impl Processor { } } } + + if let Some(udp_stats_event_sender) = self.udp_tracker_container.udp_stats_event_sender.as_deref() { + match target.ip() { + IpAddr::V4(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp4Response { + kind: udp_response_kind, + req_processing_time, + }) + .await; + } + IpAddr::V6(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp6Response { + kind: udp_response_kind, + req_processing_time, + }) + .await; + } + } + } } Err(error) => tracing::warn!(%bytes_count, %error, ?payload, "failed to send"), }; diff 
--git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 8e2e31f07..c8ecac1fb 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -62,6 +62,7 @@ impl Environment { scrape_handler: app_container.scrape_handler.clone(), whitelist_authorization: app_container.whitelist_authorization.clone(), stats_event_sender: app_container.stats_event_sender.clone(), + udp_stats_event_sender: app_container.udp_stats_event_sender.clone(), ban_service: app_container.ban_service.clone(), }); From 55769383f5b05bf10048de0f346ea04a2f5748aa Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 31 Jan 2025 16:20:04 +0000 Subject: [PATCH 197/802] refactor: [#1228] get metrics from HTTP and UDP Tracker Core Stats Stats have been splited into HTTP and UDP stats. Parallel change, step 3: 1. [x] Start using HTTP Tracker Core Stats 2. [x] Start using UDP Tracker Core Stats 3. [x] Get metrics from HTTP and UDP Tracker Core Stats 4. [ ] Remove deprecated unified HTTP and UDP stats. 
--- src/container.rs | 8 +- src/packages/mod.rs | 1 + src/packages/tracker_api_core/mod.rs | 1 + .../tracker_api_core/statistics/metrics.rs | 87 ++++++++++++ .../tracker_api_core/statistics/mod.rs | 2 + .../tracker_api_core/statistics/services.rs | 127 ++++++++++++++++++ src/servers/apis/v1/context/stats/handlers.rs | 14 +- .../apis/v1/context/stats/resources.rs | 6 +- .../apis/v1/context/stats/responses.rs | 2 +- src/servers/apis/v1/context/stats/routes.rs | 3 +- tests/servers/api/environment.rs | 4 +- 11 files changed, 239 insertions(+), 16 deletions(-) create mode 100644 src/packages/tracker_api_core/mod.rs create mode 100644 src/packages/tracker_api_core/statistics/metrics.rs create mode 100644 src/packages/tracker_api_core/statistics/mod.rs create mode 100644 src/packages/tracker_api_core/statistics/services.rs diff --git a/src/container.rs b/src/container.rs index 7b44bc834..ccd85c7b1 100644 --- a/src/container.rs +++ b/src/container.rs @@ -100,8 +100,8 @@ pub struct HttpApiContainer { pub keys_handler: Arc, pub whitelist_manager: Arc, pub ban_service: Arc>, - pub stats_event_sender: Arc>>, - pub stats_repository: Arc, + pub http_stats_repository: Arc, + pub udp_stats_repository: Arc, } impl HttpApiContainer { @@ -114,8 +114,8 @@ impl HttpApiContainer { keys_handler: app_container.keys_handler.clone(), whitelist_manager: app_container.whitelist_manager.clone(), ban_service: app_container.ban_service.clone(), - stats_event_sender: app_container.stats_event_sender.clone(), - stats_repository: app_container.stats_repository.clone(), + http_stats_repository: app_container.http_stats_repository.clone(), + udp_stats_repository: app_container.udp_stats_repository.clone(), } } } diff --git a/src/packages/mod.rs b/src/packages/mod.rs index 9e0bbec90..dcf4cf428 100644 --- a/src/packages/mod.rs +++ b/src/packages/mod.rs @@ -3,4 +3,5 @@ //! It will be moved to the directory `packages`. 
pub mod http_tracker_core; pub mod statistics; +pub mod tracker_api_core; pub mod udp_tracker_core; diff --git a/src/packages/tracker_api_core/mod.rs b/src/packages/tracker_api_core/mod.rs new file mode 100644 index 000000000..3449ec7b4 --- /dev/null +++ b/src/packages/tracker_api_core/mod.rs @@ -0,0 +1 @@ +pub mod statistics; diff --git a/src/packages/tracker_api_core/statistics/metrics.rs b/src/packages/tracker_api_core/statistics/metrics.rs new file mode 100644 index 000000000..40262efd6 --- /dev/null +++ b/src/packages/tracker_api_core/statistics/metrics.rs @@ -0,0 +1,87 @@ +/// Metrics collected by the tracker. +/// +/// - Number of connections handled +/// - Number of `announce` requests handled +/// - Number of `scrape` request handled +/// +/// These metrics are collected for each connection type: UDP and HTTP +/// and also for each IP version used by the peers: IPv4 and IPv6. +#[derive(Debug, PartialEq, Default)] +pub struct Metrics { + /// Total number of TCP (HTTP tracker) connections from IPv4 peers. + /// Since the HTTP tracker spec does not require a handshake, this metric + /// increases for every HTTP request. + pub tcp4_connections_handled: u64, + + /// Total number of TCP (HTTP tracker) `announce` requests from IPv4 peers. + pub tcp4_announces_handled: u64, + + /// Total number of TCP (HTTP tracker) `scrape` requests from IPv4 peers. + pub tcp4_scrapes_handled: u64, + + /// Total number of TCP (HTTP tracker) connections from IPv6 peers. + pub tcp6_connections_handled: u64, + + /// Total number of TCP (HTTP tracker) `announce` requests from IPv6 peers. + pub tcp6_announces_handled: u64, + + /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. + pub tcp6_scrapes_handled: u64, + + // UDP + /// Total number of UDP (UDP tracker) requests aborted. + pub udp_requests_aborted: u64, + + /// Total number of UDP (UDP tracker) requests banned. + pub udp_requests_banned: u64, + + /// Total number of banned IPs. 
+ pub udp_banned_ips_total: u64, + + /// Average rounded time spent processing UDP connect requests. + pub udp_avg_connect_processing_time_ns: u64, + + /// Average rounded time spent processing UDP announce requests. + pub udp_avg_announce_processing_time_ns: u64, + + /// Average rounded time spent processing UDP scrape requests. + pub udp_avg_scrape_processing_time_ns: u64, + + // UDPv4 + /// Total number of UDP (UDP tracker) requests from IPv4 peers. + pub udp4_requests: u64, + + /// Total number of UDP (UDP tracker) connections from IPv4 peers. + pub udp4_connections_handled: u64, + + /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. + pub udp4_announces_handled: u64, + + /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. + pub udp4_scrapes_handled: u64, + + /// Total number of UDP (UDP tracker) responses from IPv4 peers. + pub udp4_responses: u64, + + /// Total number of UDP (UDP tracker) `error` requests from IPv4 peers. + pub udp4_errors_handled: u64, + + // UDPv6 + /// Total number of UDP (UDP tracker) requests from IPv6 peers. + pub udp6_requests: u64, + + /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. + pub udp6_connections_handled: u64, + + /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. + pub udp6_announces_handled: u64, + + /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. + pub udp6_scrapes_handled: u64, + + /// Total number of UDP (UDP tracker) responses from IPv6 peers. + pub udp6_responses: u64, + + /// Total number of UDP (UDP tracker) `error` requests from IPv6 peers. 
+ pub udp6_errors_handled: u64, +} diff --git a/src/packages/tracker_api_core/statistics/mod.rs b/src/packages/tracker_api_core/statistics/mod.rs new file mode 100644 index 000000000..a3c8a4b0e --- /dev/null +++ b/src/packages/tracker_api_core/statistics/mod.rs @@ -0,0 +1,2 @@ +pub mod metrics; +pub mod services; diff --git a/src/packages/tracker_api_core/statistics/services.rs b/src/packages/tracker_api_core/statistics/services.rs new file mode 100644 index 000000000..bb8e71ab8 --- /dev/null +++ b/src/packages/tracker_api_core/statistics/services.rs @@ -0,0 +1,127 @@ +use std::sync::Arc; + +use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use packages::tracker_api_core::statistics::metrics::Metrics; +use tokio::sync::RwLock; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + +use crate::packages::{self, http_tracker_core, udp_tracker_core}; +use crate::servers::udp::server::banning::BanService; + +/// All the metrics collected by the tracker. +#[derive(Debug, PartialEq)] +pub struct TrackerMetrics { + /// Domain level metrics. + /// + /// General metrics for all torrents (number of seeders, leechers, etcetera) + pub torrents_metrics: TorrentsMetrics, + + /// Application level metrics. Usage statistics/metrics. 
+ /// + /// Metrics about how the tracker is been used (number of udp announce requests, number of http scrape requests, etcetera) + pub protocol_metrics: Metrics, +} + +/// It returns all the [`TrackerMetrics`] +pub async fn get_metrics( + in_memory_torrent_repository: Arc, + ban_service: Arc>, + http_stats_repository: Arc, + udp_stats_repository: Arc, +) -> TrackerMetrics { + let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); + let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); + let http_stats = http_stats_repository.get_stats().await; + let udp_stats = udp_stats_repository.get_stats().await; + + TrackerMetrics { + torrents_metrics, + protocol_metrics: Metrics { + // TCPv4 + tcp4_connections_handled: http_stats.tcp4_connections_handled, + tcp4_announces_handled: http_stats.tcp4_announces_handled, + tcp4_scrapes_handled: http_stats.tcp4_scrapes_handled, + // TCPv6 + tcp6_connections_handled: http_stats.tcp6_connections_handled, + tcp6_announces_handled: http_stats.tcp6_announces_handled, + tcp6_scrapes_handled: http_stats.tcp6_scrapes_handled, + // UDP + udp_requests_aborted: udp_stats.udp_requests_aborted, + udp_requests_banned: udp_stats.udp_requests_banned, + udp_banned_ips_total: udp_banned_ips_total as u64, + udp_avg_connect_processing_time_ns: udp_stats.udp_avg_connect_processing_time_ns, + udp_avg_announce_processing_time_ns: udp_stats.udp_avg_announce_processing_time_ns, + udp_avg_scrape_processing_time_ns: udp_stats.udp_avg_scrape_processing_time_ns, + // UDPv4 + udp4_requests: udp_stats.udp4_requests, + udp4_connections_handled: udp_stats.udp4_connections_handled, + udp4_announces_handled: udp_stats.udp4_announces_handled, + udp4_scrapes_handled: udp_stats.udp4_scrapes_handled, + udp4_responses: udp_stats.udp4_responses, + udp4_errors_handled: udp_stats.udp4_errors_handled, + // UDPv6 + udp6_requests: udp_stats.udp6_requests, + udp6_connections_handled: udp_stats.udp6_connections_handled, + 
udp6_announces_handled: udp_stats.udp6_announces_handled, + udp6_scrapes_handled: udp_stats.udp6_scrapes_handled, + udp6_responses: udp_stats.udp6_responses, + udp6_errors_handled: udp_stats.udp6_errors_handled, + }, + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::{self}; + use tokio::sync::RwLock; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + use torrust_tracker_test_helpers::configuration; + + use crate::packages::tracker_api_core::statistics::metrics::Metrics; + use crate::packages::tracker_api_core::statistics::services::{get_metrics, TrackerMetrics}; + use crate::packages::{http_tracker_core, udp_tracker_core}; + use crate::servers::udp::server::banning::BanService; + use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; + + pub fn tracker_configuration() -> Configuration { + configuration::ephemeral() + } + + #[tokio::test] + async fn the_statistics_service_should_return_the_tracker_metrics() { + let config = tracker_configuration(); + + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + + // HTTP stats + let (_http_stats_event_sender, http_stats_repository) = + http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let http_stats_repository = Arc::new(http_stats_repository); + + // UDP stats + let (_udp_stats_event_sender, udp_stats_repository) = + udp_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let udp_stats_repository = Arc::new(udp_stats_repository); + + let tracker_metrics = get_metrics( + in_memory_torrent_repository.clone(), + ban_service.clone(), + http_stats_repository.clone(), + udp_stats_repository.clone(), + ) + .await; + + 
assert_eq!( + tracker_metrics, + TrackerMetrics { + torrents_metrics: TorrentsMetrics::default(), + protocol_metrics: Metrics::default(), + } + ); + } +} diff --git a/src/servers/apis/v1/context/stats/handlers.rs b/src/servers/apis/v1/context/stats/handlers.rs index ffd4f1787..820f39909 100644 --- a/src/servers/apis/v1/context/stats/handlers.rs +++ b/src/servers/apis/v1/context/stats/handlers.rs @@ -6,13 +6,12 @@ use axum::extract::State; use axum::response::Response; use axum_extra::extract::Query; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use packages::statistics::repository::Repository; use serde::Deserialize; use tokio::sync::RwLock; use super::responses::{metrics_response, stats_response}; -use crate::packages; -use crate::packages::statistics::services::get_metrics; +use crate::packages::tracker_api_core::statistics::services::get_metrics; +use crate::packages::{http_tracker_core, udp_tracker_core}; use crate::servers::udp::server::banning::BanService; #[derive(Deserialize, Debug, Default)] @@ -41,10 +40,15 @@ pub struct QueryParams { /// for more information about this endpoint. #[allow(clippy::type_complexity)] pub async fn get_stats_handler( - State(state): State<(Arc, Arc>, Arc)>, + State(state): State<( + Arc, + Arc>, + Arc, + Arc, + )>, params: Query, ) -> Response { - let metrics = get_metrics(state.0.clone(), state.1.clone(), state.2.clone()).await; + let metrics = get_metrics(state.0.clone(), state.1.clone(), state.2.clone(), state.3.clone()).await; match params.0.format { Some(format) => match format { diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index 5900e293a..8477ca5cb 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -2,7 +2,7 @@ //! API context. 
use serde::{Deserialize, Serialize}; -use crate::packages::statistics::services::TrackerMetrics; +use crate::packages::tracker_api_core::statistics::services::TrackerMetrics; /// It contains all the statistics generated by the tracker. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -118,11 +118,11 @@ impl From for Stats { #[cfg(test)] mod tests { - use packages::statistics::metrics::Metrics; + use packages::tracker_api_core::statistics::metrics::Metrics; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use super::Stats; - use crate::packages::statistics::services::TrackerMetrics; + use crate::packages::tracker_api_core::statistics::services::TrackerMetrics; use crate::packages::{self}; #[test] diff --git a/src/servers/apis/v1/context/stats/responses.rs b/src/servers/apis/v1/context/stats/responses.rs index e3b45a66b..5a71c4235 100644 --- a/src/servers/apis/v1/context/stats/responses.rs +++ b/src/servers/apis/v1/context/stats/responses.rs @@ -3,7 +3,7 @@ use axum::response::{IntoResponse, Json, Response}; use super::resources::Stats; -use crate::packages::statistics::services::TrackerMetrics; +use crate::packages::tracker_api_core::statistics::services::TrackerMetrics; /// `200` response that contains the [`Stats`] resource as json. 
#[must_use] diff --git a/src/servers/apis/v1/context/stats/routes.rs b/src/servers/apis/v1/context/stats/routes.rs index 4c80f110d..e660005ec 100644 --- a/src/servers/apis/v1/context/stats/routes.rs +++ b/src/servers/apis/v1/context/stats/routes.rs @@ -18,7 +18,8 @@ pub fn add(prefix: &str, router: Router, http_api_container: &Arc { keys_handler: app_container.keys_handler.clone(), whitelist_manager: app_container.whitelist_manager.clone(), ban_service: app_container.ban_service.clone(), - stats_event_sender: app_container.stats_event_sender.clone(), - stats_repository: app_container.stats_repository.clone(), + http_stats_repository: app_container.http_stats_repository.clone(), + udp_stats_repository: app_container.udp_stats_repository.clone(), }); Self { From fd8b57a6792977e823f38f2431ef7ad83794196d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 31 Jan 2025 17:02:52 +0000 Subject: [PATCH 198/802] refactor: [#1228] remove deprecated unified HTTP and UDP stats Stats have been split into HTTP and UDP stats. Parallel change, step 4: 1. [x] Start using HTTP Tracker Core Stats 2. [x] Start using UDP Tracker Core Stats 3. [x] Get metrics from HTTP and UDP Tracker Core Stats 4. [x] Remove deprecated unified HTTP and UDP stats. 
--- src/bootstrap/app.rs | 9 +- src/container.rs | 10 +- .../http_tracker_core/statistics/services.rs | 10 +- src/packages/mod.rs | 1 - src/packages/statistics/event/handler.rs | 262 ------------------ src/packages/statistics/event/listener.rs | 11 - src/packages/statistics/event/mod.rs | 51 ---- src/packages/statistics/event/sender.rs | 29 -- src/packages/statistics/keeper.rs | 77 ----- src/packages/statistics/metrics.rs | 87 ------ src/packages/statistics/mod.rs | 6 - src/packages/statistics/repository.rs | 209 -------------- src/packages/statistics/services.rs | 154 ---------- src/packages/statistics/setup.rs | 54 ---- .../udp_tracker_core/statistics/services.rs | 9 +- src/servers/http/v1/handlers/announce.rs | 25 +- src/servers/http/v1/handlers/scrape.rs | 32 +-- src/servers/http/v1/routes.rs | 4 - src/servers/http/v1/services/announce.rs | 73 +---- src/servers/http/v1/services/scrape.rs | 116 +------- src/servers/udp/handlers.rs | 215 ++------------ src/servers/udp/server/launcher.rs | 26 +- src/servers/udp/server/processor.rs | 32 +-- tests/servers/http/environment.rs | 12 +- tests/servers/http/v1/contract.rs | 16 +- tests/servers/udp/contract.rs | 4 +- tests/servers/udp/environment.rs | 12 +- 27 files changed, 78 insertions(+), 1468 deletions(-) delete mode 100644 src/packages/statistics/event/handler.rs delete mode 100644 src/packages/statistics/event/listener.rs delete mode 100644 src/packages/statistics/event/mod.rs delete mode 100644 src/packages/statistics/event/sender.rs delete mode 100644 src/packages/statistics/keeper.rs delete mode 100644 src/packages/statistics/metrics.rs delete mode 100644 src/packages/statistics/mod.rs delete mode 100644 src/packages/statistics/repository.rs delete mode 100644 src/packages/statistics/services.rs delete mode 100644 src/packages/statistics/setup.rs diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 550eb44f3..93bbfe290 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -26,7 +26,6 @@ use 
bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentT use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_tracker_core::whitelist::setup::initialize_whitelist_manager; -use packages::statistics; use tokio::sync::RwLock; use torrust_tracker_clock::static_time; use torrust_tracker_configuration::validator::Validator; @@ -34,13 +33,13 @@ use torrust_tracker_configuration::Configuration; use tracing::instrument; use super::config::initialize_configuration; +use crate::bootstrap; use crate::container::AppContainer; use crate::packages::{http_tracker_core, udp_tracker_core}; use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; use crate::shared::crypto::ephemeral_instance_keys; use crate::shared::crypto::keys::{self, Keeper as _}; -use crate::{bootstrap, packages}; /// It loads the configuration from the environment and builds app container. 
/// @@ -92,10 +91,6 @@ pub fn initialize_global_services(configuration: &Configuration) { pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { let core_config = Arc::new(configuration.core.clone()); - let (stats_event_sender, stats_repository) = statistics::setup::factory(configuration.core.tracker_usage_statistics); - let stats_event_sender = Arc::new(stats_event_sender); - let stats_repository = Arc::new(stats_repository); - // HTTP stats let (http_stats_event_sender, http_stats_repository) = http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); @@ -148,10 +143,8 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { authentication_service, whitelist_authorization, ban_service, - stats_event_sender, http_stats_event_sender, udp_stats_event_sender, - stats_repository, http_stats_repository, udp_stats_repository, whitelist_manager, diff --git a/src/container.rs b/src/container.rs index ccd85c7b1..51c55e533 100644 --- a/src/container.rs +++ b/src/container.rs @@ -10,12 +10,10 @@ use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepo use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist; use bittorrent_tracker_core::whitelist::manager::WhitelistManager; -use packages::statistics::event::sender::Sender; -use packages::statistics::repository::Repository; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, HttpApi, HttpTracker, UdpTracker}; -use crate::packages::{self, http_tracker_core, udp_tracker_core}; +use crate::packages::{http_tracker_core, udp_tracker_core}; use crate::servers::udp::server::banning::BanService; pub struct AppContainer { @@ -27,10 +25,8 @@ pub struct AppContainer { pub authentication_service: Arc, pub whitelist_authorization: Arc, pub ban_service: Arc>, - pub stats_event_sender: Arc>>, pub http_stats_event_sender: Arc>>, pub 
udp_stats_event_sender: Arc>>, - pub stats_repository: Arc, pub http_stats_repository: Arc, pub udp_stats_repository: Arc, pub whitelist_manager: Arc, @@ -45,7 +41,6 @@ pub struct UdpTrackerContainer { pub announce_handler: Arc, pub scrape_handler: Arc, pub whitelist_authorization: Arc, - pub stats_event_sender: Arc>>, pub udp_stats_event_sender: Arc>>, pub ban_service: Arc>, } @@ -59,7 +54,6 @@ impl UdpTrackerContainer { announce_handler: app_container.announce_handler.clone(), scrape_handler: app_container.scrape_handler.clone(), whitelist_authorization: app_container.whitelist_authorization.clone(), - stats_event_sender: app_container.stats_event_sender.clone(), udp_stats_event_sender: app_container.udp_stats_event_sender.clone(), ban_service: app_container.ban_service.clone(), } @@ -72,7 +66,6 @@ pub struct HttpTrackerContainer { pub announce_handler: Arc, pub scrape_handler: Arc, pub whitelist_authorization: Arc, - pub stats_event_sender: Arc>>, pub http_stats_event_sender: Arc>>, pub authentication_service: Arc, } @@ -86,7 +79,6 @@ impl HttpTrackerContainer { announce_handler: app_container.announce_handler.clone(), scrape_handler: app_container.scrape_handler.clone(), whitelist_authorization: app_container.whitelist_authorization.clone(), - stats_event_sender: app_container.stats_event_sender.clone(), http_stats_event_sender: app_container.http_stats_event_sender.clone(), authentication_service: app_container.authentication_service.clone(), } diff --git a/src/packages/http_tracker_core/statistics/services.rs b/src/packages/http_tracker_core/statistics/services.rs index 11e3a70c4..51065bf63 100644 --- a/src/packages/http_tracker_core/statistics/services.rs +++ b/src/packages/http_tracker_core/statistics/services.rs @@ -76,8 +76,8 @@ mod tests { use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_test_helpers::configuration; - use crate::packages::http_tracker_core::statistics; use 
crate::packages::http_tracker_core::statistics::services::{get_metrics, TrackerMetrics}; + use crate::packages::http_tracker_core::{self, statistics}; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() @@ -88,10 +88,12 @@ mod tests { let config = tracker_configuration(); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let (_stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - let stats_repository = Arc::new(stats_repository); - let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), stats_repository.clone()).await; + let (_http_stats_event_sender, http_stats_repository) = + http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let http_stats_repository = Arc::new(http_stats_repository); + + let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), http_stats_repository.clone()).await; assert_eq!( tracker_metrics, diff --git a/src/packages/mod.rs b/src/packages/mod.rs index dcf4cf428..453c3d533 100644 --- a/src/packages/mod.rs +++ b/src/packages/mod.rs @@ -2,6 +2,5 @@ //! //! It will be moved to the directory `packages`. 
pub mod http_tracker_core; -pub mod statistics; pub mod tracker_api_core; pub mod udp_tracker_core; diff --git a/src/packages/statistics/event/handler.rs b/src/packages/statistics/event/handler.rs deleted file mode 100644 index 99339041a..000000000 --- a/src/packages/statistics/event/handler.rs +++ /dev/null @@ -1,262 +0,0 @@ -use crate::packages::statistics::event::{Event, UdpResponseKind}; -use crate::packages::statistics::repository::Repository; - -pub async fn handle_event(event: Event, stats_repository: &Repository) { - match event { - // TCP4 - Event::Tcp4Announce => { - stats_repository.increase_tcp4_announces().await; - stats_repository.increase_tcp4_connections().await; - } - Event::Tcp4Scrape => { - stats_repository.increase_tcp4_scrapes().await; - stats_repository.increase_tcp4_connections().await; - } - - // TCP6 - Event::Tcp6Announce => { - stats_repository.increase_tcp6_announces().await; - stats_repository.increase_tcp6_connections().await; - } - Event::Tcp6Scrape => { - stats_repository.increase_tcp6_scrapes().await; - stats_repository.increase_tcp6_connections().await; - } - - // UDP - Event::UdpRequestAborted => { - stats_repository.increase_udp_requests_aborted().await; - } - Event::UdpRequestBanned => { - stats_repository.increase_udp_requests_banned().await; - } - - // UDP4 - Event::Udp4Request => { - stats_repository.increase_udp4_requests().await; - } - Event::Udp4Connect => { - stats_repository.increase_udp4_connections().await; - } - Event::Udp4Announce => { - stats_repository.increase_udp4_announces().await; - } - Event::Udp4Scrape => { - stats_repository.increase_udp4_scrapes().await; - } - Event::Udp4Response { - kind, - req_processing_time, - } => { - stats_repository.increase_udp4_responses().await; - - match kind { - UdpResponseKind::Connect => { - stats_repository - .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) - .await; - } - UdpResponseKind::Announce => { - stats_repository - 
.recalculate_udp_avg_announce_processing_time_ns(req_processing_time) - .await; - } - UdpResponseKind::Scrape => { - stats_repository - .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) - .await; - } - UdpResponseKind::Error => {} - } - } - Event::Udp4Error => { - stats_repository.increase_udp4_errors().await; - } - - // UDP6 - Event::Udp6Request => { - stats_repository.increase_udp6_requests().await; - } - Event::Udp6Connect => { - stats_repository.increase_udp6_connections().await; - } - Event::Udp6Announce => { - stats_repository.increase_udp6_announces().await; - } - Event::Udp6Scrape => { - stats_repository.increase_udp6_scrapes().await; - } - Event::Udp6Response { - kind: _, - req_processing_time: _, - } => { - stats_repository.increase_udp6_responses().await; - } - Event::Udp6Error => { - stats_repository.increase_udp6_errors().await; - } - } - - tracing::debug!("stats: {:?}", stats_repository.get_stats().await); -} - -#[cfg(test)] -mod tests { - use crate::packages::statistics::event::handler::handle_event; - use crate::packages::statistics::event::Event; - use crate::packages::statistics::repository::Repository; - - #[tokio::test] - async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Tcp4Announce, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.tcp4_announces_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_announce_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Tcp4Announce, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.tcp4_connections_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_tcp4_scrapes_counter_when_it_receives_a_tcp4_scrape_event() { - let stats_repository = Repository::new(); - - 
handle_event(Event::Tcp4Scrape, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.tcp4_scrapes_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_scrape_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Tcp4Scrape, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.tcp4_connections_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_tcp6_announces_counter_when_it_receives_a_tcp6_announce_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Tcp6Announce, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.tcp6_announces_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_announce_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Tcp6Announce, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.tcp6_connections_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_tcp6_scrapes_counter_when_it_receives_a_tcp6_scrape_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Tcp6Scrape, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.tcp6_scrapes_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_scrape_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Tcp6Scrape, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.tcp6_connections_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_connections_counter_when_it_receives_a_udp4_connect_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Udp4Connect, 
&stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_connections_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_announces_counter_when_it_receives_a_udp4_announce_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Udp4Announce, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_announces_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_scrapes_counter_when_it_receives_a_udp4_scrape_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Udp4Scrape, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_scrapes_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp6_connections_counter_when_it_receives_a_udp6_connect_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Udp6Connect, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_connections_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp6_announces_counter_when_it_receives_a_udp6_announce_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Udp6Announce, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_announces_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp6_scrapes_counter_when_it_receives_a_udp6_scrape_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Udp6Scrape, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_scrapes_handled, 1); - } -} diff --git a/src/packages/statistics/event/listener.rs b/src/packages/statistics/event/listener.rs deleted file mode 100644 index 009784fba..000000000 --- a/src/packages/statistics/event/listener.rs +++ /dev/null @@ -1,11 +0,0 @@ 
-use tokio::sync::mpsc; - -use super::handler::handle_event; -use super::Event; -use crate::packages::statistics::repository::Repository; - -pub async fn dispatch_events(mut receiver: mpsc::Receiver, stats_repository: Repository) { - while let Some(event) = receiver.recv().await { - handle_event(event, &stats_repository).await; - } -} diff --git a/src/packages/statistics/event/mod.rs b/src/packages/statistics/event/mod.rs deleted file mode 100644 index 905aa0372..000000000 --- a/src/packages/statistics/event/mod.rs +++ /dev/null @@ -1,51 +0,0 @@ -use std::time::Duration; - -pub mod handler; -pub mod listener; -pub mod sender; - -/// An statistics event. It is used to collect tracker metrics. -/// -/// - `Tcp` prefix means the event was triggered by the HTTP tracker -/// - `Udp` prefix means the event was triggered by the UDP tracker -/// - `4` or `6` prefixes means the IP version used by the peer -/// - Finally the event suffix is the type of request: `announce`, `scrape` or `connection` -/// -/// > NOTE: HTTP trackers do not use `connection` requests. -#[derive(Debug, PartialEq, Eq)] -pub enum Event { - // code-review: consider one single event for request type with data: Event::Announce { scheme: HTTPorUDP, ip_version: V4orV6 } - // Attributes are enums too. 
- Tcp4Announce, - Tcp4Scrape, - Tcp6Announce, - Tcp6Scrape, - UdpRequestAborted, - UdpRequestBanned, - Udp4Request, - Udp4Connect, - Udp4Announce, - Udp4Scrape, - Udp4Response { - kind: UdpResponseKind, - req_processing_time: Duration, - }, - Udp4Error, - Udp6Request, - Udp6Connect, - Udp6Announce, - Udp6Scrape, - Udp6Response { - kind: UdpResponseKind, - req_processing_time: Duration, - }, - Udp6Error, -} - -#[derive(Debug, PartialEq, Eq)] -pub enum UdpResponseKind { - Connect, - Announce, - Scrape, - Error, -} diff --git a/src/packages/statistics/event/sender.rs b/src/packages/statistics/event/sender.rs deleted file mode 100644 index b9b989053..000000000 --- a/src/packages/statistics/event/sender.rs +++ /dev/null @@ -1,29 +0,0 @@ -use futures::future::BoxFuture; -use futures::FutureExt; -#[cfg(test)] -use mockall::{automock, predicate::str}; -use tokio::sync::mpsc; -use tokio::sync::mpsc::error::SendError; - -use super::Event; - -/// A trait to allow sending statistics events -#[cfg_attr(test, automock)] -pub trait Sender: Sync + Send { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; -} - -/// An [`statistics::EventSender`](crate::packages::statistics::event::sender::Sender) implementation. -/// -/// It uses a channel sender to send the statistic events. 
The channel is created by a -/// [`statistics::Keeper`](crate::packages::statistics::keeper::Keeper) -#[allow(clippy::module_name_repetitions)] -pub struct ChannelSender { - pub(crate) sender: mpsc::Sender, -} - -impl Sender for ChannelSender { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { - async move { Some(self.sender.send(event).await) }.boxed() - } -} diff --git a/src/packages/statistics/keeper.rs b/src/packages/statistics/keeper.rs deleted file mode 100644 index 493e61cb2..000000000 --- a/src/packages/statistics/keeper.rs +++ /dev/null @@ -1,77 +0,0 @@ -use tokio::sync::mpsc; - -use super::event::listener::dispatch_events; -use super::event::sender::{ChannelSender, Sender}; -use super::event::Event; -use super::repository::Repository; - -const CHANNEL_BUFFER_SIZE: usize = 65_535; - -/// The service responsible for keeping tracker metrics (listening to statistics events and handle them). -/// -/// It actively listen to new statistics events. When it receives a new event -/// it accordingly increases the counters. 
-pub struct Keeper { - pub repository: Repository, -} - -impl Default for Keeper { - fn default() -> Self { - Self::new() - } -} - -impl Keeper { - #[must_use] - pub fn new() -> Self { - Self { - repository: Repository::new(), - } - } - - #[must_use] - pub fn new_active_instance() -> (Box, Repository) { - let mut stats_tracker = Self::new(); - - let stats_event_sender = stats_tracker.run_event_listener(); - - (stats_event_sender, stats_tracker.repository) - } - - pub fn run_event_listener(&mut self) -> Box { - let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); - - let stats_repository = self.repository.clone(); - - tokio::spawn(async move { dispatch_events(receiver, stats_repository).await }); - - Box::new(ChannelSender { sender }) - } -} - -#[cfg(test)] -mod tests { - use crate::packages::statistics::event::Event; - use crate::packages::statistics::keeper::Keeper; - use crate::packages::statistics::metrics::Metrics; - - #[tokio::test] - async fn should_contain_the_tracker_statistics() { - let stats_tracker = Keeper::new(); - - let stats = stats_tracker.repository.get_stats().await; - - assert_eq!(stats.tcp4_announces_handled, Metrics::default().tcp4_announces_handled); - } - - #[tokio::test] - async fn should_create_an_event_sender_to_send_statistical_events() { - let mut stats_tracker = Keeper::new(); - - let event_sender = stats_tracker.run_event_listener(); - - let result = event_sender.send_event(Event::Udp4Connect).await; - - assert!(result.is_some()); - } -} diff --git a/src/packages/statistics/metrics.rs b/src/packages/statistics/metrics.rs deleted file mode 100644 index 40262efd6..000000000 --- a/src/packages/statistics/metrics.rs +++ /dev/null @@ -1,87 +0,0 @@ -/// Metrics collected by the tracker. 
-/// -/// - Number of connections handled -/// - Number of `announce` requests handled -/// - Number of `scrape` request handled -/// -/// These metrics are collected for each connection type: UDP and HTTP -/// and also for each IP version used by the peers: IPv4 and IPv6. -#[derive(Debug, PartialEq, Default)] -pub struct Metrics { - /// Total number of TCP (HTTP tracker) connections from IPv4 peers. - /// Since the HTTP tracker spec does not require a handshake, this metric - /// increases for every HTTP request. - pub tcp4_connections_handled: u64, - - /// Total number of TCP (HTTP tracker) `announce` requests from IPv4 peers. - pub tcp4_announces_handled: u64, - - /// Total number of TCP (HTTP tracker) `scrape` requests from IPv4 peers. - pub tcp4_scrapes_handled: u64, - - /// Total number of TCP (HTTP tracker) connections from IPv6 peers. - pub tcp6_connections_handled: u64, - - /// Total number of TCP (HTTP tracker) `announce` requests from IPv6 peers. - pub tcp6_announces_handled: u64, - - /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. - pub tcp6_scrapes_handled: u64, - - // UDP - /// Total number of UDP (UDP tracker) requests aborted. - pub udp_requests_aborted: u64, - - /// Total number of UDP (UDP tracker) requests banned. - pub udp_requests_banned: u64, - - /// Total number of banned IPs. - pub udp_banned_ips_total: u64, - - /// Average rounded time spent processing UDP connect requests. - pub udp_avg_connect_processing_time_ns: u64, - - /// Average rounded time spent processing UDP announce requests. - pub udp_avg_announce_processing_time_ns: u64, - - /// Average rounded time spent processing UDP scrape requests. - pub udp_avg_scrape_processing_time_ns: u64, - - // UDPv4 - /// Total number of UDP (UDP tracker) requests from IPv4 peers. - pub udp4_requests: u64, - - /// Total number of UDP (UDP tracker) connections from IPv4 peers. 
- pub udp4_connections_handled: u64, - - /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. - pub udp4_announces_handled: u64, - - /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. - pub udp4_scrapes_handled: u64, - - /// Total number of UDP (UDP tracker) responses from IPv4 peers. - pub udp4_responses: u64, - - /// Total number of UDP (UDP tracker) `error` requests from IPv4 peers. - pub udp4_errors_handled: u64, - - // UDPv6 - /// Total number of UDP (UDP tracker) requests from IPv6 peers. - pub udp6_requests: u64, - - /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. - pub udp6_connections_handled: u64, - - /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. - pub udp6_announces_handled: u64, - - /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. - pub udp6_scrapes_handled: u64, - - /// Total number of UDP (UDP tracker) responses from IPv6 peers. - pub udp6_responses: u64, - - /// Total number of UDP (UDP tracker) `error` requests from IPv6 peers. - pub udp6_errors_handled: u64, -} diff --git a/src/packages/statistics/mod.rs b/src/packages/statistics/mod.rs deleted file mode 100644 index 939a41061..000000000 --- a/src/packages/statistics/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -pub mod event; -pub mod keeper; -pub mod metrics; -pub mod repository; -pub mod services; -pub mod setup; diff --git a/src/packages/statistics/repository.rs b/src/packages/statistics/repository.rs deleted file mode 100644 index ec5100073..000000000 --- a/src/packages/statistics/repository.rs +++ /dev/null @@ -1,209 +0,0 @@ -use std::sync::Arc; -use std::time::Duration; - -use tokio::sync::{RwLock, RwLockReadGuard}; - -use super::metrics::Metrics; - -/// A repository for the tracker metrics. 
-#[derive(Clone)] -pub struct Repository { - pub stats: Arc>, -} - -impl Default for Repository { - fn default() -> Self { - Self::new() - } -} - -impl Repository { - #[must_use] - pub fn new() -> Self { - Self { - stats: Arc::new(RwLock::new(Metrics::default())), - } - } - - pub async fn get_stats(&self) -> RwLockReadGuard<'_, Metrics> { - self.stats.read().await - } - - pub async fn increase_tcp4_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp4_announces_handled += 1; - drop(stats_lock); - } - - pub async fn increase_tcp4_connections(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp4_connections_handled += 1; - drop(stats_lock); - } - - pub async fn increase_tcp4_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp4_scrapes_handled += 1; - drop(stats_lock); - } - - pub async fn increase_tcp6_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp6_announces_handled += 1; - drop(stats_lock); - } - - pub async fn increase_tcp6_connections(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp6_connections_handled += 1; - drop(stats_lock); - } - - pub async fn increase_tcp6_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp6_scrapes_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp_requests_aborted(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp_requests_aborted += 1; - drop(stats_lock); - } - - pub async fn increase_udp_requests_banned(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp_requests_banned += 1; - drop(stats_lock); - } - - pub async fn increase_udp4_requests(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_requests += 1; - drop(stats_lock); - } - - pub async fn increase_udp4_connections(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_connections_handled += 1; - 
drop(stats_lock); - } - - pub async fn increase_udp4_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_announces_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp4_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_scrapes_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp4_responses(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_responses += 1; - drop(stats_lock); - } - - pub async fn increase_udp4_errors(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_errors_handled += 1; - drop(stats_lock); - } - - #[allow(clippy::cast_precision_loss)] - #[allow(clippy::cast_possible_truncation)] - #[allow(clippy::cast_sign_loss)] - pub async fn recalculate_udp_avg_connect_processing_time_ns(&self, req_processing_time: Duration) { - let mut stats_lock = self.stats.write().await; - - let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_connections_handled = (stats_lock.udp4_connections_handled + stats_lock.udp6_connections_handled) as f64; - - let previous_avg = stats_lock.udp_avg_connect_processing_time_ns; - - // Moving average: https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_connections_handled; - - stats_lock.udp_avg_connect_processing_time_ns = new_avg.ceil() as u64; - - drop(stats_lock); - } - - #[allow(clippy::cast_precision_loss)] - #[allow(clippy::cast_possible_truncation)] - #[allow(clippy::cast_sign_loss)] - pub async fn recalculate_udp_avg_announce_processing_time_ns(&self, req_processing_time: Duration) { - let mut stats_lock = self.stats.write().await; - - let req_processing_time = req_processing_time.as_nanos() as f64; - - let udp_announces_handled = (stats_lock.udp4_announces_handled + stats_lock.udp6_announces_handled) as f64; - - let previous_avg = stats_lock.udp_avg_announce_processing_time_ns; - - 
// Moving average: https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_announces_handled; - - stats_lock.udp_avg_announce_processing_time_ns = new_avg.ceil() as u64; - - drop(stats_lock); - } - - #[allow(clippy::cast_precision_loss)] - #[allow(clippy::cast_possible_truncation)] - #[allow(clippy::cast_sign_loss)] - pub async fn recalculate_udp_avg_scrape_processing_time_ns(&self, req_processing_time: Duration) { - let mut stats_lock = self.stats.write().await; - - let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_scrapes_handled = (stats_lock.udp4_scrapes_handled + stats_lock.udp6_scrapes_handled) as f64; - - let previous_avg = stats_lock.udp_avg_scrape_processing_time_ns; - - // Moving average: https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_scrapes_handled; - - stats_lock.udp_avg_scrape_processing_time_ns = new_avg.ceil() as u64; - - drop(stats_lock); - } - - pub async fn increase_udp6_requests(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_requests += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_connections(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_connections_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_announces_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_scrapes_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_responses(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_responses += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_errors(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_errors_handled += 
1; - drop(stats_lock); - } -} diff --git a/src/packages/statistics/services.rs b/src/packages/statistics/services.rs deleted file mode 100644 index 444ba533c..000000000 --- a/src/packages/statistics/services.rs +++ /dev/null @@ -1,154 +0,0 @@ -//! Statistics services. -//! -//! It includes: -//! -//! - A [`factory`](crate::packages::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. -//! - A [`get_metrics`] service to get the tracker [`metrics`](crate::packages::statistics::metrics::Metrics). -//! -//! Tracker metrics are collected using a Publisher-Subscribe pattern. -//! -//! The factory function builds two structs: -//! -//! - An statistics event [`Sender`](crate::packages::statistics::event::sender::Sender) -//! - An statistics [`Repository`] -//! -//! ```text -//! let (stats_event_sender, stats_repository) = factory(tracker_usage_statistics); -//! ``` -//! -//! The statistics repository is responsible for storing the metrics in memory. -//! The statistics event sender allows sending events related to metrics. -//! There is an event listener that is receiving all the events and processing them with an event handler. -//! Then, the event handler updates the metrics depending on the received event. -//! -//! For example, if you send the event [`Event::Udp4Connect`](crate::packages::statistics::event::Event::Udp4Connect): -//! -//! ```text -//! let result = event_sender.send_event(Event::Udp4Connect).await; -//! ``` -//! -//! Eventually the counter for UDP connections from IPv4 peers will be increased. -//! -//! ```rust,no_run -//! pub struct Metrics { -//! // ... -//! pub udp4_connections_handled: u64, // This will be incremented -//! // ... -//! } -//! 
``` -use std::sync::Arc; - -use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use packages::statistics::metrics::Metrics; -use packages::statistics::repository::Repository; -use tokio::sync::RwLock; -use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; - -use crate::packages; -use crate::servers::udp::server::banning::BanService; - -/// All the metrics collected by the tracker. -#[derive(Debug, PartialEq)] -pub struct TrackerMetrics { - /// Domain level metrics. - /// - /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: TorrentsMetrics, - - /// Application level metrics. Usage statistics/metrics. - /// - /// Metrics about how the tracker is been used (number of udp announce requests, number of http scrape requests, etcetera) - pub protocol_metrics: Metrics, -} - -/// It returns all the [`TrackerMetrics`] -pub async fn get_metrics( - in_memory_torrent_repository: Arc, - ban_service: Arc>, - stats_repository: Arc, -) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); - let stats = stats_repository.get_stats().await; - let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); - - TrackerMetrics { - torrents_metrics, - protocol_metrics: Metrics { - // TCPv4 - tcp4_connections_handled: stats.tcp4_connections_handled, - tcp4_announces_handled: stats.tcp4_announces_handled, - tcp4_scrapes_handled: stats.tcp4_scrapes_handled, - // TCPv6 - tcp6_connections_handled: stats.tcp6_connections_handled, - tcp6_announces_handled: stats.tcp6_announces_handled, - tcp6_scrapes_handled: stats.tcp6_scrapes_handled, - // UDP - udp_requests_aborted: stats.udp_requests_aborted, - udp_requests_banned: stats.udp_requests_banned, - udp_banned_ips_total: udp_banned_ips_total as u64, - udp_avg_connect_processing_time_ns: stats.udp_avg_connect_processing_time_ns, - udp_avg_announce_processing_time_ns: 
stats.udp_avg_announce_processing_time_ns, - udp_avg_scrape_processing_time_ns: stats.udp_avg_scrape_processing_time_ns, - // UDPv4 - udp4_requests: stats.udp4_requests, - udp4_connections_handled: stats.udp4_connections_handled, - udp4_announces_handled: stats.udp4_announces_handled, - udp4_scrapes_handled: stats.udp4_scrapes_handled, - udp4_responses: stats.udp4_responses, - udp4_errors_handled: stats.udp4_errors_handled, - // UDPv6 - udp6_requests: stats.udp6_requests, - udp6_connections_handled: stats.udp6_connections_handled, - udp6_announces_handled: stats.udp6_announces_handled, - udp6_scrapes_handled: stats.udp6_scrapes_handled, - udp6_responses: stats.udp6_responses, - udp6_errors_handled: stats.udp6_errors_handled, - }, - } -} - -#[cfg(test)] -mod tests { - use std::sync::Arc; - - use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::{self}; - use tokio::sync::RwLock; - use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; - use torrust_tracker_test_helpers::configuration; - - use crate::packages::statistics; - use crate::packages::statistics::services::{get_metrics, TrackerMetrics}; - use crate::servers::udp::server::banning::BanService; - use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; - - pub fn tracker_configuration() -> Configuration { - configuration::ephemeral() - } - - #[tokio::test] - async fn the_statistics_service_should_return_the_tracker_metrics() { - let config = tracker_configuration(); - - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let (_stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - let stats_repository = Arc::new(stats_repository); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - - let tracker_metrics = get_metrics( - 
in_memory_torrent_repository.clone(), - ban_service.clone(), - stats_repository.clone(), - ) - .await; - - assert_eq!( - tracker_metrics, - TrackerMetrics { - torrents_metrics: TorrentsMetrics::default(), - protocol_metrics: statistics::metrics::Metrics::default(), - } - ); - } -} diff --git a/src/packages/statistics/setup.rs b/src/packages/statistics/setup.rs deleted file mode 100644 index 2a187dcf0..000000000 --- a/src/packages/statistics/setup.rs +++ /dev/null @@ -1,54 +0,0 @@ -//! Setup for the tracker statistics. -//! -//! The [`factory`] function builds the structs needed for handling the tracker metrics. -use crate::packages::statistics; - -/// It builds the structs needed for handling the tracker metrics. -/// -/// It returns: -/// -/// - An statistics event [`Sender`](crate::packages::statistics::event::sender::Sender) that allows you to send events related to statistics. -/// - An statistics [`Repository`](crate::packages::statistics::repository::Repository) which is an in-memory repository for the tracker metrics. -/// -/// When the input argument `tracker_usage_statistics`is false the setup does not run the event listeners, consequently the statistics -/// events are sent are received but not dispatched to the handler. 
-#[must_use] -pub fn factory( - tracker_usage_statistics: bool, -) -> ( - Option>, - statistics::repository::Repository, -) { - let mut stats_event_sender = None; - - let mut stats_tracker = statistics::keeper::Keeper::new(); - - if tracker_usage_statistics { - stats_event_sender = Some(stats_tracker.run_event_listener()); - } - - (stats_event_sender, stats_tracker.repository) -} - -#[cfg(test)] -mod test { - use super::factory; - - #[tokio::test] - async fn should_not_send_any_event_when_statistics_are_disabled() { - let tracker_usage_statistics = false; - - let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); - - assert!(stats_event_sender.is_none()); - } - - #[tokio::test] - async fn should_send_events_when_statistics_are_enabled() { - let tracker_usage_statistics = true; - - let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); - - assert!(stats_event_sender.is_some()); - } -} diff --git a/src/packages/udp_tracker_core/statistics/services.rs b/src/packages/udp_tracker_core/statistics/services.rs index 85ca08e54..80e1d8fb5 100644 --- a/src/packages/udp_tracker_core/statistics/services.rs +++ b/src/packages/udp_tracker_core/statistics/services.rs @@ -110,6 +110,7 @@ mod tests { use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_test_helpers::configuration; + use crate::packages::udp_tracker_core; use crate::packages::udp_tracker_core::statistics; use crate::packages::udp_tracker_core::statistics::services::{get_metrics, TrackerMetrics}; use crate::servers::udp::server::banning::BanService; @@ -124,14 +125,16 @@ mod tests { let config = tracker_configuration(); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let (_stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - let stats_repository = Arc::new(stats_repository); let ban_service = 
Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let (_udp_stats_event_sender, udp_stats_repository) = + udp_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let udp_stats_repository = Arc::new(udp_stats_repository); + let tracker_metrics = get_metrics( in_memory_torrent_repository.clone(), ban_service.clone(), - stats_repository.clone(), + udp_stats_repository.clone(), ) .await; diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 594a11ea1..a6671e14a 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -21,7 +21,6 @@ use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::authentication::Key; use bittorrent_tracker_core::whitelist; use hyper::StatusCode; -use packages::statistics::event::sender::Sender; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; @@ -34,7 +33,7 @@ use crate::servers::http::v1::extractors::authentication_key::Extract as Extract use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; use crate::servers::http::v1::handlers::common::auth; use crate::servers::http::v1::services::{self}; -use crate::{packages, CurrentClock}; +use crate::CurrentClock; /// It handles the `announce` request when the HTTP tracker does not require /// authentication (no PATH `key` parameter required). 
@@ -46,7 +45,6 @@ pub async fn handle_without_key( Arc, Arc, Arc, - Arc>>, Arc>>, )>, ExtractRequest(announce_request): ExtractRequest, @@ -60,7 +58,6 @@ pub async fn handle_without_key( &state.2, &state.3, &state.4, - &state.5, &announce_request, &client_ip_sources, None, @@ -78,7 +75,6 @@ pub async fn handle_with_key( Arc, Arc, Arc, - Arc>>, Arc>>, )>, ExtractRequest(announce_request): ExtractRequest, @@ -93,7 +89,6 @@ pub async fn handle_with_key( &state.2, &state.3, &state.4, - &state.5, &announce_request, &client_ip_sources, Some(key), @@ -111,7 +106,6 @@ async fn handle( announce_handler: &Arc, authentication_service: &Arc, whitelist_authorization: &Arc, - opt_stats_event_sender: &Arc>>, opt_http_stats_event_sender: &Arc>>, announce_request: &Announce, client_ip_sources: &ClientIpSources, @@ -122,7 +116,6 @@ async fn handle( announce_handler, authentication_service, whitelist_authorization, - opt_stats_event_sender, opt_http_stats_event_sender, announce_request, client_ip_sources, @@ -148,7 +141,6 @@ async fn handle_announce( announce_handler: &Arc, authentication_service: &Arc, whitelist_authorization: &Arc, - opt_stats_event_sender: &Arc>>, opt_http_stats_event_sender: &Arc>>, announce_request: &Announce, client_ip_sources: &ClientIpSources, @@ -188,7 +180,6 @@ async fn handle_announce( let announce_data = services::announce::invoke( announce_handler.clone(), - opt_stats_event_sender.clone(), opt_http_stats_event_sender.clone(), announce_request.info_hash, &mut peer, @@ -269,17 +260,14 @@ mod tests { use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; - use packages::statistics; - use packages::statistics::event::sender::Sender; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_test_helpers::configuration; - use 
crate::packages::{self, http_tracker_core}; + use crate::packages::http_tracker_core; struct CoreTrackerServices { pub core_config: Arc, pub announce_handler: Arc, - pub stats_event_sender: Arc>>, pub whitelist_authorization: Arc, pub authentication_service: Arc, } @@ -319,9 +307,6 @@ mod tests { &db_torrent_repository, )); - let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - let stats_event_sender = Arc::new(stats_event_sender); - // HTTP stats let (http_stats_event_sender, http_stats_repository) = http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); @@ -332,7 +317,6 @@ mod tests { CoreTrackerServices { core_config, announce_handler, - stats_event_sender, whitelist_authorization, authentication_service, }, @@ -389,7 +373,6 @@ mod tests { &core_tracker_services.announce_handler, &core_tracker_services.authentication_service, &core_tracker_services.whitelist_authorization, - &core_tracker_services.stats_event_sender, &http_core_tracker_services.http_stats_event_sender, &sample_announce_request(), &sample_client_ip_sources(), @@ -417,7 +400,6 @@ mod tests { &core_tracker_services.announce_handler, &core_tracker_services.authentication_service, &core_tracker_services.whitelist_authorization, - &core_tracker_services.stats_event_sender, &http_core_tracker_services.http_stats_event_sender, &sample_announce_request(), &sample_client_ip_sources(), @@ -447,7 +429,6 @@ mod tests { &core_tracker_services.announce_handler, &core_tracker_services.authentication_service, &core_tracker_services.whitelist_authorization, - &core_tracker_services.stats_event_sender, &http_core_tracker_services.http_stats_event_sender, &announce_request, &sample_client_ip_sources(), @@ -488,7 +469,6 @@ mod tests { &core_tracker_services.announce_handler, &core_tracker_services.authentication_service, &core_tracker_services.whitelist_authorization, - &core_tracker_services.stats_event_sender, 
&http_core_tracker_services.http_stats_event_sender, &sample_announce_request(), &client_ip_sources, @@ -526,7 +506,6 @@ mod tests { &core_tracker_services.announce_handler, &core_tracker_services.authentication_service, &core_tracker_services.whitelist_authorization, - &core_tracker_services.stats_event_sender, &http_core_tracker_services.http_stats_event_sender, &sample_announce_request(), &client_ip_sources, diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index d41a3742f..09af385fb 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -16,11 +16,10 @@ use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::authentication::Key; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use hyper::StatusCode; -use packages::statistics::event::sender::Sender; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; -use crate::packages::{self, http_tracker_core}; +use crate::packages::http_tracker_core; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; use crate::servers::http::v1::extractors::scrape_request::ExtractRequest; @@ -35,7 +34,6 @@ pub async fn handle_without_key( Arc, Arc, Arc, - Arc>>, Arc>>, )>, ExtractRequest(scrape_request): ExtractRequest, @@ -48,7 +46,6 @@ pub async fn handle_without_key( &state.1, &state.2, &state.3, - &state.4, &scrape_request, &client_ip_sources, None, @@ -67,7 +64,6 @@ pub async fn handle_with_key( Arc, Arc, Arc, - Arc>>, Arc>>, )>, ExtractRequest(scrape_request): ExtractRequest, @@ -81,7 +77,6 @@ pub async fn handle_with_key( &state.1, &state.2, &state.3, - &state.4, &scrape_request, &client_ip_sources, Some(key), @@ -94,7 +89,6 @@ async fn handle( core_config: &Arc, scrape_handler: &Arc, authentication_service: &Arc, - 
stats_event_sender: &Arc>>, http_stats_event_sender: &Arc>>, scrape_request: &Scrape, client_ip_sources: &ClientIpSources, @@ -104,7 +98,6 @@ async fn handle( core_config, scrape_handler, authentication_service, - stats_event_sender, http_stats_event_sender, scrape_request, client_ip_sources, @@ -129,7 +122,6 @@ async fn handle_scrape( core_config: &Arc, scrape_handler: &Arc, authentication_service: &Arc, - opt_stats_event_sender: &Arc>>, opt_http_stats_event_sender: &Arc>>, scrape_request: &Scrape, client_ip_sources: &ClientIpSources, @@ -159,20 +151,13 @@ async fn handle_scrape( if return_real_scrape_data { Ok(services::scrape::invoke( scrape_handler, - opt_stats_event_sender, opt_http_stats_event_sender, &scrape_request.info_hashes, &peer_ip, ) .await) } else { - Ok(services::scrape::fake( - opt_stats_event_sender, - opt_http_stats_event_sender, - &scrape_request.info_hashes, - &peer_ip, - ) - .await) + Ok(services::scrape::fake(opt_http_stats_event_sender, &scrape_request.info_hashes, &peer_ip).await) } } @@ -198,16 +183,14 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; - use packages::statistics; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_test_helpers::configuration; - use crate::packages::{self, http_tracker_core}; + use crate::packages::http_tracker_core; struct CoreTrackerServices { pub core_config: Arc, pub scrape_handler: Arc, - pub stats_event_sender: Arc>>, pub authentication_service: Arc, } @@ -240,9 +223,6 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - let (stats_event_sender, _stats_repository) = 
statistics::setup::factory(config.core.tracker_usage_statistics); - let stats_event_sender = Arc::new(stats_event_sender); - // HTTP stats let (http_stats_event_sender, _http_stats_repository) = http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); @@ -252,7 +232,6 @@ mod tests { CoreTrackerServices { core_config, scrape_handler, - stats_event_sender, authentication_service, }, CoreHttpTrackerServices { http_stats_event_sender }, @@ -299,7 +278,6 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.scrape_handler, &core_tracker_services.authentication_service, - &core_tracker_services.stats_event_sender, &core_http_tracker_services.http_stats_event_sender, &scrape_request, &sample_client_ip_sources(), @@ -325,7 +303,6 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.scrape_handler, &core_tracker_services.authentication_service, - &core_tracker_services.stats_event_sender, &core_http_tracker_services.http_stats_event_sender, &scrape_request, &sample_client_ip_sources(), @@ -357,7 +334,6 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.scrape_handler, &core_tracker_services.authentication_service, - &core_tracker_services.stats_event_sender, &core_http_tracker_services.http_stats_event_sender, &scrape_request, &sample_client_ip_sources(), @@ -393,7 +369,6 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.scrape_handler, &core_tracker_services.authentication_service, - &core_tracker_services.stats_event_sender, &core_http_tracker_services.http_stats_event_sender, &sample_scrape_request(), &client_ip_sources, @@ -430,7 +405,6 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.scrape_handler, &core_tracker_services.authentication_service, - &core_tracker_services.stats_event_sender, &core_http_tracker_services.http_stats_event_sender, &sample_scrape_request(), &client_ip_sources, diff --git a/src/servers/http/v1/routes.rs 
b/src/servers/http/v1/routes.rs index 7caccb673..73f4e5f29 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -43,7 +43,6 @@ pub fn router(http_tracker_container: Arc, server_socket_a http_tracker_container.announce_handler.clone(), http_tracker_container.authentication_service.clone(), http_tracker_container.whitelist_authorization.clone(), - http_tracker_container.stats_event_sender.clone(), http_tracker_container.http_stats_event_sender.clone(), )), ) @@ -54,7 +53,6 @@ pub fn router(http_tracker_container: Arc, server_socket_a http_tracker_container.announce_handler.clone(), http_tracker_container.authentication_service.clone(), http_tracker_container.whitelist_authorization.clone(), - http_tracker_container.stats_event_sender.clone(), http_tracker_container.http_stats_event_sender.clone(), )), ) @@ -65,7 +63,6 @@ pub fn router(http_tracker_container: Arc, server_socket_a http_tracker_container.core_config.clone(), http_tracker_container.scrape_handler.clone(), http_tracker_container.authentication_service.clone(), - http_tracker_container.stats_event_sender.clone(), http_tracker_container.http_stats_event_sender.clone(), )), ) @@ -75,7 +72,6 @@ pub fn router(http_tracker_container: Arc, server_socket_a http_tracker_container.core_config.clone(), http_tracker_container.scrape_handler.clone(), http_tracker_container.authentication_service.clone(), - http_tracker_container.stats_event_sender.clone(), http_tracker_container.http_stats_event_sender.clone(), )), ) diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index e7170c7e1..bc21657af 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -5,19 +5,17 @@ //! It delegates the `announce` logic to the [`AnnounceHandler`] and it returns //! the [`AnnounceData`]. //! -//! It also sends an [`statistics::event::Event`] +//! It also sends an [`http_tracker_core::statistics::event::Event`] //! 
because events are specific for the HTTP tracker. use std::net::IpAddr; use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; -use packages::statistics; -use packages::statistics::event::sender::Sender; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; -use crate::packages::{self, http_tracker_core}; +use crate::packages::http_tracker_core; /// The HTTP tracker `announce` service. /// @@ -31,7 +29,6 @@ use crate::packages::{self, http_tracker_core}; /// > each `announce` request. pub async fn invoke( announce_handler: Arc, - opt_stats_event_sender: Arc>>, opt_http_stats_event_sender: Arc>>, info_hash: InfoHash, peer: &mut peer::Peer, @@ -42,17 +39,6 @@ pub async fn invoke( // The tracker could change the original peer ip let announce_data = announce_handler.announce(&info_hash, peer, &original_peer_ip, peers_wanted); - if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { - match original_peer_ip { - IpAddr::V4(_) => { - stats_event_sender.send_event(statistics::event::Event::Tcp4Announce).await; - } - IpAddr::V6(_) => { - stats_event_sender.send_event(statistics::event::Event::Tcp6Announce).await; - } - } - } - if let Some(http_stats_event_sender) = opt_http_stats_event_sender.as_deref() { match original_peer_ip { IpAddr::V4(_) => { @@ -81,8 +67,6 @@ mod tests { use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; - use packages::statistics; - use packages::statistics::event::sender::Sender; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; @@ -90,7 +74,6 @@ mod tests { struct CoreTrackerServices { pub core_config: 
Arc, pub announce_handler: Arc, - pub stats_event_sender: Arc>>, } struct CoreHttpTrackerServices { @@ -111,9 +94,6 @@ mod tests { &db_torrent_repository, )); - let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - let stats_event_sender = Arc::new(stats_event_sender); - // HTTP stats let (http_stats_event_sender, http_stats_repository) = http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); @@ -124,7 +104,6 @@ mod tests { CoreTrackerServices { core_config, announce_handler, - stats_event_sender, }, CoreHttpTrackerServices { http_stats_event_sender }, ) @@ -157,17 +136,9 @@ mod tests { use futures::future::BoxFuture; use mockall::mock; - use packages::statistics::event::Event; use tokio::sync::mpsc::error::SendError; - use crate::packages::{self, http_tracker_core}; - - mock! { - StatsEventSender {} - impl Sender for StatsEventSender { - fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; - } - } + use crate::packages::http_tracker_core; mock! 
{ HttpStatsEventSender {} @@ -187,17 +158,16 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use mockall::predicate::eq; - use packages::statistics; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; - use crate::packages::{self, http_tracker_core}; + use crate::packages::http_tracker_core; use crate::servers::http::v1::services::announce::invoke; use crate::servers::http::v1::services::announce::tests::{ - initialize_core_tracker_services, sample_peer, MockHttpStatsEventSender, MockStatsEventSender, + initialize_core_tracker_services, sample_peer, MockHttpStatsEventSender, }; fn initialize_announce_handler() -> Arc { @@ -222,7 +192,6 @@ mod tests { let announce_data = invoke( core_tracker_services.announce_handler.clone(), - core_tracker_services.stats_event_sender.clone(), core_http_tracker_services.http_stats_event_sender.clone(), sample_info_hash(), &mut peer, @@ -245,15 +214,6 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_4_announce_event_when_the_peer_uses_ipv4() { - let mut stats_event_sender_mock = MockStatsEventSender::new(); - stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Tcp4Announce)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender: Arc>> = - Arc::new(Some(Box::new(stats_event_sender_mock))); - let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() @@ -269,7 +229,6 @@ mod tests { let _announce_data = invoke( announce_handler, - stats_event_sender, http_stats_event_sender, sample_info_hash(), &mut peer, @@ -299,16 +258,6 @@ mod 
tests { { // Tracker changes the peer IP to the tracker external IP when the peer is using the loopback IP. - // Assert that the event sent is a TCP4 event - let mut stats_event_sender_mock = MockStatsEventSender::new(); - stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Tcp4Announce)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender: Arc>> = - Arc::new(Some(Box::new(stats_event_sender_mock))); - // Assert that the event sent is a TCP4 event let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock @@ -325,7 +274,6 @@ mod tests { let _announce_data = invoke( announce_handler, - stats_event_sender, http_stats_event_sender, sample_info_hash(), &mut peer, @@ -337,16 +285,6 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_6_announce_event_when_the_peer_uses_ipv6_even_if_the_tracker_changes_the_peer_ip_to_ipv4() { - let mut stats_event_sender_mock = MockStatsEventSender::new(); - stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Tcp6Announce)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender: Arc>> = - Arc::new(Some(Box::new(stats_event_sender_mock))); - - // Assert that the event sent is a TCP4 event let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() @@ -362,7 +300,6 @@ mod tests { let _announce_data = invoke( announce_handler, - stats_event_sender, http_stats_event_sender, sample_info_hash(), &mut peer, diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index e745609aa..5325b188b 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -5,18 +5,16 @@ //! It delegates the `scrape` logic to the [`ScrapeHandler`] and it returns the //! [`ScrapeData`]. //! -//! 
It also sends an [`statistics::event::Event`] +//! It also sends an [`http_tracker_core::statistics::event::Event`] //! because events are specific for the HTTP tracker. use std::net::IpAddr; use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use packages::statistics::event::sender::Sender; -use packages::statistics::{self}; use torrust_tracker_primitives::core::ScrapeData; -use crate::packages::{self, http_tracker_core}; +use crate::packages::http_tracker_core; /// The HTTP tracker `scrape` service. /// @@ -30,14 +28,13 @@ use crate::packages::{self, http_tracker_core}; /// > each `scrape` request. pub async fn invoke( scrape_handler: &Arc, - opt_stats_event_sender: &Arc>>, opt_http_stats_event_sender: &Arc>>, info_hashes: &Vec, original_peer_ip: &IpAddr, ) -> ScrapeData { let scrape_data = scrape_handler.scrape(info_hashes).await; - send_scrape_event(original_peer_ip, opt_stats_event_sender, opt_http_stats_event_sender).await; + send_scrape_event(original_peer_ip, opt_http_stats_event_sender).await; scrape_data } @@ -49,32 +46,19 @@ pub async fn invoke( /// /// > **NOTICE**: tracker statistics are not updated in this case. 
pub async fn fake( - opt_stats_event_sender: &Arc>>, opt_http_stats_event_sender: &Arc>>, info_hashes: &Vec, original_peer_ip: &IpAddr, ) -> ScrapeData { - send_scrape_event(original_peer_ip, opt_stats_event_sender, opt_http_stats_event_sender).await; + send_scrape_event(original_peer_ip, opt_http_stats_event_sender).await; ScrapeData::zeroed(info_hashes) } async fn send_scrape_event( original_peer_ip: &IpAddr, - opt_stats_event_sender: &Arc>>, opt_http_stats_event_sender: &Arc>>, ) { - if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { - match original_peer_ip { - IpAddr::V4(_) => { - stats_event_sender.send_event(statistics::event::Event::Tcp4Scrape).await; - } - IpAddr::V6(_) => { - stats_event_sender.send_event(statistics::event::Event::Tcp6Scrape).await; - } - } - } - if let Some(http_stats_event_sender) = opt_http_stats_event_sender.as_deref() { match original_peer_ip { IpAddr::V4(_) => { @@ -109,13 +93,11 @@ mod tests { use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; use mockall::mock; - use packages::statistics::event::sender::Sender; - use packages::statistics::event::Event; use tokio::sync::mpsc::error::SendError; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; - use crate::packages::{self, http_tracker_core}; + use crate::packages::http_tracker_core; fn initialize_announce_and_scrape_handlers_for_public_tracker() -> (Arc, Arc) { let config = configuration::ephemeral_public(); @@ -161,13 +143,6 @@ mod tests { Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)) } - mock! { - StatsEventSender {} - impl Sender for StatsEventSender { - fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; - } - } - mock! 
{ HttpStatsEventSender {} impl http_tracker_core::statistics::event::sender::Sender for HttpStatsEventSender { @@ -183,7 +158,6 @@ mod tests { use bittorrent_tracker_core::announce_handler::PeersWanted; use mockall::predicate::eq; - use packages::statistics; use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; @@ -191,14 +165,11 @@ mod tests { use crate::servers::http::v1::services::scrape::invoke; use crate::servers::http::v1::services::scrape::tests::{ initialize_announce_and_scrape_handlers_for_public_tracker, initialize_scrape_handler, sample_info_hash, - sample_info_hashes, sample_peer, MockHttpStatsEventSender, MockStatsEventSender, + sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; #[tokio::test] async fn it_should_return_the_scrape_data_for_a_torrent() { - let (stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); - let stats_event_sender = Arc::new(stats_event_sender); - let (http_stats_event_sender, _http_stats_repository) = packages::http_tracker_core::statistics::setup::factory(false); let http_stats_event_sender = Arc::new(http_stats_event_sender); @@ -213,14 +184,7 @@ mod tests { let original_peer_ip = peer.ip(); announce_handler.announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::All); - let scrape_data = invoke( - &scrape_handler, - &stats_event_sender, - &http_stats_event_sender, - &info_hashes, - &original_peer_ip, - ) - .await; + let scrape_data = invoke(&scrape_handler, &http_stats_event_sender, &info_hashes, &original_peer_ip).await; let mut expected_scrape_data = ScrapeData::empty(); expected_scrape_data.add_file( @@ -237,15 +201,6 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_4_scrape_event_when_the_peer_uses_ipv4() { - let mut stats_event_sender_mock = MockStatsEventSender::new(); - stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Tcp4Scrape)) - .times(1) - .returning(|_| 
Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender: Arc>> = - Arc::new(Some(Box::new(stats_event_sender_mock))); - let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() @@ -259,27 +214,11 @@ mod tests { let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); - invoke( - &scrape_handler, - &stats_event_sender, - &http_stats_event_sender, - &sample_info_hashes(), - &peer_ip, - ) - .await; + invoke(&scrape_handler, &http_stats_event_sender, &sample_info_hashes(), &peer_ip).await; } #[tokio::test] async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { - let mut stats_event_sender_mock = MockStatsEventSender::new(); - stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Tcp6Scrape)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender: Arc>> = - Arc::new(Some(Box::new(stats_event_sender_mock))); - let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() @@ -293,14 +232,7 @@ mod tests { let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); - invoke( - &scrape_handler, - &stats_event_sender, - &http_stats_event_sender, - &sample_info_hashes(), - &peer_ip, - ) - .await; + invoke(&scrape_handler, &http_stats_event_sender, &sample_info_hashes(), &peer_ip).await; } } @@ -312,21 +244,17 @@ mod tests { use bittorrent_tracker_core::announce_handler::PeersWanted; use mockall::predicate::eq; - use packages::statistics; use torrust_tracker_primitives::core::ScrapeData; use crate::packages::{self, http_tracker_core}; use crate::servers::http::v1::services::scrape::fake; use crate::servers::http::v1::services::scrape::tests::{ initialize_announce_and_scrape_handlers_for_public_tracker, sample_info_hash, sample_info_hashes, sample_peer, - MockHttpStatsEventSender, MockStatsEventSender, + 
MockHttpStatsEventSender, }; #[tokio::test] async fn it_should_always_return_the_zeroed_scrape_data_for_a_torrent() { - let (stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); - let stats_event_sender = Arc::new(stats_event_sender); - let (http_stats_event_sender, _http_stats_repository) = packages::http_tracker_core::statistics::setup::factory(false); let http_stats_event_sender = Arc::new(http_stats_event_sender); @@ -341,7 +269,7 @@ mod tests { let original_peer_ip = peer.ip(); announce_handler.announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::All); - let scrape_data = fake(&stats_event_sender, &http_stats_event_sender, &info_hashes, &original_peer_ip).await; + let scrape_data = fake(&http_stats_event_sender, &info_hashes, &original_peer_ip).await; let expected_scrape_data = ScrapeData::zeroed(&info_hashes); @@ -350,15 +278,6 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_4_scrape_event_when_the_peer_uses_ipv4() { - let mut stats_event_sender_mock = MockStatsEventSender::new(); - stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Tcp4Scrape)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender: Arc>> = - Arc::new(Some(Box::new(stats_event_sender_mock))); - let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() @@ -370,20 +289,11 @@ mod tests { let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); - fake(&stats_event_sender, &http_stats_event_sender, &sample_info_hashes(), &peer_ip).await; + fake(&http_stats_event_sender, &sample_info_hashes(), &peer_ip).await; } #[tokio::test] async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { - let mut stats_event_sender_mock = MockStatsEventSender::new(); - stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Tcp6Scrape)) - .times(1) - .returning(|_| 
Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender: Arc>> = - Arc::new(Some(Box::new(stats_event_sender_mock))); - let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() @@ -395,7 +305,7 @@ mod tests { let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); - fake(&stats_event_sender, &http_stats_event_sender, &sample_info_hashes(), &peer_ip).await; + fake(&http_stats_event_sender, &sample_info_hashes(), &peer_ip).await; } } } diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 4c943516e..59833b715 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -14,7 +14,6 @@ use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use bittorrent_tracker_core::whitelist; -use packages::statistics::event::sender::Sender; use torrust_tracker_clock::clock::Time as _; use torrust_tracker_configuration::Core; use tracing::{instrument, Level}; @@ -24,11 +23,11 @@ use zerocopy::network_endian::I32; use super::connection_cookie::{check, make}; use super::RawRequest; use crate::container::UdpTrackerContainer; -use crate::packages::{statistics, udp_tracker_core}; +use crate::packages::udp_tracker_core; use crate::servers::udp::error::Error; use crate::servers::udp::{peer_builder, UDP_TRACKER_LOG_TARGET}; use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; -use crate::{packages, CurrentClock}; +use crate::CurrentClock; #[derive(Debug, Clone, PartialEq)] pub(super) struct CookieTimeValues { @@ -98,7 +97,6 @@ pub(crate) async fn handle_packet( udp_request.from, local_addr, request_id, - &udp_tracker_container.stats_event_sender, &udp_tracker_container.udp_stats_event_sender, cookie_time_values.valid_range.clone(), &e, @@ -112,7 +110,6 @@ pub(crate) async fn handle_packet( 
udp_request.from, local_addr, request_id, - &udp_tracker_container.stats_event_sender, &udp_tracker_container.udp_stats_event_sender, cookie_time_values.valid_range.clone(), &e, @@ -146,7 +143,6 @@ pub async fn handle_request( Request::Connect(connect_request) => Ok(handle_connect( remote_addr, &connect_request, - &udp_tracker_container.stats_event_sender, &udp_tracker_container.udp_stats_event_sender, cookie_time_values.issue_time, ) @@ -158,7 +154,6 @@ pub async fn handle_request( &udp_tracker_container.core_config, &udp_tracker_container.announce_handler, &udp_tracker_container.whitelist_authorization, - &udp_tracker_container.stats_event_sender, &udp_tracker_container.udp_stats_event_sender, cookie_time_values.valid_range, ) @@ -169,7 +164,6 @@ pub async fn handle_request( remote_addr, &scrape_request, &udp_tracker_container.scrape_handler, - &udp_tracker_container.stats_event_sender, &udp_tracker_container.udp_stats_event_sender, cookie_time_values.valid_range, ) @@ -184,11 +178,10 @@ pub async fn handle_request( /// # Errors /// /// This function does not ever return an error. 
-#[instrument(fields(transaction_id), skip(opt_stats_event_sender, opt_udp_stats_event_sender), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id), skip(opt_udp_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_connect( remote_addr: SocketAddr, request: &ConnectRequest, - opt_stats_event_sender: &Arc>>, opt_udp_stats_event_sender: &Arc>>, cookie_issue_time: f64, ) -> Response { @@ -203,17 +196,6 @@ pub async fn handle_connect( connection_id, }; - if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { - match remote_addr { - SocketAddr::V4(_) => { - stats_event_sender.send_event(statistics::event::Event::Udp4Connect).await; - } - SocketAddr::V6(_) => { - stats_event_sender.send_event(statistics::event::Event::Udp6Connect).await; - } - } - } - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { match remote_addr { SocketAddr::V4(_) => { @@ -239,14 +221,13 @@ pub async fn handle_connect( /// /// If a error happens in the `handle_announce` function, it will just return the `ServerError`. 
#[allow(clippy::too_many_arguments)] -#[instrument(fields(transaction_id, connection_id, info_hash), skip(announce_handler, whitelist_authorization, opt_stats_event_sender, opt_udp_stats_event_sender), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id, connection_id, info_hash), skip(announce_handler, whitelist_authorization, opt_udp_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_announce( remote_addr: SocketAddr, request: &AnnounceRequest, core_config: &Arc, announce_handler: &Arc, whitelist_authorization: &Arc, - opt_stats_event_sender: &Arc>>, opt_udp_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { @@ -281,17 +262,6 @@ pub async fn handle_announce( let response = announce_handler.announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted); - if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { - match remote_client_ip { - IpAddr::V4(_) => { - stats_event_sender.send_event(statistics::event::Event::Udp4Announce).await; - } - IpAddr::V6(_) => { - stats_event_sender.send_event(statistics::event::Event::Udp6Announce).await; - } - } - } - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { match remote_client_ip { IpAddr::V4(_) => { @@ -367,12 +337,11 @@ pub async fn handle_announce( /// # Errors /// /// This function does not ever return an error. 
-#[instrument(fields(transaction_id, connection_id), skip(scrape_handler, opt_stats_event_sender, opt_udp_stats_event_sender), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id, connection_id), skip(scrape_handler, opt_udp_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_scrape( remote_addr: SocketAddr, request: &ScrapeRequest, scrape_handler: &Arc, - opt_stats_event_sender: &Arc>>, opt_udp_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { @@ -414,17 +383,6 @@ pub async fn handle_scrape( torrent_stats.push(scrape_entry); } - if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { - match remote_addr { - SocketAddr::V4(_) => { - stats_event_sender.send_event(statistics::event::Event::Udp4Scrape).await; - } - SocketAddr::V6(_) => { - stats_event_sender.send_event(statistics::event::Event::Udp6Scrape).await; - } - } - } - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { match remote_addr { SocketAddr::V4(_) => { @@ -449,12 +407,11 @@ pub async fn handle_scrape( } #[allow(clippy::too_many_arguments)] -#[instrument(fields(transaction_id), skip(opt_stats_event_sender, opt_udp_stats_event_sender), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id), skip(opt_udp_stats_event_sender), ret(level = Level::TRACE))] async fn handle_error( remote_addr: SocketAddr, local_addr: SocketAddr, request_id: Uuid, - opt_stats_event_sender: &Arc>>, opt_udp_stats_event_sender: &Arc>>, cookie_valid_range: Range, e: &Error, @@ -492,17 +449,6 @@ async fn handle_error( }; if e.1.is_some() { - if let Some(stats_event_sender) = opt_stats_event_sender.as_deref() { - match remote_addr { - SocketAddr::V4(_) => { - stats_event_sender.send_event(statistics::event::Event::Udp4Error).await; - } - SocketAddr::V6(_) => { - stats_event_sender.send_event(statistics::event::Event::Udp6Error).await; - } - } - } - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { match 
remote_addr { SocketAddr::V4(_) => { @@ -549,8 +495,6 @@ mod tests { use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; use mockall::mock; - use packages::statistics::event::sender::Sender; - use packages::statistics::event::Event; use tokio::sync::mpsc::error::SendError; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{Configuration, Core}; @@ -558,7 +502,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::gen_remote_fingerprint; - use crate::packages::{statistics, udp_tracker_core}; + use crate::packages::udp_tracker_core; use crate::{packages, CurrentClock}; struct CoreTrackerServices { @@ -566,7 +510,6 @@ mod tests { pub announce_handler: Arc, pub scrape_handler: Arc, pub in_memory_torrent_repository: Arc, - pub stats_event_sender: Arc>>, pub in_memory_whitelist: Arc, pub whitelist_authorization: Arc, } @@ -605,9 +548,6 @@ mod tests { )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - let (stats_event_sender, _stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - let stats_event_sender = Arc::new(stats_event_sender); - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); let udp_stats_event_sender = Arc::new(udp_stats_event_sender); @@ -617,7 +557,6 @@ mod tests { announce_handler, scrape_handler, in_memory_torrent_repository, - stats_event_sender, in_memory_whitelist, whitelist_authorization, }, @@ -719,13 +658,6 @@ mod tests { } } - mock! { - StatsEventSender {} - impl Sender for StatsEventSender { - fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; - } - } - mock! 
{ UdpStatsEventSender {} impl udp_tracker_core::statistics::event::sender::Sender for UdpStatsEventSender { @@ -740,7 +672,6 @@ mod tests { use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; use mockall::predicate::eq; - use packages::statistics; use super::{sample_ipv4_socket_address, sample_ipv6_remote_addr}; use crate::packages::{self, udp_tracker_core}; @@ -748,7 +679,7 @@ mod tests { use crate::servers::udp::handlers::handle_connect; use crate::servers::udp::handlers::tests::{ sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv6_remote_addr_fingerprint, sample_issue_time, - MockStatsEventSender, MockUdpStatsEventSender, + MockUdpStatsEventSender, }; fn sample_connect_request() -> ConnectRequest { @@ -759,9 +690,6 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { - let (stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); - let stats_event_sender = Arc::new(stats_event_sender); - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); let udp_stats_event_sender = Arc::new(udp_stats_event_sender); @@ -772,7 +700,6 @@ mod tests { let response = handle_connect( sample_ipv4_remote_addr(), &request, - &stats_event_sender, &udp_stats_event_sender, sample_issue_time(), ) @@ -789,9 +716,6 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id() { - let (stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); - let stats_event_sender = Arc::new(stats_event_sender); - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); let udp_stats_event_sender = Arc::new(udp_stats_event_sender); @@ -802,7 +726,6 @@ mod tests { let response = handle_connect( sample_ipv4_remote_addr(), &request, - &stats_event_sender, 
&udp_stats_event_sender, sample_issue_time(), ) @@ -819,9 +742,6 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { - let (stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); - let stats_event_sender = Arc::new(stats_event_sender); - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); let udp_stats_event_sender = Arc::new(udp_stats_event_sender); @@ -832,7 +752,6 @@ mod tests { let response = handle_connect( sample_ipv6_remote_addr(), &request, - &stats_event_sender, &udp_stats_event_sender, sample_issue_time(), ) @@ -849,15 +768,6 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { - let mut stats_event_sender_mock = MockStatsEventSender::new(); - stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Udp4Connect)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender: Arc>> = - Arc::new(Some(Box::new(stats_event_sender_mock))); - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() @@ -872,7 +782,6 @@ mod tests { handle_connect( client_socket_address, &sample_connect_request(), - &stats_event_sender, &udp_stats_event_sender, sample_issue_time(), ) @@ -881,15 +790,6 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { - let mut stats_event_sender_mock = MockStatsEventSender::new(); - stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Udp6Connect)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender: Arc>> = - Arc::new(Some(Box::new(stats_event_sender_mock))); - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); 
udp_stats_event_sender_mock .expect_send_event() @@ -902,7 +802,6 @@ mod tests { handle_connect( sample_ipv6_remote_addr(), &sample_connect_request(), - &stats_event_sender, &udp_stats_event_sender, sample_issue_time(), ) @@ -999,13 +898,13 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_configuration::Core; - use crate::packages::{self, statistics, udp_tracker_core}; + use crate::packages::{self, udp_tracker_core}; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ gen_remote_fingerprint, initialize_core_tracker_services_for_default_tracker_configuration, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_socket_address, - sample_issue_time, MockStatsEventSender, MockUdpStatsEventSender, TorrentPeerBuilder, + sample_issue_time, MockUdpStatsEventSender, TorrentPeerBuilder, }; use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; @@ -1034,7 +933,6 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, - &core_tracker_services.stats_event_sender, &core_udp_tracker_services.udp_stats_event_sender, sample_cookie_valid_range(), ) @@ -1069,7 +967,6 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, - &core_tracker_services.stats_event_sender, &core_udp_tracker_services.udp_stats_event_sender, sample_cookie_valid_range(), ) @@ -1123,7 +1020,6 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, - &core_tracker_services.stats_event_sender, &core_udp_tracker_services.udp_stats_event_sender, sample_cookie_valid_range(), ) @@ -1158,9 +1054,6 @@ mod tests { announce_handler: Arc, whitelist_authorization: Arc, ) -> 
Response { - let (stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); - let stats_event_sender = Arc::new(stats_event_sender); - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); let udp_stats_event_sender = Arc::new(udp_stats_event_sender); @@ -1176,7 +1069,6 @@ mod tests { &core_config, &announce_handler, &whitelist_authorization, - &stats_event_sender, &udp_stats_event_sender, sample_cookie_valid_range(), ) @@ -1208,15 +1100,6 @@ mod tests { #[tokio::test] async fn should_send_the_upd4_announce_event() { - let mut stats_event_sender_mock = MockStatsEventSender::new(); - stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Udp4Announce)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender: Arc>> = - Arc::new(Some(Box::new(stats_event_sender_mock))); - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() @@ -1235,7 +1118,6 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, - &stats_event_sender, &udp_stats_event_sender, sample_cookie_valid_range(), ) @@ -1283,7 +1165,6 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, - &core_tracker_services.stats_event_sender, &core_udp_tracker_services.udp_stats_event_sender, sample_cookie_valid_range(), ) @@ -1322,13 +1203,13 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_configuration::Core; - use crate::packages::{self, statistics, udp_tracker_core}; + use crate::packages::{self, udp_tracker_core}; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ gen_remote_fingerprint, 
initialize_core_tracker_services_for_default_tracker_configuration, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, - sample_issue_time, MockStatsEventSender, MockUdpStatsEventSender, TorrentPeerBuilder, + sample_issue_time, MockUdpStatsEventSender, TorrentPeerBuilder, }; use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; @@ -1358,7 +1239,6 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, - &core_tracker_services.stats_event_sender, &core_udp_tracker_services.udp_stats_event_sender, sample_cookie_valid_range(), ) @@ -1396,7 +1276,6 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, - &core_tracker_services.stats_event_sender, &core_udp_tracker_services.udp_stats_event_sender, sample_cookie_valid_range(), ) @@ -1450,7 +1329,6 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, - &core_tracker_services.stats_event_sender, &core_udp_tracker_services.udp_stats_event_sender, sample_cookie_valid_range(), ) @@ -1485,9 +1363,6 @@ mod tests { announce_handler: Arc, whitelist_authorization: Arc, ) -> Response { - let (stats_event_sender, _stats_repository) = packages::statistics::setup::factory(false); - let stats_event_sender = Arc::new(stats_event_sender); - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); let udp_stats_event_sender = Arc::new(udp_stats_event_sender); @@ -1506,7 +1381,6 @@ mod tests { &core_config, &announce_handler, &whitelist_authorization, - &stats_event_sender, &udp_stats_event_sender, sample_cookie_valid_range(), ) @@ -1538,15 +1412,6 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_announce_event() { - let mut 
stats_event_sender_mock = MockStatsEventSender::new(); - stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Udp6Announce)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender: Arc>> = - Arc::new(Some(Box::new(stats_event_sender_mock))); - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() @@ -1571,7 +1436,6 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, - &stats_event_sender, &udp_stats_event_sender, sample_cookie_valid_range(), ) @@ -1592,15 +1456,14 @@ mod tests { use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use mockall::predicate::eq; - use packages::statistics; - use crate::packages::{self, udp_tracker_core}; + use crate::packages::udp_tracker_core; use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ - gen_remote_fingerprint, sample_cookie_valid_range, sample_issue_time, MockStatsEventSender, - MockUdpStatsEventSender, TrackerConfigurationBuilder, + gen_remote_fingerprint, sample_cookie_valid_range, sample_issue_time, MockUdpStatsEventSender, + TrackerConfigurationBuilder, }; #[tokio::test] @@ -1614,15 +1477,6 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - let mut stats_event_sender_mock = MockStatsEventSender::new(); - stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Udp6Announce)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let 
stats_event_sender: Arc>> = - Arc::new(Some(Box::new(stats_event_sender_mock))); - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() @@ -1666,7 +1520,6 @@ mod tests { &core_config, &announce_handler, &whitelist_authorization, - &stats_event_sender, &udp_stats_event_sender, sample_cookie_valid_range(), ) @@ -1700,7 +1553,6 @@ mod tests { }; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use packages::statistics; use super::{gen_remote_fingerprint, TorrentPeerBuilder}; use crate::packages; @@ -1738,7 +1590,6 @@ mod tests { remote_addr, &request, &core_tracker_services.scrape_handler, - &core_tracker_services.stats_event_sender, &core_udp_tracker_services.udp_stats_event_sender, sample_cookie_valid_range(), ) @@ -1786,9 +1637,6 @@ mod tests { in_memory_torrent_repository: Arc, scrape_handler: Arc, ) -> Response { - let (stats_event_sender, _stats_repository) = statistics::setup::factory(false); - let stats_event_sender = Arc::new(stats_event_sender); - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); let udp_stats_event_sender = Arc::new(udp_stats_event_sender); @@ -1803,7 +1651,6 @@ mod tests { remote_addr, &request, &scrape_handler, - &stats_event_sender, &udp_stats_event_sender, sample_cookie_valid_range(), ) @@ -1880,7 +1727,6 @@ mod tests { remote_addr, &request, &core_tracker_services.scrape_handler, - &core_tracker_services.stats_event_sender, &core_udp_tracker_services.udp_stats_event_sender, sample_cookie_valid_range(), ) @@ -1919,7 +1765,6 @@ mod tests { remote_addr, &request, &core_tracker_services.scrape_handler, - &core_tracker_services.stats_event_sender, &core_udp_tracker_services.udp_stats_event_sender, sample_cookie_valid_range(), ) @@ -1950,27 +1795,17 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; 
- use packages::statistics; use super::sample_scrape_request; - use crate::packages::{self, udp_tracker_core}; + use crate::packages::udp_tracker_core; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, - sample_ipv4_remote_addr, MockStatsEventSender, MockUdpStatsEventSender, + sample_ipv4_remote_addr, MockUdpStatsEventSender, }; #[tokio::test] async fn should_send_the_upd4_scrape_event() { - let mut stats_event_sender_mock = MockStatsEventSender::new(); - stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Udp4Scrape)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender: Arc>> = - Arc::new(Some(Box::new(stats_event_sender_mock))); - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() @@ -1989,7 +1824,6 @@ mod tests { remote_addr, &sample_scrape_request(&remote_addr), &core_tracker_services.scrape_handler, - &stats_event_sender, &udp_stats_event_sender, sample_cookie_valid_range(), ) @@ -2003,27 +1837,17 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; - use packages::statistics; use super::sample_scrape_request; - use crate::packages::{self, udp_tracker_core}; + use crate::packages::udp_tracker_core; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, - sample_ipv6_remote_addr, MockStatsEventSender, MockUdpStatsEventSender, + sample_ipv6_remote_addr, MockUdpStatsEventSender, }; #[tokio::test] async fn should_send_the_upd6_scrape_event() { - let mut stats_event_sender_mock = MockStatsEventSender::new(); - stats_event_sender_mock - .expect_send_event() - .with(eq(statistics::event::Event::Udp6Scrape)) - .times(1) - .returning(|_| 
Box::pin(future::ready(Some(Ok(()))))); - let stats_event_sender: Arc>> = - Arc::new(Some(Box::new(stats_event_sender_mock))); - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() @@ -2042,7 +1866,6 @@ mod tests { remote_addr, &sample_scrape_request(&remote_addr), &core_tracker_services.scrape_handler, - &stats_event_sender, &udp_stats_event_sender, sample_cookie_valid_range(), ) diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index 863f82e18..e640749c6 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -5,7 +5,6 @@ use std::time::Duration; use bittorrent_tracker_client::udp::client::check; use derive_more::Constructor; use futures_util::StreamExt; -use packages::statistics; use tokio::select; use tokio::sync::oneshot; use tokio::time::interval; @@ -14,7 +13,7 @@ use tracing::instrument; use super::request_buffer::ActiveRequests; use crate::bootstrap::jobs::Started; use crate::container::UdpTrackerContainer; -use crate::packages::{self, udp_tracker_core}; +use crate::packages::udp_tracker_core; use crate::servers::logging::STARTED_ON; use crate::servers::registar::ServiceHealthCheckJob; use crate::servers::signals::{shutdown_signal_with_message, Halted}; @@ -163,17 +162,6 @@ impl Launcher { } }; - if let Some(stats_event_sender) = udp_tracker_container.stats_event_sender.as_deref() { - match req.from.ip() { - IpAddr::V4(_) => { - stats_event_sender.send_event(statistics::event::Event::Udp4Request).await; - } - IpAddr::V6(_) => { - stats_event_sender.send_event(statistics::event::Event::Udp6Request).await; - } - } - } - if let Some(udp_stats_event_sender) = udp_tracker_container.udp_stats_event_sender.as_deref() { match req.from.ip() { IpAddr::V4(_) => { @@ -192,12 +180,6 @@ impl Launcher { if udp_tracker_container.ban_service.read().await.is_banned(&req.from.ip()) { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, 
local_addr, "Udp::run_udp_server::loop continue: (banned ip)"); - if let Some(stats_event_sender) = udp_tracker_container.stats_event_sender.as_deref() { - stats_event_sender - .send_event(statistics::event::Event::UdpRequestBanned) - .await; - } - if let Some(udp_stats_event_sender) = udp_tracker_container.udp_stats_event_sender.as_deref() { udp_stats_event_sender .send_event(udp_tracker_core::statistics::event::Event::UdpRequestBanned) @@ -231,12 +213,6 @@ impl Launcher { if old_request_aborted { // Evicted task from active requests buffer was aborted. - if let Some(stats_event_sender) = udp_tracker_container.stats_event_sender.as_deref() { - stats_event_sender - .send_event(statistics::event::Event::UdpRequestAborted) - .await; - } - if let Some(udp_stats_event_sender) = udp_tracker_container.udp_stats_event_sender.as_deref() { udp_stats_event_sender .send_event(udp_tracker_core::statistics::event::Event::UdpRequestAborted) diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index bbf64dfb9..dc55833c2 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -4,14 +4,12 @@ use std::sync::Arc; use std::time::Duration; use aquatic_udp_protocol::Response; -use packages::statistics; -use packages::statistics::event::UdpResponseKind; use tokio::time::Instant; use tracing::{instrument, Level}; use super::bound_socket::BoundSocket; use crate::container::UdpTrackerContainer; -use crate::packages::{self, udp_tracker_core}; +use crate::packages::udp_tracker_core; use crate::servers::udp::handlers::CookieTimeValues; use crate::servers::udp::{handlers, RawRequest}; @@ -61,13 +59,6 @@ impl Processor { Response::Error(e) => format!("Error: {e:?}"), }; - let response_kind = match &response { - Response::Connect(_) => UdpResponseKind::Connect, - Response::AnnounceIpv4(_) | Response::AnnounceIpv6(_) => UdpResponseKind::Announce, - Response::Scrape(_) => UdpResponseKind::Scrape, - Response::Error(_e) => 
UdpResponseKind::Error, - }; - let udp_response_kind = match &response { Response::Connect(_) => udp_tracker_core::statistics::event::UdpResponseKind::Connect, Response::AnnounceIpv4(_) | Response::AnnounceIpv6(_) => { @@ -92,27 +83,6 @@ impl Processor { tracing::debug!(%bytes_count, %sent_bytes, "sent {response_type}"); } - if let Some(stats_event_sender) = self.udp_tracker_container.stats_event_sender.as_deref() { - match target.ip() { - IpAddr::V4(_) => { - stats_event_sender - .send_event(statistics::event::Event::Udp4Response { - kind: response_kind, - req_processing_time, - }) - .await; - } - IpAddr::V6(_) => { - stats_event_sender - .send_event(statistics::event::Event::Udp6Response { - kind: response_kind, - req_processing_time, - }) - .await; - } - } - } - if let Some(udp_stats_event_sender) = self.udp_tracker_container.udp_stats_event_sender.as_deref() { match target.ip() { IpAddr::V4(_) => { diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 17013250a..97ca13e95 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -6,12 +6,11 @@ use bittorrent_tracker_core::databases::Database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist::manager::WhitelistManager; use futures::executor::block_on; -use packages::statistics::repository::Repository; use torrust_tracker_configuration::Configuration; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; use torrust_tracker_lib::container::HttpTrackerContainer; -use torrust_tracker_lib::packages; +use torrust_tracker_lib::packages::http_tracker_core; use torrust_tracker_lib::servers::http::server::{HttpServer, Launcher, Running, Stopped}; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_primitives::peer; @@ -22,7 +21,7 @@ pub struct Environment { 
pub database: Arc>, pub in_memory_torrent_repository: Arc, pub keys_handler: Arc, - pub stats_repository: Arc, + pub http_stats_repository: Arc, pub whitelist_manager: Arc, pub registar: Registar, @@ -61,7 +60,6 @@ impl Environment { announce_handler: app_container.announce_handler.clone(), scrape_handler: app_container.scrape_handler.clone(), whitelist_authorization: app_container.whitelist_authorization.clone(), - stats_event_sender: app_container.stats_event_sender.clone(), http_stats_event_sender: app_container.http_stats_event_sender.clone(), authentication_service: app_container.authentication_service.clone(), }); @@ -72,7 +70,7 @@ impl Environment { database: app_container.database.clone(), in_memory_torrent_repository: app_container.in_memory_torrent_repository.clone(), keys_handler: app_container.keys_handler.clone(), - stats_repository: app_container.stats_repository.clone(), + http_stats_repository: app_container.http_stats_repository.clone(), whitelist_manager: app_container.whitelist_manager.clone(), registar: Registar::default(), @@ -88,7 +86,7 @@ impl Environment { database: self.database.clone(), in_memory_torrent_repository: self.in_memory_torrent_repository.clone(), keys_handler: self.keys_handler.clone(), - stats_repository: self.stats_repository.clone(), + http_stats_repository: self.http_stats_repository.clone(), whitelist_manager: self.whitelist_manager.clone(), registar: self.registar.clone(), @@ -113,7 +111,7 @@ impl Environment { database: self.database, in_memory_torrent_repository: self.in_memory_torrent_repository, keys_handler: self.keys_handler, - stats_repository: self.stats_repository, + http_stats_repository: self.http_stats_repository, whitelist_manager: self.whitelist_manager, registar: Registar::default(), diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index be603161a..48c98fa02 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -680,7 +680,7 @@ mod 
for_all_config_modes { .announce(&QueryBuilder::default().query()) .await; - let stats = env.stats_repository.get_stats().await; + let stats = env.http_stats_repository.get_stats().await; assert_eq!(stats.tcp4_connections_handled, 1); @@ -706,7 +706,7 @@ mod for_all_config_modes { .announce(&QueryBuilder::default().query()) .await; - let stats = env.stats_repository.get_stats().await; + let stats = env.http_stats_repository.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 1); @@ -731,7 +731,7 @@ mod for_all_config_modes { ) .await; - let stats = env.stats_repository.get_stats().await; + let stats = env.http_stats_repository.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 0); @@ -750,7 +750,7 @@ mod for_all_config_modes { .announce(&QueryBuilder::default().query()) .await; - let stats = env.stats_repository.get_stats().await; + let stats = env.http_stats_repository.get_stats().await; assert_eq!(stats.tcp4_announces_handled, 1); @@ -776,7 +776,7 @@ mod for_all_config_modes { .announce(&QueryBuilder::default().query()) .await; - let stats = env.stats_repository.get_stats().await; + let stats = env.http_stats_repository.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 1); @@ -801,7 +801,7 @@ mod for_all_config_modes { ) .await; - let stats = env.stats_repository.get_stats().await; + let stats = env.http_stats_repository.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 0); @@ -1173,7 +1173,7 @@ mod for_all_config_modes { ) .await; - let stats = env.stats_repository.get_stats().await; + let stats = env.http_stats_repository.get_stats().await; assert_eq!(stats.tcp4_scrapes_handled, 1); @@ -1205,7 +1205,7 @@ mod for_all_config_modes { ) .await; - let stats = env.stats_repository.get_stats().await; + let stats = env.http_stats_repository.get_stats().await; assert_eq!(stats.tcp6_scrapes_handled, 1); diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index d38356ef4..f6e0589f8 100644 --- 
a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -270,7 +270,7 @@ mod receiving_an_announce_request { info_hash, ); - let udp_requests_banned_before = env.stats_repository.get_stats().await.udp_requests_banned; + let udp_requests_banned_before = env.udp_stats_repository.get_stats().await.udp_requests_banned; // This should return a timeout error match client.send(announce_request.into()).await { @@ -280,7 +280,7 @@ mod receiving_an_announce_request { assert!(client.receive().await.is_err()); - let udp_requests_banned_after = env.stats_repository.get_stats().await.udp_requests_banned; + let udp_requests_banned_after = env.udp_stats_repository.get_stats().await.udp_requests_banned; let udp_banned_ips_total_after = ban_service.read().await.get_banned_ips_total(); // UDP counter for banned requests should be increased by 1 diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index c8ecac1fb..24ce7bab2 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -4,11 +4,10 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::databases::Database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use packages::statistics::repository::Repository; use torrust_tracker_configuration::{Configuration, DEFAULT_TIMEOUT}; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::container::UdpTrackerContainer; -use torrust_tracker_lib::packages; +use torrust_tracker_lib::packages::udp_tracker_core; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_lib::servers::udp::server::spawner::Spawner; use torrust_tracker_lib::servers::udp::server::states::{Running, Stopped}; @@ -23,7 +22,7 @@ where pub database: Arc>, pub in_memory_torrent_repository: Arc, - pub stats_repository: Arc, + pub udp_stats_repository: Arc, pub registar: Registar, 
pub server: Server, @@ -61,7 +60,6 @@ impl Environment { announce_handler: app_container.announce_handler.clone(), scrape_handler: app_container.scrape_handler.clone(), whitelist_authorization: app_container.whitelist_authorization.clone(), - stats_event_sender: app_container.stats_event_sender.clone(), udp_stats_event_sender: app_container.udp_stats_event_sender.clone(), ban_service: app_container.ban_service.clone(), }); @@ -71,7 +69,7 @@ impl Environment { database: app_container.database.clone(), in_memory_torrent_repository: app_container.in_memory_torrent_repository.clone(), - stats_repository: app_container.stats_repository.clone(), + udp_stats_repository: app_container.udp_stats_repository.clone(), registar: Registar::default(), server, @@ -87,7 +85,7 @@ impl Environment { database: self.database.clone(), in_memory_torrent_repository: self.in_memory_torrent_repository.clone(), - stats_repository: self.stats_repository.clone(), + udp_stats_repository: self.udp_stats_repository.clone(), registar: self.registar.clone(), server: self @@ -117,7 +115,7 @@ impl Environment { database: self.database, in_memory_torrent_repository: self.in_memory_torrent_repository, - stats_repository: self.stats_repository, + udp_stats_repository: self.udp_stats_repository, registar: Registar::default(), server: stopped.expect("it stop the udp tracker service"), From 8efda62509a43edd03ca53f63c981b7d8c642dcb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Feb 2025 10:32:51 +0000 Subject: [PATCH 199/802] test: [#1231] add tests for InMemoryKeyRepository --- packages/tracker-core/.gitignore | 1 + .../key/repository/in_memory.rs | 103 ++++++++++++++++++ 2 files changed, 104 insertions(+) create mode 100644 packages/tracker-core/.gitignore diff --git a/packages/tracker-core/.gitignore b/packages/tracker-core/.gitignore new file mode 100644 index 000000000..c5cb1afac --- /dev/null +++ b/packages/tracker-core/.gitignore @@ -0,0 +1 @@ +.coverage \ No newline at end of file diff 
--git a/packages/tracker-core/src/authentication/key/repository/in_memory.rs b/packages/tracker-core/src/authentication/key/repository/in_memory.rs index 41d34604b..0a2fc50cd 100644 --- a/packages/tracker-core/src/authentication/key/repository/in_memory.rs +++ b/packages/tracker-core/src/authentication/key/repository/in_memory.rs @@ -39,3 +39,106 @@ impl InMemoryKeyRepository { } } } + +#[cfg(test)] +mod tests { + + mod the_in_memory_key_repository_should { + use std::time::Duration; + + use crate::authentication::key::repository::in_memory::InMemoryKeyRepository; + use crate::authentication::key::Key; + use crate::authentication::PeerKey; + + #[tokio::test] + async fn insert_a_new_peer_key() { + let repository = InMemoryKeyRepository::default(); + + let new_peer_key = PeerKey { + key: Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), + valid_until: Some(Duration::new(9999, 0)), + }; + + repository.insert(&new_peer_key).await; + + let peer_key = repository.get(&new_peer_key.key).await; + + assert_eq!(peer_key, Some(new_peer_key)); + } + + #[tokio::test] + async fn remove_a_new_peer_key() { + let repository = InMemoryKeyRepository::default(); + + let new_peer_key = PeerKey { + key: Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), + valid_until: Some(Duration::new(9999, 0)), + }; + + repository.insert(&new_peer_key).await; + + repository.remove(&new_peer_key.key).await; + + let peer_key = repository.get(&new_peer_key.key).await; + + assert_eq!(peer_key, None); + } + + #[tokio::test] + async fn get_a_new_peer_key_by_its_internal_key() { + let repository = InMemoryKeyRepository::default(); + + let expected_peer_key = PeerKey { + key: Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), + valid_until: Some(Duration::new(9999, 0)), + }; + + repository.insert(&expected_peer_key).await; + + let peer_key = repository.get(&expected_peer_key.key).await; + + assert_eq!(peer_key, Some(expected_peer_key)); + } + + #[tokio::test] + async fn clear_all_peer_keys() { + 
let repository = InMemoryKeyRepository::default(); + + let new_peer_key = PeerKey { + key: Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), + valid_until: Some(Duration::new(9999, 0)), + }; + + repository.insert(&new_peer_key).await; + + repository.clear().await; + + let peer_key = repository.get(&new_peer_key.key).await; + + assert_eq!(peer_key, None); + } + + #[tokio::test] + async fn reset_the_peer_keys_with_a_new_list_of_keys() { + let repository = InMemoryKeyRepository::default(); + + let old_peer_key = PeerKey { + key: Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), + valid_until: Some(Duration::new(9999, 0)), + }; + + repository.insert(&old_peer_key).await; + + let new_peer_key = PeerKey { + key: Key::new("kqdVKHlKKWXzAideqI5gvjBP4jdbe5dW").unwrap(), + valid_until: Some(Duration::new(9999, 0)), + }; + + repository.reset_with(vec![new_peer_key.clone()]).await; + + let peer_key = repository.get(&new_peer_key.key).await; + + assert_eq!(peer_key, Some(new_peer_key)); + } + } +} From 04ee425fcc6f69b84bc2ca02d69ad41c9cfd8c41 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Feb 2025 11:18:56 +0000 Subject: [PATCH 200/802] chore: add todo --- packages/tracker-core/src/databases/setup.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/tracker-core/src/databases/setup.rs b/packages/tracker-core/src/databases/setup.rs index 728913e05..8d24c63b1 100644 --- a/packages/tracker-core/src/databases/setup.rs +++ b/packages/tracker-core/src/databases/setup.rs @@ -11,6 +11,8 @@ use super::Database; /// Will panic if database cannot be initialized. 
#[must_use] pub fn initialize_database(config: &Configuration) -> Arc> { + // todo: inject only core configuration + let driver = match config.core.database.driver { database::Driver::Sqlite3 => Driver::Sqlite3, database::Driver::MySQL => Driver::MySQL, From 7c8d2941f4152d85f7077a62f184d19a750f2c06 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Feb 2025 11:29:05 +0000 Subject: [PATCH 201/802] test: add tests for DatabaseKeyRepository --- .../key/repository/persisted.rs | 73 +++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/packages/tracker-core/src/authentication/key/repository/persisted.rs b/packages/tracker-core/src/authentication/key/repository/persisted.rs index 322ab2913..b3948ca4f 100644 --- a/packages/tracker-core/src/authentication/key/repository/persisted.rs +++ b/packages/tracker-core/src/authentication/key/repository/persisted.rs @@ -46,3 +46,76 @@ impl DatabaseKeyRepository { Ok(keys) } } + +#[cfg(test)] +mod tests { + + mod the_persisted_key_repository_should { + + use std::time::Duration; + + use torrust_tracker_test_helpers::configuration; + + use crate::authentication::key::repository::persisted::DatabaseKeyRepository; + use crate::authentication::{Key, PeerKey}; + use crate::databases::setup::initialize_database; + + #[test] + fn persist_a_new_peer_key() { + let configuration = configuration::ephemeral_public(); + + let database = initialize_database(&configuration); + + let repository = DatabaseKeyRepository::new(&database); + + let peer_key = PeerKey { + key: Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), + valid_until: Some(Duration::new(9999, 0)), + }; + + let result = repository.add(&peer_key); + + assert!(result.is_ok()); + } + + #[test] + fn remove_a_persisted_peer_key() { + let configuration = configuration::ephemeral_public(); + + let database = initialize_database(&configuration); + + let repository = DatabaseKeyRepository::new(&database); + + let peer_key = PeerKey { + key: 
Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), + valid_until: Some(Duration::new(9999, 0)), + }; + + let _unused = repository.add(&peer_key); + + let result = repository.remove(&peer_key.key); + + assert!(result.is_ok()); + } + + #[test] + fn load_all_persisted_peer_keys() { + let configuration = configuration::ephemeral_public(); + + let database = initialize_database(&configuration); + + let repository = DatabaseKeyRepository::new(&database); + + let peer_key = PeerKey { + key: Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), + valid_until: Some(Duration::new(9999, 0)), + }; + + let _unused = repository.add(&peer_key); + + let keys = repository.load_keys().unwrap(); + + assert_eq!(keys, vec!(peer_key)); + } + } +} From f485a525ccd0e717eef0ae005061079d16092b71 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Feb 2025 11:51:46 +0000 Subject: [PATCH 202/802] refactor: inject only core config in tracker core DB setup --- packages/tracker-core/src/announce_handler.rs | 2 +- .../tracker-core/src/authentication/handler.rs | 2 +- .../authentication/key/repository/persisted.rs | 6 +++--- packages/tracker-core/src/authentication/mod.rs | 2 +- packages/tracker-core/src/core_tests.rs | 2 +- packages/tracker-core/src/databases/setup.rs | 15 ++++++--------- .../tracker-core/src/whitelist/whitelist_tests.rs | 2 +- src/bootstrap/app.rs | 2 +- src/servers/http/v1/handlers/announce.rs | 2 +- src/servers/http/v1/services/announce.rs | 4 ++-- src/servers/http/v1/services/scrape.rs | 2 +- src/servers/udp/handlers.rs | 4 ++-- 12 files changed, 21 insertions(+), 24 deletions(-) diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 877555d1c..fac1df5b2 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -425,7 +425,7 @@ mod tests { config.core.tracker_policy.persistent_torrent_completed_stat = true; - let database = initialize_database(&config); + 
let database = initialize_database(&config.core); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); let torrents_manager = Arc::new(TorrentsManager::new( diff --git a/packages/tracker-core/src/authentication/handler.rs b/packages/tracker-core/src/authentication/handler.rs index 1d74c7dfa..10ba3ecbb 100644 --- a/packages/tracker-core/src/authentication/handler.rs +++ b/packages/tracker-core/src/authentication/handler.rs @@ -266,7 +266,7 @@ mod tests { } fn instantiate_keys_handler_with_configuration(config: &Configuration) -> KeysHandler { - let database = initialize_database(config); + let database = initialize_database(&config.core); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); diff --git a/packages/tracker-core/src/authentication/key/repository/persisted.rs b/packages/tracker-core/src/authentication/key/repository/persisted.rs index b3948ca4f..60db056a5 100644 --- a/packages/tracker-core/src/authentication/key/repository/persisted.rs +++ b/packages/tracker-core/src/authentication/key/repository/persisted.rs @@ -64,7 +64,7 @@ mod tests { fn persist_a_new_peer_key() { let configuration = configuration::ephemeral_public(); - let database = initialize_database(&configuration); + let database = initialize_database(&configuration.core); let repository = DatabaseKeyRepository::new(&database); @@ -82,7 +82,7 @@ mod tests { fn remove_a_persisted_peer_key() { let configuration = configuration::ephemeral_public(); - let database = initialize_database(&configuration); + let database = initialize_database(&configuration.core); let repository = DatabaseKeyRepository::new(&database); @@ -102,7 +102,7 @@ mod tests { fn load_all_persisted_peer_keys() { let configuration = configuration::ephemeral_public(); - let database = 
initialize_database(&configuration); + let database = initialize_database(&configuration.core); let repository = DatabaseKeyRepository::new(&database); diff --git a/packages/tracker-core/src/authentication/mod.rs b/packages/tracker-core/src/authentication/mod.rs index 9609733da..4197f4323 100644 --- a/packages/tracker-core/src/authentication/mod.rs +++ b/packages/tracker-core/src/authentication/mod.rs @@ -49,7 +49,7 @@ mod tests { fn instantiate_keys_manager_and_authentication_with_configuration( config: &Configuration, ) -> (Arc, Arc) { - let database = initialize_database(config); + let database = initialize_database(&config.core); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); let authentication_service = Arc::new(service::AuthenticationService::new(&config.core, &in_memory_key_repository)); diff --git a/packages/tracker-core/src/core_tests.rs b/packages/tracker-core/src/core_tests.rs index 35d5fb9b7..f6b47acd0 100644 --- a/packages/tracker-core/src/core_tests.rs +++ b/packages/tracker-core/src/core_tests.rs @@ -84,7 +84,7 @@ pub fn incomplete_peer() -> Peer { #[must_use] pub fn initialize_handlers(config: &Configuration) -> (Arc, Arc) { - let database = initialize_database(config); + let database = initialize_database(&config.core); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(whitelist::authorization::WhitelistAuthorization::new( &config.core, diff --git a/packages/tracker-core/src/databases/setup.rs b/packages/tracker-core/src/databases/setup.rs index 8d24c63b1..73ff23feb 100644 --- a/packages/tracker-core/src/databases/setup.rs +++ b/packages/tracker-core/src/databases/setup.rs @@ -1,7 +1,6 @@ use std::sync::Arc; -use torrust_tracker_configuration::v2_0_0::database; -use torrust_tracker_configuration::Configuration; +use torrust_tracker_configuration::Core; use super::driver::{self, Driver}; use 
super::Database; @@ -10,13 +9,11 @@ use super::Database; /// /// Will panic if database cannot be initialized. #[must_use] -pub fn initialize_database(config: &Configuration) -> Arc> { - // todo: inject only core configuration - - let driver = match config.core.database.driver { - database::Driver::Sqlite3 => Driver::Sqlite3, - database::Driver::MySQL => Driver::MySQL, +pub fn initialize_database(config: &Core) -> Arc> { + let driver = match config.database.driver { + torrust_tracker_configuration::Driver::Sqlite3 => Driver::Sqlite3, + torrust_tracker_configuration::Driver::MySQL => Driver::MySQL, }; - Arc::new(driver::build(&driver, &config.core.database.path).expect("Database driver build failed.")) + Arc::new(driver::build(&driver, &config.database.path).expect("Database driver build failed.")) } diff --git a/packages/tracker-core/src/whitelist/whitelist_tests.rs b/packages/tracker-core/src/whitelist/whitelist_tests.rs index 33f5a97f7..d2fd275f2 100644 --- a/packages/tracker-core/src/whitelist/whitelist_tests.rs +++ b/packages/tracker-core/src/whitelist/whitelist_tests.rs @@ -10,7 +10,7 @@ use crate::whitelist::setup::initialize_whitelist_manager; #[must_use] pub fn initialize_whitelist_services(config: &Configuration) -> (Arc, Arc) { - let database = initialize_database(config); + let database = initialize_database(&config.core); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 93bbfe290..e0e81c70c 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -104,7 +104,7 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { let udp_stats_repository = Arc::new(udp_stats_repository); let ban_service = 
Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let database = initialize_database(configuration); + let database = initialize_database(&configuration.core); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&configuration.core, &in_memory_whitelist.clone())); let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index a6671e14a..4c4aa6617 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -294,7 +294,7 @@ mod tests { fn initialize_core_tracker_services(config: &Configuration) -> (CoreTrackerServices, CoreHttpTrackerServices) { let core_config = Arc::new(config.core.clone()); - let database = initialize_database(config); + let database = initialize_database(&config.core); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index bc21657af..64a29db5a 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -84,7 +84,7 @@ mod tests { let config = configuration::ephemeral_public(); let core_config = Arc::new(config.core.clone()); - let database = initialize_database(&config); + let database = initialize_database(&config.core); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); @@ -173,7 +173,7 @@ mod tests { fn initialize_announce_handler() -> Arc { let config = configuration::ephemeral(); - let database = 
initialize_database(&config); + let database = initialize_database(&config.core); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 5325b188b..0a3425efe 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -102,7 +102,7 @@ mod tests { fn initialize_announce_and_scrape_handlers_for_public_tracker() -> (Arc, Arc) { let config = configuration::ephemeral_public(); - let database = initialize_database(&config); + let database = initialize_database(&config.core); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 59833b715..4f98f52d9 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -536,7 +536,7 @@ mod tests { fn initialize_core_tracker_services(config: &Configuration) -> (CoreTrackerServices, CoreUdpTrackerServices) { let core_config = Arc::new(config.core.clone()); - let database = initialize_database(config); + let database = initialize_database(&config.core); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); @@ -1470,7 +1470,7 @@ mod tests { async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let config = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let database = initialize_database(&config); + let 
database = initialize_database(&config.core); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); From e519e7f826c1aee64cc4c284b33c3f3f4a1c4843 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Feb 2025 12:14:45 +0000 Subject: [PATCH 203/802] refactor: [#1231] simplify tests for DatabaseKeyRepository We don't need the whole tracker config. --- packages/test-helpers/src/configuration.rs | 13 +++++++---- .../key/repository/persisted.rs | 22 +++++++++++++------ 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index 678f4283a..130820334 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -1,6 +1,7 @@ //! Tracker configuration factories for testing. use std::env; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; +use std::path::PathBuf; use std::time::Duration; use torrust_tracker_configuration::{Configuration, HttpApi, HttpTracker, Threshold, UdpTracker}; @@ -63,15 +64,19 @@ pub fn ephemeral() -> Configuration { tsl_config: None, }]); - // Ephemeral sqlite database - let temp_directory = env::temp_dir(); - let random_db_id = random::string(16); - let temp_file = temp_directory.join(format!("data_{random_db_id}.db")); + let temp_file = ephemeral_sqlite_database(); temp_file.to_str().unwrap().clone_into(&mut config.core.database.path); config } +#[must_use] +pub fn ephemeral_sqlite_database() -> PathBuf { + let temp_directory = env::temp_dir(); + let random_db_id = random::string(16); + temp_directory.join(format!("data_{random_db_id}.db")) +} + /// Ephemeral configuration with reverse proxy enabled. 
#[must_use] pub fn ephemeral_with_reverse_proxy() -> Configuration { diff --git a/packages/tracker-core/src/authentication/key/repository/persisted.rs b/packages/tracker-core/src/authentication/key/repository/persisted.rs index 60db056a5..65e56cec2 100644 --- a/packages/tracker-core/src/authentication/key/repository/persisted.rs +++ b/packages/tracker-core/src/authentication/key/repository/persisted.rs @@ -54,17 +54,25 @@ mod tests { use std::time::Duration; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker_configuration::Core; + use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; use crate::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::authentication::{Key, PeerKey}; use crate::databases::setup::initialize_database; + fn ephemeral_configuration() -> Core { + let mut config = Core::default(); + let temp_file = ephemeral_sqlite_database(); + temp_file.to_str().unwrap().clone_into(&mut config.database.path); + config + } + #[test] fn persist_a_new_peer_key() { - let configuration = configuration::ephemeral_public(); + let configuration = ephemeral_configuration(); - let database = initialize_database(&configuration.core); + let database = initialize_database(&configuration); let repository = DatabaseKeyRepository::new(&database); @@ -80,9 +88,9 @@ mod tests { #[test] fn remove_a_persisted_peer_key() { - let configuration = configuration::ephemeral_public(); + let configuration = ephemeral_configuration(); - let database = initialize_database(&configuration.core); + let database = initialize_database(&configuration); let repository = DatabaseKeyRepository::new(&database); @@ -100,9 +108,9 @@ mod tests { #[test] fn load_all_persisted_peer_keys() { - let configuration = configuration::ephemeral_public(); + let configuration = ephemeral_configuration(); - let database = initialize_database(&configuration.core); + let database = initialize_database(&configuration); let repository = 
DatabaseKeyRepository::new(&database); From 87095400d7f91cd65c0c232e1f5bccd12af3a4a6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Feb 2025 12:18:31 +0000 Subject: [PATCH 204/802] refactor: [#1231] improve DatabaseKeyRepository tests --- .../src/authentication/key/repository/persisted.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/packages/tracker-core/src/authentication/key/repository/persisted.rs b/packages/tracker-core/src/authentication/key/repository/persisted.rs index 65e56cec2..7edee62c0 100644 --- a/packages/tracker-core/src/authentication/key/repository/persisted.rs +++ b/packages/tracker-core/src/authentication/key/repository/persisted.rs @@ -82,8 +82,10 @@ mod tests { }; let result = repository.add(&peer_key); - assert!(result.is_ok()); + + let keys = repository.load_keys().unwrap(); + assert_eq!(keys, vec!(peer_key)); } #[test] @@ -102,8 +104,10 @@ mod tests { let _unused = repository.add(&peer_key); let result = repository.remove(&peer_key.key); - assert!(result.is_ok()); + + let keys = repository.load_keys().unwrap(); + assert!(keys.is_empty()); } #[test] From 0336ca7a5d404d71c4604002c8535cf6f0cba2c8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Feb 2025 12:31:45 +0000 Subject: [PATCH 205/802] refactor: [#1231] exctract mod --- .../src/authentication/key/mod.rs | 155 +------------ .../src/authentication/key/peer_key.rs | 206 ++++++++++++++++++ 2 files changed, 212 insertions(+), 149 deletions(-) create mode 100644 packages/tracker-core/src/authentication/key/peer_key.rs diff --git a/packages/tracker-core/src/authentication/key/mod.rs b/packages/tracker-core/src/authentication/key/mod.rs index e3e7fc018..081e027bf 100644 --- a/packages/tracker-core/src/authentication/key/mod.rs +++ b/packages/tracker-core/src/authentication/key/mod.rs @@ -37,25 +37,26 @@ //! //! assert!(authentication::key::verify_key_expiration(&expiring_key).is_ok()); //! 
``` +pub mod peer_key; pub mod repository; use std::panic::Location; -use std::str::FromStr; use std::sync::Arc; use std::time::Duration; -use derive_more::Display; use rand::distr::Alphanumeric; use rand::{rng, Rng}; -use serde::{Deserialize, Serialize}; use thiserror::Error; use torrust_tracker_clock::clock::Time; -use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; use torrust_tracker_located_error::{DynError, LocatedError}; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::CurrentClock; +pub type PeerKey = peer_key::PeerKey; +pub type Key = peer_key::Key; +pub type ParseKeyError = peer_key::ParseKeyError; + /// HTTP tracker authentication key length. /// /// For more information see function [`generate_key`](crate::authentication::key::generate_key) to generate the @@ -130,110 +131,6 @@ pub fn verify_key_expiration(auth_key: &PeerKey) -> Result<(), Error> { } } -/// An authentication key which can potentially have an expiration time. -/// After that time is will automatically become invalid. -#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] -pub struct PeerKey { - /// Random 32-char string. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` - pub key: Key, - - /// Timestamp, the key will be no longer valid after this timestamp. - /// If `None` the keys will not expire (permanent key). - pub valid_until: Option, -} - -impl std::fmt::Display for PeerKey { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self.expiry_time() { - Some(expire_time) => write!(f, "key: `{}`, valid until `{}`", self.key, expire_time), - None => write!(f, "key: `{}`, permanent", self.key), - } - } -} - -impl PeerKey { - #[must_use] - pub fn key(&self) -> Key { - self.key.clone() - } - - /// It returns the expiry time. For example, for the starting time for Unix Epoch - /// (timestamp 0) it will return a `DateTime` whose string representation is - /// `1970-01-01 00:00:00 UTC`. 
- /// - /// # Panics - /// - /// Will panic when the key timestamp overflows the internal i64 type. - /// (this will naturally happen in 292.5 billion years) - #[must_use] - pub fn expiry_time(&self) -> Option> { - self.valid_until.map(convert_from_timestamp_to_datetime_utc) - } -} - -/// A token used for authentication. -/// -/// - It contains only ascii alphanumeric chars: lower and uppercase letters and -/// numbers. -/// - It's a 32-char string. -#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, Display, Hash)] -pub struct Key(String); - -impl Key { - /// # Errors - /// - /// Will return an error is the string represents an invalid key. - /// Valid keys can only contain 32 chars including 0-9, a-z and A-Z. - pub fn new(value: &str) -> Result { - if value.len() != AUTH_KEY_LENGTH { - return Err(ParseKeyError::InvalidKeyLength); - } - - if !value.chars().all(|c| c.is_ascii_alphanumeric()) { - return Err(ParseKeyError::InvalidChars); - } - - Ok(Self(value.to_owned())) - } - - #[must_use] - pub fn value(&self) -> &str { - &self.0 - } -} - -/// Error returned when a key cannot be parsed from a string. -/// -/// ```text -/// use bittorrent_tracker_core::authentication::Key; -/// use std::str::FromStr; -/// -/// let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; -/// let key = Key::from_str(key_string); -/// -/// assert!(key.is_ok()); -/// assert_eq!(key.unwrap().to_string(), key_string); -/// ``` -/// -/// If the string does not contains a valid key, the parser function will return -/// this error. -#[derive(Debug, Error)] -pub enum ParseKeyError { - #[error("Invalid key length. Key must be have 32 chars")] - InvalidKeyLength, - #[error("Invalid chars for key. Key can only alphanumeric chars (0-9, a-z, A-Z)")] - InvalidChars, -} - -impl FromStr for Key { - type Err = ParseKeyError; - - fn from_str(s: &str) -> Result { - Key::new(s)?; - Ok(Self(s.to_string())) - } -} - /// Verification error. 
Error returned when an [`PeerKey`] cannot be /// verified with the (`crate::authentication::verify_key`) function. #[derive(Debug, Error)] @@ -263,39 +160,8 @@ impl From for Error { #[cfg(test)] mod tests { - mod key { - use std::str::FromStr; - - use crate::authentication::Key; - - #[test] - fn should_be_parsed_from_an_string() { - let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let key = Key::from_str(key_string); - - assert!(key.is_ok()); - assert_eq!(key.unwrap().to_string(), key_string); - } - - #[test] - fn length_should_be_32() { - let key = Key::new(""); - assert!(key.is_err()); - - let string_longer_than_32 = "012345678901234567890123456789012"; // DevSkim: ignore DS173237 - let key = Key::new(string_longer_than_32); - assert!(key.is_err()); - } - - #[test] - fn should_only_include_alphanumeric_chars() { - let key = Key::new("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"); - assert!(key.is_err()); - } - } - mod expiring_auth_key { - use std::str::FromStr; + use std::time::Duration; use torrust_tracker_clock::clock; @@ -303,15 +169,6 @@ mod tests { use crate::authentication; - #[test] - fn should_be_parsed_from_an_string() { - let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key = authentication::Key::from_str(key_string); - - assert!(auth_key.is_ok()); - assert_eq!(auth_key.unwrap().to_string(), key_string); - } - #[test] fn should_be_displayed() { // Set the time to the current time. 
diff --git a/packages/tracker-core/src/authentication/key/peer_key.rs b/packages/tracker-core/src/authentication/key/peer_key.rs new file mode 100644 index 000000000..c4dfc7742 --- /dev/null +++ b/packages/tracker-core/src/authentication/key/peer_key.rs @@ -0,0 +1,206 @@ +use std::str::FromStr; + +use derive_more::Display; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::AUTH_KEY_LENGTH; + +/// An authentication key which can potentially have an expiration time. +/// After that time is will automatically become invalid. +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] +pub struct PeerKey { + /// Random 32-char string. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` + pub key: Key, + + /// Timestamp, the key will be no longer valid after this timestamp. + /// If `None` the keys will not expire (permanent key). + pub valid_until: Option, +} + +impl std::fmt::Display for PeerKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self.expiry_time() { + Some(expire_time) => write!(f, "key: `{}`, valid until `{}`", self.key, expire_time), + None => write!(f, "key: `{}`, permanent", self.key), + } + } +} + +impl PeerKey { + #[must_use] + pub fn key(&self) -> Key { + self.key.clone() + } + + /// It returns the expiry time. For example, for the starting time for Unix Epoch + /// (timestamp 0) it will return a `DateTime` whose string representation is + /// `1970-01-01 00:00:00 UTC`. + /// + /// # Panics + /// + /// Will panic when the key timestamp overflows the internal i64 type. + /// (this will naturally happen in 292.5 billion years) + #[must_use] + pub fn expiry_time(&self) -> Option> { + self.valid_until.map(convert_from_timestamp_to_datetime_utc) + } +} + +/// A token used for authentication. 
+/// +/// - It contains only ascii alphanumeric chars: lower and uppercase letters and +/// numbers. +/// - It's a 32-char string. +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, Display, Hash)] +pub struct Key(String); + +impl Key { + /// # Errors + /// + /// Will return an error is the string represents an invalid key. + /// Valid keys can only contain 32 chars including 0-9, a-z and A-Z. + pub fn new(value: &str) -> Result { + if value.len() != AUTH_KEY_LENGTH { + return Err(ParseKeyError::InvalidKeyLength); + } + + if !value.chars().all(|c| c.is_ascii_alphanumeric()) { + return Err(ParseKeyError::InvalidChars); + } + + Ok(Self(value.to_owned())) + } + + #[must_use] + pub fn value(&self) -> &str { + &self.0 + } +} + +/// Error returned when a key cannot be parsed from a string. +/// +/// ```text +/// use bittorrent_tracker_core::authentication::Key; +/// use std::str::FromStr; +/// +/// let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; +/// let key = Key::from_str(key_string); +/// +/// assert!(key.is_ok()); +/// assert_eq!(key.unwrap().to_string(), key_string); +/// ``` +/// +/// If the string does not contains a valid key, the parser function will return +/// this error. +#[derive(Debug, Error)] +pub enum ParseKeyError { + #[error("Invalid key length. Key must be have 32 chars")] + InvalidKeyLength, + #[error("Invalid chars for key. 
Key can only alphanumeric chars (0-9, a-z, A-Z)")] + InvalidChars, +} + +impl FromStr for Key { + type Err = ParseKeyError; + + fn from_str(s: &str) -> Result { + Key::new(s)?; + Ok(Self(s.to_string())) + } +} + +#[cfg(test)] +mod tests { + + mod key { + use std::str::FromStr; + + use crate::authentication::Key; + + #[test] + fn should_be_parsed_from_an_string() { + let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; + let key = Key::from_str(key_string); + + assert!(key.is_ok()); + assert_eq!(key.unwrap().to_string(), key_string); + } + + #[test] + fn length_should_be_32() { + let key = Key::new(""); + assert!(key.is_err()); + + let string_longer_than_32 = "012345678901234567890123456789012"; // DevSkim: ignore DS173237 + let key = Key::new(string_longer_than_32); + assert!(key.is_err()); + } + + #[test] + fn should_only_include_alphanumeric_chars() { + let key = Key::new("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"); + assert!(key.is_err()); + } + } + + mod expiring_auth_key { + use std::str::FromStr; + use std::time::Duration; + + use torrust_tracker_clock::clock; + use torrust_tracker_clock::clock::stopped::Stopped as _; + + use crate::authentication; + + #[test] + fn should_be_parsed_from_an_string() { + let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; + let auth_key = authentication::Key::from_str(key_string); + + assert!(auth_key.is_ok()); + assert_eq!(auth_key.unwrap().to_string(), key_string); + } + + #[test] + fn should_be_displayed() { + // Set the time to the current time. 
+ clock::Stopped::local_set_to_unix_epoch(); + + let expiring_key = authentication::key::generate_key(Some(Duration::from_secs(0))); + + assert_eq!( + expiring_key.to_string(), + format!("key: `{}`, valid until `1970-01-01 00:00:00 UTC`", expiring_key.key) // cspell:disable-line + ); + } + + #[test] + fn should_be_generated_with_a_expiration_time() { + let expiring_key = authentication::key::generate_key(Some(Duration::new(9999, 0))); + + assert!(authentication::key::verify_key_expiration(&expiring_key).is_ok()); + } + + #[test] + fn should_be_generate_and_verified() { + // Set the time to the current time. + clock::Stopped::local_set_to_system_time_now(); + + // Make key that is valid for 19 seconds. + let expiring_key = authentication::key::generate_key(Some(Duration::from_secs(19))); + + // Mock the time has passed 10 sec. + clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); + + assert!(authentication::key::verify_key_expiration(&expiring_key).is_ok()); + + // Mock the time has passed another 10 sec. + clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); + + assert!(authentication::key::verify_key_expiration(&expiring_key).is_err()); + } + } +} From 5d91a326734cfc1f35cfa6bb845cdeae82a93a49 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Feb 2025 12:59:50 +0000 Subject: [PATCH 206/802] test: [#1231] add more tests to peer_key mod --- .../src/authentication/key/peer_key.rs | 22 +++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/packages/tracker-core/src/authentication/key/peer_key.rs b/packages/tracker-core/src/authentication/key/peer_key.rs index c4dfc7742..9b330185e 100644 --- a/packages/tracker-core/src/authentication/key/peer_key.rs +++ b/packages/tracker-core/src/authentication/key/peer_key.rs @@ -99,6 +99,7 @@ impl Key { pub enum ParseKeyError { #[error("Invalid key length. Key must be have 32 chars")] InvalidKeyLength, + #[error("Invalid chars for key. 
Key can only alphanumeric chars (0-9, a-z, A-Z)")] InvalidChars, } @@ -144,9 +145,16 @@ mod tests { let key = Key::new("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"); assert!(key.is_err()); } + + #[test] + fn should_return_a_reference_to_the_inner_string() { + let key = Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); // DevSkim: ignore DS173237 + + assert_eq!(key.value(), "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"); // DevSkim: ignore DS173237 + } } - mod expiring_auth_key { + mod peer_key { use std::str::FromStr; use std::time::Duration; @@ -165,7 +173,7 @@ mod tests { } #[test] - fn should_be_displayed() { + fn should_be_displayed_when_it_is_expiring() { // Set the time to the current time. clock::Stopped::local_set_to_unix_epoch(); @@ -177,6 +185,16 @@ mod tests { ); } + #[test] + fn should_be_displayed_when_it_is_permanent() { + let expiring_key = authentication::key::generate_permanent_key(); + + assert_eq!( + expiring_key.to_string(), + format!("key: `{}`, permanent", expiring_key.key) // cspell:disable-line + ); + } + #[test] fn should_be_generated_with_a_expiration_time() { let expiring_key = authentication::key::generate_key(Some(Duration::new(9999, 0))); From d0c7313056d0baa9f7883c2bb90cea7e00e5a419 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Feb 2025 13:23:35 +0000 Subject: [PATCH 207/802] refactor: [#1231] peer_key mod tests --- .../src/authentication/key/mod.rs | 16 +-- .../src/authentication/key/peer_key.rs | 107 ++++++++++-------- 2 files changed, 67 insertions(+), 56 deletions(-) diff --git a/packages/tracker-core/src/authentication/key/mod.rs b/packages/tracker-core/src/authentication/key/mod.rs index 081e027bf..228e4c680 100644 --- a/packages/tracker-core/src/authentication/key/mod.rs +++ b/packages/tracker-core/src/authentication/key/mod.rs @@ -44,8 +44,6 @@ use std::panic::Location; use std::sync::Arc; use std::time::Duration; -use rand::distr::Alphanumeric; -use rand::{rng, Rng}; use thiserror::Error; use torrust_tracker_clock::clock::Time; use 
torrust_tracker_located_error::{DynError, LocatedError}; @@ -82,24 +80,20 @@ pub fn generate_permanent_key() -> PeerKey { /// * `lifetime`: if `None` the key will be permanent. #[must_use] pub fn generate_key(lifetime: Option) -> PeerKey { - let random_id: String = rng() - .sample_iter(&Alphanumeric) - .take(AUTH_KEY_LENGTH) - .map(char::from) - .collect(); + let random_key = Key::random(); if let Some(lifetime) = lifetime { - tracing::debug!("Generated key: {}, valid for: {:?} seconds", random_id, lifetime); + tracing::debug!("Generated key: {}, valid for: {:?} seconds", random_key, lifetime); PeerKey { - key: random_id.parse::().unwrap(), + key: random_key, valid_until: Some(CurrentClock::now_add(&lifetime).unwrap()), } } else { - tracing::debug!("Generated key: {}, permanent", random_id); + tracing::debug!("Generated key: {}, permanent", random_key); PeerKey { - key: random_id.parse::().unwrap(), + key: random_key, valid_until: None, } } diff --git a/packages/tracker-core/src/authentication/key/peer_key.rs b/packages/tracker-core/src/authentication/key/peer_key.rs index 9b330185e..a3045e54e 100644 --- a/packages/tracker-core/src/authentication/key/peer_key.rs +++ b/packages/tracker-core/src/authentication/key/peer_key.rs @@ -1,6 +1,8 @@ use std::str::FromStr; use derive_more::Display; +use rand::distr::Alphanumeric; +use rand::{rng, Rng}; use serde::{Deserialize, Serialize}; use thiserror::Error; use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; @@ -74,6 +76,20 @@ impl Key { Ok(Self(value.to_owned())) } + /// It generates a random key. + /// + /// # Panics + /// + /// Will panic if the random number generator fails to generate a valid key. 
+ pub fn random() -> Self { + let random_id: String = rng() + .sample_iter(&Alphanumeric) + .take(AUTH_KEY_LENGTH) + .map(char::from) + .collect(); + random_id.parse::().expect("Failed to generate a valid random key") + } + #[must_use] pub fn value(&self) -> &str { &self.0 @@ -130,6 +146,11 @@ mod tests { assert_eq!(key.unwrap().to_string(), key_string); } + #[test] + fn should_be_generated_randomly() { + let _key = Key::random(); + } + #[test] fn length_should_be_32() { let key = Key::new(""); @@ -155,70 +176,66 @@ mod tests { } mod peer_key { - use std::str::FromStr; - use std::time::Duration; - use torrust_tracker_clock::clock; - use torrust_tracker_clock::clock::stopped::Stopped as _; + use std::time::Duration; - use crate::authentication; + use crate::authentication::key::peer_key::{Key, PeerKey}; #[test] - fn should_be_parsed_from_an_string() { - let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key = authentication::Key::from_str(key_string); + fn could_have_an_expiration_time() { + let expiring_key = PeerKey { + key: Key::random(), + valid_until: Some(Duration::from_secs(100)), + }; - assert!(auth_key.is_ok()); - assert_eq!(auth_key.unwrap().to_string(), key_string); + assert_eq!(expiring_key.expiry_time().unwrap().to_string(), "1970-01-01 00:01:40 UTC"); } #[test] - fn should_be_displayed_when_it_is_expiring() { - // Set the time to the current time. 
- clock::Stopped::local_set_to_unix_epoch(); + fn could_be_permanent() { + let permanent_key = PeerKey { + key: Key::random(), + valid_until: None, + }; - let expiring_key = authentication::key::generate_key(Some(Duration::from_secs(0))); - - assert_eq!( - expiring_key.to_string(), - format!("key: `{}`, valid until `1970-01-01 00:00:00 UTC`", expiring_key.key) // cspell:disable-line - ); + assert_eq!(permanent_key.expiry_time(), None); } - #[test] - fn should_be_displayed_when_it_is_permanent() { - let expiring_key = authentication::key::generate_permanent_key(); + mod expiring { + use std::time::Duration; - assert_eq!( - expiring_key.to_string(), - format!("key: `{}`, permanent", expiring_key.key) // cspell:disable-line - ); - } + use crate::authentication::key::peer_key::{Key, PeerKey}; - #[test] - fn should_be_generated_with_a_expiration_time() { - let expiring_key = authentication::key::generate_key(Some(Duration::new(9999, 0))); + #[test] + fn should_be_displayed_when_it_is_expiring() { + let expiring_key = PeerKey { + key: Key::random(), + valid_until: Some(Duration::from_secs(100)), + }; - assert!(authentication::key::verify_key_expiration(&expiring_key).is_ok()); + assert_eq!( + expiring_key.to_string(), + format!("key: `{}`, valid until `1970-01-01 00:01:40 UTC`", expiring_key.key) // cspell:disable-line + ); + } } - #[test] - fn should_be_generate_and_verified() { - // Set the time to the current time. - clock::Stopped::local_set_to_system_time_now(); - - // Make key that is valid for 19 seconds. - let expiring_key = authentication::key::generate_key(Some(Duration::from_secs(19))); - - // Mock the time has passed 10 sec. - clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); + mod permanent { - assert!(authentication::key::verify_key_expiration(&expiring_key).is_ok()); + use crate::authentication::key::peer_key::{Key, PeerKey}; - // Mock the time has passed another 10 sec. 
- clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); + #[test] + fn should_be_displayed_when_it_is_permanent() { + let permanent_key = PeerKey { + key: Key::random(), + valid_until: None, + }; - assert!(authentication::key::verify_key_expiration(&expiring_key).is_err()); + assert_eq!( + permanent_key.to_string(), + format!("key: `{}`, permanent", permanent_key.key) // cspell:disable-line + ); + } } } } From 0d7e30e579ca61991113fd5ab96dd57a387aad3e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Feb 2025 16:01:21 +0000 Subject: [PATCH 208/802] refactor: [#1231] bittorrent_tracker_core::authentication::key::tests --- .../src/authentication/key/mod.rs | 46 ++++++++++++++++++- 1 file changed, 44 insertions(+), 2 deletions(-) diff --git a/packages/tracker-core/src/authentication/key/mod.rs b/packages/tracker-core/src/authentication/key/mod.rs index 228e4c680..bdb72b1cf 100644 --- a/packages/tracker-core/src/authentication/key/mod.rs +++ b/packages/tracker-core/src/authentication/key/mod.rs @@ -134,11 +134,13 @@ pub enum Error { KeyVerificationError { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, + #[error("Failed to read key: {key}, {location}")] UnableToReadKey { location: &'static Location<'static>, key: Box, }, + #[error("Key has expired, {location}")] KeyExpired { location: &'static Location<'static> }, } @@ -154,7 +156,7 @@ impl From for Error { #[cfg(test)] mod tests { - mod expiring_auth_key { + mod the_expiring_peer_key { use std::time::Duration; @@ -184,7 +186,7 @@ mod tests { } #[test] - fn should_be_generate_and_verified() { + fn expiration_verification_should_fail_when_the_key_has_expired() { // Set the time to the current time. 
clock::Stopped::local_set_to_system_time_now(); @@ -202,4 +204,44 @@ mod tests { assert!(authentication::key::verify_key_expiration(&expiring_key).is_err()); } } + + mod the_permanent_peer_key { + + use std::time::Duration; + + use torrust_tracker_clock::clock; + use torrust_tracker_clock::clock::stopped::Stopped as _; + + use crate::authentication; + + #[test] + fn should_be_displayed() { + // Set the time to the current time. + clock::Stopped::local_set_to_unix_epoch(); + + let expiring_key = authentication::key::generate_key(Some(Duration::from_secs(0))); + + assert_eq!( + expiring_key.to_string(), + format!("key: `{}`, valid until `1970-01-01 00:00:00 UTC`", expiring_key.key) // cspell:disable-line + ); + } + + #[test] + fn should_be_generated_without_expiration_time() { + let expiring_key = authentication::key::generate_permanent_key(); + + assert!(authentication::key::verify_key_expiration(&expiring_key).is_ok()); + } + + #[test] + fn expiration_verification_should_always_succeed() { + let expiring_key = authentication::key::generate_permanent_key(); + + // Mock the time has passed 10 years. 
+ clock::Stopped::local_add(&Duration::from_secs(10 * 365 * 24 * 60 * 60)).unwrap(); + + assert!(authentication::key::verify_key_expiration(&expiring_key).is_ok()); + } + } } From 5db73be398c80d10fee73f39734c3513b5e09be9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Feb 2025 16:12:01 +0000 Subject: [PATCH 209/802] refactor: rename methods --- .../src/authentication/handler.rs | 36 ++++++++++--------- .../tracker-core/src/authentication/mod.rs | 23 ++++++++---- src/app.rs | 2 +- .../apis/v1/context/auth_key/handlers.rs | 9 +++-- .../api/v1/contract/context/auth_key.rs | 14 ++++---- tests/servers/http/v1/contract.rs | 4 +-- 6 files changed, 52 insertions(+), 36 deletions(-) diff --git a/packages/tracker-core/src/authentication/handler.rs b/packages/tracker-core/src/authentication/handler.rs index 10ba3ecbb..4c392ee56 100644 --- a/packages/tracker-core/src/authentication/handler.rs +++ b/packages/tracker-core/src/authentication/handler.rs @@ -54,8 +54,6 @@ impl KeysHandler { /// - The provided pre-generated key is invalid. /// - The key could not been persisted due to database issues. pub async fn add_peer_key(&self, add_key_req: AddKeyRequest) -> Result { - // code-review: all methods related to keys should be moved to a new independent "keys" service. 
- match add_key_req.opt_key { // Upload pre-generated key Some(pre_existing_key) => { @@ -68,7 +66,7 @@ impl KeysHandler { let key = pre_existing_key.parse::(); match key { - Ok(key) => match self.add_auth_key(key, Some(valid_until)).await { + Ok(key) => match self.add_expiring_peer_key(key, Some(valid_until)).await { Ok(auth_key) => Ok(auth_key), Err(err) => Err(PeerKeyError::DatabaseError { source: Located(err).into(), @@ -84,7 +82,7 @@ impl KeysHandler { let key = pre_existing_key.parse::(); match key { - Ok(key) => match self.add_permanent_auth_key(key).await { + Ok(key) => match self.add_permanent_peer_key(key).await { Ok(auth_key) => Ok(auth_key), Err(err) => Err(PeerKeyError::DatabaseError { source: Located(err).into(), @@ -100,14 +98,17 @@ impl KeysHandler { // Generate a new random key None => match add_key_req.opt_seconds_valid { // Expiring key - Some(seconds_valid) => match self.generate_auth_key(Some(Duration::from_secs(seconds_valid))).await { + Some(seconds_valid) => match self + .generate_expiring_peer_key(Some(Duration::from_secs(seconds_valid))) + .await + { Ok(auth_key) => Ok(auth_key), Err(err) => Err(PeerKeyError::DatabaseError { source: Located(err).into(), }), }, // Permanent key - None => match self.generate_permanent_auth_key().await { + None => match self.generate_permanent_peer_key().await { Ok(auth_key) => Ok(auth_key), Err(err) => Err(PeerKeyError::DatabaseError { source: Located(err).into(), @@ -124,8 +125,8 @@ impl KeysHandler { /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. - pub async fn generate_permanent_auth_key(&self) -> Result { - self.generate_auth_key(None).await + pub async fn generate_permanent_peer_key(&self) -> Result { + self.generate_expiring_peer_key(None).await } /// It generates a new expiring authentication key. @@ -140,7 +141,7 @@ impl KeysHandler { /// /// * `lifetime` - The duration in seconds for the new key. 
The key will be /// no longer valid after `lifetime` seconds. - pub async fn generate_auth_key(&self, lifetime: Option) -> Result { + pub async fn generate_expiring_peer_key(&self, lifetime: Option) -> Result { let peer_key = key::generate_key(lifetime); self.db_key_repository.add(&peer_key)?; @@ -162,8 +163,8 @@ impl KeysHandler { /// # Arguments /// /// * `key` - The pre-generated key. - pub async fn add_permanent_auth_key(&self, key: Key) -> Result { - self.add_auth_key(key, None).await + pub async fn add_permanent_peer_key(&self, key: Key) -> Result { + self.add_expiring_peer_key(key, None).await } /// It adds a pre-generated authentication key. @@ -180,7 +181,7 @@ impl KeysHandler { /// * `key` - The pre-generated key. /// * `lifetime` - The duration in seconds for the new key. The key will be /// no longer valid after `lifetime` seconds. - pub async fn add_auth_key( + pub async fn add_expiring_peer_key( &self, key: Key, valid_until: Option, @@ -202,7 +203,7 @@ impl KeysHandler { /// # Errors /// /// Will return a `database::Error` if unable to remove the `key` to the database. - pub async fn remove_auth_key(&self, key: &Key) -> Result<(), databases::error::Error> { + pub async fn remove_peer_key(&self, key: &Key) -> Result<(), databases::error::Error> { self.db_key_repository.remove(key)?; self.remove_in_memory_auth_key(key).await; @@ -223,7 +224,7 @@ impl KeysHandler { /// # Errors /// /// Will return a `database::Error` if unable to `load_keys` from the database. 
- pub async fn load_keys_from_database(&self) -> Result<(), databases::error::Error> { + pub async fn load_peer_keys_from_database(&self) -> Result<(), databases::error::Error> { let keys_from_database = self.db_key_repository.load_keys()?; self.in_memory_key_repository.reset_with(keys_from_database).await; @@ -287,7 +288,10 @@ mod tests { async fn it_should_generate_the_key() { let keys_handler = instantiate_keys_handler(); - let peer_key = keys_handler.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); + let peer_key = keys_handler + .generate_expiring_peer_key(Some(Duration::from_secs(100))) + .await + .unwrap(); assert_eq!( peer_key.valid_until, @@ -335,7 +339,7 @@ mod tests { async fn it_should_generate_the_key() { let keys_handler = instantiate_keys_handler(); - let peer_key = keys_handler.generate_permanent_auth_key().await.unwrap(); + let peer_key = keys_handler.generate_permanent_peer_key().await.unwrap(); assert_eq!(peer_key.valid_until, None); } diff --git a/packages/tracker-core/src/authentication/mod.rs b/packages/tracker-core/src/authentication/mod.rs index 4197f4323..52138d26b 100644 --- a/packages/tracker-core/src/authentication/mod.rs +++ b/packages/tracker-core/src/authentication/mod.rs @@ -65,9 +65,12 @@ mod tests { async fn it_should_remove_an_authentication_key() { let (keys_manager, authentication_service) = instantiate_keys_manager_and_authentication(); - let expiring_key = keys_manager.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); + let expiring_key = keys_manager + .generate_expiring_peer_key(Some(Duration::from_secs(100))) + .await + .unwrap(); - let result = keys_manager.remove_auth_key(&expiring_key.key()).await; + let result = keys_manager.remove_peer_key(&expiring_key.key()).await; assert!(result.is_ok()); @@ -79,12 +82,15 @@ mod tests { async fn it_should_load_authentication_keys_from_the_database() { let (keys_manager, authentication_service) = instantiate_keys_manager_and_authentication(); - let 
expiring_key = keys_manager.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); + let expiring_key = keys_manager + .generate_expiring_peer_key(Some(Duration::from_secs(100))) + .await + .unwrap(); // Remove the newly generated key in memory keys_manager.remove_in_memory_auth_key(&expiring_key.key()).await; - let result = keys_manager.load_keys_from_database().await; + let result = keys_manager.load_peer_keys_from_database().await; assert!(result.is_ok()); @@ -107,7 +113,10 @@ mod tests { async fn it_should_authenticate_a_peer_with_the_key() { let (keys_manager, authentication_service) = instantiate_keys_manager_and_authentication(); - let peer_key = keys_manager.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); + let peer_key = keys_manager + .generate_expiring_peer_key(Some(Duration::from_secs(100))) + .await + .unwrap(); let result = authentication_service.authenticate(&peer_key.key()).await; @@ -122,7 +131,7 @@ mod tests { let past_timestamp = Duration::ZERO; let peer_key = keys_manager - .add_auth_key(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), Some(past_timestamp)) + .add_expiring_peer_key(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), Some(past_timestamp)) .await .unwrap(); @@ -183,7 +192,7 @@ mod tests { async fn it_should_authenticate_a_peer_with_the_key() { let (keys_manager, authentication_service) = instantiate_keys_manager_and_authentication(); - let peer_key = keys_manager.generate_permanent_auth_key().await.unwrap(); + let peer_key = keys_manager.generate_permanent_peer_key().await.unwrap(); let result = authentication_service.authenticate(&peer_key.key()).await; diff --git a/src/app.rs b/src/app.rs index d69874eb0..ad7524372 100644 --- a/src/app.rs +++ b/src/app.rs @@ -55,7 +55,7 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> if config.core.private { app_container .keys_handler - .load_keys_from_database() + .load_peer_keys_from_database() .await .expect("Could not retrieve keys 
from database."); } diff --git a/src/servers/apis/v1/context/auth_key/handlers.rs b/src/servers/apis/v1/context/auth_key/handlers.rs index ca38ade37..c8d4c25b0 100644 --- a/src/servers/apis/v1/context/auth_key/handlers.rs +++ b/src/servers/apis/v1/context/auth_key/handlers.rs @@ -70,7 +70,10 @@ pub async fn generate_auth_key_handler( Path(seconds_valid_or_key): Path, ) -> Response { let seconds_valid = seconds_valid_or_key; - match keys_handler.generate_auth_key(Some(Duration::from_secs(seconds_valid))).await { + match keys_handler + .generate_expiring_peer_key(Some(Duration::from_secs(seconds_valid))) + .await + { Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), Err(e) => failed_to_generate_key_response(e), } @@ -111,7 +114,7 @@ pub async fn delete_auth_key_handler( ) -> Response { match Key::from_str(&seconds_valid_or_key.0) { Err(_) => invalid_auth_key_param_response(&seconds_valid_or_key.0), - Ok(key) => match keys_handler.remove_auth_key(&key).await { + Ok(key) => match keys_handler.remove_peer_key(&key).await { Ok(()) => ok_response(), Err(e) => failed_to_delete_key_response(e), }, @@ -131,7 +134,7 @@ pub async fn delete_auth_key_handler( /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#reload-authentication-keys) /// for more information about this endpoint. 
pub async fn reload_keys_handler(State(keys_handler): State>) -> Response { - match keys_handler.load_keys_from_database().await { + match keys_handler.load_peer_keys_from_database().await { Ok(()) => ok_response(), Err(e) => failed_to_reload_keys_response(e), } diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs index 47cf0ecd2..ab9bfaf3e 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -160,7 +160,7 @@ async fn should_allow_deleting_an_auth_key() { let auth_key = env .http_api_container .keys_handler - .generate_auth_key(Some(Duration::from_secs(seconds_valid))) + .generate_expiring_peer_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -295,7 +295,7 @@ async fn should_fail_when_the_auth_key_cannot_be_deleted() { let auth_key = env .http_api_container .keys_handler - .generate_auth_key(Some(Duration::from_secs(seconds_valid))) + .generate_expiring_peer_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -329,7 +329,7 @@ async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { let auth_key = env .http_api_container .keys_handler - .generate_auth_key(Some(Duration::from_secs(seconds_valid))) + .generate_expiring_peer_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -350,7 +350,7 @@ async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { let auth_key = env .http_api_container .keys_handler - .generate_auth_key(Some(Duration::from_secs(seconds_valid))) + .generate_expiring_peer_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -379,7 +379,7 @@ async fn should_allow_reloading_keys() { let seconds_valid = 60; env.http_api_container .keys_handler - .generate_auth_key(Some(Duration::from_secs(seconds_valid))) + .generate_expiring_peer_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -405,7 +405,7 @@ async fn 
should_fail_when_keys_cannot_be_reloaded() { env.http_api_container .keys_handler - .generate_auth_key(Some(Duration::from_secs(seconds_valid))) + .generate_expiring_peer_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); @@ -434,7 +434,7 @@ async fn should_not_allow_reloading_keys_for_unauthenticated_users() { let seconds_valid = 60; env.http_api_container .keys_handler - .generate_auth_key(Some(Duration::from_secs(seconds_valid))) + .generate_expiring_peer_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 48c98fa02..bab969403 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -1404,7 +1404,7 @@ mod configured_as_private { let expiring_key = env .keys_handler - .generate_auth_key(Some(Duration::from_secs(60))) + .generate_expiring_peer_key(Some(Duration::from_secs(60))) .await .unwrap(); @@ -1553,7 +1553,7 @@ mod configured_as_private { let expiring_key = env .keys_handler - .generate_auth_key(Some(Duration::from_secs(60))) + .generate_expiring_peer_key(Some(Duration::from_secs(60))) .await .unwrap(); From e3ba1e198701e3d8cf94d274f4d6bf85b628c551 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Feb 2025 17:50:16 +0000 Subject: [PATCH 210/802] test: [#1231] add tests for KeysHandler --- .../src/authentication/handler.rs | 200 +++++++++++++++--- 1 file changed, 176 insertions(+), 24 deletions(-) diff --git a/packages/tracker-core/src/authentication/handler.rs b/packages/tracker-core/src/authentication/handler.rs index 4c392ee56..3643e7ece 100644 --- a/packages/tracker-core/src/authentication/handler.rs +++ b/packages/tracker-core/src/authentication/handler.rs @@ -44,7 +44,8 @@ impl KeysHandler { /// Adds new peer keys to the tracker. /// - /// Keys can be pre-generated or randomly created. They can also be permanent or expire. + /// Keys can be pre-generated or randomly created. 
They can also be + /// permanent or expire. /// /// # Errors /// @@ -55,8 +56,9 @@ impl KeysHandler { /// - The key could not been persisted due to database issues. pub async fn add_peer_key(&self, add_key_req: AddKeyRequest) -> Result { match add_key_req.opt_key { - // Upload pre-generated key Some(pre_existing_key) => { + // Upload pre-generated key + if let Some(seconds_valid) = add_key_req.opt_seconds_valid { // Expiring key let Some(valid_until) = CurrentClock::now_add(&Duration::from_secs(seconds_valid)) else { @@ -95,20 +97,20 @@ impl KeysHandler { } } } - // Generate a new random key None => match add_key_req.opt_seconds_valid { - // Expiring key + // Generate a new random key Some(seconds_valid) => match self .generate_expiring_peer_key(Some(Duration::from_secs(seconds_valid))) .await { + // Expiring key Ok(auth_key) => Ok(auth_key), Err(err) => Err(PeerKeyError::DatabaseError { source: Located(err).into(), }), }, - // Permanent key None => match self.generate_permanent_peer_key().await { + // Permanent key Ok(auth_key) => Ok(auth_key), Err(err) => Err(PeerKeyError::DatabaseError { source: Located(err).into(), @@ -236,7 +238,7 @@ impl KeysHandler { #[cfg(test)] mod tests { - mod the_keys_handler_when_tracker_is_configured_as_private { + mod the_keys_handler_when_the_tracker_is_configured_as_private { use std::sync::Arc; @@ -267,6 +269,8 @@ mod tests { } fn instantiate_keys_handler_with_configuration(config: &Configuration) -> KeysHandler { + // todo: pass only Core configuration + let database = initialize_database(&config.core); let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); @@ -274,22 +278,48 @@ mod tests { KeysHandler::new(&db_key_repository, &in_memory_key_repository) } - mod with_expiring_and { + mod handling_expiring_peer_keys { - mod randomly_generated_keys { + use std::time::Duration; + + use torrust_tracker_clock::clock::Time; + + use 
crate::authentication::handler::tests::the_keys_handler_when_the_tracker_is_configured_as_private::instantiate_keys_handler; + use crate::CurrentClock; + + #[tokio::test] + async fn it_should_generate_the_key() { + let keys_handler = instantiate_keys_handler(); + + let peer_key = keys_handler + .generate_expiring_peer_key(Some(Duration::from_secs(100))) + .await + .unwrap(); + + assert_eq!( + peer_key.valid_until, + Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) + ); + } + + mod randomly_generated { use std::time::Duration; use torrust_tracker_clock::clock::Time; - use crate::authentication::handler::tests::the_keys_handler_when_tracker_is_configured_as_private::instantiate_keys_handler; + use crate::authentication::handler::tests::the_keys_handler_when_the_tracker_is_configured_as_private::instantiate_keys_handler; + use crate::authentication::handler::AddKeyRequest; use crate::CurrentClock; #[tokio::test] - async fn it_should_generate_the_key() { + async fn it_should_add_a_randomly_generated_key() { let keys_handler = instantiate_keys_handler(); let peer_key = keys_handler - .generate_expiring_peer_key(Some(Duration::from_secs(100))) + .add_peer_key(AddKeyRequest { + opt_key: None, + opt_seconds_valid: Some(100), + }) .await .unwrap(); @@ -300,14 +330,20 @@ mod tests { } } - mod pre_generated_keys { + mod pre_generated { + use std::sync::Arc; use std::time::Duration; use torrust_tracker_clock::clock::Time; - - use crate::authentication::handler::tests::the_keys_handler_when_tracker_is_configured_as_private::instantiate_keys_handler; - use crate::authentication::handler::AddKeyRequest; - use crate::authentication::Key; + use torrust_tracker_test_helpers::configuration; + + use crate::authentication::handler::tests::the_keys_handler_when_the_tracker_is_configured_as_private::instantiate_keys_handler; + use crate::authentication::handler::{AddKeyRequest, KeysHandler}; + use crate::authentication::key::repository::in_memory::InMemoryKeyRepository; + 
use crate::authentication::key::repository::persisted::DatabaseKeyRepository; + use crate::authentication::{Key, PeerKey}; + use crate::databases::setup::initialize_database; + use crate::error::PeerKeyError; use crate::CurrentClock; #[tokio::test] @@ -323,17 +359,59 @@ mod tests { .unwrap(); assert_eq!( - peer_key.valid_until, - Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) + peer_key, + PeerKey { + key: Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), + valid_until: Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()), + } ); } + + #[tokio::test] + async fn it_should_fail_adding_a_pre_generated_key_when_the_key_is_invalid() { + let keys_handler = instantiate_keys_handler(); + + let result = keys_handler + .add_peer_key(AddKeyRequest { + opt_key: Some("INVALID KEY".to_string()), + opt_seconds_valid: Some(100), + }) + .await; + + assert!(matches!(result.unwrap_err(), PeerKeyError::InvalidKey { .. })); + } + + #[tokio::test] + async fn it_should_fail_adding_a_pre_generated_key_when_there_is_a_database_error() { + let config = configuration::ephemeral_private(); + let database = initialize_database(&config.core); + let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + + // Force database error. + // todo: extract trait for DatabaseKeyRepository to be able + // to mock it. Test should be faster if we don't have to + // create a new database. + let _unused = database.drop_database_tables(); + + let keys_handler = KeysHandler::new(&db_key_repository, &in_memory_key_repository); + + let result = keys_handler + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: Some(100), + }) + .await; + + assert!(matches!(result.unwrap_err(), PeerKeyError::DatabaseError { .. 
})); + } } } - mod with_permanent_and { + mod handling_permanent_peer_keys { mod randomly_generated_keys { - use crate::authentication::handler::tests::the_keys_handler_when_tracker_is_configured_as_private::instantiate_keys_handler; + use crate::authentication::handler::tests::the_keys_handler_when_the_tracker_is_configured_as_private::instantiate_keys_handler; #[tokio::test] async fn it_should_generate_the_key() { @@ -345,11 +423,40 @@ mod tests { } } - mod pre_generated_keys { + mod randomly_generated { - use crate::authentication::handler::tests::the_keys_handler_when_tracker_is_configured_as_private::instantiate_keys_handler; + use crate::authentication::handler::tests::the_keys_handler_when_the_tracker_is_configured_as_private::instantiate_keys_handler; use crate::authentication::handler::AddKeyRequest; - use crate::authentication::Key; + + #[tokio::test] + async fn it_should_add_a_randomly_generated_key() { + let keys_handler = instantiate_keys_handler(); + + let peer_key = keys_handler + .add_peer_key(AddKeyRequest { + opt_key: None, + opt_seconds_valid: None, + }) + .await + .unwrap(); + + assert_eq!(peer_key.valid_until, None); + } + } + + mod pre_generated_keys { + + use std::sync::Arc; + + use torrust_tracker_test_helpers::configuration; + + use crate::authentication::handler::tests::the_keys_handler_when_the_tracker_is_configured_as_private::instantiate_keys_handler; + use crate::authentication::handler::{AddKeyRequest, KeysHandler}; + use crate::authentication::key::repository::in_memory::InMemoryKeyRepository; + use crate::authentication::key::repository::persisted::DatabaseKeyRepository; + use crate::authentication::{Key, PeerKey}; + use crate::databases::setup::initialize_database; + use crate::error::PeerKeyError; #[tokio::test] async fn it_should_add_a_pre_generated_key() { @@ -363,7 +470,52 @@ mod tests { .await .unwrap(); - assert_eq!(peer_key.valid_until, None); + assert_eq!( + peer_key, + PeerKey { + key: 
Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), + valid_until: None, + } + ); + } + + #[tokio::test] + async fn it_should_fail_adding_a_pre_generated_key_when_the_key_is_invalid() { + let keys_handler = instantiate_keys_handler(); + + let result = keys_handler + .add_peer_key(AddKeyRequest { + opt_key: Some("INVALID KEY".to_string()), + opt_seconds_valid: None, + }) + .await; + + assert!(matches!(result.unwrap_err(), PeerKeyError::InvalidKey { .. })); + } + + #[tokio::test] + async fn it_should_fail_adding_a_pre_generated_key_when_there_is_a_database_error() { + let config = configuration::ephemeral_private(); + let database = initialize_database(&config.core); + let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + + // Force database error. + // todo: extract trait for DatabaseKeyRepository to be able + // to mock it. Test should be faster if we don't have to + // create a new database. + let _unused = database.drop_database_tables(); + + let keys_handler = KeysHandler::new(&db_key_repository, &in_memory_key_repository); + + let result = keys_handler + .add_peer_key(AddKeyRequest { + opt_key: Some("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".to_string()), + opt_seconds_valid: None, + }) + .await; + + assert!(matches!(result.unwrap_err(), PeerKeyError::DatabaseError { .. })); } } } From bd4cef66842aaab3f23c811362ad56ceb311d4fe Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 4 Feb 2025 08:56:31 +0000 Subject: [PATCH 211/802] refactor: [#1231] tests to use database mock in KeysHandler tests that require the `Database` trait. 
--- packages/tracker-core/Cargo.toml | 1 + .../src/authentication/handler.rs | 294 ++++++++++++------ packages/tracker-core/src/databases/mod.rs | 2 + 3 files changed, 207 insertions(+), 90 deletions(-) diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml index 7b5b1f2c2..aeea30a3e 100644 --- a/packages/tracker-core/Cargo.toml +++ b/packages/tracker-core/Cargo.toml @@ -20,6 +20,7 @@ bittorrent-http-protocol = { version = "3.0.0-develop", path = "../http-protocol bittorrent-primitives = "0.1.0" chrono = { version = "0", default-features = false, features = ["clock"] } derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } +mockall = "0" r2d2 = "0" r2d2_mysql = "25" r2d2_sqlite = { version = "0", features = ["bundled"] } diff --git a/packages/tracker-core/src/authentication/handler.rs b/packages/tracker-core/src/authentication/handler.rs index 3643e7ece..f733f5cb6 100644 --- a/packages/tracker-core/src/authentication/handler.rs +++ b/packages/tracker-core/src/authentication/handler.rs @@ -55,68 +55,73 @@ impl KeysHandler { /// - The provided pre-generated key is invalid. /// - The key could not been persisted due to database issues. 
pub async fn add_peer_key(&self, add_key_req: AddKeyRequest) -> Result { - match add_key_req.opt_key { - Some(pre_existing_key) => { - // Upload pre-generated key - - if let Some(seconds_valid) = add_key_req.opt_seconds_valid { - // Expiring key - let Some(valid_until) = CurrentClock::now_add(&Duration::from_secs(seconds_valid)) else { - return Err(PeerKeyError::DurationOverflow { seconds_valid }); - }; + if let Some(pre_existing_key) = add_key_req.opt_key { + // Pre-generated key + + if let Some(seconds_valid) = add_key_req.opt_seconds_valid { + // Expiring key + + let Some(valid_until) = CurrentClock::now_add(&Duration::from_secs(seconds_valid)) else { + return Err(PeerKeyError::DurationOverflow { seconds_valid }); + }; - let key = pre_existing_key.parse::(); - - match key { - Ok(key) => match self.add_expiring_peer_key(key, Some(valid_until)).await { - Ok(auth_key) => Ok(auth_key), - Err(err) => Err(PeerKeyError::DatabaseError { - source: Located(err).into(), - }), - }, - Err(err) => Err(PeerKeyError::InvalidKey { - key: pre_existing_key, + let key = pre_existing_key.parse::(); + + match key { + Ok(key) => match self.add_expiring_peer_key(key, Some(valid_until)).await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { source: Located(err).into(), }), - } - } else { - // Permanent key - let key = pre_existing_key.parse::(); - - match key { - Ok(key) => match self.add_permanent_peer_key(key).await { - Ok(auth_key) => Ok(auth_key), - Err(err) => Err(PeerKeyError::DatabaseError { - source: Located(err).into(), - }), - }, - Err(err) => Err(PeerKeyError::InvalidKey { - key: pre_existing_key, + }, + Err(err) => Err(PeerKeyError::InvalidKey { + key: pre_existing_key, + source: Located(err).into(), + }), + } + } else { + // Permanent key + + let key = pre_existing_key.parse::(); + + match key { + Ok(key) => match self.add_permanent_peer_key(key).await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { source: 
Located(err).into(), }), - } + }, + Err(err) => Err(PeerKeyError::InvalidKey { + key: pre_existing_key, + source: Located(err).into(), + }), } } - None => match add_key_req.opt_seconds_valid { - // Generate a new random key - Some(seconds_valid) => match self + } else { + // New randomly generate key + + if let Some(seconds_valid) = add_key_req.opt_seconds_valid { + // Expiring key + + match self .generate_expiring_peer_key(Some(Duration::from_secs(seconds_valid))) .await { - // Expiring key Ok(auth_key) => Ok(auth_key), Err(err) => Err(PeerKeyError::DatabaseError { source: Located(err).into(), }), - }, - None => match self.generate_permanent_peer_key().await { - // Permanent key + } + } else { + // Permanent key + + match self.generate_permanent_peer_key().await { Ok(auth_key) => Ok(auth_key), Err(err) => Err(PeerKeyError::DatabaseError { source: Located(err).into(), }), - }, - }, + } + } } } @@ -250,6 +255,7 @@ mod tests { use crate::authentication::key::repository::in_memory::InMemoryKeyRepository; use crate::authentication::key::repository::persisted::DatabaseKeyRepository; use crate::databases::setup::initialize_database; + use crate::databases::Database; fn instantiate_keys_handler() -> KeysHandler { let config = configuration::ephemeral_private(); @@ -268,6 +274,13 @@ mod tests { instantiate_keys_handler_with_configuration(&config) } + fn instantiate_keys_handler_with_database(database: &Arc>) -> KeysHandler { + let db_key_repository = Arc::new(DatabaseKeyRepository::new(database)); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + + KeysHandler::new(&db_key_repository, &in_memory_key_repository) + } + fn instantiate_keys_handler_with_configuration(config: &Configuration) -> KeysHandler { // todo: pass only Core configuration @@ -303,12 +316,22 @@ mod tests { } mod randomly_generated { + use std::panic::Location; + use std::sync::Arc; use std::time::Duration; - use torrust_tracker_clock::clock::Time; + use 
mockall::predicate::function; + use torrust_tracker_clock::clock::stopped::Stopped; + use torrust_tracker_clock::clock::{self, Time}; - use crate::authentication::handler::tests::the_keys_handler_when_the_tracker_is_configured_as_private::instantiate_keys_handler; + use crate::authentication::handler::tests::the_keys_handler_when_the_tracker_is_configured_as_private::{ + instantiate_keys_handler, instantiate_keys_handler_with_database, + }; use crate::authentication::handler::AddKeyRequest; + use crate::authentication::PeerKey; + use crate::databases::driver::Driver; + use crate::databases::{self, Database, MockDatabase}; + use crate::error::PeerKeyError; use crate::CurrentClock; #[tokio::test] @@ -328,21 +351,58 @@ mod tests { Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) ); } + + #[tokio::test] + async fn it_should_fail_adding_a_randomly_generated_key_when_there_is_a_database_error() { + clock::Stopped::local_set(&Duration::from_secs(0)); + + // The key should be valid the next 60 seconds. + let expected_valid_until = clock::Stopped::now_add(&Duration::from_secs(60)).unwrap(); + + let mut database_mock = MockDatabase::default(); + database_mock + .expect_add_key_to_keys() + .with(function(move |peer_key: &PeerKey| { + peer_key.valid_until == Some(expected_valid_until) + })) + .times(1) + .returning(|_peer_key| { + Err(databases::error::Error::InsertFailed { + location: Location::caller(), + driver: Driver::Sqlite3, + }) + }); + let database_mock: Arc> = Arc::new(Box::new(database_mock)); + + let keys_handler = instantiate_keys_handler_with_database(&database_mock); + + let result = keys_handler + .add_peer_key(AddKeyRequest { + opt_key: None, + opt_seconds_valid: Some(60), // The key is valid for 60 seconds. + }) + .await; + + assert!(matches!(result.unwrap_err(), PeerKeyError::DatabaseError { .. 
})); + } } mod pre_generated { + use std::panic::Location; use std::sync::Arc; use std::time::Duration; - use torrust_tracker_clock::clock::Time; - use torrust_tracker_test_helpers::configuration; + use mockall::predicate; + use torrust_tracker_clock::clock::stopped::Stopped; + use torrust_tracker_clock::clock::{self, Time}; - use crate::authentication::handler::tests::the_keys_handler_when_the_tracker_is_configured_as_private::instantiate_keys_handler; - use crate::authentication::handler::{AddKeyRequest, KeysHandler}; - use crate::authentication::key::repository::in_memory::InMemoryKeyRepository; - use crate::authentication::key::repository::persisted::DatabaseKeyRepository; + use crate::authentication::handler::tests::the_keys_handler_when_the_tracker_is_configured_as_private::{ + instantiate_keys_handler, instantiate_keys_handler_with_database, + }; + use crate::authentication::handler::AddKeyRequest; use crate::authentication::{Key, PeerKey}; - use crate::databases::setup::initialize_database; + use crate::databases::driver::Driver; + use crate::databases::{self, Database, MockDatabase}; use crate::error::PeerKeyError; use crate::CurrentClock; @@ -383,23 +443,34 @@ mod tests { #[tokio::test] async fn it_should_fail_adding_a_pre_generated_key_when_there_is_a_database_error() { - let config = configuration::ephemeral_private(); - let database = initialize_database(&config.core); - let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); - let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + clock::Stopped::local_set(&Duration::from_secs(0)); - // Force database error. - // todo: extract trait for DatabaseKeyRepository to be able - // to mock it. Test should be faster if we don't have to - // create a new database. - let _unused = database.drop_database_tables(); + // The key should be valid the next 60 seconds. 
+ let expected_valid_until = clock::Stopped::now_add(&Duration::from_secs(60)).unwrap(); + let expected_peer_key = PeerKey { + key: Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), + valid_until: Some(expected_valid_until), + }; - let keys_handler = KeysHandler::new(&db_key_repository, &in_memory_key_repository); + let mut database_mock = MockDatabase::default(); + database_mock + .expect_add_key_to_keys() + .with(predicate::eq(expected_peer_key)) + .times(1) + .returning(|_peer_key| { + Err(databases::error::Error::InsertFailed { + location: Location::caller(), + driver: Driver::Sqlite3, + }) + }); + let database_mock: Arc> = Arc::new(Box::new(database_mock)); + + let keys_handler = instantiate_keys_handler_with_database(&database_mock); let result = keys_handler .add_peer_key(AddKeyRequest { opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), - opt_seconds_valid: Some(100), + opt_seconds_valid: Some(60), // The key is valid for 60 seconds. }) .await; @@ -410,8 +481,21 @@ mod tests { mod handling_permanent_peer_keys { - mod randomly_generated_keys { - use crate::authentication::handler::tests::the_keys_handler_when_the_tracker_is_configured_as_private::instantiate_keys_handler; + mod randomly_generated { + + use std::panic::Location; + use std::sync::Arc; + + use mockall::predicate::function; + + use crate::authentication::handler::tests::the_keys_handler_when_the_tracker_is_configured_as_private::{ + instantiate_keys_handler, instantiate_keys_handler_with_database, + }; + use crate::authentication::handler::AddKeyRequest; + use crate::authentication::PeerKey; + use crate::databases::driver::Driver; + use crate::databases::{self, Database, MockDatabase}; + use crate::error::PeerKeyError; #[tokio::test] async fn it_should_generate_the_key() { @@ -421,12 +505,6 @@ mod tests { assert_eq!(peer_key.valid_until, None); } - } - - mod randomly_generated { - - use 
crate::authentication::handler::tests::the_keys_handler_when_the_tracker_is_configured_as_private::instantiate_keys_handler; - use crate::authentication::handler::AddKeyRequest; #[tokio::test] async fn it_should_add_a_randomly_generated_key() { @@ -442,20 +520,49 @@ mod tests { assert_eq!(peer_key.valid_until, None); } + + #[tokio::test] + async fn it_should_fail_adding_a_randomly_generated_key_when_there_is_a_database_error() { + let mut database_mock = MockDatabase::default(); + database_mock + .expect_add_key_to_keys() + .with(function(move |peer_key: &PeerKey| peer_key.valid_until.is_none())) + .times(1) + .returning(|_peer_key| { + Err(databases::error::Error::InsertFailed { + location: Location::caller(), + driver: Driver::Sqlite3, + }) + }); + let database_mock: Arc> = Arc::new(Box::new(database_mock)); + + let keys_handler = instantiate_keys_handler_with_database(&database_mock); + + let result = keys_handler + .add_peer_key(AddKeyRequest { + opt_key: None, + opt_seconds_valid: None, + }) + .await; + + assert!(matches!(result.unwrap_err(), PeerKeyError::DatabaseError { .. 
})); + } } mod pre_generated_keys { + use std::panic::Location; use std::sync::Arc; - use torrust_tracker_test_helpers::configuration; + use mockall::predicate; - use crate::authentication::handler::tests::the_keys_handler_when_the_tracker_is_configured_as_private::instantiate_keys_handler; - use crate::authentication::handler::{AddKeyRequest, KeysHandler}; - use crate::authentication::key::repository::in_memory::InMemoryKeyRepository; - use crate::authentication::key::repository::persisted::DatabaseKeyRepository; + use crate::authentication::handler::tests::the_keys_handler_when_the_tracker_is_configured_as_private::{ + instantiate_keys_handler, instantiate_keys_handler_with_database, + }; + use crate::authentication::handler::AddKeyRequest; use crate::authentication::{Key, PeerKey}; - use crate::databases::setup::initialize_database; + use crate::databases::driver::Driver; + use crate::databases::{self, Database, MockDatabase}; use crate::error::PeerKeyError; #[tokio::test] @@ -495,22 +602,29 @@ mod tests { #[tokio::test] async fn it_should_fail_adding_a_pre_generated_key_when_there_is_a_database_error() { - let config = configuration::ephemeral_private(); - let database = initialize_database(&config.core); - let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); - let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - - // Force database error. - // todo: extract trait for DatabaseKeyRepository to be able - // to mock it. Test should be faster if we don't have to - // create a new database. 
- let _unused = database.drop_database_tables(); + let expected_peer_key = PeerKey { + key: Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), + valid_until: None, + }; - let keys_handler = KeysHandler::new(&db_key_repository, &in_memory_key_repository); + let mut database_mock = MockDatabase::default(); + database_mock + .expect_add_key_to_keys() + .with(predicate::eq(expected_peer_key)) + .times(1) + .returning(|_peer_key| { + Err(databases::error::Error::InsertFailed { + location: Location::caller(), + driver: Driver::Sqlite3, + }) + }); + let database_mock: Arc> = Arc::new(Box::new(database_mock)); + + let keys_handler = instantiate_keys_handler_with_database(&database_mock); let result = keys_handler .add_peer_key(AddKeyRequest { - opt_key: Some("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".to_string()), + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), opt_seconds_valid: None, }) .await; diff --git a/packages/tracker-core/src/databases/mod.rs b/packages/tracker-core/src/databases/mod.rs index 9b9ac8e9e..f0930d05d 100644 --- a/packages/tracker-core/src/databases/mod.rs +++ b/packages/tracker-core/src/databases/mod.rs @@ -52,6 +52,7 @@ pub mod sqlite; use std::marker::PhantomData; use bittorrent_primitives::info_hash::InfoHash; +use mockall::automock; use torrust_tracker_primitives::PersistentTorrents; use self::error::Error; @@ -79,6 +80,7 @@ where } /// The persistence trait. It contains all the methods to interact with the database. +#[automock] pub trait Database: Sync + Send { /// It instantiates a new database driver. 
/// From c3117cfb961610568ab2f558a3d55f63d4dce223 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 4 Feb 2025 12:17:13 +0000 Subject: [PATCH 212/802] test: [#1231] add more tests for KeysHandler --- .../tracker-core/src/authentication/handler.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/packages/tracker-core/src/authentication/handler.rs b/packages/tracker-core/src/authentication/handler.rs index f733f5cb6..3b708a516 100644 --- a/packages/tracker-core/src/authentication/handler.rs +++ b/packages/tracker-core/src/authentication/handler.rs @@ -427,6 +427,20 @@ mod tests { ); } + #[tokio::test] + async fn it_should_fail_adding_a_pre_generated_key_when_the_key_duration_exceeds_the_maximum_duration() { + let keys_handler = instantiate_keys_handler(); + + let result = keys_handler + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: Some(u64::MAX), + }) + .await; + + assert!(matches!(result.unwrap_err(), PeerKeyError::DurationOverflow { .. 
})); + } + #[tokio::test] async fn it_should_fail_adding_a_pre_generated_key_when_the_key_is_invalid() { let keys_handler = instantiate_keys_handler(); From 63e773afca456ffba74683ec890f364974233d17 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 4 Feb 2025 12:23:54 +0000 Subject: [PATCH 213/802] refactor: [#1231] remove dead code --- packages/tracker-core/src/authentication/handler.rs | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/packages/tracker-core/src/authentication/handler.rs b/packages/tracker-core/src/authentication/handler.rs index 3b708a516..f758830ac 100644 --- a/packages/tracker-core/src/authentication/handler.rs +++ b/packages/tracker-core/src/authentication/handler.rs @@ -247,7 +247,6 @@ mod tests { use std::sync::Arc; - use torrust_tracker_configuration::v2_0_0::core::PrivateMode; use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; @@ -263,17 +262,6 @@ mod tests { instantiate_keys_handler_with_configuration(&config) } - #[allow(dead_code)] - fn instantiate_keys_handler_with_checking_keys_expiration_disabled() -> KeysHandler { - let mut config = configuration::ephemeral_private(); - - config.core.private_mode = Some(PrivateMode { - check_keys_expiration: false, - }); - - instantiate_keys_handler_with_configuration(&config) - } - fn instantiate_keys_handler_with_database(database: &Arc>) -> KeysHandler { let db_key_repository = Arc::new(DatabaseKeyRepository::new(database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); From 7d8b3948c8a977935b01affa7f4d061dec853d93 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 4 Feb 2025 12:44:07 +0000 Subject: [PATCH 214/802] test: [#1231] add tests for AuthenticationService --- .../src/authentication/service.rs | 202 ++++++++++++++++-- 1 file changed, 183 insertions(+), 19 deletions(-) diff --git a/packages/tracker-core/src/authentication/service.rs b/packages/tracker-core/src/authentication/service.rs index 
3e32bfbcb..98b8a3987 100644 --- a/packages/tracker-core/src/authentication/service.rs +++ b/packages/tracker-core/src/authentication/service.rs @@ -31,7 +31,7 @@ impl AuthenticationService { /// /// Will return an error if the the authentication key cannot be verified. pub async fn authenticate(&self, key: &Key) -> Result<(), Error> { - if self.is_private() { + if self.tracker_is_private() { self.verify_auth_key(key).await } else { Ok(()) @@ -40,7 +40,7 @@ impl AuthenticationService { /// Returns `true` is the tracker is in private mode. #[must_use] - pub fn is_private(&self) -> bool { + fn tracker_is_private(&self) -> bool { self.config.private } @@ -72,34 +72,198 @@ impl AuthenticationService { #[cfg(test)] mod tests { - mod the_tracker_configured_as_private { + mod the_authentication_service { - use std::str::FromStr; - use std::sync::Arc; + mod when_the_tracker_is_public { - use torrust_tracker_test_helpers::configuration; + use std::str::FromStr; + use std::sync::Arc; - use crate::authentication; - use crate::authentication::key::repository::in_memory::InMemoryKeyRepository; - use crate::authentication::service::AuthenticationService; + use torrust_tracker_configuration::Core; - fn instantiate_authentication() -> AuthenticationService { - let config = configuration::ephemeral_private(); + use crate::authentication::key::repository::in_memory::InMemoryKeyRepository; + use crate::authentication::service::AuthenticationService; + use crate::authentication::{self}; - let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + fn instantiate_authentication_for_public_tracker() -> AuthenticationService { + let config = Core { + private: false, + ..Default::default() + }; - AuthenticationService::new(&config.core, &in_memory_key_repository.clone()) + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + + AuthenticationService::new(&config, &in_memory_key_repository.clone()) + } + + #[tokio::test] + async fn 
it_should_always_authenticate_when_the_tracker_is_public() { + let authentication = instantiate_authentication_for_public_tracker(); + + let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let result = authentication.authenticate(&unregistered_key).await; + + assert!(result.is_ok()); + } } - #[tokio::test] - async fn it_should_not_authenticate_an_unregistered_key() { - let authentication = instantiate_authentication(); + mod when_the_tracker_is_private { + + use std::str::FromStr; + use std::sync::Arc; + use std::time::Duration; + + use torrust_tracker_configuration::v2_0_0::core::PrivateMode; + use torrust_tracker_configuration::Core; + + use crate::authentication::key::repository::in_memory::InMemoryKeyRepository; + use crate::authentication::service::AuthenticationService; + use crate::authentication::{self, PeerKey}; + + fn instantiate_authentication_for_private_tracker() -> AuthenticationService { + let config = Core { + private: true, + ..Default::default() + }; + + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + + AuthenticationService::new(&config, &in_memory_key_repository.clone()) + } + + #[tokio::test] + async fn it_should_authenticate_a_registered_key() { + let config = Core { + private: true, + ..Default::default() + }; + + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + + let key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + in_memory_key_repository + .insert(&PeerKey { + key: key.clone(), + valid_until: None, + }) + .await; + + let authentication = AuthenticationService::new(&config, &in_memory_key_repository.clone()); + + let result = authentication.authenticate(&key).await; + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn it_should_not_authenticate_an_unregistered_key() { + let authentication = instantiate_authentication_for_private_tracker(); + + let unregistered_key = 
authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + let result = authentication.authenticate(&unregistered_key).await; + + assert!(result.is_err()); + } - let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + #[tokio::test] + async fn it_should_not_authenticate_a_registered_but_expired_key_by_default() { + let config = Core { + private: true, + ..Default::default() + }; - let result = authentication.authenticate(&unregistered_key).await; + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - assert!(result.is_err()); + let key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + // Register the key with an immediate expiration date. + in_memory_key_repository + .insert(&PeerKey { + key: key.clone(), + valid_until: Some(Duration::from_secs(0)), + }) + .await; + + let authentication = AuthenticationService::new(&config, &in_memory_key_repository.clone()); + + let result = authentication.authenticate(&key).await; + + assert!(result.is_err()); + } + + #[tokio::test] + async fn it_should_not_authenticate_a_registered_but_expired_key_when_the_tracker_is_explicitly_configured_to_check_keys_expiration() { + let config = Core { + private: true, + private_mode: Some(PrivateMode { + check_keys_expiration: true, + }), + ..Default::default() + }; + + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + + let key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + // Register the key with an immediate expiration date. 
+ in_memory_key_repository + .insert(&PeerKey { + key: key.clone(), + valid_until: Some(Duration::from_secs(0)), + }) + .await; + + let authentication = AuthenticationService::new(&config, &in_memory_key_repository.clone()); + + let result = authentication.authenticate(&key).await; + + assert!(result.is_err()); + } + + mod but_the_key_expiration_check_is_disabled_by_configuration { + use std::str::FromStr; + use std::sync::Arc; + use std::time::Duration; + + use torrust_tracker_configuration::v2_0_0::core::PrivateMode; + use torrust_tracker_configuration::Core; + + use crate::authentication::key::repository::in_memory::InMemoryKeyRepository; + use crate::authentication::service::AuthenticationService; + use crate::authentication::{self, PeerKey}; + + #[tokio::test] + async fn it_should_authenticate_an_expired_registered_key() { + let config = Core { + private: true, + private_mode: Some(PrivateMode { + check_keys_expiration: false, + }), + ..Default::default() + }; + + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + + let key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + + // Register the key with an immediate expiration date. 
+ in_memory_key_repository + .insert(&PeerKey { + key: key.clone(), + valid_until: Some(Duration::from_secs(0)), + }) + .await; + + let authentication = AuthenticationService::new(&config, &in_memory_key_repository.clone()); + + let result = authentication.authenticate(&key).await; + + assert!(result.is_ok()); + } + } } } } From 3e02b48c676c2355547bf8292fbd5a4ac6c8a349 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 4 Feb 2025 13:18:05 +0000 Subject: [PATCH 215/802] test: [#1231] add more tests for authentication::service mod --- .../src/authentication/key/mod.rs | 21 ++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/packages/tracker-core/src/authentication/key/mod.rs b/packages/tracker-core/src/authentication/key/mod.rs index bdb72b1cf..33b3b6099 100644 --- a/packages/tracker-core/src/authentication/key/mod.rs +++ b/packages/tracker-core/src/authentication/key/mod.rs @@ -104,10 +104,8 @@ pub fn generate_key(lifetime: Option) -> PeerKey { /// /// # Errors /// -/// Will return: -/// -/// - `Error::KeyExpired` if `auth_key.valid_until` is past the `current_time`. -/// - `Error::KeyInvalid` if `auth_key.valid_until` is past the `None`. +/// Will return a verification error [`crate::authentication::key::Error`] if +/// it cannot verify the key. pub fn verify_key_expiration(auth_key: &PeerKey) -> Result<(), Error> { let current_time: DurationSinceUnixEpoch = CurrentClock::now(); @@ -126,7 +124,7 @@ pub fn verify_key_expiration(auth_key: &PeerKey) -> Result<(), Error> { } /// Verification error. Error returned when an [`PeerKey`] cannot be -/// verified with the (`crate::authentication::verify_key`) function. +/// verified with the [`crate::authentication::key::verify_key_expiration`] function. 
#[derive(Debug, Error)] #[allow(dead_code)] pub enum Error { @@ -244,4 +242,17 @@ mod tests { assert!(authentication::key::verify_key_expiration(&expiring_key).is_ok()); } } + + mod the_key_verification_error { + use crate::authentication::key; + + #[test] + fn could_be_a_database_error() { + let err = r2d2_sqlite::rusqlite::Error::InvalidQuery; + + let err: key::Error = err.into(); + + assert!(matches!(err, key::Error::KeyVerificationError { .. })); + } + } } From 3d89c7f8fcfd55f44692ec3163fab032426a72b7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 4 Feb 2025 13:20:04 +0000 Subject: [PATCH 216/802] fix: [#1231] lint errors --- packages/tracker-core/src/authentication/service.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/tracker-core/src/authentication/service.rs b/packages/tracker-core/src/authentication/service.rs index 98b8a3987..5ca0a09ec 100644 --- a/packages/tracker-core/src/authentication/service.rs +++ b/packages/tracker-core/src/authentication/service.rs @@ -195,7 +195,8 @@ mod tests { } #[tokio::test] - async fn it_should_not_authenticate_a_registered_but_expired_key_when_the_tracker_is_explicitly_configured_to_check_keys_expiration() { + async fn it_should_not_authenticate_a_registered_but_expired_key_when_the_tracker_is_explicitly_configured_to_check_keys_expiration( + ) { let config = Core { private: true, private_mode: Some(PrivateMode { From b4a4250256c6a5511301597cae42547f070c62d3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 4 Feb 2025 16:45:13 +0000 Subject: [PATCH 217/802] test: [#1235] add tests for DatabaseWhitelist --- .../src/whitelist/repository/persisted.rs | 91 +++++++++++++++++++ 1 file changed, 91 insertions(+) diff --git a/packages/tracker-core/src/whitelist/repository/persisted.rs b/packages/tracker-core/src/whitelist/repository/persisted.rs index c3c4a2601..a54274f16 100644 --- a/packages/tracker-core/src/whitelist/repository/persisted.rs +++ 
b/packages/tracker-core/src/whitelist/repository/persisted.rs @@ -60,3 +60,94 @@ impl DatabaseWhitelist { self.database.load_whitelist() } } + +#[cfg(test)] +mod tests { + mod the_persisted_whitelist_repository { + + use torrust_tracker_configuration::Core; + use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; + + use crate::core_tests::sample_info_hash; + use crate::databases::setup::initialize_database; + use crate::whitelist::repository::persisted::DatabaseWhitelist; + + fn initialize_database_whitelist() -> DatabaseWhitelist { + let configuration = ephemeral_configuration_for_listed_tracker(); + let database = initialize_database(&configuration); + DatabaseWhitelist::new(database) + } + + fn ephemeral_configuration_for_listed_tracker() -> Core { + let mut config = Core { + listed: true, + ..Default::default() + }; + + let temp_file = ephemeral_sqlite_database(); + temp_file.to_str().unwrap().clone_into(&mut config.database.path); + + config + } + + #[test] + fn should_add_a_new_infohash_to_the_list() { + let whitelist = initialize_database_whitelist(); + + let infohash = sample_info_hash(); + + let _result = whitelist.add(&infohash); + + assert_eq!(whitelist.load_from_database().unwrap(), vec!(infohash)); + } + + #[test] + fn should_remove_a_infohash_from_the_list() { + let whitelist = initialize_database_whitelist(); + + let infohash = sample_info_hash(); + + let _result = whitelist.add(&infohash); + + let _result = whitelist.remove(&infohash); + + assert_eq!(whitelist.load_from_database().unwrap(), vec!()); + } + + #[test] + fn should_load_all_infohashes_from_the_database() { + let whitelist = initialize_database_whitelist(); + + let infohash = sample_info_hash(); + + let _result = whitelist.add(&infohash); + + let result = whitelist.load_from_database().unwrap(); + + assert_eq!(result, vec!(infohash)); + } + + #[test] + fn should_not_add_the_same_infohash_to_the_list_twice() { + let whitelist = initialize_database_whitelist(); + + let 
infohash = sample_info_hash(); + + let _result = whitelist.add(&infohash); + let _result = whitelist.add(&infohash); + + assert_eq!(whitelist.load_from_database().unwrap(), vec!(infohash)); + } + + #[test] + fn should_not_fail_removing_an_infohash_that_is_not_in_the_list() { + let whitelist = initialize_database_whitelist(); + + let infohash = sample_info_hash(); + + let result = whitelist.remove(&infohash); + + assert!(result.is_ok()); + } + } +} From 933b6b0ef6f4c9afc48c31b8882b50bd9539e987 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 4 Feb 2025 17:30:47 +0000 Subject: [PATCH 218/802] test: [#1235] add tests for WhitelistAuthorization --- .../src/whitelist/authorization.rs | 94 +++++++++++++++++-- 1 file changed, 84 insertions(+), 10 deletions(-) diff --git a/packages/tracker-core/src/whitelist/authorization.rs b/packages/tracker-core/src/whitelist/authorization.rs index 285f6613e..cb5f4acbf 100644 --- a/packages/tracker-core/src/whitelist/authorization.rs +++ b/packages/tracker-core/src/whitelist/authorization.rs @@ -61,33 +61,107 @@ impl WhitelistAuthorization { #[cfg(test)] mod tests { - mod configured_as_whitelisted { + mod the_whitelist_authorization_for_announce_and_scrape_actions { + use std::sync::Arc; + + use torrust_tracker_configuration::Core; + + use crate::whitelist::authorization::WhitelistAuthorization; + use crate::whitelist::repository::in_memory::InMemoryWhitelist; + + fn initialize_whitelist_authorization_with(config: &Core) -> Arc { + let (whitelist_authorization, _in_memory_whitelist) = + initialize_whitelist_authorization_and_dependencies_with(config); + whitelist_authorization + } + + fn initialize_whitelist_authorization_and_dependencies_with( + config: &Core, + ) -> (Arc, Arc) { + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(config, &in_memory_whitelist.clone())); + + (whitelist_authorization, in_memory_whitelist) + } + + mod 
when_the_tacker_is_configured_as_listed { + + use torrust_tracker_configuration::Core; - mod handling_authorization { use crate::core_tests::sample_info_hash; - use crate::whitelist::whitelist_tests::initialize_whitelist_services_for_listed_tracker; + use crate::error::Error; + use crate::whitelist::authorization::tests::the_whitelist_authorization_for_announce_and_scrape_actions::{ + initialize_whitelist_authorization_and_dependencies_with, initialize_whitelist_authorization_with, + }; + + fn configuration_for_listed_tracker() -> Core { + Core { + listed: true, + ..Default::default() + } + } #[tokio::test] - async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { - let (whitelist_authorization, whitelist_manager) = initialize_whitelist_services_for_listed_tracker(); + async fn should_authorize_a_whitelisted_infohash() { + let (whitelist_authorization, in_memory_whitelist) = + initialize_whitelist_authorization_and_dependencies_with(&configuration_for_listed_tracker()); let info_hash = sample_info_hash(); - let result = whitelist_manager.add_torrent_to_whitelist(&info_hash).await; - assert!(result.is_ok()); + let _unused = in_memory_whitelist.add(&info_hash).await; let result = whitelist_authorization.authorize(&info_hash).await; + assert!(result.is_ok()); } #[tokio::test] - async fn it_should_not_authorize_the_announce_and_scrape_actions_on_not_whitelisted_torrents() { - let (whitelist_authorization, _whitelist_manager) = initialize_whitelist_services_for_listed_tracker(); + async fn should_not_authorize_a_non_whitelisted_infohash() { + let whitelist_authorization = initialize_whitelist_authorization_with(&configuration_for_listed_tracker()); + + let result = whitelist_authorization.authorize(&sample_info_hash()).await; + + assert!(matches!(result.unwrap_err(), Error::TorrentNotWhitelisted { .. 
})); + } + } + + mod when_the_tacker_is_not_configured_as_listed { + + use torrust_tracker_configuration::Core; + + use crate::core_tests::sample_info_hash; + use crate::whitelist::authorization::tests::the_whitelist_authorization_for_announce_and_scrape_actions::{ + initialize_whitelist_authorization_and_dependencies_with, initialize_whitelist_authorization_with, + }; + + fn configuration_for_non_listed_tracker() -> Core { + Core { + listed: false, + ..Default::default() + } + } + + #[tokio::test] + async fn should_authorize_a_whitelisted_infohash() { + let (whitelist_authorization, in_memory_whitelist) = + initialize_whitelist_authorization_and_dependencies_with(&configuration_for_non_listed_tracker()); let info_hash = sample_info_hash(); + let _unused = in_memory_whitelist.add(&info_hash).await; + let result = whitelist_authorization.authorize(&info_hash).await; - assert!(result.is_err()); + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn should_also_authorize_a_non_whitelisted_infohash() { + let whitelist_authorization = initialize_whitelist_authorization_with(&configuration_for_non_listed_tracker()); + + let result = whitelist_authorization.authorize(&sample_info_hash()).await; + + assert!(result.is_ok()); } } } From 1735dfce6e5d44431ca568e0c330e2a18cd37167 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 4 Feb 2025 17:39:28 +0000 Subject: [PATCH 219/802] refactor: [#1235] remove unused methods --- packages/tracker-core/src/whitelist/manager.rs | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/packages/tracker-core/src/whitelist/manager.rs b/packages/tracker-core/src/whitelist/manager.rs index c78a59470..e810f170e 100644 --- a/packages/tracker-core/src/whitelist/manager.rs +++ b/packages/tracker-core/src/whitelist/manager.rs @@ -48,20 +48,6 @@ impl WhitelistManager { Ok(()) } - /// It removes a torrent from the whitelist in the database. 
- /// - /// # Errors - /// - /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. - pub fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.database_whitelist.remove(info_hash) - } - - /// It adds a torrent from the whitelist in memory. - pub async fn add_torrent_to_memory_whitelist(&self, info_hash: &InfoHash) -> bool { - self.in_memory_whitelist.add(info_hash).await - } - /// It removes a torrent from the whitelist in memory. pub async fn remove_torrent_from_memory_whitelist(&self, info_hash: &InfoHash) -> bool { self.in_memory_whitelist.remove(info_hash).await From f32f0bfc1bab808f0209a7af5454cac935a1d99b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 5 Feb 2025 08:45:31 +0000 Subject: [PATCH 220/802] refactor: [#1235] remove pun method only used for testing Now that we inject dependencies we can write assert using the dependencies instead of exposing public methods. 
--- packages/tracker-core/src/core_tests.rs | 21 +++++++ .../tracker-core/src/whitelist/manager.rs | 55 +++++++++++++------ .../src/whitelist/repository/persisted.rs | 17 +----- src/bootstrap/app.rs | 1 + src/container.rs | 2 + tests/servers/api/environment.rs | 5 ++ .../api/v1/contract/context/whitelist.rs | 12 +--- 7 files changed, 70 insertions(+), 43 deletions(-) diff --git a/packages/tracker-core/src/core_tests.rs b/packages/tracker-core/src/core_tests.rs index f6b47acd0..ac99770d4 100644 --- a/packages/tracker-core/src/core_tests.rs +++ b/packages/tracker-core/src/core_tests.rs @@ -5,8 +5,12 @@ use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::Configuration; +#[cfg(test)] +use torrust_tracker_configuration::Core; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; +#[cfg(test)] +use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; use super::announce_handler::AnnounceHandler; use super::databases::setup::initialize_database; @@ -103,3 +107,20 @@ pub fn initialize_handlers(config: &Configuration) -> (Arc, Arc (announce_handler, scrape_handler) } + +/// # Panics +/// +/// Will panic if the temporary file path is not a valid UFT string. 
+#[cfg(test)] +#[must_use] +pub fn ephemeral_configuration_for_listed_tracker() -> Core { + let mut config = Core { + listed: true, + ..Default::default() + }; + + let temp_file = ephemeral_sqlite_database(); + temp_file.to_str().unwrap().clone_into(&mut config.database.path); + + config +} diff --git a/packages/tracker-core/src/whitelist/manager.rs b/packages/tracker-core/src/whitelist/manager.rs index e810f170e..9d2ba249b 100644 --- a/packages/tracker-core/src/whitelist/manager.rs +++ b/packages/tracker-core/src/whitelist/manager.rs @@ -53,11 +53,6 @@ impl WhitelistManager { self.in_memory_whitelist.remove(info_hash).await } - /// It checks if a torrent is whitelisted. - pub async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { - self.in_memory_whitelist.contains(info_hash).await - } - /// It loads the whitelist from the database. /// /// # Errors @@ -81,17 +76,41 @@ mod tests { use std::sync::Arc; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker_configuration::Core; + use crate::core_tests::ephemeral_configuration_for_listed_tracker; + use crate::databases::setup::initialize_database; + use crate::databases::Database; use crate::whitelist::manager::WhitelistManager; - use crate::whitelist::whitelist_tests::initialize_whitelist_services; + use crate::whitelist::repository::in_memory::InMemoryWhitelist; + use crate::whitelist::repository::persisted::DatabaseWhitelist; - fn initialize_whitelist_manager_for_whitelisted_tracker() -> Arc { - let config = configuration::ephemeral_listed(); + struct WhitelistManagerDeps { + pub _database: Arc>, + pub _database_whitelist: Arc, + pub in_memory_whitelist: Arc, + } - let (_whitelist_authorization, whitelist_manager) = initialize_whitelist_services(&config); + fn initialize_whitelist_manager_for_whitelisted_tracker() -> (Arc, Arc) { + let config = ephemeral_configuration_for_listed_tracker(); + initialize_whitelist_manager_and_deps(&config) + } - whitelist_manager + fn 
initialize_whitelist_manager_and_deps(config: &Core) -> (Arc, Arc) { + let database = initialize_database(config); + let database_whitelist = Arc::new(DatabaseWhitelist::new(database.clone())); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + + let whitelist_manager = Arc::new(WhitelistManager::new(database_whitelist.clone(), in_memory_whitelist.clone())); + + ( + whitelist_manager, + Arc::new(WhitelistManagerDeps { + _database: database, + _database_whitelist: database_whitelist, + in_memory_whitelist, + }), + ) } mod configured_as_whitelisted { @@ -102,18 +121,18 @@ mod tests { #[tokio::test] async fn it_should_add_a_torrent_to_the_whitelist() { - let whitelist_manager = initialize_whitelist_manager_for_whitelisted_tracker(); + let (whitelist_manager, services) = initialize_whitelist_manager_for_whitelisted_tracker(); let info_hash = sample_info_hash(); whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); - assert!(whitelist_manager.is_info_hash_whitelisted(&info_hash).await); + assert!(services.in_memory_whitelist.contains(&info_hash).await); } #[tokio::test] async fn it_should_remove_a_torrent_from_the_whitelist() { - let whitelist_manager = initialize_whitelist_manager_for_whitelisted_tracker(); + let (whitelist_manager, services) = initialize_whitelist_manager_for_whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -121,7 +140,7 @@ mod tests { whitelist_manager.remove_torrent_from_whitelist(&info_hash).await.unwrap(); - assert!(!whitelist_manager.is_info_hash_whitelisted(&info_hash).await); + assert!(!services.in_memory_whitelist.contains(&info_hash).await); } mod persistence { @@ -130,7 +149,7 @@ mod tests { #[tokio::test] async fn it_should_load_the_whitelist_from_the_database() { - let whitelist_manager = initialize_whitelist_manager_for_whitelisted_tracker(); + let (whitelist_manager, services) = initialize_whitelist_manager_for_whitelisted_tracker(); let info_hash = sample_info_hash(); @@ -138,11 +157,11 
@@ mod tests { whitelist_manager.remove_torrent_from_memory_whitelist(&info_hash).await; - assert!(!whitelist_manager.is_info_hash_whitelisted(&info_hash).await); + assert!(!services.in_memory_whitelist.contains(&info_hash).await); whitelist_manager.load_whitelist_from_database().await.unwrap(); - assert!(whitelist_manager.is_info_hash_whitelisted(&info_hash).await); + assert!(services.in_memory_whitelist.contains(&info_hash).await); } } } diff --git a/packages/tracker-core/src/whitelist/repository/persisted.rs b/packages/tracker-core/src/whitelist/repository/persisted.rs index a54274f16..5101b5e35 100644 --- a/packages/tracker-core/src/whitelist/repository/persisted.rs +++ b/packages/tracker-core/src/whitelist/repository/persisted.rs @@ -65,10 +65,7 @@ impl DatabaseWhitelist { mod tests { mod the_persisted_whitelist_repository { - use torrust_tracker_configuration::Core; - use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; - - use crate::core_tests::sample_info_hash; + use crate::core_tests::{ephemeral_configuration_for_listed_tracker, sample_info_hash}; use crate::databases::setup::initialize_database; use crate::whitelist::repository::persisted::DatabaseWhitelist; @@ -78,18 +75,6 @@ mod tests { DatabaseWhitelist::new(database) } - fn ephemeral_configuration_for_listed_tracker() -> Core { - let mut config = Core { - listed: true, - ..Default::default() - }; - - let temp_file = ephemeral_sqlite_database(); - temp_file.to_str().unwrap().clone_into(&mut config.database.path); - - config - } - #[test] fn should_add_a_new_infohash_to_the_list() { let whitelist = initialize_database_whitelist(); diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index e0e81c70c..0236215f2 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -141,6 +141,7 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { scrape_handler, keys_handler, authentication_service, + in_memory_whitelist, whitelist_authorization, 
ban_service, http_stats_event_sender, diff --git a/src/container.rs b/src/container.rs index 51c55e533..47cc39ed3 100644 --- a/src/container.rs +++ b/src/container.rs @@ -10,6 +10,7 @@ use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepo use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist; use bittorrent_tracker_core::whitelist::manager::WhitelistManager; +use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, HttpApi, HttpTracker, UdpTracker}; @@ -23,6 +24,7 @@ pub struct AppContainer { pub scrape_handler: Arc, pub keys_handler: Arc, pub authentication_service: Arc, + pub in_memory_whitelist: Arc, pub whitelist_authorization: Arc, pub ban_service: Arc>, pub http_stats_event_sender: Arc>>, diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 61351024d..02d6465e1 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -4,6 +4,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::Database; +use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::executor::block_on; use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; use torrust_tracker_configuration::Configuration; @@ -22,6 +23,7 @@ where pub database: Arc>, pub authentication_service: Arc, + pub in_memory_whitelist: Arc, pub registar: Registar, pub server: ApiServer, @@ -70,6 +72,7 @@ impl Environment { database: app_container.database.clone(), authentication_service: app_container.authentication_service.clone(), + in_memory_whitelist: app_container.in_memory_whitelist.clone(), registar: Registar::default(), server, @@ -84,6 +87,7 @@ impl Environment { 
database: self.database.clone(), authentication_service: self.authentication_service.clone(), + in_memory_whitelist: self.in_memory_whitelist.clone(), registar: self.registar.clone(), server: self @@ -106,6 +110,7 @@ impl Environment { database: self.database, authentication_service: self.authentication_service, + in_memory_whitelist: self.in_memory_whitelist, registar: Registar::default(), server: self.server.stop().await.unwrap(), diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs index 945cb00b5..ca359650f 100644 --- a/tests/servers/api/v1/contract/context/whitelist.rs +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -31,9 +31,8 @@ async fn should_allow_whitelisting_a_torrent() { assert_ok(response).await; assert!( - env.http_api_container - .whitelist_manager - .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) + env.in_memory_whitelist + .contains(&InfoHash::from_str(&info_hash).unwrap()) .await ); @@ -181,12 +180,7 @@ async fn should_allow_removing_a_torrent_from_the_whitelist() { .await; assert_ok(response).await; - assert!( - !env.http_api_container - .whitelist_manager - .is_info_hash_whitelisted(&info_hash) - .await - ); + assert!(!env.in_memory_whitelist.contains(&info_hash).await); env.stop().await; } From 2862c7706c0753dcd1b5c114205336202a94cd84 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 5 Feb 2025 08:50:26 +0000 Subject: [PATCH 221/802] refactor: [#1235] remove another pub methog only used for testing --- packages/tracker-core/src/whitelist/manager.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/packages/tracker-core/src/whitelist/manager.rs b/packages/tracker-core/src/whitelist/manager.rs index 9d2ba249b..5efe6e15a 100644 --- a/packages/tracker-core/src/whitelist/manager.rs +++ b/packages/tracker-core/src/whitelist/manager.rs @@ -48,11 +48,6 @@ impl WhitelistManager { Ok(()) } - /// It removes a torrent from the 
whitelist in memory. - pub async fn remove_torrent_from_memory_whitelist(&self, info_hash: &InfoHash) -> bool { - self.in_memory_whitelist.remove(info_hash).await - } - /// It loads the whitelist from the database. /// /// # Errors @@ -155,7 +150,7 @@ mod tests { whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); - whitelist_manager.remove_torrent_from_memory_whitelist(&info_hash).await; + services.in_memory_whitelist.remove(&info_hash).await; assert!(!services.in_memory_whitelist.contains(&info_hash).await); From e994aa2d41da89a7a522a6a748712170cb3f9cb9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 5 Feb 2025 10:07:06 +0000 Subject: [PATCH 222/802] refactor: [#1235] WhitelistManager tests --- packages/tracker-core/src/whitelist/manager.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/packages/tracker-core/src/whitelist/manager.rs b/packages/tracker-core/src/whitelist/manager.rs index 5efe6e15a..e1cd2f89e 100644 --- a/packages/tracker-core/src/whitelist/manager.rs +++ b/packages/tracker-core/src/whitelist/manager.rs @@ -82,7 +82,7 @@ mod tests { struct WhitelistManagerDeps { pub _database: Arc>, - pub _database_whitelist: Arc, + pub database_whitelist: Arc, pub in_memory_whitelist: Arc, } @@ -102,7 +102,7 @@ mod tests { whitelist_manager, Arc::new(WhitelistManagerDeps { _database: database, - _database_whitelist: database_whitelist, + database_whitelist, in_memory_whitelist, }), ) @@ -123,6 +123,7 @@ mod tests { whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); assert!(services.in_memory_whitelist.contains(&info_hash).await); + assert!(services.database_whitelist.load_from_database().unwrap().contains(&info_hash)); } #[tokio::test] @@ -136,6 +137,7 @@ mod tests { whitelist_manager.remove_torrent_from_whitelist(&info_hash).await.unwrap(); assert!(!services.in_memory_whitelist.contains(&info_hash).await); + 
assert!(!services.database_whitelist.load_from_database().unwrap().contains(&info_hash)); } mod persistence { @@ -148,11 +150,7 @@ mod tests { let info_hash = sample_info_hash(); - whitelist_manager.add_torrent_to_whitelist(&info_hash).await.unwrap(); - - services.in_memory_whitelist.remove(&info_hash).await; - - assert!(!services.in_memory_whitelist.contains(&info_hash).await); + services.database_whitelist.add(&info_hash).unwrap(); whitelist_manager.load_whitelist_from_database().await.unwrap(); From dab29d55ac30c8f70c462722f195d71230de0a40 Mon Sep 17 00:00:00 2001 From: nuts-rice Date: Wed, 5 Feb 2025 15:14:34 +0000 Subject: [PATCH 223/802] test: add missing tests to udp_tracker_core::statistics::event::handler --- .../statistics/event/handler.rs | 96 +++++++++++++++++++ 1 file changed, 96 insertions(+) diff --git a/src/packages/udp_tracker_core/statistics/event/handler.rs b/src/packages/udp_tracker_core/statistics/event/handler.rs index d696951d3..d8fa049d0 100644 --- a/src/packages/udp_tracker_core/statistics/event/handler.rs +++ b/src/packages/udp_tracker_core/statistics/event/handler.rs @@ -151,4 +151,100 @@ mod tests { assert_eq!(stats.udp6_scrapes_handled, 1); } + + #[tokio::test] + async fn should_increase_the_udp_abort_counter_when_it_receives_a_udp_abort_event() { + let stats_repository = Repository::new(); + + handle_event(Event::UdpRequestAborted, &stats_repository).await; + let stats = stats_repository.get_stats().await; + assert_eq!(stats.udp_requests_aborted, 1); + } + #[tokio::test] + async fn should_increase_the_udp_ban_counter_when_it_receives_a_udp_banned_event() { + let stats_repository = Repository::new(); + + handle_event(Event::UdpRequestBanned, &stats_repository).await; + let stats = stats_repository.get_stats().await; + assert_eq!(stats.udp_requests_banned, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_requests_counter_when_it_receives_a_udp4_request_event() { + let stats_repository = Repository::new(); + + 
handle_event(Event::Udp4Request, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_requests, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_responses_counter_when_it_receives_a_udp4_response_event() { + let stats_repository = Repository::new(); + + handle_event( + Event::Udp4Response { + kind: crate::packages::udp_tracker_core::statistics::event::UdpResponseKind::Announce, + req_processing_time: std::time::Duration::from_secs(1), + }, + &stats_repository, + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_responses, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_errors_counter_when_it_receives_a_udp4_error_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Udp4Error, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_errors_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_requests_counter_when_it_receives_a_udp6_request_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Udp6Request, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_requests, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_response_counter_when_it_receives_a_udp6_response_event() { + let stats_repository = Repository::new(); + + handle_event( + Event::Udp6Response { + kind: crate::packages::udp_tracker_core::statistics::event::UdpResponseKind::Announce, + req_processing_time: std::time::Duration::from_secs(1), + }, + &stats_repository, + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_responses, 1); + } + #[tokio::test] + async fn should_increase_the_udp6_errors_counter_when_it_receives_a_udp6_error_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Udp6Error, &stats_repository).await; + + let stats = 
stats_repository.get_stats().await; + + assert_eq!(stats.udp6_errors_handled, 1); + } } From ed132941a83d28ad7c457dacb599bd9b32685068 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 5 Feb 2025 10:42:20 +0000 Subject: [PATCH 224/802] docs: add testing section to README --- packages/tracker-core/README.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/packages/tracker-core/README.md b/packages/tracker-core/README.md index 1575cda49..e36a6f4be 100644 --- a/packages/tracker-core/README.md +++ b/packages/tracker-core/README.md @@ -10,6 +10,22 @@ You usually don’t need to use this library directly. Instead, you should use t [Crate documentation](https://docs.rs/bittorrent-tracker-core). +## Testing + +Show coverage report: + +```console +cargo +stable llvm-cov +``` + +Export coverage report to `lcov` format: + +```console +cargo +stable llvm-cov --lcov --output-path=./.coverage/lcov.info +``` + +If you use Visual Studio Code, you can use the [Coverage Gutters](https://marketplace.visualstudio.com/items?itemName=semasquare.vscode-coverage-gutters) extension to view the coverage lines. + ## License The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). From 4198bc6a41d38e47b6b10d8f02e818054ef2690d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 5 Feb 2025 11:15:33 +0000 Subject: [PATCH 225/802] refactor: [#1240] reorganize InMemoryTorrentRepository tests We've grouped them by responsability. The responsabilities are: - To maintain the peer lists for each torrent. - To return the peer lists for a given torrent. - To return the torrent entries, which contains all the info about the torrents, including the peer lists. - To return the torrent metrics. - To return the swarm metadata for a given torrent. - To handle the persistence of the torrent entries. 
--- .../src/torrent/repository/in_memory.rs | 290 +++++++++++------- 1 file changed, 172 insertions(+), 118 deletions(-) diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index b9979577a..7b8b941b4 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -104,18 +104,8 @@ impl InMemoryTorrentRepository { #[cfg(test)] mod tests { - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; - use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; - use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; - use torrust_tracker_primitives::peer::Peer; - use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; - use torrust_tracker_primitives::DurationSinceUnixEpoch; - - use crate::core_tests::{leecher, sample_info_hash, sample_peer}; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; + use aquatic_udp_protocol::PeerId; /// It generates a peer id from a number where the number is the last /// part of the peer ID. For example, for `12` it returns @@ -135,148 +125,212 @@ mod tests { PeerId(peer_id_bytes) } - #[tokio::test] - async fn it_should_collect_torrent_metrics() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); + // The `InMemoryTorrentRepository` has these responsibilities: + // - To maintain the peer lists for each torrent. + // - To return the peer lists for a given torrent. + // - To return the torrent entries, which contains all the info about the + // torrents, including the peer lists. + // - To return the torrent metrics. + // - To return the swarm metadata for a given torrent. + // - To handle the persistence of the torrent entries. 
+ + mod maintaining_the_peer_lists { + // Methods: + // - upsert_peer + // - remove + // - remove_inactive_peers + // - remove_peerless_torrents + } - assert_eq!( - torrents_metrics, - TorrentsMetrics { - complete: 0, - downloaded: 0, - incomplete: 0, - torrents: 0 + mod returning_peer_lists_for_a_torrent { + // Methods: + // - get_peers_for + // - get_torrent_peers + + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; + use torrust_tracker_primitives::peer::Peer; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::torrent::repository::in_memory::tests::numeric_peer_id; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; + + #[tokio::test] + async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let info_hash = sample_info_hash(); + + for idx in 1..=75 { + let peer = Peer { + peer_id: numeric_peer_id(idx), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + }; + + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); } - ); - } - #[tokio::test] - async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); - let info_hash = sample_info_hash(); + assert_eq!(peers.len(), 74); + } + + #[tokio::test] + async fn it_should_return_the_peers_for_a_given_torrent() { + let 
in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - for idx in 1..=75 { - let peer = Peer { - peer_id: numeric_peer_id(idx), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - }; + let info_hash = sample_info_hash(); + let peer = sample_peer(); let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + + let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); + + assert_eq!(peers, vec![Arc::new(peer)]); } - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); + #[tokio::test] + async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - assert_eq!(peers.len(), 74); - } + let info_hash = sample_info_hash(); + let peer = sample_peer(); - #[tokio::test] - async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); - let info_hash = sample_info_hash(); - let peer = sample_peer(); + let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + assert_eq!(peers, vec![]); + } - let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); + #[tokio::test] + async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - assert_eq!(peers, vec![]); - } + let info_hash = 
sample_info_hash(); - #[tokio::test] - async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let excluded_peer = sample_peer(); - let info_hash = sample_info_hash(); + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &excluded_peer); - let excluded_peer = sample_peer(); + // Add 74 peers + for idx in 2..=75 { + let peer = Peer { + peer_id: numeric_peer_id(idx), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + }; - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &excluded_peer); + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + } - // Add 74 peers - for idx in 2..=75 { - let peer = Peer { - peer_id: numeric_peer_id(idx), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - }; + let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + assert_eq!(peers.len(), 74); } + } - let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); - - assert_eq!(peers.len(), 74); + mod returning_torrent_entries { + // Methods: + // - get + // - get_paginated } - #[tokio::test] - async fn it_should_return_the_torrent_metrics() { - let in_memory_torrent_repository = 
Arc::new(InMemoryTorrentRepository::default()); + mod returning_torrent_metrics { + // Methods: + // - get_torrents_metrics - let () = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &leecher()); + use std::sync::Arc; - let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); + use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; - assert_eq!( - torrent_metrics, - TorrentsMetrics { - complete: 0, - downloaded: 0, - incomplete: 1, - torrents: 1, - } - ); - } + use crate::core_tests::{leecher, sample_info_hash}; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - #[tokio::test] - async fn it_should_get_many_the_torrent_metrics() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + #[tokio::test] + async fn it_should_collect_torrent_metrics() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let start_time = std::time::Instant::now(); - for i in 0..1_000_000 { - let () = in_memory_torrent_repository.upsert_peer(&gen_seeded_infohash(&i), &leecher()); + let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); + + assert_eq!( + torrents_metrics, + TorrentsMetrics { + complete: 0, + downloaded: 0, + incomplete: 0, + torrents: 0 + } + ); } - let result_a = start_time.elapsed(); - - let start_time = std::time::Instant::now(); - let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); - let result_b = start_time.elapsed(); - - assert_eq!( - (torrent_metrics), - (TorrentsMetrics { - complete: 0, - downloaded: 0, - incomplete: 1_000_000, - torrents: 1_000_000, - }), - "{result_a:?} {result_b:?}" - ); - } - #[tokio::test] - async fn it_should_return_the_peers_for_a_given_torrent() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + #[tokio::test] + async fn it_should_return_the_torrent_metrics() { 
+ let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let () = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &leecher()); - let info_hash = sample_info_hash(); - let peer = sample_peer(); + let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + assert_eq!( + torrent_metrics, + TorrentsMetrics { + complete: 0, + downloaded: 0, + incomplete: 1, + torrents: 1, + } + ); + } + + #[tokio::test] + async fn it_should_get_many_the_torrent_metrics() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); + let start_time = std::time::Instant::now(); + for i in 0..1_000_000 { + let () = in_memory_torrent_repository.upsert_peer(&gen_seeded_infohash(&i), &leecher()); + } + let result_a = start_time.elapsed(); + + let start_time = std::time::Instant::now(); + let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); + let result_b = start_time.elapsed(); + + assert_eq!( + (torrent_metrics), + (TorrentsMetrics { + complete: 0, + downloaded: 0, + incomplete: 1_000_000, + torrents: 1_000_000, + }), + "{result_a:?} {result_b:?}" + ); + } + } + + mod returning_swarm_metadata { + // Methods: + // - get_swarm_metadata + } - assert_eq!(peers, vec![Arc::new(peer)]); + mod handling_persistence { + // Methods: + // - import_persistent } } From f0481575a6ceb4bb43f828fa8e0283b65374280f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 5 Feb 2025 12:36:22 +0000 Subject: [PATCH 226/802] test: [#1240] add tests for InMemoryTorrentRepository --- packages/tracker-core/src/announce_handler.rs | 10 +- packages/tracker-core/src/core_tests.rs | 66 +- packages/tracker-core/src/torrent/mod.rs | 6 +- .../src/torrent/repository/in_memory.rs | 775 ++++++++++++++---- 4 files changed, 667 insertions(+), 190 deletions(-) diff --git 
a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index fac1df5b2..aa311fe46 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -82,17 +82,11 @@ impl AnnounceHandler { /// needed for a `announce` request response. #[must_use] fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { - let swarm_metadata_before = match self.in_memory_torrent_repository.get_opt_swarm_metadata(info_hash) { - Some(swarm_metadata) => swarm_metadata, - None => SwarmMetadata::zeroed(), - }; + let swarm_metadata_before = self.in_memory_torrent_repository.get_swarm_metadata(info_hash); self.in_memory_torrent_repository.upsert_peer(info_hash, peer); - let swarm_metadata_after = match self.in_memory_torrent_repository.get_opt_swarm_metadata(info_hash) { - Some(swarm_metadata) => swarm_metadata, - None => SwarmMetadata::zeroed(), - }; + let swarm_metadata_after = self.in_memory_torrent_repository.get_swarm_metadata(info_hash); if swarm_metadata_before != swarm_metadata_after { self.persist_stats(info_hash, &swarm_metadata_after); diff --git a/packages/tracker-core/src/core_tests.rs b/packages/tracker-core/src/core_tests.rs index ac99770d4..873c5f0ae 100644 --- a/packages/tracker-core/src/core_tests.rs +++ b/packages/tracker-core/src/core_tests.rs @@ -30,10 +30,74 @@ pub fn sample_info_hash() -> InfoHash { .expect("String should be a valid info hash") } +/// # Panics +/// +/// Will panic if the string representation of the info hash is not a valid info hash. +#[must_use] +pub fn sample_info_hash_one() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") +} + +/// # Panics +/// +/// Will panic if the string representation of the info hash is not a valid info hash. 
+#[must_use] +pub fn sample_info_hash_two() -> InfoHash { + "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") +} + +/// # Panics +/// +/// Will panic if the string representation of the info hash is not a valid info hash. +#[must_use] +pub fn sample_info_hash_alphabetically_ordered_after_sample_info_hash_one() -> InfoHash { + "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") +} + /// Sample peer whose state is not relevant for the tests. #[must_use] pub fn sample_peer() -> Peer { - complete_peer() + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } +} + +#[must_use] +pub fn sample_peer_one() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000001"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } +} + +#[must_use] +pub fn sample_peer_two() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000002"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8082), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } } #[must_use] diff --git a/packages/tracker-core/src/torrent/mod.rs b/packages/tracker-core/src/torrent/mod.rs index 
2aa19130e..340f049d2 100644 --- a/packages/tracker-core/src/torrent/mod.rs +++ b/packages/tracker-core/src/torrent/mod.rs @@ -29,6 +29,8 @@ pub mod manager; pub mod repository; pub mod services; -use torrust_tracker_torrent_repository::TorrentsSkipMapMutexStd; +use torrust_tracker_torrent_repository::{EntryMutexStd, TorrentsSkipMapMutexStd}; -pub type Torrents = TorrentsSkipMapMutexStd; // Currently Used +// Currently used types from the torrent repository crate. +pub type Torrents = TorrentsSkipMapMutexStd; +pub type TorrentEntry = EntryMutexStd; diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 7b8b941b4..baa0c4fdb 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -61,16 +61,10 @@ impl InMemoryTorrentRepository { pub fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { match self.torrents.get(info_hash) { Some(torrent_entry) => torrent_entry.get_swarm_metadata(), - None => SwarmMetadata::default(), + None => SwarmMetadata::zeroed(), } } - /// It returns the data for a `scrape` response if the torrent is found. - #[must_use] - pub fn get_opt_swarm_metadata(&self, info_hash: &InfoHash) -> Option { - self.torrents.get_swarm_metadata(info_hash) - } - /// Get torrent peers for a given torrent and client. /// /// It filters out the client making the request. @@ -105,232 +99,655 @@ impl InMemoryTorrentRepository { #[cfg(test)] mod tests { - use aquatic_udp_protocol::PeerId; + mod the_in_memory_torrent_repository { - /// It generates a peer id from a number where the number is the last - /// part of the peer ID. For example, for `12` it returns - /// `-qB00000000000000012`. 
- fn numeric_peer_id(two_digits_value: i32) -> PeerId { - // Format idx as a string with leading zeros, ensuring it has exactly 2 digits - let idx_str = format!("{two_digits_value:02}"); + use aquatic_udp_protocol::PeerId; - // Create the base part of the peer ID. - let base = b"-qB00000000000000000"; + /// It generates a peer id from a number where the number is the last + /// part of the peer ID. For example, for `12` it returns + /// `-qB00000000000000012`. + fn numeric_peer_id(two_digits_value: i32) -> PeerId { + // Format idx as a string with leading zeros, ensuring it has exactly 2 digits + let idx_str = format!("{two_digits_value:02}"); - // Concatenate the base with idx bytes, ensuring the total length is 20 bytes. - let mut peer_id_bytes = [0u8; 20]; - peer_id_bytes[..base.len()].copy_from_slice(base); - peer_id_bytes[base.len() - idx_str.len()..].copy_from_slice(idx_str.as_bytes()); + // Create the base part of the peer ID. + let base = b"-qB00000000000000000"; - PeerId(peer_id_bytes) - } + // Concatenate the base with idx bytes, ensuring the total length is 20 bytes. + let mut peer_id_bytes = [0u8; 20]; + peer_id_bytes[..base.len()].copy_from_slice(base); + peer_id_bytes[base.len() - idx_str.len()..].copy_from_slice(idx_str.as_bytes()); - // The `InMemoryTorrentRepository` has these responsibilities: - // - To maintain the peer lists for each torrent. - // - To return the peer lists for a given torrent. - // - To return the torrent entries, which contains all the info about the - // torrents, including the peer lists. - // - To return the torrent metrics. - // - To return the swarm metadata for a given torrent. - // - To handle the persistence of the torrent entries. 
- - mod maintaining_the_peer_lists { - // Methods: - // - upsert_peer - // - remove - // - remove_inactive_peers - // - remove_peerless_torrents - } + PeerId(peer_id_bytes) + } - mod returning_peer_lists_for_a_torrent { - // Methods: - // - get_peers_for - // - get_torrent_peers - - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; - - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; - use torrust_tracker_primitives::peer::Peer; - use torrust_tracker_primitives::DurationSinceUnixEpoch; - - use crate::core_tests::{sample_info_hash, sample_peer}; - use crate::torrent::repository::in_memory::tests::numeric_peer_id; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - - #[tokio::test] - async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - - for idx in 1..=75 { - let peer = Peer { - peer_id: numeric_peer_id(idx), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - }; + // The `InMemoryTorrentRepository` has these responsibilities: + // - To maintain the peer lists for each torrent. + // - To maintain the the torrent entries, which contains all the info about the + // torrents, including the peer lists. + // - To return the torrent entries. + // - To return the peer lists for a given torrent. + // - To return the torrent metrics. + // - To return the swarm metadata for a given torrent. + // - To handle the persistence of the torrent entries. 
- let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + mod maintaining_the_peer_lists { + + use std::sync::Arc; + + use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; + + #[tokio::test] + async fn it_should_add_the_first_peer_to_the_torrent_peer_list() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let info_hash = sample_info_hash(); + + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + + assert!(in_memory_torrent_repository.get(&info_hash).is_some()); } - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); + #[tokio::test] + async fn it_should_allow_adding_the_same_peer_twice_to_the_torrent_peer_list() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - assert_eq!(peers.len(), 74); + let info_hash = sample_info_hash(); + + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + + assert!(in_memory_torrent_repository.get(&info_hash).is_some()); + } } - #[tokio::test] - async fn it_should_return_the_peers_for_a_given_torrent() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + mod returning_peer_lists_for_a_torrent { - let info_hash = sample_info_hash(); - let peer = sample_peer(); + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::peer::Peer; + use torrust_tracker_primitives::DurationSinceUnixEpoch; - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); + use crate::core_tests::{sample_info_hash, sample_peer}; + use 
crate::torrent::repository::in_memory::tests::the_in_memory_torrent_repository::numeric_peer_id; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - assert_eq!(peers, vec![Arc::new(peer)]); - } + #[tokio::test] + async fn it_should_return_the_peers_for_a_given_torrent() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - #[tokio::test] - async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + + let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); + + assert_eq!(peers, vec![Arc::new(peer)]); + } + + #[tokio::test] + async fn it_should_return_an_empty_list_or_peers_for_a_non_existing_torrent() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let peers = in_memory_torrent_repository.get_torrent_peers(&sample_info_hash()); + + assert!(peers.is_empty()); + } + + #[tokio::test] + async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let info_hash = sample_info_hash(); + + for idx in 1..=75 { + let peer = Peer { + peer_id: numeric_peer_id(idx), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + }; + + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + } + + let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); + + assert_eq!(peers.len(), 74); + } - let info_hash = sample_info_hash(); - let 
peer = sample_peer(); + mod excluding_the_client_peer { - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; - let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; + use torrust_tracker_primitives::peer::Peer; + use torrust_tracker_primitives::DurationSinceUnixEpoch; - assert_eq!(peers, vec![]); + use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::torrent::repository::in_memory::tests::the_in_memory_torrent_repository::numeric_peer_id; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; + + #[tokio::test] + async fn it_should_return_an_empty_peer_list_for_a_non_existing_torrent() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let peers = + in_memory_torrent_repository.get_peers_for(&sample_info_hash(), &sample_peer(), TORRENT_PEERS_LIMIT); + + assert_eq!(peers, vec![]); + } + + #[tokio::test] + async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + + let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); + + assert_eq!(peers, vec![]); + } + + #[tokio::test] + async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let info_hash = sample_info_hash(); + + let excluded_peer = sample_peer(); + + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &excluded_peer); + + // Add 74 peers + for idx in 2..=75 { + 
let peer = Peer { + peer_id: numeric_peer_id(idx), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + }; + + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + } + + let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); + + assert_eq!(peers.len(), 74); + } + } } - #[tokio::test] - async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + mod maintaining_the_torrent_entries { - let info_hash = sample_info_hash(); + use std::ops::Add; + use std::sync::Arc; + use std::time::Duration; - let excluded_peer = sample_peer(); + use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_configuration::TrackerPolicy; + use torrust_tracker_primitives::DurationSinceUnixEpoch; - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &excluded_peer); + use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - // Add 74 peers - for idx in 2..=75 { - let peer = Peer { - peer_id: numeric_peer_id(idx), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - }; + #[tokio::test] + async fn it_should_remove_a_torrent_entry() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let info_hash = sample_info_hash(); + let 
() = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + + let _unused = in_memory_torrent_repository.remove(&info_hash); + + assert!(in_memory_torrent_repository.get(&info_hash).is_none()); + } + + #[tokio::test] + async fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let info_hash = sample_info_hash(); + let mut peer = sample_peer(); + peer.updated = DurationSinceUnixEpoch::new(0, 0); let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + + // Cut off time is 1 second after the peer was updated + in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); + + assert!(!in_memory_torrent_repository + .get_torrent_peers(&info_hash) + .contains(&Arc::new(peer))); + } + + fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + // Insert a sample peer for the torrent to force adding the torrent entry + let mut peer = sample_peer(); + peer.updated = DurationSinceUnixEpoch::new(0, 0); + let () = in_memory_torrent_repository.upsert_peer(info_hash, &peer); + + // Remove the peer + in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); + + in_memory_torrent_repository } - let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); + #[tokio::test] + async fn it_should_remove_torrents_without_peers() { + let info_hash = sample_info_hash(); - assert_eq!(peers.len(), 74); + let in_memory_torrent_repository = initialize_repository_with_one_torrent_without_peers(&info_hash); + + let tracker_policy = TrackerPolicy { + remove_peerless_torrents: true, + ..Default::default() + }; + + in_memory_torrent_repository.remove_peerless_torrents(&tracker_policy); + + 
assert!(in_memory_torrent_repository.get(&info_hash).is_none()); + } } - } + mod returning_torrent_entries { + + use std::sync::Arc; + + use torrust_tracker_primitives::peer::Peer; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + use torrust_tracker_torrent_repository::entry::EntrySync; + + use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::torrent::TorrentEntry; + + /// `TorrentEntry` data is not directly accessible. It's only + /// accessible through the trait methods. We need this temporary + /// DTO to write simple and more readable assertions. + #[derive(Debug, Clone, PartialEq)] + struct TorrentEntryInfo { + swarm_metadata: SwarmMetadata, + peers: Vec, + number_of_peers: usize, + } - mod returning_torrent_entries { - // Methods: - // - get - // - get_paginated - } + #[allow(clippy::from_over_into)] + impl Into for TorrentEntry { + fn into(self) -> TorrentEntryInfo { + TorrentEntryInfo { + swarm_metadata: self.get_swarm_metadata(), + peers: self.get_peers(None).iter().map(|peer| *peer.clone()).collect(), + number_of_peers: self.get_peers_len(), + } + } + } + + #[tokio::test] + async fn it_should_return_one_torrent_entry_by_infohash() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + + let torrent_entry = in_memory_torrent_repository.get(&info_hash).unwrap(); + + assert_eq!( + TorrentEntryInfo { + swarm_metadata: SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0 + }, + peers: vec!(peer), + number_of_peers: 1 + }, + torrent_entry.into() + ); + } + + mod it_should_return_many_torrent_entries { + use std::sync::Arc; - mod returning_torrent_metrics { - // Methods: - // - get_torrents_metrics + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use 
std::sync::Arc; + use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::torrent::repository::in_memory::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; - use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + #[tokio::test] + async fn without_pagination() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - use crate::core_tests::{leecher, sample_info_hash}; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; + let info_hash = sample_info_hash(); + let peer = sample_peer(); + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); - #[tokio::test] - async fn it_should_collect_torrent_metrics() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let torrent_entries = in_memory_torrent_repository.get_paginated(None); - let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); + assert_eq!(torrent_entries.len(), 1); - assert_eq!( - torrents_metrics, - TorrentsMetrics { - complete: 0, - downloaded: 0, - incomplete: 0, - torrents: 0 + let torrent_entry = torrent_entries.first().unwrap().1.clone(); + + assert_eq!( + TorrentEntryInfo { + swarm_metadata: SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0 + }, + peers: vec!(peer), + number_of_peers: 1 + }, + torrent_entry.into() + ); + } + + mod with_pagination { + use std::sync::Arc; + + use torrust_tracker_primitives::pagination::Pagination; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + + use crate::core_tests::{ + sample_info_hash_alphabetically_ordered_after_sample_info_hash_one, sample_info_hash_one, + sample_peer_one, sample_peer_two, + }; + use 
crate::torrent::repository::in_memory::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; + + #[tokio::test] + async fn it_should_return_the_first_page() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + // Insert one torrent entry + let info_hash_one = sample_info_hash_one(); + let peer_one = sample_peer_one(); + let () = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one); + + // Insert another torrent entry + let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); + let peer_two = sample_peer_two(); + let () = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two); + + // Get only the first page where page size is 1 + let torrent_entries = + in_memory_torrent_repository.get_paginated(Some(&Pagination { offset: 0, limit: 1 })); + + assert_eq!(torrent_entries.len(), 1); + + let torrent_entry = torrent_entries.first().unwrap().1.clone(); + + assert_eq!( + TorrentEntryInfo { + swarm_metadata: SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0 + }, + peers: vec!(peer_one), + number_of_peers: 1 + }, + torrent_entry.into() + ); + } + + #[tokio::test] + async fn it_should_return_the_second_page() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + // Insert one torrent entry + let info_hash_one = sample_info_hash_one(); + let peer_one = sample_peer_one(); + let () = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one); + + // Insert another torrent entry + let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); + let peer_two = sample_peer_two(); + let () = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two); + + // Get only the first page where page size is 1 + let torrent_entries = + in_memory_torrent_repository.get_paginated(Some(&Pagination { 
offset: 1, limit: 1 })); + + assert_eq!(torrent_entries.len(), 1); + + let torrent_entry = torrent_entries.first().unwrap().1.clone(); + + assert_eq!( + TorrentEntryInfo { + swarm_metadata: SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0 + }, + peers: vec!(peer_two), + number_of_peers: 1 + }, + torrent_entry.into() + ); + } + + #[tokio::test] + async fn it_should_allow_changing_the_page_size() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + // Insert one torrent entry + let info_hash_one = sample_info_hash_one(); + let peer_one = sample_peer_one(); + let () = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one); + + // Insert another torrent entry + let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); + let peer_two = sample_peer_two(); + let () = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two); + + // Get only the first page where page size is 1 + let torrent_entries = + in_memory_torrent_repository.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); + + assert_eq!(torrent_entries.len(), 1); + } } - ); + } } - #[tokio::test] - async fn it_should_return_the_torrent_metrics() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + mod returning_torrent_metrics { + + use std::sync::Arc; + + use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + + use crate::core_tests::{complete_peer, leecher, sample_info_hash, seeder}; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; + + // todo: refactor to use test parametrization + + #[tokio::test] + async fn it_should_get_empty_torrent_metrics_when_there_are_no_torrents() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); + + assert_eq!( + 
torrents_metrics, + TorrentsMetrics { + complete: 0, + downloaded: 0, + incomplete: 0, + torrents: 0 + } + ); + } + + #[tokio::test] + async fn it_should_return_the_torrent_metrics_when_there_is_a_leecher() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let () = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &leecher()); + + let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); + + assert_eq!( + torrent_metrics, + TorrentsMetrics { + complete: 0, + downloaded: 0, + incomplete: 1, + torrents: 1, + } + ); + } + + #[tokio::test] + async fn it_should_return_the_torrent_metrics_when_there_is_a_seeder() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let () = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &seeder()); + + let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); + + assert_eq!( + torrent_metrics, + TorrentsMetrics { + complete: 1, + downloaded: 0, + incomplete: 0, + torrents: 1, + } + ); + } - let () = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &leecher()); + #[tokio::test] + async fn it_should_return_the_torrent_metrics_when_there_is_a_completed_peer() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let () = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &complete_peer()); + + let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); + + assert_eq!( + torrent_metrics, + TorrentsMetrics { + complete: 1, + downloaded: 0, + incomplete: 0, + torrents: 1, + } + ); + } - let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); + #[tokio::test] + async fn it_should_return_the_torrent_metrics_when_there_are_multiple_torrents() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - assert_eq!( - torrent_metrics, - TorrentsMetrics { - complete: 0, - downloaded: 0, - 
incomplete: 1, - torrents: 1, + let start_time = std::time::Instant::now(); + for i in 0..1_000_000 { + let () = in_memory_torrent_repository.upsert_peer(&gen_seeded_infohash(&i), &leecher()); } - ); + let result_a = start_time.elapsed(); + + let start_time = std::time::Instant::now(); + let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); + let result_b = start_time.elapsed(); + + assert_eq!( + (torrent_metrics), + (TorrentsMetrics { + complete: 0, + downloaded: 0, + incomplete: 1_000_000, + torrents: 1_000_000, + }), + "{result_a:?} {result_b:?}" + ); + } } - #[tokio::test] - async fn it_should_get_many_the_torrent_metrics() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + mod returning_swarm_metadata { + + use std::sync::Arc; + + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + + use crate::core_tests::{leecher, sample_info_hash}; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; + + #[tokio::test] + async fn it_should_get_swarm_metadata_for_an_existing_torrent() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let infohash = sample_info_hash(); + + let () = in_memory_torrent_repository.upsert_peer(&infohash, &leecher()); + + let swarm_metadata = in_memory_torrent_repository.get_swarm_metadata(&infohash); - let start_time = std::time::Instant::now(); - for i in 0..1_000_000 { - let () = in_memory_torrent_repository.upsert_peer(&gen_seeded_infohash(&i), &leecher()); + assert_eq!( + swarm_metadata, + SwarmMetadata { + complete: 0, + downloaded: 0, + incomplete: 1, + } + ); + } + + #[tokio::test] + async fn it_should_return_zeroed_swarm_metadata_for_a_non_existing_torrent() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let swarm_metadata = in_memory_torrent_repository.get_swarm_metadata(&sample_info_hash()); + + assert_eq!(swarm_metadata, SwarmMetadata::zeroed()); } - let result_a 
= start_time.elapsed(); - - let start_time = std::time::Instant::now(); - let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); - let result_b = start_time.elapsed(); - - assert_eq!( - (torrent_metrics), - (TorrentsMetrics { - complete: 0, - downloaded: 0, - incomplete: 1_000_000, - torrents: 1_000_000, - }), - "{result_a:?} {result_b:?}" - ); } - } - mod returning_swarm_metadata { - // Methods: - // - get_swarm_metadata - } + mod handling_persistence { + + use std::sync::Arc; + + use torrust_tracker_primitives::PersistentTorrents; + + use crate::core_tests::sample_info_hash; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - mod handling_persistence { - // Methods: - // - import_persistent + #[tokio::test] + async fn it_should_allow_importing_persisted_torrent_entries() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let infohash = sample_info_hash(); + + let mut persistent_torrents = PersistentTorrents::default(); + + persistent_torrents.insert(infohash, 1); + + in_memory_torrent_repository.import_persistent(&persistent_torrents); + + let swarm_metadata = in_memory_torrent_repository.get_swarm_metadata(&infohash); + + // Only the number of downloads is persisted. 
+ assert_eq!(swarm_metadata.downloaded, 1); + } + } } } From 6a15e069c11dc13650250f3384d18cd4369554c7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 7 Feb 2025 08:02:29 +0000 Subject: [PATCH 227/802] test: [#1240] add tests for DatabasePersistentTorrentRepository --- packages/primitives/src/lib.rs | 3 +- packages/tracker-core/src/core_tests.rs | 16 ++++++- .../src/torrent/repository/persisted.rs | 48 +++++++++++++++++++ 3 files changed, 65 insertions(+), 2 deletions(-) diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index 55f90ef20..ec9732778 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -18,4 +18,5 @@ use bittorrent_primitives::info_hash::InfoHash; /// Duration since the Unix Epoch. pub type DurationSinceUnixEpoch = Duration; -pub type PersistentTorrents = BTreeMap; +pub type PersistentTorrent = u32; +pub type PersistentTorrents = BTreeMap; diff --git a/packages/tracker-core/src/core_tests.rs b/packages/tracker-core/src/core_tests.rs index 873c5f0ae..53049f326 100644 --- a/packages/tracker-core/src/core_tests.rs +++ b/packages/tracker-core/src/core_tests.rs @@ -174,7 +174,21 @@ pub fn initialize_handlers(config: &Configuration) -> (Arc, Arc /// # Panics /// -/// Will panic if the temporary file path is not a valid UFT string. +/// Will panic if the temporary database file path is not a valid UFT string. +#[cfg(test)] +#[must_use] +pub fn ephemeral_configuration() -> Core { + let mut config = Core::default(); + + let temp_file = ephemeral_sqlite_database(); + temp_file.to_str().unwrap().clone_into(&mut config.database.path); + + config +} + +/// # Panics +/// +/// Will panic if the temporary database file path is not a valid UFT string. 
#[cfg(test)] #[must_use] pub fn ephemeral_configuration_for_listed_tracker() -> Core { diff --git a/packages/tracker-core/src/torrent/repository/persisted.rs b/packages/tracker-core/src/torrent/repository/persisted.rs index 77a9c23eb..224919d0e 100644 --- a/packages/tracker-core/src/torrent/repository/persisted.rs +++ b/packages/tracker-core/src/torrent/repository/persisted.rs @@ -42,3 +42,51 @@ impl DatabasePersistentTorrentRepository { self.database.save_persistent_torrent(info_hash, downloaded) } } + +#[cfg(test)] +mod tests { + + use torrust_tracker_primitives::PersistentTorrents; + + use super::DatabasePersistentTorrentRepository; + use crate::core_tests::{ephemeral_configuration, sample_info_hash, sample_info_hash_one, sample_info_hash_two}; + use crate::databases::setup::initialize_database; + + fn initialize_db_persistent_torrent_repository() -> DatabasePersistentTorrentRepository { + let config = ephemeral_configuration(); + let database = initialize_database(&config); + DatabasePersistentTorrentRepository::new(&database) + } + + #[test] + fn it_saves_the_numbers_of_downloads_for_a_torrent_into_the_database() { + let repository = initialize_db_persistent_torrent_repository(); + + let infohash = sample_info_hash(); + + repository.save(&infohash, 1).unwrap(); + + let torrents = repository.load_all().unwrap(); + + assert_eq!(torrents.get(&infohash), Some(1).as_ref()); + } + + #[test] + fn it_loads_the_numbers_of_downloads_for_all_torrents_from_the_database() { + let repository = initialize_db_persistent_torrent_repository(); + + let infohash_one = sample_info_hash_one(); + let infohash_two = sample_info_hash_two(); + + repository.save(&infohash_one, 1).unwrap(); + repository.save(&infohash_two, 2).unwrap(); + + let torrents = repository.load_all().unwrap(); + + let mut expected_torrents = PersistentTorrents::new(); + expected_torrents.insert(infohash_one, 1); + expected_torrents.insert(infohash_two, 2); + + assert_eq!(torrents, expected_torrents); + } +} From 
0d0c601f08be8b3b4415771d32fca94d49dd5d38 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 7 Feb 2025 08:13:51 +0000 Subject: [PATCH 228/802] test: [#1240] add tests for TorrentsManager --- packages/tracker-core/src/torrent/manager.rs | 148 +++++++++++++++++++ 1 file changed, 148 insertions(+) diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index 4199e9944..778ac6d92 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -61,3 +61,151 @@ impl TorrentsManager { } } } + +#[cfg(test)] +mod tests { + + use std::sync::Arc; + + use torrust_tracker_configuration::Core; + use torrust_tracker_torrent_repository::entry::EntrySync; + + use super::{DatabasePersistentTorrentRepository, TorrentsManager}; + use crate::core_tests::{ephemeral_configuration, sample_info_hash}; + use crate::databases::setup::initialize_database; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; + + struct TorrentsManagerDeps { + config: Arc, + in_memory_torrent_repository: Arc, + database_persistent_torrent_repository: Arc, + } + + fn initialize_torrents_manager() -> (Arc, Arc) { + let config = ephemeral_configuration(); + initialize_torrents_manager_with(config.clone()) + } + + fn initialize_torrents_manager_with(config: Core) -> (Arc, Arc) { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let database = initialize_database(&config); + let database_persistent_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + + let torrents_manager = Arc::new(TorrentsManager::new( + &config, + &in_memory_torrent_repository, + &database_persistent_torrent_repository, + )); + + ( + torrents_manager, + Arc::new(TorrentsManagerDeps { + config: Arc::new(config), + in_memory_torrent_repository, + database_persistent_torrent_repository, + }), + ) + } + + #[test] + fn 
it_should_load_the_numbers_of_downloads_for_all_torrents_from_the_database() { + let (torrents_manager, services) = initialize_torrents_manager(); + + let infohash = sample_info_hash(); + + services.database_persistent_torrent_repository.save(&infohash, 1).unwrap(); + + torrents_manager.load_torrents_from_database().unwrap(); + + assert_eq!( + services + .in_memory_torrent_repository + .get(&infohash) + .unwrap() + .get_swarm_metadata() + .downloaded, + 1 + ); + } + + mod cleaning_torrents { + use std::ops::Add; + use std::sync::Arc; + use std::time::Duration; + + use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_clock::clock::stopped::Stopped; + use torrust_tracker_clock::clock::{self}; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::core_tests::{ephemeral_configuration, sample_info_hash, sample_peer}; + use crate::torrent::manager::tests::{initialize_torrents_manager, initialize_torrents_manager_with}; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; + + #[test] + fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { + let (torrents_manager, services) = initialize_torrents_manager(); + + let infohash = sample_info_hash(); + + clock::Stopped::local_set(&Duration::from_secs(0)); + + // Add a peer to the torrent + let mut peer = sample_peer(); + peer.updated = DurationSinceUnixEpoch::new(0, 0); + let () = services.in_memory_torrent_repository.upsert_peer(&infohash, &peer); + + // Simulate the time has passed 1 second more than the max peer timeout. 
+ clock::Stopped::local_add(&Duration::from_secs( + (services.config.tracker_policy.max_peer_timeout + 1).into(), + )) + .unwrap(); + + torrents_manager.cleanup_torrents(); + + assert!(services.in_memory_torrent_repository.get(&infohash).is_none()); + } + + fn add_a_peerless_torrent(infohash: &InfoHash, in_memory_torrent_repository: &Arc) { + // Add a peer to the torrent + let mut peer = sample_peer(); + peer.updated = DurationSinceUnixEpoch::new(0, 0); + let () = in_memory_torrent_repository.upsert_peer(infohash, &peer); + + // Remove the peer. The torrent is now peerless. + in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); + } + + #[test] + fn it_should_remove_torrents_that_have_no_peers_when_it_is_configured_to_do_so() { + let mut config = ephemeral_configuration(); + config.tracker_policy.remove_peerless_torrents = true; + + let (torrents_manager, services) = initialize_torrents_manager_with(config); + + let infohash = sample_info_hash(); + + add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository); + + torrents_manager.cleanup_torrents(); + + assert!(services.in_memory_torrent_repository.get(&infohash).is_none()); + } + + #[test] + fn it_should_retain_peerless_torrents_when_it_is_configured_to_do_so() { + let mut config = ephemeral_configuration(); + config.tracker_policy.remove_peerless_torrents = false; + + let (torrents_manager, services) = initialize_torrents_manager_with(config); + + let infohash = sample_info_hash(); + + add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository); + + torrents_manager.cleanup_torrents(); + + assert!(services.in_memory_torrent_repository.get(&infohash).is_some()); + } + } +} From ef879541a34f450cdda5d448c0aad6b743eb49ab Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 7 Feb 2025 08:50:05 +0000 Subject: [PATCH 229/802] refactor: minor changes --- packages/tracker-core/src/torrent/services.rs | 20 +++++++++---------- 1 file changed, 10 
insertions(+), 10 deletions(-) diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index 2275f20d0..ea3966f6d 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -137,7 +137,7 @@ mod tests { use crate::torrent::services::{get_torrent_info, Info}; #[tokio::test] - async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { + async fn it_should_return_none_if_the_tracker_does_not_have_the_torrent() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let torrent_info = get_torrent_info( @@ -149,7 +149,7 @@ mod tests { } #[tokio::test] - async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { + async fn it_should_return_the_torrent_info_if_the_tracker_has_the_torrent() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 @@ -183,7 +183,7 @@ mod tests { use crate::torrent::services::{get_torrents_page, BasicInfo, Pagination}; #[tokio::test] - async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { + async fn it_should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); @@ -192,7 +192,7 @@ mod tests { } #[tokio::test] - async fn should_return_a_summarized_info_for_all_torrents() { + async fn it_should_return_a_summarized_info_for_all_torrents() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 @@ -214,13 +214,13 @@ mod tests { } #[tokio::test] - async fn should_allow_limiting_the_number_of_torrents_in_the_result() { + async 
fn it_should_allow_limiting_the_number_of_torrents_in_the_result() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash1 = InfoHash::from_str(&hash1).unwrap(); - let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); + let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); let () = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); @@ -235,13 +235,13 @@ mod tests { } #[tokio::test] - async fn should_allow_using_pagination_in_the_result() { + async fn it_should_allow_using_pagination_in_the_result() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash1 = InfoHash::from_str(&hash1).unwrap(); - let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); + let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); let () = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); @@ -265,14 +265,14 @@ mod tests { } #[tokio::test] - async fn should_return_torrents_ordered_by_info_hash() { + async fn it_should_return_torrents_ordered_by_info_hash() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash1 = InfoHash::from_str(&hash1).unwrap(); let () = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); - let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); + let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = 
InfoHash::from_str(&hash2).unwrap(); let () = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer()); From 21c865dca26b96e594a8fb1fade624bc1a58c600 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 7 Feb 2025 09:11:20 +0000 Subject: [PATCH 230/802] test: [#1240] add tests for bittorrent_tracker_core::torrent::services --- packages/tracker-core/src/torrent/services.rs | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index ea3966f6d..c36190ed1 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -297,4 +297,44 @@ mod tests { ); } } + + mod getting_basic_torrent_info_for_multiple_torrents_at_once { + + use std::sync::Arc; + + use crate::core_tests::sample_info_hash; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::torrent::services::tests::sample_peer; + use crate::torrent::services::{get_torrents, BasicInfo}; + + #[tokio::test] + async fn it_should_return_an_empty_list_if_none_of_the_requested_torrents_is_found() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let torrent_info = get_torrents(&in_memory_torrent_repository, &[sample_info_hash()]); + + assert!(torrent_info.is_empty()); + } + + #[tokio::test] + async fn it_should_return_a_list_with_basic_info_about_the_requested_torrents() { + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let info_hash = sample_info_hash(); + + let () = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + + let torrent_info = get_torrents(&in_memory_torrent_repository, &[info_hash]); + + assert_eq!( + torrent_info, + vec!(BasicInfo { + info_hash, + seeders: 1, + completed: 0, + leechers: 0, + }) + ); + } + } } From b2fc66331a67d59754db700968bfd76457109135 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 
7 Feb 2025 10:03:30 +0000 Subject: [PATCH 231/802] test: [#1247] add tests for bittorrent_tracker_core::announce_handler --- packages/tracker-core/src/announce_handler.rs | 174 ++++++++++++++++-- 1 file changed, 160 insertions(+), 14 deletions(-) diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index aa311fe46..5dd4a0291 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -119,12 +119,7 @@ pub enum PeersWanted { impl PeersWanted { #[must_use] pub fn only(limit: u32) -> Self { - let amount: usize = match limit.try_into() { - Ok(amount) => amount, - Err(_) => TORRENT_PEERS_LIMIT, - }; - - Self::Only { amount } + limit.into() } fn limit(&self) -> usize { @@ -137,13 +132,29 @@ impl PeersWanted { impl From for PeersWanted { fn from(value: i32) -> Self { - if value > 0 { - match value.try_into() { - Ok(peers_wanted) => Self::Only { amount: peers_wanted }, - Err(_) => Self::All, - } - } else { - Self::All + if value <= 0 { + return PeersWanted::All; + } + + // This conversion is safe because `value > 0` + let amount = usize::try_from(value).unwrap(); + + PeersWanted::Only { + amount: amount.min(TORRENT_PEERS_LIMIT), + } + } +} + +impl From for PeersWanted { + fn from(value: u32) -> Self { + if value == 0 { + return PeersWanted::All; + } + + let amount = value as usize; + + PeersWanted::Only { + amount: amount.min(TORRENT_PEERS_LIMIT), } } } @@ -210,6 +221,19 @@ mod tests { } } + /// Sample peer when for tests that need more than two peer + fn sample_peer_3() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000003"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 3)), 8082), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), + event: AnnounceEvent::Completed, + } + } + mod for_all_tracker_config_modes { mod 
handling_an_announce_request { @@ -217,7 +241,7 @@ mod tests { use std::sync::Arc; use crate::announce_handler::tests::the_announce_handler::{ - peer_ip, public_tracker, sample_peer_1, sample_peer_2, + peer_ip, public_tracker, sample_peer_1, sample_peer_2, sample_peer_3, }; use crate::announce_handler::PeersWanted; use crate::core_tests::{sample_info_hash, sample_peer}; @@ -349,6 +373,38 @@ mod tests { assert_eq!(announce_data.peers, vec![Arc::new(previously_announced_peer)]); } + #[tokio::test] + async fn it_should_allow_peers_to_get_only_a_subset_of_the_peers_in_the_swarm() { + let (announce_handler, _scrape_handler) = public_tracker(); + + let mut previously_announced_peer_1 = sample_peer_1(); + announce_handler.announce( + &sample_info_hash(), + &mut previously_announced_peer_1, + &peer_ip(), + &PeersWanted::All, + ); + + let mut previously_announced_peer_2 = sample_peer_2(); + announce_handler.announce( + &sample_info_hash(), + &mut previously_announced_peer_2, + &peer_ip(), + &PeersWanted::All, + ); + + let mut peer = sample_peer_3(); + let announce_data = + announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::only(1)); + + // It should return only one peer. There is no guarantee on + // which peer will be returned. 
+ assert!( + announce_data.peers == vec![Arc::new(previously_announced_peer_1)] + || announce_data.peers == vec![Arc::new(previously_announced_peer_2)] + ); + } + mod it_should_update_the_swarm_stats_for_the_torrent { use crate::announce_handler::tests::the_announce_handler::{peer_ip, public_tracker}; @@ -461,5 +517,95 @@ mod tests { assert!(torrent_entry.peers_is_empty()); } } + + mod should_allow_the_client_peers_to_specified_the_number_of_peers_wanted { + + use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; + + use crate::announce_handler::PeersWanted; + + #[test] + fn it_should_return_the_maximin_number_of_peers_by_default() { + let peers_wanted = PeersWanted::default(); + + assert_eq!(peers_wanted.limit(), TORRENT_PEERS_LIMIT); + } + + #[test] + fn it_should_return_74_at_the_most_if_the_client_wants_them_all() { + let peers_wanted = PeersWanted::All; + + assert_eq!(peers_wanted.limit(), TORRENT_PEERS_LIMIT); + } + + #[test] + fn it_should_allow_limiting_the_peer_list() { + let peers_wanted = PeersWanted::only(10); + + assert_eq!(peers_wanted.limit(), 10); + } + + fn maximum_as_u32() -> u32 { + u32::try_from(TORRENT_PEERS_LIMIT).unwrap() + } + + fn maximum_as_i32() -> i32 { + i32::try_from(TORRENT_PEERS_LIMIT).unwrap() + } + + #[test] + fn it_should_return_the_maximum_when_wanting_more_than_the_maximum() { + let peers_wanted = PeersWanted::only(maximum_as_u32() + 1); + assert_eq!(peers_wanted.limit(), TORRENT_PEERS_LIMIT); + } + + #[test] + fn it_should_return_the_maximum_when_wanting_only_zero() { + let peers_wanted = PeersWanted::only(0); + assert_eq!(peers_wanted.limit(), TORRENT_PEERS_LIMIT); + } + + #[test] + fn it_should_convert_the_peers_wanted_number_from_i32() { + // Negative. It should return the maximum + let peers_wanted: PeersWanted = (-1i32).into(); + assert_eq!(peers_wanted.limit(), TORRENT_PEERS_LIMIT); + + // Zero. 
It should return the maximum + let peers_wanted: PeersWanted = 0i32.into(); + assert_eq!(peers_wanted.limit(), TORRENT_PEERS_LIMIT); + + // Greater than the maximum. It should return the maximum + let peers_wanted: PeersWanted = (maximum_as_i32() + 1).into(); + assert_eq!(peers_wanted.limit(), TORRENT_PEERS_LIMIT); + + // The maximum + let peers_wanted: PeersWanted = (maximum_as_i32()).into(); + assert_eq!(peers_wanted.limit(), TORRENT_PEERS_LIMIT); + + // Smaller than the maximum + let peers_wanted: PeersWanted = (maximum_as_i32() - 1).into(); + assert_eq!(i32::try_from(peers_wanted.limit()).unwrap(), maximum_as_i32() - 1); + } + + #[test] + fn it_should_convert_the_peers_wanted_number_from_u32() { + // Zero. It should return the maximum + let peers_wanted: PeersWanted = 0u32.into(); + assert_eq!(peers_wanted.limit(), TORRENT_PEERS_LIMIT); + + // Greater than the maximum. It should return the maximum + let peers_wanted: PeersWanted = (maximum_as_u32() + 1).into(); + assert_eq!(peers_wanted.limit(), TORRENT_PEERS_LIMIT); + + // The maximum + let peers_wanted: PeersWanted = (maximum_as_u32()).into(); + assert_eq!(peers_wanted.limit(), TORRENT_PEERS_LIMIT); + + // Smaller than the maximum + let peers_wanted: PeersWanted = (maximum_as_u32() - 1).into(); + assert_eq!(i32::try_from(peers_wanted.limit()).unwrap(), maximum_as_i32() - 1); + } + } } } From 5fdee789a5aacdcdbfad2b2b9a5ef58919c5eaa8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 7 Feb 2025 11:05:45 +0000 Subject: [PATCH 232/802] refactor: rename enum variant --- packages/tracker-core/src/announce_handler.rs | 45 ++++++++++++------- packages/tracker-core/src/lib.rs | 8 ++-- src/servers/http/v1/handlers/announce.rs | 2 +- src/servers/http/v1/services/announce.rs | 8 ++-- src/servers/http/v1/services/scrape.rs | 4 +- 5 files changed, 39 insertions(+), 28 deletions(-) diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 5dd4a0291..85dd354bf 100644 
--- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -111,7 +111,7 @@ impl AnnounceHandler { pub enum PeersWanted { /// The peer wants as many peers as possible in the announce response. #[default] - All, + AsManyAsPossible, /// The peer only wants a certain amount of peers in the announce response. Only { amount: usize }, } @@ -124,7 +124,7 @@ impl PeersWanted { fn limit(&self) -> usize { match self { - PeersWanted::All => TORRENT_PEERS_LIMIT, + PeersWanted::AsManyAsPossible => TORRENT_PEERS_LIMIT, PeersWanted::Only { amount } => *amount, } } @@ -133,7 +133,7 @@ impl PeersWanted { impl From for PeersWanted { fn from(value: i32) -> Self { if value <= 0 { - return PeersWanted::All; + return PeersWanted::AsManyAsPossible; } // This conversion is safe because `value > 0` @@ -148,7 +148,7 @@ impl From for PeersWanted { impl From for PeersWanted { fn from(value: u32) -> Self { if value == 0 { - return PeersWanted::All; + return PeersWanted::AsManyAsPossible; } let amount = value as usize; @@ -350,7 +350,8 @@ mod tests { let mut peer = sample_peer(); - let announce_data = announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); + let announce_data = + announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible); assert_eq!(announce_data.peers, vec![]); } @@ -364,11 +365,12 @@ mod tests { &sample_info_hash(), &mut previously_announced_peer, &peer_ip(), - &PeersWanted::All, + &PeersWanted::AsManyAsPossible, ); let mut peer = sample_peer_2(); - let announce_data = announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); + let announce_data = + announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible); assert_eq!(announce_data.peers, vec![Arc::new(previously_announced_peer)]); } @@ -382,7 +384,7 @@ mod tests { &sample_info_hash(), &mut previously_announced_peer_1, 
&peer_ip(), - &PeersWanted::All, + &PeersWanted::AsManyAsPossible, ); let mut previously_announced_peer_2 = sample_peer_2(); @@ -390,7 +392,7 @@ mod tests { &sample_info_hash(), &mut previously_announced_peer_2, &peer_ip(), - &PeersWanted::All, + &PeersWanted::AsManyAsPossible, ); let mut peer = sample_peer_3(); @@ -418,7 +420,7 @@ mod tests { let mut peer = seeder(); let announce_data = - announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); + announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible); assert_eq!(announce_data.stats.complete, 1); } @@ -430,7 +432,7 @@ mod tests { let mut peer = leecher(); let announce_data = - announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::All); + announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible); assert_eq!(announce_data.stats.incomplete, 1); } @@ -441,11 +443,20 @@ mod tests { // We have to announce with "started" event because peer does not count if peer was not previously known let mut started_peer = started_peer(); - announce_handler.announce(&sample_info_hash(), &mut started_peer, &peer_ip(), &PeersWanted::All); + announce_handler.announce( + &sample_info_hash(), + &mut started_peer, + &peer_ip(), + &PeersWanted::AsManyAsPossible, + ); let mut completed_peer = completed_peer(); - let announce_data = - announce_handler.announce(&sample_info_hash(), &mut completed_peer, &peer_ip(), &PeersWanted::All); + let announce_data = announce_handler.announce( + &sample_info_hash(), + &mut completed_peer, + &peer_ip(), + &PeersWanted::AsManyAsPossible, + ); assert_eq!(announce_data.stats.downloaded, 1); } @@ -494,11 +505,11 @@ mod tests { let mut peer = sample_peer(); peer.event = AnnounceEvent::Started; - let announce_data = announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); + let announce_data = announce_handler.announce(&info_hash, &mut 
peer, &peer_ip(), &PeersWanted::AsManyAsPossible); assert_eq!(announce_data.stats.downloaded, 0); peer.event = AnnounceEvent::Completed; - let announce_data = announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); + let announce_data = announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible); assert_eq!(announce_data.stats.downloaded, 1); // Remove the newly updated torrent from memory @@ -533,7 +544,7 @@ mod tests { #[test] fn it_should_return_74_at_the_most_if_the_client_wants_them_all() { - let peers_wanted = PeersWanted::All; + let peers_wanted = PeersWanted::AsManyAsPossible; assert_eq!(peers_wanted.limit(), TORRENT_PEERS_LIMIT); } diff --git a/packages/tracker-core/src/lib.rs b/packages/tracker-core/src/lib.rs index 68bc48552..9334e4a02 100644 --- a/packages/tracker-core/src/lib.rs +++ b/packages/tracker-core/src/lib.rs @@ -460,7 +460,7 @@ mod tests { &info_hash, &mut complete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10)), - &PeersWanted::All, + &PeersWanted::AsManyAsPossible, ); // Announce an "incomplete" peer for the torrent @@ -469,7 +469,7 @@ mod tests { &info_hash, &mut incomplete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11)), - &PeersWanted::All, + &PeersWanted::AsManyAsPossible, ); // Scrape @@ -510,11 +510,11 @@ mod tests { let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // DevSkim: ignore DS173237 let mut peer = incomplete_peer(); - announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); + announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible); // Announce twice to force non zeroed swarm metadata let mut peer = complete_peer(); - announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::All); + announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible); let scrape_data = scrape_handler.scrape(&vec![info_hash]).await; diff --git 
a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 4c4aa6617..f76aa7a07 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -175,7 +175,7 @@ async fn handle_announce( let mut peer = peer_from_request(announce_request, &peer_ip); let peers_wanted = match announce_request.numwant { Some(numwant) => PeersWanted::only(numwant), - None => PeersWanted::All, + None => PeersWanted::AsManyAsPossible, }; let announce_data = services::announce::invoke( diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 64a29db5a..4de9296b3 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -195,7 +195,7 @@ mod tests { core_http_tracker_services.http_stats_event_sender.clone(), sample_info_hash(), &mut peer, - &PeersWanted::All, + &PeersWanted::AsManyAsPossible, ) .await; @@ -232,7 +232,7 @@ mod tests { http_stats_event_sender, sample_info_hash(), &mut peer, - &PeersWanted::All, + &PeersWanted::AsManyAsPossible, ) .await; } @@ -277,7 +277,7 @@ mod tests { http_stats_event_sender, sample_info_hash(), &mut peer, - &PeersWanted::All, + &PeersWanted::AsManyAsPossible, ) .await; } @@ -303,7 +303,7 @@ mod tests { http_stats_event_sender, sample_info_hash(), &mut peer, - &PeersWanted::All, + &PeersWanted::AsManyAsPossible, ) .await; } diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 0a3425efe..3a2323693 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -182,7 +182,7 @@ mod tests { // Announce a new peer to force scrape data to contain not zeroed data let mut peer = sample_peer(); let original_peer_ip = peer.ip(); - announce_handler.announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::All); + announce_handler.announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::AsManyAsPossible); let 
scrape_data = invoke(&scrape_handler, &http_stats_event_sender, &info_hashes, &original_peer_ip).await; @@ -267,7 +267,7 @@ mod tests { // Announce a new peer to force scrape data to contain not zeroed data let mut peer = sample_peer(); let original_peer_ip = peer.ip(); - announce_handler.announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::All); + announce_handler.announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::AsManyAsPossible); let scrape_data = fake(&http_stats_event_sender, &info_hashes, &original_peer_ip).await; From 58a3741db9b7b51dc65517bf9468ffde1c2fca64 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 7 Feb 2025 11:52:12 +0000 Subject: [PATCH 233/802] refactor: [#1250] invert dependency between http-protocol and tracker-core pacakges The `tracker-core` should not depend on higher levels layers like the `http-protocol` package. --- Cargo.lock | 2 +- packages/http-protocol/Cargo.toml | 1 + packages/http-protocol/src/v1/responses/error.rs | 8 ++++++++ packages/tracker-core/Cargo.toml | 1 - packages/tracker-core/src/error.rs | 9 --------- 5 files changed, 10 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d868f7452..228640f84 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -542,6 +542,7 @@ version = "3.0.0-develop" dependencies = [ "aquatic_udp_protocol", "bittorrent-primitives", + "bittorrent-tracker-core", "derive_more", "multimap", "percent-encoding", @@ -596,7 +597,6 @@ name = "bittorrent-tracker-core" version = "3.0.0-develop" dependencies = [ "aquatic_udp_protocol", - "bittorrent-http-protocol", "bittorrent-primitives", "chrono", "derive_more", diff --git a/packages/http-protocol/Cargo.toml b/packages/http-protocol/Cargo.toml index 05b69d201..2d0cabf51 100644 --- a/packages/http-protocol/Cargo.toml +++ b/packages/http-protocol/Cargo.toml @@ -17,6 +17,7 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" +bittorrent-tracker-core = { version = 
"3.0.0-develop", path = "../tracker-core" } derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } multimap = "0" percent-encoding = "2" diff --git a/packages/http-protocol/src/v1/responses/error.rs b/packages/http-protocol/src/v1/responses/error.rs index f939ce298..cdf27e00b 100644 --- a/packages/http-protocol/src/v1/responses/error.rs +++ b/packages/http-protocol/src/v1/responses/error.rs @@ -55,6 +55,14 @@ impl From for Error { } } +impl From for Error { + fn from(err: bittorrent_tracker_core::error::Error) -> Self { + Error { + failure_reason: format!("Tracker error: {err}"), + } + } +} + #[cfg(test)] mod tests { diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml index aeea30a3e..96505a7ba 100644 --- a/packages/tracker-core/Cargo.toml +++ b/packages/tracker-core/Cargo.toml @@ -16,7 +16,6 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" -bittorrent-http-protocol = { version = "3.0.0-develop", path = "../http-protocol" } bittorrent-primitives = "0.1.0" chrono = { version = "0", default-features = false, features = ["clock"] } derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } diff --git a/packages/tracker-core/src/error.rs b/packages/tracker-core/src/error.rs index 1d0e974e5..6fdb5b626 100644 --- a/packages/tracker-core/src/error.rs +++ b/packages/tracker-core/src/error.rs @@ -8,7 +8,6 @@ //! 
use std::panic::Location; -use bittorrent_http_protocol::v1::responses; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_located_error::LocatedError; @@ -54,11 +53,3 @@ pub enum PeerKeyError { source: LocatedError<'static, databases::error::Error>, }, } - -impl From for responses::error::Error { - fn from(err: Error) -> Self { - responses::error::Error { - failure_reason: format!("Tracker error: {err}"), - } - } -} From 3bb4a13127b72c8b8fa16b27d89d6f1d21d02af4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 7 Feb 2025 12:21:24 +0000 Subject: [PATCH 234/802] refactor: [#1250] remove unused errors in tracker core --- packages/tracker-core/src/error.rs | 24 +++--------------------- 1 file changed, 3 insertions(+), 21 deletions(-) diff --git a/packages/tracker-core/src/error.rs b/packages/tracker-core/src/error.rs index 6fdb5b626..6c2e3d4c5 100644 --- a/packages/tracker-core/src/error.rs +++ b/packages/tracker-core/src/error.rs @@ -1,11 +1,4 @@ -//! Error returned by the core `Tracker`. -//! -//! Error | Context | Description -//! ---|---|--- -//! `PeerKeyNotValid` | Authentication | The supplied key is not valid. It may not be registered or expired. -//! `PeerNotAuthenticated` | Authentication | The peer did not provide the authentication key. -//! `TorrentNotWhitelisted` | Authorization | The action cannot be perform on a not-whitelisted torrent (it only applies for trackers running in `listed` or `private_listed` modes). -//! +//! Errors returned by the core tracker. use std::panic::Location; use bittorrent_primitives::info_hash::InfoHash; @@ -14,20 +7,9 @@ use torrust_tracker_located_error::LocatedError; use super::authentication::key::ParseKeyError; use super::databases; -/// Authentication or authorization error returned by the core `Tracker` +/// Authorization errors returned by the core tracker. 
#[derive(thiserror::Error, Debug, Clone)] pub enum Error { - // Authentication errors - #[error("The supplied key: {key:?}, is not valid: {source}")] - PeerKeyNotValid { - key: super::authentication::Key, - source: LocatedError<'static, dyn std::error::Error + Send + Sync>, - }, - - #[error("The peer is not authenticated, {location}")] - PeerNotAuthenticated { location: &'static Location<'static> }, - - // Authorization errors #[error("The torrent: {info_hash}, is not whitelisted, {location}")] TorrentNotWhitelisted { info_hash: InfoHash, @@ -35,7 +17,7 @@ pub enum Error { }, } -/// Errors related to peers keys. +/// Peers keys errors returned by the core tracker. #[allow(clippy::module_name_repetitions)] #[derive(thiserror::Error, Debug, Clone)] pub enum PeerKeyError { From 0811d67b28a793401895fbf0f91b5e940c2624e8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 7 Feb 2025 12:23:59 +0000 Subject: [PATCH 235/802] refactor: [#1250] rename enum --- packages/http-protocol/src/v1/responses/error.rs | 4 ++-- packages/tracker-core/src/error.rs | 4 ++-- packages/tracker-core/src/whitelist/authorization.rs | 10 +++++----- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/http-protocol/src/v1/responses/error.rs b/packages/http-protocol/src/v1/responses/error.rs index cdf27e00b..8a6b4cf55 100644 --- a/packages/http-protocol/src/v1/responses/error.rs +++ b/packages/http-protocol/src/v1/responses/error.rs @@ -55,8 +55,8 @@ impl From for Error { } } -impl From for Error { - fn from(err: bittorrent_tracker_core::error::Error) -> Self { +impl From for Error { + fn from(err: bittorrent_tracker_core::error::WhitelistError) -> Self { Error { failure_reason: format!("Tracker error: {err}"), } diff --git a/packages/tracker-core/src/error.rs b/packages/tracker-core/src/error.rs index 6c2e3d4c5..56b6e1a10 100644 --- a/packages/tracker-core/src/error.rs +++ b/packages/tracker-core/src/error.rs @@ -7,9 +7,9 @@ use torrust_tracker_located_error::LocatedError; use 
super::authentication::key::ParseKeyError; use super::databases; -/// Authorization errors returned by the core tracker. +/// Whitelist errors returned by the core tracker. #[derive(thiserror::Error, Debug, Clone)] -pub enum Error { +pub enum WhitelistError { #[error("The torrent: {info_hash}, is not whitelisted, {location}")] TorrentNotWhitelisted { info_hash: InfoHash, diff --git a/packages/tracker-core/src/whitelist/authorization.rs b/packages/tracker-core/src/whitelist/authorization.rs index cb5f4acbf..66f909226 100644 --- a/packages/tracker-core/src/whitelist/authorization.rs +++ b/packages/tracker-core/src/whitelist/authorization.rs @@ -6,7 +6,7 @@ use torrust_tracker_configuration::Core; use tracing::instrument; use super::repository::in_memory::InMemoryWhitelist; -use crate::error::Error; +use crate::error::WhitelistError; pub struct WhitelistAuthorization { /// Core tracker configuration. @@ -32,7 +32,7 @@ impl WhitelistAuthorization { /// Will return an error if the tracker is running in `listed` mode /// and the infohash is not whitelisted. 
#[instrument(skip(self, info_hash), err)] - pub async fn authorize(&self, info_hash: &InfoHash) -> Result<(), Error> { + pub async fn authorize(&self, info_hash: &InfoHash) -> Result<(), WhitelistError> { if !self.is_listed() { return Ok(()); } @@ -41,7 +41,7 @@ impl WhitelistAuthorization { return Ok(()); } - Err(Error::TorrentNotWhitelisted { + Err(WhitelistError::TorrentNotWhitelisted { info_hash: *info_hash, location: Location::caller(), }) @@ -89,7 +89,7 @@ mod tests { use torrust_tracker_configuration::Core; use crate::core_tests::sample_info_hash; - use crate::error::Error; + use crate::error::WhitelistError; use crate::whitelist::authorization::tests::the_whitelist_authorization_for_announce_and_scrape_actions::{ initialize_whitelist_authorization_and_dependencies_with, initialize_whitelist_authorization_with, }; @@ -121,7 +121,7 @@ mod tests { let result = whitelist_authorization.authorize(&sample_info_hash()).await; - assert!(matches!(result.unwrap_err(), Error::TorrentNotWhitelisted { .. })); + assert!(matches!(result.unwrap_err(), WhitelistError::TorrentNotWhitelisted { .. 
})); } } From ca4ead502ae94b9bb8c93a6d115fb010153e11dc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 7 Feb 2025 12:59:35 +0000 Subject: [PATCH 236/802] test: [#1250] add tests for tracker core errors --- packages/tracker-core/src/error.rs | 83 ++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) diff --git a/packages/tracker-core/src/error.rs b/packages/tracker-core/src/error.rs index 56b6e1a10..515510b85 100644 --- a/packages/tracker-core/src/error.rs +++ b/packages/tracker-core/src/error.rs @@ -35,3 +35,86 @@ pub enum PeerKeyError { source: LocatedError<'static, databases::error::Error>, }, } + +#[cfg(test)] +mod tests { + + mod whitelist_error { + + use crate::core_tests::sample_info_hash; + use crate::error::WhitelistError; + + #[test] + fn torrent_not_whitelisted() { + let err = WhitelistError::TorrentNotWhitelisted { + info_hash: sample_info_hash(), + location: std::panic::Location::caller(), + }; + + let err_msg = format!("{err}"); + + assert!( + err_msg.contains(&format!("The torrent: {}, is not whitelisted", sample_info_hash())), + "Error message did not contain expected text: {err_msg}" + ); + } + } + + mod peer_key_error { + use torrust_tracker_located_error::Located; + + use crate::databases::driver::Driver; + use crate::error::PeerKeyError; + use crate::{authentication, databases}; + + #[test] + fn duration_overflow() { + let seconds_valid = 100; + + let err = PeerKeyError::DurationOverflow { seconds_valid }; + + let err_msg = format!("{err}"); + + assert!( + err_msg.contains(&format!("Invalid peer key duration: {seconds_valid}")), + "Error message did not contain expected text: {err_msg}" + ); + } + + #[test] + fn parsing_from_string() { + let err = authentication::key::ParseKeyError::InvalidKeyLength; + + let err = PeerKeyError::InvalidKey { + key: "INVALID KEY".to_string(), + source: Located(err).into(), + }; + + let err_msg = format!("{err}"); + + assert!( + err_msg.contains(&"Invalid key: INVALID KEY".to_string()), + "Error 
message did not contain expected text: {err_msg}" + ); + } + + #[test] + fn persisting_into_database() { + let err = databases::error::Error::InsertFailed { + location: std::panic::Location::caller(), + driver: Driver::Sqlite3, + }; + + let err = PeerKeyError::DatabaseError { + source: Located(err).into(), + }; + + let err_msg = format!("{err}"); + + assert!( + err_msg.contains(&"Can't persist key".to_string()), + "Error message did not contain expected text: {err}" + ); + } + } +} From 480933f2d6b0eacd96ac48ea6a8f5ec351fb66fd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 7 Feb 2025 14:11:56 +0000 Subject: [PATCH 237/802] chore(deps): udpate dependencies ```output cargo update Updating crates.io index Locking 18 packages to latest compatible versions Updating bytes v1.9.0 -> v1.10.0 Updating cc v1.2.10 -> v1.2.12 Updating clap v4.5.27 -> v4.5.28 Updating clap_derive v4.5.24 -> v4.5.28 Updating once_cell v1.20.2 -> v1.20.3 Updating openssl v0.10.69 -> v0.10.70 Updating openssl-sys v0.9.104 -> v0.9.105 Updating pin-project v1.1.8 -> v1.1.9 Updating pin-project-internal v1.1.8 -> v1.1.9 Updating rustc-hash v2.1.0 -> v2.1.1 Updating rustls v0.23.21 -> v0.23.22 Updating syn v2.0.96 -> v2.0.98 Updating toml v0.8.19 -> v0.8.20 Updating toml_edit v0.22.22 -> v0.22.23 Updating uuid v1.12.1 -> v1.13.1 Updating winnow v0.6.25 -> v0.7.1 Updating zerocopy v0.8.14 -> v0.8.17 Updating zerocopy-derive v0.8.14 -> v0.8.17 ``` --- Cargo.lock | 156 ++++++++++++++++++++++++++--------------------------- 1 file changed, 78 insertions(+), 78 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 228640f84..b186f0e9b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -433,7 +433,7 @@ checksum = "604fde5e028fea851ce1d8570bbdc034bec850d157f7569d10f347d06808c05c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -521,7 +521,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -693,7 +693,7 @@ 
dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -774,9 +774,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" +checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" [[package]] name = "camino" @@ -804,9 +804,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.10" +version = "1.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13208fcbb66eaeffe09b99fffbe1af420f00a7b35aa99ad683dfc1aa76145229" +checksum = "755717a7de9ec452bf7f3f1a3099085deabd7f2962b861dae91ecd7a365903d2" dependencies = [ "jobserver", "libc", @@ -897,9 +897,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.27" +version = "4.5.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "769b0145982b4b48713e01ec42d61614425f27b7058bda7180a3a41f30104796" +checksum = "3e77c3243bd94243c03672cb5154667347c457ca271254724f9f393aee1c05ff" dependencies = [ "clap_builder", "clap_derive", @@ -919,14 +919,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.24" +version = "4.5.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" +checksum = "bf4ced95c6f4a675af3da73304b9ac4ed991640c36374e4b46795c49e17cf1ed" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1147,7 +1147,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1158,7 +1158,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ 
-1202,7 +1202,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "unicode-xid", ] @@ -1214,7 +1214,7 @@ checksum = "ccfae181bab5ab6c5478b2ccb69e4c68a02f8c3ec72f6616bfec9dbc599d2ee0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1235,7 +1235,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1438,7 +1438,7 @@ checksum = "e99b8b3c28ae0e84b604c75f721c21dc77afb3706076af5e8216d15fd1deaae3" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1450,7 +1450,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1462,7 +1462,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1540,7 +1540,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1981,7 +1981,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2301,7 +2301,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2351,7 +2351,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "termcolor", "thiserror 1.0.69", ] @@ -2517,9 +2517,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.20.2" +version = "1.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" +checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e" 
[[package]] name = "oorandom" @@ -2529,9 +2529,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "openssl" -version = "0.10.69" +version = "0.10.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5e534d133a060a3c19daec1eb3e98ec6f4685978834f2dbadfe2ec215bab64e" +checksum = "61cfb4e166a8bb8c9b55c500bc2308550148ece889be90f609377e58140f42c6" dependencies = [ "bitflags", "cfg-if", @@ -2550,7 +2550,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2561,9 +2561,9 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-sys" -version = "0.9.104" +version = "0.9.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" +checksum = "8b22d5b84be05a8d6947c7cb71f7c849aa0f112acd4bf51c2a7c1c988ac0a9dc" dependencies = [ "cc", "libc", @@ -2626,7 +2626,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2685,22 +2685,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" +checksum = "dfe2e71e1471fe07709406bf725f710b02927c9c54b2b5b2ec0e8087d97c327d" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" +checksum = "f6e859e6e5bd50440ab63c47e3ebabc90f26251f7c73c3d3e837b74a1cc3fa67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2850,7 
+2850,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2870,7 +2870,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "version_check", "yansi", ] @@ -2972,7 +2972,7 @@ checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.0", - "zerocopy 0.8.14", + "zerocopy 0.8.17", ] [[package]] @@ -3011,7 +3011,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b08f3c9802962f7e1b25113931d94f43ed9725bebc59db9d0c3e9a23b67e15ff" dependencies = [ "getrandom 0.3.1", - "zerocopy 0.8.14", + "zerocopy 0.8.17", ] [[package]] @@ -3211,7 +3211,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.96", + "syn 2.0.98", "unicode-ident", ] @@ -3253,9 +3253,9 @@ checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] name = "rustc_version" @@ -3281,9 +3281,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.21" +version = "0.23.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f287924602bf649d949c63dc8ac8b235fa5387d394020705b80c4eb597ce5b8" +checksum = "9fb9263ab4eb695e42321db096e3b8fbd715a59b154d5c88d82db2175b681ba7" dependencies = [ "once_cell", "rustls-pki-types", @@ -3440,7 +3440,7 @@ checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3487,7 +3487,7 @@ checksum = 
"6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3538,7 +3538,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3677,9 +3677,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.96" +version = "2.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" +checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" dependencies = [ "proc-macro2", "quote", @@ -3703,7 +3703,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3805,7 +3805,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3816,7 +3816,7 @@ checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3920,7 +3920,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3958,9 +3958,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148" dependencies = [ "serde", "serde_spanned", @@ -3979,9 +3979,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.22" +version = "0.22.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" +checksum = "02a8b472d1a3d7c18e2d61a489aee3453fd9031c33e4f55bd533f4a7adca1bee" dependencies = [ "indexmap 2.7.1", "serde", @@ -4263,7 +4263,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4396,12 +4396,12 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.12.1" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3758f5e68192bb96cc8f9b7e2c2cfdabb435499a28499a42f8f984092adad4b" +checksum = "ced87ca4be083373936a67f8de945faa23b6b42384bd5b64434850802c6dccd0" dependencies = [ - "getrandom 0.2.15", - "rand 0.8.5", + "getrandom 0.3.1", + "rand 0.9.0", ] [[package]] @@ -4484,7 +4484,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "wasm-bindgen-shared", ] @@ -4519,7 +4519,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4697,9 +4697,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.25" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad699df48212c6cc6eb4435f35500ac6fd3b9913324f938aea302022ce19d310" +checksum = "86e376c75f4f43f44db463cf729e0d3acbf954d13e22c51e26e4c264b4ab545f" dependencies = [ "memchr", ] @@ -4760,7 +4760,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "synstructure", ] @@ -4776,11 +4776,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.14" +version = "0.8.17" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a367f292d93d4eab890745e75a778da40909cab4d6ff8173693812f79c4a2468" +checksum = "aa91407dacce3a68c56de03abe2760159582b846c6a4acd2f456618087f12713" dependencies = [ - "zerocopy-derive 0.8.14", + "zerocopy-derive 0.8.17", ] [[package]] @@ -4791,18 +4791,18 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] name = "zerocopy-derive" -version = "0.8.14" +version = "0.8.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3931cb58c62c13adec22e38686b559c86a30565e16ad6e8510a337cedc611e1" +checksum = "06718a168365cad3d5ff0bb133aad346959a2074bd4a85c121255a11304a8626" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4822,7 +4822,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "synstructure", ] @@ -4851,7 +4851,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] From c88ab10c751e02eb9bc8d6d63bcce90e89eff63a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 7 Feb 2025 15:32:13 +0000 Subject: [PATCH 238/802] refactor: reorganize tracker core databases mod --- .../src/databases/{driver.rs => driver/mod.rs} | 16 ++++++++++++++-- .../src/databases/{ => driver}/mysql.rs | 3 +-- .../src/databases/{ => driver}/sqlite.rs | 3 +-- packages/tracker-core/src/databases/mod.rs | 4 ---- 4 files changed, 16 insertions(+), 10 deletions(-) rename packages/tracker-core/src/databases/{driver.rs => driver/mod.rs} (89%) rename packages/tracker-core/src/databases/{ => driver}/mysql.rs (99%) rename packages/tracker-core/src/databases/{ => driver}/sqlite.rs (99%) diff --git a/packages/tracker-core/src/databases/driver.rs 
b/packages/tracker-core/src/databases/driver/mod.rs similarity index 89% rename from packages/tracker-core/src/databases/driver.rs rename to packages/tracker-core/src/databases/driver/mod.rs index 7b532f3f0..651a97913 100644 --- a/packages/tracker-core/src/databases/driver.rs +++ b/packages/tracker-core/src/databases/driver/mod.rs @@ -2,11 +2,11 @@ //! //! See [`databases::driver::build`](crate::core::databases::driver::build) //! function for more information. +use mysql::Mysql; use serde::{Deserialize, Serialize}; +use sqlite::Sqlite; use super::error::Error; -use super::mysql::Mysql; -use super::sqlite::Sqlite; use super::{Builder, Database}; /// The database management system used by the tracker. @@ -61,6 +61,18 @@ pub enum Driver { /// # Panics /// /// This function will panic if unable to create database tables. +pub mod mysql; +pub mod sqlite; + +/// It builds a new database driver. +/// +/// # Panics +/// +/// Will panic if unable to create database tables. +/// +/// # Errors +/// +/// Will return `Error` if unable to build the driver. 
pub fn build(driver: &Driver, db_path: &str) -> Result, Error> { let database = match driver { Driver::Sqlite3 => Builder::::build(db_path), diff --git a/packages/tracker-core/src/databases/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs similarity index 99% rename from packages/tracker-core/src/databases/mysql.rs rename to packages/tracker-core/src/databases/driver/mysql.rs index fb39b781d..b0198464c 100644 --- a/packages/tracker-core/src/databases/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -9,8 +9,7 @@ use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MySqlConnectionManager; use torrust_tracker_primitives::PersistentTorrents; -use super::driver::Driver; -use super::{Database, Error}; +use super::{Database, Driver, Error}; use crate::authentication::key::AUTH_KEY_LENGTH; use crate::authentication::{self, Key}; diff --git a/packages/tracker-core/src/databases/sqlite.rs b/packages/tracker-core/src/databases/driver/sqlite.rs similarity index 99% rename from packages/tracker-core/src/databases/sqlite.rs rename to packages/tracker-core/src/databases/driver/sqlite.rs index a7552ec11..fa90d7930 100644 --- a/packages/tracker-core/src/databases/sqlite.rs +++ b/packages/tracker-core/src/databases/driver/sqlite.rs @@ -9,8 +9,7 @@ use r2d2_sqlite::rusqlite::types::Null; use r2d2_sqlite::SqliteConnectionManager; use torrust_tracker_primitives::{DurationSinceUnixEpoch, PersistentTorrents}; -use super::driver::Driver; -use super::{Database, Error}; +use super::{Database, Driver, Error}; use crate::authentication::{self, Key}; const DRIVER: Driver = Driver::Sqlite3; diff --git a/packages/tracker-core/src/databases/mod.rs b/packages/tracker-core/src/databases/mod.rs index f0930d05d..208305211 100644 --- a/packages/tracker-core/src/databases/mod.rs +++ b/packages/tracker-core/src/databases/mod.rs @@ -45,9 +45,7 @@ //! > **NOTICE**: All keys must have an expiration date. 
pub mod driver; pub mod error; -pub mod mysql; pub mod setup; -pub mod sqlite; use std::marker::PhantomData; @@ -69,8 +67,6 @@ impl Builder where T: Database + 'static, { - /// . - /// /// # Errors /// /// Will return `r2d2::Error` if `db_path` is not able to create a database. From 3f78459208aec0945b0671a92ebc2b31c6569e32 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 7 Feb 2025 15:56:13 +0000 Subject: [PATCH 239/802] refactor: remove constructor from Database trait For two reasons: - Drivers' dependencies migth be different in the future for different drivers. - You can have conflict when mocking the trait. See https://github.com/asomers/mockall/commit/7c54ed1999c4c44cbc0c71701d5293e781c7d7a9 --- .../tracker-core/src/databases/driver/mod.rs | 10 +++--- .../src/databases/driver/mysql.rs | 6 ++-- .../src/databases/driver/sqlite.rs | 12 ++++--- packages/tracker-core/src/databases/mod.rs | 32 ------------------- 4 files changed, 17 insertions(+), 43 deletions(-) diff --git a/packages/tracker-core/src/databases/driver/mod.rs b/packages/tracker-core/src/databases/driver/mod.rs index 651a97913..bdef7fcee 100644 --- a/packages/tracker-core/src/databases/driver/mod.rs +++ b/packages/tracker-core/src/databases/driver/mod.rs @@ -7,7 +7,7 @@ use serde::{Deserialize, Serialize}; use sqlite::Sqlite; use super::error::Error; -use super::{Builder, Database}; +use super::Database; /// The database management system used by the tracker. /// @@ -74,10 +74,10 @@ pub mod sqlite; /// /// Will return `Error` if unable to build the driver. 
pub fn build(driver: &Driver, db_path: &str) -> Result, Error> { - let database = match driver { - Driver::Sqlite3 => Builder::::build(db_path), - Driver::MySQL => Builder::::build(db_path), - }?; + let database: Box = match driver { + Driver::Sqlite3 => Box::new(Sqlite::new(db_path)?), + Driver::MySQL => Box::new(Mysql::new(db_path)?), + }; database.create_database_tables().expect("Could not create database tables."); diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index b0198464c..69fa1240e 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -19,7 +19,7 @@ pub struct Mysql { pool: Pool, } -impl Database for Mysql { +impl Mysql { /// It instantiates a new `MySQL` database driver. /// /// Refer to [`databases::Database::new`](crate::core::databases::Database::new). @@ -27,7 +27,7 @@ impl Database for Mysql { /// # Errors /// /// Will return `r2d2::Error` if `db_path` is not able to create `MySQL` database. - fn new(db_path: &str) -> Result { + pub fn new(db_path: &str) -> Result { let opts = Opts::from_url(db_path)?; let builder = OptsBuilder::from_opts(opts); let manager = MySqlConnectionManager::new(builder); @@ -35,7 +35,9 @@ impl Database for Mysql { Ok(Self { pool }) } +} +impl Database for Mysql { /// Refer to [`databases::Database::create_database_tables`](crate::core::databases::Database::create_database_tables). fn create_database_tables(&self) -> Result<(), Error> { let create_whitelist_table = " diff --git a/packages/tracker-core/src/databases/driver/sqlite.rs b/packages/tracker-core/src/databases/driver/sqlite.rs index fa90d7930..3a08406fa 100644 --- a/packages/tracker-core/src/databases/driver/sqlite.rs +++ b/packages/tracker-core/src/databases/driver/sqlite.rs @@ -18,7 +18,7 @@ pub struct Sqlite { pool: Pool, } -impl Database for Sqlite { +impl Sqlite { /// It instantiates a new `SQLite3` database driver. 
/// /// Refer to [`databases::Database::new`](crate::core::databases::Database::new). @@ -26,11 +26,15 @@ impl Database for Sqlite { /// # Errors /// /// Will return `r2d2::Error` if `db_path` is not able to create `SqLite` database. - fn new(db_path: &str) -> Result { - let cm = SqliteConnectionManager::file(db_path); - Pool::new(cm).map_or_else(|err| Err((err, Driver::Sqlite3).into()), |pool| Ok(Sqlite { pool })) + pub fn new(db_path: &str) -> Result { + let manager = SqliteConnectionManager::file(db_path); + let pool = r2d2::Pool::builder().build(manager).map_err(|e| (e, DRIVER))?; + + Ok(Self { pool }) } +} +impl Database for Sqlite { /// Refer to [`databases::Database::create_database_tables`](crate::core::databases::Database::create_database_tables). fn create_database_tables(&self) -> Result<(), Error> { let create_whitelist_table = " diff --git a/packages/tracker-core/src/databases/mod.rs b/packages/tracker-core/src/databases/mod.rs index 208305211..010252139 100644 --- a/packages/tracker-core/src/databases/mod.rs +++ b/packages/tracker-core/src/databases/mod.rs @@ -47,8 +47,6 @@ pub mod driver; pub mod error; pub mod setup; -use std::marker::PhantomData; - use bittorrent_primitives::info_hash::InfoHash; use mockall::automock; use torrust_tracker_primitives::PersistentTorrents; @@ -56,39 +54,9 @@ use torrust_tracker_primitives::PersistentTorrents; use self::error::Error; use crate::authentication::{self, Key}; -struct Builder -where - T: Database, -{ - phantom: PhantomData, -} - -impl Builder -where - T: Database + 'static, -{ - /// # Errors - /// - /// Will return `r2d2::Error` if `db_path` is not able to create a database. - pub(self) fn build(db_path: &str) -> Result, Error> { - Ok(Box::new(T::new(db_path)?)) - } -} - /// The persistence trait. It contains all the methods to interact with the database. #[automock] pub trait Database: Sync + Send { - /// It instantiates a new database driver. 
- /// - /// # Errors - /// - /// Will return `r2d2::Error` if `db_path` is not able to create a database. - fn new(db_path: &str) -> Result - where - Self: std::marker::Sized; - - // Schema - /// It generates the database tables. SQL queries are hardcoded in the trait /// implementation. /// From 568d6d362316e08fad7b7710fbb37d9534633687 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 7 Feb 2025 17:31:27 +0000 Subject: [PATCH 240/802] test: [#1251] add tests for core database driver sqlite --- .../src/databases/driver/sqlite.rs | 113 ++++++++++++++++++ 1 file changed, 113 insertions(+) diff --git a/packages/tracker-core/src/databases/driver/sqlite.rs b/packages/tracker-core/src/databases/driver/sqlite.rs index 3a08406fa..e04cf6110 100644 --- a/packages/tracker-core/src/databases/driver/sqlite.rs +++ b/packages/tracker-core/src/databases/driver/sqlite.rs @@ -288,3 +288,116 @@ impl Database for Sqlite { } } } + +#[cfg(test)] +mod tests { + + mod the_sqlite_driver { + use torrust_tracker_configuration::Core; + use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; + + use crate::databases::driver::sqlite::Sqlite; + use crate::databases::Database; + + fn initialize_driver_and_database() -> Sqlite { + let config = ephemeral_configuration(); + let driver = Sqlite::new(&config.database.path).unwrap(); + driver.create_database_tables().unwrap(); + driver + } + + fn ephemeral_configuration() -> Core { + let mut config = Core::default(); + let temp_file = ephemeral_sqlite_database(); + temp_file.to_str().unwrap().clone_into(&mut config.database.path); + config + } + + mod handling_torrent_persistence { + + use crate::core_tests::sample_info_hash; + use crate::databases::driver::sqlite::tests::the_sqlite_driver::initialize_driver_and_database; + use crate::databases::Database; + + #[test] + fn it_should_save_and_load_persistent_torrents() { + let driver = initialize_driver_and_database(); + + let infohash = sample_info_hash(); + + let 
number_of_downloads = 1; + + driver.save_persistent_torrent(&infohash, number_of_downloads).unwrap(); + + let torrents = driver.load_persistent_torrents().unwrap(); + + assert_eq!(torrents.len(), 1); + assert_eq!(torrents.get(&infohash), Some(number_of_downloads).as_ref()); + } + } + + mod handling_authentication_keys { + use std::time::Duration; + + use crate::authentication::key::{generate_key, generate_permanent_key}; + use crate::databases::driver::sqlite::tests::the_sqlite_driver::initialize_driver_and_database; + use crate::databases::Database; + + #[test] + fn it_should_save_and_load_permanent_authentication_keys() { + let driver = initialize_driver_and_database(); + + // Add a new permanent key + let peer_key = generate_permanent_key(); + driver.add_key_to_keys(&peer_key).unwrap(); + + // Get the key back + let stored_peer_key = driver.get_key_from_keys(&peer_key.key()).unwrap().unwrap(); + + assert_eq!(stored_peer_key, peer_key); + } + #[test] + fn it_should_save_and_load_expiring_authentication_keys() { + let driver = initialize_driver_and_database(); + + // Add a new expiring key + let peer_key = generate_key(Some(Duration::from_secs(120))); + driver.add_key_to_keys(&peer_key).unwrap(); + + // Get the key back + let stored_peer_key = driver.get_key_from_keys(&peer_key.key()).unwrap().unwrap(); + + /* todo: + + The expiration time recovered from the database is not the same + as the one we set. It includes a small offset (nanoseconds). 
+ + left: PeerKey { key: Key("7HP1NslpuQn6kLVAgAF4nFpnZNSQ4hrx"), valid_until: Some(1739182308s) } + right: PeerKey { key: Key("7HP1NslpuQn6kLVAgAF4nFpnZNSQ4hrx"), valid_until: Some(1739182308.603691299s) + + */ + + assert_eq!(stored_peer_key.key(), peer_key.key()); + assert_eq!( + stored_peer_key.valid_until.unwrap().as_secs(), + peer_key.valid_until.unwrap().as_secs() + ); + } + + #[test] + fn it_should_remove_an_authentication_key() { + let driver = initialize_driver_and_database(); + + let peer_key = generate_key(None); + + // Add a new key + driver.add_key_to_keys(&peer_key).unwrap(); + + // Remove the key + driver.remove_key_from_keys(&peer_key.key()).unwrap(); + + assert!(driver.get_key_from_keys(&peer_key.key()).unwrap().is_none()); + } + } + } +} From 46949a6fe1fcc3d4f7b877d0f5e2eebf89b65db0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Feb 2025 17:27:01 +0000 Subject: [PATCH 241/802] test: [#1251] add tests for core database driver mysql --- .github/workflows/testing.yaml | 4 + Cargo.lock | 417 +++++++++++++++++- cSpell.json | 1 + packages/tracker-core/Cargo.toml | 11 +- .../src/databases/driver/mysql.rs | 245 ++++++++++ 5 files changed, 652 insertions(+), 26 deletions(-) diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index 28600dee9..671864fc9 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -146,6 +146,10 @@ jobs: name: Run Unit Tests run: cargo test --tests --benches --examples --workspace --all-targets --all-features + - id: database + name: Run MySQL Database Tests + run: TORRUST_TRACKER_CORE_RUN_MYSQL_DRIVER_TEST=true cargo test --package bittorrent-tracker-core + e2e: name: E2E runs-on: ubuntu-latest diff --git a/Cargo.lock b/Cargo.lock index b186f0e9b..2f99db113 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -314,6 +314,17 @@ version = "4.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" +[[package]] +name = "async-trait" +version = "0.1.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "644dd749086bf3771a2fbc5f256fdb982d53f011c7d5d560304eafeecebce79d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + [[package]] name = "atomic" version = "0.6.0" @@ -451,11 +462,11 @@ dependencies = [ "hyper", "hyper-util", "pin-project-lite", - "rustls", + "rustls 0.23.22", "rustls-pemfile", "rustls-pki-types", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.1", "tower 0.4.13", "tower-service", ] @@ -472,7 +483,7 @@ dependencies = [ "miniz_oxide", "object", "rustc-demangle", - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -608,6 +619,7 @@ dependencies = [ "rand 0.9.0", "serde", "serde_json", + "testcontainers", "thiserror 2.0.11", "tokio", "torrust-tracker-api-client", @@ -618,6 +630,7 @@ dependencies = [ "torrust-tracker-test-helpers", "torrust-tracker-torrent-repository", "tracing", + "url", ] [[package]] @@ -673,6 +686,56 @@ dependencies = [ "cipher", ] +[[package]] +name = "bollard" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0aed08d3adb6ebe0eff737115056652670ae290f177759aac19c30456135f94c" +dependencies = [ + "base64 0.22.1", + "bollard-stubs", + "bytes", + "futures-core", + "futures-util", + "hex", + "home", + "http", + "http-body-util", + "hyper", + "hyper-named-pipe", + "hyper-rustls 0.26.0", + "hyper-util", + "hyperlocal-next", + "log", + "pin-project-lite", + "rustls 0.22.4", + "rustls-native-certs", + "rustls-pemfile", + "rustls-pki-types", + "serde", + "serde_derive", + "serde_json", + "serde_repr", + "serde_urlencoded", + "thiserror 1.0.69", + "tokio", + "tokio-util", + "tower-service", + "url", + "winapi", +] + +[[package]] +name = "bollard-stubs" +version = "1.44.0-rc.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"709d9aa1c37abb89d40f19f5d0ad6f0d88cb1581264e571c9350fc5bb89cf1c5" +dependencies = [ + "serde", + "serde_repr", + "serde_with", +] + [[package]] name = "borsh" version = "1.5.5" @@ -844,7 +907,7 @@ dependencies = [ "iana-time-zone", "num-traits", "serde", - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -1227,6 +1290,27 @@ dependencies = [ "crypto-common", ] +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + [[package]] name = "displaydoc" version = "0.2.5" @@ -1238,6 +1322,17 @@ dependencies = [ "syn 2.0.98", ] +[[package]] +name = "docker_credential" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31951f49556e34d90ed28342e1df7e1cb7a229c4cab0aecc627b5d91edd41d07" +dependencies = [ + "base64 0.21.7", + "serde", + "serde_json", +] + [[package]] name = "downcast" version = "0.11.0" @@ -1609,7 +1704,7 @@ dependencies = [ "cfg-if", "libc", "wasi 0.13.3+wasi-0.2.2", - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -1724,6 +1819,15 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" +[[package]] +name = "home" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" +dependencies = [ + "windows-sys 0.59.0", +] + [[package]] name = "http" version = "1.2.0" @@ -1791,6 +1895,40 @@ dependencies = [ "want", ] +[[package]] 
+name = "hyper-named-pipe" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73b7d8abf35697b81a825e386fc151e0d503e8cb5fcb93cc8669c376dfd6f278" +dependencies = [ + "hex", + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", + "winapi", +] + +[[package]] +name = "hyper-rustls" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" +dependencies = [ + "futures-util", + "http", + "hyper", + "hyper-util", + "log", + "rustls 0.22.4", + "rustls-native-certs", + "rustls-pki-types", + "tokio", + "tokio-rustls 0.25.0", + "tower-service", +] + [[package]] name = "hyper-rustls" version = "0.27.5" @@ -1801,10 +1939,10 @@ dependencies = [ "http", "hyper", "hyper-util", - "rustls", + "rustls 0.23.22", "rustls-pki-types", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.1", "tower-service", ] @@ -1843,6 +1981,21 @@ dependencies = [ "tracing", ] +[[package]] +name = "hyperlocal-next" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acf569d43fa9848e510358c07b80f4adf34084ddc28c6a4a651ee8474c070dcc" +dependencies = [ + "hex", + "http-body-util", + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + [[package]] name = "iana-time-zone" version = "0.1.61" @@ -2151,7 +2304,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -2160,6 +2313,16 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" +[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags", + "libc", +] + [[package]] name = "libsqlite3-sys" version = "0.31.0" @@ -2571,6 +2734,12 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + [[package]] name = "overload" version = "0.1.1" @@ -2603,7 +2772,32 @@ dependencies = [ "libc", "redox_syscall", "smallvec", - "windows-targets", + "windows-targets 0.52.6", +] + +[[package]] +name = "parse-display" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "914a1c2265c98e2446911282c6ac86d8524f495792c38c5bd884f80499c7538a" +dependencies = [ + "parse-display-derive", + "regex", + "regex-syntax", +] + +[[package]] +name = "parse-display-derive" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ae7800a4c974efd12df917266338e79a7a74415173caf7e70aa0a0707345281" +dependencies = [ + "proc-macro2", + "quote", + "regex", + "regex-syntax", + "structmeta", + "syn 2.0.98", ] [[package]] @@ -3043,6 +3237,17 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom 0.2.15", + "libredox", + "thiserror 1.0.69", +] + [[package]] name = "regex" version = "1.11.1" @@ -3103,7 +3308,7 @@ dependencies = [ "http-body", "http-body-util", "hyper", - "hyper-rustls", + "hyper-rustls 0.27.5", "hyper-tls", "hyper-util", "ipnet", @@ -3279,6 +3484,20 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "rustls" +version = "0.22.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" +dependencies = [ + "log", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + [[package]] name = "rustls" version = "0.23.22" @@ -3292,6 +3511,19 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rustls-native-certs" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "rustls-pki-types", + "schannel", + "security-framework", +] + [[package]] name = "rustls-pemfile" version = "2.2.0" @@ -3648,6 +3880,29 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" +[[package]] +name = "structmeta" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e1575d8d40908d70f6fd05537266b90ae71b15dbbe7a8b7dffa2b759306d329" +dependencies = [ + "proc-macro2", + "quote", + "structmeta-derive", + "syn 2.0.98", +] + +[[package]] +name = "structmeta-derive" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.98", +] + [[package]] name = "subprocess" version = "0.2.9" @@ -3779,6 +4034,32 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" +[[package]] +name = "testcontainers" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "025e0ac563d543e0354d984540e749859a83dbe5c0afb8d458dc48d91cef2d6a" +dependencies = [ + "async-trait", + "bollard", + "bollard-stubs", + "bytes", + "dirs", + "docker_credential", + "futures", + "log", + "memchr", + 
"parse-display", + "serde", + "serde_json", + "serde_with", + "thiserror 1.0.69", + "tokio", + "tokio-stream", + "tokio-util", + "url", +] + [[package]] name = "thiserror" version = "1.0.69" @@ -3933,13 +4214,35 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.4", + "rustls-pki-types", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" dependencies = [ - "rustls", + "rustls 0.23.22", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", "tokio", ] @@ -4580,7 +4883,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -4591,7 +4894,7 @@ checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" dependencies = [ "windows-result", "windows-strings", - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -4600,7 +4903,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -4610,7 +4913,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" dependencies = [ "windows-result", - 
"windows-targets", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", ] [[package]] @@ -4619,7 +4931,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -4628,7 +4940,22 @@ version = "0.59.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", ] [[package]] @@ -4637,28 +4964,46 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + [[package]] name = "windows_aarch64_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + [[package]] name = "windows_i686_gnu" version = "0.52.6" @@ -4671,24 +5016,48 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + [[package]] name = "windows_i686_msvc" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + [[package]] name = "windows_x86_64_gnu" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + [[package]] name = "windows_x86_64_msvc" version = "0.52.6" diff --git a/cSpell.json b/cSpell.json index a21e69b9f..b1e9a5e95 100644 --- a/cSpell.json +++ b/cSpell.json @@ -155,6 +155,7 @@ "taiki", "tdyne", "tempfile", + "testcontainers", "thiserror", "tlsv", "Torrentstorm", diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml index 96505a7ba..46807a534 100644 --- a/packages/tracker-core/Cargo.toml +++ b/packages/tracker-core/Cargo.toml @@ -3,7 +3,6 @@ description = "A library with the core functionality needed to implement a BitTo keywords = ["api", "bittorrent", "core", "library", "tracker"] name = "bittorrent-tracker-core" readme = "README.md" - authors.workspace = true documentation.workspace = true edition.workspace = true @@ -27,7 +26,13 @@ rand = "0" serde = { version = "1", features = ["derive"] } serde_json = { version = "1", features = ["preserve_order"] } thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ + "macros", + "net", + "rt-multi-thread", + "signal", + "sync", +] } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } 
torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } @@ -40,3 +45,5 @@ local-ip-address = "0" mockall = "0" torrust-tracker-api-client = { version = "3.0.0-develop", path = "../tracker-api-client" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } +testcontainers = "0.17.0" +url = "2.5.4" diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index 69fa1240e..a30d75b90 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -252,3 +252,248 @@ impl Database for Mysql { Ok(1) } } + +#[cfg(test)] +mod tests { + /* + We run a MySQL container and run all the tests against the same container and database. + + The `Database`` trait is very simple and we only have one driver that needs + a container. In the future we might want to use different approaches like: + + - https://github.com/testcontainers/testcontainers-rs/issues/707 + - https://www.infinyon.com/blog/2021/04/rust-custom-test-harness/ + - https://github.com/torrust/torrust-tracker/blob/develop/src/bin/e2e_tests_runner.rs + + If we increase the number of methods or the number or drivers. 
+ */ + use std::time::Duration; + + use testcontainers::runners::AsyncRunner; + use testcontainers::{ContainerAsync, GenericImage}; + use torrust_tracker_configuration::Core; + + use super::Mysql; + use crate::databases::Database; + + #[derive(Debug, Default)] + struct StoppedMysqlContainer {} + + impl StoppedMysqlContainer { + async fn run(self, config: &MysqlConfiguration) -> Result> { + let container = GenericImage::new("mysql", "8.0") + .with_env_var("MYSQL_ROOT_PASSWORD", config.db_root_password.clone()) + .with_env_var("MYSQL_DATABASE", config.database.clone()) + .with_env_var("MYSQL_ROOT_HOST", "%") + .with_exposed_port(config.internal_port) + // todo: this doesn't work + //.with_wait_for(WaitFor::message_on_stdout("ready for connections")) + .start() + .await?; + + Ok(RunningMysqlContainer::new(container, config.internal_port)) + } + } + + struct RunningMysqlContainer { + container: ContainerAsync, + internal_port: u16, + } + + impl RunningMysqlContainer { + fn new(container: ContainerAsync, internal_port: u16) -> Self { + Self { + container, + internal_port, + } + } + + async fn stop(self) { + self.container.stop().await.unwrap(); + } + + async fn get_host(&self) -> url::Host { + self.container.get_host().await.unwrap() + } + + async fn get_host_port_ipv4(&self) -> u16 { + self.container.get_host_port_ipv4(self.internal_port).await.unwrap() + } + } + + impl Default for MysqlConfiguration { + fn default() -> Self { + Self { + internal_port: 3306, + database: "torrust_tracker_test".to_string(), + db_user: "root".to_string(), + db_root_password: "test".to_string(), + } + } + } + + struct MysqlConfiguration { + pub internal_port: u16, + pub database: String, + pub db_user: String, + pub db_root_password: String, + } + + fn core_configuration(host: &url::Host, port: u16, mysql_configuration: &MysqlConfiguration) -> Core { + let mut config = Core::default(); + + let database = mysql_configuration.database.clone(); + let db_user = 
mysql_configuration.db_user.clone(); + let db_password = mysql_configuration.db_root_password.clone(); + + config.database.path = format!("mysql://{db_user}:{db_password}@{host}:{port}/{database}"); + + config + } + + fn initialize_driver(config: &Core) -> Mysql { + Mysql::new(&config.database.path).unwrap() + } + + async fn create_database_tables(driver: &Mysql) -> Result<(), Box> { + for _ in 0..5 { + if driver.create_database_tables().is_ok() { + return Ok(()); + } + tokio::time::sleep(Duration::from_secs(2)).await; + } + Err("MySQL is not ready after retries.".into()) + } + + #[tokio::test] + async fn run() -> Result<(), Box> { + if std::env::var("TORRUST_TRACKER_CORE_RUN_MYSQL_DRIVER_TEST").is_err() { + println!("Skipping the MySQL driver tests."); + return Ok(()); + } + + let mysql_configuration = MysqlConfiguration::default(); + + let stopped_mysql_container = StoppedMysqlContainer::default(); + + let mysql_container = stopped_mysql_container.run(&mysql_configuration).await.unwrap(); + + let host = mysql_container.get_host().await; + let port = mysql_container.get_host_port_ipv4().await; + + let config = core_configuration(&host, port, &mysql_configuration); + + let driver = initialize_driver(&config); + + // Since the interface is very simple and there are no conflicts between + // tests, we share the same database. If we want to isolate the tests in + // the future, we can create a new database for each test. + create_database_tables(&driver).await?; + + // todo: truncate tables otherwise they will increase in size over time. + // That's not a problem on CI when the database is always newly created. 
+ + handling_torrent_persistence::it_should_save_and_load_persistent_torrents(&driver); + + // Permanent keys + //handling_authentication_keys::it_should_save_and_load_permanent_authentication_keys(&driver); + //handling_authentication_keys::it_should_remove_a_permanent_authentication_key(&driver); + + // Expiring keys + handling_authentication_keys::it_should_save_and_load_expiring_authentication_keys(&driver); + //handling_authentication_keys::it_should_remove_an_expiring_authentication_key(&driver); + + driver.drop_database_tables().unwrap(); + + mysql_container.stop().await; + + Ok(()) + } + + mod handling_torrent_persistence { + + use crate::core_tests::sample_info_hash; + use crate::databases::Database; + + pub fn it_should_save_and_load_persistent_torrents(driver: &impl Database) { + let infohash = sample_info_hash(); + + let number_of_downloads = 1; + + driver.save_persistent_torrent(&infohash, number_of_downloads).unwrap(); + + let torrents = driver.load_persistent_torrents().unwrap(); + + assert_eq!(torrents.len(), 1); + assert_eq!(torrents.get(&infohash), Some(number_of_downloads).as_ref()); + } + } + + mod handling_authentication_keys { + + use std::time::Duration; + + use crate::authentication::key::generate_key; + use crate::databases::Database; + + /*pub fn it_should_save_and_load_permanent_authentication_keys(driver: &impl Database) { + // Add a new permanent key + let peer_key = generate_permanent_key(); + driver.add_key_to_keys(&peer_key).unwrap(); + + // Get the key back + let stored_peer_key = driver.get_key_from_keys(&peer_key.key()).unwrap().unwrap(); + + assert_eq!(stored_peer_key, peer_key); + }*/ + + pub fn it_should_save_and_load_expiring_authentication_keys(driver: &impl Database) { + // Add a new expiring key + let peer_key = generate_key(Some(Duration::from_secs(120))); + driver.add_key_to_keys(&peer_key).unwrap(); + + // Get the key back + let stored_peer_key = driver.get_key_from_keys(&peer_key.key()).unwrap().unwrap(); + + /* todo: + 
+ The expiration time recovered from the database is not the same + as the one we set. It includes a small offset (nanoseconds). + + left: PeerKey { key: Key("7HP1NslpuQn6kLVAgAF4nFpnZNSQ4hrx"), valid_until: Some(1739182308s) } + right: PeerKey { key: Key("7HP1NslpuQn6kLVAgAF4nFpnZNSQ4hrx"), valid_until: Some(1739182308.603691299s) + + */ + + assert_eq!(stored_peer_key.key(), peer_key.key()); + assert_eq!( + stored_peer_key.valid_until.unwrap().as_secs(), + peer_key.valid_until.unwrap().as_secs() + ); + } + + /*pub fn it_should_remove_a_permanent_authentication_key(driver: &impl Database) { + let peer_key = generate_permanent_key(); + + // Add a new key + driver.add_key_to_keys(&peer_key).unwrap(); + + // Remove the key + driver.remove_key_from_keys(&peer_key.key()).unwrap(); + + assert!(driver.get_key_from_keys(&peer_key.key()).unwrap().is_none()); + }*/ + + /*pub fn it_should_remove_an_expiring_authentication_key(driver: &impl Database) { + let peer_key = generate_key(Some(Duration::from_secs(120))); + + // Add a new key + driver.add_key_to_keys(&peer_key).unwrap(); + + // Remove the key + driver.remove_key_from_keys(&peer_key.key()).unwrap(); + + assert!(driver.get_key_from_keys(&peer_key.key()).unwrap().is_none()); + }*/ + } +} From 7ddacdc73d51a3365ce8068c9c8298a853e3b24d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Feb 2025 19:02:13 +0000 Subject: [PATCH 242/802] test: [#1251] unify test runner for sqlite and mysql drivers In the tracker-core package. 
--- packages/tracker-core/README.md | 8 ++ .../tracker-core/src/databases/driver/mod.rs | 130 +++++++++++++++++ .../src/databases/driver/mysql.rs | 133 ++---------------- .../src/databases/driver/sqlite.rs | 121 +++------------- 4 files changed, 172 insertions(+), 220 deletions(-) diff --git a/packages/tracker-core/README.md b/packages/tracker-core/README.md index e36a6f4be..dfb57f304 100644 --- a/packages/tracker-core/README.md +++ b/packages/tracker-core/README.md @@ -12,6 +12,14 @@ You usually don’t need to use this library directly. Instead, you should use t ## Testing +Run tests including tests for MySQL driver: + +```console +TORRUST_TRACKER_CORE_RUN_MYSQL_DRIVER_TEST=true cargo test +``` + +> NOTE: MySQL driver requires docker to run. We don't run them by default because we don't want to run them when we build container images. The Torrust Tracker container build runs unit tests for all dependencies, including this library. + Show coverage report: ```console diff --git a/packages/tracker-core/src/databases/driver/mod.rs b/packages/tracker-core/src/databases/driver/mod.rs index bdef7fcee..30888375e 100644 --- a/packages/tracker-core/src/databases/driver/mod.rs +++ b/packages/tracker-core/src/databases/driver/mod.rs @@ -83,3 +83,133 @@ pub fn build(driver: &Driver, db_path: &str) -> Result, Error> Ok(database) } + +#[cfg(test)] +mod tests { + use std::sync::Arc; + use std::time::Duration; + + use crate::databases::Database; + + pub async fn run_tests(driver: &Arc>) { + // Since the interface is very simple and there are no conflicts between + // tests, we share the same database. If we want to isolate the tests in + // the future, we can create a new database for each test. + create_database_tables(driver).await.unwrap(); + + // todo: truncate tables otherwise they will increase in size over time. + // That's not a problem on CI when the database is always newly created. 
+ + handling_torrent_persistence::it_should_save_and_load_persistent_torrents(driver); + + // Permanent keys + //handling_authentication_keys::it_should_save_and_load_permanent_authentication_keys(&driver); + //handling_authentication_keys::it_should_remove_a_permanent_authentication_key(&driver); + + // Expiring keys + handling_authentication_keys::it_should_save_and_load_expiring_authentication_keys(driver); + //handling_authentication_keys::it_should_remove_an_expiring_authentication_key(&driver); + + driver.drop_database_tables().unwrap(); + } + + async fn create_database_tables(driver: &Arc>) -> Result<(), Box> { + for _ in 0..5 { + if driver.create_database_tables().is_ok() { + return Ok(()); + } + tokio::time::sleep(Duration::from_secs(2)).await; + } + Err("MySQL is not ready after retries.".into()) + } + + mod handling_torrent_persistence { + + use std::sync::Arc; + + use crate::core_tests::sample_info_hash; + use crate::databases::Database; + + pub fn it_should_save_and_load_persistent_torrents(driver: &Arc>) { + let infohash = sample_info_hash(); + + let number_of_downloads = 1; + + driver.save_persistent_torrent(&infohash, number_of_downloads).unwrap(); + + let torrents = driver.load_persistent_torrents().unwrap(); + + assert_eq!(torrents.len(), 1); + assert_eq!(torrents.get(&infohash), Some(number_of_downloads).as_ref()); + } + } + + mod handling_authentication_keys { + + use std::sync::Arc; + use std::time::Duration; + + use crate::authentication::key::generate_key; + use crate::databases::Database; + + /*pub fn it_should_save_and_load_permanent_authentication_keys(driver: &Arc>) { + // Add a new permanent key + let peer_key = generate_permanent_key(); + driver.add_key_to_keys(&peer_key).unwrap(); + + // Get the key back + let stored_peer_key = driver.get_key_from_keys(&peer_key.key()).unwrap().unwrap(); + + assert_eq!(stored_peer_key, peer_key); + }*/ + + pub fn it_should_save_and_load_expiring_authentication_keys(driver: &Arc>) { + // Add a new 
expiring key + let peer_key = generate_key(Some(Duration::from_secs(120))); + driver.add_key_to_keys(&peer_key).unwrap(); + + // Get the key back + let stored_peer_key = driver.get_key_from_keys(&peer_key.key()).unwrap().unwrap(); + + /* todo: + + The expiration time recovered from the database is not the same + as the one we set. It includes a small offset (nanoseconds). + + left: PeerKey { key: Key("7HP1NslpuQn6kLVAgAF4nFpnZNSQ4hrx"), valid_until: Some(1739182308s) } + right: PeerKey { key: Key("7HP1NslpuQn6kLVAgAF4nFpnZNSQ4hrx"), valid_until: Some(1739182308.603691299s) + + */ + + assert_eq!(stored_peer_key.key(), peer_key.key()); + assert_eq!( + stored_peer_key.valid_until.unwrap().as_secs(), + peer_key.valid_until.unwrap().as_secs() + ); + } + + /*pub fn it_should_remove_a_permanent_authentication_key(driver: &Arc>) { + let peer_key = generate_permanent_key(); + + // Add a new key + driver.add_key_to_keys(&peer_key).unwrap(); + + // Remove the key + driver.remove_key_from_keys(&peer_key.key()).unwrap(); + + assert!(driver.get_key_from_keys(&peer_key.key()).unwrap().is_none()); + }*/ + + /*pub fn it_should_remove_an_expiring_authentication_key(driver: &Arc>) { + let peer_key = generate_key(Some(Duration::from_secs(120))); + + // Add a new key + driver.add_key_to_keys(&peer_key).unwrap(); + + // Remove the key + driver.remove_key_from_keys(&peer_key.key()).unwrap(); + + assert!(driver.get_key_from_keys(&peer_key.key()).unwrap().is_none()); + }*/ + } +} diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index a30d75b90..39ef8f55b 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -255,9 +255,15 @@ impl Database for Mysql { #[cfg(test)] mod tests { + use std::sync::Arc; + /* We run a MySQL container and run all the tests against the same container and database. 
+ Test for this driver are executed with: + + `TORRUST_TRACKER_CORE_RUN_MYSQL_DRIVER_TEST=true cargo test` + The `Database`` trait is very simple and we only have one driver that needs a container. In the future we might want to use different approaches like: @@ -267,13 +273,12 @@ mod tests { If we increase the number of methods or the number or drivers. */ - use std::time::Duration; - use testcontainers::runners::AsyncRunner; use testcontainers::{ContainerAsync, GenericImage}; use torrust_tracker_configuration::Core; use super::Mysql; + use crate::databases::driver::tests::run_tests; use crate::databases::Database; #[derive(Debug, Default)] @@ -351,22 +356,13 @@ mod tests { config } - fn initialize_driver(config: &Core) -> Mysql { - Mysql::new(&config.database.path).unwrap() - } - - async fn create_database_tables(driver: &Mysql) -> Result<(), Box> { - for _ in 0..5 { - if driver.create_database_tables().is_ok() { - return Ok(()); - } - tokio::time::sleep(Duration::from_secs(2)).await; - } - Err("MySQL is not ready after retries.".into()) + fn initialize_driver(config: &Core) -> Arc> { + let driver: Arc> = Arc::new(Box::new(Mysql::new(&config.database.path).unwrap())); + driver } #[tokio::test] - async fn run() -> Result<(), Box> { + async fn run_mysql_driver_tests() -> Result<(), Box> { if std::env::var("TORRUST_TRACKER_CORE_RUN_MYSQL_DRIVER_TEST").is_err() { println!("Skipping the MySQL driver tests."); return Ok(()); @@ -385,115 +381,10 @@ mod tests { let driver = initialize_driver(&config); - // Since the interface is very simple and there are no conflicts between - // tests, we share the same database. If we want to isolate the tests in - // the future, we can create a new database for each test. - create_database_tables(&driver).await?; - - // todo: truncate tables otherwise they will increase in size over time. - // That's not a problem on CI when the database is always newly created. 
- - handling_torrent_persistence::it_should_save_and_load_persistent_torrents(&driver); - - // Permanent keys - //handling_authentication_keys::it_should_save_and_load_permanent_authentication_keys(&driver); - //handling_authentication_keys::it_should_remove_a_permanent_authentication_key(&driver); - - // Expiring keys - handling_authentication_keys::it_should_save_and_load_expiring_authentication_keys(&driver); - //handling_authentication_keys::it_should_remove_an_expiring_authentication_key(&driver); - - driver.drop_database_tables().unwrap(); + run_tests(&driver).await; mysql_container.stop().await; Ok(()) } - - mod handling_torrent_persistence { - - use crate::core_tests::sample_info_hash; - use crate::databases::Database; - - pub fn it_should_save_and_load_persistent_torrents(driver: &impl Database) { - let infohash = sample_info_hash(); - - let number_of_downloads = 1; - - driver.save_persistent_torrent(&infohash, number_of_downloads).unwrap(); - - let torrents = driver.load_persistent_torrents().unwrap(); - - assert_eq!(torrents.len(), 1); - assert_eq!(torrents.get(&infohash), Some(number_of_downloads).as_ref()); - } - } - - mod handling_authentication_keys { - - use std::time::Duration; - - use crate::authentication::key::generate_key; - use crate::databases::Database; - - /*pub fn it_should_save_and_load_permanent_authentication_keys(driver: &impl Database) { - // Add a new permanent key - let peer_key = generate_permanent_key(); - driver.add_key_to_keys(&peer_key).unwrap(); - - // Get the key back - let stored_peer_key = driver.get_key_from_keys(&peer_key.key()).unwrap().unwrap(); - - assert_eq!(stored_peer_key, peer_key); - }*/ - - pub fn it_should_save_and_load_expiring_authentication_keys(driver: &impl Database) { - // Add a new expiring key - let peer_key = generate_key(Some(Duration::from_secs(120))); - driver.add_key_to_keys(&peer_key).unwrap(); - - // Get the key back - let stored_peer_key = 
driver.get_key_from_keys(&peer_key.key()).unwrap().unwrap(); - - /* todo: - - The expiration time recovered from the database is not the same - as the one we set. It includes a small offset (nanoseconds). - - left: PeerKey { key: Key("7HP1NslpuQn6kLVAgAF4nFpnZNSQ4hrx"), valid_until: Some(1739182308s) } - right: PeerKey { key: Key("7HP1NslpuQn6kLVAgAF4nFpnZNSQ4hrx"), valid_until: Some(1739182308.603691299s) - - */ - - assert_eq!(stored_peer_key.key(), peer_key.key()); - assert_eq!( - stored_peer_key.valid_until.unwrap().as_secs(), - peer_key.valid_until.unwrap().as_secs() - ); - } - - /*pub fn it_should_remove_a_permanent_authentication_key(driver: &impl Database) { - let peer_key = generate_permanent_key(); - - // Add a new key - driver.add_key_to_keys(&peer_key).unwrap(); - - // Remove the key - driver.remove_key_from_keys(&peer_key.key()).unwrap(); - - assert!(driver.get_key_from_keys(&peer_key.key()).unwrap().is_none()); - }*/ - - /*pub fn it_should_remove_an_expiring_authentication_key(driver: &impl Database) { - let peer_key = generate_key(Some(Duration::from_secs(120))); - - // Add a new key - driver.add_key_to_keys(&peer_key).unwrap(); - - // Remove the key - driver.remove_key_from_keys(&peer_key.key()).unwrap(); - - assert!(driver.get_key_from_keys(&peer_key.key()).unwrap().is_none()); - }*/ - } } diff --git a/packages/tracker-core/src/databases/driver/sqlite.rs b/packages/tracker-core/src/databases/driver/sqlite.rs index e04cf6110..37f5254a5 100644 --- a/packages/tracker-core/src/databases/driver/sqlite.rs +++ b/packages/tracker-core/src/databases/driver/sqlite.rs @@ -292,112 +292,35 @@ impl Database for Sqlite { #[cfg(test)] mod tests { - mod the_sqlite_driver { - use torrust_tracker_configuration::Core; - use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; - - use crate::databases::driver::sqlite::Sqlite; - use crate::databases::Database; - - fn initialize_driver_and_database() -> Sqlite { - let config = ephemeral_configuration(); 
- let driver = Sqlite::new(&config.database.path).unwrap(); - driver.create_database_tables().unwrap(); - driver - } - - fn ephemeral_configuration() -> Core { - let mut config = Core::default(); - let temp_file = ephemeral_sqlite_database(); - temp_file.to_str().unwrap().clone_into(&mut config.database.path); - config - } - - mod handling_torrent_persistence { - - use crate::core_tests::sample_info_hash; - use crate::databases::driver::sqlite::tests::the_sqlite_driver::initialize_driver_and_database; - use crate::databases::Database; - - #[test] - fn it_should_save_and_load_persistent_torrents() { - let driver = initialize_driver_and_database(); - - let infohash = sample_info_hash(); - - let number_of_downloads = 1; - - driver.save_persistent_torrent(&infohash, number_of_downloads).unwrap(); - - let torrents = driver.load_persistent_torrents().unwrap(); - - assert_eq!(torrents.len(), 1); - assert_eq!(torrents.get(&infohash), Some(number_of_downloads).as_ref()); - } - } - - mod handling_authentication_keys { - use std::time::Duration; - - use crate::authentication::key::{generate_key, generate_permanent_key}; - use crate::databases::driver::sqlite::tests::the_sqlite_driver::initialize_driver_and_database; - use crate::databases::Database; + use std::sync::Arc; - #[test] - fn it_should_save_and_load_permanent_authentication_keys() { - let driver = initialize_driver_and_database(); + use torrust_tracker_configuration::Core; + use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; - // Add a new permanent key - let peer_key = generate_permanent_key(); - driver.add_key_to_keys(&peer_key).unwrap(); + use crate::databases::driver::sqlite::Sqlite; + use crate::databases::driver::tests::run_tests; + use crate::databases::Database; - // Get the key back - let stored_peer_key = driver.get_key_from_keys(&peer_key.key()).unwrap().unwrap(); - - assert_eq!(stored_peer_key, peer_key); - } - #[test] - fn it_should_save_and_load_expiring_authentication_keys() { 
- let driver = initialize_driver_and_database(); - - // Add a new expiring key - let peer_key = generate_key(Some(Duration::from_secs(120))); - driver.add_key_to_keys(&peer_key).unwrap(); - - // Get the key back - let stored_peer_key = driver.get_key_from_keys(&peer_key.key()).unwrap().unwrap(); - - /* todo: - - The expiration time recovered from the database is not the same - as the one we set. It includes a small offset (nanoseconds). - - left: PeerKey { key: Key("7HP1NslpuQn6kLVAgAF4nFpnZNSQ4hrx"), valid_until: Some(1739182308s) } - right: PeerKey { key: Key("7HP1NslpuQn6kLVAgAF4nFpnZNSQ4hrx"), valid_until: Some(1739182308.603691299s) - - */ - - assert_eq!(stored_peer_key.key(), peer_key.key()); - assert_eq!( - stored_peer_key.valid_until.unwrap().as_secs(), - peer_key.valid_until.unwrap().as_secs() - ); - } + fn ephemeral_configuration() -> Core { + let mut config = Core::default(); + let temp_file = ephemeral_sqlite_database(); + temp_file.to_str().unwrap().clone_into(&mut config.database.path); + config + } - #[test] - fn it_should_remove_an_authentication_key() { - let driver = initialize_driver_and_database(); + fn initialize_driver(config: &Core) -> Arc> { + let driver: Arc> = Arc::new(Box::new(Sqlite::new(&config.database.path).unwrap())); + driver + } - let peer_key = generate_key(None); + #[tokio::test] + async fn run_sqlite_driver_tests() -> Result<(), Box> { + let config = ephemeral_configuration(); - // Add a new key - driver.add_key_to_keys(&peer_key).unwrap(); + let driver = initialize_driver(&config); - // Remove the key - driver.remove_key_from_keys(&peer_key.key()).unwrap(); + run_tests(&driver).await; - assert!(driver.get_key_from_keys(&peer_key.key()).unwrap().is_none()); - } - } + Ok(()) } } From 595397b74d0542900ca61c585420092c86d8aee2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Feb 2025 19:36:04 +0000 Subject: [PATCH 243/802] fix: [#1257] bug 1. 
permanent keys can't be created in MySQL --- .../tracker-core/src/databases/driver/mod.rs | 9 +++++---- .../src/databases/driver/mysql.rs | 20 +++++++++---------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/packages/tracker-core/src/databases/driver/mod.rs b/packages/tracker-core/src/databases/driver/mod.rs index 30888375e..7b42475a6 100644 --- a/packages/tracker-core/src/databases/driver/mod.rs +++ b/packages/tracker-core/src/databases/driver/mod.rs @@ -95,6 +95,7 @@ mod tests { // Since the interface is very simple and there are no conflicts between // tests, we share the same database. If we want to isolate the tests in // the future, we can create a new database for each test. + create_database_tables(driver).await.unwrap(); // todo: truncate tables otherwise they will increase in size over time. @@ -103,7 +104,7 @@ mod tests { handling_torrent_persistence::it_should_save_and_load_persistent_torrents(driver); // Permanent keys - //handling_authentication_keys::it_should_save_and_load_permanent_authentication_keys(&driver); + handling_authentication_keys::it_should_save_and_load_permanent_authentication_keys(driver); //handling_authentication_keys::it_should_remove_a_permanent_authentication_key(&driver); // Expiring keys @@ -149,10 +150,10 @@ mod tests { use std::sync::Arc; use std::time::Duration; - use crate::authentication::key::generate_key; + use crate::authentication::key::{generate_key, generate_permanent_key}; use crate::databases::Database; - /*pub fn it_should_save_and_load_permanent_authentication_keys(driver: &Arc>) { + pub fn it_should_save_and_load_permanent_authentication_keys(driver: &Arc>) { // Add a new permanent key let peer_key = generate_permanent_key(); driver.add_key_to_keys(&peer_key).unwrap(); @@ -161,7 +162,7 @@ mod tests { let stored_peer_key = driver.get_key_from_keys(&peer_key.key()).unwrap().unwrap(); assert_eq!(stored_peer_key, peer_key); - }*/ + } pub fn 
it_should_save_and_load_expiring_authentication_keys(driver: &Arc>) { // Add a new expiring key diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index 39ef8f55b..976a26d49 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -229,16 +229,16 @@ impl Database for Mysql { fn add_key_to_keys(&self, auth_key: &authentication::PeerKey) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; - let key = auth_key.key.to_string(); - let valid_until = match auth_key.valid_until { - Some(valid_until) => valid_until.as_secs().to_string(), - None => todo!(), - }; - - conn.exec_drop( - "INSERT INTO `keys` (`key`, valid_until) VALUES (:key, :valid_until)", - params! { key, valid_until }, - )?; + match auth_key.valid_until { + Some(valid_until) => conn.exec_drop( + "INSERT INTO `keys` (`key`, valid_until) VALUES (:key, :valid_until)", + params! { "key" => auth_key.key.to_string(), "valid_until" => valid_until.as_secs().to_string() }, + )?, + None => conn.exec_drop( + "INSERT INTO `keys` (`key`) VALUES (:key)", + params! { "key" => auth_key.key.to_string() }, + )?, + } Ok(1) } From b94179dd1f742fd429b6765e3a04936cc3373331 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Feb 2025 19:41:03 +0000 Subject: [PATCH 244/802] fix: [#1257] bug 2. 
Auth keys can't be removed in MySQL --- packages/tracker-core/README.md | 4 ++-- .../tracker-core/src/databases/driver/mod.rs | 20 +++++++++++++------ .../src/databases/driver/mysql.rs | 2 +- 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/packages/tracker-core/README.md b/packages/tracker-core/README.md index dfb57f304..f80243d29 100644 --- a/packages/tracker-core/README.md +++ b/packages/tracker-core/README.md @@ -23,13 +23,13 @@ TORRUST_TRACKER_CORE_RUN_MYSQL_DRIVER_TEST=true cargo test Show coverage report: ```console -cargo +stable llvm-cov +TORRUST_TRACKER_CORE_RUN_MYSQL_DRIVER_TEST=true cargo +stable llvm-cov ``` Export coverage report to `lcov` format: ```console -cargo +stable llvm-cov --lcov --output-path=./.coverage/lcov.info +TORRUST_TRACKER_CORE_RUN_MYSQL_DRIVER_TEST=true cargo +stable llvm-cov --lcov --output-path=./.coverage/lcov.info ``` If you use Visual Studio Code, you can use the [Coverage Gutters](https://marketplace.visualstudio.com/items?itemName=semasquare.vscode-coverage-gutters) extension to view the coverage lines. diff --git a/packages/tracker-core/src/databases/driver/mod.rs b/packages/tracker-core/src/databases/driver/mod.rs index 7b42475a6..1f0b54a57 100644 --- a/packages/tracker-core/src/databases/driver/mod.rs +++ b/packages/tracker-core/src/databases/driver/mod.rs @@ -101,15 +101,23 @@ mod tests { // todo: truncate tables otherwise they will increase in size over time. // That's not a problem on CI when the database is always newly created. 
+ // Persistent torrents (stats) + handling_torrent_persistence::it_should_save_and_load_persistent_torrents(driver); + // Authentication keys (for private trackers) + // Permanent keys handling_authentication_keys::it_should_save_and_load_permanent_authentication_keys(driver); - //handling_authentication_keys::it_should_remove_a_permanent_authentication_key(&driver); + handling_authentication_keys::it_should_remove_a_permanent_authentication_key(driver); // Expiring keys handling_authentication_keys::it_should_save_and_load_expiring_authentication_keys(driver); - //handling_authentication_keys::it_should_remove_an_expiring_authentication_key(&driver); + handling_authentication_keys::it_should_remove_an_expiring_authentication_key(driver); + + // Whitelist (for listed trackers) + + // todo driver.drop_database_tables().unwrap(); } @@ -189,7 +197,7 @@ mod tests { ); } - /*pub fn it_should_remove_a_permanent_authentication_key(driver: &Arc>) { + pub fn it_should_remove_a_permanent_authentication_key(driver: &Arc>) { let peer_key = generate_permanent_key(); // Add a new key @@ -199,9 +207,9 @@ mod tests { driver.remove_key_from_keys(&peer_key.key()).unwrap(); assert!(driver.get_key_from_keys(&peer_key.key()).unwrap().is_none()); - }*/ + } - /*pub fn it_should_remove_an_expiring_authentication_key(driver: &Arc>) { + pub fn it_should_remove_an_expiring_authentication_key(driver: &Arc>) { let peer_key = generate_key(Some(Duration::from_secs(120))); // Add a new key @@ -211,6 +219,6 @@ mod tests { driver.remove_key_from_keys(&peer_key.key()).unwrap(); assert!(driver.get_key_from_keys(&peer_key.key()).unwrap().is_none()); - }*/ + } } } diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index 976a26d49..a739832d6 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -247,7 +247,7 @@ impl Database for Mysql { fn 
remove_key_from_keys(&self, key: &Key) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; - conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! { "key" => key.to_string() })?; + conn.exec_drop("DELETE FROM `keys` WHERE `key` = :key", params! { "key" => key.to_string() })?; Ok(1) } From 613efb26b7f020169bceb03d86300dae2d24ed10 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Feb 2025 20:30:19 +0000 Subject: [PATCH 245/802] fix: [#1257] bug 3. Expiring auth keys ignore fractions of seconds The `Duration` of a peer Key can have gractions of seconds. However we only store seconds (integer) in the database. When comparing peer keys we should ignore the fractions. --- .../src/authentication/key/peer_key.rs | 23 +++++++++++++++++-- .../tracker-core/src/databases/driver/mod.rs | 17 ++------------ 2 files changed, 23 insertions(+), 17 deletions(-) diff --git a/packages/tracker-core/src/authentication/key/peer_key.rs b/packages/tracker-core/src/authentication/key/peer_key.rs index a3045e54e..1d2b1fadc 100644 --- a/packages/tracker-core/src/authentication/key/peer_key.rs +++ b/packages/tracker-core/src/authentication/key/peer_key.rs @@ -1,4 +1,5 @@ use std::str::FromStr; +use std::time::Duration; use derive_more::Display; use rand::distr::Alphanumeric; @@ -12,7 +13,7 @@ use super::AUTH_KEY_LENGTH; /// An authentication key which can potentially have an expiration time. /// After that time is will automatically become invalid. -#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] +#[derive(Serialize, Deserialize, Debug, Clone)] pub struct PeerKey { /// Random 32-char string. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` pub key: Key, @@ -22,6 +23,21 @@ pub struct PeerKey { pub valid_until: Option, } +impl PartialEq for PeerKey { + fn eq(&self, other: &Self) -> bool { + // We ignore the fractions of seconds when comparing the timestamps + // because we only store the seconds in the database. 
+ self.key == other.key + && match (&self.valid_until, &other.valid_until) { + (Some(a), Some(b)) => a.as_secs() == b.as_secs(), + (None, None) => true, + _ => false, + } + } +} + +impl Eq for PeerKey {} + impl std::fmt::Display for PeerKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.expiry_time() { @@ -47,7 +63,10 @@ impl PeerKey { /// (this will naturally happen in 292.5 billion years) #[must_use] pub fn expiry_time(&self) -> Option> { - self.valid_until.map(convert_from_timestamp_to_datetime_utc) + // We remove the fractions of seconds because we only store the seconds + // in the database. + self.valid_until + .map(|valid_until| convert_from_timestamp_to_datetime_utc(Duration::from_secs(valid_until.as_secs()))) } } diff --git a/packages/tracker-core/src/databases/driver/mod.rs b/packages/tracker-core/src/databases/driver/mod.rs index 1f0b54a57..557633b81 100644 --- a/packages/tracker-core/src/databases/driver/mod.rs +++ b/packages/tracker-core/src/databases/driver/mod.rs @@ -180,21 +180,8 @@ mod tests { // Get the key back let stored_peer_key = driver.get_key_from_keys(&peer_key.key()).unwrap().unwrap(); - /* todo: - - The expiration time recovered from the database is not the same - as the one we set. It includes a small offset (nanoseconds). 
- - left: PeerKey { key: Key("7HP1NslpuQn6kLVAgAF4nFpnZNSQ4hrx"), valid_until: Some(1739182308s) } - right: PeerKey { key: Key("7HP1NslpuQn6kLVAgAF4nFpnZNSQ4hrx"), valid_until: Some(1739182308.603691299s) - - */ - - assert_eq!(stored_peer_key.key(), peer_key.key()); - assert_eq!( - stored_peer_key.valid_until.unwrap().as_secs(), - peer_key.valid_until.unwrap().as_secs() - ); + assert_eq!(stored_peer_key, peer_key); + assert_eq!(stored_peer_key.expiry_time(), peer_key.expiry_time()); } pub fn it_should_remove_a_permanent_authentication_key(driver: &Arc>) { From b9188c74b9d2f08994ec5caf48bfba2d89b6a525 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Feb 2025 20:50:53 +0000 Subject: [PATCH 246/802] test: [#1251] add tests for whitelist is DB drivers --- packages/tracker-core/src/core_tests.rs | 11 ++++ .../tracker-core/src/databases/driver/mod.rs | 62 +++++++++++++++---- 2 files changed, 61 insertions(+), 12 deletions(-) diff --git a/packages/tracker-core/src/core_tests.rs b/packages/tracker-core/src/core_tests.rs index 53049f326..165c8790e 100644 --- a/packages/tracker-core/src/core_tests.rs +++ b/packages/tracker-core/src/core_tests.rs @@ -4,6 +4,7 @@ use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; +use rand::Rng; use torrust_tracker_configuration::Configuration; #[cfg(test)] use torrust_tracker_configuration::Core; @@ -20,6 +21,16 @@ use super::torrent::repository::persisted::DatabasePersistentTorrentRepository; use super::whitelist::repository::in_memory::InMemoryWhitelist; use super::whitelist::{self}; +/// Generates a random `InfoHash`. +#[must_use] +pub fn random_info_hash() -> InfoHash { + let mut rng = rand::rng(); + let mut random_bytes = [0u8; 20]; + rng.fill(&mut random_bytes); + + InfoHash::from_bytes(&random_bytes) +} + /// # Panics /// /// Will panic if the string representation of the info hash is not a valid info hash. 
diff --git a/packages/tracker-core/src/databases/driver/mod.rs b/packages/tracker-core/src/databases/driver/mod.rs index 557633b81..f4e165f00 100644 --- a/packages/tracker-core/src/databases/driver/mod.rs +++ b/packages/tracker-core/src/databases/driver/mod.rs @@ -117,7 +117,10 @@ mod tests { // Whitelist (for listed trackers) - // todo + handling_the_whitelist::it_should_add_and_get_infohashes(driver); + handling_the_whitelist::it_should_remove_an_infohash_from_the_whitelist(driver); + handling_the_whitelist::it_should_fail_trying_to_add_the_same_infohash_twice(driver); + handling_the_whitelist::it_load_the_whitelist(driver); driver.drop_database_tables().unwrap(); } @@ -129,7 +132,7 @@ mod tests { } tokio::time::sleep(Duration::from_secs(2)).await; } - Err("MySQL is not ready after retries.".into()) + Err("Database is not ready after retries.".into()) } mod handling_torrent_persistence { @@ -162,22 +165,18 @@ mod tests { use crate::databases::Database; pub fn it_should_save_and_load_permanent_authentication_keys(driver: &Arc>) { - // Add a new permanent key let peer_key = generate_permanent_key(); driver.add_key_to_keys(&peer_key).unwrap(); - // Get the key back let stored_peer_key = driver.get_key_from_keys(&peer_key.key()).unwrap().unwrap(); assert_eq!(stored_peer_key, peer_key); } pub fn it_should_save_and_load_expiring_authentication_keys(driver: &Arc>) { - // Add a new expiring key let peer_key = generate_key(Some(Duration::from_secs(120))); driver.add_key_to_keys(&peer_key).unwrap(); - // Get the key back let stored_peer_key = driver.get_key_from_keys(&peer_key.key()).unwrap().unwrap(); assert_eq!(stored_peer_key, peer_key); @@ -186,11 +185,8 @@ mod tests { pub fn it_should_remove_a_permanent_authentication_key(driver: &Arc>) { let peer_key = generate_permanent_key(); - - // Add a new key driver.add_key_to_keys(&peer_key).unwrap(); - // Remove the key driver.remove_key_from_keys(&peer_key.key()).unwrap(); 
assert!(driver.get_key_from_keys(&peer_key.key()).unwrap().is_none()); @@ -198,14 +194,56 @@ mod tests { pub fn it_should_remove_an_expiring_authentication_key(driver: &Arc>) { let peer_key = generate_key(Some(Duration::from_secs(120))); - - // Add a new key driver.add_key_to_keys(&peer_key).unwrap(); - // Remove the key driver.remove_key_from_keys(&peer_key.key()).unwrap(); assert!(driver.get_key_from_keys(&peer_key.key()).unwrap().is_none()); } } + + mod handling_the_whitelist { + + use std::sync::Arc; + + use crate::core_tests::random_info_hash; + use crate::databases::Database; + + pub fn it_should_add_and_get_infohashes(driver: &Arc>) { + let infohash = random_info_hash(); + + driver.add_info_hash_to_whitelist(infohash).unwrap(); + + let stored_infohash = driver.get_info_hash_from_whitelist(infohash).unwrap().unwrap(); + + assert_eq!(stored_infohash, infohash); + } + + pub fn it_should_remove_an_infohash_from_the_whitelist(driver: &Arc>) { + let infohash = random_info_hash(); + driver.add_info_hash_to_whitelist(infohash).unwrap(); + + driver.remove_info_hash_from_whitelist(infohash).unwrap(); + + assert!(driver.get_info_hash_from_whitelist(infohash).unwrap().is_none()); + } + + pub fn it_should_fail_trying_to_add_the_same_infohash_twice(driver: &Arc>) { + let infohash = random_info_hash(); + + driver.add_info_hash_to_whitelist(infohash).unwrap(); + let result = driver.add_info_hash_to_whitelist(infohash); + + assert!(result.is_err()); + } + + pub fn it_load_the_whitelist(driver: &Arc>) { + let infohash = random_info_hash(); + driver.add_info_hash_to_whitelist(infohash).unwrap(); + + let whitelist = driver.load_whitelist().unwrap(); + + assert!(whitelist.contains(&infohash)); + } + } } From 700482d14c0a5e417423548a92630f9a58f1b014 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 11 Feb 2025 10:48:29 +0000 Subject: [PATCH 247/802] test: [#1251] reset database before running db driver tests --- .../tracker-core/src/databases/driver/mod.rs | 21 
++++++++++++++----- .../src/databases/driver/mysql.rs | 2 +- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/packages/tracker-core/src/databases/driver/mod.rs b/packages/tracker-core/src/databases/driver/mod.rs index f4e165f00..07d338915 100644 --- a/packages/tracker-core/src/databases/driver/mod.rs +++ b/packages/tracker-core/src/databases/driver/mod.rs @@ -96,10 +96,7 @@ mod tests { // tests, we share the same database. If we want to isolate the tests in // the future, we can create a new database for each test. - create_database_tables(driver).await.unwrap(); - - // todo: truncate tables otherwise they will increase in size over time. - // That's not a problem on CI when the database is always newly created. + database_setup(driver).await; // Persistent torrents (stats) @@ -121,8 +118,22 @@ mod tests { handling_the_whitelist::it_should_remove_an_infohash_from_the_whitelist(driver); handling_the_whitelist::it_should_fail_trying_to_add_the_same_infohash_twice(driver); handling_the_whitelist::it_load_the_whitelist(driver); + } - driver.drop_database_tables().unwrap(); + /// It initializes the database schema. + /// + /// Since the drop SQL queries don't check if the tables already exist, + /// we have to create them first, and then drop them. + /// + /// The method to drop tables does not use "DROP TABLE IF EXISTS". We can + /// change this function when we update the `Database::drop_database_tables` + /// method to use "DROP TABLE IF EXISTS". 
+ async fn database_setup(driver: &Arc>) { + create_database_tables(driver).await.expect("database tables creation failed"); + driver.drop_database_tables().expect("old database tables deletion failed"); + create_database_tables(driver) + .await + .expect("database tables creation from empty schema failed"); } async fn create_database_tables(driver: &Arc>) -> Result<(), Box> { diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index a739832d6..1e1e29f36 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -264,7 +264,7 @@ mod tests { `TORRUST_TRACKER_CORE_RUN_MYSQL_DRIVER_TEST=true cargo test` - The `Database`` trait is very simple and we only have one driver that needs + The `Database` trait is very simple and we only have one driver that needs a container. In the future we might want to use different approaches like: - https://github.com/testcontainers/testcontainers-rs/issues/707 From adb96141590c7ae1bbd1bf76b1de0d54d2ca8d07 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 11 Feb 2025 11:05:52 +0000 Subject: [PATCH 248/802] tests: [#1251] add test for loading keys in DB drivers --- .../src/authentication/key/mod.rs | 6 +++ .../tracker-core/src/databases/driver/mod.rs | 41 +++++++++++++------ 2 files changed, 34 insertions(+), 13 deletions(-) diff --git a/packages/tracker-core/src/authentication/key/mod.rs b/packages/tracker-core/src/authentication/key/mod.rs index 33b3b6099..8ec368ebc 100644 --- a/packages/tracker-core/src/authentication/key/mod.rs +++ b/packages/tracker-core/src/authentication/key/mod.rs @@ -67,6 +67,12 @@ pub fn generate_permanent_key() -> PeerKey { generate_key(None) } +/// It generates a new expiring random key [`PeerKey`]. +#[must_use] +pub fn generate_expiring_key(lifetime: Duration) -> PeerKey { + generate_key(Some(lifetime)) +} + /// It generates a new random 32-char authentication [`PeerKey`]. 
/// /// It can be an expiring or permanent key. diff --git a/packages/tracker-core/src/databases/driver/mod.rs b/packages/tracker-core/src/databases/driver/mod.rs index 07d338915..1e42e4414 100644 --- a/packages/tracker-core/src/databases/driver/mod.rs +++ b/packages/tracker-core/src/databases/driver/mod.rs @@ -104,6 +104,8 @@ mod tests { // Authentication keys (for private trackers) + handling_authentication_keys::it_should_load_the_keys(driver); + // Permanent keys handling_authentication_keys::it_should_save_and_load_permanent_authentication_keys(driver); handling_authentication_keys::it_should_remove_a_permanent_authentication_key(driver); @@ -114,10 +116,10 @@ mod tests { // Whitelist (for listed trackers) + handling_the_whitelist::it_should_load_the_whitelist(driver); handling_the_whitelist::it_should_add_and_get_infohashes(driver); handling_the_whitelist::it_should_remove_an_infohash_from_the_whitelist(driver); handling_the_whitelist::it_should_fail_trying_to_add_the_same_infohash_twice(driver); - handling_the_whitelist::it_load_the_whitelist(driver); } /// It initializes the database schema. 
@@ -172,9 +174,22 @@ mod tests { use std::sync::Arc; use std::time::Duration; - use crate::authentication::key::{generate_key, generate_permanent_key}; + use crate::authentication::key::{generate_expiring_key, generate_permanent_key}; use crate::databases::Database; + pub fn it_should_load_the_keys(driver: &Arc>) { + let permanent_peer_key = generate_permanent_key(); + driver.add_key_to_keys(&permanent_peer_key).unwrap(); + + let expiring_peer_key = generate_expiring_key(Duration::from_secs(120)); + driver.add_key_to_keys(&expiring_peer_key).unwrap(); + + let keys = driver.load_keys().unwrap(); + + assert!(keys.contains(&permanent_peer_key)); + assert!(keys.contains(&expiring_peer_key)); + } + pub fn it_should_save_and_load_permanent_authentication_keys(driver: &Arc>) { let peer_key = generate_permanent_key(); driver.add_key_to_keys(&peer_key).unwrap(); @@ -185,7 +200,7 @@ mod tests { } pub fn it_should_save_and_load_expiring_authentication_keys(driver: &Arc>) { - let peer_key = generate_key(Some(Duration::from_secs(120))); + let peer_key = generate_expiring_key(Duration::from_secs(120)); driver.add_key_to_keys(&peer_key).unwrap(); let stored_peer_key = driver.get_key_from_keys(&peer_key.key()).unwrap().unwrap(); @@ -204,7 +219,7 @@ mod tests { } pub fn it_should_remove_an_expiring_authentication_key(driver: &Arc>) { - let peer_key = generate_key(Some(Duration::from_secs(120))); + let peer_key = generate_expiring_key(Duration::from_secs(120)); driver.add_key_to_keys(&peer_key).unwrap(); driver.remove_key_from_keys(&peer_key.key()).unwrap(); @@ -220,6 +235,15 @@ mod tests { use crate::core_tests::random_info_hash; use crate::databases::Database; + pub fn it_should_load_the_whitelist(driver: &Arc>) { + let infohash = random_info_hash(); + driver.add_info_hash_to_whitelist(infohash).unwrap(); + + let whitelist = driver.load_whitelist().unwrap(); + + assert!(whitelist.contains(&infohash)); + } + pub fn it_should_add_and_get_infohashes(driver: &Arc>) { let infohash = 
random_info_hash(); @@ -247,14 +271,5 @@ mod tests { assert!(result.is_err()); } - - pub fn it_load_the_whitelist(driver: &Arc>) { - let infohash = random_info_hash(); - driver.add_info_hash_to_whitelist(infohash).unwrap(); - - let whitelist = driver.load_whitelist().unwrap(); - - assert!(whitelist.contains(&infohash)); - } } } From 2cd1c65cbff75895ec26dfc6943319cf2c665399 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 11 Feb 2025 11:34:43 +0000 Subject: [PATCH 249/802] test: [#1251] add tests for database driver error converters --- packages/tracker-core/src/databases/error.rs | 36 ++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/packages/tracker-core/src/databases/error.rs b/packages/tracker-core/src/databases/error.rs index 4d64baf48..0f3207587 100644 --- a/packages/tracker-core/src/databases/error.rs +++ b/packages/tracker-core/src/databases/error.rs @@ -102,3 +102,39 @@ impl From<(r2d2::Error, Driver)> for Error { } } } + +#[cfg(test)] +mod tests { + use r2d2_mysql::mysql; + + use crate::databases::error::Error; + + #[test] + fn it_should_build_a_database_error_from_a_rusqlite_error() { + let err: Error = r2d2_sqlite::rusqlite::Error::InvalidQuery.into(); + + assert!(matches!(err, Error::InvalidQuery { .. })); + } + + #[test] + fn it_should_build_an_specific_database_error_from_a_no_rows_returned_rusqlite_error() { + let err: Error = r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows.into(); + + assert!(matches!(err, Error::QueryReturnedNoRows { .. })); + } + + #[test] + fn it_should_build_a_database_error_from_a_mysql_error() { + let url_err = mysql::error::UrlError::BadUrl; + let err: Error = r2d2_mysql::mysql::Error::UrlError(url_err).into(); + + assert!(matches!(err, Error::InvalidQuery { .. })); + } + + #[test] + fn it_should_build_a_database_error_from_a_mysql_url_error() { + let err: Error = mysql::error::UrlError::BadUrl.into(); + + assert!(matches!(err, Error::ConnectionError { .. 
})); + } +} From 6639c98e4d61f9f1b4e52af7d7301dcef16dce1a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 11 Feb 2025 15:29:25 +0000 Subject: [PATCH 250/802] refactor: [#1258] make things private or pub(crate) when possible. Limit the exposed funtionality for pacakges. Specially the new `tracker-core` package which has not been published yet. --- packages/tracker-core/src/announce_handler.rs | 8 +- .../src/authentication/handler.rs | 8 +- .../src/authentication/key/mod.rs | 8 +- .../key/repository/in_memory.rs | 9 +- .../key/repository/persisted.rs | 6 +- packages/tracker-core/src/core_tests.rs | 215 ----------------- .../tracker-core/src/databases/driver/mod.rs | 8 +- .../src/databases/driver/mysql.rs | 2 +- .../src/databases/driver/sqlite.rs | 2 +- packages/tracker-core/src/error.rs | 2 +- packages/tracker-core/src/lib.rs | 8 +- packages/tracker-core/src/test_helpers.rs | 219 ++++++++++++++++++ packages/tracker-core/src/torrent/manager.rs | 8 +- packages/tracker-core/src/torrent/mod.rs | 9 +- .../src/torrent/repository/in_memory.rs | 35 +-- .../src/torrent/repository/persisted.rs | 6 +- packages/tracker-core/src/torrent/services.rs | 2 +- .../src/whitelist/authorization.rs | 4 +- .../tracker-core/src/whitelist/manager.rs | 6 +- packages/tracker-core/src/whitelist/mod.rs | 6 +- .../src/whitelist/repository/in_memory.rs | 6 +- .../src/whitelist/repository/persisted.rs | 8 +- .../src/whitelist/test_helpers.rs | 32 +++ .../src/whitelist/whitelist_tests.rs | 27 --- src/bootstrap/app.rs | 1 + src/servers/http/mod.rs | 1 + src/servers/http/test_helpers.rs | 16 ++ src/servers/http/v1/handlers/announce.rs | 2 +- src/servers/http/v1/services/announce.rs | 2 +- src/servers/http/v1/services/scrape.rs | 11 +- 30 files changed, 357 insertions(+), 320 deletions(-) delete mode 100644 packages/tracker-core/src/core_tests.rs create mode 100644 packages/tracker-core/src/test_helpers.rs create mode 100644 packages/tracker-core/src/whitelist/test_helpers.rs delete mode 100644 
packages/tracker-core/src/whitelist/whitelist_tests.rs create mode 100644 src/servers/http/test_helpers.rs diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 85dd354bf..cd0a9b861 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -182,8 +182,8 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::announce_handler::AnnounceHandler; - use crate::core_tests::initialize_handlers; use crate::scrape_handler::ScrapeHandler; + use crate::test_helpers::tests::initialize_handlers; fn public_tracker() -> (Arc, Arc) { let config = configuration::ephemeral_public(); @@ -244,7 +244,7 @@ mod tests { peer_ip, public_tracker, sample_peer_1, sample_peer_2, sample_peer_3, }; use crate::announce_handler::PeersWanted; - use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::test_helpers::tests::{sample_info_hash, sample_peer}; mod should_assign_the_ip_to_the_peer { @@ -411,7 +411,7 @@ mod tests { use crate::announce_handler::tests::the_announce_handler::{peer_ip, public_tracker}; use crate::announce_handler::PeersWanted; - use crate::core_tests::{completed_peer, leecher, sample_info_hash, seeder, started_peer}; + use crate::test_helpers::tests::{completed_peer, leecher, sample_info_hash, seeder, started_peer}; #[tokio::test] async fn when_the_peer_is_a_seeder() { @@ -474,8 +474,8 @@ mod tests { use crate::announce_handler::tests::the_announce_handler::peer_ip; use crate::announce_handler::{AnnounceHandler, PeersWanted}; - use crate::core_tests::{sample_info_hash, sample_peer}; use crate::databases::setup::initialize_database; + use crate::test_helpers::tests::{sample_info_hash, sample_peer}; use crate::torrent::manager::TorrentsManager; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; diff --git 
a/packages/tracker-core/src/authentication/handler.rs b/packages/tracker-core/src/authentication/handler.rs index f758830ac..136060916 100644 --- a/packages/tracker-core/src/authentication/handler.rs +++ b/packages/tracker-core/src/authentication/handler.rs @@ -132,7 +132,7 @@ impl KeysHandler { /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. - pub async fn generate_permanent_peer_key(&self) -> Result { + pub(crate) async fn generate_permanent_peer_key(&self) -> Result { self.generate_expiring_peer_key(None).await } @@ -170,7 +170,7 @@ impl KeysHandler { /// # Arguments /// /// * `key` - The pre-generated key. - pub async fn add_permanent_peer_key(&self, key: Key) -> Result { + pub(crate) async fn add_permanent_peer_key(&self, key: Key) -> Result { self.add_expiring_peer_key(key, None).await } @@ -188,7 +188,7 @@ impl KeysHandler { /// * `key` - The pre-generated key. /// * `lifetime` - The duration in seconds for the new key. The key will be /// no longer valid after `lifetime` seconds. - pub async fn add_expiring_peer_key( + pub(crate) async fn add_expiring_peer_key( &self, key: Key, valid_until: Option, @@ -219,7 +219,7 @@ impl KeysHandler { } /// It removes an authentication key from memory. - pub async fn remove_in_memory_auth_key(&self, key: &Key) { + pub(crate) async fn remove_in_memory_auth_key(&self, key: &Key) { self.in_memory_key_repository.remove(key).await; } diff --git a/packages/tracker-core/src/authentication/key/mod.rs b/packages/tracker-core/src/authentication/key/mod.rs index 8ec368ebc..fce18c0dd 100644 --- a/packages/tracker-core/src/authentication/key/mod.rs +++ b/packages/tracker-core/src/authentication/key/mod.rs @@ -59,17 +59,19 @@ pub type ParseKeyError = peer_key::ParseKeyError; /// /// For more information see function [`generate_key`](crate::authentication::key::generate_key) to generate the /// [`PeerKey`](crate::authentication::PeerKey). 
-pub const AUTH_KEY_LENGTH: usize = 32; +pub(crate) const AUTH_KEY_LENGTH: usize = 32; /// It generates a new permanent random key [`PeerKey`]. +#[cfg(test)] #[must_use] -pub fn generate_permanent_key() -> PeerKey { +pub(crate) fn generate_permanent_key() -> PeerKey { generate_key(None) } /// It generates a new expiring random key [`PeerKey`]. +#[cfg(test)] #[must_use] -pub fn generate_expiring_key(lifetime: Duration) -> PeerKey { +pub(crate) fn generate_expiring_key(lifetime: Duration) -> PeerKey { generate_key(Some(lifetime)) } diff --git a/packages/tracker-core/src/authentication/key/repository/in_memory.rs b/packages/tracker-core/src/authentication/key/repository/in_memory.rs index 0a2fc50cd..13664e27c 100644 --- a/packages/tracker-core/src/authentication/key/repository/in_memory.rs +++ b/packages/tracker-core/src/authentication/key/repository/in_memory.rs @@ -9,21 +9,22 @@ pub struct InMemoryKeyRepository { impl InMemoryKeyRepository { /// It adds a new authentication key. - pub async fn insert(&self, auth_key: &PeerKey) { + pub(crate) async fn insert(&self, auth_key: &PeerKey) { self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); } /// It removes an authentication key. - pub async fn remove(&self, key: &Key) { + pub(crate) async fn remove(&self, key: &Key) { self.keys.write().await.remove(key); } - pub async fn get(&self, key: &Key) -> Option { + pub(crate) async fn get(&self, key: &Key) -> Option { self.keys.read().await.get(key).cloned() } /// It clears all the authentication keys. 
- pub async fn clear(&self) { + #[allow(dead_code)] + pub(crate) async fn clear(&self) { let mut keys = self.keys.write().await; keys.clear(); } diff --git a/packages/tracker-core/src/authentication/key/repository/persisted.rs b/packages/tracker-core/src/authentication/key/repository/persisted.rs index 7edee62c0..95a3b874c 100644 --- a/packages/tracker-core/src/authentication/key/repository/persisted.rs +++ b/packages/tracker-core/src/authentication/key/repository/persisted.rs @@ -21,7 +21,7 @@ impl DatabaseKeyRepository { /// # Errors /// /// Will return a `databases::error::Error` if unable to add the `auth_key` to the database. - pub fn add(&self, peer_key: &PeerKey) -> Result<(), databases::error::Error> { + pub(crate) fn add(&self, peer_key: &PeerKey) -> Result<(), databases::error::Error> { self.database.add_key_to_keys(peer_key)?; Ok(()) } @@ -31,7 +31,7 @@ impl DatabaseKeyRepository { /// # Errors /// /// Will return a `database::Error` if unable to remove the `key` from the database. - pub fn remove(&self, key: &Key) -> Result<(), databases::error::Error> { + pub(crate) fn remove(&self, key: &Key) -> Result<(), databases::error::Error> { self.database.remove_key_from_keys(key)?; Ok(()) } @@ -41,7 +41,7 @@ impl DatabaseKeyRepository { /// # Errors /// /// Will return a `database::Error` if unable to load the keys from the database. - pub fn load_keys(&self) -> Result, databases::error::Error> { + pub(crate) fn load_keys(&self) -> Result, databases::error::Error> { let keys = self.database.load_keys()?; Ok(keys) } diff --git a/packages/tracker-core/src/core_tests.rs b/packages/tracker-core/src/core_tests.rs deleted file mode 100644 index 165c8790e..000000000 --- a/packages/tracker-core/src/core_tests.rs +++ /dev/null @@ -1,215 +0,0 @@ -//! Some generic test helpers functions. 
-use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::sync::Arc; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; -use bittorrent_primitives::info_hash::InfoHash; -use rand::Rng; -use torrust_tracker_configuration::Configuration; -#[cfg(test)] -use torrust_tracker_configuration::Core; -use torrust_tracker_primitives::peer::Peer; -use torrust_tracker_primitives::DurationSinceUnixEpoch; -#[cfg(test)] -use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; - -use super::announce_handler::AnnounceHandler; -use super::databases::setup::initialize_database; -use super::scrape_handler::ScrapeHandler; -use super::torrent::repository::in_memory::InMemoryTorrentRepository; -use super::torrent::repository::persisted::DatabasePersistentTorrentRepository; -use super::whitelist::repository::in_memory::InMemoryWhitelist; -use super::whitelist::{self}; - -/// Generates a random `InfoHash`. -#[must_use] -pub fn random_info_hash() -> InfoHash { - let mut rng = rand::rng(); - let mut random_bytes = [0u8; 20]; - rng.fill(&mut random_bytes); - - InfoHash::from_bytes(&random_bytes) -} - -/// # Panics -/// -/// Will panic if the string representation of the info hash is not a valid info hash. -#[must_use] -pub fn sample_info_hash() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 - .parse::() - .expect("String should be a valid info hash") -} - -/// # Panics -/// -/// Will panic if the string representation of the info hash is not a valid info hash. -#[must_use] -pub fn sample_info_hash_one() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 - .parse::() - .expect("String should be a valid info hash") -} - -/// # Panics -/// -/// Will panic if the string representation of the info hash is not a valid info hash. 
-#[must_use] -pub fn sample_info_hash_two() -> InfoHash { - "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1" // DevSkim: ignore DS173237 - .parse::() - .expect("String should be a valid info hash") -} - -/// # Panics -/// -/// Will panic if the string representation of the info hash is not a valid info hash. -#[must_use] -pub fn sample_info_hash_alphabetically_ordered_after_sample_info_hash_one() -> InfoHash { - "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1" // DevSkim: ignore DS173237 - .parse::() - .expect("String should be a valid info hash") -} - -/// Sample peer whose state is not relevant for the tests. -#[must_use] -pub fn sample_peer() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - } -} - -#[must_use] -pub fn sample_peer_one() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000001"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - } -} - -#[must_use] -pub fn sample_peer_two() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000002"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8082), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - } -} - -#[must_use] -pub fn seeder() -> Peer { - complete_peer() -} - -#[must_use] -pub fn leecher() -> Peer { - incomplete_peer() -} - -#[must_use] 
-pub fn started_peer() -> Peer { - incomplete_peer() -} - -#[must_use] -pub fn completed_peer() -> Peer { - complete_peer() -} - -/// A peer that counts as `complete` is swarm metadata -/// IMPORTANT!: it only counts if the it has been announce at least once before -/// announcing the `AnnounceEvent::Completed` event. -#[must_use] -pub fn complete_peer() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - } -} - -/// A peer that counts as `incomplete` is swarm metadata -#[must_use] -pub fn incomplete_peer() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(1000), // Still bytes to download - event: AnnounceEvent::Started, - } -} - -#[must_use] -pub fn initialize_handlers(config: &Configuration) -> (Arc, Arc) { - let database = initialize_database(&config.core); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(whitelist::authorization::WhitelistAuthorization::new( - &config.core, - &in_memory_whitelist.clone(), - )); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - - let announce_handler = Arc::new(AnnounceHandler::new( - &config.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - 
(announce_handler, scrape_handler) -} - -/// # Panics -/// -/// Will panic if the temporary database file path is not a valid UFT string. -#[cfg(test)] -#[must_use] -pub fn ephemeral_configuration() -> Core { - let mut config = Core::default(); - - let temp_file = ephemeral_sqlite_database(); - temp_file.to_str().unwrap().clone_into(&mut config.database.path); - - config -} - -/// # Panics -/// -/// Will panic if the temporary database file path is not a valid UFT string. -#[cfg(test)] -#[must_use] -pub fn ephemeral_configuration_for_listed_tracker() -> Core { - let mut config = Core { - listed: true, - ..Default::default() - }; - - let temp_file = ephemeral_sqlite_database(); - temp_file.to_str().unwrap().clone_into(&mut config.database.path); - - config -} diff --git a/packages/tracker-core/src/databases/driver/mod.rs b/packages/tracker-core/src/databases/driver/mod.rs index 1e42e4414..2bc6a1e3c 100644 --- a/packages/tracker-core/src/databases/driver/mod.rs +++ b/packages/tracker-core/src/databases/driver/mod.rs @@ -73,7 +73,7 @@ pub mod sqlite; /// # Errors /// /// Will return `Error` if unable to build the driver. 
-pub fn build(driver: &Driver, db_path: &str) -> Result, Error> { +pub(crate) fn build(driver: &Driver, db_path: &str) -> Result, Error> { let database: Box = match driver { Driver::Sqlite3 => Box::new(Sqlite::new(db_path)?), Driver::MySQL => Box::new(Mysql::new(db_path)?), @@ -85,7 +85,7 @@ pub fn build(driver: &Driver, db_path: &str) -> Result, Error> } #[cfg(test)] -mod tests { +pub(crate) mod tests { use std::sync::Arc; use std::time::Duration; @@ -152,8 +152,8 @@ mod tests { use std::sync::Arc; - use crate::core_tests::sample_info_hash; use crate::databases::Database; + use crate::test_helpers::tests::sample_info_hash; pub fn it_should_save_and_load_persistent_torrents(driver: &Arc>) { let infohash = sample_info_hash(); @@ -232,8 +232,8 @@ mod tests { use std::sync::Arc; - use crate::core_tests::random_info_hash; use crate::databases::Database; + use crate::test_helpers::tests::random_info_hash; pub fn it_should_load_the_whitelist(driver: &Arc>) { let infohash = random_info_hash(); diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index 1e1e29f36..365bd0ad9 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -15,7 +15,7 @@ use crate::authentication::{self, Key}; const DRIVER: Driver = Driver::MySQL; -pub struct Mysql { +pub(crate) struct Mysql { pool: Pool, } diff --git a/packages/tracker-core/src/databases/driver/sqlite.rs b/packages/tracker-core/src/databases/driver/sqlite.rs index 37f5254a5..36ca4eabe 100644 --- a/packages/tracker-core/src/databases/driver/sqlite.rs +++ b/packages/tracker-core/src/databases/driver/sqlite.rs @@ -14,7 +14,7 @@ use crate::authentication::{self, Key}; const DRIVER: Driver = Driver::Sqlite3; -pub struct Sqlite { +pub(crate) struct Sqlite { pool: Pool, } diff --git a/packages/tracker-core/src/error.rs b/packages/tracker-core/src/error.rs index 515510b85..dcdd89668 100644 --- 
a/packages/tracker-core/src/error.rs +++ b/packages/tracker-core/src/error.rs @@ -41,8 +41,8 @@ mod tests { mod whitelist_error { - use crate::core_tests::sample_info_hash; use crate::error::WhitelistError; + use crate::test_helpers::tests::sample_info_hash; #[test] fn torrent_not_whitelisted() { diff --git a/packages/tracker-core/src/lib.rs b/packages/tracker-core/src/lib.rs index 9334e4a02..ecbaef9c5 100644 --- a/packages/tracker-core/src/lib.rs +++ b/packages/tracker-core/src/lib.rs @@ -391,8 +391,8 @@ pub mod scrape_handler; pub mod torrent; pub mod whitelist; -pub mod core_tests; pub mod peer_tests; +pub mod test_helpers; use torrust_tracker_clock::clock; /// This code needs to be copied into each crate. @@ -416,8 +416,8 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::announce_handler::AnnounceHandler; - use crate::core_tests::initialize_handlers; use crate::scrape_handler::ScrapeHandler; + use crate::test_helpers::tests::initialize_handlers; fn initialize_handlers_for_public_tracker() -> (Arc, Arc) { let config = configuration::ephemeral_public(); @@ -445,7 +445,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use crate::announce_handler::PeersWanted; - use crate::core_tests::{complete_peer, incomplete_peer}; + use crate::test_helpers::tests::{complete_peer, incomplete_peer}; use crate::tests::the_tracker::initialize_handlers_for_public_tracker; #[tokio::test] @@ -500,7 +500,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use crate::announce_handler::PeersWanted; - use crate::core_tests::{complete_peer, incomplete_peer}; + use crate::test_helpers::tests::{complete_peer, incomplete_peer}; use crate::tests::the_tracker::{initialize_handlers_for_listed_tracker, peer_ip}; #[tokio::test] diff --git a/packages/tracker-core/src/test_helpers.rs b/packages/tracker-core/src/test_helpers.rs new file mode 100644 index 000000000..06f5ce384 --- /dev/null +++ 
b/packages/tracker-core/src/test_helpers.rs @@ -0,0 +1,219 @@ +//! Some generic test helpers functions. + +#[cfg(test)] +pub(crate) mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; + use bittorrent_primitives::info_hash::InfoHash; + use rand::Rng; + use torrust_tracker_configuration::Configuration; + #[cfg(test)] + use torrust_tracker_configuration::Core; + use torrust_tracker_primitives::peer::Peer; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + #[cfg(test)] + use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; + + use crate::announce_handler::AnnounceHandler; + use crate::databases::setup::initialize_database; + use crate::scrape_handler::ScrapeHandler; + use crate::torrent::repository::in_memory::InMemoryTorrentRepository; + use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use crate::whitelist::repository::in_memory::InMemoryWhitelist; + use crate::whitelist::{self}; + + /// Generates a random `InfoHash`. + #[must_use] + pub fn random_info_hash() -> InfoHash { + let mut rng = rand::rng(); + let mut random_bytes = [0u8; 20]; + rng.fill(&mut random_bytes); + + InfoHash::from_bytes(&random_bytes) + } + + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. + #[must_use] + pub fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") + } + + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. 
+ #[must_use] + pub fn sample_info_hash_one() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") + } + + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. + #[must_use] + pub fn sample_info_hash_two() -> InfoHash { + "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") + } + + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. + #[must_use] + pub fn sample_info_hash_alphabetically_ordered_after_sample_info_hash_one() -> InfoHash { + "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") + } + + /// Sample peer whose state is not relevant for the tests. + #[must_use] + pub fn sample_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } + + #[must_use] + pub fn sample_peer_one() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000001"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } + + #[must_use] + pub fn sample_peer_two() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000002"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8082), + updated: 
DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } + + #[must_use] + pub fn seeder() -> Peer { + complete_peer() + } + + #[must_use] + pub fn leecher() -> Peer { + incomplete_peer() + } + + #[must_use] + pub fn started_peer() -> Peer { + incomplete_peer() + } + + #[must_use] + pub fn completed_peer() -> Peer { + complete_peer() + } + + /// A peer that counts as `complete` is swarm metadata + /// IMPORTANT!: it only counts if the it has been announce at least once before + /// announcing the `AnnounceEvent::Completed` event. + #[must_use] + pub fn complete_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } + + /// A peer that counts as `incomplete` is swarm metadata + #[must_use] + pub fn incomplete_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(1000), // Still bytes to download + event: AnnounceEvent::Started, + } + } + + #[must_use] + pub fn initialize_handlers(config: &Configuration) -> (Arc, Arc) { + let database = initialize_database(&config.core); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(whitelist::authorization::WhitelistAuthorization::new( + &config.core, + &in_memory_whitelist.clone(), + )); + let in_memory_torrent_repository = 
Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + (announce_handler, scrape_handler) + } + + /// # Panics + /// + /// Will panic if the temporary database file path is not a valid UFT string. + #[cfg(test)] + #[must_use] + pub fn ephemeral_configuration() -> Core { + let mut config = Core::default(); + + let temp_file = ephemeral_sqlite_database(); + temp_file.to_str().unwrap().clone_into(&mut config.database.path); + + config + } + + /// # Panics + /// + /// Will panic if the temporary database file path is not a valid UFT string. + #[cfg(test)] + #[must_use] + pub fn ephemeral_configuration_for_listed_tracker() -> Core { + let mut config = Core { + listed: true, + ..Default::default() + }; + + let temp_file = ephemeral_sqlite_database(); + temp_file.to_str().unwrap().clone_into(&mut config.database.path); + + config + } +} diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index 778ac6d92..9dac35258 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -16,6 +16,7 @@ pub struct TorrentsManager { in_memory_torrent_repository: Arc, /// The persistent torrents repository. + #[allow(dead_code)] db_torrent_repository: Arc, } @@ -40,7 +41,8 @@ impl TorrentsManager { /// # Errors /// /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. 
- pub fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { + #[allow(dead_code)] + pub(crate) fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.db_torrent_repository.load_all()?; self.in_memory_torrent_repository.import_persistent(&persistent_torrents); @@ -71,8 +73,8 @@ mod tests { use torrust_tracker_torrent_repository::entry::EntrySync; use super::{DatabasePersistentTorrentRepository, TorrentsManager}; - use crate::core_tests::{ephemeral_configuration, sample_info_hash}; use crate::databases::setup::initialize_database; + use crate::test_helpers::tests::{ephemeral_configuration, sample_info_hash}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; struct TorrentsManagerDeps { @@ -138,7 +140,7 @@ mod tests { use torrust_tracker_clock::clock::{self}; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::core_tests::{ephemeral_configuration, sample_info_hash, sample_peer}; + use crate::test_helpers::tests::{ephemeral_configuration, sample_info_hash, sample_peer}; use crate::torrent::manager::tests::{initialize_torrents_manager, initialize_torrents_manager_with}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; diff --git a/packages/tracker-core/src/torrent/mod.rs b/packages/tracker-core/src/torrent/mod.rs index 340f049d2..7ca9000f8 100644 --- a/packages/tracker-core/src/torrent/mod.rs +++ b/packages/tracker-core/src/torrent/mod.rs @@ -29,8 +29,11 @@ pub mod manager; pub mod repository; pub mod services; -use torrust_tracker_torrent_repository::{EntryMutexStd, TorrentsSkipMapMutexStd}; +#[cfg(test)] +use torrust_tracker_torrent_repository::EntryMutexStd; +use torrust_tracker_torrent_repository::TorrentsSkipMapMutexStd; // Currently used types from the torrent repository crate. 
-pub type Torrents = TorrentsSkipMapMutexStd; -pub type TorrentEntry = EntryMutexStd; +pub(crate) type Torrents = TorrentsSkipMapMutexStd; +#[cfg(test)] +pub(crate) type TorrentEntry = EntryMutexStd; diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index baa0c4fdb..26302260b 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -32,33 +32,34 @@ impl InMemoryTorrentRepository { self.torrents.upsert_peer(info_hash, peer); } + #[cfg(test)] #[must_use] - pub fn remove(&self, key: &InfoHash) -> Option { + pub(crate) fn remove(&self, key: &InfoHash) -> Option { self.torrents.remove(key) } - pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + pub(crate) fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { self.torrents.remove_inactive_peers(current_cutoff); } - pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + pub(crate) fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { self.torrents.remove_peerless_torrents(policy); } #[must_use] - pub fn get(&self, key: &InfoHash) -> Option { + pub(crate) fn get(&self, key: &InfoHash) -> Option { self.torrents.get(key) } #[must_use] - pub fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { self.torrents.get_paginated(pagination) } /// It returns the data for a `scrape` response or empty if the torrent is /// not found. 
#[must_use] - pub fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { + pub(crate) fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { match self.torrents.get(info_hash) { Some(torrent_entry) => torrent_entry.get_swarm_metadata(), None => SwarmMetadata::zeroed(), @@ -69,7 +70,7 @@ impl InMemoryTorrentRepository { /// /// It filters out the client making the request. #[must_use] - pub fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { + pub(crate) fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { match self.torrents.get(info_hash) { None => vec![], Some(entry) => entry.get_peers_for_client(&peer.peer_addr, Some(max(limit, TORRENT_PEERS_LIMIT))), @@ -135,7 +136,7 @@ mod tests { use std::sync::Arc; - use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::test_helpers::tests::{sample_info_hash, sample_peer}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; #[tokio::test] @@ -171,7 +172,7 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::test_helpers::tests::{sample_info_hash, sample_peer}; use crate::torrent::repository::in_memory::tests::the_in_memory_torrent_repository::numeric_peer_id; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -233,7 +234,7 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::test_helpers::tests::{sample_info_hash, sample_peer}; use crate::torrent::repository::in_memory::tests::the_in_memory_torrent_repository::numeric_peer_id; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -303,7 +304,7 @@ mod tests { use torrust_tracker_configuration::TrackerPolicy; use 
torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::test_helpers::tests::{sample_info_hash, sample_peer}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; #[tokio::test] @@ -374,7 +375,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_torrent_repository::entry::EntrySync; - use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::test_helpers::tests::{sample_info_hash, sample_peer}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::torrent::TorrentEntry; @@ -429,7 +430,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::core_tests::{sample_info_hash, sample_peer}; + use crate::test_helpers::tests::{sample_info_hash, sample_peer}; use crate::torrent::repository::in_memory::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -467,7 +468,7 @@ mod tests { use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::core_tests::{ + use crate::test_helpers::tests::{ sample_info_hash_alphabetically_ordered_after_sample_info_hash_one, sample_info_hash_one, sample_peer_one, sample_peer_two, }; @@ -577,7 +578,7 @@ mod tests { use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; - use crate::core_tests::{complete_peer, leecher, sample_info_hash, seeder}; + use crate::test_helpers::tests::{complete_peer, leecher, sample_info_hash, seeder}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; // todo: refactor to use test parametrization @@ -689,7 +690,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::core_tests::{leecher, sample_info_hash}; + 
use crate::test_helpers::tests::{leecher, sample_info_hash}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; #[tokio::test] @@ -728,7 +729,7 @@ mod tests { use torrust_tracker_primitives::PersistentTorrents; - use crate::core_tests::sample_info_hash; + use crate::test_helpers::tests::sample_info_hash; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; #[tokio::test] diff --git a/packages/tracker-core/src/torrent/repository/persisted.rs b/packages/tracker-core/src/torrent/repository/persisted.rs index 224919d0e..0430f03bb 100644 --- a/packages/tracker-core/src/torrent/repository/persisted.rs +++ b/packages/tracker-core/src/torrent/repository/persisted.rs @@ -29,7 +29,7 @@ impl DatabasePersistentTorrentRepository { /// # Errors /// /// Will return a database `Err` if unable to load. - pub fn load_all(&self) -> Result { + pub(crate) fn load_all(&self) -> Result { self.database.load_persistent_torrents() } @@ -38,7 +38,7 @@ impl DatabasePersistentTorrentRepository { /// # Errors /// /// Will return a database `Err` if unable to save. 
- pub fn save(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error> { + pub(crate) fn save(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error> { self.database.save_persistent_torrent(info_hash, downloaded) } } @@ -49,8 +49,8 @@ mod tests { use torrust_tracker_primitives::PersistentTorrents; use super::DatabasePersistentTorrentRepository; - use crate::core_tests::{ephemeral_configuration, sample_info_hash, sample_info_hash_one, sample_info_hash_two}; use crate::databases::setup::initialize_database; + use crate::test_helpers::tests::{ephemeral_configuration, sample_info_hash, sample_info_hash_one, sample_info_hash_two}; fn initialize_db_persistent_torrent_repository() -> DatabasePersistentTorrentRepository { let config = ephemeral_configuration(); diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index c36190ed1..4c470bb74 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -302,7 +302,7 @@ mod tests { use std::sync::Arc; - use crate::core_tests::sample_info_hash; + use crate::test_helpers::tests::sample_info_hash; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::torrent::services::tests::sample_peer; use crate::torrent::services::{get_torrents, BasicInfo}; diff --git a/packages/tracker-core/src/whitelist/authorization.rs b/packages/tracker-core/src/whitelist/authorization.rs index 66f909226..3b7b8b4fb 100644 --- a/packages/tracker-core/src/whitelist/authorization.rs +++ b/packages/tracker-core/src/whitelist/authorization.rs @@ -88,8 +88,8 @@ mod tests { use torrust_tracker_configuration::Core; - use crate::core_tests::sample_info_hash; use crate::error::WhitelistError; + use crate::test_helpers::tests::sample_info_hash; use crate::whitelist::authorization::tests::the_whitelist_authorization_for_announce_and_scrape_actions::{ initialize_whitelist_authorization_and_dependencies_with, 
initialize_whitelist_authorization_with, }; @@ -129,7 +129,7 @@ mod tests { use torrust_tracker_configuration::Core; - use crate::core_tests::sample_info_hash; + use crate::test_helpers::tests::sample_info_hash; use crate::whitelist::authorization::tests::the_whitelist_authorization_for_announce_and_scrape_actions::{ initialize_whitelist_authorization_and_dependencies_with, initialize_whitelist_authorization_with, }; diff --git a/packages/tracker-core/src/whitelist/manager.rs b/packages/tracker-core/src/whitelist/manager.rs index e1cd2f89e..5ebd6db36 100644 --- a/packages/tracker-core/src/whitelist/manager.rs +++ b/packages/tracker-core/src/whitelist/manager.rs @@ -73,9 +73,9 @@ mod tests { use torrust_tracker_configuration::Core; - use crate::core_tests::ephemeral_configuration_for_listed_tracker; use crate::databases::setup::initialize_database; use crate::databases::Database; + use crate::test_helpers::tests::ephemeral_configuration_for_listed_tracker; use crate::whitelist::manager::WhitelistManager; use crate::whitelist::repository::in_memory::InMemoryWhitelist; use crate::whitelist::repository::persisted::DatabaseWhitelist; @@ -111,7 +111,7 @@ mod tests { mod configured_as_whitelisted { mod handling_the_torrent_whitelist { - use crate::core_tests::sample_info_hash; + use crate::test_helpers::tests::sample_info_hash; use crate::whitelist::manager::tests::initialize_whitelist_manager_for_whitelisted_tracker; #[tokio::test] @@ -141,7 +141,7 @@ mod tests { } mod persistence { - use crate::core_tests::sample_info_hash; + use crate::test_helpers::tests::sample_info_hash; use crate::whitelist::manager::tests::initialize_whitelist_manager_for_whitelisted_tracker; #[tokio::test] diff --git a/packages/tracker-core/src/whitelist/mod.rs b/packages/tracker-core/src/whitelist/mod.rs index 8521485f7..a39768e93 100644 --- a/packages/tracker-core/src/whitelist/mod.rs +++ b/packages/tracker-core/src/whitelist/mod.rs @@ -2,7 +2,7 @@ pub mod authorization; pub mod manager; pub 
mod repository; pub mod setup; -pub mod whitelist_tests; +pub mod test_helpers; #[cfg(test)] mod tests { @@ -10,8 +10,8 @@ mod tests { mod configured_as_whitelisted { mod handling_authorization { - use crate::core_tests::sample_info_hash; - use crate::whitelist::whitelist_tests::initialize_whitelist_services_for_listed_tracker; + use crate::test_helpers::tests::sample_info_hash; + use crate::whitelist::test_helpers::tests::initialize_whitelist_services_for_listed_tracker; #[tokio::test] async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { diff --git a/packages/tracker-core/src/whitelist/repository/in_memory.rs b/packages/tracker-core/src/whitelist/repository/in_memory.rs index befd6fed6..4faeda784 100644 --- a/packages/tracker-core/src/whitelist/repository/in_memory.rs +++ b/packages/tracker-core/src/whitelist/repository/in_memory.rs @@ -14,7 +14,7 @@ impl InMemoryWhitelist { } /// It removes a torrent from the whitelist in memory. - pub async fn remove(&self, info_hash: &InfoHash) -> bool { + pub(crate) async fn remove(&self, info_hash: &InfoHash) -> bool { self.whitelist.write().await.remove(info_hash) } @@ -24,7 +24,7 @@ impl InMemoryWhitelist { } /// It clears the whitelist. 
- pub async fn clear(&self) { + pub(crate) async fn clear(&self) { let mut whitelist = self.whitelist.write().await; whitelist.clear(); } @@ -33,7 +33,7 @@ impl InMemoryWhitelist { #[cfg(test)] mod tests { - use crate::core_tests::sample_info_hash; + use crate::test_helpers::tests::sample_info_hash; use crate::whitelist::repository::in_memory::InMemoryWhitelist; #[tokio::test] diff --git a/packages/tracker-core/src/whitelist/repository/persisted.rs b/packages/tracker-core/src/whitelist/repository/persisted.rs index 5101b5e35..4773cfbe6 100644 --- a/packages/tracker-core/src/whitelist/repository/persisted.rs +++ b/packages/tracker-core/src/whitelist/repository/persisted.rs @@ -22,7 +22,7 @@ impl DatabaseWhitelist { /// # Errors /// /// Will return a `database::Error` if unable to add the `info_hash` to the whitelist database. - pub fn add(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + pub(crate) fn add(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; if is_whitelisted { @@ -39,7 +39,7 @@ impl DatabaseWhitelist { /// # Errors /// /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. - pub fn remove(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + pub(crate) fn remove(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; if !is_whitelisted { @@ -56,7 +56,7 @@ impl DatabaseWhitelist { /// # Errors /// /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. 
- pub fn load_from_database(&self) -> Result, databases::error::Error> { + pub(crate) fn load_from_database(&self) -> Result, databases::error::Error> { self.database.load_whitelist() } } @@ -65,8 +65,8 @@ impl DatabaseWhitelist { mod tests { mod the_persisted_whitelist_repository { - use crate::core_tests::{ephemeral_configuration_for_listed_tracker, sample_info_hash}; use crate::databases::setup::initialize_database; + use crate::test_helpers::tests::{ephemeral_configuration_for_listed_tracker, sample_info_hash}; use crate::whitelist::repository::persisted::DatabaseWhitelist; fn initialize_database_whitelist() -> DatabaseWhitelist { diff --git a/packages/tracker-core/src/whitelist/test_helpers.rs b/packages/tracker-core/src/whitelist/test_helpers.rs new file mode 100644 index 000000000..cc30c4476 --- /dev/null +++ b/packages/tracker-core/src/whitelist/test_helpers.rs @@ -0,0 +1,32 @@ +//! Some generic test helpers functions. + +#[cfg(test)] +pub(crate) mod tests { + + use std::sync::Arc; + + use torrust_tracker_configuration::Configuration; + + use crate::databases::setup::initialize_database; + use crate::whitelist::authorization::WhitelistAuthorization; + use crate::whitelist::manager::WhitelistManager; + use crate::whitelist::repository::in_memory::InMemoryWhitelist; + use crate::whitelist::setup::initialize_whitelist_manager; + + #[must_use] + pub fn initialize_whitelist_services(config: &Configuration) -> (Arc, Arc) { + let database = initialize_database(&config.core); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); + let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + + (whitelist_authorization, whitelist_manager) + } + + #[must_use] + pub fn initialize_whitelist_services_for_listed_tracker() -> (Arc, Arc) { + use torrust_tracker_test_helpers::configuration; + + 
initialize_whitelist_services(&configuration::ephemeral_listed()) + } +} diff --git a/packages/tracker-core/src/whitelist/whitelist_tests.rs b/packages/tracker-core/src/whitelist/whitelist_tests.rs deleted file mode 100644 index d2fd275f2..000000000 --- a/packages/tracker-core/src/whitelist/whitelist_tests.rs +++ /dev/null @@ -1,27 +0,0 @@ -use std::sync::Arc; - -use torrust_tracker_configuration::Configuration; - -use super::authorization::WhitelistAuthorization; -use super::manager::WhitelistManager; -use super::repository::in_memory::InMemoryWhitelist; -use crate::databases::setup::initialize_database; -use crate::whitelist::setup::initialize_whitelist_manager; - -#[must_use] -pub fn initialize_whitelist_services(config: &Configuration) -> (Arc, Arc) { - let database = initialize_database(&config.core); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); - let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - - (whitelist_authorization, whitelist_manager) -} - -#[cfg(test)] -#[must_use] -pub fn initialize_whitelist_services_for_listed_tracker() -> (Arc, Arc) { - use torrust_tracker_test_helpers::configuration; - - initialize_whitelist_services(&configuration::ephemeral_listed()) -} diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 0236215f2..e0d77ab8a 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -120,6 +120,7 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { )); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let torrents_manager = Arc::new(TorrentsManager::new( &configuration.core, &in_memory_torrent_repository, diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs index 
fa0ccc776..6bc93992f 100644 --- a/src/servers/http/mod.rs +++ b/src/servers/http/mod.rs @@ -306,6 +306,7 @@ use serde::{Deserialize, Serialize}; pub mod server; +pub mod test_helpers; pub mod v1; pub const HTTP_TRACKER_LOG_TARGET: &str = "HTTP TRACKER"; diff --git a/src/servers/http/test_helpers.rs b/src/servers/http/test_helpers.rs new file mode 100644 index 000000000..8c3020c52 --- /dev/null +++ b/src/servers/http/test_helpers.rs @@ -0,0 +1,16 @@ +//! Some generic test helpers functions. + +#[cfg(test)] +pub(crate) mod tests { + use bittorrent_primitives::info_hash::InfoHash; + + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. + #[must_use] + pub fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") + } +} diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index f76aa7a07..64939ff48 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -254,7 +254,6 @@ mod tests { use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; use bittorrent_tracker_core::authentication::service::AuthenticationService; - use bittorrent_tracker_core::core_tests::sample_info_hash; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; @@ -264,6 +263,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::packages::http_tracker_core; + use crate::servers::http::test_helpers::tests::sample_info_hash; struct CoreTrackerServices { pub core_config: Arc, diff --git a/src/servers/http/v1/services/announce.rs 
b/src/servers/http/v1/services/announce.rs index 4de9296b3..e321ad01f 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -153,7 +153,6 @@ mod tests { use std::sync::Arc; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; - use bittorrent_tracker_core::core_tests::sample_info_hash; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; @@ -165,6 +164,7 @@ mod tests { use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; use crate::packages::http_tracker_core; + use crate::servers::http::test_helpers::tests::sample_info_hash; use crate::servers::http::v1::services::announce::invoke; use crate::servers::http::v1::services::announce::tests::{ initialize_core_tracker_services, sample_peer, MockHttpStatsEventSender, diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 3a2323693..e2eb4f87c 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -84,7 +84,6 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::AnnounceHandler; - use bittorrent_tracker_core::core_tests::sample_info_hash; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -98,6 +97,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::packages::http_tracker_core; + use crate::servers::http::test_helpers::tests::sample_info_hash; fn initialize_announce_and_scrape_handlers_for_public_tracker() -> (Arc, Arc) { let config = 
configuration::ephemeral_public(); @@ -162,10 +162,11 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use crate::packages::{self, http_tracker_core}; + use crate::servers::http::test_helpers::tests::sample_info_hash; use crate::servers::http::v1::services::scrape::invoke; use crate::servers::http::v1::services::scrape::tests::{ - initialize_announce_and_scrape_handlers_for_public_tracker, initialize_scrape_handler, sample_info_hash, - sample_info_hashes, sample_peer, MockHttpStatsEventSender, + initialize_announce_and_scrape_handlers_for_public_tracker, initialize_scrape_handler, sample_info_hashes, + sample_peer, MockHttpStatsEventSender, }; #[tokio::test] @@ -247,10 +248,10 @@ mod tests { use torrust_tracker_primitives::core::ScrapeData; use crate::packages::{self, http_tracker_core}; + use crate::servers::http::test_helpers::tests::sample_info_hash; use crate::servers::http::v1::services::scrape::fake; use crate::servers::http::v1::services::scrape::tests::{ - initialize_announce_and_scrape_handlers_for_public_tracker, sample_info_hash, sample_info_hashes, sample_peer, - MockHttpStatsEventSender, + initialize_announce_and_scrape_handlers_for_public_tracker, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; #[tokio::test] From 74d0d2851c97ef220052bef3f1d7bf6543b49bfd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 12 Feb 2025 09:59:27 +0000 Subject: [PATCH 251/802] docs: [#1261] fix doc errors in tracker-core --- packages/tracker-core/src/authentication/key/mod.rs | 4 ++-- packages/tracker-core/src/databases/driver/mod.rs | 3 --- packages/tracker-core/src/databases/error.rs | 2 +- packages/tracker-core/src/databases/mod.rs | 4 ++-- packages/tracker-core/src/lib.rs | 13 ------------- 5 files changed, 5 insertions(+), 21 deletions(-) diff --git a/packages/tracker-core/src/authentication/key/mod.rs b/packages/tracker-core/src/authentication/key/mod.rs index fce18c0dd..ea9edb7d5 100644 --- 
a/packages/tracker-core/src/authentication/key/mod.rs +++ b/packages/tracker-core/src/authentication/key/mod.rs @@ -6,7 +6,7 @@ //! //! There are services to [`generate_key`] and [`verify_key_expiration`] authentication keys. //! -//! Authentication keys are used only by [`HTTP`](crate::servers::http) trackers. All keys have an expiration time, that means +//! Authentication keys are used only by HTTP trackers. All keys have an expiration time, that means //! they are only valid during a period of time. After that time the expiring key will no longer be valid. //! //! Keys are stored in this struct: @@ -112,7 +112,7 @@ pub fn generate_key(lifetime: Option) -> PeerKey { /// /// # Errors /// -/// Will return a verification error [`crate::authentication::key::Error`] if +/// Will return a verification error [`enum@crate::authentication::key::Error`] if /// it cannot verify the key. pub fn verify_key_expiration(auth_key: &PeerKey) -> Result<(), Error> { let current_time: DurationSinceUnixEpoch = CurrentClock::now(); diff --git a/packages/tracker-core/src/databases/driver/mod.rs b/packages/tracker-core/src/databases/driver/mod.rs index 2bc6a1e3c..06e912f7c 100644 --- a/packages/tracker-core/src/databases/driver/mod.rs +++ b/packages/tracker-core/src/databases/driver/mod.rs @@ -1,7 +1,4 @@ //! Database driver factory. -//! -//! See [`databases::driver::build`](crate::core::databases::driver::build) -//! function for more information. use mysql::Mysql; use serde::{Deserialize, Serialize}; use sqlite::Sqlite; diff --git a/packages/tracker-core/src/databases/error.rs b/packages/tracker-core/src/databases/error.rs index 0f3207587..6b340080e 100644 --- a/packages/tracker-core/src/databases/error.rs +++ b/packages/tracker-core/src/databases/error.rs @@ -1,6 +1,6 @@ //! Database errors. //! -//! This module contains the [Database errors](crate::core::databases::error::Error). +//! This module contains the [Database errors](crate::databases::error::Error). 
use std::panic::Location; use std::sync::Arc; diff --git a/packages/tracker-core/src/databases/mod.rs b/packages/tracker-core/src/databases/mod.rs index 010252139..1de13332f 100644 --- a/packages/tracker-core/src/databases/mod.rs +++ b/packages/tracker-core/src/databases/mod.rs @@ -4,8 +4,8 @@ //! //! There are two implementations of the trait (two drivers): //! -//! - [`Mysql`](crate::core::databases::mysql::Mysql) -//! - [`Sqlite`](crate::core::databases::sqlite::Sqlite) +//! - `Mysql` +//! - `Sqlite` //! //! > **NOTICE**: There are no database migrations. If there are any changes, //! > we will implemented them or provide a script to migrate to the new schema. diff --git a/packages/tracker-core/src/lib.rs b/packages/tracker-core/src/lib.rs index ecbaef9c5..ac6e4edac 100644 --- a/packages/tracker-core/src/lib.rs +++ b/packages/tracker-core/src/lib.rs @@ -25,7 +25,6 @@ //! - [Torrents](#torrents) //! - [Peers](#peers) //! - [Configuration](#configuration) -//! - [Services](#services) //! - [Authentication](#authentication) //! - [Statistics](#statistics) //! - [Persistence](#persistence) @@ -342,18 +341,6 @@ //! //! Refer to the [`configuration` module documentation](https://docs.rs/torrust-tracker-configuration) to get more information about all options. //! -//! # Services -//! -//! Services are domain services on top of the core tracker domain. Right now there are two types of service: -//! -//! - For statistics: [`crate::packages::statistics::services`] -//! - For torrents: [`crate::core::torrent::services`] -//! -//! Services usually format the data inside the tracker to make it easier to consume by other parts. -//! They also decouple the internal data structure, used by the tracker, from the way we deliver that data to the consumers. -//! The internal data structure is designed for performance or low memory consumption. And it should be changed -//! without affecting the external consumers. -//! //! 
Services can include extra features like pagination, for example. //! //! # Authentication From 181c27e749fe8da1f86c10960cb622bc1a5e082a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 12 Feb 2025 10:40:28 +0000 Subject: [PATCH 252/802] docs: [#1261] review docs for tracker-core package --- packages/http-protocol/src/v1/query.rs | 7 + packages/tracker-core/src/announce_handler.rs | 131 ++++++- .../src/authentication/handler.rs | 141 ++++--- .../src/authentication/key/mod.rs | 102 +++-- .../src/authentication/key/peer_key.rs | 103 ++++- .../key/repository/in_memory.rs | 53 ++- .../src/authentication/key/repository/mod.rs | 1 + .../key/repository/persisted.rs | 40 +- .../tracker-core/src/authentication/mod.rs | 15 + .../src/authentication/service.rs | 53 ++- .../src/databases/driver/mysql.rs | 11 + .../src/databases/driver/sqlite.rs | 23 +- packages/tracker-core/src/databases/error.rs | 38 +- packages/tracker-core/src/databases/mod.rs | 149 ++++---- packages/tracker-core/src/databases/setup.rs | 44 ++- packages/tracker-core/src/error.rs | 26 +- packages/tracker-core/src/lib.rs | 358 +++--------------- packages/tracker-core/src/scrape_handler.rs | 74 +++- packages/tracker-core/src/torrent/manager.rs | 50 ++- packages/tracker-core/src/torrent/mod.rs | 180 ++++++++- .../src/torrent/repository/in_memory.rs | 146 ++++++- .../src/torrent/repository/mod.rs | 1 + .../src/torrent/repository/persisted.rs | 52 ++- packages/tracker-core/src/torrent/services.rs | 109 +++++- .../src/whitelist/authorization.rs | 29 +- .../tracker-core/src/whitelist/manager.rs | 48 ++- packages/tracker-core/src/whitelist/mod.rs | 18 + .../src/whitelist/repository/in_memory.rs | 25 +- .../src/whitelist/repository/mod.rs | 1 + .../src/whitelist/repository/persisted.rs | 22 +- packages/tracker-core/src/whitelist/setup.rs | 26 ++ .../src/whitelist/test_helpers.rs | 7 +- 32 files changed, 1467 insertions(+), 616 deletions(-) diff --git a/packages/http-protocol/src/v1/query.rs 
b/packages/http-protocol/src/v1/query.rs index f77145cb6..66afddf65 100644 --- a/packages/http-protocol/src/v1/query.rs +++ b/packages/http-protocol/src/v1/query.rs @@ -249,6 +249,13 @@ mod tests { assert_eq!(query.get_param("param2"), Some("value2".to_string())); } + #[test] + fn should_ignore_duplicate_param_values_when_asked_to_return_only_one_value() { + let query = Query::from(vec![("param1", "value1"), ("param1", "value2")]); + + assert_eq!(query.get_param("param1"), Some("value1".to_string())); + } + #[test] fn should_fail_parsing_an_invalid_query_string() { let invalid_raw_query = "name=value=value"; diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index cd0a9b861..6707f1917 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -1,3 +1,95 @@ +//! Announce handler. +//! +//! Handling `announce` requests is the most important task for a `BitTorrent` +//! tracker. +//! +//! A `BitTorrent` swarm is a network of peers that are all trying to download +//! the same torrent. When a peer wants to find other peers it announces itself +//! to the swarm via the tracker. The peer sends its data to the tracker so that +//! the tracker can add it to the swarm. The tracker responds to the peer with +//! the list of other peers in the swarm so that the peer can contact them to +//! start downloading pieces of the file from them. +//! +//! Once you have instantiated the `AnnounceHandler` you can `announce` a new [`peer::Peer`](torrust_tracker_primitives) with: +//! +//! ```rust,no_run +//! use std::net::SocketAddr; +//! use std::net::IpAddr; +//! use std::net::Ipv4Addr; +//! use std::str::FromStr; +//! +//! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +//! use torrust_tracker_primitives::DurationSinceUnixEpoch; +//! use torrust_tracker_primitives::peer; +//! use bittorrent_primitives::info_hash::InfoHash; +//! +//! 
let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); +//! +//! let peer = peer::Peer { +//! peer_id: PeerId(*b"-qB00000000000000001"), +//! peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), +//! updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), +//! uploaded: NumberOfBytes::new(0), +//! downloaded: NumberOfBytes::new(0), +//! left: NumberOfBytes::new(0), +//! event: AnnounceEvent::Completed, +//! }; +//! +//! let peer_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); +//! ``` +//! +//! ```text +//! let announce_data = announce_handler.announce(&info_hash, &mut peer, &peer_ip).await; +//! ``` +//! +//! The handler returns the list of peers for the torrent with the infohash +//! `3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0`, filtering out the peer that is +//! making the `announce` request. +//! +//! > **NOTICE**: that the peer argument is mutable because the handler can +//! > change the peer IP if the peer is using a loopback IP. +//! +//! The `peer_ip` argument is the resolved peer ip. It's a common practice that +//! trackers ignore the peer ip in the `announce` request params, and resolve +//! the peer ip using the IP of the client making the request. As the tracker is +//! a domain service, the peer IP must be provided for the handler user, which +//! is usually a higher component with access the the request metadata, for +//! example, connection data, proxy headers, etcetera. +//! +//! The returned struct is: +//! +//! ```rust,no_run +//! use torrust_tracker_primitives::peer; +//! use torrust_tracker_configuration::AnnouncePolicy; +//! +//! pub struct AnnounceData { +//! pub peers: Vec, +//! pub swarm_stats: SwarmMetadata, +//! pub policy: AnnouncePolicy, // the tracker announce policy. +//! } +//! +//! pub struct SwarmMetadata { +//! pub completed: u32, // The number of peers that have ever completed downloading +//! 
pub seeders: u32, // The number of active peers that have completed downloading (seeders) +//! pub leechers: u32, // The number of active peers that have not completed downloading (leechers) +//! } +//! +//! // Core tracker configuration +//! pub struct AnnounceInterval { +//! // ... +//! pub interval: u32, // Interval in seconds that the client should wait between sending regular announce requests to the tracker +//! pub interval_min: u32, // Minimum announce interval. Clients must not reannounce more frequently than this +//! // ... +//! } +//! ``` +//! +//! ## Related BEPs: +//! +//! Refer to `BitTorrent` BEPs and other sites for more information about the `announce` request: +//! +//! - [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +//! - [BEP 23. Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) +//! - [Vuze docs](https://wiki.vuze.com/w/Announce) use std::net::IpAddr; use std::sync::Arc; @@ -10,18 +102,20 @@ use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use super::torrent::repository::in_memory::InMemoryTorrentRepository; use super::torrent::repository::persisted::DatabasePersistentTorrentRepository; +/// Handles `announce` requests from `BitTorrent` clients. pub struct AnnounceHandler { /// The tracker configuration. config: Core, - /// The in-memory torrents repository. + /// Repository for in-memory torrent data. in_memory_torrent_repository: Arc, - /// The persistent torrents repository. + /// Repository for persistent torrent data (database). db_torrent_repository: Arc, } impl AnnounceHandler { + /// Creates a new `AnnounceHandler`. #[must_use] pub fn new( config: &Core, @@ -35,9 +129,20 @@ impl AnnounceHandler { } } - /// It handles an announce request. + /// Processes an announce request from a peer. /// /// BEP 03: [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html). 
+ /// + /// # Parameters + /// + /// - `info_hash`: The unique identifier of the torrent. + /// - `peer`: The peer announcing itself (may be updated if IP is adjusted). + /// - `remote_client_ip`: The IP address of the client making the request. + /// - `peers_wanted`: Specifies how many peers the client wants in the response. + /// + /// # Returns + /// + /// An `AnnounceData` struct containing the list of peers, swarm statistics, and tracker policy. pub fn announce( &self, info_hash: &InfoHash, @@ -77,9 +182,8 @@ impl AnnounceHandler { } } - /// It updates the torrent entry in memory, it also stores in the database - /// the torrent info data which is persistent, and finally return the data - /// needed for a `announce` request response. + /// Updates the torrent data in memory, persists statistics if needed, and + /// returns the updated swarm stats. #[must_use] fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { let swarm_metadata_before = self.in_memory_torrent_repository.get_swarm_metadata(info_hash); @@ -95,7 +199,7 @@ impl AnnounceHandler { swarm_metadata_after } - /// It stores the torrents stats into the database (if persistency is enabled). + /// Persists torrent statistics to the database if persistence is enabled. fn persist_stats(&self, info_hash: &InfoHash, swarm_metadata: &SwarmMetadata) { if self.config.tracker_policy.persistent_torrent_completed_stat { let completed = swarm_metadata.downloaded; @@ -106,22 +210,25 @@ impl AnnounceHandler { } } -/// How many peers the peer announcing wants in the announce response. +/// Specifies how many peers a client wants in the announce response. #[derive(Clone, Debug, PartialEq, Default)] pub enum PeersWanted { - /// The peer wants as many peers as possible in the announce response. + /// Request as many peers as possible (default behavior). #[default] AsManyAsPossible, - /// The peer only wants a certain amount of peers in the announce response. 
+ + /// Request a specific number of peers. Only { amount: usize }, } impl PeersWanted { + /// Request a specific number of peers. #[must_use] pub fn only(limit: u32) -> Self { limit.into() } + /// Returns the maximum number of peers allowed based on the request and tracker limit. fn limit(&self) -> usize { match self { PeersWanted::AsManyAsPossible => TORRENT_PEERS_LIMIT, @@ -159,6 +266,10 @@ impl From for PeersWanted { } } +/// Assigns the correct IP address to a peer based on tracker settings. +/// +/// If the client IP is a loopback address and the tracker has an external IP +/// configured, the external IP will be assigned to the peer. #[must_use] fn assign_ip_address_to_peer(remote_client_ip: &IpAddr, tracker_external_ip: Option) -> IpAddr { if let Some(host_ip) = tracker_external_ip.filter(|_| remote_client_ip.is_loopback()) { diff --git a/packages/tracker-core/src/authentication/handler.rs b/packages/tracker-core/src/authentication/handler.rs index 136060916..178895b8d 100644 --- a/packages/tracker-core/src/authentication/handler.rs +++ b/packages/tracker-core/src/authentication/handler.rs @@ -1,3 +1,11 @@ +//! This module implements the `KeysHandler` service +//! +//! It's responsible for managing authentication keys for the `BitTorrent` tracker. +//! +//! The service handles both persistent and in-memory storage of peer keys, and +//! supports adding new keys (either pre-generated or randomly created), +//! removing keys, and loading keys from the database into memory. Keys can be +//! either permanent or expire after a configurable duration per key. use std::sync::Arc; use std::time::Duration; @@ -11,29 +19,44 @@ use super::{key, CurrentClock, Key, PeerKey}; use crate::databases; use crate::error::PeerKeyError; -/// This type contains the info needed to add a new tracker key. +/// Contains the information needed to add a new tracker key. /// -/// You can upload a pre-generated key or let the app to generate a new one. 
-/// You can also set an expiration date or leave it empty (`None`) if you want -/// to create a permanent key that does not expire. +/// A new key can either be a pre-generated key provided by the user or can be +/// randomly generated by the application. Additionally, the key may be set to +/// expire after a certain number of seconds, or be permanent (if no expiration +/// is specified). #[derive(Debug)] pub struct AddKeyRequest { - /// The pre-generated key. Use `None` to generate a random key. + /// The pre-generated key as a string. If `None` the service will generate a + /// random key. pub opt_key: Option, - /// How long the key will be valid in seconds. Use `None` for permanent keys. + /// The duration (in seconds) for which the key is valid. Use `None` for + /// permanent keys. pub opt_seconds_valid: Option, } +/// The `KeysHandler` service manages the creation, addition, removal, and loading +/// of authentication keys for the tracker. +/// +/// It uses both a persistent (database) repository and an in-memory repository +/// to manage keys. pub struct KeysHandler { - /// The database repository for the authentication keys. + /// The database repository for storing authentication keys persistently. db_key_repository: Arc, - /// In-memory implementation of the authentication key repository. + /// The in-memory repository for caching authentication keys. in_memory_key_repository: Arc, } impl KeysHandler { + /// Creates a new instance of the `KeysHandler` service. + /// + /// # Parameters + /// + /// - `db_key_repository`: A shared reference to the database key repository. + /// - `in_memory_key_repository`: A shared reference to the in-memory key + /// repository. #[must_use] pub fn new(db_key_repository: &Arc, in_memory_key_repository: &Arc) -> Self { Self { @@ -42,18 +65,24 @@ impl KeysHandler { } } - /// Adds new peer keys to the tracker. + /// Adds a new peer key to the tracker. + /// + /// The key may be pre-generated or generated on-the-fly. 
+ /// + /// Depending on whether an expiration duration is specified, the key will + /// be either expiring or permanent. /// - /// Keys can be pre-generated or randomly created. They can also be - /// permanent or expire. + /// # Parameters + /// + /// - `add_key_req`: The request containing options for key creation. /// /// # Errors /// - /// Will return an error if: + /// Returns an error if: /// - /// - The key duration overflows the duration type maximum value. + /// - The provided key duration exceeds the maximum allowed value. /// - The provided pre-generated key is invalid. - /// - The key could not been persisted due to database issues. + /// - There is an error persisting the key in the database. pub async fn add_peer_key(&self, add_key_req: AddKeyRequest) -> Result { if let Some(pre_existing_key) = add_key_req.opt_key { // Pre-generated key @@ -125,29 +154,31 @@ impl KeysHandler { } } - /// It generates a new permanent authentication key. + /// Generates a new permanent authentication key. /// - /// Authentication keys are used by HTTP trackers. + /// Permanent keys do not expire. /// /// # Errors /// - /// Will return a `database::Error` if unable to add the `auth_key` to the database. + /// Returns a `databases::error::Error` if the key cannot be persisted in + /// the database. pub(crate) async fn generate_permanent_peer_key(&self) -> Result { self.generate_expiring_peer_key(None).await } - /// It generates a new expiring authentication key. + /// Generates a new authentication key with an optional expiration lifetime. /// - /// Authentication keys are used by HTTP trackers. + /// If a `lifetime` is provided, the generated key will expire after that + /// duration. The new key is stored both in the database and in memory. /// - /// # Errors + /// # Parameters /// - /// Will return a `database::Error` if unable to add the `auth_key` to the database. + /// - `lifetime`: An optional duration specifying how long the key is valid. 
/// - /// # Arguments + /// # Errors /// - /// * `lifetime` - The duration in seconds for the new key. The key will be - /// no longer valid after `lifetime` seconds. + /// Returns a `databases::error::Error` if there is an issue adding the key + /// to the database. pub async fn generate_expiring_peer_key(&self, lifetime: Option) -> Result { let peer_key = key::generate_key(lifetime); @@ -158,36 +189,36 @@ impl KeysHandler { Ok(peer_key) } - /// It adds a pre-generated permanent authentication key. + /// Adds a pre-generated permanent authentication key. /// - /// Authentication keys are used by HTTP trackers. + /// Internally, this calls `add_expiring_peer_key` with no expiration. /// - /// # Errors + /// # Parameters /// - /// Will return a `database::Error` if unable to add the `auth_key` to the - /// database. For example, if the key already exist. + /// - `key`: The pre-generated key. /// - /// # Arguments + /// # Errors /// - /// * `key` - The pre-generated key. + /// Returns a `databases::error::Error` if there is an issue persisting the + /// key. pub(crate) async fn add_permanent_peer_key(&self, key: Key) -> Result { self.add_expiring_peer_key(key, None).await } - /// It adds a pre-generated authentication key. + /// Adds a pre-generated authentication key with an optional expiration. /// - /// Authentication keys are used by HTTP trackers. + /// The key is stored in both the database and the in-memory repository. /// - /// # Errors + /// # Parameters /// - /// Will return a `database::Error` if unable to add the `auth_key` to the - /// database. For example, if the key already exist. + /// - `key`: The pre-generated key. + /// - `valid_until`: An optional timestamp (as a duration since the Unix + /// epoch) after which the key expires. /// - /// # Arguments + /// # Errors /// - /// * `key` - The pre-generated key. - /// * `lifetime` - The duration in seconds for the new key. The key will be - /// no longer valid after `lifetime` seconds. 
+ /// Returns a `databases::error::Error` if there is an issue adding the key + /// to the database. pub(crate) async fn add_expiring_peer_key( &self, key: Key, @@ -205,11 +236,18 @@ impl KeysHandler { Ok(peer_key) } - /// It removes an authentication key. + /// Removes an authentication key. + /// + /// The key is removed from both the database and the in-memory repository. + /// + /// # Parameters + /// + /// - `key`: A reference to the key to be removed. /// /// # Errors /// - /// Will return a `database::Error` if unable to remove the `key` to the database. + /// Returns a `databases::error::Error` if the key cannot be removed from + /// the database. pub async fn remove_peer_key(&self, key: &Key) -> Result<(), databases::error::Error> { self.db_key_repository.remove(key)?; @@ -218,19 +256,26 @@ impl KeysHandler { Ok(()) } - /// It removes an authentication key from memory. + /// Removes an authentication key from the in-memory repository. + /// + /// This function does not interact with the database. + /// + /// # Parameters + /// + /// - `key`: A reference to the key to be removed. pub(crate) async fn remove_in_memory_auth_key(&self, key: &Key) { self.in_memory_key_repository.remove(key).await; } - /// The `Tracker` stores the authentication keys in memory and in the - /// database. In case you need to restart the `Tracker` you can load the - /// keys from the database into memory with this function. Keys are - /// automatically stored in the database when they are generated. + /// Loads all authentication keys from the database into the in-memory + /// repository. + /// + /// This is useful during tracker startup to ensure that all persisted keys + /// are available in memory. /// /// # Errors /// - /// Will return a `database::Error` if unable to `load_keys` from the database. + /// Returns a `databases::error::Error` if there is an issue loading the keys from the database. 
pub async fn load_peer_keys_from_database(&self) -> Result<(), databases::error::Error> { let keys_from_database = self.db_key_repository.load_keys()?; diff --git a/packages/tracker-core/src/authentication/key/mod.rs b/packages/tracker-core/src/authentication/key/mod.rs index ea9edb7d5..648143928 100644 --- a/packages/tracker-core/src/authentication/key/mod.rs +++ b/packages/tracker-core/src/authentication/key/mod.rs @@ -1,42 +1,45 @@ -//! Tracker authentication services and structs. +//! Tracker authentication services and types. //! -//! This module contains functions to handle tracker keys. -//! Tracker keys are tokens used to authenticate the tracker clients when the tracker runs -//! in `private` or `private_listed` modes. +//! This module provides functions and data structures for handling tracker keys. +//! Tracker keys are tokens used to authenticate tracker clients when the +//! tracker is running in `private` mode. //! -//! There are services to [`generate_key`] and [`verify_key_expiration`] authentication keys. +//! Authentication keys are used exclusively by HTTP trackers. Every key has an +//! expiration time, meaning that it is only valid for a predetermined period. +//! Once the expiration time is reached, an expiring key will be rejected. //! -//! Authentication keys are used only by HTTP trackers. All keys have an expiration time, that means -//! they are only valid during a period of time. After that time the expiring key will no longer be valid. +//! The primary key structure is [`PeerKey`], which couples a randomly generated +//! [`Key`] (a 32-character alphanumeric string) with an optional expiration +//! timestamp. //! -//! Keys are stored in this struct: +//! # Examples //! -//! ```rust,no_run +//! Generating a new key valid for `9999` seconds: +//! +//! ```rust +//! use bittorrent_tracker_core::authentication; +//! use std::time::Duration; +//! +//! let expiring_key = authentication::key::generate_key(Some(Duration::new(9999, 0))); +//! +//! 
// Later, verify that the key is still valid. +//! assert!(authentication::key::verify_key_expiration(&expiring_key).is_ok()); +//! ``` +//! +//! The core key types are defined as follows: +//! +//! ```rust //! use bittorrent_tracker_core::authentication::Key; //! use torrust_tracker_primitives::DurationSinceUnixEpoch; //! //! pub struct PeerKey { -//! /// Random 32-char string. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` +//! /// A random 32-character authentication token (e.g., `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ`) //! pub key: Key, //! -//! /// Timestamp, the key will be no longer valid after this timestamp. -//! /// If `None` the keys will not expire (permanent key). +//! /// The timestamp after which the key expires. If `None`, the key is permanent. //! pub valid_until: Option, //! } //! ``` -//! -//! You can generate a new key valid for `9999` seconds and `0` nanoseconds from the current time with the following: -//! -//! ```rust,no_run -//! use bittorrent_tracker_core::authentication; -//! use std::time::Duration; -//! -//! let expiring_key = authentication::key::generate_key(Some(Duration::new(9999, 0))); -//! -//! // And you can later verify it with: -//! -//! assert!(authentication::key::verify_key_expiration(&expiring_key).is_ok()); -//! ``` pub mod peer_key; pub mod repository; @@ -75,17 +78,33 @@ pub(crate) fn generate_expiring_key(lifetime: Duration) -> PeerKey { generate_key(Some(lifetime)) } -/// It generates a new random 32-char authentication [`PeerKey`]. +/// Generates a new random 32-character authentication key (`PeerKey`). /// -/// It can be an expiring or permanent key. +/// If a lifetime is provided, the generated key will expire after the specified +/// duration; otherwise, the key is permanent (i.e., it never expires). /// /// # Panics /// -/// It would panic if the `lifetime: Duration` + Duration is more than `Duration::MAX`. +/// Panics if the addition of the lifetime to the current time overflows +/// (an extremely unlikely event). 
/// /// # Arguments /// -/// * `lifetime`: if `None` the key will be permanent. +/// * `lifetime`: An optional duration specifying how long the key is valid. +/// If `None`, the key is permanent. +/// +/// # Examples +/// +/// ```rust +/// use bittorrent_tracker_core::authentication::key; +/// use std::time::Duration; +/// +/// // Generate an expiring key valid for 3600 seconds. +/// let expiring_key = key::generate_key(Some(Duration::from_secs(3600))); +/// +/// // Generate a permanent key. +/// let permanent_key = key::generate_key(None); +/// ``` #[must_use] pub fn generate_key(lifetime: Option) -> PeerKey { let random_key = Key::random(); @@ -107,13 +126,27 @@ pub fn generate_key(lifetime: Option) -> PeerKey { } } -/// It verifies an [`PeerKey`]. It checks if the expiration date has passed. -/// Permanent keys without duration (`None`) do not expire. +/// Verifies whether a given authentication key (`PeerKey`) is still valid. +/// +/// For expiring keys, this function compares the key's expiration timestamp +/// against the current time. Permanent keys (with `None` as their expiration) +/// are always valid. /// /// # Errors /// -/// Will return a verification error [`enum@crate::authentication::key::Error`] if -/// it cannot verify the key. +/// Returns a verification error of type [`enum@Error`] if the key has expired. +/// +/// # Examples +/// +/// ```rust +/// use bittorrent_tracker_core::authentication::key; +/// use std::time::Duration; +/// +/// let expiring_key = key::generate_key(Some(Duration::from_secs(100))); +/// +/// // If the key's expiration time has passed, the verification will fail. 
+/// assert!(key::verify_key_expiration(&expiring_key).is_ok()); +/// ``` pub fn verify_key_expiration(auth_key: &PeerKey) -> Result<(), Error> { let current_time: DurationSinceUnixEpoch = CurrentClock::now(); @@ -136,17 +169,20 @@ pub fn verify_key_expiration(auth_key: &PeerKey) -> Result<(), Error> { #[derive(Debug, Error)] #[allow(dead_code)] pub enum Error { + /// Wraps an underlying error encountered during key verification. #[error("Key could not be verified: {source}")] KeyVerificationError { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, + /// Indicates that the key could not be read or found. #[error("Failed to read key: {key}, {location}")] UnableToReadKey { location: &'static Location<'static>, key: Box, }, + /// Indicates that the key has expired. #[error("Key has expired, {location}")] KeyExpired { location: &'static Location<'static> }, } diff --git a/packages/tracker-core/src/authentication/key/peer_key.rs b/packages/tracker-core/src/authentication/key/peer_key.rs index 1d2b1fadc..41aba950b 100644 --- a/packages/tracker-core/src/authentication/key/peer_key.rs +++ b/packages/tracker-core/src/authentication/key/peer_key.rs @@ -1,3 +1,13 @@ +//! Authentication keys for private trackers. +//! +//! This module defines the types and functionality for managing authentication +//! keys used by the tracker. These keys, represented by the `Key` and `PeerKey` +//! types, are essential for authenticating peers in private tracker +//! environments. +//! +//! A `Key` is a 32-character alphanumeric token, while a `PeerKey` couples a +//! `Key` with an optional expiration timestamp. If the expiration is set (via +//! `valid_until`), the key will become invalid after that time. use std::str::FromStr; use std::time::Duration; @@ -11,22 +21,42 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::AUTH_KEY_LENGTH; -/// An authentication key which can potentially have an expiration time. 
-/// After that time is will automatically become invalid. +/// A peer authentication key with an optional expiration time. +/// +/// A `PeerKey` associates a generated `Key` (a 32-character alphanumeric string) +/// with an optional expiration timestamp (`valid_until`). If `valid_until` is +/// `None`, the key is considered permanent. +/// +/// # Example +/// +/// ```rust +/// use std::time::Duration; +/// use bittorrent_tracker_core::authentication::key::peer_key::{Key, PeerKey}; +/// +/// let expiring_key = PeerKey { +/// key: Key::random(), +/// valid_until: Some(Duration::from_secs(3600)), // Expires in 1 hour +/// }; +/// +/// let permanent_key = PeerKey { +/// key: Key::random(), +/// valid_until: None, +/// }; +/// ``` #[derive(Serialize, Deserialize, Debug, Clone)] pub struct PeerKey { - /// Random 32-char string. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` + /// A 32-character authentication key. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` pub key: Key, - /// Timestamp, the key will be no longer valid after this timestamp. - /// If `None` the keys will not expire (permanent key). + /// An optional expiration timestamp. If set, the key becomes invalid after + /// this time. A value of `None` indicates a permanent key. pub valid_until: Option, } impl PartialEq for PeerKey { fn eq(&self, other: &Self) -> bool { - // We ignore the fractions of seconds when comparing the timestamps - // because we only store the seconds in the database. + // When comparing two PeerKeys, ignore fractions of seconds since only + // whole seconds are stored in the database. self.key == other.key && match (&self.valid_until, &other.valid_until) { (Some(a), Some(b)) => a.as_secs() == b.as_secs(), @@ -53,14 +83,17 @@ impl PeerKey { self.key.clone() } - /// It returns the expiry time. For example, for the starting time for Unix Epoch - /// (timestamp 0) it will return a `DateTime` whose string representation is - /// `1970-01-01 00:00:00 UTC`. 
+ /// Computes and returns the expiration time as a UTC `DateTime`, if one + /// exists. + /// + /// The returned time is derived from the stored seconds since the Unix + /// epoch. Note that any fractional seconds are discarded since only whole + /// seconds are stored in the database. /// /// # Panics /// - /// Will panic when the key timestamp overflows the internal i64 type. - /// (this will naturally happen in 292.5 billion years) + /// Panics if the key's timestamp overflows the internal `i64` type (this is + /// extremely unlikely, happening roughly 292.5 billion years from now). #[must_use] pub fn expiry_time(&self) -> Option> { // We remove the fractions of seconds because we only store the seconds @@ -72,17 +105,37 @@ impl PeerKey { /// A token used for authentication. /// -/// - It contains only ascii alphanumeric chars: lower and uppercase letters and -/// numbers. -/// - It's a 32-char string. +/// The `Key` type encapsulates a 32-character string that must consist solely +/// of ASCII alphanumeric characters (0-9, a-z, A-Z). This key is used by the +/// tracker to authenticate peers. +/// +/// # Examples +/// +/// Creating a key from a valid string: +/// +/// ``` +/// use bittorrent_tracker_core::authentication::key::peer_key::Key; +/// let key = Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); +/// ``` +/// +/// Generating a random key: +/// +/// ``` +/// use bittorrent_tracker_core::authentication::key::peer_key::Key; +/// let random_key = Key::random(); +/// ``` #[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, Display, Hash)] pub struct Key(String); impl Key { + /// Constructs a new `Key` from the given string. + /// /// # Errors /// - /// Will return an error is the string represents an invalid key. - /// Valid keys can only contain 32 chars including 0-9, a-z and A-Z. + /// Returns a `ParseKeyError` if: + /// + /// - The input string does not have exactly 32 characters. 
+ /// - The input string contains characters that are not ASCII alphanumeric. pub fn new(value: &str) -> Result { if value.len() != AUTH_KEY_LENGTH { return Err(ParseKeyError::InvalidKeyLength); @@ -95,11 +148,14 @@ impl Key { Ok(Self(value.to_owned())) } - /// It generates a random key. + /// Generates a new random authentication key. + /// + /// The random key is generated by sampling 32 ASCII alphanumeric characters. /// /// # Panics /// - /// Will panic if the random number generator fails to generate a valid key. + /// Panics if the random number generator fails to produce a valid key + /// (extremely unlikely). pub fn random() -> Self { let random_id: String = rng() .sample_iter(&Alphanumeric) @@ -115,9 +171,11 @@ impl Key { } } -/// Error returned when a key cannot be parsed from a string. +/// Errors that can occur when parsing a string into a `Key`. +/// +/// # Examples /// -/// ```text +/// ```rust /// use bittorrent_tracker_core::authentication::Key; /// use std::str::FromStr; /// @@ -132,9 +190,12 @@ impl Key { /// this error. #[derive(Debug, Error)] pub enum ParseKeyError { + /// The provided key does not have exactly 32 characters. #[error("Invalid key length. Key must be have 32 chars")] InvalidKeyLength, + /// The provided key contains invalid characters. Only ASCII alphanumeric + /// characters are allowed. #[error("Invalid chars for key. Key can only alphanumeric chars (0-9, a-z, A-Z)")] InvalidChars, } diff --git a/packages/tracker-core/src/authentication/key/repository/in_memory.rs b/packages/tracker-core/src/authentication/key/repository/in_memory.rs index 13664e27c..5911771d4 100644 --- a/packages/tracker-core/src/authentication/key/repository/in_memory.rs +++ b/packages/tracker-core/src/authentication/key/repository/in_memory.rs @@ -1,6 +1,11 @@ +//! In-memory implementation of the authentication key repository. use crate::authentication::key::{Key, PeerKey}; -/// In-memory implementation of the authentication key repository. 
+/// An in-memory repository for storing authentication keys. +/// +/// This repository maintains a mapping between a peer's [`Key`] and its +/// corresponding [`PeerKey`]. It is designed for use in private tracker +/// environments where keys are maintained in memory. #[derive(Debug, Default)] pub struct InMemoryKeyRepository { /// Tracker users' keys. Only for private trackers. @@ -8,28 +13,66 @@ pub struct InMemoryKeyRepository { } impl InMemoryKeyRepository { - /// It adds a new authentication key. + /// Inserts a new authentication key into the repository. + /// + /// This function acquires a write lock on the internal storage and inserts + /// the provided [`PeerKey`], using its inner [`Key`] as the map key. + /// + /// # Arguments + /// + /// * `auth_key` - A reference to the [`PeerKey`] to be inserted. pub(crate) async fn insert(&self, auth_key: &PeerKey) { self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); } - /// It removes an authentication key. + /// Removes an authentication key from the repository. + /// + /// This function acquires a write lock on the internal storage and removes + /// the key that matches the provided [`Key`]. + /// + /// # Arguments + /// + /// * `key` - A reference to the [`Key`] corresponding to the key to be removed. pub(crate) async fn remove(&self, key: &Key) { self.keys.write().await.remove(key); } + /// Retrieves an authentication key from the repository. + /// + /// This function acquires a read lock on the internal storage and returns a + /// cloned [`PeerKey`] if the provided [`Key`] exists. + /// + /// # Arguments + /// + /// * `key` - A reference to the [`Key`] to look up. + /// + /// # Returns + /// + /// An `Option` containing the matching key if found, or `None` + /// otherwise. pub(crate) async fn get(&self, key: &Key) -> Option { self.keys.read().await.get(key).cloned() } - /// It clears all the authentication keys. + /// Clears all authentication keys from the repository. 
+ /// + /// This function acquires a write lock on the internal storage and removes + /// all entries. #[allow(dead_code)] pub(crate) async fn clear(&self) { let mut keys = self.keys.write().await; keys.clear(); } - /// It resets the authentication keys with a new list of keys. + /// Resets the repository with a new list of authentication keys. + /// + /// This function clears all existing keys and then inserts each key from + /// the provided vector. + /// + /// # Arguments + /// + /// * `peer_keys` - A vector of [`PeerKey`] instances that will replace the + /// current set of keys. pub async fn reset_with(&self, peer_keys: Vec) { let mut keys_lock = self.keys.write().await; diff --git a/packages/tracker-core/src/authentication/key/repository/mod.rs b/packages/tracker-core/src/authentication/key/repository/mod.rs index 51723b68d..3df783622 100644 --- a/packages/tracker-core/src/authentication/key/repository/mod.rs +++ b/packages/tracker-core/src/authentication/key/repository/mod.rs @@ -1,2 +1,3 @@ +//! Key repository implementations. pub mod in_memory; pub mod persisted; diff --git a/packages/tracker-core/src/authentication/key/repository/persisted.rs b/packages/tracker-core/src/authentication/key/repository/persisted.rs index 95a3b874c..e84a23c9b 100644 --- a/packages/tracker-core/src/authentication/key/repository/persisted.rs +++ b/packages/tracker-core/src/authentication/key/repository/persisted.rs @@ -1,14 +1,28 @@ +//! The database repository for the authentication keys. use std::sync::Arc; use crate::authentication::key::{Key, PeerKey}; use crate::databases::{self, Database}; -/// The database repository for the authentication keys. +/// A repository for storing authentication keys in a persistent database. +/// +/// This repository provides methods to add, remove, and load authentication +/// keys from the underlying database. It wraps an instance of a type +/// implementing the [`Database`] trait. 
pub struct DatabaseKeyRepository { database: Arc>, } impl DatabaseKeyRepository { + /// Creates a new `DatabaseKeyRepository` instance. + /// + /// # Arguments + /// + /// * `database` - A shared reference to a boxed database implementation. + /// + /// # Returns + /// + /// A new instance of `DatabaseKeyRepository` #[must_use] pub fn new(database: &Arc>) -> Self { Self { @@ -16,31 +30,43 @@ impl DatabaseKeyRepository { } } - /// It adds a new key to the database. + /// Adds a new authentication key to the database. + /// + /// # Arguments + /// + /// * `peer_key` - A reference to the [`PeerKey`] to be persisted. /// /// # Errors /// - /// Will return a `databases::error::Error` if unable to add the `auth_key` to the database. + /// Returns a [`databases::error::Error`] if the key cannot be added. pub(crate) fn add(&self, peer_key: &PeerKey) -> Result<(), databases::error::Error> { self.database.add_key_to_keys(peer_key)?; Ok(()) } - /// It removes an key from the database. + /// Removes an authentication key from the database. + /// + /// # Arguments + /// + /// * `key` - A reference to the [`Key`] corresponding to the key to remove. /// /// # Errors /// - /// Will return a `database::Error` if unable to remove the `key` from the database. + /// Returns a [`databases::error::Error`] if the key cannot be removed. pub(crate) fn remove(&self, key: &Key) -> Result<(), databases::error::Error> { self.database.remove_key_from_keys(key)?; Ok(()) } - /// It loads all keys from the database. + /// Loads all authentication keys from the database. /// /// # Errors /// - /// Will return a `database::Error` if unable to load the keys from the database. + /// Returns a [`databases::error::Error`] if the keys cannot be loaded. + /// + /// # Returns + /// + /// A vector containing all persisted [`PeerKey`] entries. 
pub(crate) fn load_keys(&self) -> Result, databases::error::Error> { let keys = self.database.load_keys()?; Ok(keys) diff --git a/packages/tracker-core/src/authentication/mod.rs b/packages/tracker-core/src/authentication/mod.rs index 52138d26b..12b742b8b 100644 --- a/packages/tracker-core/src/authentication/mod.rs +++ b/packages/tracker-core/src/authentication/mod.rs @@ -1,3 +1,18 @@ +//! Tracker authentication services and structs. +//! +//! One of the crate responsibilities is to create and keep authentication keys. +//! Auth keys are used by HTTP trackers when the tracker is running in `private` +//! mode. +//! +//! HTTP tracker's clients need to obtain an authentication key before starting +//! requesting the tracker. Once they get one they have to include a `PATH` +//! param with the key in all the HTTP requests. For example, when a peer wants +//! to `announce` itself it has to use the HTTP tracker endpoint: +//! +//! `GET /announce/:key` +//! +//! The common way to obtain the keys is by using the tracker API directly or +//! via other applications like the [Torrust Index](https://github.com/torrust/torrust-index). use crate::CurrentClock; pub mod handler; diff --git a/packages/tracker-core/src/authentication/service.rs b/packages/tracker-core/src/authentication/service.rs index 5ca0a09ec..75b28944f 100644 --- a/packages/tracker-core/src/authentication/service.rs +++ b/packages/tracker-core/src/authentication/service.rs @@ -1,3 +1,4 @@ +//! Authentication service. use std::panic::Location; use std::sync::Arc; @@ -6,6 +7,11 @@ use torrust_tracker_configuration::Core; use super::key::repository::in_memory::InMemoryKeyRepository; use super::{key, Error, Key}; +/// The authentication service responsible for validating peer keys. +/// +/// The service uses an in-memory key repository along with the tracker +/// configuration to determine whether a given peer key is valid. In a private +/// tracker, only registered keys (and optionally unexpired keys) are allowed. 
#[derive(Debug)] pub struct AuthenticationService { /// The tracker configuration. @@ -16,6 +22,18 @@ pub struct AuthenticationService { } impl AuthenticationService { + /// Creates a new instance of the `AuthenticationService`. + /// + /// # Parameters + /// + /// - `config`: A reference to the tracker core configuration. + /// - `in_memory_key_repository`: A shared reference to an in-memory key + /// repository. + /// + /// # Returns + /// + /// An `AuthenticationService` instance initialized with the given + /// configuration and repository. #[must_use] pub fn new(config: &Core, in_memory_key_repository: &Arc) -> Self { Self { @@ -24,12 +42,23 @@ impl AuthenticationService { } } - /// It authenticates the peer `key` against the `Tracker` authentication - /// key list. + /// Authenticates a peer key against the tracker's authentication key list. + /// + /// For private trackers, the key must be registered (and optionally not + /// expired) to be considered valid. For public trackers, authentication + /// always succeeds. + /// + /// # Parameters + /// + /// - `key`: A reference to the peer key that needs to be authenticated. /// /// # Errors /// - /// Will return an error if the the authentication key cannot be verified. + /// Returns an error if: + /// + /// - The tracker is in private mode and the key cannot be found in the + /// repository. + /// - The key is found but fails the expiration check (if expiration is enforced). pub async fn authenticate(&self, key: &Key) -> Result<(), Error> { if self.tracker_is_private() { self.verify_auth_key(key).await @@ -44,11 +73,25 @@ impl AuthenticationService { self.config.private } - /// It verifies an authentication key. + /// Verifies the authentication key against the in-memory repository. + /// + /// This function retrieves the key from the repository. If the key is not + /// found, it returns an error with the caller's location. 
If the key is + /// found, the function then checks the key's expiration based on the + /// tracker configuration. The behavior differs depending on whether a + /// `private` configuration is provided and whether key expiration checking + /// is enabled. + /// + /// # Parameters + /// + /// - `key`: A reference to the peer key that needs to be verified. /// /// # Errors /// - /// Will return a `key::Error` if unable to get any `auth_key`. + /// Returns an error if: + /// + /// - The key is not found in the repository. + /// - The key fails the expiration check when such verification is required. async fn verify_auth_key(&self, key: &Key) -> Result<(), Error> { match self.in_memory_key_repository.get(key).await { None => Err(Error::UnableToReadKey { diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index 365bd0ad9..624e34c9b 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -1,4 +1,10 @@ //! The `MySQL` database driver. +//! +//! This module provides an implementation of the [`Database`] trait for `MySQL` +//! using the `r2d2_mysql` connection pool. It configures the MySQL connection +//! based on a URL, creates the necessary tables (for torrent metrics, torrent +//! whitelist, and authentication keys), and implements all CRUD operations +//! required by the persistence layer. use std::str::FromStr; use std::time::Duration; @@ -15,6 +21,11 @@ use crate::authentication::{self, Key}; const DRIVER: Driver = Driver::MySQL; +/// `MySQL` driver implementation. +/// +/// This struct encapsulates a connection pool for `MySQL`, built using the +/// `r2d2_mysql` connection manager. It implements the [`Database`] trait to +/// provide persistence operations. 
pub(crate) struct Mysql { pool: Pool, } diff --git a/packages/tracker-core/src/databases/driver/sqlite.rs b/packages/tracker-core/src/databases/driver/sqlite.rs index 36ca4eabe..bab2fb6a7 100644 --- a/packages/tracker-core/src/databases/driver/sqlite.rs +++ b/packages/tracker-core/src/databases/driver/sqlite.rs @@ -1,4 +1,10 @@ //! The `SQLite3` database driver. +//! +//! This module provides an implementation of the [`Database`] trait for +//! `SQLite3` using the `r2d2_sqlite` connection pool. It defines the schema for +//! whitelist, torrent metrics, and authentication keys, and provides methods +//! to create and drop tables as well as perform CRUD operations on these +//! persistent objects. use std::panic::Location; use std::str::FromStr; @@ -14,18 +20,29 @@ use crate::authentication::{self, Key}; const DRIVER: Driver = Driver::Sqlite3; +/// `SQLite` driver implementation. +/// +/// This struct encapsulates a connection pool for `SQLite` using the `r2d2_sqlite` +/// connection manager. pub(crate) struct Sqlite { pool: Pool, } impl Sqlite { - /// It instantiates a new `SQLite3` database driver. + /// Instantiates a new `SQLite3` database driver. /// - /// Refer to [`databases::Database::new`](crate::core::databases::Database::new). + /// This function creates a connection manager for the `SQLite` database + /// located at `db_path` and then builds a connection pool using `r2d2`. If + /// the pool cannot be created, an error is returned (wrapped with the + /// appropriate driver information). + /// + /// # Arguments + /// + /// * `db_path` - A string slice representing the file path to the `SQLite` database. /// /// # Errors /// - /// Will return `r2d2::Error` if `db_path` is not able to create `SqLite` database. + /// Returns an [`Error`] if the connection pool cannot be built. 
pub fn new(db_path: &str) -> Result { let manager = SqliteConnectionManager::file(db_path); let pool = r2d2::Pool::builder().build(manager).map_err(|e| (e, DRIVER))?; diff --git a/packages/tracker-core/src/databases/error.rs b/packages/tracker-core/src/databases/error.rs index 6b340080e..fd9adfc22 100644 --- a/packages/tracker-core/src/databases/error.rs +++ b/packages/tracker-core/src/databases/error.rs @@ -1,6 +1,13 @@ //! Database errors. //! -//! This module contains the [Database errors](crate::databases::error::Error). +//! This module defines the [`Error`] enum used to represent errors that occur +//! during database operations. These errors encapsulate issues such as missing +//! query results, malformed queries, connection failures, and connection pool +//! creation errors. Each error variant includes contextual information such as +//! the associated database driver and, when applicable, the source error. +//! +//! External errors from database libraries (e.g., `rusqlite`, `mysql`) are +//! converted into this error type using the provided `From` implementations. use std::panic::Location; use std::sync::Arc; @@ -9,30 +16,43 @@ use torrust_tracker_located_error::{DynError, Located, LocatedError}; use super::driver::Driver; +/// Database error type that encapsulates various failures encountered during +/// database operations. #[derive(thiserror::Error, Debug, Clone)] pub enum Error { - /// The query unexpectedly returned nothing. + /// Indicates that a query unexpectedly returned no rows. + /// + /// This error variant is used when a query that is expected to return a + /// result does not. #[error("The {driver} query unexpectedly returned nothing: {source}")] QueryReturnedNoRows { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, driver: Driver, }, - /// The query was malformed. + /// Indicates that the query was malformed. + /// + /// This error variant is used when the SQL query itself is invalid or + /// improperly formatted. 
#[error("The {driver} query was malformed: {source}")] InvalidQuery { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, driver: Driver, }, - /// Unable to insert a record into the database + /// Indicates a failure to insert a record into the database. + /// + /// This error is raised when an insertion operation fails. #[error("Unable to insert record into {driver} database, {location}")] InsertFailed { location: &'static Location<'static>, driver: Driver, }, - /// Unable to delete a record into the database + /// Indicates a failure to delete a record from the database. + /// + /// This error includes an error code that may be returned by the database + /// driver. #[error("Failed to remove record from {driver} database, error-code: {error_code}, {location}")] DeleteFailed { location: &'static Location<'static>, @@ -40,14 +60,18 @@ pub enum Error { driver: Driver, }, - /// Unable to connect to the database + /// Indicates a failure to connect to the database. + /// + /// This error variant wraps connection-related errors, such as those caused by an invalid URL. #[error("Failed to connect to {driver} database: {source}")] ConnectionError { source: LocatedError<'static, UrlError>, driver: Driver, }, - /// Unable to create a connection pool + /// Indicates a failure to create a connection pool. + /// + /// This error variant is used when the connection pool creation (using r2d2) fails. #[error("Failed to create r2d2 {driver} connection pool: {source}")] ConnectionPool { source: LocatedError<'static, r2d2::Error>, diff --git a/packages/tracker-core/src/databases/mod.rs b/packages/tracker-core/src/databases/mod.rs index 1de13332f..33a7e3c69 100644 --- a/packages/tracker-core/src/databases/mod.rs +++ b/packages/tracker-core/src/databases/mod.rs @@ -1,48 +1,51 @@ //! The persistence module. //! -//! Persistence is currently implemented with one [`Database`] trait. +//! Persistence is currently implemented using a single [`Database`] trait. //! //! 
There are two implementations of the trait (two drivers): //! -//! - `Mysql` -//! - `Sqlite` +//! - **`MySQL`** +//! - **`Sqlite`** //! -//! > **NOTICE**: There are no database migrations. If there are any changes, -//! > we will implemented them or provide a script to migrate to the new schema. +//! > **NOTICE**: There are no database migrations at this time. If schema +//! > changes occur, either migration functionality will be implemented or a +//! > script will be provided to migrate to the new schema. //! -//! The persistent objects are: +//! The persistent objects handled by this module include: //! -//! - [Torrent metrics](#torrent-metrics) -//! - [Torrent whitelist](torrent-whitelist) -//! - [Authentication keys](authentication-keys) +//! - **Torrent metrics**: Metrics such as the number of completed downloads for +//! each torrent. +//! - **Torrent whitelist**: A list of torrents (by infohash) that are allowed. +//! - **Authentication keys**: Expiring authentication keys used to secure +//! access to private trackers. //! -//! # Torrent metrics +//! # Torrent Metrics //! -//! Field | Sample data | Description -//! ---|---|--- -//! `id` | 1 | Autoincrement id -//! `info_hash` | `c1277613db1d28709b034a017ab2cae4be07ae10` | `BitTorrent` infohash V1 -//! `completed` | 20 | The number of peers that have ever completed downloading the torrent associated to this entry. See [`Entry`](torrust_tracker_torrent_repository::entry::Entry) for more information. +//! | Field | Sample data | Description | +//! |-------------|--------------------------------------------|-----------------------------------------------------------------------------| +//! | `id` | 1 | Auto-increment id | +//! | `info_hash` | `c1277613db1d28709b034a017ab2cae4be07ae10` | `BitTorrent` infohash V1 | +//! | `completed` | 20 | The number of peers that have completed downloading the associated torrent. | //! -//! > **NOTICE**: The peer list for a torrent is not persisted. 
Since peer have to re-announce themselves on intervals, the data is be -//! > regenerated again after some minutes. +//! > **NOTICE**: The peer list for a torrent is not persisted. Because peers re-announce at +//! > intervals, the peer list is regenerated periodically. //! -//! # Torrent whitelist +//! # Torrent Whitelist //! -//! Field | Sample data | Description -//! ---|---|--- -//! `id` | 1 | Autoincrement id -//! `info_hash` | `c1277613db1d28709b034a017ab2cae4be07ae10` | `BitTorrent` infohash V1 +//! | Field | Sample data | Description | +//! |-------------|--------------------------------------------|--------------------------------| +//! | `id` | 1 | Auto-increment id | +//! | `info_hash` | `c1277613db1d28709b034a017ab2cae4be07ae10` | `BitTorrent` infohash V1 | //! -//! # Authentication keys +//! # Authentication Keys //! -//! Field | Sample data | Description -//! ---|---|--- -//! `id` | 1 | Autoincrement id -//! `key` | `IrweYtVuQPGbG9Jzx1DihcPmJGGpVy82` | Token -//! `valid_until` | 1672419840 | Timestamp for the expiring date +//! | Field | Sample data | Description | +//! |---------------|------------------------------------|--------------------------------------| +//! | `id` | 1 | Auto-increment id | +//! | `key` | `IrweYtVuQPGbG9Jzx1DihcPmJGGpVy82` | Authentication token (32 chars) | +//! | `valid_until` | 1672419840 | Timestamp indicating expiration time | //! -//! > **NOTICE**: All keys must have an expiration date. +//! > **NOTICE**: All authentication keys must have an expiration date. pub mod driver; pub mod error; pub mod setup; @@ -54,143 +57,159 @@ use torrust_tracker_primitives::PersistentTorrents; use self::error::Error; use crate::authentication::{self, Key}; -/// The persistence trait. It contains all the methods to interact with the database. +/// The persistence trait. 
+/// +/// This trait defines all the methods required to interact with the database, +/// including creating and dropping schema tables, and CRUD operations for +/// torrent metrics, whitelists, and authentication keys. Implementations of +/// this trait must ensure that operations are safe, consistent, and report +/// errors using the [`Error`] type. #[automock] pub trait Database: Sync + Send { - /// It generates the database tables. SQL queries are hardcoded in the trait - /// implementation. + /// Creates the necessary database tables. + /// + /// The SQL queries for table creation are hardcoded in the trait implementation. /// /// # Context: Schema /// /// # Errors /// - /// Will return `Error` if unable to create own tables. + /// Returns an [`Error`] if the tables cannot be created. fn create_database_tables(&self) -> Result<(), Error>; - /// It drops the database tables. + /// Drops the database tables. + /// + /// This operation removes the persistent schema. /// /// # Context: Schema /// /// # Errors /// - /// Will return `Err` if unable to drop tables. + /// Returns an [`Error`] if the tables cannot be dropped. fn drop_database_tables(&self) -> Result<(), Error>; // Torrent Metrics - /// It loads the torrent metrics data from the database. + /// Loads torrent metrics data from the database. /// - /// It returns an array of tuples with the torrent - /// [`InfoHash`] and the - /// [`downloaded`](torrust_tracker_torrent_repository::entry::Torrent::downloaded) counter - /// which is the number of times the torrent has been downloaded. - /// See [`Entry::downloaded`](torrust_tracker_torrent_repository::entry::Torrent::downloaded). + /// This function returns the persistent torrent metrics as a collection of + /// tuples, where each tuple contains an [`InfoHash`] and the `downloaded` + /// counter (i.e. the number of times the torrent has been downloaded). /// /// # Context: Torrent Metrics /// /// # Errors /// - /// Will return `Err` if unable to load. 
+ /// Returns an [`Error`] if the metrics cannot be loaded. fn load_persistent_torrents(&self) -> Result; - /// It saves the torrent metrics data into the database. + /// Saves torrent metrics data into the database. + /// + /// # Arguments + /// + /// * `info_hash` - A reference to the torrent's info hash. + /// * `downloaded` - The number of times the torrent has been downloaded. /// /// # Context: Torrent Metrics /// /// # Errors /// - /// Will return `Err` if unable to save. + /// Returns an [`Error`] if the metrics cannot be saved. fn save_persistent_torrent(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error>; // Whitelist - /// It loads the whitelisted torrents from the database. + /// Loads the whitelisted torrents from the database. /// /// # Context: Whitelist /// /// # Errors /// - /// Will return `Err` if unable to load. + /// Returns an [`Error`] if the whitelist cannot be loaded. fn load_whitelist(&self) -> Result, Error>; - /// It checks if the torrent is whitelisted. + /// Retrieves a whitelisted torrent from the database. /// - /// It returns `Some(InfoHash)` if the torrent is whitelisted, `None` otherwise. + /// Returns `Some(InfoHash)` if the torrent is in the whitelist, or `None` + /// otherwise. /// /// # Context: Whitelist /// /// # Errors /// - /// Will return `Err` if unable to load. + /// Returns an [`Error`] if the whitelist cannot be queried. fn get_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result, Error>; - /// It adds the torrent to the whitelist. + /// Adds a torrent to the whitelist. /// /// # Context: Whitelist /// /// # Errors /// - /// Will return `Err` if unable to save. + /// Returns an [`Error`] if the torrent cannot be added to the whitelist. fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; - /// It checks if the torrent is whitelisted. + /// Checks whether a torrent is whitelisted. 
+ /// + /// This default implementation returns `true` if the infohash is included + /// in the whitelist, or `false` otherwise. /// /// # Context: Whitelist /// /// # Errors /// - /// Will return `Err` if unable to load. + /// Returns an [`Error`] if the whitelist cannot be queried. fn is_info_hash_whitelisted(&self, info_hash: InfoHash) -> Result { Ok(self.get_info_hash_from_whitelist(info_hash)?.is_some()) } - /// It removes the torrent from the whitelist. + /// Removes a torrent from the whitelist. /// /// # Context: Whitelist /// /// # Errors /// - /// Will return `Err` if unable to save. + /// Returns an [`Error`] if the torrent cannot be removed from the whitelist. fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; // Authentication keys - /// It loads the expiring authentication keys from the database. + /// Loads all authentication keys from the database. /// /// # Context: Authentication Keys /// /// # Errors /// - /// Will return `Err` if unable to load. + /// Returns an [`Error`] if the keys cannot be loaded. fn load_keys(&self) -> Result, Error>; - /// It gets an expiring authentication key from the database. + /// Retrieves a specific authentication key from the database. /// - /// It returns `Some(PeerKey)` if a [`PeerKey`](crate::authentication::PeerKey) - /// with the input [`Key`] exists, `None` otherwise. + /// Returns `Some(PeerKey)` if a key corresponding to the provided [`Key`] + /// exists, or `None` otherwise. /// /// # Context: Authentication Keys /// /// # Errors /// - /// Will return `Err` if unable to load. + /// Returns an [`Error`] if the key cannot be queried. fn get_key_from_keys(&self, key: &Key) -> Result, Error>; - /// It adds an expiring authentication key to the database. + /// Adds an authentication key to the database. /// /// # Context: Authentication Keys /// /// # Errors /// - /// Will return `Err` if unable to save. + /// Returns an [`Error`] if the key cannot be saved. 
fn add_key_to_keys(&self, auth_key: &authentication::PeerKey) -> Result; - /// It removes an expiring authentication key from the database. + /// Removes an authentication key from the database. /// /// # Context: Authentication Keys /// /// # Errors /// - /// Will return `Err` if unable to load. + /// Returns an [`Error`] if the key cannot be removed. fn remove_key_from_keys(&self, key: &Key) -> Result; } diff --git a/packages/tracker-core/src/databases/setup.rs b/packages/tracker-core/src/databases/setup.rs index 73ff23feb..6ba9f2a64 100644 --- a/packages/tracker-core/src/databases/setup.rs +++ b/packages/tracker-core/src/databases/setup.rs @@ -1,3 +1,4 @@ +//! This module provides functionality for setting up databases. use std::sync::Arc; use torrust_tracker_configuration::Core; @@ -5,9 +6,38 @@ use torrust_tracker_configuration::Core; use super::driver::{self, Driver}; use super::Database; +/// Initializes and returns a database instance based on the provided configuration. +/// +/// This function creates a new database instance according to the settings +/// defined in the [`Core`] configuration. It selects the appropriate driver +/// (either `Sqlite3` or `MySQL`) as specified in `config.database.driver` and +/// attempts to build the database connection using the path defined in +/// `config.database.path`. +/// +/// The resulting database instance is wrapped in a shared pointer (`Arc`) to a +/// boxed trait object, allowing safe sharing of the database connection across +/// multiple threads. +/// /// # Panics /// -/// Will panic if database cannot be initialized. +/// This function will panic if the database cannot be initialized (i.e., if the +/// driver fails to build the connection). This is enforced by the use of +/// [`expect`](std::result::Result::expect) in the implementation. 
+/// +/// # Example +/// +/// ```rust,no_run +/// use torrust_tracker_configuration::Core; +/// use bittorrent_tracker_core::databases::setup::initialize_database; +/// +/// // Create a default configuration (ensure it is properly set up for your environment) +/// let config = Core::default(); +/// +/// // Initialize the database; this will panic if initialization fails. +/// let database = initialize_database(&config); +/// +/// // The returned database instance can now be used for persistence operations. +/// ``` #[must_use] pub fn initialize_database(config: &Core) -> Arc> { let driver = match config.database.driver { @@ -17,3 +47,15 @@ pub fn initialize_database(config: &Core) -> Arc> { Arc::new(driver::build(&driver, &config.database.path).expect("Database driver build failed.")) } + +#[cfg(test)] +mod tests { + use super::initialize_database; + use crate::test_helpers::tests::ephemeral_configuration; + + #[test] + fn it_should_initialize_the_sqlite_database() { + let config = ephemeral_configuration(); + let _database = initialize_database(&config); + } +} diff --git a/packages/tracker-core/src/error.rs b/packages/tracker-core/src/error.rs index dcdd89668..99ac48ed3 100644 --- a/packages/tracker-core/src/error.rs +++ b/packages/tracker-core/src/error.rs @@ -1,4 +1,12 @@ -//! Errors returned by the core tracker. +//! Core tracker errors. +//! +//! This module defines the error types used internally by the `BitTorrent` +//! tracker core. +//! +//! These errors encapsulate issues such as whitelisting violations, invalid +//! peer key data, and database persistence failures. Each error variant +//! includes contextual information (such as source code location) to facilitate +//! debugging. use std::panic::Location; use bittorrent_primitives::info_hash::InfoHash; @@ -7,9 +15,13 @@ use torrust_tracker_located_error::LocatedError; use super::authentication::key::ParseKeyError; use super::databases; -/// Whitelist errors returned by the core tracker. 
+/// Errors related to torrent whitelisting. +/// +/// This error is returned when an operation involves a torrent that is not +/// present in the whitelist. #[derive(thiserror::Error, Debug, Clone)] pub enum WhitelistError { + /// Indicates that the torrent identified by `info_hash` is not whitelisted. #[error("The torrent: {info_hash}, is not whitelisted, {location}")] TorrentNotWhitelisted { info_hash: InfoHash, @@ -17,19 +29,27 @@ pub enum WhitelistError { }, } -/// Peers keys errors returned by the core tracker. +/// Errors related to peer key operations. +/// +/// This error type covers issues encountered during the handling of peer keys, +/// including validation of key durations, parsing errors, and database +/// persistence problems. #[allow(clippy::module_name_repetitions)] #[derive(thiserror::Error, Debug, Clone)] pub enum PeerKeyError { + /// Returned when the duration specified for the peer key exceeds the + /// maximum. #[error("Invalid peer key duration: {seconds_valid:?}, is not valid")] DurationOverflow { seconds_valid: u64 }, + /// Returned when the provided peer key is invalid. #[error("Invalid key: {key}")] InvalidKey { key: String, source: LocatedError<'static, ParseKeyError>, }, + /// Returned when persisting the peer key to the database fails. #[error("Can't persist key: {source}")] DatabaseError { source: LocatedError<'static, databases::error::Error>, diff --git a/packages/tracker-core/src/lib.rs b/packages/tracker-core/src/lib.rs index ac6e4edac..843817deb 100644 --- a/packages/tracker-core/src/lib.rs +++ b/packages/tracker-core/src/lib.rs @@ -1,315 +1,57 @@ -//! The core `tracker` module contains the generic `BitTorrent` tracker logic which is independent of the delivery layer. +//! The core `bittorrent-tracker-core` crate contains the generic `BitTorrent` +//! tracker logic which is independent of the delivery layer. //! -//! It contains the tracker services and their dependencies. It's a domain layer which does not -//! 
specify how the end user should connect to the `Tracker`. +//! It contains the tracker services and their dependencies. It's a domain layer +//! which does not specify how the end user should connect to the `Tracker`. //! -//! Typically this module is intended to be used by higher modules like: +//! Typically this crate is intended to be used by higher components like: //! //! - A UDP tracker //! - A HTTP tracker //! - A tracker REST API //! //! ```text -//! Delivery layer Domain layer -//! -//! HTTP tracker | -//! UDP tracker |> Core tracker -//! Tracker REST API | +//! Delivery layer | Domain layer +//! ----------------------------------- +//! HTTP tracker | +//! UDP tracker |-> Core tracker +//! Tracker REST API | //! ``` //! //! # Table of contents //! -//! - [Tracker](#tracker) -//! - [Announce request](#announce-request) -//! - [Scrape request](#scrape-request) -//! - [Torrents](#torrents) -//! - [Peers](#peers) +//! - [Introduction](#introduction) //! - [Configuration](#configuration) +//! - [Announce handler](#announce-handler) +//! - [Scrape handler](#scrape-handler) //! - [Authentication](#authentication) -//! - [Statistics](#statistics) -//! - [Persistence](#persistence) -//! -//! # Tracker -//! -//! The `Tracker` is the main struct in this module. `The` tracker has some groups of responsibilities: -//! -//! - **Core tracker**: it handles the information about torrents and peers. -//! - **Authentication**: it handles authentication keys which are used by HTTP trackers. -//! - **Authorization**: it handles the permission to perform requests. -//! - **Whitelist**: when the tracker runs in `listed` or `private_listed` mode all operations are restricted to whitelisted torrents. -//! - **Statistics**: it keeps and serves the tracker statistics. -//! -//! Refer to [torrust-tracker-configuration](https://docs.rs/torrust-tracker-configuration) crate docs to get more information about the tracker settings. -//! -//! ## Announce request -//! -//! 
Handling `announce` requests is the most important task for a `BitTorrent` tracker. -//! -//! A `BitTorrent` swarm is a network of peers that are all trying to download the same torrent. -//! When a peer wants to find other peers it announces itself to the swarm via the tracker. -//! The peer sends its data to the tracker so that the tracker can add it to the swarm. -//! The tracker responds to the peer with the list of other peers in the swarm so that -//! the peer can contact them to start downloading pieces of the file from them. +//! - [Databases](#databases) +//! - [Torrent](#torrent) +//! - [Whitelist](#whitelist) //! -//! Once you have instantiated the `AnnounceHandler` you can `announce` a new [`peer::Peer`](torrust_tracker_primitives::peer::Peer) with: +//! # Introduction //! -//! ```rust,no_run -//! use std::net::SocketAddr; -//! use std::net::IpAddr; -//! use std::net::Ipv4Addr; -//! use std::str::FromStr; +//! The main purpose of this crate is to provide a generic `BitTorrent` tracker. //! -//! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; -//! use torrust_tracker_primitives::DurationSinceUnixEpoch; -//! use torrust_tracker_primitives::peer; -//! use bittorrent_primitives::info_hash::InfoHash; +//! It has two main responsibilities: //! -//! let info_hash = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); +//! - To handle **announce** requests. +//! - To handle **scrape** requests. //! -//! let peer = peer::Peer { -//! peer_id: PeerId(*b"-qB00000000000000001"), -//! peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), -//! updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), -//! uploaded: NumberOfBytes::new(0), -//! downloaded: NumberOfBytes::new(0), -//! left: NumberOfBytes::new(0), -//! event: AnnounceEvent::Completed, -//! }; -//! -//! let peer_ip = IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()); -//! ``` -//! -//! ```text -//! 
let announce_data = announce_handler.announce(&info_hash, &mut peer, &peer_ip).await; -//! ``` -//! -//! The `Tracker` returns the list of peers for the torrent with the infohash `3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0`, -//! filtering out the peer that is making the `announce` request. -//! -//! > **NOTICE**: that the peer argument is mutable because the `Tracker` can change the peer IP if the peer is using a loopback IP. -//! -//! The `peer_ip` argument is the resolved peer ip. It's a common practice that trackers ignore the peer ip in the `announce` request params, -//! and resolve the peer ip using the IP of the client making the request. As the tracker is a domain service, the peer IP must be provided -//! for the `Tracker` user, which is usually a higher component with access the the request metadata, for example, connection data, proxy headers, -//! etcetera. -//! -//! The returned struct is: -//! -//! ```rust,no_run -//! use torrust_tracker_primitives::peer; -//! use torrust_tracker_configuration::AnnouncePolicy; -//! -//! pub struct AnnounceData { -//! pub peers: Vec, -//! pub swarm_stats: SwarmMetadata, -//! pub policy: AnnouncePolicy, // the tracker announce policy. -//! } -//! -//! pub struct SwarmMetadata { -//! pub completed: u32, // The number of peers that have ever completed downloading -//! pub seeders: u32, // The number of active peers that have completed downloading (seeders) -//! pub leechers: u32, // The number of active peers that have not completed downloading (leechers) -//! } -//! -//! // Core tracker configuration -//! pub struct AnnounceInterval { -//! // ... -//! pub interval: u32, // Interval in seconds that the client should wait between sending regular announce requests to the tracker -//! pub interval_min: u32, // Minimum announce interval. Clients must not reannounce more frequently than this -//! // ... -//! } -//! ``` +//! The crate has also other features: //! -//! 
Refer to `BitTorrent` BEPs and other sites for more information about the `announce` request: +//! - **Authentication**: It handles authentication keys which are used by HTTP trackers. +//! - **Persistence**: It handles persistence of data into a database. +//! - **Torrent**: It handles the torrent data. +//! - **Whitelist**: When the tracker runs in [`listed`](https://docs.rs/torrust-tracker-configuration/latest/torrust_tracker_configuration/type.Core.html) mode +//! all operations are restricted to whitelisted torrents. //! -//! - [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) -//! - [BEP 23. Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) -//! - [Vuze docs](https://wiki.vuze.com/w/Announce) -//! -//! ## Scrape request -//! -//! The `scrape` request allows clients to query metadata about the swarm in bulk. -//! -//! An `scrape` request includes a list of infohashes whose swarm metadata you want to collect. -//! -//! The returned struct is: -//! -//! ```rust,no_run -//! use bittorrent_primitives::info_hash::InfoHash; -//! use std::collections::HashMap; -//! -//! pub struct ScrapeData { -//! pub files: HashMap, -//! } -//! -//! pub struct SwarmMetadata { -//! pub complete: u32, // The number of active peers that have completed downloading (seeders) -//! pub downloaded: u32, // The number of peers that have ever completed downloading -//! pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) -//! } -//! ``` -//! -//! The JSON representation of a sample `scrape` response would be like the following: -//! -//! ```json -//! { -//! 'files': { -//! 'xxxxxxxxxxxxxxxxxxxx': {'complete': 11, 'downloaded': 13772, 'incomplete': 19}, -//! 'yyyyyyyyyyyyyyyyyyyy': {'complete': 21, 'downloaded': 206, 'incomplete': 20} -//! } -//! } -//! ``` -//! -//! `xxxxxxxxxxxxxxxxxxxx` and `yyyyyyyyyyyyyyyyyyyy` are 20-byte infohash arrays. -//! 
There are two data structures for infohashes: byte arrays and hex strings: -//! -//! ```rust,no_run -//! use bittorrent_primitives::info_hash::InfoHash; -//! use std::str::FromStr; -//! -//! let info_hash: InfoHash = [255u8; 20].into(); -//! -//! assert_eq!( -//! info_hash, -//! InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() -//! ); -//! ``` -//! Refer to `BitTorrent` BEPs and other sites for more information about the `scrape` request: -//! -//! - [BEP 48. Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) -//! - [BEP 15. UDP Tracker Protocol for `BitTorrent`. Scrape section](https://www.bittorrent.org/beps/bep_0015.html) -//! - [Vuze docs](https://wiki.vuze.com/w/Scrape) -//! -//! ## Torrents -//! -//! The [`torrent`] module contains all the data structures stored by the `Tracker` except for peers. -//! -//! We can represent the data stored in memory internally by the `Tracker` with this JSON object: -//! -//! ```json -//! { -//! "c1277613db1d28709b034a017ab2cae4be07ae10": { -//! "completed": 0, -//! "peers": { -//! "-qB00000000000000001": { -//! "peer_id": "-qB00000000000000001", -//! "peer_addr": "2.137.87.41:1754", -//! "updated": 1672419840, -//! "uploaded": 120, -//! "downloaded": 60, -//! "left": 60, -//! "event": "started" -//! }, -//! "-qB00000000000000002": { -//! "peer_id": "-qB00000000000000002", -//! "peer_addr": "23.17.287.141:2345", -//! "updated": 1679415984, -//! "uploaded": 80, -//! "downloaded": 20, -//! "left": 40, -//! "event": "started" -//! } -//! } -//! } -//! } -//! ``` -//! -//! The `Tracker` maintains an indexed-by-info-hash list of torrents. For each torrent, it stores a torrent `Entry`. -//! The torrent entry has two attributes: -//! -//! - `completed`: which is hte number of peers that have completed downloading the torrent file/s. As they have completed downloading, -//! they have a full version of the torrent data, and they can provide the full data to other peers. 
That's why they are also known as "seeders". -//! - `peers`: an indexed and orderer list of peer for the torrent. Each peer contains the data received from the peer in the `announce` request. -//! -//! The [`torrent`] module not only contains the original data obtained from peer via `announce` requests, it also contains -//! aggregate data that can be derived from the original data. For example: -//! -//! ```rust,no_run -//! pub struct SwarmMetadata { -//! pub complete: u32, // The number of active peers that have completed downloading (seeders) -//! pub downloaded: u32, // The number of peers that have ever completed downloading -//! pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) -//! } -//! -//! ``` -//! -//! > **NOTICE**: that `complete` or `completed` peers are the peers that have completed downloading, but only the active ones are considered "seeders". -//! -//! `SwarmMetadata` struct follows name conventions for `scrape` responses. See [BEP 48](https://www.bittorrent.org/beps/bep_0048.html), while `SwarmMetadata` -//! is used for the rest of cases. -//! -//! Refer to [`torrent`] module for more details about these data structures. -//! -//! ## Peers -//! -//! A `Peer` is the struct used by the `Tracker` to keep peers data: -//! -//! ```rust,no_run -//! use std::net::SocketAddr; - -//! use aquatic_udp_protocol::PeerId; -//! use torrust_tracker_primitives::DurationSinceUnixEpoch; -//! use aquatic_udp_protocol::NumberOfBytes; -//! use aquatic_udp_protocol::AnnounceEvent; -//! -//! pub struct Peer { -//! pub peer_id: PeerId, // The peer ID -//! pub peer_addr: SocketAddr, // Peer socket address -//! pub updated: DurationSinceUnixEpoch, // Last time (timestamp) when the peer was updated -//! pub uploaded: NumberOfBytes, // Number of bytes the peer has uploaded so far -//! pub downloaded: NumberOfBytes, // Number of bytes the peer has downloaded so far -//! 
pub left: NumberOfBytes, // The number of bytes this peer still has to download -//! pub event: AnnounceEvent, // The event the peer has announced: `started`, `completed`, `stopped` -//! } -//! ``` -//! -//! Notice that most of the attributes are obtained from the `announce` request. -//! For example, an HTTP announce request would contain the following `GET` parameters: -//! -//! -//! -//! The `Tracker` keeps an in-memory ordered data structure with all the torrents and a list of peers for each torrent, together with some swarm metrics. -//! -//! We can represent the data stored in memory with this JSON object: -//! -//! ```json -//! { -//! "c1277613db1d28709b034a017ab2cae4be07ae10": { -//! "completed": 0, -//! "peers": { -//! "-qB00000000000000001": { -//! "peer_id": "-qB00000000000000001", -//! "peer_addr": "2.137.87.41:1754", -//! "updated": 1672419840, -//! "uploaded": 120, -//! "downloaded": 60, -//! "left": 60, -//! "event": "started" -//! }, -//! "-qB00000000000000002": { -//! "peer_id": "-qB00000000000000002", -//! "peer_addr": "23.17.287.141:2345", -//! "updated": 1679415984, -//! "uploaded": 80, -//! "downloaded": 20, -//! "left": 40, -//! "event": "started" -//! } -//! } -//! } -//! } -//! ``` -//! -//! That JSON object does not exist, it's only a representation of the `Tracker` torrents data. -//! -//! `c1277613db1d28709b034a017ab2cae4be07ae10` is the torrent infohash and `completed` contains the number of peers -//! that have a full version of the torrent data, also known as seeders. -//! -//! Refer to [`peer`](torrust_tracker_primitives::peer) for more information about peers. +//! Refer to [torrust-tracker-configuration](https://docs.rs/torrust-tracker-configuration) +//! crate docs to get more information about the tracker settings. //! //! # Configuration //! -//! You can control the behavior of this module with the module settings: +//! You can control the behavior of this crate with the `Core` settings: //! //! ```toml //! 
[logging] @@ -341,35 +83,41 @@ //! //! Refer to the [`configuration` module documentation](https://docs.rs/torrust-tracker-configuration) to get more information about all options. //! -//! Services can include extra features like pagination, for example. +//! # Announce handler +//! +//! The `AnnounceHandler` is responsible for handling announce requests. +//! +//! Please refer to the [`announce_handler`] documentation. +//! +//! # Scrape handler +//! +//! The `ScrapeHandler` is responsible for handling scrape requests. +//! +//! Please refer to the [`scrape_handler`] documentation. //! //! # Authentication //! -//! One of the core `Tracker` responsibilities is to create and keep authentication keys. Auth keys are used by HTTP trackers -//! when the tracker is running in `private` or `private_listed` mode. +//! The `Authentication` module is responsible for handling authentication keys which are used by HTTP trackers. +//! +//! Please refer to the [`authentication`] documentation. //! -//! HTTP tracker's clients need to obtain an auth key before starting requesting the tracker. Once the get one they have to include -//! a `PATH` param with the key in all the HTTP requests. For example, when a peer wants to `announce` itself it has to use the -//! HTTP tracker endpoint `GET /announce/:key`. +//! # Databases //! -//! The common way to obtain the keys is by using the tracker API directly or via other applications like the [Torrust Index](https://github.com/torrust/torrust-index). +//! The `Databases` module is responsible for handling persistence of data into a database. //! -//! To learn more about tracker authentication, refer to the following modules : +//! Please refer to the [`databases`] documentation. //! -//! - [`authentication`] module. +//! # Torrent //! -//! # Persistence +//! The `Torrent` module is responsible for handling the torrent data. //! -//! Right now the `Tracker` is responsible for storing and load data into and -//! 
from the database, when persistence is enabled. +//! Please refer to the [`torrent`] documentation. //! -//! There are three types of persistent object: +//! # Whitelist //! -//! - Authentication keys (only expiring keys) -//! - Torrent whitelist -//! - Torrent metrics +//! The `Whitelist` module is responsible for handling the whitelist. //! -//! Refer to [`databases`] module for more information about persistence. +//! Please refer to the [`whitelist`] documentation. pub mod announce_handler; pub mod authentication; pub mod databases; diff --git a/packages/tracker-core/src/scrape_handler.rs b/packages/tracker-core/src/scrape_handler.rs index 60d15de71..1e75580ab 100644 --- a/packages/tracker-core/src/scrape_handler.rs +++ b/packages/tracker-core/src/scrape_handler.rs @@ -1,3 +1,64 @@ +//! Scrape handler. +//! +//! The `scrape` request allows clients to query metadata about the swarm in bulk. +//! +//! An `scrape` request includes a list of infohashes whose swarm metadata you +//! want to collect. +//! +//! ## Scrape Response Format +//! +//! The returned struct is: +//! +//! ```rust,no_run +//! use bittorrent_primitives::info_hash::InfoHash; +//! use std::collections::HashMap; +//! +//! pub struct ScrapeData { +//! pub files: HashMap, +//! } +//! +//! pub struct SwarmMetadata { +//! pub complete: u32, // The number of active peers that have completed downloading (seeders) +//! pub downloaded: u32, // The number of peers that have ever completed downloading +//! pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) +//! } +//! ``` +//! +//! ## Example JSON Response +//! +//! The JSON representation of a sample `scrape` response would be like the following: +//! +//! ```json +//! { +//! 'files': { +//! 'xxxxxxxxxxxxxxxxxxxx': {'complete': 11, 'downloaded': 13772, 'incomplete': 19}, +//! 'yyyyyyyyyyyyyyyyyyyy': {'complete': 21, 'downloaded': 206, 'incomplete': 20} +//! } +//! } +//! ``` +//! +//! 
`xxxxxxxxxxxxxxxxxxxx` and `yyyyyyyyyyyyyyyyyyyy` are 20-byte infohash arrays. +//! There are two data structures for infohashes: byte arrays and hex strings: +//! +//! ```rust,no_run +//! use bittorrent_primitives::info_hash::InfoHash; +//! use std::str::FromStr; +//! +//! let info_hash: InfoHash = [255u8; 20].into(); +//! +//! assert_eq!( +//! info_hash, +//! InfoHash::from_str("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap() +//! ); +//! ``` +//! +//! ## References: +//! +//! Refer to `BitTorrent` BEPs and other sites for more information about the `scrape` request: +//! +//! - [BEP 48. Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) +//! - [BEP 15. UDP Tracker Protocol for `BitTorrent`. Scrape section](https://www.bittorrent.org/beps/bep_0015.html) +//! - [Vuze docs](https://wiki.vuze.com/w/Scrape) use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; @@ -7,8 +68,9 @@ use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use super::torrent::repository::in_memory::InMemoryTorrentRepository; use super::whitelist; +/// Handles scrape requests, providing torrent swarm metadata. pub struct ScrapeHandler { - /// The service to check is a torrent is whitelisted. + /// Service for authorizing access to whitelisted torrents. whitelist_authorization: Arc, /// The in-memory torrents repository. @@ -16,6 +78,7 @@ pub struct ScrapeHandler { } impl ScrapeHandler { + /// Creates a new `ScrapeHandler` instance. #[must_use] pub fn new( whitelist_authorization: &Arc, @@ -27,9 +90,14 @@ impl ScrapeHandler { } } - /// It handles a scrape request. + /// Handles a scrape request for multiple torrents. /// - /// BEP 48: [Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). + /// - Returns metadata for each requested torrent. + /// - If a torrent isn't whitelisted or doesn't exist, returns zeroed stats. 
+ /// + /// # BEP Reference: + /// + /// [BEP 48: Scrape Protocol](https://www.bittorrent.org/beps/bep_0048.html) pub async fn scrape(&self, info_hashes: &Vec) -> ScrapeData { let mut scrape_data = ScrapeData::empty(); diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index 9dac35258..51df97fb5 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -1,3 +1,4 @@ +//! Torrents manager. use std::sync::Arc; use std::time::Duration; @@ -8,6 +9,18 @@ use super::repository::in_memory::InMemoryTorrentRepository; use super::repository::persisted::DatabasePersistentTorrentRepository; use crate::{databases, CurrentClock}; +/// The `TorrentsManager` is responsible for managing torrent entries by +/// integrating persistent storage and in-memory state. It provides methods to +/// load torrent data from the database into memory, and to periodically clean +/// up stale torrent entries by removing inactive peers or entire torrent +/// entries that no longer have active peers. +/// +/// This manager relies on two repositories: +/// +/// - An **in-memory repository** to provide fast access to the current torrent +/// state. +/// - A **persistent repository** that stores aggregate torrent metrics (e.g., +/// seeders count) across tracker restarts. pub struct TorrentsManager { /// The tracker configuration. config: Core, @@ -21,6 +34,19 @@ pub struct TorrentsManager { } impl TorrentsManager { + /// Creates a new instance of `TorrentsManager`. + /// + /// # Arguments + /// + /// * `config` - A reference to the tracker configuration. + /// * `in_memory_torrent_repository` - A shared reference to the in-memory + /// repository of torrents. + /// * `db_torrent_repository` - A shared reference to the persistent + /// repository for torrent metrics. + /// + /// # Returns + /// + /// A new `TorrentsManager` instance with cloned references of the provided dependencies. 
#[must_use] pub fn new( config: &Core, @@ -34,13 +60,16 @@ impl TorrentsManager { } } - /// It loads the torrents from database into memory. It only loads the - /// torrent entry list with the number of seeders for each torrent. Peers - /// data is not persisted. + /// Loads torrents from the persistent database into the in-memory repository. + /// + /// This function retrieves the list of persistent torrent entries (which + /// include only the aggregate metrics, not the detailed peer lists) from + /// the database, and then imports that data into the in-memory repository. /// /// # Errors /// - /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. + /// Returns a `databases::error::Error` if unable to load the persistent + /// torrent data. #[allow(dead_code)] pub(crate) fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.db_torrent_repository.load_all()?; @@ -50,7 +79,18 @@ impl TorrentsManager { Ok(()) } - /// Remove inactive peers and (optionally) peerless torrents. + /// Cleans up torrent entries by removing inactive peers and, optionally, + /// torrents with no active peers. + /// + /// This function performs two cleanup tasks: + /// + /// 1. It removes peers from torrent entries that have not been updated + /// within a cutoff time. The cutoff time is calculated as the current + /// time minus the maximum allowed peer timeout, as specified in the + /// tracker policy. + /// 2. If the tracker is configured to remove peerless torrents + /// (`remove_peerless_torrents` is set), it removes entire torrent + /// entries that have no active peers. 
pub fn cleanup_torrents(&self) { let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))) .unwrap_or_default(); diff --git a/packages/tracker-core/src/torrent/mod.rs b/packages/tracker-core/src/torrent/mod.rs index 7ca9000f8..8ee8fa6d3 100644 --- a/packages/tracker-core/src/torrent/mod.rs +++ b/packages/tracker-core/src/torrent/mod.rs @@ -1,30 +1,168 @@ -//! Structs to store the swarm data. +//! Swarm Data Structures. //! -//! There are to main data structures: +//! This module defines the primary data structures used to store and manage +//! swarm data within the tracker. In `BitTorrent` terminology, a "swarm" is +//! the collection of peers that are sharing or downloading a given torrent. //! -//! - A torrent [`Entry`](torrust_tracker_torrent_repository::entry::Entry): it contains all the information stored by the tracker for one torrent. -//! - The [`SwarmMetadata`](torrust_tracker_primitives::swarm_metadata::SwarmMetadata): it contains aggregate information that can me derived from the torrent entries. +//! There are two main types of data stored: //! -//! A "swarm" is a network of peers that are trying to download the same torrent. +//! - **Torrent Entry** (`Entry`): Contains all the information the tracker +//! stores for a single torrent, including the list of peers currently in the +//! swarm. This data is crucial for peers to locate each other and initiate +//! downloads. //! -//! The torrent entry contains the "swarm" data, which is basically the list of peers in the swarm. -//! That's the most valuable information the peer want to get from the tracker, because it allows them to -//! start downloading torrent from those peers. +//! - **Swarm Metadata** (`SwarmMetadata`): Contains aggregate data derived from +//! all torrent entries. This metadata is split into: +//! - **Active Peers Data:** Metrics related to the peers that are currently +//! active in the swarm. +//! 
- **Historical Data:** Metrics collected since the tracker started, such +//! as the total number of completed downloads. //! -//! The "swarm metadata" contains aggregate data derived from the torrent entries. There two types of data: +//! ## Metrics Collected //! -//! - For **active peers**: metrics related to the current active peers in the swarm. -//! - **Historical data**: since the tracker started running. +//! The tracker collects and aggregates the following metrics: //! -//! The tracker collects metrics for: +//! - The total number of peers that have completed downloading the torrent +//! since the tracker began collecting metrics. +//! - The number of completed downloads from peers that remain active (i.e., seeders). +//! - The number of active peers that have not completed downloading the torrent (i.e., leechers). //! -//! - The number of peers that have completed downloading the torrent since the tracker started collecting metrics. -//! - The number of peers that have completed downloading the torrent and are still active, that means they are actively participating in the network, -//! by announcing themselves periodically to the tracker. Since they have completed downloading they have a full copy of the torrent data. Peers with a -//! full copy of the data are called "seeders". -//! - The number of peers that have NOT completed downloading the torrent and are still active, that means they are actively participating in the network. -//! Peer that don not have a full copy of the torrent data are called "leechers". +//! This information is used both to inform peers about available connections +//! and to provide overall swarm statistics. //! +//! This module re-exports core types from the torrent repository crate to +//! simplify integration. +//! +//! ## Internal Data Structures +//! +//! The [`torrent`](crate::torrent) module contains all the data structures +//! stored by the tracker except for peers. +//! +//! 
We can represent the data stored in memory internally by the tracker with
+//! this JSON object:
+//!
+//! ```json
+//! {
+//! "c1277613db1d28709b034a017ab2cae4be07ae10": {
+//! "completed": 0,
+//! "peers": {
+//! "-qB00000000000000001": {
+//! "peer_id": "-qB00000000000000001",
+//! "peer_addr": "2.137.87.41:1754",
+//! "updated": 1672419840,
+//! "uploaded": 120,
+//! "downloaded": 60,
+//! "left": 60,
+//! "event": "started"
+//! },
+//! "-qB00000000000000002": {
+//! "peer_id": "-qB00000000000000002",
+//! "peer_addr": "23.17.287.141:2345",
+//! "updated": 1679415984,
+//! "uploaded": 80,
+//! "downloaded": 20,
+//! "left": 40,
+//! "event": "started"
+//! }
+//! }
+//! }
+//! }
+//! ```
+//!
+//! The tracker maintains an indexed-by-info-hash list of torrents. For each
+//! torrent, it stores a torrent `Entry`. The torrent entry has two attributes:
+//!
+//! - `completed`: which is the number of peers that have completed downloading
+//! the torrent file/s. As they have completed downloading, they have a full
+//! version of the torrent data, and they can provide the full data to other
+//! peers. That's why they are also known as "seeders".
+//! - `peers`: an indexed and ordered list of peers for the torrent. Each peer
+//! contains the data received from the peer in the `announce` request.
+//!
+//! The [`crate::torrent`] module not only contains the original data obtained
+//! from peers via `announce` requests, it also contains aggregate data that can
+//! be derived from the original data. For example:
+//!
+//! ```rust,no_run
+//! pub struct SwarmMetadata {
+//! pub complete: u32, // The number of active peers that have completed downloading (seeders)
+//! pub downloaded: u32, // The number of peers that have ever completed downloading
+//! pub incomplete: u32, // The number of active peers that have not completed downloading (leechers)
+//! }
+//! ```
+//!
+//! > **NOTICE**: that `complete` or `completed` peers are the peers that have
+//! 
> completed downloading, but only the active ones are considered "seeders". +//! +//! `SwarmMetadata` struct follows name conventions for `scrape` responses. See +//! [BEP 48](https://www.bittorrent.org/beps/bep_0048.html), while `SwarmMetadata` +//! is used for the rest of cases. +//! +//! ## Peers +//! +//! A `Peer` is the struct used by the tracker to keep peers data: +//! +//! ```rust,no_run +//! use std::net::SocketAddr; +//! use aquatic_udp_protocol::PeerId; +//! use torrust_tracker_primitives::DurationSinceUnixEpoch; +//! use aquatic_udp_protocol::NumberOfBytes; +//! use aquatic_udp_protocol::AnnounceEvent; +//! +//! pub struct Peer { +//! pub peer_id: PeerId, // The peer ID +//! pub peer_addr: SocketAddr, // Peer socket address +//! pub updated: DurationSinceUnixEpoch, // Last time (timestamp) when the peer was updated +//! pub uploaded: NumberOfBytes, // Number of bytes the peer has uploaded so far +//! pub downloaded: NumberOfBytes, // Number of bytes the peer has downloaded so far +//! pub left: NumberOfBytes, // The number of bytes this peer still has to download +//! pub event: AnnounceEvent, // The event the peer has announced: `started`, `completed`, `stopped` +//! } +//! ``` +//! +//! Notice that most of the attributes are obtained from the `announce` request. +//! For example, an HTTP announce request would contain the following `GET` parameters: +//! +//! +//! +//! The `Tracker` keeps an in-memory ordered data structure with all the torrents and a list of peers for each torrent, together with some swarm metrics. +//! +//! We can represent the data stored in memory with this JSON object: +//! +//! ```json +//! { +//! "c1277613db1d28709b034a017ab2cae4be07ae10": { +//! "completed": 0, +//! "peers": { +//! "-qB00000000000000001": { +//! "peer_id": "-qB00000000000000001", +//! "peer_addr": "2.137.87.41:1754", +//! "updated": 1672419840, +//! "uploaded": 120, +//! "downloaded": 60, +//! "left": 60, +//! "event": "started" +//! }, +//! 
"-qB00000000000000002": { +//! "peer_id": "-qB00000000000000002", +//! "peer_addr": "23.17.287.141:2345", +//! "updated": 1679415984, +//! "uploaded": 80, +//! "downloaded": 20, +//! "left": 40, +//! "event": "started" +//! } +//! } +//! } +//! } +//! ``` +//! +//! That JSON object does not exist, it's only a representation of the `Tracker` torrents data. +//! +//! `c1277613db1d28709b034a017ab2cae4be07ae10` is the torrent infohash and `completed` contains the number of peers +//! that have a full version of the torrent data, also known as seeders. +//! +//! Refer to [`peer`](torrust_tracker_primitives::peer) for more information about peers. pub mod manager; pub mod repository; pub mod services; @@ -33,7 +171,11 @@ pub mod services; use torrust_tracker_torrent_repository::EntryMutexStd; use torrust_tracker_torrent_repository::TorrentsSkipMapMutexStd; -// Currently used types from the torrent repository crate. +/// Alias for the primary torrent collection type, implemented as a skip map +/// wrapped in a mutex. This type is used internally by the tracker to manage +/// and access torrent entries. pub(crate) type Torrents = TorrentsSkipMapMutexStd; + +/// Alias for a single torrent entry. #[cfg(test)] pub(crate) type TorrentEntry = EntryMutexStd; diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 26302260b..584feabc9 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -1,3 +1,4 @@ +//! In-memory torrents repository. use std::cmp::max; use std::sync::Arc; @@ -13,51 +14,126 @@ use torrust_tracker_torrent_repository::EntryMutexStd; use crate::torrent::Torrents; -/// The in-memory torrents repository. +/// In-memory repository for torrent entries. /// -/// There are many implementations of the repository trait. 
We tried with -/// different types of data structures, but the best performance was with -/// the one we use for production. We kept the other implementations for -/// reference. +/// This repository manages the torrent entries and their associated peer lists +/// in memory. It is built on top of a high-performance data structure (the +/// production implementation) and provides methods to update, query, and remove +/// torrent entries as well as to import persisted data. +/// +/// Multiple implementations were considered, and the chosen implementation is +/// used in production. Other implementations are kept for reference. #[derive(Debug, Default)] pub struct InMemoryTorrentRepository { - /// The in-memory torrents repository implementation. + /// The underlying in-memory data structure that stores torrent entries. torrents: Arc, } impl InMemoryTorrentRepository { - /// It inserts (or updates if it's already in the list) the peer in the - /// torrent entry. + /// Inserts or updates a peer in the torrent entry corresponding to the + /// given infohash. + /// + /// If the torrent entry already exists, the peer is added to its peer list; + /// otherwise, a new torrent entry is created. + /// + /// # Arguments + /// + /// * `info_hash` - The unique identifier of the torrent. + /// * `peer` - The peer to insert or update in the torrent entry. pub fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { self.torrents.upsert_peer(info_hash, peer); } + /// Removes a torrent entry from the repository. + /// + /// This method is only available in tests. It removes the torrent entry + /// associated with the given info hash and returns the removed entry if it + /// existed. + /// + /// # Arguments + /// + /// * `key` - The info hash of the torrent to remove. + /// + /// # Returns + /// + /// An `Option` containing the removed torrent entry if it existed. 
#[cfg(test)] #[must_use] pub(crate) fn remove(&self, key: &InfoHash) -> Option { self.torrents.remove(key) } + /// Removes inactive peers from all torrent entries. + /// + /// A peer is considered inactive if its last update timestamp is older than + /// the provided cutoff time. + /// + /// # Arguments + /// + /// * `current_cutoff` - The cutoff timestamp; peers not updated since this + /// time will be removed. pub(crate) fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { self.torrents.remove_inactive_peers(current_cutoff); } + /// Removes torrent entries that have no active peers. + /// + /// Depending on the tracker policy, torrents without any peers may be + /// removed to conserve memory. + /// + /// # Arguments + /// + /// * `policy` - The tracker policy containing the configuration for + /// removing peerless torrents. pub(crate) fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { self.torrents.remove_peerless_torrents(policy); } + /// Retrieves a torrent entry by its infohash. + /// + /// # Arguments + /// + /// * `key` - The info hash of the torrent. + /// + /// # Returns + /// + /// An `Option` containing the torrent entry if found. #[must_use] pub(crate) fn get(&self, key: &InfoHash) -> Option { self.torrents.get(key) } + /// Retrieves a paginated list of torrent entries. + /// + /// This method returns a vector of tuples, each containing an infohash and + /// its associated torrent entry. The pagination parameters (offset and limit) + /// can be used to control the size of the result set. + /// + /// # Arguments + /// + /// * `pagination` - An optional reference to a `Pagination` object. + /// + /// # Returns + /// + /// A vector of `(InfoHash, EntryMutexStd)` tuples. #[must_use] pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { self.torrents.get_paginated(pagination) } - /// It returns the data for a `scrape` response or empty if the torrent is - /// not found. 
+ /// Retrieves swarm metadata for a given torrent. + /// + /// This method returns the swarm metadata (aggregate information such as + /// peer counts) for the torrent specified by the infohash. If the torrent + /// entry is not found, a zeroed metadata struct is returned. + /// + /// # Arguments + /// + /// * `info_hash` - The info hash of the torrent. + /// + /// # Returns + /// + /// A `SwarmMetadata` struct containing the aggregated torrent data. #[must_use] pub(crate) fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { match self.torrents.get(info_hash) { @@ -66,9 +142,23 @@ impl InMemoryTorrentRepository { } } - /// Get torrent peers for a given torrent and client. + /// Retrieves torrent peers for a given torrent and client, excluding the + /// requesting client. + /// + /// This method filters out the client making the request (based on its + /// network address) and returns up to a maximum number of peers, defined by + /// the greater of the provided limit or the global `TORRENT_PEERS_LIMIT`. + /// + /// # Arguments + /// + /// * `info_hash` - The info hash of the torrent. + /// * `peer` - The client peer that should be excluded from the returned list. + /// * `limit` - The maximum number of peers to return. + /// + /// # Returns /// - /// It filters out the client making the request. + /// A vector of peers (wrapped in `Arc`) representing the active peers for + /// the torrent, excluding the requesting client. #[must_use] pub(crate) fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { match self.torrents.get(info_hash) { @@ -77,7 +167,19 @@ impl InMemoryTorrentRepository { } } - /// Get torrent peers for a given torrent. + /// Retrieves the list of peers for a given torrent. + /// + /// This method returns up to `TORRENT_PEERS_LIMIT` peers for the torrent + /// specified by the info-hash. + /// + /// # Arguments + /// + /// * `info_hash` - The info hash of the torrent. 
+ /// + /// # Returns + /// + /// A vector of peers (wrapped in `Arc`) representing the active peers for + /// the torrent. #[must_use] pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { match self.torrents.get(info_hash) { @@ -86,12 +188,28 @@ impl InMemoryTorrentRepository { } } - /// It calculates and returns the general [`TorrentsMetrics`]. + /// Calculates and returns overall torrent metrics. + /// + /// The returned [`TorrentsMetrics`] contains aggregate data such as the + /// total number of torrents, total complete (seeders), incomplete (leechers), + /// and downloaded counts. + /// + /// # Returns + /// + /// A [`TorrentsMetrics`] struct with the aggregated metrics. #[must_use] pub fn get_torrents_metrics(&self) -> TorrentsMetrics { self.torrents.get_metrics() } + /// Imports persistent torrent data into the in-memory repository. + /// + /// This method takes a set of persisted torrent entries (e.g., from a database) + /// and imports them into the in-memory repository for immediate access. + /// + /// # Arguments + /// + /// * `persistent_torrents` - A reference to the persisted torrent data. pub fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { self.torrents.import_persistent(persistent_torrents); } diff --git a/packages/tracker-core/src/torrent/repository/mod.rs b/packages/tracker-core/src/torrent/repository/mod.rs index 51723b68d..ae789e5e9 100644 --- a/packages/tracker-core/src/torrent/repository/mod.rs +++ b/packages/tracker-core/src/torrent/repository/mod.rs @@ -1,2 +1,3 @@ +//! Torrent repository implementations. pub mod in_memory; pub mod persisted; diff --git a/packages/tracker-core/src/torrent/repository/persisted.rs b/packages/tracker-core/src/torrent/repository/persisted.rs index 0430f03bb..694a2fe7c 100644 --- a/packages/tracker-core/src/torrent/repository/persisted.rs +++ b/packages/tracker-core/src/torrent/repository/persisted.rs @@ -1,3 +1,4 @@ +//! 
The repository that stored persistent torrents' data into the database. use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; @@ -6,17 +7,39 @@ use torrust_tracker_primitives::PersistentTorrents; use crate::databases::error::Error; use crate::databases::Database; -/// Torrent repository implementation that persists the torrents in a database. +/// Torrent repository implementation that persists torrent metrics in a database. /// -/// Not all the torrent in-memory data is persisted. For now only some of the -/// torrent metrics are persisted. +/// This repository persists only a subset of the torrent data: the torrent +/// metrics, specifically the number of downloads (or completed counts) for each +/// torrent. It relies on a database driver (either `SQLite3` or `MySQL`) that +/// implements the [`Database`] trait to perform the actual persistence +/// operations. +/// +/// # Note +/// +/// Not all in-memory torrent data is persisted; only the aggregate metrics are +/// stored. pub struct DatabasePersistentTorrentRepository { - /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) - /// or [`MySQL`](crate::core::databases::mysql) + /// A shared reference to the database driver implementation. + /// + /// The driver must implement the [`Database`] trait. This allows for + /// different underlying implementations (e.g., `SQLite3` or `MySQL`) to be + /// used interchangeably. database: Arc>, } impl DatabasePersistentTorrentRepository { + /// Creates a new instance of `DatabasePersistentTorrentRepository`. + /// + /// # Arguments + /// + /// * `database` - A shared reference to a boxed database driver + /// implementing the [`Database`] trait. + /// + /// # Returns + /// + /// A new `DatabasePersistentTorrentRepository` instance with a cloned + /// reference to the provided database. 
#[must_use] pub fn new(database: &Arc>) -> DatabasePersistentTorrentRepository { Self { @@ -24,20 +47,31 @@ impl DatabasePersistentTorrentRepository { } } - /// It loads the persistent torrents from the database. + /// Loads all persistent torrent metrics from the database. + /// + /// This function retrieves the torrent metrics (e.g., download counts) from the persistent store + /// and returns them as a [`PersistentTorrents`] map. /// /// # Errors /// - /// Will return a database `Err` if unable to load. + /// Returns an [`Error`] if the underlying database query fails. pub(crate) fn load_all(&self) -> Result { self.database.load_persistent_torrents() } - /// It saves the persistent torrent into the database. + /// Saves the persistent torrent metric into the database. + /// + /// This function stores or updates the download count for the torrent + /// identified by the provided infohash. + /// + /// # Arguments + /// + /// * `info_hash` - The info hash of the torrent. + /// * `downloaded` - The number of times the torrent has been downloaded. /// /// # Errors /// - /// Will return a database `Err` if unable to save. + /// Returns an [`Error`] if the database operation fails. pub(crate) fn save(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error> { self.database.save_persistent_torrent(info_hash, downloaded) } diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index 4c470bb74..98d25ba47 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -1,9 +1,17 @@ //! Core tracker domain services. //! -//! There are two services: +//! This module defines the primary services for retrieving torrent-related data +//! from the tracker. There are two main services: //! -//! - [`get_torrent_info`]: it returns all the data about one torrent. -//! - [`get_torrents`]: it returns data about some torrent in bulk excluding the peer list. +//! 
- [`get_torrent_info`]: Returns all available data (including the list of +//! peers) about a single torrent. +//! - [`get_torrents_page`] and [`get_torrents`]: Return summarized data about +//! multiple torrents, excluding the peer list. +//! +//! The full torrent info is represented by the [`Info`] struct, which includes +//! swarm data (peer list) and aggregate metrics. The [`BasicInfo`] struct +//! provides similar data but without the list of peers, making it suitable for +//! bulk queries. use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; @@ -13,37 +21,74 @@ use torrust_tracker_torrent_repository::entry::EntrySync; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; -/// It contains all the information the tracker has about a torrent +/// Full torrent information, including swarm (peer) details. +/// +/// This struct contains all the information that the tracker holds about a +/// torrent, including the infohash, aggregate swarm metrics (seeders, leechers, +/// completed downloads), and the complete list of peers in the swarm. #[derive(Debug, PartialEq)] pub struct Info { /// The infohash of the torrent this data is related to pub info_hash: InfoHash, - /// The total number of seeders for this torrent. Peer that actively serving a full copy of the torrent data + + /// The total number of seeders for this torrent. Peer that actively serving + /// a full copy of the torrent data pub seeders: u64, - /// The total number of peers that have ever complete downloading this torrent + + /// The total number of peers that have ever complete downloading this + /// torrent pub completed: u64, - /// The total number of leechers for this torrent. Peers that actively downloading this torrent + + /// The total number of leechers for this torrent. 
Peers that actively + /// downloading this torrent pub leechers: u64, - /// The swarm: the list of peers that are actively trying to download or serving this torrent + + /// The swarm: the list of peers that are actively trying to download or + /// serving this torrent pub peers: Option>, } -/// It contains only part of the information the tracker has about a torrent +/// Basic torrent information, excluding the list of peers. /// -/// It contains the same data as [Info] but without the list of peers in the swarm. +/// This struct contains the same aggregate metrics as [`Info`] (infohash, +/// seeders, completed, leechers) but omits the peer list. It is used when only +/// summary information is needed. #[derive(Debug, PartialEq, Clone)] pub struct BasicInfo { /// The infohash of the torrent this data is related to pub info_hash: InfoHash, - /// The total number of seeders for this torrent. Peer that actively serving a full copy of the torrent data + + /// The total number of seeders for this torrent. Peer that actively serving + /// a full copy of the torrent data pub seeders: u64, - /// The total number of peers that have ever complete downloading this torrent + + /// The total number of peers that have ever complete downloading this + /// torrent pub completed: u64, - /// The total number of leechers for this torrent. Peers that actively downloading this torrent + + /// The total number of leechers for this torrent. Peers that actively + /// downloading this torrent pub leechers: u64, } -/// It returns all the information the tracker has about one torrent in a [Info] struct. +/// Retrieves complete torrent information for a given torrent. +/// +/// This function queries the in-memory torrent repository for a torrent entry +/// matching the provided infohash. If found, it extracts the swarm metadata +/// (aggregate metrics) and the current list of peers, and returns an [`Info`] +/// struct. 
+/// +/// # Arguments +/// +/// * `in_memory_torrent_repository` - A shared reference to the in-memory +/// torrent repository. +/// * `info_hash` - A reference to the torrent's infohash. +/// +/// # Returns +/// +/// An [`Option`] which is: +/// - `Some(Info)` if the torrent exists in the repository. +/// - `None` if the torrent is not found. #[must_use] pub fn get_torrent_info(in_memory_torrent_repository: &Arc, info_hash: &InfoHash) -> Option { let torrent_entry_option = in_memory_torrent_repository.get(info_hash); @@ -65,7 +110,23 @@ pub fn get_torrent_info(in_memory_torrent_repository: &Arc, @@ -87,7 +148,23 @@ pub fn get_torrents_page( basic_infos } -/// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list. +/// Retrieves summarized torrent information for a specified list of torrents. +/// +/// This function iterates over a slice of infohashes, fetches the corresponding +/// swarm metadata from the in-memory repository (if available), and returns a +/// vector of [`BasicInfo`] structs. This function is useful for bulk queries +/// where detailed peer information is not required. +/// +/// # Arguments +/// +/// * `in_memory_torrent_repository` - A shared reference to the in-memory +/// torrent repository. +/// * `info_hashes` - A slice of infohashes for which to retrieve the torrent +/// information. +/// +/// # Returns +/// +/// A vector of [`BasicInfo`] structs for the requested torrents. #[must_use] pub fn get_torrents(in_memory_torrent_repository: &Arc, info_hashes: &[InfoHash]) -> Vec { let mut basic_infos: Vec = vec![]; diff --git a/packages/tracker-core/src/whitelist/authorization.rs b/packages/tracker-core/src/whitelist/authorization.rs index 3b7b8b4fb..a8323457b 100644 --- a/packages/tracker-core/src/whitelist/authorization.rs +++ b/packages/tracker-core/src/whitelist/authorization.rs @@ -1,3 +1,4 @@ +//! Whitelist authorization. 
use std::panic::Location; use std::sync::Arc; @@ -8,6 +9,10 @@ use tracing::instrument; use super::repository::in_memory::InMemoryWhitelist; use crate::error::WhitelistError; +/// Manages the authorization of torrents based on the whitelist. +/// +/// Used to determine whether a given torrent (`infohash`) is allowed +/// to be announced or scraped from the tracker. pub struct WhitelistAuthorization { /// Core tracker configuration. config: Core, @@ -17,7 +22,14 @@ pub struct WhitelistAuthorization { } impl WhitelistAuthorization { - /// Creates a new authorization instance. + /// Creates a new `WhitelistAuthorization` instance. + /// + /// # Arguments + /// - `config`: Tracker configuration. + /// - `in_memory_whitelist`: The in-memory whitelist instance. + /// + /// # Returns + /// A new `WhitelistAuthorization` instance. pub fn new(config: &Core, in_memory_whitelist: &Arc) -> Self { Self { config: config.clone(), @@ -25,12 +37,15 @@ impl WhitelistAuthorization { } } - /// It returns true if the torrent is authorized. + /// Checks whether a torrent is authorized. /// - /// # Errors + /// - If the tracker is **public**, all torrents are authorized. + /// - If the tracker is **private** (listed mode), only whitelisted torrents + /// are authorized. /// - /// Will return an error if the tracker is running in `listed` mode - /// and the infohash is not whitelisted. + /// # Errors + /// Returns `WhitelistError::TorrentNotWhitelisted` if the tracker is in `listed` mode + /// and the `info_hash` is not in the whitelist. #[instrument(skip(self, info_hash), err)] pub async fn authorize(&self, info_hash: &InfoHash) -> Result<(), WhitelistError> { if !self.is_listed() { @@ -47,12 +62,12 @@ impl WhitelistAuthorization { }) } - /// Returns `true` is the tracker is in listed mode. + /// Checks if the tracker is running in "listed" mode. fn is_listed(&self) -> bool { self.config.listed } - /// It checks if a torrent is whitelisted. 
+ /// Checks if a torrent is present in the whitelist. async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> bool { self.in_memory_whitelist.contains(info_hash).await } diff --git a/packages/tracker-core/src/whitelist/manager.rs b/packages/tracker-core/src/whitelist/manager.rs index 5ebd6db36..452fcb6c5 100644 --- a/packages/tracker-core/src/whitelist/manager.rs +++ b/packages/tracker-core/src/whitelist/manager.rs @@ -1,3 +1,7 @@ +//! Whitelist manager. +//! +//! This module provides the `WhitelistManager` struct, which is responsible for +//! managing the whitelist of torrents. use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; @@ -5,8 +9,11 @@ use bittorrent_primitives::info_hash::InfoHash; use super::repository::in_memory::InMemoryWhitelist; use super::repository::persisted::DatabaseWhitelist; use crate::databases; - -/// It handles the list of allowed torrents. Only for listed trackers. +/// Manages the whitelist of allowed torrents. +/// +/// This structure handles both the in-memory and persistent representations of +/// the whitelist. It is primarily relevant for private trackers that restrict +/// access to specific torrents. pub struct WhitelistManager { /// The in-memory list of allowed torrents. in_memory_whitelist: Arc, @@ -16,6 +23,17 @@ pub struct WhitelistManager { } impl WhitelistManager { + /// Creates a new `WhitelistManager` instance. + /// + /// # Arguments + /// + /// - `database_whitelist`: Persistent database-backed whitelist repository. + /// - `in_memory_whitelist`: In-memory whitelist repository for fast runtime + /// access. + /// + /// # Returns + /// + /// A new `WhitelistManager` instance. #[must_use] pub fn new(database_whitelist: Arc, in_memory_whitelist: Arc) -> Self { Self { @@ -24,35 +42,39 @@ impl WhitelistManager { } } - /// It adds a torrent to the whitelist. - /// Adding torrents is not relevant to public trackers. + /// Adds a torrent to the whitelist. 
/// - /// # Errors + /// This operation is relevant for private trackers to control which + /// torrents are allowed. /// - /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. + /// # Errors + /// Returns a `database::Error` if the operation fails in the database. pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { self.database_whitelist.add(info_hash)?; self.in_memory_whitelist.add(info_hash).await; Ok(()) } - /// It removes a torrent from the whitelist. - /// Removing torrents is not relevant to public trackers. + /// Removes a torrent from the whitelist. /// - /// # Errors + /// This operation is relevant for private trackers to revoke access to + /// specific torrents. /// - /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. + /// # Errors + /// Returns a `database::Error` if the operation fails in the database. pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { self.database_whitelist.remove(info_hash)?; self.in_memory_whitelist.remove(info_hash).await; Ok(()) } - /// It loads the whitelist from the database. + /// Loads the whitelist from the database into memory. /// - /// # Errors + /// This is useful when restarting the tracker to ensure the in-memory + /// whitelist is synchronized with the database. /// - /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. + /// # Errors + /// Returns a `database::Error` if the operation fails to load from the database. 
pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> { let whitelisted_torrents_from_database = self.database_whitelist.load_from_database()?; diff --git a/packages/tracker-core/src/whitelist/mod.rs b/packages/tracker-core/src/whitelist/mod.rs index a39768e93..d9ad18311 100644 --- a/packages/tracker-core/src/whitelist/mod.rs +++ b/packages/tracker-core/src/whitelist/mod.rs @@ -1,3 +1,21 @@ +//! This module contains the logic to manage the torrent whitelist. +//! +//! In tracker configurations where the tracker operates in "listed" mode, only +//! torrents that have been explicitly added to the whitelist are allowed to +//! perform announce and scrape actions. This module provides all the +//! functionality required to manage such a whitelist. +//! +//! The module is organized into the following submodules: +//! +//! - **`authorization`**: Contains the logic to authorize torrents based on their +//! whitelist status. +//! - **`manager`**: Provides high-level management functions for the whitelist, +//! such as adding or removing torrents. +//! - **`repository`**: Implements persistence for whitelist data. +//! - **`setup`**: Provides initialization routines for setting up the whitelist +//! system. +//! - **`test_helpers`**: Contains helper functions and fixtures for testing +//! whitelist functionality. pub mod authorization; pub mod manager; pub mod repository; diff --git a/packages/tracker-core/src/whitelist/repository/in_memory.rs b/packages/tracker-core/src/whitelist/repository/in_memory.rs index 4faeda784..0cee3a94b 100644 --- a/packages/tracker-core/src/whitelist/repository/in_memory.rs +++ b/packages/tracker-core/src/whitelist/repository/in_memory.rs @@ -1,29 +1,42 @@ +//! The in-memory list of allowed torrents. use bittorrent_primitives::info_hash::InfoHash; -/// The in-memory list of allowed torrents. +/// In-memory whitelist to manage allowed torrents. +/// +/// Stores `InfoHash` values for quick lookup and modification. 
#[derive(Debug, Default)] pub struct InMemoryWhitelist { - /// The list of allowed torrents. + /// A thread-safe set of whitelisted `InfoHash` values. whitelist: tokio::sync::RwLock>, } impl InMemoryWhitelist { - /// It adds a torrent from the whitelist in memory. + /// Adds a torrent to the in-memory whitelist. + /// + /// # Returns + /// + /// - `true` if the torrent was newly added. + /// - `false` if the torrent was already in the whitelist. pub async fn add(&self, info_hash: &InfoHash) -> bool { self.whitelist.write().await.insert(*info_hash) } - /// It removes a torrent from the whitelist in memory. + /// Removes a torrent from the in-memory whitelist. + /// + /// # Returns + /// + /// - `true` if the torrent was present and removed. + /// - `false` if the torrent was not found. pub(crate) async fn remove(&self, info_hash: &InfoHash) -> bool { self.whitelist.write().await.remove(info_hash) } - /// It checks if it contains an info-hash. + /// Checks if a torrent is in the whitelist. pub async fn contains(&self, info_hash: &InfoHash) -> bool { self.whitelist.read().await.contains(info_hash) } - /// It clears the whitelist. + /// Clears all torrents from the whitelist. pub(crate) async fn clear(&self) { let mut whitelist = self.whitelist.write().await; whitelist.clear(); diff --git a/packages/tracker-core/src/whitelist/repository/mod.rs b/packages/tracker-core/src/whitelist/repository/mod.rs index 51723b68d..d900a8c29 100644 --- a/packages/tracker-core/src/whitelist/repository/mod.rs +++ b/packages/tracker-core/src/whitelist/repository/mod.rs @@ -1,2 +1,3 @@ +//! Repository implementations for the whitelist. pub mod in_memory; pub mod persisted; diff --git a/packages/tracker-core/src/whitelist/repository/persisted.rs b/packages/tracker-core/src/whitelist/repository/persisted.rs index 4773cfbe6..eec6704d6 100644 --- a/packages/tracker-core/src/whitelist/repository/persisted.rs +++ b/packages/tracker-core/src/whitelist/repository/persisted.rs @@ -1,3 +1,4 @@ +//! 
The repository that persists the whitelist. use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; @@ -5,6 +6,9 @@ use bittorrent_primitives::info_hash::InfoHash; use crate::databases::{self, Database}; /// The persisted list of allowed torrents. +/// +/// This repository handles adding, removing, and loading torrents +/// from a persistent database like `SQLite` or `MySQL`ç. pub struct DatabaseWhitelist { /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) /// or [`MySQL`](crate::core::databases::mysql) @@ -12,16 +16,17 @@ pub struct DatabaseWhitelist { } impl DatabaseWhitelist { + /// Creates a new `DatabaseWhitelist`. #[must_use] pub fn new(database: Arc>) -> Self { Self { database } } - /// It adds a torrent to the whitelist if it has not been whitelisted previously + /// Adds a torrent to the whitelist if not already present. /// /// # Errors - /// - /// Will return a `database::Error` if unable to add the `info_hash` to the whitelist database. + /// Returns a `database::Error` if unable to add the `info_hash` to the + /// whitelist. pub(crate) fn add(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; @@ -34,11 +39,10 @@ impl DatabaseWhitelist { Ok(()) } - /// It removes a torrent from the whitelist in the database. + /// Removes a torrent from the whitelist if it exists. /// /// # Errors - /// - /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. + /// Returns a `database::Error` if unable to remove the `info_hash`. pub(crate) fn remove(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; @@ -51,11 +55,11 @@ impl DatabaseWhitelist { Ok(()) } - /// It loads the whitelist from the database. + /// Loads the entire whitelist from the database. 
/// /// # Errors - /// - /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. + /// Returns a `database::Error` if unable to load whitelisted `info_hash` + /// values. pub(crate) fn load_from_database(&self) -> Result, databases::error::Error> { self.database.load_whitelist() } diff --git a/packages/tracker-core/src/whitelist/setup.rs b/packages/tracker-core/src/whitelist/setup.rs index 5b2a5de40..cb18c1478 100644 --- a/packages/tracker-core/src/whitelist/setup.rs +++ b/packages/tracker-core/src/whitelist/setup.rs @@ -1,3 +1,7 @@ +//! Initializes the whitelist manager. +//! +//! This module provides functions to set up the `WhitelistManager`, which is responsible +//! for managing whitelisted torrents in both the in-memory and persistent database repositories. use std::sync::Arc; use super::manager::WhitelistManager; @@ -5,6 +9,28 @@ use super::repository::in_memory::InMemoryWhitelist; use super::repository::persisted::DatabaseWhitelist; use crate::databases::Database; +/// Initializes the `WhitelistManager` by combining in-memory and database +/// repositories. +/// +/// The `WhitelistManager` handles the operations related to whitelisted +/// torrents, such as adding, removing, and verifying torrents in the whitelist. +/// It operates with: +/// +/// 1. **In-Memory Whitelist:** Provides fast, runtime-based access to +/// whitelisted torrents. +/// 2. **Database Whitelist:** Ensures persistent storage of the whitelist data. +/// +/// # Arguments +/// +/// * `database` - An `Arc>` representing the database connection, +/// sed for persistent whitelist storage. +/// * `in_memory_whitelist` - An `Arc` representing the in-memory +/// whitelist repository for fast access. +/// +/// # Returns +/// +/// An `Arc` instance that manages both the in-memory and database +/// whitelist repositories. 
#[must_use] pub fn initialize_whitelist_manager( database: Arc>, diff --git a/packages/tracker-core/src/whitelist/test_helpers.rs b/packages/tracker-core/src/whitelist/test_helpers.rs index cc30c4476..cf1699be4 100644 --- a/packages/tracker-core/src/whitelist/test_helpers.rs +++ b/packages/tracker-core/src/whitelist/test_helpers.rs @@ -1,5 +1,8 @@ -//! Some generic test helpers functions. - +//! Generic test helper functions for the whitelist module. +//! +//! This module provides utility functions to initialize the whitelist services required for testing. +//! In particular, it sets up the `WhitelistAuthorization` and `WhitelistManager` services using a +//! configured database and an in-memory whitelist repository. #[cfg(test)] pub(crate) mod tests { From 35ca4280affaae18aecf84f01f952ee173cd7943 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 13 Feb 2025 12:47:19 +0000 Subject: [PATCH 253/802] test: [#1266] add integartion test for bittorrent_tracker_core lib --- packages/tracker-core/tests/integration.rs | 132 +++++++++++++++++++++ 1 file changed, 132 insertions(+) create mode 100644 packages/tracker-core/tests/integration.rs diff --git a/packages/tracker-core/tests/integration.rs b/packages/tracker-core/tests/integration.rs new file mode 100644 index 000000000..4dbd60b9e --- /dev/null +++ b/packages/tracker-core/tests/integration.rs @@ -0,0 +1,132 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::str::FromStr; +use std::sync::Arc; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; +use bittorrent_tracker_core::databases::setup::initialize_database; +use bittorrent_tracker_core::scrape_handler::ScrapeHandler; +use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use 
bittorrent_tracker_core::whitelist; +use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; +use torrust_tracker_configuration::Core; +use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; + +/// # Panics +/// +/// Will panic if the temporary file path is not a valid UTF-8 string. +#[must_use] +pub fn ephemeral_configuration() -> Core { + let mut config = Core::default(); + + let temp_file = ephemeral_sqlite_database(); + temp_file.to_str().unwrap().clone_into(&mut config.database.path); + + config +} + +/// # Panics +/// +/// Will panic if the string representation of the info hash is not a valid infohash. +#[must_use] +pub fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") +} + +/// Sample peer whose state is not relevant for the tests. +#[must_use] +pub fn sample_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(remote_client_ip(), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } +} + +// The client peer IP. 
+#[must_use] +fn remote_client_ip() -> IpAddr { + IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) +} + +struct Container { + pub announce_handler: Arc, + pub scrape_handler: Arc, +} + +impl Container { + pub fn initialize(config: &Core) -> Self { + let database = initialize_database(config); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(whitelist::authorization::WhitelistAuthorization::new( + config, + &in_memory_whitelist.clone(), + )); + let announce_handler = Arc::new(AnnounceHandler::new( + config, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + Self { + announce_handler, + scrape_handler, + } + } +} + +#[tokio::test] +async fn test_announce_and_scrape_requests() { + let config = ephemeral_configuration(); + + let container = Container::initialize(&config); + + let info_hash = sample_info_hash(); + + let mut peer = sample_peer(); + + // Announce + + // First announce: download started + peer.event = AnnounceEvent::Started; + let announce_data = + container + .announce_handler + .announce(&info_hash, &mut peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible); + + // NOTICE: you don't get back the peer making the request. 
+ assert_eq!(announce_data.peers.len(), 0); + assert_eq!(announce_data.stats.downloaded, 0); + + // Second announce: download completed + peer.event = AnnounceEvent::Completed; + let announce_data = + container + .announce_handler + .announce(&info_hash, &mut peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible); + + assert_eq!(announce_data.peers.len(), 0); + assert_eq!(announce_data.stats.downloaded, 1); + + // Scrape + + let scrape_data = container.scrape_handler.scrape(&vec![info_hash]).await; + + assert!(scrape_data.files.contains_key(&info_hash)); +} + +#[test] +fn test_scrape_request() {} From 81825c9a5b1546bda00f7ddfa70bf176937bf1a6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Feb 2025 12:32:59 +0000 Subject: [PATCH 254/802] refactor: [#1268] separate UDP handlers into diferent modules Following HTTP structure. --- src/servers/udp/handlers.rs | 1877 -------------------------- src/servers/udp/handlers/announce.rs | 875 ++++++++++++ src/servers/udp/handlers/connect.rs | 199 +++ src/servers/udp/handlers/error.rs | 80 ++ src/servers/udp/handlers/mod.rs | 366 +++++ src/servers/udp/handlers/scrape.rs | 429 ++++++ 6 files changed, 1949 insertions(+), 1877 deletions(-) delete mode 100644 src/servers/udp/handlers.rs create mode 100644 src/servers/udp/handlers/announce.rs create mode 100644 src/servers/udp/handlers/connect.rs create mode 100644 src/servers/udp/handlers/error.rs create mode 100644 src/servers/udp/handlers/mod.rs create mode 100644 src/servers/udp/handlers/scrape.rs diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs deleted file mode 100644 index 4f98f52d9..000000000 --- a/src/servers/udp/handlers.rs +++ /dev/null @@ -1,1877 +0,0 @@ -//! Handlers for the UDP server. 
-use std::hash::{DefaultHasher, Hash, Hasher as _}; -use std::net::{IpAddr, SocketAddr}; -use std::ops::Range; -use std::sync::Arc; -use std::time::Instant; - -use aquatic_udp_protocol::{ - AnnounceInterval, AnnounceRequest, AnnounceResponse, AnnounceResponseFixedData, ConnectRequest, ConnectResponse, - ErrorResponse, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfDownloads, NumberOfPeers, Port, Request, RequestParseError, Response, - ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, -}; -use bittorrent_primitives::info_hash::InfoHash; -use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; -use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_tracker_core::whitelist; -use torrust_tracker_clock::clock::Time as _; -use torrust_tracker_configuration::Core; -use tracing::{instrument, Level}; -use uuid::Uuid; -use zerocopy::network_endian::I32; - -use super::connection_cookie::{check, make}; -use super::RawRequest; -use crate::container::UdpTrackerContainer; -use crate::packages::udp_tracker_core; -use crate::servers::udp::error::Error; -use crate::servers::udp::{peer_builder, UDP_TRACKER_LOG_TARGET}; -use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; -use crate::CurrentClock; - -#[derive(Debug, Clone, PartialEq)] -pub(super) struct CookieTimeValues { - pub(super) issue_time: f64, - pub(super) valid_range: Range, -} - -impl CookieTimeValues { - pub(super) fn new(cookie_lifetime: f64) -> Self { - let issue_time = CurrentClock::now().as_secs_f64(); - let expiry_time = issue_time - cookie_lifetime - 1.0; - let tolerance_max_time = issue_time + 1.0; - - Self { - issue_time, - valid_range: expiry_time..tolerance_max_time, - } - } -} - -/// It handles the incoming UDP packets. -/// -/// It's responsible for: -/// -/// - Parsing the incoming packet. -/// - Delegating the request to the correct handler depending on the request type. 
-/// -/// It will return an `Error` response if the request is invalid. -#[instrument(fields(request_id), skip(udp_request, udp_tracker_container, cookie_time_values), ret(level = Level::TRACE))] -pub(crate) async fn handle_packet( - udp_request: RawRequest, - udp_tracker_container: Arc, - local_addr: SocketAddr, - cookie_time_values: CookieTimeValues, -) -> Response { - let request_id = Uuid::new_v4(); - - tracing::Span::current().record("request_id", request_id.to_string()); - tracing::debug!("Handling Packets: {udp_request:?}"); - - let start_time = Instant::now(); - - let response = - match Request::parse_bytes(&udp_request.payload[..udp_request.payload.len()], MAX_SCRAPE_TORRENTS).map_err(Error::from) { - Ok(request) => match handle_request( - request, - udp_request.from, - udp_tracker_container.clone(), - cookie_time_values.clone(), - ) - .await - { - Ok(response) => return response, - Err((e, transaction_id)) => { - match &e { - Error::CookieValueNotNormal { .. } - | Error::CookieValueExpired { .. } - | Error::CookieValueFromFuture { .. } => { - // code-review: should we include `RequestParseError` and `BadRequest`? - let mut ban_service = udp_tracker_container.ban_service.write().await; - ban_service.increase_counter(&udp_request.from.ip()); - } - _ => {} - } - - handle_error( - udp_request.from, - local_addr, - request_id, - &udp_tracker_container.udp_stats_event_sender, - cookie_time_values.valid_range.clone(), - &e, - Some(transaction_id), - ) - .await - } - }, - Err(e) => { - handle_error( - udp_request.from, - local_addr, - request_id, - &udp_tracker_container.udp_stats_event_sender, - cookie_time_values.valid_range.clone(), - &e, - None, - ) - .await - } - }; - - let latency = start_time.elapsed(); - tracing::trace!(?latency, "responded"); - - response -} - -/// It dispatches the request to the correct handler. -/// -/// # Errors -/// -/// If a error happens in the `handle_request` function, it will just return the `ServerError`. 
-#[instrument(skip(request, remote_addr, udp_tracker_container, cookie_time_values))] -pub async fn handle_request( - request: Request, - remote_addr: SocketAddr, - udp_tracker_container: Arc, - cookie_time_values: CookieTimeValues, -) -> Result { - tracing::trace!("handle request"); - - match request { - Request::Connect(connect_request) => Ok(handle_connect( - remote_addr, - &connect_request, - &udp_tracker_container.udp_stats_event_sender, - cookie_time_values.issue_time, - ) - .await), - Request::Announce(announce_request) => { - handle_announce( - remote_addr, - &announce_request, - &udp_tracker_container.core_config, - &udp_tracker_container.announce_handler, - &udp_tracker_container.whitelist_authorization, - &udp_tracker_container.udp_stats_event_sender, - cookie_time_values.valid_range, - ) - .await - } - Request::Scrape(scrape_request) => { - handle_scrape( - remote_addr, - &scrape_request, - &udp_tracker_container.scrape_handler, - &udp_tracker_container.udp_stats_event_sender, - cookie_time_values.valid_range, - ) - .await - } - } -} - -/// It handles the `Connect` request. Refer to [`Connect`](crate::servers::udp#connect) -/// request for more information. -/// -/// # Errors -/// -/// This function does not ever return an error. 
-#[instrument(fields(transaction_id), skip(opt_udp_stats_event_sender), ret(level = Level::TRACE))] -pub async fn handle_connect( - remote_addr: SocketAddr, - request: &ConnectRequest, - opt_udp_stats_event_sender: &Arc>>, - cookie_issue_time: f64, -) -> Response { - tracing::Span::current().record("transaction_id", request.transaction_id.0.to_string()); - - tracing::trace!("handle connect"); - - let connection_id = make(gen_remote_fingerprint(&remote_addr), cookie_issue_time).expect("it should be a normal value"); - - let response = ConnectResponse { - transaction_id: request.transaction_id, - connection_id, - }; - - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { - match remote_addr { - SocketAddr::V4(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp4Connect) - .await; - } - SocketAddr::V6(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp6Connect) - .await; - } - } - } - - Response::from(response) -} - -/// It handles the `Announce` request. Refer to [`Announce`](crate::servers::udp#announce) -/// request for more information. -/// -/// # Errors -/// -/// If a error happens in the `handle_announce` function, it will just return the `ServerError`. 
-#[allow(clippy::too_many_arguments)] -#[instrument(fields(transaction_id, connection_id, info_hash), skip(announce_handler, whitelist_authorization, opt_udp_stats_event_sender), ret(level = Level::TRACE))] -pub async fn handle_announce( - remote_addr: SocketAddr, - request: &AnnounceRequest, - core_config: &Arc, - announce_handler: &Arc, - whitelist_authorization: &Arc, - opt_udp_stats_event_sender: &Arc>>, - cookie_valid_range: Range, -) -> Result { - tracing::Span::current() - .record("transaction_id", request.transaction_id.0.to_string()) - .record("connection_id", request.connection_id.0.to_string()) - .record("info_hash", InfoHash::from_bytes(&request.info_hash.0).to_hex_string()); - - tracing::trace!("handle announce"); - - check( - &request.connection_id, - gen_remote_fingerprint(&remote_addr), - cookie_valid_range, - ) - .map_err(|e| (e, request.transaction_id))?; - - let info_hash = request.info_hash.into(); - let remote_client_ip = remote_addr.ip(); - - // Authorization - whitelist_authorization - .authorize(&info_hash) - .await - .map_err(|e| Error::TrackerError { - source: (Arc::new(e) as Arc).into(), - }) - .map_err(|e| (e, request.transaction_id))?; - - let mut peer = peer_builder::from_request(request, &remote_client_ip); - let peers_wanted: PeersWanted = i32::from(request.peers_wanted.0).into(); - - let response = announce_handler.announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted); - - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { - match remote_client_ip { - IpAddr::V4(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp4Announce) - .await; - } - IpAddr::V6(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp6Announce) - .await; - } - } - } - - #[allow(clippy::cast_possible_truncation)] - if remote_addr.is_ipv4() { - let announce_response = AnnounceResponse { - fixed: AnnounceResponseFixedData { - transaction_id: 
request.transaction_id, - announce_interval: AnnounceInterval(I32::new(i64::from(core_config.announce_policy.interval) as i32)), - leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), - seeders: NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), - }, - peers: response - .peers - .iter() - .filter_map(|peer| { - if let IpAddr::V4(ip) = peer.peer_addr.ip() { - Some(ResponsePeer:: { - ip_address: ip.into(), - port: Port(peer.peer_addr.port().into()), - }) - } else { - None - } - }) - .collect(), - }; - - Ok(Response::from(announce_response)) - } else { - let announce_response = AnnounceResponse { - fixed: AnnounceResponseFixedData { - transaction_id: request.transaction_id, - announce_interval: AnnounceInterval(I32::new(i64::from(core_config.announce_policy.interval) as i32)), - leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), - seeders: NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), - }, - peers: response - .peers - .iter() - .filter_map(|peer| { - if let IpAddr::V6(ip) = peer.peer_addr.ip() { - Some(ResponsePeer:: { - ip_address: ip.into(), - port: Port(peer.peer_addr.port().into()), - }) - } else { - None - } - }) - .collect(), - }; - - Ok(Response::from(announce_response)) - } -} - -/// It handles the `Scrape` request. Refer to [`Scrape`](crate::servers::udp#scrape) -/// request for more information. -/// -/// # Errors -/// -/// This function does not ever return an error. 
-#[instrument(fields(transaction_id, connection_id), skip(scrape_handler, opt_udp_stats_event_sender), ret(level = Level::TRACE))] -pub async fn handle_scrape( - remote_addr: SocketAddr, - request: &ScrapeRequest, - scrape_handler: &Arc, - opt_udp_stats_event_sender: &Arc>>, - cookie_valid_range: Range, -) -> Result { - tracing::Span::current() - .record("transaction_id", request.transaction_id.0.to_string()) - .record("connection_id", request.connection_id.0.to_string()); - - tracing::trace!("handle scrape"); - - check( - &request.connection_id, - gen_remote_fingerprint(&remote_addr), - cookie_valid_range, - ) - .map_err(|e| (e, request.transaction_id))?; - - // Convert from aquatic infohashes - let mut info_hashes: Vec = vec![]; - for info_hash in &request.info_hashes { - info_hashes.push((*info_hash).into()); - } - - let scrape_data = scrape_handler.scrape(&info_hashes).await; - - let mut torrent_stats: Vec = Vec::new(); - - for file in &scrape_data.files { - let swarm_metadata = file.1; - - #[allow(clippy::cast_possible_truncation)] - let scrape_entry = { - TorrentScrapeStatistics { - seeders: NumberOfPeers(I32::new(i64::from(swarm_metadata.complete) as i32)), - completed: NumberOfDownloads(I32::new(i64::from(swarm_metadata.downloaded) as i32)), - leechers: NumberOfPeers(I32::new(i64::from(swarm_metadata.incomplete) as i32)), - } - }; - - torrent_stats.push(scrape_entry); - } - - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { - match remote_addr { - SocketAddr::V4(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp4Scrape) - .await; - } - SocketAddr::V6(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp6Scrape) - .await; - } - } - } - - let response = ScrapeResponse { - transaction_id: request.transaction_id, - torrent_stats, - }; - - Ok(Response::from(response)) -} - -#[allow(clippy::too_many_arguments)] -#[instrument(fields(transaction_id), 
skip(opt_udp_stats_event_sender), ret(level = Level::TRACE))] -async fn handle_error( - remote_addr: SocketAddr, - local_addr: SocketAddr, - request_id: Uuid, - opt_udp_stats_event_sender: &Arc>>, - cookie_valid_range: Range, - e: &Error, - transaction_id: Option, -) -> Response { - tracing::trace!("handle error"); - - match transaction_id { - Some(transaction_id) => { - let transaction_id = transaction_id.0.to_string(); - tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %e, %remote_addr, %local_addr, %request_id, %transaction_id, "response error"); - } - None => { - tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %e, %remote_addr, %local_addr, %request_id, "response error"); - } - } - - let e = if let Error::RequestParseError { request_parse_error } = e { - match request_parse_error { - RequestParseError::Sendable { - connection_id, - transaction_id, - err, - } => { - if let Err(e) = check(connection_id, gen_remote_fingerprint(&remote_addr), cookie_valid_range) { - (e.to_string(), Some(*transaction_id)) - } else { - ((*err).to_string(), Some(*transaction_id)) - } - } - RequestParseError::Unsendable { err } => (err.to_string(), transaction_id), - } - } else { - (e.to_string(), transaction_id) - }; - - if e.1.is_some() { - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { - match remote_addr { - SocketAddr::V4(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp4Error) - .await; - } - SocketAddr::V6(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp6Error) - .await; - } - } - } - } - - Response::from(ErrorResponse { - transaction_id: e.1.unwrap_or(TransactionId(I32::new(0))), - message: e.0.into(), - }) -} - -fn gen_remote_fingerprint(remote_addr: &SocketAddr) -> u64 { - let mut state = DefaultHasher::new(); - remote_addr.hash(&mut state); - state.finish() -} - -#[cfg(test)] -mod tests { - - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, 
SocketAddr}; - use std::ops::Range; - use std::sync::Arc; - - use aquatic_udp_protocol::{NumberOfBytes, PeerId}; - use bittorrent_tracker_core::announce_handler::AnnounceHandler; - use bittorrent_tracker_core::databases::setup::initialize_database; - use bittorrent_tracker_core::scrape_handler::ScrapeHandler; - use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; - use bittorrent_tracker_core::whitelist; - use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; - use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; - use futures::future::BoxFuture; - use mockall::mock; - use tokio::sync::mpsc::error::SendError; - use torrust_tracker_clock::clock::Time; - use torrust_tracker_configuration::{Configuration, Core}; - use torrust_tracker_primitives::peer; - use torrust_tracker_test_helpers::configuration; - - use super::gen_remote_fingerprint; - use crate::packages::udp_tracker_core; - use crate::{packages, CurrentClock}; - - struct CoreTrackerServices { - pub core_config: Arc, - pub announce_handler: Arc, - pub scrape_handler: Arc, - pub in_memory_torrent_repository: Arc, - pub in_memory_whitelist: Arc, - pub whitelist_authorization: Arc, - } - - struct CoreUdpTrackerServices { - pub udp_stats_event_sender: Arc>>, - } - - fn default_testing_tracker_configuration() -> Configuration { - configuration::ephemeral() - } - - fn initialize_core_tracker_services_for_default_tracker_configuration() -> (CoreTrackerServices, CoreUdpTrackerServices) { - initialize_core_tracker_services(&default_testing_tracker_configuration()) - } - - fn initialize_core_tracker_services_for_public_tracker() -> (CoreTrackerServices, CoreUdpTrackerServices) { - initialize_core_tracker_services(&configuration::ephemeral_public()) - } - - fn initialize_core_tracker_services_for_listed_tracker() -> 
(CoreTrackerServices, CoreUdpTrackerServices) { - initialize_core_tracker_services(&configuration::ephemeral_listed()) - } - - fn initialize_core_tracker_services(config: &Configuration) -> (CoreTrackerServices, CoreUdpTrackerServices) { - let core_config = Arc::new(config.core.clone()); - let database = initialize_database(&config.core); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - let announce_handler = Arc::new(AnnounceHandler::new( - &config.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - - ( - CoreTrackerServices { - core_config, - announce_handler, - scrape_handler, - in_memory_torrent_repository, - in_memory_whitelist, - whitelist_authorization, - }, - CoreUdpTrackerServices { udp_stats_event_sender }, - ) - } - - fn sample_ipv4_remote_addr() -> SocketAddr { - sample_ipv4_socket_address() - } - - fn sample_ipv4_remote_addr_fingerprint() -> u64 { - gen_remote_fingerprint(&sample_ipv4_socket_address()) - } - - fn sample_ipv6_remote_addr() -> SocketAddr { - sample_ipv6_socket_address() - } - - fn sample_ipv6_remote_addr_fingerprint() -> u64 { - gen_remote_fingerprint(&sample_ipv6_socket_address()) - } - - fn sample_ipv4_socket_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) - } - - fn sample_ipv6_socket_address() -> SocketAddr { - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 
1)), 8080) - } - - fn sample_issue_time() -> f64 { - 1_000_000_000_f64 - } - - fn sample_cookie_valid_range() -> Range { - sample_issue_time() - 10.0..sample_issue_time() + 10.0 - } - - #[derive(Debug, Default)] - pub struct TorrentPeerBuilder { - peer: peer::Peer, - } - - impl TorrentPeerBuilder { - #[must_use] - pub fn new() -> Self { - Self { - peer: peer::Peer { - updated: CurrentClock::now(), - ..Default::default() - }, - } - } - - #[must_use] - pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; - self - } - - #[must_use] - pub fn with_peer_id(mut self, peer_id: PeerId) -> Self { - self.peer.peer_id = peer_id; - self - } - - #[must_use] - pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes::new(left); - self - } - - #[must_use] - pub fn into(self) -> peer::Peer { - self.peer - } - } - - struct TrackerConfigurationBuilder { - configuration: Configuration, - } - - impl TrackerConfigurationBuilder { - pub fn default() -> TrackerConfigurationBuilder { - let default_configuration = default_testing_tracker_configuration(); - TrackerConfigurationBuilder { - configuration: default_configuration, - } - } - - pub fn with_external_ip(mut self, external_ip: &str) -> Self { - self.configuration.core.net.external_ip = Some(external_ip.to_owned().parse().expect("valid IP address")); - self - } - - pub fn into(self) -> Configuration { - self.configuration - } - } - - mock! 
{ - UdpStatsEventSender {} - impl udp_tracker_core::statistics::event::sender::Sender for UdpStatsEventSender { - fn send_event(&self, event: udp_tracker_core::statistics::event::Event) -> BoxFuture<'static,Option > > > ; - } - } - - mod connect_request { - - use std::future; - use std::sync::Arc; - - use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; - use mockall::predicate::eq; - - use super::{sample_ipv4_socket_address, sample_ipv6_remote_addr}; - use crate::packages::{self, udp_tracker_core}; - use crate::servers::udp::connection_cookie::make; - use crate::servers::udp::handlers::handle_connect; - use crate::servers::udp::handlers::tests::{ - sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv6_remote_addr_fingerprint, sample_issue_time, - MockUdpStatsEventSender, - }; - - fn sample_connect_request() -> ConnectRequest { - ConnectRequest { - transaction_id: TransactionId(0i32.into()), - } - } - - #[tokio::test] - async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - - let request = ConnectRequest { - transaction_id: TransactionId(0i32.into()), - }; - - let response = handle_connect( - sample_ipv4_remote_addr(), - &request, - &udp_stats_event_sender, - sample_issue_time(), - ) - .await; - - assert_eq!( - response, - Response::Connect(ConnectResponse { - connection_id: make(sample_ipv4_remote_addr_fingerprint(), sample_issue_time()).unwrap(), - transaction_id: request.transaction_id - }) - ); - } - - #[tokio::test] - async fn a_connect_response_should_contain_a_new_connection_id() { - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - - let request = 
ConnectRequest { - transaction_id: TransactionId(0i32.into()), - }; - - let response = handle_connect( - sample_ipv4_remote_addr(), - &request, - &udp_stats_event_sender, - sample_issue_time(), - ) - .await; - - assert_eq!( - response, - Response::Connect(ConnectResponse { - connection_id: make(sample_ipv4_remote_addr_fingerprint(), sample_issue_time()).unwrap(), - transaction_id: request.transaction_id - }) - ); - } - - #[tokio::test] - async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - - let request = ConnectRequest { - transaction_id: TransactionId(0i32.into()), - }; - - let response = handle_connect( - sample_ipv6_remote_addr(), - &request, - &udp_stats_event_sender, - sample_issue_time(), - ) - .await; - - assert_eq!( - response, - Response::Connect(ConnectResponse { - connection_id: make(sample_ipv6_remote_addr_fingerprint(), sample_issue_time()).unwrap(), - transaction_id: request.transaction_id - }) - ); - } - - #[tokio::test] - async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock - .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp4Connect)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); - - let client_socket_address = sample_ipv4_socket_address(); - - handle_connect( - client_socket_address, - &sample_connect_request(), - &udp_stats_event_sender, - sample_issue_time(), - ) - .await; - } - - #[tokio::test] - async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { - let mut udp_stats_event_sender_mock = 
MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock - .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp6Connect)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); - - handle_connect( - sample_ipv6_remote_addr(), - &sample_connect_request(), - &udp_stats_event_sender, - sample_issue_time(), - ) - .await; - } - } - - mod announce_request { - - use std::net::Ipv4Addr; - use std::num::NonZeroU16; - - use aquatic_udp_protocol::{ - AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectionId, NumberOfBytes, NumberOfPeers, - PeerId as AquaticPeerId, PeerKey, Port, TransactionId, - }; - - use super::{sample_ipv4_remote_addr_fingerprint, sample_issue_time}; - use crate::servers::udp::connection_cookie::make; - - struct AnnounceRequestBuilder { - request: AnnounceRequest, - } - - impl AnnounceRequestBuilder { - pub fn default() -> AnnounceRequestBuilder { - let client_ip = Ipv4Addr::new(126, 0, 0, 1); - let client_port = 8080; - let info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); - - let default_request = AnnounceRequest { - connection_id: make(sample_ipv4_remote_addr_fingerprint(), sample_issue_time()).unwrap(), - action_placeholder: AnnounceActionPlaceholder::default(), - transaction_id: TransactionId(0i32.into()), - info_hash: info_hash_aquatic, - peer_id: AquaticPeerId([255u8; 20]), - bytes_downloaded: NumberOfBytes(0i64.into()), - bytes_uploaded: NumberOfBytes(0i64.into()), - bytes_left: NumberOfBytes(0i64.into()), - event: AnnounceEvent::Started.into(), - ip_address: client_ip.into(), - key: PeerKey::new(0i32), - peers_wanted: NumberOfPeers::new(1i32), - port: Port::new(NonZeroU16::new(client_port).expect("a non-zero client port")), - }; - AnnounceRequestBuilder { - request: default_request, - } - } - - pub fn with_connection_id(mut self, connection_id: ConnectionId) -> Self { - 
self.request.connection_id = connection_id; - self - } - - pub fn with_info_hash(mut self, info_hash: aquatic_udp_protocol::InfoHash) -> Self { - self.request.info_hash = info_hash; - self - } - - pub fn with_peer_id(mut self, peer_id: AquaticPeerId) -> Self { - self.request.peer_id = peer_id; - self - } - - pub fn with_ip_address(mut self, ip_address: Ipv4Addr) -> Self { - self.request.ip_address = ip_address.into(); - self - } - - pub fn with_port(mut self, port: u16) -> Self { - self.request.port = Port(port.into()); - self - } - - pub fn into(self) -> AnnounceRequest { - self.request - } - } - - mod using_ipv4 { - - use std::future; - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; - - use aquatic_udp_protocol::{ - AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfPeers, - PeerId as AquaticPeerId, Response, ResponsePeer, - }; - use bittorrent_tracker_core::announce_handler::AnnounceHandler; - use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::whitelist; - use mockall::predicate::eq; - use torrust_tracker_configuration::Core; - - use crate::packages::{self, udp_tracker_core}; - use crate::servers::udp::connection_cookie::make; - use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::servers::udp::handlers::tests::{ - gen_remote_fingerprint, initialize_core_tracker_services_for_default_tracker_configuration, - initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_socket_address, - sample_issue_time, MockUdpStatsEventSender, TorrentPeerBuilder, - }; - use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; - - #[tokio::test] - async fn an_announced_peer_should_be_added_to_the_tracker() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - - let client_ip = 
Ipv4Addr::new(126, 0, 0, 1); - let client_port = 8080; - let info_hash = AquaticInfoHash([0u8; 20]); - let peer_id = AquaticPeerId([255u8; 20]); - - let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); - - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .with_info_hash(info_hash) - .with_peer_id(peer_id) - .with_ip_address(client_ip) - .with_port(client_port) - .into(); - - handle_announce( - remote_addr, - &request, - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - - let peers = core_tracker_services - .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); - - let expected_peer = TorrentPeerBuilder::new() - .with_peer_id(peer_id) - .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip), client_port)) - .into(); - - assert_eq!(peers[0], Arc::new(expected_peer)); - } - - #[tokio::test] - async fn the_announced_peer_should_not_be_included_in_the_response() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - - let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); - - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .into(); - - let response = handle_announce( - remote_addr, - &request, - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - - let empty_peer_vector: Vec> = vec![]; - assert_eq!( - response, - Response::from(AnnounceResponse { - fixed: AnnounceResponseFixedData 
{ - transaction_id: request.transaction_id, - announce_interval: AnnounceInterval(120i32.into()), - leechers: NumberOfPeers(0i32.into()), - seeders: NumberOfPeers(1i32.into()), - }, - peers: empty_peer_vector - }) - ); - } - - #[tokio::test] - async fn the_tracker_should_always_use_the_remote_client_ip_but_not_the_port_in_the_udp_request_header_instead_of_the_peer_address_in_the_announce_request( - ) { - // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): - // "Do note that most trackers will only honor the IP address field under limited circumstances." - - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - - let info_hash = AquaticInfoHash([0u8; 20]); - let peer_id = AquaticPeerId([255u8; 20]); - let client_port = 8080; - - let remote_client_ip = Ipv4Addr::new(126, 0, 0, 1); - let remote_client_port = 8081; - let peer_address = Ipv4Addr::new(126, 0, 0, 2); - - let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); - - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .with_info_hash(info_hash) - .with_peer_id(peer_id) - .with_ip_address(peer_address) - .with_port(client_port) - .into(); - - handle_announce( - remote_addr, - &request, - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - - let peers = core_tracker_services - .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); - - assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); - } - - fn add_a_torrent_peer_using_ipv6(in_memory_torrent_repository: &Arc) { - let info_hash = AquaticInfoHash([0u8; 20]); - - let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); - let 
client_ip_v6 = client_ip_v4.to_ipv6_compatible(); - let client_port = 8080; - let peer_id = AquaticPeerId([255u8; 20]); - - let peer_using_ipv6 = TorrentPeerBuilder::new() - .with_peer_id(peer_id) - .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) - .into(); - - let () = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv6); - } - - async fn announce_a_new_peer_using_ipv4( - core_config: Arc, - announce_handler: Arc, - whitelist_authorization: Arc, - ) -> Response { - let (udp_stats_event_sender, _udp_stats_repository) = - packages::udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - - let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .into(); - - handle_announce( - remote_addr, - &request, - &core_config, - &announce_handler, - &whitelist_authorization, - &udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap() - } - - #[tokio::test] - async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { - let (core_tracker_services, _core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - - add_a_torrent_peer_using_ipv6(&core_tracker_services.in_memory_torrent_repository); - - let response = announce_a_new_peer_using_ipv4( - core_tracker_services.core_config.clone(), - core_tracker_services.announce_handler.clone(), - core_tracker_services.whitelist_authorization, - ) - .await; - - // The response should not contain the peer using IPV6 - let peers: Option>> = match response { - Response::AnnounceIpv6(announce_response) => Some(announce_response.peers), - _ => None, - }; - let no_ipv6_peers = peers.is_none(); - assert!(no_ipv6_peers); - } - - #[tokio::test] - async fn 
should_send_the_upd4_announce_event() { - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock - .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp4Announce)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); - - let (core_tracker_services, _core_udp_tracker_services) = - initialize_core_tracker_services_for_default_tracker_configuration(); - - handle_announce( - sample_ipv4_socket_address(), - &AnnounceRequestBuilder::default().into(), - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - } - - mod from_a_loopback_ip { - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; - - use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - - use crate::servers::udp::connection_cookie::make; - use crate::servers::udp::handlers::handle_announce; - use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::servers::udp::handlers::tests::{ - gen_remote_fingerprint, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, - sample_issue_time, TorrentPeerBuilder, - }; - - #[tokio::test] - async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { - let (core_tracker_services, core_udp_tracker_services) = - initialize_core_tracker_services_for_public_tracker(); - - let client_ip = Ipv4Addr::new(127, 0, 0, 1); - let client_port = 8080; - let info_hash = AquaticInfoHash([0u8; 20]); - let peer_id = AquaticPeerId([255u8; 20]); - - let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); - - let request = AnnounceRequestBuilder::default() - 
.with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .with_info_hash(info_hash) - .with_peer_id(peer_id) - .with_ip_address(client_ip) - .with_port(client_port) - .into(); - - handle_announce( - remote_addr, - &request, - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - - let peers = core_tracker_services - .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); - - let external_ip_in_tracker_configuration = core_tracker_services.core_config.net.external_ip.unwrap(); - - let expected_peer = TorrentPeerBuilder::new() - .with_peer_id(peer_id) - .with_peer_address(SocketAddr::new(external_ip_in_tracker_configuration, client_port)) - .into(); - - assert_eq!(peers[0], Arc::new(expected_peer)); - } - } - } - - mod using_ipv6 { - - use std::future; - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; - - use aquatic_udp_protocol::{ - AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfPeers, - PeerId as AquaticPeerId, Response, ResponsePeer, - }; - use bittorrent_tracker_core::announce_handler::AnnounceHandler; - use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::whitelist; - use mockall::predicate::eq; - use torrust_tracker_configuration::Core; - - use crate::packages::{self, udp_tracker_core}; - use crate::servers::udp::connection_cookie::make; - use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::servers::udp::handlers::tests::{ - gen_remote_fingerprint, initialize_core_tracker_services_for_default_tracker_configuration, - initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, - 
sample_issue_time, MockUdpStatsEventSender, TorrentPeerBuilder, - }; - use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; - - #[tokio::test] - async fn an_announced_peer_should_be_added_to_the_tracker() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - - let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); - let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); - let client_port = 8080; - let info_hash = AquaticInfoHash([0u8; 20]); - let peer_id = AquaticPeerId([255u8; 20]); - - let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); - - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .with_info_hash(info_hash) - .with_peer_id(peer_id) - .with_ip_address(client_ip_v4) - .with_port(client_port) - .into(); - - handle_announce( - remote_addr, - &request, - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - - let peers = core_tracker_services - .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); - - let expected_peer = TorrentPeerBuilder::new() - .with_peer_id(peer_id) - .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) - .into(); - - assert_eq!(peers[0], Arc::new(expected_peer)); - } - - #[tokio::test] - async fn the_announced_peer_should_not_be_included_in_the_response() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - - let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); - let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); - - let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); - - let request = AnnounceRequestBuilder::default() - 
.with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .into(); - - let response = handle_announce( - remote_addr, - &request, - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - - let empty_peer_vector: Vec> = vec![]; - assert_eq!( - response, - Response::from(AnnounceResponse { - fixed: AnnounceResponseFixedData { - transaction_id: request.transaction_id, - announce_interval: AnnounceInterval(120i32.into()), - leechers: NumberOfPeers(0i32.into()), - seeders: NumberOfPeers(1i32.into()), - }, - peers: empty_peer_vector - }) - ); - } - - #[tokio::test] - async fn the_tracker_should_always_use_the_remote_client_ip_but_not_the_port_in_the_udp_request_header_instead_of_the_peer_address_in_the_announce_request( - ) { - // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): - // "Do note that most trackers will only honor the IP address field under limited circumstances." 
- - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - - let info_hash = AquaticInfoHash([0u8; 20]); - let peer_id = AquaticPeerId([255u8; 20]); - let client_port = 8080; - - let remote_client_ip = "::100".parse().unwrap(); // IPV4 ::0.0.1.0 -> IPV6 = ::100 = ::ffff:0:100 = 0:0:0:0:0:ffff:0:0100 - let remote_client_port = 8081; - let peer_address = "126.0.0.1".parse().unwrap(); - - let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); - - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .with_info_hash(info_hash) - .with_peer_id(peer_id) - .with_ip_address(peer_address) - .with_port(client_port) - .into(); - - handle_announce( - remote_addr, - &request, - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - - let peers = core_tracker_services - .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); - - // When using IPv6 the tracker converts the remote client ip into a IPv4 address - assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); - } - - fn add_a_torrent_peer_using_ipv4(in_memory_torrent_repository: &Arc) { - let info_hash = AquaticInfoHash([0u8; 20]); - - let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); - let client_port = 8080; - let peer_id = AquaticPeerId([255u8; 20]); - - let peer_using_ipv4 = TorrentPeerBuilder::new() - .with_peer_id(peer_id) - .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) - .into(); - - let () = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv4); - } - - async fn announce_a_new_peer_using_ipv6( - core_config: Arc, - announce_handler: Arc, - 
whitelist_authorization: Arc, - ) -> Response { - let (udp_stats_event_sender, _udp_stats_repository) = - packages::udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - - let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); - let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); - let client_port = 8080; - let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .into(); - - handle_announce( - remote_addr, - &request, - &core_config, - &announce_handler, - &whitelist_authorization, - &udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap() - } - - #[tokio::test] - async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { - let (core_tracker_services, _core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - - add_a_torrent_peer_using_ipv4(&core_tracker_services.in_memory_torrent_repository); - - let response = announce_a_new_peer_using_ipv6( - core_tracker_services.core_config.clone(), - core_tracker_services.announce_handler.clone(), - core_tracker_services.whitelist_authorization, - ) - .await; - - // The response should not contain the peer using IPV4 - let peers: Option>> = match response { - Response::AnnounceIpv4(announce_response) => Some(announce_response.peers), - _ => None, - }; - let no_ipv4_peers = peers.is_none(); - assert!(no_ipv4_peers); - } - - #[tokio::test] - async fn should_send_the_upd6_announce_event() { - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock - .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp6Announce)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = - 
Arc::new(Some(Box::new(udp_stats_event_sender_mock))); - - let (core_tracker_services, _core_udp_tracker_services) = - initialize_core_tracker_services_for_default_tracker_configuration(); - - let remote_addr = sample_ipv6_remote_addr(); - - let announce_request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .into(); - - handle_announce( - remote_addr, - &announce_request, - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - } - - mod from_a_loopback_ip { - use std::future; - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use std::sync::Arc; - - use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use bittorrent_tracker_core::announce_handler::AnnounceHandler; - use bittorrent_tracker_core::databases::setup::initialize_database; - use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; - use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; - use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; - use mockall::predicate::eq; - - use crate::packages::udp_tracker_core; - use crate::servers::udp::connection_cookie::make; - use crate::servers::udp::handlers::handle_announce; - use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; - use crate::servers::udp::handlers::tests::{ - gen_remote_fingerprint, sample_cookie_valid_range, sample_issue_time, MockUdpStatsEventSender, - TrackerConfigurationBuilder, - }; - - #[tokio::test] - async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { - let config = 
Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - - let database = initialize_database(&config.core); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = - Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock - .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp6Announce)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); - - let announce_handler = Arc::new(AnnounceHandler::new( - &config.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); - let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); - - let client_ip_v4 = loopback_ipv4; - let client_ip_v6 = loopback_ipv6; - let client_port = 8080; - - let info_hash = AquaticInfoHash([0u8; 20]); - let peer_id = AquaticPeerId([255u8; 20]); - - let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); - - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) - .with_info_hash(info_hash) - .with_peer_id(peer_id) - .with_ip_address(client_ip_v4) - .with_port(client_port) - .into(); - - let core_config = Arc::new(config.core.clone()); - - handle_announce( - remote_addr, - &request, - &core_config, - &announce_handler, - &whitelist_authorization, - &udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); - 
- let external_ip_in_tracker_configuration = core_config.net.external_ip.unwrap(); - - assert!(external_ip_in_tracker_configuration.is_ipv6()); - - // There's a special type of IPv6 addresses that provide compatibility with IPv4. - // The last 32 bits of these addresses represent an IPv4, and are represented like this: - // 1111:2222:3333:4444:5555:6666:1.2.3.4 - // - // ::127.0.0.1 is the IPV6 representation for the IPV4 address 127.0.0.1. - assert_eq!(Ok(peers[0].peer_addr.ip()), "::126.0.0.1".parse()); - } - } - } - } - - mod scrape_request { - use std::net::SocketAddr; - use std::sync::Arc; - - use aquatic_udp_protocol::{ - InfoHash, NumberOfDownloads, NumberOfPeers, PeerId, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, - TransactionId, - }; - use bittorrent_tracker_core::scrape_handler::ScrapeHandler; - use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - - use super::{gen_remote_fingerprint, TorrentPeerBuilder}; - use crate::packages; - use crate::servers::udp::connection_cookie::make; - use crate::servers::udp::handlers::handle_scrape; - use crate::servers::udp::handlers::tests::{ - initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, - sample_issue_time, - }; - - fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { - TorrentScrapeStatistics { - seeders: NumberOfPeers(0.into()), - completed: NumberOfDownloads(0.into()), - leechers: NumberOfPeers(0.into()), - } - } - - #[tokio::test] - async fn should_return_no_stats_when_the_tracker_does_not_have_any_torrent() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - - let remote_addr = sample_ipv4_remote_addr(); - - let info_hash = InfoHash([0u8; 20]); - let info_hashes = vec![info_hash]; - - let request = ScrapeRequest { - connection_id: make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap(), - transaction_id: 
TransactionId(0i32.into()), - info_hashes, - }; - - let response = handle_scrape( - remote_addr, - &request, - &core_tracker_services.scrape_handler, - &core_udp_tracker_services.udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - - let expected_torrent_stats = vec![zeroed_torrent_statistics()]; - - assert_eq!( - response, - Response::from(ScrapeResponse { - transaction_id: request.transaction_id, - torrent_stats: expected_torrent_stats - }) - ); - } - - async fn add_a_seeder( - in_memory_torrent_repository: Arc, - remote_addr: &SocketAddr, - info_hash: &InfoHash, - ) { - let peer_id = PeerId([255u8; 20]); - - let peer = TorrentPeerBuilder::new() - .with_peer_id(peer_id) - .with_peer_address(*remote_addr) - .with_number_of_bytes_left(0) - .into(); - - let () = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer); - } - - fn build_scrape_request(remote_addr: &SocketAddr, info_hash: &InfoHash) -> ScrapeRequest { - let info_hashes = vec![*info_hash]; - - ScrapeRequest { - connection_id: make(gen_remote_fingerprint(remote_addr), sample_issue_time()).unwrap(), - transaction_id: TransactionId::new(0i32), - info_hashes, - } - } - - async fn add_a_sample_seeder_and_scrape( - in_memory_torrent_repository: Arc, - scrape_handler: Arc, - ) -> Response { - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - - let remote_addr = sample_ipv4_remote_addr(); - let info_hash = InfoHash([0u8; 20]); - - add_a_seeder(in_memory_torrent_repository.clone(), &remote_addr, &info_hash).await; - - let request = build_scrape_request(&remote_addr, &info_hash); - - handle_scrape( - remote_addr, - &request, - &scrape_handler, - &udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap() - } - - fn match_scrape_response(response: Response) -> Option { - match response { - 
Response::Scrape(scrape_response) => Some(scrape_response), - _ => None, - } - } - - mod with_a_public_tracker { - use aquatic_udp_protocol::{NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; - - use crate::servers::udp::handlers::tests::initialize_core_tracker_services_for_public_tracker; - use crate::servers::udp::handlers::tests::scrape_request::{add_a_sample_seeder_and_scrape, match_scrape_response}; - - #[tokio::test] - async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { - let (core_tracker_services, _core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - - let torrent_stats = match_scrape_response( - add_a_sample_seeder_and_scrape( - core_tracker_services.in_memory_torrent_repository.clone(), - core_tracker_services.scrape_handler.clone(), - ) - .await, - ); - - let expected_torrent_stats = vec![TorrentScrapeStatistics { - seeders: NumberOfPeers(1.into()), - completed: NumberOfDownloads(0.into()), - leechers: NumberOfPeers(0.into()), - }]; - - assert_eq!(torrent_stats.unwrap().torrent_stats, expected_torrent_stats); - } - } - - mod with_a_whitelisted_tracker { - use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; - - use crate::servers::udp::handlers::handle_scrape; - use crate::servers::udp::handlers::tests::scrape_request::{ - add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, - }; - use crate::servers::udp::handlers::tests::{ - initialize_core_tracker_services_for_listed_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, - }; - - #[tokio::test] - async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_listed_tracker(); - - let remote_addr = sample_ipv4_remote_addr(); - let info_hash = InfoHash([0u8; 20]); - - add_a_seeder( - 
core_tracker_services.in_memory_torrent_repository.clone(), - &remote_addr, - &info_hash, - ) - .await; - - core_tracker_services.in_memory_whitelist.add(&info_hash.0.into()).await; - - let request = build_scrape_request(&remote_addr, &info_hash); - - let torrent_stats = match_scrape_response( - handle_scrape( - remote_addr, - &request, - &core_tracker_services.scrape_handler, - &core_udp_tracker_services.udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(), - ) - .unwrap(); - - let expected_torrent_stats = vec![TorrentScrapeStatistics { - seeders: NumberOfPeers(1.into()), - completed: NumberOfDownloads(0.into()), - leechers: NumberOfPeers(0.into()), - }]; - - assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); - } - - #[tokio::test] - async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_listed_tracker(); - - let remote_addr = sample_ipv4_remote_addr(); - let info_hash = InfoHash([0u8; 20]); - - add_a_seeder( - core_tracker_services.in_memory_torrent_repository.clone(), - &remote_addr, - &info_hash, - ) - .await; - - let request = build_scrape_request(&remote_addr, &info_hash); - - let torrent_stats = match_scrape_response( - handle_scrape( - remote_addr, - &request, - &core_tracker_services.scrape_handler, - &core_udp_tracker_services.udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(), - ) - .unwrap(); - - let expected_torrent_stats = vec![zeroed_torrent_statistics()]; - - assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); - } - } - - fn sample_scrape_request(remote_addr: &SocketAddr) -> ScrapeRequest { - let info_hash = InfoHash([0u8; 20]); - let info_hashes = vec![info_hash]; - - ScrapeRequest { - connection_id: make(gen_remote_fingerprint(remote_addr), sample_issue_time()).unwrap(), - transaction_id: TransactionId(0i32.into()), - info_hashes, - } - 
} - - mod using_ipv4 { - use std::future; - use std::sync::Arc; - - use mockall::predicate::eq; - - use super::sample_scrape_request; - use crate::packages::udp_tracker_core; - use crate::servers::udp::handlers::handle_scrape; - use crate::servers::udp::handlers::tests::{ - initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, - sample_ipv4_remote_addr, MockUdpStatsEventSender, - }; - - #[tokio::test] - async fn should_send_the_upd4_scrape_event() { - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock - .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp4Scrape)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); - - let remote_addr = sample_ipv4_remote_addr(); - - let (core_tracker_services, _core_udp_tracker_services) = - initialize_core_tracker_services_for_default_tracker_configuration(); - - handle_scrape( - remote_addr, - &sample_scrape_request(&remote_addr), - &core_tracker_services.scrape_handler, - &udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - } - } - - mod using_ipv6 { - use std::future; - use std::sync::Arc; - - use mockall::predicate::eq; - - use super::sample_scrape_request; - use crate::packages::udp_tracker_core; - use crate::servers::udp::handlers::handle_scrape; - use crate::servers::udp::handlers::tests::{ - initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, - sample_ipv6_remote_addr, MockUdpStatsEventSender, - }; - - #[tokio::test] - async fn should_send_the_upd6_scrape_event() { - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock - .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp6Scrape)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - 
let udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); - - let remote_addr = sample_ipv6_remote_addr(); - - let (core_tracker_services, _core_udp_tracker_services) = - initialize_core_tracker_services_for_default_tracker_configuration(); - - handle_scrape( - remote_addr, - &sample_scrape_request(&remote_addr), - &core_tracker_services.scrape_handler, - &udp_stats_event_sender, - sample_cookie_valid_range(), - ) - .await - .unwrap(); - } - } - } -} diff --git a/src/servers/udp/handlers/announce.rs b/src/servers/udp/handlers/announce.rs new file mode 100644 index 000000000..79fb91f49 --- /dev/null +++ b/src/servers/udp/handlers/announce.rs @@ -0,0 +1,875 @@ +//! UDP tracker announce handler. +use std::net::{IpAddr, SocketAddr}; +use std::ops::Range; +use std::sync::Arc; + +use aquatic_udp_protocol::{ + AnnounceInterval, AnnounceRequest, AnnounceResponse, AnnounceResponseFixedData, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfPeers, + Port, Response, ResponsePeer, TransactionId, +}; +use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; +use bittorrent_tracker_core::whitelist; +use torrust_tracker_configuration::Core; +use tracing::{instrument, Level}; +use zerocopy::network_endian::I32; + +use crate::packages::udp_tracker_core; +use crate::servers::udp::connection_cookie::check; +use crate::servers::udp::error::Error; +use crate::servers::udp::handlers::gen_remote_fingerprint; +use crate::servers::udp::peer_builder; + +/// It handles the `Announce` request. Refer to [`Announce`](crate::servers::udp#announce) +/// request for more information. +/// +/// # Errors +/// +/// If a error happens in the `handle_announce` function, it will just return the `ServerError`. 
+#[allow(clippy::too_many_arguments)] +#[instrument(fields(transaction_id, connection_id, info_hash), skip(announce_handler, whitelist_authorization, opt_udp_stats_event_sender), ret(level = Level::TRACE))] +pub async fn handle_announce( + remote_addr: SocketAddr, + request: &AnnounceRequest, + core_config: &Arc, + announce_handler: &Arc, + whitelist_authorization: &Arc, + opt_udp_stats_event_sender: &Arc>>, + cookie_valid_range: Range, +) -> Result { + tracing::Span::current() + .record("transaction_id", request.transaction_id.0.to_string()) + .record("connection_id", request.connection_id.0.to_string()) + .record("info_hash", InfoHash::from_bytes(&request.info_hash.0).to_hex_string()); + + tracing::trace!("handle announce"); + + check( + &request.connection_id, + gen_remote_fingerprint(&remote_addr), + cookie_valid_range, + ) + .map_err(|e| (e, request.transaction_id))?; + + let info_hash = request.info_hash.into(); + let remote_client_ip = remote_addr.ip(); + + // Authorization + whitelist_authorization + .authorize(&info_hash) + .await + .map_err(|e| Error::TrackerError { + source: (Arc::new(e) as Arc).into(), + }) + .map_err(|e| (e, request.transaction_id))?; + + let mut peer = peer_builder::from_request(request, &remote_client_ip); + let peers_wanted: PeersWanted = i32::from(request.peers_wanted.0).into(); + + let response = announce_handler.announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted); + + if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { + match remote_client_ip { + IpAddr::V4(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp4Announce) + .await; + } + IpAddr::V6(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp6Announce) + .await; + } + } + } + + #[allow(clippy::cast_possible_truncation)] + if remote_addr.is_ipv4() { + let announce_response = AnnounceResponse { + fixed: AnnounceResponseFixedData { + transaction_id: 
request.transaction_id, + announce_interval: AnnounceInterval(I32::new(i64::from(core_config.announce_policy.interval) as i32)), + leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), + seeders: NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), + }, + peers: response + .peers + .iter() + .filter_map(|peer| { + if let IpAddr::V4(ip) = peer.peer_addr.ip() { + Some(ResponsePeer:: { + ip_address: ip.into(), + port: Port(peer.peer_addr.port().into()), + }) + } else { + None + } + }) + .collect(), + }; + + Ok(Response::from(announce_response)) + } else { + let announce_response = AnnounceResponse { + fixed: AnnounceResponseFixedData { + transaction_id: request.transaction_id, + announce_interval: AnnounceInterval(I32::new(i64::from(core_config.announce_policy.interval) as i32)), + leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), + seeders: NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), + }, + peers: response + .peers + .iter() + .filter_map(|peer| { + if let IpAddr::V6(ip) = peer.peer_addr.ip() { + Some(ResponsePeer:: { + ip_address: ip.into(), + port: Port(peer.peer_addr.port().into()), + }) + } else { + None + } + }) + .collect(), + }; + + Ok(Response::from(announce_response)) + } +} + +#[cfg(test)] +mod tests { + + mod announce_request { + + use std::net::Ipv4Addr; + use std::num::NonZeroU16; + + use aquatic_udp_protocol::{ + AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectionId, NumberOfBytes, NumberOfPeers, + PeerId as AquaticPeerId, PeerKey, Port, TransactionId, + }; + + use crate::servers::udp::connection_cookie::make; + use crate::servers::udp::handlers::tests::{sample_ipv4_remote_addr_fingerprint, sample_issue_time}; + + struct AnnounceRequestBuilder { + request: AnnounceRequest, + } + + impl AnnounceRequestBuilder { + pub fn default() -> AnnounceRequestBuilder { + let client_ip = Ipv4Addr::new(126, 0, 0, 1); + let client_port = 8080; + let 
info_hash_aquatic = aquatic_udp_protocol::InfoHash([0u8; 20]); + + let default_request = AnnounceRequest { + connection_id: make(sample_ipv4_remote_addr_fingerprint(), sample_issue_time()).unwrap(), + action_placeholder: AnnounceActionPlaceholder::default(), + transaction_id: TransactionId(0i32.into()), + info_hash: info_hash_aquatic, + peer_id: AquaticPeerId([255u8; 20]), + bytes_downloaded: NumberOfBytes(0i64.into()), + bytes_uploaded: NumberOfBytes(0i64.into()), + bytes_left: NumberOfBytes(0i64.into()), + event: AnnounceEvent::Started.into(), + ip_address: client_ip.into(), + key: PeerKey::new(0i32), + peers_wanted: NumberOfPeers::new(1i32), + port: Port::new(NonZeroU16::new(client_port).expect("a non-zero client port")), + }; + AnnounceRequestBuilder { + request: default_request, + } + } + + pub fn with_connection_id(mut self, connection_id: ConnectionId) -> Self { + self.request.connection_id = connection_id; + self + } + + pub fn with_info_hash(mut self, info_hash: aquatic_udp_protocol::InfoHash) -> Self { + self.request.info_hash = info_hash; + self + } + + pub fn with_peer_id(mut self, peer_id: AquaticPeerId) -> Self { + self.request.peer_id = peer_id; + self + } + + pub fn with_ip_address(mut self, ip_address: Ipv4Addr) -> Self { + self.request.ip_address = ip_address.into(); + self + } + + pub fn with_port(mut self, port: u16) -> Self { + self.request.port = Port(port.into()); + self + } + + pub fn into(self) -> AnnounceRequest { + self.request + } + } + + mod using_ipv4 { + + use std::future; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{ + AnnounceInterval, AnnounceResponse, AnnounceResponseFixedData, InfoHash as AquaticInfoHash, Ipv4AddrBytes, + Ipv6AddrBytes, NumberOfPeers, PeerId as AquaticPeerId, Response, ResponsePeer, + }; + use bittorrent_tracker_core::announce_handler::AnnounceHandler; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use 
bittorrent_tracker_core::whitelist; + use mockall::predicate::eq; + use torrust_tracker_configuration::Core; + + use crate::packages::{self, udp_tracker_core}; + use crate::servers::udp::connection_cookie::make; + use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::{ + initialize_core_tracker_services_for_default_tracker_configuration, + initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_socket_address, + sample_issue_time, MockUdpStatsEventSender, TorrentPeerBuilder, + }; + use crate::servers::udp::handlers::{gen_remote_fingerprint, handle_announce}; + + #[tokio::test] + async fn an_announced_peer_should_be_added_to_the_tracker() { + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + + let client_ip = Ipv4Addr::new(126, 0, 0, 1); + let client_port = 8080; + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip) + .with_port(client_port) + .into(); + + handle_announce( + remote_addr, + &request, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_udp_tracker_services.udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + + let peers = core_tracker_services + .in_memory_torrent_repository + .get_torrent_peers(&info_hash.0.into()); + + let expected_peer = TorrentPeerBuilder::new() + .with_peer_id(peer_id) + .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip), client_port)) + .into(); + + assert_eq!(peers[0], 
Arc::new(expected_peer)); + } + + #[tokio::test] + async fn the_announced_peer_should_not_be_included_in_the_response() { + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + + let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .into(); + + let response = handle_announce( + remote_addr, + &request, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_udp_tracker_services.udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + + let empty_peer_vector: Vec> = vec![]; + assert_eq!( + response, + Response::from(AnnounceResponse { + fixed: AnnounceResponseFixedData { + transaction_id: request.transaction_id, + announce_interval: AnnounceInterval(120i32.into()), + leechers: NumberOfPeers(0i32.into()), + seeders: NumberOfPeers(1i32.into()), + }, + peers: empty_peer_vector + }) + ); + } + + #[tokio::test] + async fn the_tracker_should_always_use_the_remote_client_ip_but_not_the_port_in_the_udp_request_header_instead_of_the_peer_address_in_the_announce_request( + ) { + // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): + // "Do note that most trackers will only honor the IP address field under limited circumstances." 
+ + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + let client_port = 8080; + + let remote_client_ip = Ipv4Addr::new(126, 0, 0, 1); + let remote_client_port = 8081; + let peer_address = Ipv4Addr::new(126, 0, 0, 2); + + let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(peer_address) + .with_port(client_port) + .into(); + + handle_announce( + remote_addr, + &request, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_udp_tracker_services.udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + + let peers = core_tracker_services + .in_memory_torrent_repository + .get_torrent_peers(&info_hash.0.into()); + + assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); + } + + fn add_a_torrent_peer_using_ipv6(in_memory_torrent_repository: &Arc) { + let info_hash = AquaticInfoHash([0u8; 20]); + + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let client_port = 8080; + let peer_id = AquaticPeerId([255u8; 20]); + + let peer_using_ipv6 = TorrentPeerBuilder::new() + .with_peer_id(peer_id) + .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) + .into(); + + let () = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv6); + } + + async fn announce_a_new_peer_using_ipv4( + core_config: Arc, + announce_handler: Arc, + whitelist_authorization: Arc, + ) -> Response { + let (udp_stats_event_sender, _udp_stats_repository) = + 
packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .into(); + + handle_announce( + remote_addr, + &request, + &core_config, + &announce_handler, + &whitelist_authorization, + &udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap() + } + + #[tokio::test] + async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { + let (core_tracker_services, _core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + + add_a_torrent_peer_using_ipv6(&core_tracker_services.in_memory_torrent_repository); + + let response = announce_a_new_peer_using_ipv4( + core_tracker_services.core_config.clone(), + core_tracker_services.announce_handler.clone(), + core_tracker_services.whitelist_authorization, + ) + .await; + + // The response should not contain the peer using IPV6 + let peers: Option>> = match response { + Response::AnnounceIpv6(announce_response) => Some(announce_response.peers), + _ => None, + }; + let no_ipv6_peers = peers.is_none(); + assert!(no_ipv6_peers); + } + + #[tokio::test] + async fn should_send_the_upd4_announce_event() { + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp4Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + + let (core_tracker_services, _core_udp_tracker_services) = + initialize_core_tracker_services_for_default_tracker_configuration(); + + handle_announce( + 
sample_ipv4_socket_address(), + &AnnounceRequestBuilder::default().into(), + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + } + + mod from_a_loopback_ip { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + + use crate::servers::udp::connection_cookie::make; + use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::{ + initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_issue_time, + TorrentPeerBuilder, + }; + use crate::servers::udp::handlers::{gen_remote_fingerprint, handle_announce}; + + #[tokio::test] + async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { + let (core_tracker_services, core_udp_tracker_services) = + initialize_core_tracker_services_for_public_tracker(); + + let client_ip = Ipv4Addr::new(127, 0, 0, 1); + let client_port = 8080; + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip) + .with_port(client_port) + .into(); + + handle_announce( + remote_addr, + &request, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_udp_tracker_services.udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + + let peers = core_tracker_services + .in_memory_torrent_repository + 
.get_torrent_peers(&info_hash.0.into()); + + let external_ip_in_tracker_configuration = core_tracker_services.core_config.net.external_ip.unwrap(); + + let expected_peer = TorrentPeerBuilder::new() + .with_peer_id(peer_id) + .with_peer_address(SocketAddr::new(external_ip_in_tracker_configuration, client_port)) + .into(); + + assert_eq!(peers[0], Arc::new(expected_peer)); + } + } + } + + mod using_ipv6 { + + use std::future; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{ + AnnounceInterval, AnnounceResponse, AnnounceResponseFixedData, InfoHash as AquaticInfoHash, Ipv4AddrBytes, + Ipv6AddrBytes, NumberOfPeers, PeerId as AquaticPeerId, Response, ResponsePeer, + }; + use bittorrent_tracker_core::announce_handler::AnnounceHandler; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::whitelist; + use mockall::predicate::eq; + use torrust_tracker_configuration::Core; + + use crate::packages::{self, udp_tracker_core}; + use crate::servers::udp::connection_cookie::make; + use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::{ + initialize_core_tracker_services_for_default_tracker_configuration, + initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, + sample_issue_time, MockUdpStatsEventSender, TorrentPeerBuilder, + }; + use crate::servers::udp::handlers::{gen_remote_fingerprint, handle_announce}; + + #[tokio::test] + async fn an_announced_peer_should_be_added_to_the_tracker() { + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let client_port = 8080; + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let 
remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip_v4) + .with_port(client_port) + .into(); + + handle_announce( + remote_addr, + &request, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_udp_tracker_services.udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + + let peers = core_tracker_services + .in_memory_torrent_repository + .get_torrent_peers(&info_hash.0.into()); + + let expected_peer = TorrentPeerBuilder::new() + .with_peer_id(peer_id) + .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) + .into(); + + assert_eq!(peers[0], Arc::new(expected_peer)); + } + + #[tokio::test] + async fn the_announced_peer_should_not_be_included_in_the_response() { + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .into(); + + let response = handle_announce( + remote_addr, + &request, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_udp_tracker_services.udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + + let empty_peer_vector: Vec> = vec![]; + assert_eq!( + response, + Response::from(AnnounceResponse { + fixed: AnnounceResponseFixedData { + transaction_id: request.transaction_id, + 
announce_interval: AnnounceInterval(120i32.into()), + leechers: NumberOfPeers(0i32.into()), + seeders: NumberOfPeers(1i32.into()), + }, + peers: empty_peer_vector + }) + ); + } + + #[tokio::test] + async fn the_tracker_should_always_use_the_remote_client_ip_but_not_the_port_in_the_udp_request_header_instead_of_the_peer_address_in_the_announce_request( + ) { + // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): + // "Do note that most trackers will only honor the IP address field under limited circumstances." + + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + let client_port = 8080; + + let remote_client_ip = "::100".parse().unwrap(); // IPV4 ::0.0.1.0 -> IPV6 = ::100 = ::ffff:0:100 = 0:0:0:0:0:ffff:0:0100 + let remote_client_port = 8081; + let peer_address = "126.0.0.1".parse().unwrap(); + + let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); + + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(peer_address) + .with_port(client_port) + .into(); + + handle_announce( + remote_addr, + &request, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &core_udp_tracker_services.udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + + let peers = core_tracker_services + .in_memory_torrent_repository + .get_torrent_peers(&info_hash.0.into()); + + // When using IPv6 the tracker converts the remote client ip into a IPv4 address + assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); + } + + fn add_a_torrent_peer_using_ipv4(in_memory_torrent_repository: &Arc) { + 
let info_hash = AquaticInfoHash([0u8; 20]); + + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_port = 8080; + let peer_id = AquaticPeerId([255u8; 20]); + + let peer_using_ipv4 = TorrentPeerBuilder::new() + .with_peer_id(peer_id) + .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) + .into(); + + let () = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv4); + } + + async fn announce_a_new_peer_using_ipv6( + core_config: Arc, + announce_handler: Arc, + whitelist_authorization: Arc, + ) -> Response { + let (udp_stats_event_sender, _udp_stats_repository) = + packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); + let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); + let client_port = 8080; + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .into(); + + handle_announce( + remote_addr, + &request, + &core_config, + &announce_handler, + &whitelist_authorization, + &udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap() + } + + #[tokio::test] + async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { + let (core_tracker_services, _core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + + add_a_torrent_peer_using_ipv4(&core_tracker_services.in_memory_torrent_repository); + + let response = announce_a_new_peer_using_ipv6( + core_tracker_services.core_config.clone(), + core_tracker_services.announce_handler.clone(), + core_tracker_services.whitelist_authorization, + ) + .await; + + // The response should not contain the peer using IPV4 + let peers: Option>> = match response { + 
Response::AnnounceIpv4(announce_response) => Some(announce_response.peers), + _ => None, + }; + let no_ipv4_peers = peers.is_none(); + assert!(no_ipv4_peers); + } + + #[tokio::test] + async fn should_send_the_upd6_announce_event() { + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp6Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + + let (core_tracker_services, _core_udp_tracker_services) = + initialize_core_tracker_services_for_default_tracker_configuration(); + + let remote_addr = sample_ipv6_remote_addr(); + + let announce_request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .into(); + + handle_announce( + remote_addr, + &announce_request, + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.whitelist_authorization, + &udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + } + + mod from_a_loopback_ip { + use std::future; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use bittorrent_tracker_core::announce_handler::AnnounceHandler; + use bittorrent_tracker_core::databases::setup::initialize_database; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; + use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use mockall::predicate::eq; + + use crate::packages::udp_tracker_core; + 
use crate::servers::udp::connection_cookie::make; + use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::tests::{ + sample_cookie_valid_range, sample_issue_time, MockUdpStatsEventSender, TrackerConfigurationBuilder, + }; + use crate::servers::udp::handlers::{gen_remote_fingerprint, handle_announce}; + + #[tokio::test] + async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { + let config = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); + + let database = initialize_database(&config.core); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = + Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp6Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); + let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + + let client_ip_v4 = loopback_ipv4; + let client_ip_v6 = loopback_ipv6; + let client_port = 8080; + + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + + let request = AnnounceRequestBuilder::default() + 
.with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip_v4) + .with_port(client_port) + .into(); + + let core_config = Arc::new(config.core.clone()); + + handle_announce( + remote_addr, + &request, + &core_config, + &announce_handler, + &whitelist_authorization, + &udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + + let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); + + let external_ip_in_tracker_configuration = core_config.net.external_ip.unwrap(); + + assert!(external_ip_in_tracker_configuration.is_ipv6()); + + // There's a special type of IPv6 addresses that provide compatibility with IPv4. + // The last 32 bits of these addresses represent an IPv4, and are represented like this: + // 1111:2222:3333:4444:5555:6666:1.2.3.4 + // + // ::127.0.0.1 is the IPV6 representation for the IPV4 address 127.0.0.1. + assert_eq!(Ok(peers[0].peer_addr.ip()), "::126.0.0.1".parse()); + } + } + } + } +} diff --git a/src/servers/udp/handlers/connect.rs b/src/servers/udp/handlers/connect.rs new file mode 100644 index 000000000..431c3bb4d --- /dev/null +++ b/src/servers/udp/handlers/connect.rs @@ -0,0 +1,199 @@ +//! UDP tracker connect handler. +use std::net::SocketAddr; +use std::sync::Arc; + +use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response}; +use tracing::{instrument, Level}; + +use crate::packages::udp_tracker_core; +use crate::servers::udp::connection_cookie::make; +use crate::servers::udp::handlers::gen_remote_fingerprint; + +/// It handles the `Connect` request. Refer to [`Connect`](crate::servers::udp#connect) +/// request for more information. +/// +/// # Errors +/// +/// This function does not ever return an error. 
+#[instrument(fields(transaction_id), skip(opt_udp_stats_event_sender), ret(level = Level::TRACE))] +pub async fn handle_connect( + remote_addr: SocketAddr, + request: &ConnectRequest, + opt_udp_stats_event_sender: &Arc>>, + cookie_issue_time: f64, +) -> Response { + tracing::Span::current().record("transaction_id", request.transaction_id.0.to_string()); + + tracing::trace!("handle connect"); + + let connection_id = make(gen_remote_fingerprint(&remote_addr), cookie_issue_time).expect("it should be a normal value"); + + let response = ConnectResponse { + transaction_id: request.transaction_id, + connection_id, + }; + + if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { + match remote_addr { + SocketAddr::V4(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp4Connect) + .await; + } + SocketAddr::V6(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp6Connect) + .await; + } + } + } + + Response::from(response) +} + +#[cfg(test)] +mod tests { + + mod connect_request { + + use std::future; + use std::sync::Arc; + + use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; + use mockall::predicate::eq; + + use crate::packages::{self, udp_tracker_core}; + use crate::servers::udp::connection_cookie::make; + use crate::servers::udp::handlers::handle_connect; + use crate::servers::udp::handlers::tests::{ + sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv4_socket_address, sample_ipv6_remote_addr, + sample_ipv6_remote_addr_fingerprint, sample_issue_time, MockUdpStatsEventSender, + }; + + fn sample_connect_request() -> ConnectRequest { + ConnectRequest { + transaction_id: TransactionId(0i32.into()), + } + } + + #[tokio::test] + async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { + let (udp_stats_event_sender, _udp_stats_repository) = 
packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + let request = ConnectRequest { + transaction_id: TransactionId(0i32.into()), + }; + + let response = handle_connect( + sample_ipv4_remote_addr(), + &request, + &udp_stats_event_sender, + sample_issue_time(), + ) + .await; + + assert_eq!( + response, + Response::Connect(ConnectResponse { + connection_id: make(sample_ipv4_remote_addr_fingerprint(), sample_issue_time()).unwrap(), + transaction_id: request.transaction_id + }) + ); + } + + #[tokio::test] + async fn a_connect_response_should_contain_a_new_connection_id() { + let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + let request = ConnectRequest { + transaction_id: TransactionId(0i32.into()), + }; + + let response = handle_connect( + sample_ipv4_remote_addr(), + &request, + &udp_stats_event_sender, + sample_issue_time(), + ) + .await; + + assert_eq!( + response, + Response::Connect(ConnectResponse { + connection_id: make(sample_ipv4_remote_addr_fingerprint(), sample_issue_time()).unwrap(), + transaction_id: request.transaction_id + }) + ); + } + + #[tokio::test] + async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { + let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + let request = ConnectRequest { + transaction_id: TransactionId(0i32.into()), + }; + + let response = handle_connect( + sample_ipv6_remote_addr(), + &request, + &udp_stats_event_sender, + sample_issue_time(), + ) + .await; + + assert_eq!( + response, + Response::Connect(ConnectResponse { + connection_id: make(sample_ipv6_remote_addr_fingerprint(), sample_issue_time()).unwrap(), + transaction_id: request.transaction_id + }) + ); + } + + 
#[tokio::test] + async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp4Connect)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + + let client_socket_address = sample_ipv4_socket_address(); + + handle_connect( + client_socket_address, + &sample_connect_request(), + &udp_stats_event_sender, + sample_issue_time(), + ) + .await; + } + + #[tokio::test] + async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp6Connect)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + + handle_connect( + sample_ipv6_remote_addr(), + &sample_connect_request(), + &udp_stats_event_sender, + sample_issue_time(), + ) + .await; + } + } +} diff --git a/src/servers/udp/handlers/error.rs b/src/servers/udp/handlers/error.rs new file mode 100644 index 000000000..36095eeed --- /dev/null +++ b/src/servers/udp/handlers/error.rs @@ -0,0 +1,80 @@ +//! UDP tracker error handling. 
+use std::net::SocketAddr; +use std::ops::Range; +use std::sync::Arc; + +use aquatic_udp_protocol::{ErrorResponse, RequestParseError, Response, TransactionId}; +use tracing::{instrument, Level}; +use uuid::Uuid; +use zerocopy::network_endian::I32; + +use crate::packages::udp_tracker_core; +use crate::servers::udp::connection_cookie::check; +use crate::servers::udp::error::Error; +use crate::servers::udp::handlers::gen_remote_fingerprint; +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; + +#[allow(clippy::too_many_arguments)] +#[instrument(fields(transaction_id), skip(opt_udp_stats_event_sender), ret(level = Level::TRACE))] +pub async fn handle_error( + remote_addr: SocketAddr, + local_addr: SocketAddr, + request_id: Uuid, + opt_udp_stats_event_sender: &Arc>>, + cookie_valid_range: Range, + e: &Error, + transaction_id: Option, +) -> Response { + tracing::trace!("handle error"); + + match transaction_id { + Some(transaction_id) => { + let transaction_id = transaction_id.0.to_string(); + tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %e, %remote_addr, %local_addr, %request_id, %transaction_id, "response error"); + } + None => { + tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %e, %remote_addr, %local_addr, %request_id, "response error"); + } + } + + let e = if let Error::RequestParseError { request_parse_error } = e { + match request_parse_error { + RequestParseError::Sendable { + connection_id, + transaction_id, + err, + } => { + if let Err(e) = check(connection_id, gen_remote_fingerprint(&remote_addr), cookie_valid_range) { + (e.to_string(), Some(*transaction_id)) + } else { + ((*err).to_string(), Some(*transaction_id)) + } + } + RequestParseError::Unsendable { err } => (err.to_string(), transaction_id), + } + } else { + (e.to_string(), transaction_id) + }; + + if e.1.is_some() { + if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { + match remote_addr { + SocketAddr::V4(_) => { + udp_stats_event_sender + 
.send_event(udp_tracker_core::statistics::event::Event::Udp4Error) + .await; + } + SocketAddr::V6(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp6Error) + .await; + } + } + } + } + + Response::from(ErrorResponse { + transaction_id: e.1.unwrap_or(TransactionId(I32::new(0))), + message: e.0.into(), + }) +} diff --git a/src/servers/udp/handlers/mod.rs b/src/servers/udp/handlers/mod.rs new file mode 100644 index 000000000..252a5be02 --- /dev/null +++ b/src/servers/udp/handlers/mod.rs @@ -0,0 +1,366 @@ +//! Handlers for the UDP server. +pub mod announce; +pub mod connect; +pub mod error; +pub mod scrape; + +use std::hash::{DefaultHasher, Hash, Hasher as _}; +use std::net::SocketAddr; +use std::ops::Range; +use std::sync::Arc; +use std::time::Instant; + +use announce::handle_announce; +use aquatic_udp_protocol::{Request, Response, TransactionId}; +use connect::handle_connect; +use error::handle_error; +use scrape::handle_scrape; +use torrust_tracker_clock::clock::Time as _; +use tracing::{instrument, Level}; +use uuid::Uuid; + +use super::RawRequest; +use crate::container::UdpTrackerContainer; +use crate::servers::udp::error::Error; +use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; +use crate::CurrentClock; + +#[derive(Debug, Clone, PartialEq)] +pub(super) struct CookieTimeValues { + pub(super) issue_time: f64, + pub(super) valid_range: Range, +} + +impl CookieTimeValues { + pub(super) fn new(cookie_lifetime: f64) -> Self { + let issue_time = CurrentClock::now().as_secs_f64(); + let expiry_time = issue_time - cookie_lifetime - 1.0; + let tolerance_max_time = issue_time + 1.0; + + Self { + issue_time, + valid_range: expiry_time..tolerance_max_time, + } + } +} + +/// It handles the incoming UDP packets. +/// +/// It's responsible for: +/// +/// - Parsing the incoming packet. +/// - Delegating the request to the correct handler depending on the request type. 
+/// +/// It will return an `Error` response if the request is invalid. +#[instrument(fields(request_id), skip(udp_request, udp_tracker_container, cookie_time_values), ret(level = Level::TRACE))] +pub(crate) async fn handle_packet( + udp_request: RawRequest, + udp_tracker_container: Arc, + local_addr: SocketAddr, + cookie_time_values: CookieTimeValues, +) -> Response { + let request_id = Uuid::new_v4(); + + tracing::Span::current().record("request_id", request_id.to_string()); + tracing::debug!("Handling Packets: {udp_request:?}"); + + let start_time = Instant::now(); + + let response = + match Request::parse_bytes(&udp_request.payload[..udp_request.payload.len()], MAX_SCRAPE_TORRENTS).map_err(Error::from) { + Ok(request) => match handle_request( + request, + udp_request.from, + udp_tracker_container.clone(), + cookie_time_values.clone(), + ) + .await + { + Ok(response) => return response, + Err((e, transaction_id)) => { + match &e { + Error::CookieValueNotNormal { .. } + | Error::CookieValueExpired { .. } + | Error::CookieValueFromFuture { .. } => { + // code-review: should we include `RequestParseError` and `BadRequest`? + let mut ban_service = udp_tracker_container.ban_service.write().await; + ban_service.increase_counter(&udp_request.from.ip()); + } + _ => {} + } + + handle_error( + udp_request.from, + local_addr, + request_id, + &udp_tracker_container.udp_stats_event_sender, + cookie_time_values.valid_range.clone(), + &e, + Some(transaction_id), + ) + .await + } + }, + Err(e) => { + handle_error( + udp_request.from, + local_addr, + request_id, + &udp_tracker_container.udp_stats_event_sender, + cookie_time_values.valid_range.clone(), + &e, + None, + ) + .await + } + }; + + let latency = start_time.elapsed(); + tracing::trace!(?latency, "responded"); + + response +} + +/// It dispatches the request to the correct handler. +/// +/// # Errors +/// +/// If a error happens in the `handle_request` function, it will just return the `ServerError`. 
+#[instrument(skip(request, remote_addr, udp_tracker_container, cookie_time_values))] +pub async fn handle_request( + request: Request, + remote_addr: SocketAddr, + udp_tracker_container: Arc, + cookie_time_values: CookieTimeValues, +) -> Result { + tracing::trace!("handle request"); + + match request { + Request::Connect(connect_request) => Ok(handle_connect( + remote_addr, + &connect_request, + &udp_tracker_container.udp_stats_event_sender, + cookie_time_values.issue_time, + ) + .await), + Request::Announce(announce_request) => { + handle_announce( + remote_addr, + &announce_request, + &udp_tracker_container.core_config, + &udp_tracker_container.announce_handler, + &udp_tracker_container.whitelist_authorization, + &udp_tracker_container.udp_stats_event_sender, + cookie_time_values.valid_range, + ) + .await + } + Request::Scrape(scrape_request) => { + handle_scrape( + remote_addr, + &scrape_request, + &udp_tracker_container.scrape_handler, + &udp_tracker_container.udp_stats_event_sender, + cookie_time_values.valid_range, + ) + .await + } + } +} + +#[must_use] +pub(crate) fn gen_remote_fingerprint(remote_addr: &SocketAddr) -> u64 { + let mut state = DefaultHasher::new(); + remote_addr.hash(&mut state); + state.finish() +} + +#[cfg(test)] +pub(crate) mod tests { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::ops::Range; + use std::sync::Arc; + + use aquatic_udp_protocol::{NumberOfBytes, PeerId}; + use bittorrent_tracker_core::announce_handler::AnnounceHandler; + use bittorrent_tracker_core::databases::setup::initialize_database; + use bittorrent_tracker_core::scrape_handler::ScrapeHandler; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::whitelist; + use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; + use 
bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use futures::future::BoxFuture; + use mockall::mock; + use tokio::sync::mpsc::error::SendError; + use torrust_tracker_clock::clock::Time; + use torrust_tracker_configuration::{Configuration, Core}; + use torrust_tracker_primitives::peer; + use torrust_tracker_test_helpers::configuration; + + use super::gen_remote_fingerprint; + use crate::packages::udp_tracker_core; + use crate::{packages, CurrentClock}; + + pub(crate) struct CoreTrackerServices { + pub core_config: Arc, + pub announce_handler: Arc, + pub scrape_handler: Arc, + pub in_memory_torrent_repository: Arc, + pub in_memory_whitelist: Arc, + pub whitelist_authorization: Arc, + } + + pub(crate) struct CoreUdpTrackerServices { + pub udp_stats_event_sender: Arc>>, + } + + fn default_testing_tracker_configuration() -> Configuration { + configuration::ephemeral() + } + + pub(crate) fn initialize_core_tracker_services_for_default_tracker_configuration( + ) -> (CoreTrackerServices, CoreUdpTrackerServices) { + initialize_core_tracker_services(&default_testing_tracker_configuration()) + } + + pub(crate) fn initialize_core_tracker_services_for_public_tracker() -> (CoreTrackerServices, CoreUdpTrackerServices) { + initialize_core_tracker_services(&configuration::ephemeral_public()) + } + + pub(crate) fn initialize_core_tracker_services_for_listed_tracker() -> (CoreTrackerServices, CoreUdpTrackerServices) { + initialize_core_tracker_services(&configuration::ephemeral_listed()) + } + + fn initialize_core_tracker_services(config: &Configuration) -> (CoreTrackerServices, CoreUdpTrackerServices) { + let core_config = Arc::new(config.core.clone()); + let database = initialize_database(&config.core); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); + let in_memory_torrent_repository = 
Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + ( + CoreTrackerServices { + core_config, + announce_handler, + scrape_handler, + in_memory_torrent_repository, + in_memory_whitelist, + whitelist_authorization, + }, + CoreUdpTrackerServices { udp_stats_event_sender }, + ) + } + + pub(crate) fn sample_ipv4_remote_addr() -> SocketAddr { + sample_ipv4_socket_address() + } + + pub(crate) fn sample_ipv4_remote_addr_fingerprint() -> u64 { + gen_remote_fingerprint(&sample_ipv4_socket_address()) + } + + pub(crate) fn sample_ipv6_remote_addr() -> SocketAddr { + sample_ipv6_socket_address() + } + + pub(crate) fn sample_ipv6_remote_addr_fingerprint() -> u64 { + gen_remote_fingerprint(&sample_ipv6_socket_address()) + } + + pub(crate) fn sample_ipv4_socket_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + } + + fn sample_ipv6_socket_address() -> SocketAddr { + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + } + + pub(crate) fn sample_issue_time() -> f64 { + 1_000_000_000_f64 + } + + pub(crate) fn sample_cookie_valid_range() -> Range { + sample_issue_time() - 10.0..sample_issue_time() + 10.0 + } + + #[derive(Debug, Default)] + pub(crate) struct TorrentPeerBuilder { + peer: peer::Peer, + } + + impl TorrentPeerBuilder { + #[must_use] + pub fn new() -> Self { + Self { + peer: peer::Peer { + updated: CurrentClock::now(), + ..Default::default() + }, + } + } + + #[must_use] + pub fn 
with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; + self + } + + #[must_use] + pub fn with_peer_id(mut self, peer_id: PeerId) -> Self { + self.peer.peer_id = peer_id; + self + } + + #[must_use] + pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes::new(left); + self + } + + #[must_use] + pub fn into(self) -> peer::Peer { + self.peer + } + } + + pub(crate) struct TrackerConfigurationBuilder { + configuration: Configuration, + } + + impl TrackerConfigurationBuilder { + pub fn default() -> TrackerConfigurationBuilder { + let default_configuration = default_testing_tracker_configuration(); + TrackerConfigurationBuilder { + configuration: default_configuration, + } + } + + pub fn with_external_ip(mut self, external_ip: &str) -> Self { + self.configuration.core.net.external_ip = Some(external_ip.to_owned().parse().expect("valid IP address")); + self + } + + pub fn into(self) -> Configuration { + self.configuration + } + } + + mock! { + pub(crate) UdpStatsEventSender {} + impl udp_tracker_core::statistics::event::sender::Sender for UdpStatsEventSender { + fn send_event(&self, event: udp_tracker_core::statistics::event::Event) -> BoxFuture<'static,Option > > > ; + } + } +} diff --git a/src/servers/udp/handlers/scrape.rs b/src/servers/udp/handlers/scrape.rs new file mode 100644 index 000000000..2c8ca335a --- /dev/null +++ b/src/servers/udp/handlers/scrape.rs @@ -0,0 +1,429 @@ +//! UDP tracker scrape handler. 
+use std::net::SocketAddr; +use std::ops::Range; +use std::sync::Arc; + +use aquatic_udp_protocol::{ + NumberOfDownloads, NumberOfPeers, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, +}; +use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::scrape_handler::ScrapeHandler; +use tracing::{instrument, Level}; +use zerocopy::network_endian::I32; + +use crate::packages::udp_tracker_core; +use crate::servers::udp::connection_cookie::check; +use crate::servers::udp::error::Error; +use crate::servers::udp::handlers::gen_remote_fingerprint; + +/// It handles the `Scrape` request. Refer to [`Scrape`](crate::servers::udp#scrape) +/// request for more information. +/// +/// # Errors +/// +/// This function does not ever return an error. +#[instrument(fields(transaction_id, connection_id), skip(scrape_handler, opt_udp_stats_event_sender), ret(level = Level::TRACE))] +pub async fn handle_scrape( + remote_addr: SocketAddr, + request: &ScrapeRequest, + scrape_handler: &Arc, + opt_udp_stats_event_sender: &Arc>>, + cookie_valid_range: Range, +) -> Result { + tracing::Span::current() + .record("transaction_id", request.transaction_id.0.to_string()) + .record("connection_id", request.connection_id.0.to_string()); + + tracing::trace!("handle scrape"); + + check( + &request.connection_id, + gen_remote_fingerprint(&remote_addr), + cookie_valid_range, + ) + .map_err(|e| (e, request.transaction_id))?; + + // Convert from aquatic infohashes + let mut info_hashes: Vec = vec![]; + for info_hash in &request.info_hashes { + info_hashes.push((*info_hash).into()); + } + + let scrape_data = scrape_handler.scrape(&info_hashes).await; + + let mut torrent_stats: Vec = Vec::new(); + + for file in &scrape_data.files { + let swarm_metadata = file.1; + + #[allow(clippy::cast_possible_truncation)] + let scrape_entry = { + TorrentScrapeStatistics { + seeders: NumberOfPeers(I32::new(i64::from(swarm_metadata.complete) as i32)), + completed: 
NumberOfDownloads(I32::new(i64::from(swarm_metadata.downloaded) as i32)), + leechers: NumberOfPeers(I32::new(i64::from(swarm_metadata.incomplete) as i32)), + } + }; + + torrent_stats.push(scrape_entry); + } + + if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { + match remote_addr { + SocketAddr::V4(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp4Scrape) + .await; + } + SocketAddr::V6(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp6Scrape) + .await; + } + } + } + + let response = ScrapeResponse { + transaction_id: request.transaction_id, + torrent_stats, + }; + + Ok(Response::from(response)) +} + +#[cfg(test)] +mod tests { + + mod scrape_request { + use std::net::SocketAddr; + use std::sync::Arc; + + use aquatic_udp_protocol::{ + InfoHash, NumberOfDownloads, NumberOfPeers, PeerId, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, + TransactionId, + }; + use bittorrent_tracker_core::scrape_handler::ScrapeHandler; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + + use crate::packages; + use crate::servers::udp::connection_cookie::make; + use crate::servers::udp::handlers::tests::{ + initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, + sample_issue_time, TorrentPeerBuilder, + }; + use crate::servers::udp::handlers::{gen_remote_fingerprint, handle_scrape}; + + fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { + TorrentScrapeStatistics { + seeders: NumberOfPeers(0.into()), + completed: NumberOfDownloads(0.into()), + leechers: NumberOfPeers(0.into()), + } + } + + #[tokio::test] + async fn should_return_no_stats_when_the_tracker_does_not_have_any_torrent() { + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + + let remote_addr = sample_ipv4_remote_addr(); + + let 
info_hash = InfoHash([0u8; 20]); + let info_hashes = vec![info_hash]; + + let request = ScrapeRequest { + connection_id: make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap(), + transaction_id: TransactionId(0i32.into()), + info_hashes, + }; + + let response = handle_scrape( + remote_addr, + &request, + &core_tracker_services.scrape_handler, + &core_udp_tracker_services.udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + + let expected_torrent_stats = vec![zeroed_torrent_statistics()]; + + assert_eq!( + response, + Response::from(ScrapeResponse { + transaction_id: request.transaction_id, + torrent_stats: expected_torrent_stats + }) + ); + } + + async fn add_a_seeder( + in_memory_torrent_repository: Arc, + remote_addr: &SocketAddr, + info_hash: &InfoHash, + ) { + let peer_id = PeerId([255u8; 20]); + + let peer = TorrentPeerBuilder::new() + .with_peer_id(peer_id) + .with_peer_address(*remote_addr) + .with_number_of_bytes_left(0) + .into(); + + let () = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer); + } + + fn build_scrape_request(remote_addr: &SocketAddr, info_hash: &InfoHash) -> ScrapeRequest { + let info_hashes = vec![*info_hash]; + + ScrapeRequest { + connection_id: make(gen_remote_fingerprint(remote_addr), sample_issue_time()).unwrap(), + transaction_id: TransactionId::new(0i32), + info_hashes, + } + } + + async fn add_a_sample_seeder_and_scrape( + in_memory_torrent_repository: Arc, + scrape_handler: Arc, + ) -> Response { + let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + let remote_addr = sample_ipv4_remote_addr(); + let info_hash = InfoHash([0u8; 20]); + + add_a_seeder(in_memory_torrent_repository.clone(), &remote_addr, &info_hash).await; + + let request = build_scrape_request(&remote_addr, &info_hash); + + handle_scrape( + remote_addr, + &request, + 
&scrape_handler, + &udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap() + } + + fn match_scrape_response(response: Response) -> Option { + match response { + Response::Scrape(scrape_response) => Some(scrape_response), + _ => None, + } + } + + mod with_a_public_tracker { + use aquatic_udp_protocol::{NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; + + use crate::servers::udp::handlers::scrape::tests::scrape_request::{ + add_a_sample_seeder_and_scrape, match_scrape_response, + }; + use crate::servers::udp::handlers::tests::initialize_core_tracker_services_for_public_tracker; + + #[tokio::test] + async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { + let (core_tracker_services, _core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + + let torrent_stats = match_scrape_response( + add_a_sample_seeder_and_scrape( + core_tracker_services.in_memory_torrent_repository.clone(), + core_tracker_services.scrape_handler.clone(), + ) + .await, + ); + + let expected_torrent_stats = vec![TorrentScrapeStatistics { + seeders: NumberOfPeers(1.into()), + completed: NumberOfDownloads(0.into()), + leechers: NumberOfPeers(0.into()), + }]; + + assert_eq!(torrent_stats.unwrap().torrent_stats, expected_torrent_stats); + } + } + + mod with_a_whitelisted_tracker { + use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; + + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::scrape::tests::scrape_request::{ + add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, + }; + use crate::servers::udp::handlers::tests::{ + initialize_core_tracker_services_for_listed_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, + }; + + #[tokio::test] + async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { + let (core_tracker_services, 
core_udp_tracker_services) = initialize_core_tracker_services_for_listed_tracker(); + + let remote_addr = sample_ipv4_remote_addr(); + let info_hash = InfoHash([0u8; 20]); + + add_a_seeder( + core_tracker_services.in_memory_torrent_repository.clone(), + &remote_addr, + &info_hash, + ) + .await; + + core_tracker_services.in_memory_whitelist.add(&info_hash.0.into()).await; + + let request = build_scrape_request(&remote_addr, &info_hash); + + let torrent_stats = match_scrape_response( + handle_scrape( + remote_addr, + &request, + &core_tracker_services.scrape_handler, + &core_udp_tracker_services.udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(), + ) + .unwrap(); + + let expected_torrent_stats = vec![TorrentScrapeStatistics { + seeders: NumberOfPeers(1.into()), + completed: NumberOfDownloads(0.into()), + leechers: NumberOfPeers(0.into()), + }]; + + assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); + } + + #[tokio::test] + async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { + let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_listed_tracker(); + + let remote_addr = sample_ipv4_remote_addr(); + let info_hash = InfoHash([0u8; 20]); + + add_a_seeder( + core_tracker_services.in_memory_torrent_repository.clone(), + &remote_addr, + &info_hash, + ) + .await; + + let request = build_scrape_request(&remote_addr, &info_hash); + + let torrent_stats = match_scrape_response( + handle_scrape( + remote_addr, + &request, + &core_tracker_services.scrape_handler, + &core_udp_tracker_services.udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(), + ) + .unwrap(); + + let expected_torrent_stats = vec![zeroed_torrent_statistics()]; + + assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); + } + } + + fn sample_scrape_request(remote_addr: &SocketAddr) -> ScrapeRequest { + let info_hash = InfoHash([0u8; 20]); + let info_hashes = 
vec![info_hash]; + + ScrapeRequest { + connection_id: make(gen_remote_fingerprint(remote_addr), sample_issue_time()).unwrap(), + transaction_id: TransactionId(0i32.into()), + info_hashes, + } + } + + mod using_ipv4 { + use std::future; + use std::sync::Arc; + + use mockall::predicate::eq; + + use super::sample_scrape_request; + use crate::packages::udp_tracker_core; + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::{ + initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, + sample_ipv4_remote_addr, MockUdpStatsEventSender, + }; + + #[tokio::test] + async fn should_send_the_upd4_scrape_event() { + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp4Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + + let remote_addr = sample_ipv4_remote_addr(); + + let (core_tracker_services, _core_udp_tracker_services) = + initialize_core_tracker_services_for_default_tracker_configuration(); + + handle_scrape( + remote_addr, + &sample_scrape_request(&remote_addr), + &core_tracker_services.scrape_handler, + &udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + } + } + + mod using_ipv6 { + use std::future; + use std::sync::Arc; + + use mockall::predicate::eq; + + use super::sample_scrape_request; + use crate::packages::udp_tracker_core; + use crate::servers::udp::handlers::handle_scrape; + use crate::servers::udp::handlers::tests::{ + initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, + sample_ipv6_remote_addr, MockUdpStatsEventSender, + }; + + #[tokio::test] + async fn should_send_the_upd6_scrape_event() { + let mut udp_stats_event_sender_mock = 
MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp6Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + + let remote_addr = sample_ipv6_remote_addr(); + + let (core_tracker_services, _core_udp_tracker_services) = + initialize_core_tracker_services_for_default_tracker_configuration(); + + handle_scrape( + remote_addr, + &sample_scrape_request(&remote_addr), + &core_tracker_services.scrape_handler, + &udp_stats_event_sender, + sample_cookie_valid_range(), + ) + .await + .unwrap(); + } + } + } +} From 3c07b260313bb088682e8378e7f4393340c85fc5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Feb 2025 13:44:19 +0000 Subject: [PATCH 255/802] refactor: [#1268] extract servers::udp::services::announce service --- src/servers/udp/handlers/announce.rs | 26 ++++++--------- src/servers/udp/mod.rs | 1 + src/servers/udp/services/announce.rs | 48 ++++++++++++++++++++++++++++ src/servers/udp/services/mod.rs | 2 ++ src/servers/udp/services/scrape.rs | 0 5 files changed, 60 insertions(+), 17 deletions(-) create mode 100644 src/servers/udp/services/announce.rs create mode 100644 src/servers/udp/services/mod.rs create mode 100644 src/servers/udp/services/scrape.rs diff --git a/src/servers/udp/handlers/announce.rs b/src/servers/udp/handlers/announce.rs index 79fb91f49..ecc4ba88f 100644 --- a/src/servers/udp/handlers/announce.rs +++ b/src/servers/udp/handlers/announce.rs @@ -18,7 +18,7 @@ use crate::packages::udp_tracker_core; use crate::servers::udp::connection_cookie::check; use crate::servers::udp::error::Error; use crate::servers::udp::handlers::gen_remote_fingerprint; -use crate::servers::udp::peer_builder; +use crate::servers::udp::{peer_builder, services}; /// It handles the `Announce` request. 
Refer to [`Announce`](crate::servers::udp#announce) /// request for more information. @@ -66,22 +66,14 @@ pub async fn handle_announce( let mut peer = peer_builder::from_request(request, &remote_client_ip); let peers_wanted: PeersWanted = i32::from(request.peers_wanted.0).into(); - let response = announce_handler.announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted); - - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { - match remote_client_ip { - IpAddr::V4(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp4Announce) - .await; - } - IpAddr::V6(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp6Announce) - .await; - } - } - } + let response = services::announce::invoke( + announce_handler.clone(), + opt_udp_stats_event_sender.clone(), + info_hash, + &mut peer, + &peers_wanted, + ) + .await; #[allow(clippy::cast_possible_truncation)] if remote_addr.is_ipv4() { diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index b141cc322..604fee8fe 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -642,6 +642,7 @@ pub mod error; pub mod handlers; pub mod peer_builder; pub mod server; +pub mod services; pub const UDP_TRACKER_LOG_TARGET: &str = "UDP TRACKER"; diff --git a/src/servers/udp/services/announce.rs b/src/servers/udp/services/announce.rs new file mode 100644 index 000000000..317b1afef --- /dev/null +++ b/src/servers/udp/services/announce.rs @@ -0,0 +1,48 @@ +//! The `announce` service. +//! +//! The service is responsible for handling the `announce` requests. +//! +//! It delegates the `announce` logic to the [`AnnounceHandler`] and it returns +//! the [`AnnounceData`]. +//! +//! It also sends an [`http_tracker_core::statistics::event::Event`] +//! because events are specific for the HTTP tracker. 
+use std::net::IpAddr; +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; +use torrust_tracker_primitives::core::AnnounceData; +use torrust_tracker_primitives::peer; + +use crate::packages::udp_tracker_core; + +pub async fn invoke( + announce_handler: Arc, + opt_udp_stats_event_sender: Arc>>, + info_hash: InfoHash, + peer: &mut peer::Peer, + peers_wanted: &PeersWanted, +) -> AnnounceData { + let original_peer_ip = peer.peer_addr.ip(); + + // The tracker could change the original peer ip + let announce_data = announce_handler.announce(&info_hash, peer, &original_peer_ip, peers_wanted); + + if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { + match original_peer_ip { + IpAddr::V4(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp4Announce) + .await; + } + IpAddr::V6(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp6Announce) + .await; + } + } + } + + announce_data +} diff --git a/src/servers/udp/services/mod.rs b/src/servers/udp/services/mod.rs new file mode 100644 index 000000000..776d2dfbf --- /dev/null +++ b/src/servers/udp/services/mod.rs @@ -0,0 +1,2 @@ +pub mod announce; +pub mod scrape; diff --git a/src/servers/udp/services/scrape.rs b/src/servers/udp/services/scrape.rs new file mode 100644 index 000000000..e69de29bb From dec742e2326a9e9861a1b26907c1cbfe0c127838 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Feb 2025 13:52:46 +0000 Subject: [PATCH 256/802] refactor: [#1268] extract servers::udp::services::scrape service --- src/servers/udp/handlers/scrape.rs | 18 ++---------- src/servers/udp/services/announce.rs | 2 +- src/servers/udp/services/scrape.rs | 43 ++++++++++++++++++++++++++++ 3 files changed, 46 insertions(+), 17 deletions(-) diff --git a/src/servers/udp/handlers/scrape.rs b/src/servers/udp/handlers/scrape.rs index 
2c8ca335a..d68ca07dd 100644 --- a/src/servers/udp/handlers/scrape.rs +++ b/src/servers/udp/handlers/scrape.rs @@ -15,6 +15,7 @@ use crate::packages::udp_tracker_core; use crate::servers::udp::connection_cookie::check; use crate::servers::udp::error::Error; use crate::servers::udp::handlers::gen_remote_fingerprint; +use crate::servers::udp::services; /// It handles the `Scrape` request. Refer to [`Scrape`](crate::servers::udp#scrape) /// request for more information. @@ -49,7 +50,7 @@ pub async fn handle_scrape( info_hashes.push((*info_hash).into()); } - let scrape_data = scrape_handler.scrape(&info_hashes).await; + let scrape_data = services::scrape::invoke(scrape_handler, opt_udp_stats_event_sender, &info_hashes, remote_addr).await; let mut torrent_stats: Vec = Vec::new(); @@ -68,21 +69,6 @@ pub async fn handle_scrape( torrent_stats.push(scrape_entry); } - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { - match remote_addr { - SocketAddr::V4(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp4Scrape) - .await; - } - SocketAddr::V6(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp6Scrape) - .await; - } - } - } - let response = ScrapeResponse { transaction_id: request.transaction_id, torrent_stats, diff --git a/src/servers/udp/services/announce.rs b/src/servers/udp/services/announce.rs index 317b1afef..8a046a625 100644 --- a/src/servers/udp/services/announce.rs +++ b/src/servers/udp/services/announce.rs @@ -5,7 +5,7 @@ //! It delegates the `announce` logic to the [`AnnounceHandler`] and it returns //! the [`AnnounceData`]. //! -//! It also sends an [`http_tracker_core::statistics::event::Event`] +//! It also sends an [`udp_tracker_core::statistics::event::Event`] //! because events are specific for the HTTP tracker. 
use std::net::IpAddr; use std::sync::Arc; diff --git a/src/servers/udp/services/scrape.rs b/src/servers/udp/services/scrape.rs index e69de29bb..7d4897564 100644 --- a/src/servers/udp/services/scrape.rs +++ b/src/servers/udp/services/scrape.rs @@ -0,0 +1,43 @@ +//! The `scrape` service. +//! +//! The service is responsible for handling the `scrape` requests. +//! +//! It delegates the `scrape` logic to the [`ScrapeHandler`] and it returns the +//! [`ScrapeData`]. +//! +//! It also sends an [`udp_tracker_core::statistics::event::Event`] +//! because events are specific for the UDP tracker. +use std::net::SocketAddr; +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::scrape_handler::ScrapeHandler; +use torrust_tracker_primitives::core::ScrapeData; + +use crate::packages::udp_tracker_core; + +pub async fn invoke( + scrape_handler: &Arc, + opt_udp_stats_event_sender: &Arc>>, + info_hashes: &Vec, + remote_addr: SocketAddr, +) -> ScrapeData { + let scrape_data = scrape_handler.scrape(info_hashes).await; + + if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { + match remote_addr { + SocketAddr::V4(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp4Scrape) + .await; + } + SocketAddr::V6(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp6Scrape) + .await; + } + } + } + + scrape_data +} From 73753e31f2626ff694bb0ea8994cce2877e2a637 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Feb 2025 15:41:25 +0000 Subject: [PATCH 257/802] [#1268] move http services to http_tracker_core package --- src/packages/http_tracker_core/mod.rs | 1 + .../http_tracker_core}/services/announce.rs | 6 +++--- .../http_tracker_core}/services/mod.rs | 0 .../http_tracker_core}/services/scrape.rs | 16 ++++++++-------- src/servers/http/v1/handlers/announce.rs | 2 +- src/servers/http/v1/handlers/scrape.rs | 2 +- 
src/servers/http/v1/mod.rs | 1 - 7 files changed, 14 insertions(+), 14 deletions(-) rename src/{servers/http/v1 => packages/http_tracker_core}/services/announce.rs (98%) rename src/{servers/http/v1 => packages/http_tracker_core}/services/mod.rs (100%) rename src/{servers/http/v1 => packages/http_tracker_core}/services/scrape.rs (97%) diff --git a/src/packages/http_tracker_core/mod.rs b/src/packages/http_tracker_core/mod.rs index 3449ec7b4..4f3e54857 100644 --- a/src/packages/http_tracker_core/mod.rs +++ b/src/packages/http_tracker_core/mod.rs @@ -1 +1,2 @@ +pub mod services; pub mod statistics; diff --git a/src/servers/http/v1/services/announce.rs b/src/packages/http_tracker_core/services/announce.rs similarity index 98% rename from src/servers/http/v1/services/announce.rs rename to src/packages/http_tracker_core/services/announce.rs index e321ad01f..67b5997b3 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/packages/http_tracker_core/services/announce.rs @@ -164,11 +164,11 @@ mod tests { use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; use crate::packages::http_tracker_core; - use crate::servers::http::test_helpers::tests::sample_info_hash; - use crate::servers::http::v1::services::announce::invoke; - use crate::servers::http::v1::services::announce::tests::{ + use crate::packages::http_tracker_core::services::announce::invoke; + use crate::packages::http_tracker_core::services::announce::tests::{ initialize_core_tracker_services, sample_peer, MockHttpStatsEventSender, }; + use crate::servers::http::test_helpers::tests::sample_info_hash; fn initialize_announce_handler() -> Arc { let config = configuration::ephemeral(); diff --git a/src/servers/http/v1/services/mod.rs b/src/packages/http_tracker_core/services/mod.rs similarity index 100% rename from src/servers/http/v1/services/mod.rs rename to src/packages/http_tracker_core/services/mod.rs diff --git a/src/servers/http/v1/services/scrape.rs 
b/src/packages/http_tracker_core/services/scrape.rs similarity index 97% rename from src/servers/http/v1/services/scrape.rs rename to src/packages/http_tracker_core/services/scrape.rs index e2eb4f87c..8ce83212e 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/packages/http_tracker_core/services/scrape.rs @@ -161,13 +161,13 @@ mod tests { use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::packages::{self, http_tracker_core}; - use crate::servers::http::test_helpers::tests::sample_info_hash; - use crate::servers::http::v1::services::scrape::invoke; - use crate::servers::http::v1::services::scrape::tests::{ + use crate::packages::http_tracker_core::services::scrape::invoke; + use crate::packages::http_tracker_core::services::scrape::tests::{ initialize_announce_and_scrape_handlers_for_public_tracker, initialize_scrape_handler, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; + use crate::packages::{self, http_tracker_core}; + use crate::servers::http::test_helpers::tests::sample_info_hash; #[tokio::test] async fn it_should_return_the_scrape_data_for_a_torrent() { @@ -247,12 +247,12 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_primitives::core::ScrapeData; - use crate::packages::{self, http_tracker_core}; - use crate::servers::http::test_helpers::tests::sample_info_hash; - use crate::servers::http::v1::services::scrape::fake; - use crate::servers::http::v1::services::scrape::tests::{ + use crate::packages::http_tracker_core::services::scrape::fake; + use crate::packages::http_tracker_core::services::scrape::tests::{ initialize_announce_and_scrape_handlers_for_public_tracker, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; + use crate::packages::{self, http_tracker_core}; + use crate::servers::http::test_helpers::tests::sample_info_hash; #[tokio::test] async fn it_should_always_return_the_zeroed_scrape_data_for_a_torrent() { diff --git 
a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 64939ff48..ffc6a7b0a 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -28,11 +28,11 @@ use torrust_tracker_primitives::peer; use super::common::auth::map_auth_error_to_error_response; use crate::packages::http_tracker_core; +use crate::packages::http_tracker_core::services::{self}; use crate::servers::http::v1::extractors::announce_request::ExtractRequest; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; use crate::servers::http::v1::handlers::common::auth; -use crate::servers::http::v1::services::{self}; use crate::CurrentClock; /// It handles the `announce` request when the HTTP tracker does not require diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 09af385fb..d2f4f9e0f 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -20,10 +20,10 @@ use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; use crate::packages::http_tracker_core; +use crate::packages::http_tracker_core::services; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; use crate::servers::http::v1::extractors::scrape_request::ExtractRequest; -use crate::servers::http::v1::services; /// It handles the `scrape` request when the HTTP tracker is configured /// to run in `public` mode. 
diff --git a/src/servers/http/v1/mod.rs b/src/servers/http/v1/mod.rs index 48dac5663..6e9530cb0 100644 --- a/src/servers/http/v1/mod.rs +++ b/src/servers/http/v1/mod.rs @@ -5,4 +5,3 @@ pub mod extractors; pub mod handlers; pub mod routes; -pub mod services; From e48aaf51db7d7b84899c66149ecde2c6facb0615 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Feb 2025 15:47:28 +0000 Subject: [PATCH 258/802] [#1268] move udp services to udp_tracker_core package --- src/packages/udp_tracker_core/mod.rs | 1 + .../udp => packages/udp_tracker_core}/services/announce.rs | 0 .../udp => packages/udp_tracker_core}/services/mod.rs | 0 .../udp => packages/udp_tracker_core}/services/scrape.rs | 0 src/servers/udp/handlers/announce.rs | 4 ++-- src/servers/udp/handlers/scrape.rs | 2 +- src/servers/udp/mod.rs | 1 - 7 files changed, 4 insertions(+), 4 deletions(-) rename src/{servers/udp => packages/udp_tracker_core}/services/announce.rs (100%) rename src/{servers/udp => packages/udp_tracker_core}/services/mod.rs (100%) rename src/{servers/udp => packages/udp_tracker_core}/services/scrape.rs (100%) diff --git a/src/packages/udp_tracker_core/mod.rs b/src/packages/udp_tracker_core/mod.rs index 3449ec7b4..4f3e54857 100644 --- a/src/packages/udp_tracker_core/mod.rs +++ b/src/packages/udp_tracker_core/mod.rs @@ -1 +1,2 @@ +pub mod services; pub mod statistics; diff --git a/src/servers/udp/services/announce.rs b/src/packages/udp_tracker_core/services/announce.rs similarity index 100% rename from src/servers/udp/services/announce.rs rename to src/packages/udp_tracker_core/services/announce.rs diff --git a/src/servers/udp/services/mod.rs b/src/packages/udp_tracker_core/services/mod.rs similarity index 100% rename from src/servers/udp/services/mod.rs rename to src/packages/udp_tracker_core/services/mod.rs diff --git a/src/servers/udp/services/scrape.rs b/src/packages/udp_tracker_core/services/scrape.rs similarity index 100% rename from src/servers/udp/services/scrape.rs rename to 
src/packages/udp_tracker_core/services/scrape.rs diff --git a/src/servers/udp/handlers/announce.rs b/src/servers/udp/handlers/announce.rs index ecc4ba88f..26a1a2116 100644 --- a/src/servers/udp/handlers/announce.rs +++ b/src/servers/udp/handlers/announce.rs @@ -14,11 +14,11 @@ use torrust_tracker_configuration::Core; use tracing::{instrument, Level}; use zerocopy::network_endian::I32; -use crate::packages::udp_tracker_core; +use crate::packages::udp_tracker_core::{self, services}; use crate::servers::udp::connection_cookie::check; use crate::servers::udp::error::Error; use crate::servers::udp::handlers::gen_remote_fingerprint; -use crate::servers::udp::{peer_builder, services}; +use crate::servers::udp::peer_builder; /// It handles the `Announce` request. Refer to [`Announce`](crate::servers::udp#announce) /// request for more information. diff --git a/src/servers/udp/handlers/scrape.rs b/src/servers/udp/handlers/scrape.rs index d68ca07dd..3b5ccf50d 100644 --- a/src/servers/udp/handlers/scrape.rs +++ b/src/servers/udp/handlers/scrape.rs @@ -12,10 +12,10 @@ use tracing::{instrument, Level}; use zerocopy::network_endian::I32; use crate::packages::udp_tracker_core; +use crate::packages::udp_tracker_core::services; use crate::servers::udp::connection_cookie::check; use crate::servers::udp::error::Error; use crate::servers::udp::handlers::gen_remote_fingerprint; -use crate::servers::udp::services; /// It handles the `Scrape` request. Refer to [`Scrape`](crate::servers::udp#scrape) /// request for more information. 
diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index 604fee8fe..b141cc322 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -642,7 +642,6 @@ pub mod error; pub mod handlers; pub mod peer_builder; pub mod server; -pub mod services; pub const UDP_TRACKER_LOG_TARGET: &str = "UDP TRACKER"; From 74815abeb78198e0cc234e47a4af0633247232cf Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Feb 2025 16:31:15 +0000 Subject: [PATCH 259/802] refactor: [#1268] move announce logic from axum to http_tracker_core package --- Cargo.lock | 1 + packages/http-protocol/Cargo.toml | 1 + packages/http-protocol/src/lib.rs | 13 ++++ .../http-protocol/src/v1/requests/announce.rs | 33 ++++++++- .../http_tracker_core/services/announce.rs | 53 ++++++++++++++ src/servers/http/v1/handlers/announce.rs | 69 ++++--------------- 6 files changed, 113 insertions(+), 57 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2f99db113..408471efc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -560,6 +560,7 @@ dependencies = [ "serde", "serde_bencode", "thiserror 2.0.11", + "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-contrib-bencode", "torrust-tracker-located-error", diff --git a/packages/http-protocol/Cargo.toml b/packages/http-protocol/Cargo.toml index 2d0cabf51..e76094c1a 100644 --- a/packages/http-protocol/Cargo.toml +++ b/packages/http-protocol/Cargo.toml @@ -24,6 +24,7 @@ percent-encoding = "2" serde = { version = "1", features = ["derive"] } serde_bencode = "0" thiserror = "2" +torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-contrib-bencode = { version = "3.0.0-develop", path = "../../contrib/bencode" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } diff --git a/packages/http-protocol/src/lib.rs b/packages/http-protocol/src/lib.rs index 6525a6dca..326a5b182 100644 
--- a/packages/http-protocol/src/lib.rs +++ b/packages/http-protocol/src/lib.rs @@ -1,3 +1,16 @@ //! Primitive types and function for `BitTorrent` HTTP trackers. pub mod percent_encoding; pub mod v1; + +use torrust_tracker_clock::clock; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; diff --git a/packages/http-protocol/src/v1/requests/announce.rs b/packages/http-protocol/src/v1/requests/announce.rs index 9bde7ec13..f293b9cf5 100644 --- a/packages/http-protocol/src/v1/requests/announce.rs +++ b/packages/http-protocol/src/v1/requests/announce.rs @@ -2,18 +2,21 @@ //! //! Data structures and logic for parsing the `announce` request. use std::fmt; +use std::net::{IpAddr, SocketAddr}; use std::panic::Location; use std::str::FromStr; -use aquatic_udp_protocol::{NumberOfBytes, PeerId}; +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::{self, InfoHash}; use thiserror::Error; +use torrust_tracker_clock::clock::Time; use torrust_tracker_located_error::{Located, LocatedError}; use torrust_tracker_primitives::peer; use crate::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::v1::query::{ParseQueryError, Query}; use crate::v1::responses; +use crate::CurrentClock; // Query param names const INFO_HASH: &str = "info_hash"; @@ -373,6 +376,34 @@ fn extract_numwant(query: &Query) -> Result, ParseAnnounceQueryError } } +/// It builds a `Peer` from the announce request. +/// +/// It ignores the peer address in the announce request params. 
+#[must_use] +pub fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> peer::Peer { + peer::Peer { + peer_id: announce_request.peer_id, + peer_addr: SocketAddr::new(*peer_ip, announce_request.port), + updated: CurrentClock::now(), + uploaded: announce_request.uploaded.unwrap_or(NumberOfBytes::new(0)), + downloaded: announce_request.downloaded.unwrap_or(NumberOfBytes::new(0)), + left: announce_request.left.unwrap_or(NumberOfBytes::new(0)), + event: map_to_torrust_event(&announce_request.event), + } +} + +#[must_use] +pub fn map_to_torrust_event(event: &Option) -> AnnounceEvent { + match event { + Some(event) => match &event { + Event::Started => AnnounceEvent::Started, + Event::Stopped => AnnounceEvent::Stopped, + Event::Completed => AnnounceEvent::Completed, + }, + None => AnnounceEvent::None, + } +} + #[cfg(test)] mod tests { diff --git a/src/packages/http_tracker_core/services/announce.rs b/src/packages/http_tracker_core/services/announce.rs index 67b5997b3..049d0d228 100644 --- a/src/packages/http_tracker_core/services/announce.rs +++ b/src/packages/http_tracker_core/services/announce.rs @@ -10,8 +10,14 @@ use std::net::IpAddr; use std::sync::Arc; +use bittorrent_http_protocol::v1::requests::announce::{peer_from_request, Announce}; +use bittorrent_http_protocol::v1::responses; +use bittorrent_http_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources}; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; +use bittorrent_tracker_core::authentication::service::AuthenticationService; +use bittorrent_tracker_core::whitelist; +use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; @@ -27,6 +33,53 @@ use crate::packages::http_tracker_core; /// > **NOTICE**: as the HTTP tracker does not requires a connection request /// > like the UDP tracker, the number of TCP connections is incremented for 
/// > each `announce` request. +/// +/// # Errors +/// +/// This function will return an error if: +/// +/// - The tracker is running in `listed` mode and the torrent is not whitelisted. +/// - There is an error when resolving the client IP address. +#[allow(clippy::too_many_arguments)] +pub async fn handle_announce( + core_config: &Arc, + announce_handler: &Arc, + _authentication_service: &Arc, + whitelist_authorization: &Arc, + opt_http_stats_event_sender: &Arc>>, + announce_request: &Announce, + client_ip_sources: &ClientIpSources, +) -> Result { + // Authorization + match whitelist_authorization.authorize(&announce_request.info_hash).await { + Ok(()) => (), + Err(error) => return Err(responses::error::Error::from(error)), + } + + let peer_ip = match peer_ip_resolver::invoke(core_config.net.on_reverse_proxy, client_ip_sources) { + Ok(peer_ip) => peer_ip, + Err(error) => return Err(responses::error::Error::from(error)), + }; + + let mut peer = peer_from_request(announce_request, &peer_ip); + + let peers_wanted = match announce_request.numwant { + Some(numwant) => PeersWanted::only(numwant), + None => PeersWanted::AsManyAsPossible, + }; + + let announce_data = invoke( + announce_handler.clone(), + opt_http_stats_event_sender.clone(), + announce_request.info_hash, + &mut peer, + &peers_wanted, + ) + .await; + + Ok(announce_data) +} + pub async fn invoke( announce_handler: Arc, opt_http_stats_event_sender: Arc>>, diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index ffc6a7b0a..977e7dd6a 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -5,35 +5,29 @@ //! //! The handlers perform the authentication and authorization of the request, //! and resolve the client IP address. 
-use std::net::{IpAddr, SocketAddr}; use std::panic::Location; use std::sync::Arc; -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use aquatic_udp_protocol::AnnounceEvent; use axum::extract::State; use axum::response::{IntoResponse, Response}; use bittorrent_http_protocol::v1::requests::announce::{Announce, Compact, Event}; use bittorrent_http_protocol::v1::responses::{self}; -use bittorrent_http_protocol::v1::services::peer_ip_resolver; use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; -use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; +use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::authentication::Key; use bittorrent_tracker_core::whitelist; use hyper::StatusCode; -use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; -use torrust_tracker_primitives::peer; use super::common::auth::map_auth_error_to_error_response; use crate::packages::http_tracker_core; -use crate::packages::http_tracker_core::services::{self}; use crate::servers::http::v1::extractors::announce_request::ExtractRequest; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; use crate::servers::http::v1::handlers::common::auth; -use crate::CurrentClock; /// It handles the `announce` request when the HTTP tracker does not require /// authentication (no PATH `key` parameter required). @@ -129,12 +123,6 @@ async fn handle( build_response(announce_request, announce_data) } -/* code-review: authentication, authorization and peer IP resolution could be moved - from the handler (Axum) layer into the app layer `services::announce::invoke`. 
- That would make the handler even simpler and the code more reusable and decoupled from Axum. - See https://github.com/torrust/torrust-tracker/discussions/240. -*/ - #[allow(clippy::too_many_arguments)] async fn handle_announce( core_config: &Arc, @@ -146,6 +134,8 @@ async fn handle_announce( client_ip_sources: &ClientIpSources, maybe_key: Option, ) -> Result { + // todo: move authentication inside `http_tracker_core::services::announce::handle_announce` + // Authentication if core_config.private { match maybe_key { @@ -161,33 +151,16 @@ async fn handle_announce( } } - // Authorization - match whitelist_authorization.authorize(&announce_request.info_hash).await { - Ok(()) => (), - Err(error) => return Err(responses::error::Error::from(error)), - } - - let peer_ip = match peer_ip_resolver::invoke(core_config.net.on_reverse_proxy, client_ip_sources) { - Ok(peer_ip) => peer_ip, - Err(error) => return Err(responses::error::Error::from(error)), - }; - - let mut peer = peer_from_request(announce_request, &peer_ip); - let peers_wanted = match announce_request.numwant { - Some(numwant) => PeersWanted::only(numwant), - None => PeersWanted::AsManyAsPossible, - }; - - let announce_data = services::announce::invoke( - announce_handler.clone(), - opt_http_stats_event_sender.clone(), - announce_request.info_hash, - &mut peer, - &peers_wanted, + http_tracker_core::services::announce::handle_announce( + &core_config.clone(), + &announce_handler.clone(), + &authentication_service.clone(), + &whitelist_authorization.clone(), + &opt_http_stats_event_sender.clone(), + announce_request, + client_ip_sources, ) - .await; - - Ok(announce_data) + .await } fn build_response(announce_request: &Announce, announce_data: AnnounceData) -> Response { @@ -202,22 +175,6 @@ fn build_response(announce_request: &Announce, announce_data: AnnounceData) -> R } } -/// It builds a `Peer` from the announce request. -/// -/// It ignores the peer address in the announce request params. 
-#[must_use] -fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> peer::Peer { - peer::Peer { - peer_id: announce_request.peer_id, - peer_addr: SocketAddr::new(*peer_ip, announce_request.port), - updated: CurrentClock::now(), - uploaded: announce_request.uploaded.unwrap_or(NumberOfBytes::new(0)), - downloaded: announce_request.downloaded.unwrap_or(NumberOfBytes::new(0)), - left: announce_request.left.unwrap_or(NumberOfBytes::new(0)), - event: map_to_torrust_event(&announce_request.event), - } -} - #[must_use] pub fn map_to_aquatic_event(event: &Option) -> aquatic_udp_protocol::AnnounceEvent { match event { From 37a142efcea0d1c85a7a16ec67d0847414855171 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Feb 2025 16:45:35 +0000 Subject: [PATCH 260/802] refactor: [#1268] move scrape logic from axum to http_tracker_core package --- .../http_tracker_core/services/scrape.rs | 42 +++++++++++++++++++ src/servers/http/v1/handlers/scrape.rs | 40 ++++++------------ 2 files changed, 55 insertions(+), 27 deletions(-) diff --git a/src/packages/http_tracker_core/services/scrape.rs b/src/packages/http_tracker_core/services/scrape.rs index 8ce83212e..62f5fdf62 100644 --- a/src/packages/http_tracker_core/services/scrape.rs +++ b/src/packages/http_tracker_core/services/scrape.rs @@ -10,8 +10,13 @@ use std::net::IpAddr; use std::sync::Arc; +use bittorrent_http_protocol::v1::requests::scrape::Scrape; +use bittorrent_http_protocol::v1::responses; +use bittorrent_http_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources}; use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; +use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; use crate::packages::http_tracker_core; @@ -26,6 +31,43 @@ use crate::packages::http_tracker_core; /// > **NOTICE**: as the HTTP tracker does not requires a 
connection request /// > like the UDP tracker, the number of TCP connections is incremented for /// > each `scrape` request. +/// +/// # Errors +/// +/// This function will return an error if: +/// +/// - There is an error when resolving the client IP address. +#[allow(clippy::too_many_arguments)] +pub async fn handle_scrape( + core_config: &Arc, + scrape_handler: &Arc, + _authentication_service: &Arc, + opt_http_stats_event_sender: &Arc>>, + scrape_request: &Scrape, + client_ip_sources: &ClientIpSources, + return_real_scrape_data: bool, +) -> Result { + // Authorization for scrape requests is handled at the `http_tracker_core` + // level for each torrent. + + let peer_ip = match peer_ip_resolver::invoke(core_config.net.on_reverse_proxy, client_ip_sources) { + Ok(peer_ip) => peer_ip, + Err(error) => return Err(responses::error::Error::from(error)), + }; + + if return_real_scrape_data { + Ok(invoke( + scrape_handler, + opt_http_stats_event_sender, + &scrape_request.info_hashes, + &peer_ip, + ) + .await) + } else { + Ok(http_tracker_core::services::scrape::fake(opt_http_stats_event_sender, &scrape_request.info_hashes, &peer_ip).await) + } +} + pub async fn invoke( scrape_handler: &Arc, opt_http_stats_event_sender: &Arc>>, diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index d2f4f9e0f..39bebe18e 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -11,7 +11,7 @@ use axum::extract::State; use axum::response::{IntoResponse, Response}; use bittorrent_http_protocol::v1::requests::scrape::Scrape; use bittorrent_http_protocol::v1::responses; -use bittorrent_http_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources}; +use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::authentication::Key; use 
bittorrent_tracker_core::scrape_handler::ScrapeHandler; @@ -20,7 +20,6 @@ use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; use crate::packages::http_tracker_core; -use crate::packages::http_tracker_core::services; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; use crate::servers::http::v1::extractors::scrape_request::ExtractRequest; @@ -111,12 +110,6 @@ async fn handle( build_response(scrape_data) } -/* code-review: authentication, authorization and peer IP resolution could be moved - from the handler (Axum) layer into the app layer `services::announce::invoke`. - That would make the handler even simpler and the code more reusable and decoupled from Axum. - See https://github.com/torrust/torrust-tracker/discussions/240. -*/ - #[allow(clippy::too_many_arguments)] async fn handle_scrape( core_config: &Arc, @@ -127,6 +120,8 @@ async fn handle_scrape( client_ip_sources: &ClientIpSources, maybe_key: Option, ) -> Result { + // todo: move authentication inside `http_tracker_core::services::scrape::handle_scrape` + // Authentication let return_real_scrape_data = if core_config.private { match maybe_key { @@ -140,25 +135,16 @@ async fn handle_scrape( true }; - // Authorization for scrape requests is handled at the `Tracker` level - // for each torrent. 
- - let peer_ip = match peer_ip_resolver::invoke(core_config.net.on_reverse_proxy, client_ip_sources) { - Ok(peer_ip) => peer_ip, - Err(error) => return Err(responses::error::Error::from(error)), - }; - - if return_real_scrape_data { - Ok(services::scrape::invoke( - scrape_handler, - opt_http_stats_event_sender, - &scrape_request.info_hashes, - &peer_ip, - ) - .await) - } else { - Ok(services::scrape::fake(opt_http_stats_event_sender, &scrape_request.info_hashes, &peer_ip).await) - } + http_tracker_core::services::scrape::handle_scrape( + core_config, + scrape_handler, + authentication_service, + opt_http_stats_event_sender, + scrape_request, + client_ip_sources, + return_real_scrape_data, + ) + .await } fn build_response(scrape_data: ScrapeData) -> Response { From c0fc390409949ac11bde35095b58f81266d66e85 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Feb 2025 17:20:19 +0000 Subject: [PATCH 261/802] refactor: [#1268] move announce logic from udp server to udp_tracker_core package --- src/packages/udp_tracker_core/mod.rs | 1 + .../udp_tracker_core}/peer_builder.rs | 0 .../udp_tracker_core/services/announce.rs | 44 ++++++++++++++++++- src/servers/udp/handlers/announce.rs | 42 +++++++----------- src/servers/udp/mod.rs | 1 - 5 files changed, 60 insertions(+), 28 deletions(-) rename src/{servers/udp => packages/udp_tracker_core}/peer_builder.rs (100%) diff --git a/src/packages/udp_tracker_core/mod.rs b/src/packages/udp_tracker_core/mod.rs index 4f3e54857..3ab1d83dd 100644 --- a/src/packages/udp_tracker_core/mod.rs +++ b/src/packages/udp_tracker_core/mod.rs @@ -1,2 +1,3 @@ +pub mod peer_builder; pub mod services; pub mod statistics; diff --git a/src/servers/udp/peer_builder.rs b/src/packages/udp_tracker_core/peer_builder.rs similarity index 100% rename from src/servers/udp/peer_builder.rs rename to src/packages/udp_tracker_core/peer_builder.rs diff --git a/src/packages/udp_tracker_core/services/announce.rs b/src/packages/udp_tracker_core/services/announce.rs 
index 8a046a625..dec506aec 100644 --- a/src/packages/udp_tracker_core/services/announce.rs +++ b/src/packages/udp_tracker_core/services/announce.rs @@ -7,15 +7,55 @@ //! //! It also sends an [`udp_tracker_core::statistics::event::Event`] //! because events are specific for the HTTP tracker. -use std::net::IpAddr; +use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; +use aquatic_udp_protocol::AnnounceRequest; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; +use bittorrent_tracker_core::error::WhitelistError; +use bittorrent_tracker_core::whitelist; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; -use crate::packages::udp_tracker_core; +use crate::packages::udp_tracker_core::{self, peer_builder}; + +/// It handles the `Announce` request. +/// +/// # Errors +/// +/// It will return an error if: +/// +/// - The tracker is running in listed mode and the torrent is not in the +/// whitelist. 
+#[allow(clippy::too_many_arguments)] +pub async fn handle_announce( + remote_addr: SocketAddr, + request: &AnnounceRequest, + announce_handler: &Arc, + whitelist_authorization: &Arc, + opt_udp_stats_event_sender: &Arc>>, +) -> Result { + let info_hash = request.info_hash.into(); + let remote_client_ip = remote_addr.ip(); + + // Authorization + whitelist_authorization.authorize(&info_hash).await?; + + let mut peer = peer_builder::from_request(request, &remote_client_ip); + let peers_wanted: PeersWanted = i32::from(request.peers_wanted.0).into(); + + let announce_data = invoke( + announce_handler.clone(), + opt_udp_stats_event_sender.clone(), + info_hash, + &mut peer, + &peers_wanted, + ) + .await; + + Ok(announce_data) +} pub async fn invoke( announce_handler: Arc, diff --git a/src/servers/udp/handlers/announce.rs b/src/servers/udp/handlers/announce.rs index 26a1a2116..2254ea979 100644 --- a/src/servers/udp/handlers/announce.rs +++ b/src/servers/udp/handlers/announce.rs @@ -8,17 +8,16 @@ use aquatic_udp_protocol::{ Port, Response, ResponsePeer, TransactionId, }; use bittorrent_primitives::info_hash::InfoHash; -use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; +use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::whitelist; use torrust_tracker_configuration::Core; use tracing::{instrument, Level}; use zerocopy::network_endian::I32; -use crate::packages::udp_tracker_core::{self, services}; +use crate::packages::udp_tracker_core::{self}; use crate::servers::udp::connection_cookie::check; use crate::servers::udp::error::Error; use crate::servers::udp::handlers::gen_remote_fingerprint; -use crate::servers::udp::peer_builder; /// It handles the `Announce` request. Refer to [`Announce`](crate::servers::udp#announce) /// request for more information. 
@@ -44,6 +43,8 @@ pub async fn handle_announce( tracing::trace!("handle announce"); + // todo: move authentication to `udp_tracker_core::services::announce::handle_announce` + check( &request.connection_id, gen_remote_fingerprint(&remote_addr), @@ -51,29 +52,20 @@ pub async fn handle_announce( ) .map_err(|e| (e, request.transaction_id))?; - let info_hash = request.info_hash.into(); - let remote_client_ip = remote_addr.ip(); - - // Authorization - whitelist_authorization - .authorize(&info_hash) - .await - .map_err(|e| Error::TrackerError { - source: (Arc::new(e) as Arc).into(), - }) - .map_err(|e| (e, request.transaction_id))?; - - let mut peer = peer_builder::from_request(request, &remote_client_ip); - let peers_wanted: PeersWanted = i32::from(request.peers_wanted.0).into(); - - let response = services::announce::invoke( - announce_handler.clone(), - opt_udp_stats_event_sender.clone(), - info_hash, - &mut peer, - &peers_wanted, + let response = udp_tracker_core::services::announce::handle_announce( + remote_addr, + request, + announce_handler, + whitelist_authorization, + opt_udp_stats_event_sender, ) - .await; + .await + .map_err(|e| Error::TrackerError { + source: (Arc::new(e) as Arc).into(), + }) + .map_err(|e| (e, request.transaction_id))?; + + // todo: extract `build_response` function. 
#[allow(clippy::cast_possible_truncation)] if remote_addr.is_ipv4() { diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index b141cc322..e8410e5f0 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -640,7 +640,6 @@ use std::net::SocketAddr; pub mod connection_cookie; pub mod error; pub mod handlers; -pub mod peer_builder; pub mod server; pub const UDP_TRACKER_LOG_TARGET: &str = "UDP TRACKER"; From eca5c597a7624d2a5edfe52e042b84a8e76998ec Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Feb 2025 17:31:51 +0000 Subject: [PATCH 262/802] refactor: [#1268] move scrape logic from udp server to udp_tracker_core package --- .../udp_tracker_core/services/scrape.rs | 17 +++++++++++++++++ src/servers/udp/handlers/scrape.rs | 13 +++++-------- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/src/packages/udp_tracker_core/services/scrape.rs b/src/packages/udp_tracker_core/services/scrape.rs index 7d4897564..e47dd35b3 100644 --- a/src/packages/udp_tracker_core/services/scrape.rs +++ b/src/packages/udp_tracker_core/services/scrape.rs @@ -10,12 +10,29 @@ use std::net::SocketAddr; use std::sync::Arc; +use aquatic_udp_protocol::ScrapeRequest; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use torrust_tracker_primitives::core::ScrapeData; use crate::packages::udp_tracker_core; +/// It handles the `Scrape` request. 
+pub async fn handle_scrape( + remote_addr: SocketAddr, + request: &ScrapeRequest, + scrape_handler: &Arc, + opt_udp_stats_event_sender: &Arc>>, +) -> ScrapeData { + // Convert from aquatic infohashes + let mut info_hashes: Vec = vec![]; + for info_hash in &request.info_hashes { + info_hashes.push((*info_hash).into()); + } + + invoke(scrape_handler, opt_udp_stats_event_sender, &info_hashes, remote_addr).await +} + pub async fn invoke( scrape_handler: &Arc, opt_udp_stats_event_sender: &Arc>>, diff --git a/src/servers/udp/handlers/scrape.rs b/src/servers/udp/handlers/scrape.rs index 3b5ccf50d..d41563add 100644 --- a/src/servers/udp/handlers/scrape.rs +++ b/src/servers/udp/handlers/scrape.rs @@ -6,13 +6,11 @@ use std::sync::Arc; use aquatic_udp_protocol::{ NumberOfDownloads, NumberOfPeers, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; -use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use tracing::{instrument, Level}; use zerocopy::network_endian::I32; use crate::packages::udp_tracker_core; -use crate::packages::udp_tracker_core::services; use crate::servers::udp::connection_cookie::check; use crate::servers::udp::error::Error; use crate::servers::udp::handlers::gen_remote_fingerprint; @@ -37,6 +35,8 @@ pub async fn handle_scrape( tracing::trace!("handle scrape"); + // todo: move authentication to `udp_tracker_core::services::scrape::handle_scrape` + check( &request.connection_id, gen_remote_fingerprint(&remote_addr), @@ -44,13 +44,10 @@ pub async fn handle_scrape( ) .map_err(|e| (e, request.transaction_id))?; - // Convert from aquatic infohashes - let mut info_hashes: Vec = vec![]; - for info_hash in &request.info_hashes { - info_hashes.push((*info_hash).into()); - } + let scrape_data = + udp_tracker_core::services::scrape::handle_scrape(remote_addr, request, scrape_handler, opt_udp_stats_event_sender).await; - let scrape_data = services::scrape::invoke(scrape_handler, 
opt_udp_stats_event_sender, &info_hashes, remote_addr).await; + // todo: extract `build_response` function. let mut torrent_stats: Vec = Vec::new(); From e92a61e8070174d370ba964e50b2d99d15b4f32e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 14 Feb 2025 18:07:39 +0000 Subject: [PATCH 263/802] chore(deps): udpate dependencies ```output cargo update Updating crates.io index Locking 9 packages to latest compatible versions Updating cc v1.2.12 -> v1.2.14 Updating clap v4.5.28 -> v4.5.29 Updating clap_builder v4.5.27 -> v4.5.29 Updating cmake v0.1.53 -> v0.1.54 Updating miniz_oxide v0.8.3 -> v0.8.4 Updating ring v0.17.8 -> v0.17.9 Updating rustls v0.23.22 -> v0.23.23 Removing spin v0.9.8 Updating toml_edit v0.22.23 -> v0.22.24 Updating winnow v0.7.1 -> v0.7.2 ``` --- Cargo.lock | 49 +++++++++++++++++++++---------------------------- 1 file changed, 21 insertions(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 408471efc..544ae8e0d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -462,7 +462,7 @@ dependencies = [ "hyper", "hyper-util", "pin-project-lite", - "rustls 0.23.22", + "rustls 0.23.23", "rustls-pemfile", "rustls-pki-types", "tokio", @@ -868,9 +868,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.12" +version = "1.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "755717a7de9ec452bf7f3f1a3099085deabd7f2962b861dae91ecd7a365903d2" +checksum = "0c3d1b2e905a3a7b00a6141adb0e4c0bb941d11caf55349d863942a1cc44e3c9" dependencies = [ "jobserver", "libc", @@ -961,9 +961,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.28" +version = "4.5.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e77c3243bd94243c03672cb5154667347c457ca271254724f9f393aee1c05ff" +checksum = "8acebd8ad879283633b343856142139f2da2317c96b05b4dd6181c61e2480184" dependencies = [ "clap_builder", "clap_derive", @@ -971,9 +971,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.27" 
+version = "4.5.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b26884eb4b57140e4d2d93652abfa49498b938b3c9179f9fc487b0acc3edad7" +checksum = "f6ba32cbda51c7e1dfd49acc1457ba1a7dec5b64fe360e828acb13ca8dc9c2f9" dependencies = [ "anstream", "anstyle", @@ -1001,9 +1001,9 @@ checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "cmake" -version = "0.1.53" +version = "0.1.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e24a03c8b52922d68a1589ad61032f2c1aa5a8158d2aa0d93c6e9534944bbad6" +checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" dependencies = [ "cc", ] @@ -1940,7 +1940,7 @@ dependencies = [ "http", "hyper", "hyper-util", - "rustls 0.23.22", + "rustls 0.23.23", "rustls-pki-types", "tokio", "tokio-rustls 0.26.1", @@ -2424,9 +2424,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" +checksum = "b3b1c9bd4fe1f0f8b387f6eb9eb3b4a1aa26185e5750efb9140301703f62cd1b" dependencies = [ "adler2", ] @@ -3339,15 +3339,14 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.8" +version = "0.17.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +checksum = "e75ec5e92c4d8aede845126adc388046234541629e76029599ed35a003c7ed24" dependencies = [ "cc", "cfg-if", "getrandom 0.2.15", "libc", - "spin", "untrusted", "windows-sys 0.52.0", ] @@ -3501,9 +3500,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.22" +version = "0.23.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb9263ab4eb695e42321db096e3b8fbd715a59b154d5c88d82db2175b681ba7" +checksum = 
"47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" dependencies = [ "once_cell", "rustls-pki-types", @@ -3857,12 +3856,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "spin" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" - [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -4232,7 +4225,7 @@ version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" dependencies = [ - "rustls 0.23.22", + "rustls 0.23.23", "tokio", ] @@ -4283,9 +4276,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.23" +version = "0.22.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02a8b472d1a3d7c18e2d61a489aee3453fd9031c33e4f55bd533f4a7adca1bee" +checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" dependencies = [ "indexmap 2.7.1", "serde", @@ -5067,9 +5060,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86e376c75f4f43f44db463cf729e0d3acbf954d13e22c51e26e4c264b4ab545f" +checksum = "59690dea168f2198d1a3b0cac23b8063efcd11012f10ae4698f284808c8ef603" dependencies = [ "memchr", ] From 45e5ee40033e6cf27ce6a757f619c61241009416 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Sat, 15 Feb 2025 17:33:32 +0000 Subject: [PATCH 264/802] chore(deps): udpate dependencies ```output cargo update Updating crates.io index Locking 6 packages to latest compatible versions Updating equivalent v1.0.1 -> v1.0.2 Updating openssl v0.10.70 -> v0.10.71 Updating openssl-sys v0.9.105 -> v0.9.106 Updating smallvec v1.13.2 -> v1.14.0 Updating zerocopy v0.8.17 -> v0.8.18 Updating zerocopy-derive v0.8.17 -> v0.8.18 
``` --- Cargo.lock | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 544ae8e0d..06cde2457 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1367,9 +1367,9 @@ dependencies = [ [[package]] name = "equivalent" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" @@ -2693,9 +2693,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "openssl" -version = "0.10.70" +version = "0.10.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61cfb4e166a8bb8c9b55c500bc2308550148ece889be90f609377e58140f42c6" +checksum = "5e14130c6a98cd258fdcb0fb6d744152343ff729cbfcb28c656a9d12b999fbcd" dependencies = [ "bitflags", "cfg-if", @@ -2725,9 +2725,9 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-sys" -version = "0.9.105" +version = "0.9.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b22d5b84be05a8d6947c7cb71f7c849aa0f112acd4bf51c2a7c1c988ac0a9dc" +checksum = "8bb61ea9811cc39e3c2069f40b8b8e2e70d8569b361f879786cc7ed48b777cdd" dependencies = [ "cc", "libc", @@ -3167,7 +3167,7 @@ checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.0", - "zerocopy 0.8.17", + "zerocopy 0.8.18", ] [[package]] @@ -3206,7 +3206,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b08f3c9802962f7e1b25113931d94f43ed9725bebc59db9d0c3e9a23b67e15ff" dependencies = [ "getrandom 0.3.1", - "zerocopy 0.8.17", + "zerocopy 0.8.18", ] [[package]] @@ -3842,9 +3842,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.2" +version 
= "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" [[package]] name = "socket2" @@ -5139,11 +5139,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.17" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa91407dacce3a68c56de03abe2760159582b846c6a4acd2f456618087f12713" +checksum = "79386d31a42a4996e3336b0919ddb90f81112af416270cff95b5f5af22b839c2" dependencies = [ - "zerocopy-derive 0.8.17", + "zerocopy-derive 0.8.18", ] [[package]] @@ -5159,9 +5159,9 @@ dependencies = [ [[package]] name = "zerocopy-derive" -version = "0.8.17" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06718a168365cad3d5ff0bb133aad346959a2074bd4a85c121255a11304a8626" +checksum = "76331675d372f91bf8d17e13afbd5fe639200b73d01f0fc748bb059f9cca2db7" dependencies = [ "proc-macro2", "quote", From da1353bf1899f70e34360f43b87fe6338b801fa6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Feb 2025 09:54:49 +0000 Subject: [PATCH 265/802] refactor: [#1270] return errorin core announce and scrape handler - In the announce handler, it returns an error when the tracker is running in `listed` mode and the infohash is not whitelisted. This was done only in the delivery layers but not in the domain. - In the scrape handler, it does not return any errors for now, but It will allow us in the future to return errors whithout making breaking changes. 
--- .../http-protocol/src/v1/responses/error.rs | 18 ++- packages/tracker-core/src/announce_handler.rs | 153 +++++++++++------- packages/tracker-core/src/error.rs | 16 ++ packages/tracker-core/src/lib.rs | 58 +++---- packages/tracker-core/src/scrape_handler.rs | 17 +- packages/tracker-core/src/test_helpers.rs | 1 + packages/tracker-core/tests/integration.rs | 21 +-- src/bootstrap/app.rs | 1 + .../http_tracker_core/services/announce.rs | 37 ++++- .../http_tracker_core/services/scrape.rs | 39 +++-- .../udp_tracker_core/services/announce.rs | 17 +- .../udp_tracker_core/services/scrape.rs | 16 +- src/servers/http/v1/handlers/announce.rs | 3 +- src/servers/udp/handlers/announce.rs | 1 + src/servers/udp/handlers/mod.rs | 1 + src/servers/udp/handlers/scrape.rs | 7 +- 16 files changed, 271 insertions(+), 135 deletions(-) diff --git a/packages/http-protocol/src/v1/responses/error.rs b/packages/http-protocol/src/v1/responses/error.rs index 8a6b4cf55..2bd8cd95c 100644 --- a/packages/http-protocol/src/v1/responses/error.rs +++ b/packages/http-protocol/src/v1/responses/error.rs @@ -55,10 +55,26 @@ impl From for Error { } } +impl From for Error { + fn from(err: bittorrent_tracker_core::error::AnnounceError) -> Self { + Error { + failure_reason: format!("Tracker announce error: {err}"), + } + } +} + +impl From for Error { + fn from(err: bittorrent_tracker_core::error::ScrapeError) -> Self { + Error { + failure_reason: format!("Tracker scrape error: {err}"), + } + } +} + impl From for Error { fn from(err: bittorrent_tracker_core::error::WhitelistError) -> Self { Error { - failure_reason: format!("Tracker error: {err}"), + failure_reason: format!("Tracker whitelist error: {err}"), } } } diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 6707f1917..cd2073857 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -101,12 +101,17 @@ use 
torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use super::torrent::repository::in_memory::InMemoryTorrentRepository; use super::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use crate::error::AnnounceError; +use crate::whitelist::authorization::WhitelistAuthorization; /// Handles `announce` requests from `BitTorrent` clients. pub struct AnnounceHandler { /// The tracker configuration. config: Core, + /// Service for authorizing access to whitelisted torrents. + whitelist_authorization: Arc, + /// Repository for in-memory torrent data. in_memory_torrent_repository: Arc, @@ -119,10 +124,12 @@ impl AnnounceHandler { #[must_use] pub fn new( config: &Core, + whitelist_authorization: &Arc, in_memory_torrent_repository: &Arc, db_torrent_repository: &Arc, ) -> Self { Self { + whitelist_authorization: whitelist_authorization.clone(), config: config.clone(), in_memory_torrent_repository: in_memory_torrent_repository.clone(), db_torrent_repository: db_torrent_repository.clone(), @@ -143,27 +150,23 @@ impl AnnounceHandler { /// # Returns /// /// An `AnnounceData` struct containing the list of peers, swarm statistics, and tracker policy. - pub fn announce( + /// + /// # Errors + /// + /// Returns an error if the tracker is running in `listed` mode and the + /// torrent is not whitelisted. + pub async fn announce( &self, info_hash: &InfoHash, peer: &mut peer::Peer, remote_client_ip: &IpAddr, peers_wanted: &PeersWanted, - ) -> AnnounceData { + ) -> Result { // code-review: maybe instead of mutating the peer we could just return // a tuple with the new peer and the announce data: (Peer, AnnounceData). // It could even be a different struct: `StoredPeer` or `PublicPeer`. - // code-review: in the `scrape` function we perform an authorization check. - // We check if the torrent is whitelisted. Should we also check authorization here? - // I think so because the `Tracker` has the responsibility for checking authentication and authorization. 
- // The `Tracker` has delegated that responsibility to the handlers - // (because we want to return a friendly error response) but that does not mean we should - // double-check authorization at this domain level too. - // I would propose to return a `Result` here. - // Besides, regarding authentication the `Tracker` is also responsible for authentication but - // we are actually handling authentication at the handlers level. So I would extract that - // responsibility into another authentication service. + self.whitelist_authorization.authorize(info_hash).await?; tracing::debug!("Before: {peer:?}"); peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip)); @@ -175,11 +178,11 @@ impl AnnounceHandler { .in_memory_torrent_repository .get_peers_for(info_hash, peer, peers_wanted.limit()); - AnnounceData { + Ok(AnnounceData { peers, stats, policy: self.config.announce_policy, - } + }) } /// Updates the torrent data in memory, persists statistics if needed, and @@ -461,8 +464,10 @@ mod tests { let mut peer = sample_peer(); - let announce_data = - announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible); + let announce_data = announce_handler + .announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) + .await + .unwrap(); assert_eq!(announce_data.peers, vec![]); } @@ -472,16 +477,21 @@ mod tests { let (announce_handler, _scrape_handler) = public_tracker(); let mut previously_announced_peer = sample_peer_1(); - announce_handler.announce( - &sample_info_hash(), - &mut previously_announced_peer, - &peer_ip(), - &PeersWanted::AsManyAsPossible, - ); + announce_handler + .announce( + &sample_info_hash(), + &mut previously_announced_peer, + &peer_ip(), + &PeersWanted::AsManyAsPossible, + ) + .await + .unwrap(); let mut peer = sample_peer_2(); - let announce_data = - announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible); + let 
announce_data = announce_handler + .announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) + .await + .unwrap(); assert_eq!(announce_data.peers, vec![Arc::new(previously_announced_peer)]); } @@ -491,24 +501,32 @@ mod tests { let (announce_handler, _scrape_handler) = public_tracker(); let mut previously_announced_peer_1 = sample_peer_1(); - announce_handler.announce( - &sample_info_hash(), - &mut previously_announced_peer_1, - &peer_ip(), - &PeersWanted::AsManyAsPossible, - ); + announce_handler + .announce( + &sample_info_hash(), + &mut previously_announced_peer_1, + &peer_ip(), + &PeersWanted::AsManyAsPossible, + ) + .await + .unwrap(); let mut previously_announced_peer_2 = sample_peer_2(); - announce_handler.announce( - &sample_info_hash(), - &mut previously_announced_peer_2, - &peer_ip(), - &PeersWanted::AsManyAsPossible, - ); + announce_handler + .announce( + &sample_info_hash(), + &mut previously_announced_peer_2, + &peer_ip(), + &PeersWanted::AsManyAsPossible, + ) + .await + .unwrap(); let mut peer = sample_peer_3(); - let announce_data = - announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::only(1)); + let announce_data = announce_handler + .announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::only(1)) + .await + .unwrap(); // It should return only one peer. There is no guarantee on // which peer will be returned. 
@@ -530,8 +548,10 @@ mod tests { let mut peer = seeder(); - let announce_data = - announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible); + let announce_data = announce_handler + .announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) + .await + .unwrap(); assert_eq!(announce_data.stats.complete, 1); } @@ -542,8 +562,10 @@ mod tests { let mut peer = leecher(); - let announce_data = - announce_handler.announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible); + let announce_data = announce_handler + .announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) + .await + .unwrap(); assert_eq!(announce_data.stats.incomplete, 1); } @@ -554,20 +576,26 @@ mod tests { // We have to announce with "started" event because peer does not count if peer was not previously known let mut started_peer = started_peer(); - announce_handler.announce( - &sample_info_hash(), - &mut started_peer, - &peer_ip(), - &PeersWanted::AsManyAsPossible, - ); + announce_handler + .announce( + &sample_info_hash(), + &mut started_peer, + &peer_ip(), + &PeersWanted::AsManyAsPossible, + ) + .await + .unwrap(); let mut completed_peer = completed_peer(); - let announce_data = announce_handler.announce( - &sample_info_hash(), - &mut completed_peer, - &peer_ip(), - &PeersWanted::AsManyAsPossible, - ); + let announce_data = announce_handler + .announce( + &sample_info_hash(), + &mut completed_peer, + &peer_ip(), + &PeersWanted::AsManyAsPossible, + ) + .await + .unwrap(); assert_eq!(announce_data.stats.downloaded, 1); } @@ -590,10 +618,12 @@ mod tests { use crate::torrent::manager::TorrentsManager; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use crate::whitelist::authorization::WhitelistAuthorization; + use crate::whitelist::repository::in_memory::InMemoryWhitelist; 
#[tokio::test] async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { - let mut config = configuration::ephemeral_listed(); + let mut config = configuration::ephemeral_public(); config.core.tracker_policy.persistent_torrent_completed_stat = true; @@ -605,8 +635,11 @@ mod tests { &in_memory_torrent_repository, &db_torrent_repository, )); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, + &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, )); @@ -616,11 +649,17 @@ mod tests { let mut peer = sample_peer(); peer.event = AnnounceEvent::Started; - let announce_data = announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible); + let announce_data = announce_handler + .announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) + .await + .unwrap(); assert_eq!(announce_data.stats.downloaded, 0); peer.event = AnnounceEvent::Completed; - let announce_data = announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible); + let announce_data = announce_handler + .announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) + .await + .unwrap(); assert_eq!(announce_data.stats.downloaded, 1); // Remove the newly updated torrent from memory diff --git a/packages/tracker-core/src/error.rs b/packages/tracker-core/src/error.rs index 99ac48ed3..fed076ffa 100644 --- a/packages/tracker-core/src/error.rs +++ b/packages/tracker-core/src/error.rs @@ -15,6 +15,22 @@ use torrust_tracker_located_error::LocatedError; use super::authentication::key::ParseKeyError; use super::databases; +/// Errors related to announce requests. 
+#[derive(thiserror::Error, Debug, Clone)] +pub enum AnnounceError { + /// Wraps errors related to torrent whitelisting. + #[error("Whitelist error: {0}")] + Whitelist(#[from] WhitelistError), +} + +/// Errors related to scrape requests. +#[derive(thiserror::Error, Debug, Clone)] +pub enum ScrapeError { + /// Wraps errors related to torrent whitelisting. + #[error("Whitelist error: {0}")] + Whitelist(#[from] WhitelistError), +} + /// Errors related to torrent whitelisting. /// /// This error is returned when an operation involves a torrent that is not diff --git a/packages/tracker-core/src/lib.rs b/packages/tracker-core/src/lib.rs index 843817deb..8e73fe027 100644 --- a/packages/tracker-core/src/lib.rs +++ b/packages/tracker-core/src/lib.rs @@ -144,8 +144,6 @@ pub(crate) type CurrentClock = clock::Stopped; #[cfg(test)] mod tests { mod the_tracker { - use std::net::{IpAddr, Ipv4Addr}; - use std::str::FromStr; use std::sync::Arc; use torrust_tracker_test_helpers::configuration; @@ -164,11 +162,6 @@ mod tests { initialize_handlers(&config) } - // The client peer IP - fn peer_ip() -> IpAddr { - IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) - } - mod for_all_config_modes { mod handling_a_scrape_request { @@ -191,24 +184,30 @@ mod tests { // Announce a "complete" peer for the torrent let mut complete_peer = complete_peer(); - announce_handler.announce( - &info_hash, - &mut complete_peer, - &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10)), - &PeersWanted::AsManyAsPossible, - ); + announce_handler + .announce( + &info_hash, + &mut complete_peer, + &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10)), + &PeersWanted::AsManyAsPossible, + ) + .await + .unwrap(); // Announce an "incomplete" peer for the torrent let mut incomplete_peer = incomplete_peer(); - announce_handler.announce( - &info_hash, - &mut incomplete_peer, - &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11)), - &PeersWanted::AsManyAsPossible, - ); + announce_handler + .announce( + &info_hash, + &mut incomplete_peer, + 
&IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11)), + &PeersWanted::AsManyAsPossible, + ) + .await + .unwrap(); // Scrape - let scrape_data = scrape_handler.scrape(&vec![info_hash]).await; + let scrape_data = scrape_handler.scrape(&vec![info_hash]).await.unwrap(); // The expected swarm metadata for the file let mut expected_scrape_data = ScrapeData::empty(); @@ -234,28 +233,19 @@ mod tests { use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::announce_handler::PeersWanted; - use crate::test_helpers::tests::{complete_peer, incomplete_peer}; - use crate::tests::the_tracker::{initialize_handlers_for_listed_tracker, peer_ip}; + use crate::tests::the_tracker::initialize_handlers_for_listed_tracker; #[tokio::test] async fn it_should_return_the_zeroed_swarm_metadata_for_the_requested_file_if_it_is_not_whitelisted() { - let (announce_handler, scrape_handler) = initialize_handlers_for_listed_tracker(); - - let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // DevSkim: ignore DS173237 - - let mut peer = incomplete_peer(); - announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible); + let (_announce_handler, scrape_handler) = initialize_handlers_for_listed_tracker(); - // Announce twice to force non zeroed swarm metadata - let mut peer = complete_peer(); - announce_handler.announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible); + let non_whitelisted_info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // DevSkim: ignore DS173237 - let scrape_data = scrape_handler.scrape(&vec![info_hash]).await; + let scrape_data = scrape_handler.scrape(&vec![non_whitelisted_info_hash]).await.unwrap(); // The expected zeroed swarm metadata for the file let mut expected_scrape_data = ScrapeData::empty(); - expected_scrape_data.add_file(&info_hash, SwarmMetadata::zeroed()); + 
expected_scrape_data.add_file(&non_whitelisted_info_hash, SwarmMetadata::zeroed()); assert_eq!(scrape_data, expected_scrape_data); } diff --git a/packages/tracker-core/src/scrape_handler.rs b/packages/tracker-core/src/scrape_handler.rs index 1e75580ab..93b25dea6 100644 --- a/packages/tracker-core/src/scrape_handler.rs +++ b/packages/tracker-core/src/scrape_handler.rs @@ -67,6 +67,7 @@ use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use super::torrent::repository::in_memory::InMemoryTorrentRepository; use super::whitelist; +use crate::error::ScrapeError; /// Handles scrape requests, providing torrent swarm metadata. pub struct ScrapeHandler { @@ -95,10 +96,18 @@ impl ScrapeHandler { /// - Returns metadata for each requested torrent. /// - If a torrent isn't whitelisted or doesn't exist, returns zeroed stats. /// + /// # Errors + /// + /// It does not return any errors for the time being. The error is returned + /// to avoid breaking changes in the future if we decide to return errors. + /// For example, a new tracker configuration option could be added to return + /// an error if a torrent is not whitelisted instead of returning zeroed + /// stats. 
+ /// /// # BEP Reference: /// /// [BEP 48: Scrape Protocol](https://www.bittorrent.org/beps/bep_0048.html) - pub async fn scrape(&self, info_hashes: &Vec) -> ScrapeData { + pub async fn scrape(&self, info_hashes: &Vec) -> Result { let mut scrape_data = ScrapeData::empty(); for info_hash in info_hashes { @@ -109,7 +118,7 @@ impl ScrapeHandler { scrape_data.add_file(info_hash, swarm_metadata); } - scrape_data + Ok(scrape_data) } } @@ -145,7 +154,7 @@ mod tests { let info_hashes = vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()]; // DevSkim: ignore DS173237 - let scrape_data = scrape_handler.scrape(&info_hashes).await; + let scrape_data = scrape_handler.scrape(&info_hashes).await.unwrap(); let mut expected_scrape_data = ScrapeData::empty(); @@ -163,7 +172,7 @@ mod tests { "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1".parse::().unwrap(), // DevSkim: ignore DS173237 ]; - let scrape_data = scrape_handler.scrape(&info_hashes).await; + let scrape_data = scrape_handler.scrape(&info_hashes).await.unwrap(); let mut expected_scrape_data = ScrapeData::empty(); expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[0]); diff --git a/packages/tracker-core/src/test_helpers.rs b/packages/tracker-core/src/test_helpers.rs index 06f5ce384..79904dec2 100644 --- a/packages/tracker-core/src/test_helpers.rs +++ b/packages/tracker-core/src/test_helpers.rs @@ -177,6 +177,7 @@ pub(crate) mod tests { let announce_handler = Arc::new(AnnounceHandler::new( &config.core, + &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, )); diff --git a/packages/tracker-core/tests/integration.rs b/packages/tracker-core/tests/integration.rs index 4dbd60b9e..5aaded10a 100644 --- a/packages/tracker-core/tests/integration.rs +++ b/packages/tracker-core/tests/integration.rs @@ -76,6 +76,7 @@ impl Container { )); let announce_handler = Arc::new(AnnounceHandler::new( config, + &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, )); 
@@ -102,10 +103,11 @@ async fn test_announce_and_scrape_requests() { // First announce: download started peer.event = AnnounceEvent::Started; - let announce_data = - container - .announce_handler - .announce(&info_hash, &mut peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible); + let announce_data = container + .announce_handler + .announce(&info_hash, &mut peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible) + .await + .unwrap(); // NOTICE: you don't get back the peer making the request. assert_eq!(announce_data.peers.len(), 0); @@ -113,17 +115,18 @@ async fn test_announce_and_scrape_requests() { // Second announce: download completed peer.event = AnnounceEvent::Completed; - let announce_data = - container - .announce_handler - .announce(&info_hash, &mut peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible); + let announce_data = container + .announce_handler + .announce(&info_hash, &mut peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible) + .await + .unwrap(); assert_eq!(announce_data.peers.len(), 0); assert_eq!(announce_data.stats.downloaded, 1); // Scrape - let scrape_data = container.scrape_handler.scrape(&vec![info_hash]).await; + let scrape_data = container.scrape_handler.scrape(&vec![info_hash]).await.unwrap(); assert!(scrape_data.files.contains_key(&info_hash)); } diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index e0d77ab8a..41023f2fa 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -129,6 +129,7 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { let announce_handler = Arc::new(AnnounceHandler::new( &configuration.core, + &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, )); diff --git a/src/packages/http_tracker_core/services/announce.rs b/src/packages/http_tracker_core/services/announce.rs index 049d0d228..3b4edea4e 100644 --- a/src/packages/http_tracker_core/services/announce.rs +++ b/src/packages/http_tracker_core/services/announce.rs @@ 
-16,6 +16,7 @@ use bittorrent_http_protocol::v1::services::peer_ip_resolver::{self, ClientIpSou use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; use bittorrent_tracker_core::authentication::service::AuthenticationService; +use bittorrent_tracker_core::error::AnnounceError; use bittorrent_tracker_core::whitelist; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; @@ -75,22 +76,28 @@ pub async fn handle_announce( &mut peer, &peers_wanted, ) - .await; + .await + .map_err(responses::error::Error::from)?; Ok(announce_data) } +/// # Errors +/// +/// This function will return an error if the announce requests failed. pub async fn invoke( announce_handler: Arc, opt_http_stats_event_sender: Arc>>, info_hash: InfoHash, peer: &mut peer::Peer, peers_wanted: &PeersWanted, -) -> AnnounceData { +) -> Result { let original_peer_ip = peer.peer_addr.ip(); // The tracker could change the original peer ip - let announce_data = announce_handler.announce(&info_hash, peer, &original_peer_ip, peers_wanted); + let announce_data = announce_handler + .announce(&info_hash, peer, &original_peer_ip, peers_wanted) + .await?; if let Some(http_stats_event_sender) = opt_http_stats_event_sender.as_deref() { match original_peer_ip { @@ -107,7 +114,7 @@ pub async fn invoke( } } - announce_data + Ok(announce_data) } #[cfg(test)] @@ -120,6 +127,8 @@ mod tests { use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; + use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use 
torrust_tracker_test_helpers::configuration; @@ -140,9 +149,12 @@ mod tests { let database = initialize_database(&config.core); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, + &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, )); @@ -209,6 +221,8 @@ mod tests { use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; + use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use mockall::predicate::eq; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; @@ -229,9 +243,12 @@ mod tests { let database = initialize_database(&config.core); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); Arc::new(AnnounceHandler::new( &config.core, + &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, )) @@ -250,7 +267,8 @@ mod tests { &mut peer, &PeersWanted::AsManyAsPossible, ) - .await; + .await + .unwrap(); let expected_announce_data = AnnounceData { peers: vec![], @@ -287,7 +305,8 @@ mod tests { &mut peer, 
&PeersWanted::AsManyAsPossible, ) - .await; + .await + .unwrap(); } fn tracker_with_an_ipv6_external_ip() -> Arc { @@ -332,7 +351,8 @@ mod tests { &mut peer, &PeersWanted::AsManyAsPossible, ) - .await; + .await + .unwrap(); } #[tokio::test] @@ -358,7 +378,8 @@ mod tests { &mut peer, &PeersWanted::AsManyAsPossible, ) - .await; + .await + .unwrap(); } } } diff --git a/src/packages/http_tracker_core/services/scrape.rs b/src/packages/http_tracker_core/services/scrape.rs index 62f5fdf62..467c69f51 100644 --- a/src/packages/http_tracker_core/services/scrape.rs +++ b/src/packages/http_tracker_core/services/scrape.rs @@ -15,6 +15,7 @@ use bittorrent_http_protocol::v1::responses; use bittorrent_http_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources}; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::authentication::service::AuthenticationService; +use bittorrent_tracker_core::error::ScrapeError; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; @@ -56,29 +57,34 @@ pub async fn handle_scrape( }; if return_real_scrape_data { - Ok(invoke( + let scrape_data = invoke( scrape_handler, opt_http_stats_event_sender, &scrape_request.info_hashes, &peer_ip, ) - .await) + .await?; + + Ok(scrape_data) } else { Ok(http_tracker_core::services::scrape::fake(opt_http_stats_event_sender, &scrape_request.info_hashes, &peer_ip).await) } } +/// # Errors +/// +/// This function will return an error if the tracker core scrape handler fails. 
pub async fn invoke( scrape_handler: &Arc, opt_http_stats_event_sender: &Arc>>, info_hashes: &Vec, original_peer_ip: &IpAddr, -) -> ScrapeData { - let scrape_data = scrape_handler.scrape(info_hashes).await; +) -> Result { + let scrape_data = scrape_handler.scrape(info_hashes).await?; send_scrape_event(original_peer_ip, opt_http_stats_event_sender).await; - scrape_data + Ok(scrape_data) } /// The HTTP tracker fake `scrape` service. It returns zeroed stats. @@ -151,6 +157,7 @@ mod tests { let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, + &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, )); @@ -225,9 +232,14 @@ mod tests { // Announce a new peer to force scrape data to contain not zeroed data let mut peer = sample_peer(); let original_peer_ip = peer.ip(); - announce_handler.announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::AsManyAsPossible); + announce_handler + .announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::AsManyAsPossible) + .await + .unwrap(); - let scrape_data = invoke(&scrape_handler, &http_stats_event_sender, &info_hashes, &original_peer_ip).await; + let scrape_data = invoke(&scrape_handler, &http_stats_event_sender, &info_hashes, &original_peer_ip) + .await + .unwrap(); let mut expected_scrape_data = ScrapeData::empty(); expected_scrape_data.add_file( @@ -257,7 +269,9 @@ mod tests { let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); - invoke(&scrape_handler, &http_stats_event_sender, &sample_info_hashes(), &peer_ip).await; + invoke(&scrape_handler, &http_stats_event_sender, &sample_info_hashes(), &peer_ip) + .await + .unwrap(); } #[tokio::test] @@ -275,7 +289,9 @@ mod tests { let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); - invoke(&scrape_handler, &http_stats_event_sender, &sample_info_hashes(), &peer_ip).await; + 
invoke(&scrape_handler, &http_stats_event_sender, &sample_info_hashes(), &peer_ip) + .await + .unwrap(); } } @@ -310,7 +326,10 @@ mod tests { // Announce a new peer to force scrape data to contain not zeroed data let mut peer = sample_peer(); let original_peer_ip = peer.ip(); - announce_handler.announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::AsManyAsPossible); + announce_handler + .announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::AsManyAsPossible) + .await + .unwrap(); let scrape_data = fake(&http_stats_event_sender, &info_hashes, &original_peer_ip).await; diff --git a/src/packages/udp_tracker_core/services/announce.rs b/src/packages/udp_tracker_core/services/announce.rs index dec506aec..29725c6d4 100644 --- a/src/packages/udp_tracker_core/services/announce.rs +++ b/src/packages/udp_tracker_core/services/announce.rs @@ -13,7 +13,7 @@ use std::sync::Arc; use aquatic_udp_protocol::AnnounceRequest; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; -use bittorrent_tracker_core::error::WhitelistError; +use bittorrent_tracker_core::error::AnnounceError; use bittorrent_tracker_core::whitelist; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; @@ -35,7 +35,7 @@ pub async fn handle_announce( announce_handler: &Arc, whitelist_authorization: &Arc, opt_udp_stats_event_sender: &Arc>>, -) -> Result { +) -> Result { let info_hash = request.info_hash.into(); let remote_client_ip = remote_addr.ip(); @@ -52,22 +52,27 @@ pub async fn handle_announce( &mut peer, &peers_wanted, ) - .await; + .await?; Ok(announce_data) } +/// # Errors +/// +/// It will return an error if the announce request fails. 
pub async fn invoke( announce_handler: Arc, opt_udp_stats_event_sender: Arc>>, info_hash: InfoHash, peer: &mut peer::Peer, peers_wanted: &PeersWanted, -) -> AnnounceData { +) -> Result { let original_peer_ip = peer.peer_addr.ip(); // The tracker could change the original peer ip - let announce_data = announce_handler.announce(&info_hash, peer, &original_peer_ip, peers_wanted); + let announce_data = announce_handler + .announce(&info_hash, peer, &original_peer_ip, peers_wanted) + .await?; if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { match original_peer_ip { @@ -84,5 +89,5 @@ pub async fn invoke( } } - announce_data + Ok(announce_data) } diff --git a/src/packages/udp_tracker_core/services/scrape.rs b/src/packages/udp_tracker_core/services/scrape.rs index e47dd35b3..e7608928c 100644 --- a/src/packages/udp_tracker_core/services/scrape.rs +++ b/src/packages/udp_tracker_core/services/scrape.rs @@ -12,18 +12,23 @@ use std::sync::Arc; use aquatic_udp_protocol::ScrapeRequest; use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::error::ScrapeError; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use torrust_tracker_primitives::core::ScrapeData; use crate::packages::udp_tracker_core; /// It handles the `Scrape` request. +/// +/// # Errors +/// +/// It will return an error if the tracker core scrape handler returns an error. pub async fn handle_scrape( remote_addr: SocketAddr, request: &ScrapeRequest, scrape_handler: &Arc, opt_udp_stats_event_sender: &Arc>>, -) -> ScrapeData { +) -> Result { // Convert from aquatic infohashes let mut info_hashes: Vec = vec![]; for info_hash in &request.info_hashes { @@ -33,13 +38,16 @@ pub async fn handle_scrape( invoke(scrape_handler, opt_udp_stats_event_sender, &info_hashes, remote_addr).await } +/// # Errors +/// +/// It will return an error if the tracker core scrape handler returns an error. 
pub async fn invoke( scrape_handler: &Arc, opt_udp_stats_event_sender: &Arc>>, info_hashes: &Vec, remote_addr: SocketAddr, -) -> ScrapeData { - let scrape_data = scrape_handler.scrape(info_hashes).await; +) -> Result { + let scrape_data = scrape_handler.scrape(info_hashes).await?; if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { match remote_addr { @@ -56,5 +64,5 @@ pub async fn invoke( } } - scrape_data + Ok(scrape_data) } diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 977e7dd6a..5f25c317b 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -260,6 +260,7 @@ mod tests { let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, + &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, )); @@ -397,7 +398,7 @@ mod tests { assert_error_response( &response, &format!( - "Tracker error: The torrent: {}, is not whitelisted", + "Tracker whitelist error: The torrent: {}, is not whitelisted", announce_request.info_hash ), ); diff --git a/src/servers/udp/handlers/announce.rs b/src/servers/udp/handlers/announce.rs index 2254ea979..abae9651d 100644 --- a/src/servers/udp/handlers/announce.rs +++ b/src/servers/udp/handlers/announce.rs @@ -802,6 +802,7 @@ mod tests { let announce_handler = Arc::new(AnnounceHandler::new( &config.core, + &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, )); diff --git a/src/servers/udp/handlers/mod.rs b/src/servers/udp/handlers/mod.rs index 252a5be02..c2fabe87a 100644 --- a/src/servers/udp/handlers/mod.rs +++ b/src/servers/udp/handlers/mod.rs @@ -242,6 +242,7 @@ pub(crate) mod tests { let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, + 
&whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, )); diff --git a/src/servers/udp/handlers/scrape.rs b/src/servers/udp/handlers/scrape.rs index d41563add..2b03e0dc7 100644 --- a/src/servers/udp/handlers/scrape.rs +++ b/src/servers/udp/handlers/scrape.rs @@ -45,7 +45,12 @@ pub async fn handle_scrape( .map_err(|e| (e, request.transaction_id))?; let scrape_data = - udp_tracker_core::services::scrape::handle_scrape(remote_addr, request, scrape_handler, opt_udp_stats_event_sender).await; + udp_tracker_core::services::scrape::handle_scrape(remote_addr, request, scrape_handler, opt_udp_stats_event_sender) + .await + .map_err(|e| Error::TrackerError { + source: (Arc::new(e) as Arc).into(), + }) + .map_err(|e| (e, request.transaction_id))?; // todo: extract `build_response` function. From dbee7ad3a103624a47f5b3b917961d1741a5cd4d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Feb 2025 11:28:31 +0000 Subject: [PATCH 266/802] refactor: [#1270] extract fn to new package udp-protocol --- .github/workflows/deployment.yaml | 1 + Cargo.lock | 10 + Cargo.toml | 1 + packages/udp-protocol/Cargo.toml | 20 + packages/udp-protocol/LICENSE | 661 ++++++++++++++++++ packages/udp-protocol/README.md | 11 + packages/udp-protocol/src/lib.rs | 15 + .../udp-protocol/src}/peer_builder.rs | 0 src/packages/udp_tracker_core/mod.rs | 1 - .../udp_tracker_core/services/announce.rs | 3 +- src/servers/udp/handlers/announce.rs | 3 + src/servers/udp/handlers/mod.rs | 8 +- 12 files changed, 731 insertions(+), 3 deletions(-) create mode 100644 packages/udp-protocol/Cargo.toml create mode 100644 packages/udp-protocol/LICENSE create mode 100644 packages/udp-protocol/README.md create mode 100644 packages/udp-protocol/src/lib.rs rename {src/packages/udp_tracker_core => packages/udp-protocol/src}/peer_builder.rs (100%) diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 41b40feaa..328bd91bb 100644 --- a/.github/workflows/deployment.yaml 
+++ b/.github/workflows/deployment.yaml @@ -58,6 +58,7 @@ jobs: cargo publish -p bittorrent-http-protocol cargo publish -p bittorrent-tracker-client cargo publish -p bittorrent-tracker-core + cargo publish -p bittorrent-udp-protocol cargo publish -p torrust-tracker cargo publish -p torrust-tracker-api-client cargo publish -p torrust-tracker-client diff --git a/Cargo.lock b/Cargo.lock index 06cde2457..07c08ab04 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -634,6 +634,15 @@ dependencies = [ "url", ] +[[package]] +name = "bittorrent-udp-protocol" +version = "3.0.0-develop" +dependencies = [ + "aquatic_udp_protocol", + "torrust-tracker-clock", + "torrust-tracker-primitives", +] + [[package]] name = "bitvec" version = "1.0.1" @@ -4301,6 +4310,7 @@ dependencies = [ "bittorrent-primitives", "bittorrent-tracker-client", "bittorrent-tracker-core", + "bittorrent-udp-protocol", "bloom", "blowfish", "camino", diff --git a/Cargo.toml b/Cargo.toml index 6c9f7f22d..7337b49af 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,6 +43,7 @@ bittorrent-http-protocol = { version = "3.0.0-develop", path = "packages/http-pr bittorrent-primitives = "0.1.0" bittorrent-tracker-client = { version = "3.0.0-develop", path = "packages/tracker-client" } bittorrent-tracker-core = { version = "3.0.0-develop", path = "packages/tracker-core" } +bittorrent-udp-protocol = { version = "3.0.0-develop", path = "packages/udp-protocol" } bloom = "0.3.2" blowfish = "0" camino = { version = "1", features = ["serde", "serde1"] } diff --git a/packages/udp-protocol/Cargo.toml b/packages/udp-protocol/Cargo.toml new file mode 100644 index 000000000..8f0f9fe98 --- /dev/null +++ b/packages/udp-protocol/Cargo.toml @@ -0,0 +1,20 @@ +[package] +description = "A library with the primitive types and functions for the BitTorrent UDP tracker protocol." 
+keywords = ["bittorrent", "library", "primitives", "udp"] +name = "bittorrent-udp-protocol" +readme = "README.md" + +authors.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +aquatic_udp_protocol = "0" +torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } diff --git a/packages/udp-protocol/LICENSE b/packages/udp-protocol/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/udp-protocol/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". 
"Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/packages/udp-protocol/README.md b/packages/udp-protocol/README.md new file mode 100644 index 000000000..4f63fb675 --- /dev/null +++ b/packages/udp-protocol/README.md @@ -0,0 +1,11 @@ +# BitTorrent UDP Tracker Protocol + +A library with the primitive types and functions used by BitTorrent UDP trackers. + +## Documentation + +[Crate documentation](https://docs.rs/bittorrent-udp-protocol). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/udp-protocol/src/lib.rs b/packages/udp-protocol/src/lib.rs new file mode 100644 index 000000000..f0983a7ba --- /dev/null +++ b/packages/udp-protocol/src/lib.rs @@ -0,0 +1,15 @@ +//! Primitive types and functions for `BitTorrent` UDP trackers. +pub mod peer_builder; + +use torrust_tracker_clock::clock; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. 
+#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; diff --git a/src/packages/udp_tracker_core/peer_builder.rs b/packages/udp-protocol/src/peer_builder.rs similarity index 100% rename from src/packages/udp_tracker_core/peer_builder.rs rename to packages/udp-protocol/src/peer_builder.rs diff --git a/src/packages/udp_tracker_core/mod.rs b/src/packages/udp_tracker_core/mod.rs index 3ab1d83dd..4f3e54857 100644 --- a/src/packages/udp_tracker_core/mod.rs +++ b/src/packages/udp_tracker_core/mod.rs @@ -1,3 +1,2 @@ -pub mod peer_builder; pub mod services; pub mod statistics; diff --git a/src/packages/udp_tracker_core/services/announce.rs b/src/packages/udp_tracker_core/services/announce.rs index 29725c6d4..db90d445f 100644 --- a/src/packages/udp_tracker_core/services/announce.rs +++ b/src/packages/udp_tracker_core/services/announce.rs @@ -15,10 +15,11 @@ use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; use bittorrent_tracker_core::error::AnnounceError; use bittorrent_tracker_core::whitelist; +use bittorrent_udp_protocol::peer_builder; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; -use crate::packages::udp_tracker_core::{self, peer_builder}; +use crate::packages::udp_tracker_core::{self}; /// It handles the `Announce` request. 
/// diff --git a/src/servers/udp/handlers/announce.rs b/src/servers/udp/handlers/announce.rs index abae9651d..a273a2ecb 100644 --- a/src/servers/udp/handlers/announce.rs +++ b/src/servers/udp/handlers/announce.rs @@ -261,6 +261,7 @@ mod tests { let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer_id) .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip), client_port)) + .updated_on(peers[0].updated) .into(); assert_eq!(peers[0], Arc::new(expected_peer)); @@ -495,6 +496,7 @@ mod tests { let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer_id) .with_peer_address(SocketAddr::new(external_ip_in_tracker_configuration, client_port)) + .updated_on(peers[0].updated) .into(); assert_eq!(peers[0], Arc::new(expected_peer)); @@ -567,6 +569,7 @@ mod tests { let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer_id) .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) + .updated_on(peers[0].updated) .into(); assert_eq!(peers[0], Arc::new(expected_peer)); diff --git a/src/servers/udp/handlers/mod.rs b/src/servers/udp/handlers/mod.rs index c2fabe87a..f9f8edae7 100644 --- a/src/servers/udp/handlers/mod.rs +++ b/src/servers/udp/handlers/mod.rs @@ -196,7 +196,7 @@ pub(crate) mod tests { use tokio::sync::mpsc::error::SendError; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{Configuration, Core}; - use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; use super::gen_remote_fingerprint; @@ -330,6 +330,12 @@ pub(crate) mod tests { self } + #[must_use] + pub fn updated_on(mut self, updated: DurationSinceUnixEpoch) -> Self { + self.peer.updated = updated; + self + } + #[must_use] pub fn into(self) -> peer::Peer { self.peer From 096d5032d7f47a0a5202f136ba79e87fb3623dad Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Feb 2025 12:07:37 +0000 Subject: [PATCH 267/802] refactor: [#1270] inline http 
tracker announce invoke fn --- .../http-protocol/src/v1/requests/announce.rs | 11 ++ .../http_tracker_core/services/announce.rs | 184 +++++++++--------- 2 files changed, 106 insertions(+), 89 deletions(-) diff --git a/packages/http-protocol/src/v1/requests/announce.rs b/packages/http-protocol/src/v1/requests/announce.rs index f293b9cf5..66f7a1227 100644 --- a/packages/http-protocol/src/v1/requests/announce.rs +++ b/packages/http-protocol/src/v1/requests/announce.rs @@ -183,6 +183,17 @@ impl fmt::Display for Event { } } +impl From for Event { + fn from(value: aquatic_udp_protocol::request::AnnounceEvent) -> Self { + match value { + AnnounceEvent::Started => Self::Started, + AnnounceEvent::Stopped => Self::Stopped, + AnnounceEvent::Completed => Self::Completed, + AnnounceEvent::None => panic!("can't convert announce event from aquatic for None variant"), + } + } +} + /// Whether the `announce` response should be in compact mode or not. /// /// Depending on the value of this param, the tracker will return a different diff --git a/src/packages/http_tracker_core/services/announce.rs b/src/packages/http_tracker_core/services/announce.rs index 3b4edea4e..2bc421f1d 100644 --- a/src/packages/http_tracker_core/services/announce.rs +++ b/src/packages/http_tracker_core/services/announce.rs @@ -13,14 +13,11 @@ use std::sync::Arc; use bittorrent_http_protocol::v1::requests::announce::{peer_from_request, Announce}; use bittorrent_http_protocol::v1::responses; use bittorrent_http_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources}; -use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; use bittorrent_tracker_core::authentication::service::AuthenticationService; -use bittorrent_tracker_core::error::AnnounceError; use bittorrent_tracker_core::whitelist; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; -use torrust_tracker_primitives::peer; use 
crate::packages::http_tracker_core; @@ -69,34 +66,11 @@ pub async fn handle_announce( None => PeersWanted::AsManyAsPossible, }; - let announce_data = invoke( - announce_handler.clone(), - opt_http_stats_event_sender.clone(), - announce_request.info_hash, - &mut peer, - &peers_wanted, - ) - .await - .map_err(responses::error::Error::from)?; - - Ok(announce_data) -} - -/// # Errors -/// -/// This function will return an error if the announce requests failed. -pub async fn invoke( - announce_handler: Arc, - opt_http_stats_event_sender: Arc>>, - info_hash: InfoHash, - peer: &mut peer::Peer, - peers_wanted: &PeersWanted, -) -> Result { let original_peer_ip = peer.peer_addr.ip(); // The tracker could change the original peer ip let announce_data = announce_handler - .announce(&info_hash, peer, &original_peer_ip, peers_wanted) + .announce(&announce_request.info_hash, &mut peer, &original_peer_ip, &peers_wanted) .await?; if let Some(http_stats_event_sender) = opt_http_stats_event_sender.as_deref() { @@ -123,19 +97,26 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; + use bittorrent_http_protocol::v1::requests::announce::Announce; + use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; use bittorrent_tracker_core::announce_handler::AnnounceHandler; + use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; + use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; - use torrust_tracker_configuration::Core; + use 
torrust_tracker_configuration::{Configuration, Core}; + use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; struct CoreTrackerServices { pub core_config: Arc, pub announce_handler: Arc, + pub authentication_service: Arc, + pub whitelist_authorization: Arc, } struct CoreHttpTrackerServices { @@ -143,14 +124,18 @@ mod tests { } fn initialize_core_tracker_services() -> (CoreTrackerServices, CoreHttpTrackerServices) { - let config = configuration::ephemeral_public(); + initialize_core_tracker_services_with_config(&configuration::ephemeral_public()) + } + fn initialize_core_tracker_services_with_config(config: &Configuration) -> (CoreTrackerServices, CoreHttpTrackerServices) { let core_config = Arc::new(config.core.clone()); let database = initialize_database(&config.core); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication_service = Arc::new(AuthenticationService::new(&core_config, &in_memory_key_repository)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, @@ -169,6 +154,8 @@ mod tests { CoreTrackerServices { core_config, announce_handler, + authentication_service, + whitelist_authorization, }, CoreHttpTrackerServices { http_stats_event_sender }, ) @@ -199,11 +186,33 @@ mod tests { } } + fn sample_announce_request_for_peer(peer: Peer) -> (Announce, ClientIpSources) { + let announce_request = Announce { + info_hash: sample_info_hash(), + peer_id: peer.peer_id, + port: peer.peer_addr.port(), + uploaded: Some(peer.uploaded), + downloaded: 
Some(peer.downloaded), + left: Some(peer.left), + event: Some(peer.event.into()), + compact: None, + numwant: None, + }; + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: Some(peer.peer_addr.ip()), + }; + + (announce_request, client_ip_sources) + } + use futures::future::BoxFuture; use mockall::mock; use tokio::sync::mpsc::error::SendError; use crate::packages::http_tracker_core; + use crate::servers::http::test_helpers::tests::sample_info_hash; mock! { HttpStatsEventSender {} @@ -217,13 +226,8 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; - use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; - use bittorrent_tracker_core::databases::setup::initialize_database; - use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; - use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; - use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use mockall::predicate::eq; + use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; @@ -231,41 +235,28 @@ mod tests { use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; use crate::packages::http_tracker_core; - use crate::packages::http_tracker_core::services::announce::invoke; + use crate::packages::http_tracker_core::services::announce::handle_announce; use crate::packages::http_tracker_core::services::announce::tests::{ - initialize_core_tracker_services, sample_peer, MockHttpStatsEventSender, + initialize_core_tracker_services, initialize_core_tracker_services_with_config, sample_announce_request_for_peer, + sample_peer, MockHttpStatsEventSender, }; - use 
crate::servers::http::test_helpers::tests::sample_info_hash; - - fn initialize_announce_handler() -> Arc { - let config = configuration::ephemeral(); - - let database = initialize_database(&config.core); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); - - Arc::new(AnnounceHandler::new( - &config.core, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - )) - } #[tokio::test] async fn it_should_return_the_announce_data() { let (core_tracker_services, core_http_tracker_services) = initialize_core_tracker_services(); - let mut peer = sample_peer(); + let peer = sample_peer(); - let announce_data = invoke( - core_tracker_services.announce_handler.clone(), - core_http_tracker_services.http_stats_event_sender.clone(), - sample_info_hash(), - &mut peer, - &PeersWanted::AsManyAsPossible, + let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); + + let announce_data = handle_announce( + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.authentication_service, + &core_tracker_services.whitelist_authorization, + &core_http_tracker_services.http_stats_event_sender, + &announce_request, + &client_ip_sources, ) .await .unwrap(); @@ -294,28 +285,32 @@ mod tests { let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); - let announce_handler = initialize_announce_handler(); + let (core_tracker_services, mut core_http_tracker_services) = initialize_core_tracker_services(); + core_http_tracker_services.http_stats_event_sender = http_stats_event_sender; - let mut peer = sample_peer_using_ipv4(); + let peer = 
sample_peer_using_ipv4(); - let _announce_data = invoke( - announce_handler, - http_stats_event_sender, - sample_info_hash(), - &mut peer, - &PeersWanted::AsManyAsPossible, + let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); + + let _announce_data = handle_announce( + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.authentication_service, + &core_tracker_services.whitelist_authorization, + &core_http_tracker_services.http_stats_event_sender, + &announce_request, + &client_ip_sources, ) .await .unwrap(); } - fn tracker_with_an_ipv6_external_ip() -> Arc { + fn tracker_with_an_ipv6_external_ip() -> Configuration { let mut configuration = configuration::ephemeral(); configuration.core.net.external_ip = Some(IpAddr::V6(Ipv6Addr::new( 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, ))); - - initialize_announce_handler() + configuration } fn peer_with_the_ipv4_loopback_ip() -> peer::Peer { @@ -340,16 +335,22 @@ mod tests { let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); - let mut peer = peer_with_the_ipv4_loopback_ip(); + let (core_tracker_services, mut core_http_tracker_services) = + initialize_core_tracker_services_with_config(&tracker_with_an_ipv6_external_ip()); + core_http_tracker_services.http_stats_event_sender = http_stats_event_sender; - let announce_handler = tracker_with_an_ipv6_external_ip(); + let peer = peer_with_the_ipv4_loopback_ip(); - let _announce_data = invoke( - announce_handler, - http_stats_event_sender, - sample_info_hash(), - &mut peer, - &PeersWanted::AsManyAsPossible, + let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); + + let _announce_data = handle_announce( + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.authentication_service, + &core_tracker_services.whitelist_authorization, + 
&core_http_tracker_services.http_stats_event_sender, + &announce_request, + &client_ip_sources, ) .await .unwrap(); @@ -367,16 +368,21 @@ mod tests { let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); - let announce_handler = initialize_announce_handler(); + let (core_tracker_services, mut core_http_tracker_services) = initialize_core_tracker_services(); + core_http_tracker_services.http_stats_event_sender = http_stats_event_sender; - let mut peer = sample_peer_using_ipv6(); + let peer = sample_peer_using_ipv6(); - let _announce_data = invoke( - announce_handler, - http_stats_event_sender, - sample_info_hash(), - &mut peer, - &PeersWanted::AsManyAsPossible, + let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); + + let _announce_data = handle_announce( + &core_tracker_services.core_config, + &core_tracker_services.announce_handler, + &core_tracker_services.authentication_service, + &core_tracker_services.whitelist_authorization, + &core_http_tracker_services.http_stats_event_sender, + &announce_request, + &client_ip_sources, ) .await .unwrap(); From 6651343ef98c0c15c813a85fbe7e25ecf3652e81 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Feb 2025 15:54:03 +0000 Subject: [PATCH 268/802] refactor: [#1270] inline http tracker scrape invoke fn --- .../http_tracker_core/services/scrape.rs | 138 +++++++++++------- src/servers/http/v1/handlers/scrape.rs | 13 +- 2 files changed, 95 insertions(+), 56 deletions(-) diff --git a/src/packages/http_tracker_core/services/scrape.rs b/src/packages/http_tracker_core/services/scrape.rs index 467c69f51..667ce8d0d 100644 --- a/src/packages/http_tracker_core/services/scrape.rs +++ b/src/packages/http_tracker_core/services/scrape.rs @@ -14,8 +14,6 @@ use bittorrent_http_protocol::v1::requests::scrape::Scrape; use bittorrent_http_protocol::v1::responses; use bittorrent_http_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources}; use 
bittorrent_primitives::info_hash::InfoHash; -use bittorrent_tracker_core::authentication::service::AuthenticationService; -use bittorrent_tracker_core::error::ScrapeError; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; @@ -42,13 +40,12 @@ use crate::packages::http_tracker_core; pub async fn handle_scrape( core_config: &Arc, scrape_handler: &Arc, - _authentication_service: &Arc, opt_http_stats_event_sender: &Arc>>, scrape_request: &Scrape, client_ip_sources: &ClientIpSources, - return_real_scrape_data: bool, + return_fake_scrape_data: bool, ) -> Result { - // Authorization for scrape requests is handled at the `http_tracker_core` + // Authorization for scrape requests is handled at the `bittorrent-_racker_core` // level for each torrent. let peer_ip = match peer_ip_resolver::invoke(core_config.net.on_reverse_proxy, client_ip_sources) { @@ -56,33 +53,15 @@ pub async fn handle_scrape( Err(error) => return Err(responses::error::Error::from(error)), }; - if return_real_scrape_data { - let scrape_data = invoke( - scrape_handler, - opt_http_stats_event_sender, - &scrape_request.info_hashes, - &peer_ip, - ) - .await?; - - Ok(scrape_data) - } else { - Ok(http_tracker_core::services::scrape::fake(opt_http_stats_event_sender, &scrape_request.info_hashes, &peer_ip).await) + if return_fake_scrape_data { + return Ok( + http_tracker_core::services::scrape::fake(opt_http_stats_event_sender, &scrape_request.info_hashes, &peer_ip).await, + ); } -} -/// # Errors -/// -/// This function will return an error if the tracker core scrape handler fails. 
-pub async fn invoke( - scrape_handler: &Arc, - opt_http_stats_event_sender: &Arc>>, - info_hashes: &Vec, - original_peer_ip: &IpAddr, -) -> Result { - let scrape_data = scrape_handler.scrape(info_hashes).await?; + let scrape_data = scrape_handler.scrape(&scrape_request.info_hashes).await?; - send_scrape_event(original_peer_ip, opt_http_stats_event_sender).await; + send_scrape_event(&peer_ip, opt_http_stats_event_sender).await; Ok(scrape_data) } @@ -141,6 +120,7 @@ mod tests { use futures::future::BoxFuture; use mockall::mock; use tokio::sync::mpsc::error::SendError; + use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; @@ -148,8 +128,12 @@ mod tests { use crate::servers::http::test_helpers::tests::sample_info_hash; fn initialize_announce_and_scrape_handlers_for_public_tracker() -> (Arc, Arc) { - let config = configuration::ephemeral_public(); + initialize_announce_and_scrape_handlers_with_configuration(&configuration::ephemeral_public()) + } + fn initialize_announce_and_scrape_handlers_with_configuration( + config: &Configuration, + ) -> (Arc, Arc) { let database = initialize_database(&config.core); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); @@ -182,9 +166,7 @@ mod tests { } } - fn initialize_scrape_handler() -> Arc { - let config = configuration::ephemeral(); - + fn initialize_scrape_handler_with_config(config: &Configuration) -> Arc { let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); @@ -205,31 +187,37 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::sync::Arc; + use 
bittorrent_http_protocol::v1::requests::scrape::Scrape; + use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; use bittorrent_tracker_core::announce_handler::PeersWanted; use mockall::predicate::eq; use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + use torrust_tracker_test_helpers::configuration; - use crate::packages::http_tracker_core::services::scrape::invoke; + use crate::packages::http_tracker_core::services::scrape::handle_scrape; use crate::packages::http_tracker_core::services::scrape::tests::{ - initialize_announce_and_scrape_handlers_for_public_tracker, initialize_scrape_handler, sample_info_hashes, - sample_peer, MockHttpStatsEventSender, + initialize_announce_and_scrape_handlers_with_configuration, initialize_scrape_handler_with_config, + sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; use crate::packages::{self, http_tracker_core}; use crate::servers::http::test_helpers::tests::sample_info_hash; #[tokio::test] async fn it_should_return_the_scrape_data_for_a_torrent() { + let configuration = configuration::ephemeral_public(); + let core_config = Arc::new(configuration.core.clone()); + let (http_stats_event_sender, _http_stats_repository) = packages::http_tracker_core::statistics::setup::factory(false); let http_stats_event_sender = Arc::new(http_stats_event_sender); - let (announce_handler, scrape_handler) = initialize_announce_and_scrape_handlers_for_public_tracker(); + let (announce_handler, scrape_handler) = initialize_announce_and_scrape_handlers_with_configuration(&configuration); let info_hash = sample_info_hash(); let info_hashes = vec![info_hash]; - // Announce a new peer to force scrape data to contain not zeroed data + // Announce a new peer to force scrape data to contain non zeroed data let mut peer = sample_peer(); let original_peer_ip = peer.ip(); announce_handler @@ -237,9 +225,25 @@ mod tests { .await .unwrap(); - let scrape_data = 
invoke(&scrape_handler, &http_stats_event_sender, &info_hashes, &original_peer_ip) - .await - .unwrap(); + let scrape_request = Scrape { + info_hashes: info_hashes.clone(), + }; + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: Some(original_peer_ip), + }; + + let scrape_data = handle_scrape( + &core_config, + &scrape_handler, + &http_stats_event_sender, + &scrape_request, + &client_ip_sources, + false, + ) + .await + .unwrap(); let mut expected_scrape_data = ScrapeData::empty(); expected_scrape_data.add_file( @@ -256,6 +260,8 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_4_scrape_event_when_the_peer_uses_ipv4() { + let config = configuration::ephemeral(); + let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() @@ -265,17 +271,35 @@ mod tests { let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); - let scrape_handler = initialize_scrape_handler(); + let scrape_handler = initialize_scrape_handler_with_config(&config); let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); - invoke(&scrape_handler, &http_stats_event_sender, &sample_info_hashes(), &peer_ip) - .await - .unwrap(); + let scrape_request = Scrape { + info_hashes: sample_info_hashes(), + }; + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: Some(peer_ip), + }; + + handle_scrape( + &Arc::new(config.core), + &scrape_handler, + &http_stats_event_sender, + &scrape_request, + &client_ip_sources, + false, + ) + .await + .unwrap(); } #[tokio::test] async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { + let config = configuration::ephemeral(); + let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() @@ -285,13 +309,29 @@ mod tests { let http_stats_event_sender: Arc>> = 
Arc::new(Some(Box::new(http_stats_event_sender_mock))); - let scrape_handler = initialize_scrape_handler(); + let scrape_handler = initialize_scrape_handler_with_config(&config); let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); - invoke(&scrape_handler, &http_stats_event_sender, &sample_info_hashes(), &peer_ip) - .await - .unwrap(); + let scrape_request = Scrape { + info_hashes: sample_info_hashes(), + }; + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: Some(peer_ip), + }; + + handle_scrape( + &Arc::new(config.core), + &scrape_handler, + &http_stats_event_sender, + &scrape_request, + &client_ip_sources, + false, + ) + .await + .unwrap(); } } diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 39bebe18e..2d41c2f78 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -123,26 +123,25 @@ async fn handle_scrape( // todo: move authentication inside `http_tracker_core::services::scrape::handle_scrape` // Authentication - let return_real_scrape_data = if core_config.private { + let return_fake_scrape_data = if core_config.private { match maybe_key { Some(key) => match authentication_service.authenticate(&key).await { - Ok(()) => true, - Err(_error) => false, + Ok(()) => false, + Err(_error) => true, }, - None => false, + None => true, } } else { - true + false }; http_tracker_core::services::scrape::handle_scrape( core_config, scrape_handler, - authentication_service, opt_http_stats_event_sender, scrape_request, client_ip_sources, - return_real_scrape_data, + return_fake_scrape_data, ) .await } From af28429f2c350bb866beb5e14acf0ca02dfb5af4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Feb 2025 16:10:23 +0000 Subject: [PATCH 269/802] refactor: [#1270] inline udp tracker announce invoke fn --- .../udp_tracker_core/services/announce.rs | 29 ++++++++++++++----- 1 
file changed, 21 insertions(+), 8 deletions(-) diff --git a/src/packages/udp_tracker_core/services/announce.rs b/src/packages/udp_tracker_core/services/announce.rs index db90d445f..5851cdc92 100644 --- a/src/packages/udp_tracker_core/services/announce.rs +++ b/src/packages/udp_tracker_core/services/announce.rs @@ -46,14 +46,27 @@ pub async fn handle_announce( let mut peer = peer_builder::from_request(request, &remote_client_ip); let peers_wanted: PeersWanted = i32::from(request.peers_wanted.0).into(); - let announce_data = invoke( - announce_handler.clone(), - opt_udp_stats_event_sender.clone(), - info_hash, - &mut peer, - &peers_wanted, - ) - .await?; + let original_peer_ip = peer.peer_addr.ip(); + + // The tracker could change the original peer ip + let announce_data = announce_handler + .announce(&info_hash, &mut peer, &original_peer_ip, &peers_wanted) + .await?; + + if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { + match original_peer_ip { + IpAddr::V4(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp4Announce) + .await; + } + IpAddr::V6(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp6Announce) + .await; + } + } + } Ok(announce_data) } From d49aebd7a14e9ff87c3fa9ce73bf5e69496fdedf Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Feb 2025 16:13:17 +0000 Subject: [PATCH 270/802] refactor: [#1270] inline udp tracker scrape invoke fn --- src/packages/udp_tracker_core/services/scrape.rs | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/src/packages/udp_tracker_core/services/scrape.rs b/src/packages/udp_tracker_core/services/scrape.rs index e7608928c..f12d4bc2e 100644 --- a/src/packages/udp_tracker_core/services/scrape.rs +++ b/src/packages/udp_tracker_core/services/scrape.rs @@ -35,19 +35,7 @@ pub async fn handle_scrape( info_hashes.push((*info_hash).into()); } - invoke(scrape_handler, 
opt_udp_stats_event_sender, &info_hashes, remote_addr).await -} - -/// # Errors -/// -/// It will return an error if the tracker core scrape handler returns an error. -pub async fn invoke( - scrape_handler: &Arc, - opt_udp_stats_event_sender: &Arc>>, - info_hashes: &Vec, - remote_addr: SocketAddr, -) -> Result { - let scrape_data = scrape_handler.scrape(info_hashes).await?; + let scrape_data = scrape_handler.scrape(&info_hashes).await?; if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { match remote_addr { From d0e693619f2322dde1866b20a4a66d453bde7504 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Feb 2025 16:27:18 +0000 Subject: [PATCH 271/802] refactor: simplify loop with map --- src/packages/udp_tracker_core/services/scrape.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/packages/udp_tracker_core/services/scrape.rs b/src/packages/udp_tracker_core/services/scrape.rs index f12d4bc2e..07ae452f8 100644 --- a/src/packages/udp_tracker_core/services/scrape.rs +++ b/src/packages/udp_tracker_core/services/scrape.rs @@ -30,10 +30,7 @@ pub async fn handle_scrape( opt_udp_stats_event_sender: &Arc>>, ) -> Result { // Convert from aquatic infohashes - let mut info_hashes: Vec = vec![]; - for info_hash in &request.info_hashes { - info_hashes.push((*info_hash).into()); - } + let info_hashes: Vec = request.info_hashes.iter().map(|&x| x.into()).collect(); let scrape_data = scrape_handler.scrape(&info_hashes).await?; From f6bf07050cb2c48a4af459f1143de33fb19fbb0e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Feb 2025 19:04:31 +0000 Subject: [PATCH 272/802] refactor: [#1275] move announce authentication in http tracker to core --- .../http-protocol/src/v1/responses/error.rs | 8 +++++ .../src/authentication/key/mod.rs | 4 +++ .../http_tracker_core/services/announce.rs | 24 +++++++++++++- .../http/v1/extractors/authentication_key.rs | 10 +++--- src/servers/http/v1/handlers/announce.rs | 31 ++++--------------- 
src/servers/http/v1/handlers/common/auth.rs | 7 ++--- tests/servers/http/asserts.rs | 6 +++- 7 files changed, 53 insertions(+), 37 deletions(-) diff --git a/packages/http-protocol/src/v1/responses/error.rs b/packages/http-protocol/src/v1/responses/error.rs index 2bd8cd95c..30749f73a 100644 --- a/packages/http-protocol/src/v1/responses/error.rs +++ b/packages/http-protocol/src/v1/responses/error.rs @@ -79,6 +79,14 @@ impl From for Error { } } +impl From for Error { + fn from(err: bittorrent_tracker_core::authentication::Error) -> Self { + Error { + failure_reason: format!("Tracker authentication error: {err}"), + } + } +} + #[cfg(test)] mod tests { diff --git a/packages/tracker-core/src/authentication/key/mod.rs b/packages/tracker-core/src/authentication/key/mod.rs index 648143928..efc734356 100644 --- a/packages/tracker-core/src/authentication/key/mod.rs +++ b/packages/tracker-core/src/authentication/key/mod.rs @@ -185,6 +185,10 @@ pub enum Error { /// Indicates that the key has expired. #[error("Key has expired, {location}")] KeyExpired { location: &'static Location<'static> }, + + /// Indicates that the required key for authentication was not provided. + #[error("Missing authentication key, {location}")] + MissingAuthKey { location: &'static Location<'static> }, } impl From for Error { diff --git a/src/packages/http_tracker_core/services/announce.rs b/src/packages/http_tracker_core/services/announce.rs index 2bc421f1d..6c9cbec17 100644 --- a/src/packages/http_tracker_core/services/announce.rs +++ b/src/packages/http_tracker_core/services/announce.rs @@ -8,6 +8,7 @@ //! It also sends an [`http_tracker_core::statistics::event::Event`] //! because events are specific for the HTTP tracker. 
use std::net::IpAddr; +use std::panic::Location; use std::sync::Arc; use bittorrent_http_protocol::v1::requests::announce::{peer_from_request, Announce}; @@ -15,6 +16,7 @@ use bittorrent_http_protocol::v1::responses; use bittorrent_http_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources}; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; use bittorrent_tracker_core::authentication::service::AuthenticationService; +use bittorrent_tracker_core::authentication::{self, Key}; use bittorrent_tracker_core::whitelist; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; @@ -42,12 +44,28 @@ use crate::packages::http_tracker_core; pub async fn handle_announce( core_config: &Arc, announce_handler: &Arc, - _authentication_service: &Arc, + authentication_service: &Arc, whitelist_authorization: &Arc, opt_http_stats_event_sender: &Arc>>, announce_request: &Announce, client_ip_sources: &ClientIpSources, + maybe_key: Option, ) -> Result { + // Authentication + if core_config.private { + match maybe_key { + Some(key) => match authentication_service.authenticate(&key).await { + Ok(()) => (), + Err(error) => return Err(error.into()), + }, + None => { + return Err(responses::error::Error::from(authentication::key::Error::MissingAuthKey { + location: Location::caller(), + })) + } + } + } + // Authorization match whitelist_authorization.authorize(&announce_request.info_hash).await { Ok(()) => (), @@ -257,6 +275,7 @@ mod tests { &core_http_tracker_services.http_stats_event_sender, &announce_request, &client_ip_sources, + None, ) .await .unwrap(); @@ -300,6 +319,7 @@ mod tests { &core_http_tracker_services.http_stats_event_sender, &announce_request, &client_ip_sources, + None, ) .await .unwrap(); @@ -351,6 +371,7 @@ mod tests { &core_http_tracker_services.http_stats_event_sender, &announce_request, &client_ip_sources, + None, ) .await .unwrap(); @@ -383,6 +404,7 @@ mod tests { 
&core_http_tracker_services.http_stats_event_sender, &announce_request, &client_ip_sources, + None, ) .await .unwrap(); diff --git a/src/servers/http/v1/extractors/authentication_key.rs b/src/servers/http/v1/extractors/authentication_key.rs index 0e46b75dd..c99c7000a 100644 --- a/src/servers/http/v1/extractors/authentication_key.rs +++ b/src/servers/http/v1/extractors/authentication_key.rs @@ -117,11 +117,6 @@ fn custom_error(rejection: &PathRejection) -> responses::error::Error { location: Location::caller(), }) } - axum::extract::rejection::PathRejection::MissingPathParams(_) => { - responses::error::Error::from(auth::Error::MissingAuthKey { - location: Location::caller(), - }) - } _ => responses::error::Error::from(auth::Error::CannotExtractKeyParam { location: Location::caller(), }), @@ -148,6 +143,9 @@ mod tests { let response = parse_key(invalid_key).unwrap_err(); - assert_error_response(&response, "Authentication error: Invalid format for authentication key param"); + assert_error_response( + &response, + "Tracker authentication error: Invalid format for authentication key param", + ); } } diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 5f25c317b..76f4e5134 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -5,7 +5,6 @@ //! //! The handlers perform the authentication and authorization of the request, //! and resolve the client IP address. 
-use std::panic::Location; use std::sync::Arc; use aquatic_udp_protocol::AnnounceEvent; @@ -22,12 +21,10 @@ use hyper::StatusCode; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; -use super::common::auth::map_auth_error_to_error_response; use crate::packages::http_tracker_core; use crate::servers::http::v1::extractors::announce_request::ExtractRequest; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; -use crate::servers::http::v1::handlers::common::auth; /// It handles the `announce` request when the HTTP tracker does not require /// authentication (no PATH `key` parameter required). @@ -134,23 +131,6 @@ async fn handle_announce( client_ip_sources: &ClientIpSources, maybe_key: Option, ) -> Result { - // todo: move authentication inside `http_tracker_core::services::announce::handle_announce` - - // Authentication - if core_config.private { - match maybe_key { - Some(key) => match authentication_service.authenticate(&key).await { - Ok(()) => (), - Err(error) => return Err(map_auth_error_to_error_response(&error)), - }, - None => { - return Err(responses::error::Error::from(auth::Error::MissingAuthKey { - location: Location::caller(), - })) - } - } - } - http_tracker_core::services::announce::handle_announce( &core_config.clone(), &announce_handler.clone(), @@ -159,6 +139,7 @@ async fn handle_announce( &opt_http_stats_event_sender.clone(), announce_request, client_ip_sources, + maybe_key, ) .await } @@ -339,10 +320,7 @@ mod tests { .await .unwrap_err(); - assert_error_response( - &response, - "Authentication error: Missing authentication key param for private tracker", - ); + assert_error_response(&response, "Tracker authentication error: Missing authentication key"); } #[tokio::test] @@ -366,7 +344,10 @@ mod tests { .await .unwrap_err(); - assert_error_response(&response, "Authentication 
error: Failed to read key"); + assert_error_response( + &response, + "Tracker authentication error: Failed to read key: YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ", + ); } } diff --git a/src/servers/http/v1/handlers/common/auth.rs b/src/servers/http/v1/handlers/common/auth.rs index c8625d03a..f45064ae3 100644 --- a/src/servers/http/v1/handlers/common/auth.rs +++ b/src/servers/http/v1/handlers/common/auth.rs @@ -14,10 +14,9 @@ use thiserror::Error; /// from the URL path. #[derive(Debug, Error)] pub enum Error { - #[error("Missing authentication key param for private tracker. Error in {location}")] - MissingAuthKey { location: &'static Location<'static> }, #[error("Invalid format for authentication key param. Error in {location}")] InvalidKeyFormat { location: &'static Location<'static> }, + #[error("Cannot extract authentication key param from URL path. Error in {location}")] CannotExtractKeyParam { location: &'static Location<'static> }, } @@ -25,7 +24,7 @@ pub enum Error { impl From for responses::error::Error { fn from(err: Error) -> Self { responses::error::Error { - failure_reason: format!("Authentication error: {err}"), + failure_reason: format!("Tracker authentication error: {err}"), } } } @@ -36,6 +35,6 @@ pub fn map_auth_error_to_error_response(err: &authentication::Error) -> response // impl From for responses::error::Error // Consider moving the trait implementation to the http-protocol package. 
responses::error::Error { - failure_reason: format!("Authentication error: {err}"), + failure_reason: format!("Tracker authentication error: {err}"), } } diff --git a/tests/servers/http/asserts.rs b/tests/servers/http/asserts.rs index 8d40d7e74..a68a1896e 100644 --- a/tests/servers/http/asserts.rs +++ b/tests/servers/http/asserts.rs @@ -141,5 +141,9 @@ pub async fn assert_cannot_parse_query_params_error_response(response: Response, pub async fn assert_authentication_error_response(response: Response) { assert_eq!(response.status(), 200); - assert_bencoded_error(&response.text().await.unwrap(), "Authentication error", Location::caller()); + assert_bencoded_error( + &response.text().await.unwrap(), + "Tracker authentication error", + Location::caller(), + ); } From ecc093f8f77889580177b5e8db4e6bfdab0ed594 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Feb 2025 19:23:13 +0000 Subject: [PATCH 273/802] refactor: [#1275] move scrape authentication in http tracker to core --- .../http_tracker_core/services/scrape.rs | 88 ++++++++++++------- src/servers/http/v1/handlers/scrape.rs | 19 +--- 2 files changed, 60 insertions(+), 47 deletions(-) diff --git a/src/packages/http_tracker_core/services/scrape.rs b/src/packages/http_tracker_core/services/scrape.rs index 667ce8d0d..7e3ea47fd 100644 --- a/src/packages/http_tracker_core/services/scrape.rs +++ b/src/packages/http_tracker_core/services/scrape.rs @@ -14,6 +14,8 @@ use bittorrent_http_protocol::v1::requests::scrape::Scrape; use bittorrent_http_protocol::v1::responses; use bittorrent_http_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources}; use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::authentication::service::AuthenticationService; +use bittorrent_tracker_core::authentication::Key; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; @@ -40,12 +42,26 @@ use 
crate::packages::http_tracker_core; pub async fn handle_scrape( core_config: &Arc, scrape_handler: &Arc, + authentication_service: &Arc, opt_http_stats_event_sender: &Arc>>, scrape_request: &Scrape, client_ip_sources: &ClientIpSources, - return_fake_scrape_data: bool, + maybe_key: Option, ) -> Result { - // Authorization for scrape requests is handled at the `bittorrent-_racker_core` + // Authentication + let return_fake_scrape_data = if core_config.private { + match maybe_key { + Some(key) => match authentication_service.authenticate(&key).await { + Ok(()) => false, + Err(_error) => true, + }, + None => true, + } + } else { + false + }; + + // Authorization for scrape requests is handled at the `bittorrent_tracker_core` // level for each torrent. let peer_ip = match peer_ip_resolver::invoke(core_config.net.on_reverse_proxy, client_ip_sources) { @@ -111,6 +127,8 @@ mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::AnnounceHandler; + use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; + use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -127,27 +145,39 @@ mod tests { use crate::packages::http_tracker_core; use crate::servers::http::test_helpers::tests::sample_info_hash; - fn initialize_announce_and_scrape_handlers_for_public_tracker() -> (Arc, Arc) { - initialize_announce_and_scrape_handlers_with_configuration(&configuration::ephemeral_public()) + struct Container { + announce_handler: Arc, + scrape_handler: Arc, + authentication_service: Arc, } - fn initialize_announce_and_scrape_handlers_with_configuration( - config: &Configuration, - ) -> (Arc, Arc) { + fn 
initialize_services_for_public_tracker() -> Container { + initialize_services_with_configuration(&configuration::ephemeral_public()) + } + + fn initialize_services_with_configuration(config: &Configuration) -> Container { let database = initialize_database(&config.core); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication_service = Arc::new(AuthenticationService::new(&config.core, &in_memory_key_repository)); + let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &whitelist_authorization, &in_memory_torrent_repository, &db_torrent_repository, )); + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - (announce_handler, scrape_handler) + Container { + announce_handler, + scrape_handler, + authentication_service, + } } fn sample_info_hashes() -> Vec { @@ -166,14 +196,6 @@ mod tests { } } - fn initialize_scrape_handler_with_config(config: &Configuration) -> Arc { - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)) - } - mock! 
{ HttpStatsEventSender {} impl http_tracker_core::statistics::event::sender::Sender for HttpStatsEventSender { @@ -197,8 +219,7 @@ mod tests { use crate::packages::http_tracker_core::services::scrape::handle_scrape; use crate::packages::http_tracker_core::services::scrape::tests::{ - initialize_announce_and_scrape_handlers_with_configuration, initialize_scrape_handler_with_config, - sample_info_hashes, sample_peer, MockHttpStatsEventSender, + initialize_services_with_configuration, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; use crate::packages::{self, http_tracker_core}; use crate::servers::http::test_helpers::tests::sample_info_hash; @@ -212,7 +233,7 @@ mod tests { packages::http_tracker_core::statistics::setup::factory(false); let http_stats_event_sender = Arc::new(http_stats_event_sender); - let (announce_handler, scrape_handler) = initialize_announce_and_scrape_handlers_with_configuration(&configuration); + let container = initialize_services_with_configuration(&configuration); let info_hash = sample_info_hash(); let info_hashes = vec![info_hash]; @@ -220,7 +241,8 @@ mod tests { // Announce a new peer to force scrape data to contain non zeroed data let mut peer = sample_peer(); let original_peer_ip = peer.ip(); - announce_handler + container + .announce_handler .announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::AsManyAsPossible) .await .unwrap(); @@ -236,11 +258,12 @@ mod tests { let scrape_data = handle_scrape( &core_config, - &scrape_handler, + &container.scrape_handler, + &container.authentication_service, &http_stats_event_sender, &scrape_request, &client_ip_sources, - false, + None, ) .await .unwrap(); @@ -271,7 +294,7 @@ mod tests { let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); - let scrape_handler = initialize_scrape_handler_with_config(&config); + let container = initialize_services_with_configuration(&config); let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); @@ 
-286,11 +309,12 @@ mod tests { handle_scrape( &Arc::new(config.core), - &scrape_handler, + &container.scrape_handler, + &container.authentication_service, &http_stats_event_sender, &scrape_request, &client_ip_sources, - false, + None, ) .await .unwrap(); @@ -309,7 +333,7 @@ mod tests { let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); - let scrape_handler = initialize_scrape_handler_with_config(&config); + let container = initialize_services_with_configuration(&config); let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); @@ -324,11 +348,12 @@ mod tests { handle_scrape( &Arc::new(config.core), - &scrape_handler, + &container.scrape_handler, + &container.authentication_service, &http_stats_event_sender, &scrape_request, &client_ip_sources, - false, + None, ) .await .unwrap(); @@ -347,7 +372,7 @@ mod tests { use crate::packages::http_tracker_core::services::scrape::fake; use crate::packages::http_tracker_core::services::scrape::tests::{ - initialize_announce_and_scrape_handlers_for_public_tracker, sample_info_hashes, sample_peer, MockHttpStatsEventSender, + initialize_services_for_public_tracker, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; use crate::packages::{self, http_tracker_core}; use crate::servers::http::test_helpers::tests::sample_info_hash; @@ -358,7 +383,7 @@ mod tests { packages::http_tracker_core::statistics::setup::factory(false); let http_stats_event_sender = Arc::new(http_stats_event_sender); - let (announce_handler, _scrape_handler) = initialize_announce_and_scrape_handlers_for_public_tracker(); + let container = initialize_services_for_public_tracker(); let info_hash = sample_info_hash(); let info_hashes = vec![info_hash]; @@ -366,7 +391,8 @@ mod tests { // Announce a new peer to force scrape data to contain not zeroed data let mut peer = sample_peer(); let original_peer_ip = peer.ip(); - announce_handler + container + .announce_handler 
.announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::AsManyAsPossible) .await .unwrap(); diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 2d41c2f78..946190e8f 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -107,6 +107,7 @@ async fn handle( Ok(scrape_data) => scrape_data, Err(error) => return (StatusCode::OK, error.write()).into_response(), }; + build_response(scrape_data) } @@ -120,28 +121,14 @@ async fn handle_scrape( client_ip_sources: &ClientIpSources, maybe_key: Option, ) -> Result { - // todo: move authentication inside `http_tracker_core::services::scrape::handle_scrape` - - // Authentication - let return_fake_scrape_data = if core_config.private { - match maybe_key { - Some(key) => match authentication_service.authenticate(&key).await { - Ok(()) => false, - Err(_error) => true, - }, - None => true, - } - } else { - false - }; - http_tracker_core::services::scrape::handle_scrape( core_config, scrape_handler, + authentication_service, opt_http_stats_event_sender, scrape_request, client_ip_sources, - return_fake_scrape_data, + maybe_key, ) .await } From 694621bc63ae542afca395e376a31008b60c43fd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Feb 2025 07:53:54 +0000 Subject: [PATCH 274/802] refactor: [#1275] extract ConnectionCookieError enum --- src/servers/udp/connection_cookie.rs | 18 ++++++++-------- src/servers/udp/error.rs | 32 +++++++++++++++++++++------- src/servers/udp/handlers/announce.rs | 2 +- src/servers/udp/handlers/mod.rs | 13 ++++------- src/servers/udp/handlers/scrape.rs | 2 +- 5 files changed, 39 insertions(+), 28 deletions(-) diff --git a/src/servers/udp/connection_cookie.rs b/src/servers/udp/connection_cookie.rs index 439be9da7..5d8976f0e 100644 --- a/src/servers/udp/connection_cookie.rs +++ b/src/servers/udp/connection_cookie.rs @@ -82,7 +82,7 @@ use cookie_builder::{assemble, decode, disassemble, encode}; use 
tracing::instrument; use zerocopy::AsBytes; -use super::error::Error; +use crate::servers::udp::error::ConnectionCookieError; use crate::shared::crypto::keys::CipherArrayBlowfish; /// Generates a new connection cookie. @@ -96,9 +96,9 @@ use crate::shared::crypto::keys::CipherArrayBlowfish; /// It would panic if the cookie is not exactly 8 bytes is size. /// #[instrument(err)] -pub fn make(fingerprint: u64, issue_at: f64) -> Result { +pub fn make(fingerprint: u64, issue_at: f64) -> Result { if !issue_at.is_normal() { - return Err(Error::CookieValueNotNormal { + return Err(ConnectionCookieError::ValueNotNormal { not_normal_value: issue_at, }); } @@ -122,7 +122,7 @@ use std::ops::Range; /// /// It would panic if the range start is not smaller than it's end. #[instrument] -pub fn check(cookie: &Cookie, fingerprint: u64, valid_range: Range) -> Result { +pub fn check(cookie: &Cookie, fingerprint: u64, valid_range: Range) -> Result { assert!(valid_range.start <= valid_range.end, "range start is larger than range end"); let cookie_bytes = CipherArrayBlowfish::from_slice(cookie.0.as_bytes()); @@ -131,20 +131,20 @@ pub fn check(cookie: &Cookie, fingerprint: u64, valid_range: Range) -> Resu let issue_time = disassemble(fingerprint, cookie_bytes); if !issue_time.is_normal() { - return Err(Error::CookieValueNotNormal { + return Err(ConnectionCookieError::ValueNotNormal { not_normal_value: issue_time, }); } if issue_time < valid_range.start { - return Err(Error::CookieValueExpired { + return Err(ConnectionCookieError::ValueExpired { expired_value: issue_time, min_value: valid_range.start, }); } if issue_time > valid_range.end { - return Err(Error::CookieValueFromFuture { + return Err(ConnectionCookieError::ValueFromFuture { future_value: issue_time, max_value: valid_range.end, }); @@ -287,7 +287,7 @@ mod tests { let result = check(&cookie, fingerprint, min..max).unwrap_err(); match result { - Error::CookieValueExpired { .. 
} => {} // Expected error + ConnectionCookieError::ValueExpired { .. } => {} // Expected error _ => panic!("Expected ConnectionIdExpired error"), } } @@ -305,7 +305,7 @@ mod tests { let result = check(&cookie, fingerprint, min..max).unwrap_err(); match result { - Error::CookieValueFromFuture { .. } => {} // Expected error + ConnectionCookieError::ValueFromFuture { .. } => {} // Expected error _ => panic!("Expected ConnectionIdFromFuture error"), } } diff --git a/src/servers/udp/error.rs b/src/servers/udp/error.rs index cda562aed..6ba62a5e0 100644 --- a/src/servers/udp/error.rs +++ b/src/servers/udp/error.rs @@ -13,14 +13,9 @@ pub struct ConnectionCookie(pub ConnectionId); /// Error returned by the UDP server. #[derive(Error, Debug)] pub enum Error { - #[error("cookie value is not normal: {not_normal_value}")] - CookieValueNotNormal { not_normal_value: f64 }, - - #[error("cookie value is expired: {expired_value}, expected > {min_value}")] - CookieValueExpired { expired_value: f64, min_value: f64 }, - - #[error("cookie value is from future: {future_value}, expected < {max_value}")] - CookieValueFromFuture { future_value: f64, max_value: f64 }, + /// Error returned when there was an error with the connection cookie. + #[error("Connection cookie error: {source}")] + ConnectionCookieError { source: ConnectionCookieError }, #[error("error when phrasing request: {request_parse_error:?}")] RequestParseError { request_parse_error: RequestParseError }, @@ -49,8 +44,29 @@ pub enum Error { TrackerAuthenticationRequired { location: &'static Location<'static> }, } +/// Error returned when there was an error with the connection cookie. 
+#[derive(Error, Debug)] +pub enum ConnectionCookieError { + #[error("cookie value is not normal: {not_normal_value}")] + ValueNotNormal { not_normal_value: f64 }, + + #[error("cookie value is expired: {expired_value}, expected > {min_value}")] + ValueExpired { expired_value: f64, min_value: f64 }, + + #[error("cookie value is from future: {future_value}, expected < {max_value}")] + ValueFromFuture { future_value: f64, max_value: f64 }, +} + impl From for Error { fn from(request_parse_error: RequestParseError) -> Self { Self::RequestParseError { request_parse_error } } } + +impl From for Error { + fn from(connection_cookie_error: ConnectionCookieError) -> Self { + Self::ConnectionCookieError { + source: connection_cookie_error, + } + } +} diff --git a/src/servers/udp/handlers/announce.rs b/src/servers/udp/handlers/announce.rs index a273a2ecb..e5ee6dccf 100644 --- a/src/servers/udp/handlers/announce.rs +++ b/src/servers/udp/handlers/announce.rs @@ -50,7 +50,7 @@ pub async fn handle_announce( gen_remote_fingerprint(&remote_addr), cookie_valid_range, ) - .map_err(|e| (e, request.transaction_id))?; + .map_err(|e| (e.into(), request.transaction_id))?; let response = udp_tracker_core::services::announce::handle_announce( remote_addr, diff --git a/src/servers/udp/handlers/mod.rs b/src/servers/udp/handlers/mod.rs index f9f8edae7..2611931e3 100644 --- a/src/servers/udp/handlers/mod.rs +++ b/src/servers/udp/handlers/mod.rs @@ -78,15 +78,10 @@ pub(crate) async fn handle_packet( { Ok(response) => return response, Err((e, transaction_id)) => { - match &e { - Error::CookieValueNotNormal { .. } - | Error::CookieValueExpired { .. } - | Error::CookieValueFromFuture { .. } => { - // code-review: should we include `RequestParseError` and `BadRequest`? - let mut ban_service = udp_tracker_container.ban_service.write().await; - ban_service.increase_counter(&udp_request.from.ip()); - } - _ => {} + if let Error::ConnectionCookieError { .. 
} = &e { + // code-review: should we include `RequestParseError` and `BadRequest`? + let mut ban_service = udp_tracker_container.ban_service.write().await; + ban_service.increase_counter(&udp_request.from.ip()); } handle_error( diff --git a/src/servers/udp/handlers/scrape.rs b/src/servers/udp/handlers/scrape.rs index 2b03e0dc7..51a914e9f 100644 --- a/src/servers/udp/handlers/scrape.rs +++ b/src/servers/udp/handlers/scrape.rs @@ -42,7 +42,7 @@ pub async fn handle_scrape( gen_remote_fingerprint(&remote_addr), cookie_valid_range, ) - .map_err(|e| (e, request.transaction_id))?; + .map_err(|e| (e.into(), request.transaction_id))?; let scrape_data = udp_tracker_core::services::scrape::handle_scrape(remote_addr, request, scrape_handler, opt_udp_stats_event_sender) From 4fd79b798d8ef456444a02621dd348c7e3a6ac79 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Feb 2025 08:03:56 +0000 Subject: [PATCH 275/802] refactor: [#1275] move connection cookie to udp_tracker_core package --- .../udp_tracker_core}/connection_cookie.rs | 15 ++++++++++++++- src/packages/udp_tracker_core/mod.rs | 1 + src/servers/udp/error.rs | 15 ++------------- src/servers/udp/handlers/announce.rs | 12 ++++++------ src/servers/udp/handlers/connect.rs | 4 ++-- src/servers/udp/handlers/error.rs | 2 +- src/servers/udp/handlers/scrape.rs | 4 ++-- src/servers/udp/mod.rs | 1 - 8 files changed, 28 insertions(+), 26 deletions(-) rename src/{servers/udp => packages/udp_tracker_core}/connection_cookie.rs (95%) diff --git a/src/servers/udp/connection_cookie.rs b/src/packages/udp_tracker_core/connection_cookie.rs similarity index 95% rename from src/servers/udp/connection_cookie.rs rename to src/packages/udp_tracker_core/connection_cookie.rs index 5d8976f0e..2d8eae335 100644 --- a/src/servers/udp/connection_cookie.rs +++ b/src/packages/udp_tracker_core/connection_cookie.rs @@ -79,12 +79,25 @@ use aquatic_udp_protocol::ConnectionId as Cookie; use cookie_builder::{assemble, decode, disassemble, encode}; +use
thiserror::Error; use tracing::instrument; use zerocopy::AsBytes; -use crate::servers::udp::error::ConnectionCookieError; use crate::shared::crypto::keys::CipherArrayBlowfish; +/// Error returned when there was an error with the connection cookie. +#[derive(Error, Debug)] +pub enum ConnectionCookieError { + #[error("cookie value is not normal: {not_normal_value}")] + ValueNotNormal { not_normal_value: f64 }, + + #[error("cookie value is expired: {expired_value}, expected > {min_value}")] + ValueExpired { expired_value: f64, min_value: f64 }, + + #[error("cookie value is from future: {future_value}, expected < {max_value}")] + ValueFromFuture { future_value: f64, max_value: f64 }, +} + /// Generates a new connection cookie. /// /// # Errors diff --git a/src/packages/udp_tracker_core/mod.rs b/src/packages/udp_tracker_core/mod.rs index 4f3e54857..1c93f811a 100644 --- a/src/packages/udp_tracker_core/mod.rs +++ b/src/packages/udp_tracker_core/mod.rs @@ -1,2 +1,3 @@ +pub mod connection_cookie; pub mod services; pub mod statistics; diff --git a/src/servers/udp/error.rs b/src/servers/udp/error.rs index 6ba62a5e0..81e7847c0 100644 --- a/src/servers/udp/error.rs +++ b/src/servers/udp/error.rs @@ -6,6 +6,8 @@ use derive_more::derive::Display; use thiserror::Error; use torrust_tracker_located_error::LocatedError; +use crate::packages::udp_tracker_core::connection_cookie::ConnectionCookieError; + #[derive(Display, Debug)] #[display(":?")] pub struct ConnectionCookie(pub ConnectionId); @@ -44,19 +46,6 @@ pub enum Error { TrackerAuthenticationRequired { location: &'static Location<'static> }, } -/// Error returned when there was an error with the connection cookie. 
-#[derive(Error, Debug)] -pub enum ConnectionCookieError { - #[error("cookie value is not normal: {not_normal_value}")] - ValueNotNormal { not_normal_value: f64 }, - - #[error("cookie value is expired: {expired_value}, expected > {min_value}")] - ValueExpired { expired_value: f64, min_value: f64 }, - - #[error("cookie value is from future: {future_value}, expected < {max_value}")] - ValueFromFuture { future_value: f64, max_value: f64 }, -} - impl From for Error { fn from(request_parse_error: RequestParseError) -> Self { Self::RequestParseError { request_parse_error } diff --git a/src/servers/udp/handlers/announce.rs b/src/servers/udp/handlers/announce.rs index e5ee6dccf..c9dd15735 100644 --- a/src/servers/udp/handlers/announce.rs +++ b/src/servers/udp/handlers/announce.rs @@ -14,8 +14,8 @@ use torrust_tracker_configuration::Core; use tracing::{instrument, Level}; use zerocopy::network_endian::I32; +use crate::packages::udp_tracker_core::connection_cookie::check; use crate::packages::udp_tracker_core::{self}; -use crate::servers::udp::connection_cookie::check; use crate::servers::udp::error::Error; use crate::servers::udp::handlers::gen_remote_fingerprint; @@ -134,7 +134,7 @@ mod tests { PeerId as AquaticPeerId, PeerKey, Port, TransactionId, }; - use crate::servers::udp::connection_cookie::make; + use crate::packages::udp_tracker_core::connection_cookie::make; use crate::servers::udp::handlers::tests::{sample_ipv4_remote_addr_fingerprint, sample_issue_time}; struct AnnounceRequestBuilder { @@ -213,8 +213,8 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_configuration::Core; + use crate::packages::udp_tracker_core::connection_cookie::make; use crate::packages::{self, udp_tracker_core}; - use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, @@ -447,7 
+447,7 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::servers::udp::connection_cookie::make; + use crate::packages::udp_tracker_core::connection_cookie::make; use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_issue_time, @@ -520,8 +520,8 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_configuration::Core; + use crate::packages::udp_tracker_core::connection_cookie::make; use crate::packages::{self, udp_tracker_core}; - use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, @@ -776,7 +776,7 @@ mod tests { use mockall::predicate::eq; use crate::packages::udp_tracker_core; - use crate::servers::udp::connection_cookie::make; + use crate::packages::udp_tracker_core::connection_cookie::make; use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ sample_cookie_valid_range, sample_issue_time, MockUdpStatsEventSender, TrackerConfigurationBuilder, diff --git a/src/servers/udp/handlers/connect.rs b/src/servers/udp/handlers/connect.rs index 431c3bb4d..799e46347 100644 --- a/src/servers/udp/handlers/connect.rs +++ b/src/servers/udp/handlers/connect.rs @@ -6,7 +6,7 @@ use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response}; use tracing::{instrument, Level}; use crate::packages::udp_tracker_core; -use crate::servers::udp::connection_cookie::make; +use crate::packages::udp_tracker_core::connection_cookie::make; use crate::servers::udp::handlers::gen_remote_fingerprint; /// It handles the `Connect` request. 
Refer to [`Connect`](crate::servers::udp#connect) @@ -62,8 +62,8 @@ mod tests { use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; use mockall::predicate::eq; + use crate::packages::udp_tracker_core::connection_cookie::make; use crate::packages::{self, udp_tracker_core}; - use crate::servers::udp::connection_cookie::make; use crate::servers::udp::handlers::handle_connect; use crate::servers::udp::handlers::tests::{ sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv4_socket_address, sample_ipv6_remote_addr, diff --git a/src/servers/udp/handlers/error.rs b/src/servers/udp/handlers/error.rs index 36095eeed..e5529dbdf 100644 --- a/src/servers/udp/handlers/error.rs +++ b/src/servers/udp/handlers/error.rs @@ -9,7 +9,7 @@ use uuid::Uuid; use zerocopy::network_endian::I32; use crate::packages::udp_tracker_core; -use crate::servers::udp::connection_cookie::check; +use crate::packages::udp_tracker_core::connection_cookie::check; use crate::servers::udp::error::Error; use crate::servers::udp::handlers::gen_remote_fingerprint; use crate::servers::udp::UDP_TRACKER_LOG_TARGET; diff --git a/src/servers/udp/handlers/scrape.rs b/src/servers/udp/handlers/scrape.rs index 51a914e9f..9bc445417 100644 --- a/src/servers/udp/handlers/scrape.rs +++ b/src/servers/udp/handlers/scrape.rs @@ -11,7 +11,7 @@ use tracing::{instrument, Level}; use zerocopy::network_endian::I32; use crate::packages::udp_tracker_core; -use crate::servers::udp::connection_cookie::check; +use crate::packages::udp_tracker_core::connection_cookie::check; use crate::servers::udp::error::Error; use crate::servers::udp::handlers::gen_remote_fingerprint; @@ -94,7 +94,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::packages; - use crate::servers::udp::connection_cookie::make; + use crate::packages::udp_tracker_core::connection_cookie::make; use crate::servers::udp::handlers::tests::{ 
initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, sample_issue_time, TorrentPeerBuilder, diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index e8410e5f0..2f0d4e4ce 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -637,7 +637,6 @@ use std::net::SocketAddr; -pub mod connection_cookie; pub mod error; pub mod handlers; pub mod server; From 91525af0c8e6d8686187909fe8735a4a89345eb2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Feb 2025 09:09:10 +0000 Subject: [PATCH 276/802] refactor: [#1275] move authentication in udp tracker to core --- .../udp_tracker_core/connection_cookie.rs | 11 +++- .../udp_tracker_core/services/announce.rs | 52 ++++++++++++++++++- .../udp_tracker_core/services/scrape.rs | 51 +++++++++++++++++- src/servers/udp/error.rs | 36 ++++++++----- src/servers/udp/handlers/announce.rs | 33 ++++-------- src/servers/udp/handlers/connect.rs | 3 +- src/servers/udp/handlers/error.rs | 3 +- src/servers/udp/handlers/mod.rs | 20 +++---- src/servers/udp/handlers/scrape.rs | 25 +++------ src/servers/udp/mod.rs | 2 +- 10 files changed, 160 insertions(+), 76 deletions(-) diff --git a/src/packages/udp_tracker_core/connection_cookie.rs b/src/packages/udp_tracker_core/connection_cookie.rs index 2d8eae335..b9070c63a 100644 --- a/src/packages/udp_tracker_core/connection_cookie.rs +++ b/src/packages/udp_tracker_core/connection_cookie.rs @@ -86,7 +86,7 @@ use zerocopy::AsBytes; use crate::shared::crypto::keys::CipherArrayBlowfish; /// Error returned when there was an error with the connection cookie. 
-#[derive(Error, Debug)] +#[derive(Error, Debug, Clone)] pub enum ConnectionCookieError { #[error("cookie value is not normal: {not_normal_value}")] ValueNotNormal { not_normal_value: f64 }, @@ -123,6 +123,8 @@ pub fn make(fingerprint: u64, issue_at: f64) -> Result) -> Resu Ok(issue_time) } +#[must_use] +pub(crate) fn gen_remote_fingerprint(remote_addr: &SocketAddr) -> u64 { + let mut state = DefaultHasher::new(); + remote_addr.hash(&mut state); + state.finish() +} + mod cookie_builder { use cipher::{BlockDecrypt, BlockEncrypt}; use tracing::instrument; diff --git a/src/packages/udp_tracker_core/services/announce.rs b/src/packages/udp_tracker_core/services/announce.rs index 5851cdc92..a825d06ad 100644 --- a/src/packages/udp_tracker_core/services/announce.rs +++ b/src/packages/udp_tracker_core/services/announce.rs @@ -8,19 +8,57 @@ //! It also sends an [`udp_tracker_core::statistics::event::Event`] //! because events are specific for the HTTP tracker. use std::net::{IpAddr, SocketAddr}; +use std::ops::Range; use std::sync::Arc; use aquatic_udp_protocol::AnnounceRequest; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; -use bittorrent_tracker_core::error::AnnounceError; +use bittorrent_tracker_core::error::{AnnounceError, WhitelistError}; use bittorrent_tracker_core::whitelist; use bittorrent_udp_protocol::peer_builder; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; +use crate::packages::udp_tracker_core::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieError}; use crate::packages::udp_tracker_core::{self}; +/// Errors related to announce requests. +#[derive(thiserror::Error, Debug, Clone)] +pub enum UdpAnnounceError { + /// Error returned when there was an error with the connection cookie. 
+ #[error("Connection cookie error: {source}")] + ConnectionCookieError { source: ConnectionCookieError }, + + /// Error returned when there was an error with the tracker core announce handler. + #[error("Tracker core announce error: {source}")] + TrackerCoreAnnounceError { source: AnnounceError }, + + /// Error returned when there was an error with the tracker core whitelist. + #[error("Tracker core whitelist error: {source}")] + TrackerCoreWhitelistError { source: WhitelistError }, +} + +impl From for UdpAnnounceError { + fn from(connection_cookie_error: ConnectionCookieError) -> Self { + Self::ConnectionCookieError { + source: connection_cookie_error, + } + } +} + +impl From for UdpAnnounceError { + fn from(announce_error: AnnounceError) -> Self { + Self::TrackerCoreAnnounceError { source: announce_error } + } +} + +impl From for UdpAnnounceError { + fn from(whitelist_error: WhitelistError) -> Self { + Self::TrackerCoreWhitelistError { source: whitelist_error } + } +} + /// It handles the `Announce` request. /// /// # Errors @@ -36,7 +74,17 @@ pub async fn handle_announce( announce_handler: &Arc, whitelist_authorization: &Arc, opt_udp_stats_event_sender: &Arc>>, -) -> Result { + cookie_valid_range: Range, +) -> Result { + // todo: return a UDP response like the HTTP tracker instead of raw AnnounceData. + + // Authentication + check( + &request.connection_id, + gen_remote_fingerprint(&remote_addr), + cookie_valid_range, + )?; + let info_hash = request.info_hash.into(); let remote_client_ip = remote_addr.ip(); diff --git a/src/packages/udp_tracker_core/services/scrape.rs b/src/packages/udp_tracker_core/services/scrape.rs index 07ae452f8..5beb54e9f 100644 --- a/src/packages/udp_tracker_core/services/scrape.rs +++ b/src/packages/udp_tracker_core/services/scrape.rs @@ -8,15 +8,53 @@ //! It also sends an [`udp_tracker_core::statistics::event::Event`] //! because events are specific for the UDP tracker. 
use std::net::SocketAddr; +use std::ops::Range; use std::sync::Arc; use aquatic_udp_protocol::ScrapeRequest; use bittorrent_primitives::info_hash::InfoHash; -use bittorrent_tracker_core::error::ScrapeError; +use bittorrent_tracker_core::error::{ScrapeError, WhitelistError}; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use torrust_tracker_primitives::core::ScrapeData; use crate::packages::udp_tracker_core; +use crate::packages::udp_tracker_core::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieError}; + +/// Errors related to scrape requests. +#[derive(thiserror::Error, Debug, Clone)] +pub enum UdpScrapeError { + /// Error returned when there was an error with the connection cookie. + #[error("Connection cookie error: {source}")] + ConnectionCookieError { source: ConnectionCookieError }, + + /// Error returned when there was an error with the tracker core scrape handler. + #[error("Tracker core scrape error: {source}")] + TrackerCoreScrapeError { source: ScrapeError }, + + /// Error returned when there was an error with the tracker core whitelist. + #[error("Tracker core whitelist error: {source}")] + TrackerCoreWhitelistError { source: WhitelistError }, +} + +impl From for UdpScrapeError { + fn from(connection_cookie_error: ConnectionCookieError) -> Self { + Self::ConnectionCookieError { + source: connection_cookie_error, + } + } +} + +impl From for UdpScrapeError { + fn from(scrape_error: ScrapeError) -> Self { + Self::TrackerCoreScrapeError { source: scrape_error } + } +} + +impl From for UdpScrapeError { + fn from(whitelist_error: WhitelistError) -> Self { + Self::TrackerCoreWhitelistError { source: whitelist_error } + } +} /// It handles the `Scrape` request. /// @@ -28,7 +66,16 @@ pub async fn handle_scrape( request: &ScrapeRequest, scrape_handler: &Arc, opt_udp_stats_event_sender: &Arc>>, -) -> Result { + cookie_valid_range: Range, +) -> Result { + // todo: return a UDP response like the HTTP tracker instead of raw ScrapeData. 
+ + check( + &request.connection_id, + gen_remote_fingerprint(&remote_addr), + cookie_valid_range, + )?; + // Convert from aquatic infohashes let info_hashes: Vec = request.info_hashes.iter().map(|&x| x.into()).collect(); diff --git a/src/servers/udp/error.rs b/src/servers/udp/error.rs index 81e7847c0..9105ba0cb 100644 --- a/src/servers/udp/error.rs +++ b/src/servers/udp/error.rs @@ -6,7 +6,8 @@ use derive_more::derive::Display; use thiserror::Error; use torrust_tracker_located_error::LocatedError; -use crate::packages::udp_tracker_core::connection_cookie::ConnectionCookieError; +use crate::packages::udp_tracker_core::services::announce::UdpAnnounceError; +use crate::packages::udp_tracker_core::services::scrape::UdpScrapeError; #[derive(Display, Debug)] #[display(":?")] @@ -15,18 +16,17 @@ pub struct ConnectionCookie(pub ConnectionId); /// Error returned by the UDP server. #[derive(Error, Debug)] pub enum Error { - /// Error returned when there was an error with the connection cookie. - #[error("Connection cookie error: {source}")] - ConnectionCookieError { source: ConnectionCookieError }, - + /// Error returned when the request is invalid. #[error("error when phrasing request: {request_parse_error:?}")] RequestParseError { request_parse_error: RequestParseError }, - /// Error returned when the domain tracker returns an error. - #[error("tracker server error: {source}")] - TrackerError { - source: LocatedError<'static, dyn std::error::Error + Send + Sync>, - }, + /// Error returned when the domain tracker returns an announce error. + #[error("tracker announce error: {source}")] + UdpAnnounceError { source: UdpAnnounceError }, + + /// Error returned when the domain tracker returns an scrape error. + #[error("tracker scrape error: {source}")] + UdpScrapeError { source: UdpScrapeError }, /// Error returned from a third-party library (`aquatic_udp_protocol`). 
#[error("internal server error: {message}, {location}")] @@ -52,10 +52,18 @@ impl From for Error { } } -impl From for Error { - fn from(connection_cookie_error: ConnectionCookieError) -> Self { - Self::ConnectionCookieError { - source: connection_cookie_error, +impl From for Error { + fn from(udp_announce_error: UdpAnnounceError) -> Self { + Self::UdpAnnounceError { + source: udp_announce_error, + } + } +} + +impl From for Error { + fn from(udp_scrape_error: UdpScrapeError) -> Self { + Self::UdpScrapeError { + source: udp_scrape_error, } } } diff --git a/src/servers/udp/handlers/announce.rs b/src/servers/udp/handlers/announce.rs index c9dd15735..48e0d6179 100644 --- a/src/servers/udp/handlers/announce.rs +++ b/src/servers/udp/handlers/announce.rs @@ -14,10 +14,8 @@ use torrust_tracker_configuration::Core; use tracing::{instrument, Level}; use zerocopy::network_endian::I32; -use crate::packages::udp_tracker_core::connection_cookie::check; use crate::packages::udp_tracker_core::{self}; use crate::servers::udp::error::Error; -use crate::servers::udp::handlers::gen_remote_fingerprint; /// It handles the `Announce` request. Refer to [`Announce`](crate::servers::udp#announce) /// request for more information. @@ -43,27 +41,16 @@ pub async fn handle_announce( tracing::trace!("handle announce"); - // todo: move authentication to `udp_tracker_core::services::announce::handle_announce` - - check( - &request.connection_id, - gen_remote_fingerprint(&remote_addr), - cookie_valid_range, - ) - .map_err(|e| (e.into(), request.transaction_id))?; - let response = udp_tracker_core::services::announce::handle_announce( remote_addr, request, announce_handler, whitelist_authorization, opt_udp_stats_event_sender, + cookie_valid_range, ) .await - .map_err(|e| Error::TrackerError { - source: (Arc::new(e) as Arc).into(), - }) - .map_err(|e| (e, request.transaction_id))?; + .map_err(|e| (e.into(), request.transaction_id))?; // todo: extract `build_response` function. 
@@ -213,15 +200,15 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_configuration::Core; - use crate::packages::udp_tracker_core::connection_cookie::make; + use crate::packages::udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use crate::packages::{self, udp_tracker_core}; use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_socket_address, sample_issue_time, MockUdpStatsEventSender, TorrentPeerBuilder, }; - use crate::servers::udp::handlers::{gen_remote_fingerprint, handle_announce}; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -447,13 +434,13 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; - use crate::packages::udp_tracker_core::connection_cookie::make; + use crate::packages::udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::{ initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_issue_time, TorrentPeerBuilder, }; - use crate::servers::udp::handlers::{gen_remote_fingerprint, handle_announce}; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { @@ -520,15 +507,15 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_configuration::Core; - use crate::packages::udp_tracker_core::connection_cookie::make; + use crate::packages::udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use crate::packages::{self, udp_tracker_core}; use 
crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, sample_issue_time, MockUdpStatsEventSender, TorrentPeerBuilder, }; - use crate::servers::udp::handlers::{gen_remote_fingerprint, handle_announce}; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -776,12 +763,12 @@ mod tests { use mockall::predicate::eq; use crate::packages::udp_tracker_core; - use crate::packages::udp_tracker_core::connection_cookie::make; + use crate::packages::udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; + use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::{ sample_cookie_valid_range, sample_issue_time, MockUdpStatsEventSender, TrackerConfigurationBuilder, }; - use crate::servers::udp::handlers::{gen_remote_fingerprint, handle_announce}; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { diff --git a/src/servers/udp/handlers/connect.rs b/src/servers/udp/handlers/connect.rs index 799e46347..bd3c4ef0a 100644 --- a/src/servers/udp/handlers/connect.rs +++ b/src/servers/udp/handlers/connect.rs @@ -6,8 +6,7 @@ use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response}; use tracing::{instrument, Level}; use crate::packages::udp_tracker_core; -use crate::packages::udp_tracker_core::connection_cookie::make; -use crate::servers::udp::handlers::gen_remote_fingerprint; +use crate::packages::udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; /// It handles the `Connect` request. 
Refer to [`Connect`](crate::servers::udp#connect) /// request for more information. diff --git a/src/servers/udp/handlers/error.rs b/src/servers/udp/handlers/error.rs index e5529dbdf..6cf273e78 100644 --- a/src/servers/udp/handlers/error.rs +++ b/src/servers/udp/handlers/error.rs @@ -9,9 +9,8 @@ use uuid::Uuid; use zerocopy::network_endian::I32; use crate::packages::udp_tracker_core; -use crate::packages::udp_tracker_core::connection_cookie::check; +use crate::packages::udp_tracker_core::connection_cookie::{check, gen_remote_fingerprint}; use crate::servers::udp::error::Error; -use crate::servers::udp::handlers::gen_remote_fingerprint; use crate::servers::udp::UDP_TRACKER_LOG_TARGET; #[allow(clippy::too_many_arguments)] diff --git a/src/servers/udp/handlers/mod.rs b/src/servers/udp/handlers/mod.rs index 2611931e3..e58497d4b 100644 --- a/src/servers/udp/handlers/mod.rs +++ b/src/servers/udp/handlers/mod.rs @@ -4,7 +4,6 @@ pub mod connect; pub mod error; pub mod scrape; -use std::hash::{DefaultHasher, Hash, Hasher as _}; use std::net::SocketAddr; use std::ops::Range; use std::sync::Arc; @@ -21,6 +20,7 @@ use uuid::Uuid; use super::RawRequest; use crate::container::UdpTrackerContainer; +use crate::packages::udp_tracker_core::services::announce::UdpAnnounceError; use crate::servers::udp::error::Error; use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; use crate::CurrentClock; @@ -77,8 +77,11 @@ pub(crate) async fn handle_packet( .await { Ok(response) => return response, - Err((e, transaction_id)) => { - if let Error::ConnectionCookieError { .. } = &e { + Err((error, transaction_id)) => { + if let Error::UdpAnnounceError { + source: UdpAnnounceError::ConnectionCookieError { .. }, + } = error + { // code-review: should we include `RequestParseError` and `BadRequest`? 
let mut ban_service = udp_tracker_container.ban_service.write().await; ban_service.increase_counter(&udp_request.from.ip()); @@ -90,7 +93,7 @@ pub(crate) async fn handle_packet( request_id, &udp_tracker_container.udp_stats_event_sender, cookie_time_values.valid_range.clone(), - &e, + &error, Some(transaction_id), ) .await @@ -163,13 +166,6 @@ pub async fn handle_request( } } -#[must_use] -pub(crate) fn gen_remote_fingerprint(remote_addr: &SocketAddr) -> u64 { - let mut state = DefaultHasher::new(); - remote_addr.hash(&mut state); - state.finish() -} - #[cfg(test)] pub(crate) mod tests { @@ -194,8 +190,8 @@ pub(crate) mod tests { use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; - use super::gen_remote_fingerprint; use crate::packages::udp_tracker_core; + use crate::packages::udp_tracker_core::connection_cookie::gen_remote_fingerprint; use crate::{packages, CurrentClock}; pub(crate) struct CoreTrackerServices { diff --git a/src/servers/udp/handlers/scrape.rs b/src/servers/udp/handlers/scrape.rs index 9bc445417..bca284860 100644 --- a/src/servers/udp/handlers/scrape.rs +++ b/src/servers/udp/handlers/scrape.rs @@ -11,9 +11,7 @@ use tracing::{instrument, Level}; use zerocopy::network_endian::I32; use crate::packages::udp_tracker_core; -use crate::packages::udp_tracker_core::connection_cookie::check; use crate::servers::udp::error::Error; -use crate::servers::udp::handlers::gen_remote_fingerprint; /// It handles the `Scrape` request. Refer to [`Scrape`](crate::servers::udp#scrape) /// request for more information. 
@@ -35,23 +33,16 @@ pub async fn handle_scrape( tracing::trace!("handle scrape"); - // todo: move authentication to `udp_tracker_core::services::scrape::handle_scrape` - - check( - &request.connection_id, - gen_remote_fingerprint(&remote_addr), + let scrape_data = udp_tracker_core::services::scrape::handle_scrape( + remote_addr, + request, + scrape_handler, + opt_udp_stats_event_sender, cookie_valid_range, ) + .await .map_err(|e| (e.into(), request.transaction_id))?; - let scrape_data = - udp_tracker_core::services::scrape::handle_scrape(remote_addr, request, scrape_handler, opt_udp_stats_event_sender) - .await - .map_err(|e| Error::TrackerError { - source: (Arc::new(e) as Arc).into(), - }) - .map_err(|e| (e, request.transaction_id))?; - // todo: extract `build_response` function. let mut torrent_stats: Vec = Vec::new(); @@ -94,12 +85,12 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::packages; - use crate::packages::udp_tracker_core::connection_cookie::make; + use crate::packages::udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; + use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{ initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, sample_issue_time, TorrentPeerBuilder, }; - use crate::servers::udp::handlers::{gen_remote_fingerprint, handle_scrape}; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { TorrentScrapeStatistics { diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index 2f0d4e4ce..614db5bf6 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -105,7 +105,7 @@ //! connection ID = hash(client IP + current time slot + secret seed) //! ``` //! -//! The BEP-15 recommends a two-minute time slot. Refer to [`connection_cookie`] +//! The BEP-15 recommends a two-minute time slot. 
Refer to [`connection_cookie`](crate::packages::udp_tracker_core::connection_cookie) //! for more information about the connection ID generation with this method. //! //! #### Connect Request From 4618f706dba7c53291e3c7366fc4b1ec147d3524 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Feb 2025 09:50:03 +0000 Subject: [PATCH 277/802] refactor: exatract response builders for UDP handlers --- src/servers/udp/handlers/announce.rs | 28 ++++++++++++++++++---------- src/servers/udp/handlers/connect.rs | 18 ++++++++++++------ src/servers/udp/handlers/scrape.rs | 7 +++++-- 3 files changed, 35 insertions(+), 18 deletions(-) diff --git a/src/servers/udp/handlers/announce.rs b/src/servers/udp/handlers/announce.rs index 48e0d6179..1003b4041 100644 --- a/src/servers/udp/handlers/announce.rs +++ b/src/servers/udp/handlers/announce.rs @@ -11,6 +11,7 @@ use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::whitelist; use torrust_tracker_configuration::Core; +use torrust_tracker_primitives::core::AnnounceData; use tracing::{instrument, Level}; use zerocopy::network_endian::I32; @@ -41,7 +42,7 @@ pub async fn handle_announce( tracing::trace!("handle announce"); - let response = udp_tracker_core::services::announce::handle_announce( + let announce_data = udp_tracker_core::services::announce::handle_announce( remote_addr, request, announce_handler, @@ -52,18 +53,25 @@ pub async fn handle_announce( .await .map_err(|e| (e.into(), request.transaction_id))?; - // todo: extract `build_response` function. 
+ Ok(build_response(remote_addr, request, core_config, &announce_data)) +} +fn build_response( + remote_addr: SocketAddr, + request: &AnnounceRequest, + core_config: &Arc, + announce_data: &AnnounceData, +) -> Response { #[allow(clippy::cast_possible_truncation)] if remote_addr.is_ipv4() { let announce_response = AnnounceResponse { fixed: AnnounceResponseFixedData { transaction_id: request.transaction_id, announce_interval: AnnounceInterval(I32::new(i64::from(core_config.announce_policy.interval) as i32)), - leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), - seeders: NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), + leechers: NumberOfPeers(I32::new(i64::from(announce_data.stats.incomplete) as i32)), + seeders: NumberOfPeers(I32::new(i64::from(announce_data.stats.complete) as i32)), }, - peers: response + peers: announce_data .peers .iter() .filter_map(|peer| { @@ -79,16 +87,16 @@ pub async fn handle_announce( .collect(), }; - Ok(Response::from(announce_response)) + Response::from(announce_response) } else { let announce_response = AnnounceResponse { fixed: AnnounceResponseFixedData { transaction_id: request.transaction_id, announce_interval: AnnounceInterval(I32::new(i64::from(core_config.announce_policy.interval) as i32)), - leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), - seeders: NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), + leechers: NumberOfPeers(I32::new(i64::from(announce_data.stats.incomplete) as i32)), + seeders: NumberOfPeers(I32::new(i64::from(announce_data.stats.complete) as i32)), }, - peers: response + peers: announce_data .peers .iter() .filter_map(|peer| { @@ -104,7 +112,7 @@ pub async fn handle_announce( .collect(), }; - Ok(Response::from(announce_response)) + Response::from(announce_response) } } diff --git a/src/servers/udp/handlers/connect.rs b/src/servers/udp/handlers/connect.rs index bd3c4ef0a..8275d36af 100644 --- 
a/src/servers/udp/handlers/connect.rs +++ b/src/servers/udp/handlers/connect.rs @@ -2,7 +2,7 @@ use std::net::SocketAddr; use std::sync::Arc; -use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response}; +use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, ConnectionId, Response}; use tracing::{instrument, Level}; use crate::packages::udp_tracker_core; @@ -25,12 +25,9 @@ pub async fn handle_connect( tracing::trace!("handle connect"); - let connection_id = make(gen_remote_fingerprint(&remote_addr), cookie_issue_time).expect("it should be a normal value"); + // todo: move to connect service in udp_tracker_core - let response = ConnectResponse { - transaction_id: request.transaction_id, - connection_id, - }; + let connection_id = make(gen_remote_fingerprint(&remote_addr), cookie_issue_time).expect("it should be a normal value"); if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { match remote_addr { @@ -47,6 +44,15 @@ pub async fn handle_connect( } } + build_response(*request, connection_id) +} + +fn build_response(request: ConnectRequest, connection_id: ConnectionId) -> Response { + let response = ConnectResponse { + transaction_id: request.transaction_id, + connection_id, + }; + Response::from(response) } diff --git a/src/servers/udp/handlers/scrape.rs b/src/servers/udp/handlers/scrape.rs index bca284860..b36eb92a0 100644 --- a/src/servers/udp/handlers/scrape.rs +++ b/src/servers/udp/handlers/scrape.rs @@ -7,6 +7,7 @@ use aquatic_udp_protocol::{ NumberOfDownloads, NumberOfPeers, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; +use torrust_tracker_primitives::core::ScrapeData; use tracing::{instrument, Level}; use zerocopy::network_endian::I32; @@ -43,8 +44,10 @@ pub async fn handle_scrape( .await .map_err(|e| (e.into(), request.transaction_id))?; - // todo: extract `build_response` function. 
+ Ok(build_response(request, &scrape_data)) +} +fn build_response(request: &ScrapeRequest, scrape_data: &ScrapeData) -> Response { let mut torrent_stats: Vec = Vec::new(); for file in &scrape_data.files { @@ -67,7 +70,7 @@ pub async fn handle_scrape( torrent_stats, }; - Ok(Response::from(response)) + Response::from(response) } #[cfg(test)] From fdc2543f49691b7f5d81698f9c326cd94a78f1cc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Feb 2025 10:08:03 +0000 Subject: [PATCH 278/802] refactor: extract UDP connect service --- .../udp_tracker_core/services/connect.rs | 129 ++++++++++++++++++ src/packages/udp_tracker_core/services/mod.rs | 49 +++++++ src/servers/udp/handlers/connect.rs | 26 +--- 3 files changed, 180 insertions(+), 24 deletions(-) create mode 100644 src/packages/udp_tracker_core/services/connect.rs diff --git a/src/packages/udp_tracker_core/services/connect.rs b/src/packages/udp_tracker_core/services/connect.rs new file mode 100644 index 000000000..4cc8b0a3b --- /dev/null +++ b/src/packages/udp_tracker_core/services/connect.rs @@ -0,0 +1,129 @@ +//! The `connect` service. +//! +//! The service is responsible for handling the `connect` requests. +use std::net::SocketAddr; +use std::sync::Arc; + +use aquatic_udp_protocol::ConnectionId; + +use crate::packages::udp_tracker_core; +use crate::packages::udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; + +/// # Panics +/// +/// IT will panic if there was an error making the connection cookie. +pub async fn handle_connect( + remote_addr: SocketAddr, + opt_udp_stats_event_sender: &Arc>>, + cookie_issue_time: f64, +) -> ConnectionId { + // todo: return a UDP response like the HTTP tracker instead of raw ConnectionId. 
+ + let connection_id = make(gen_remote_fingerprint(&remote_addr), cookie_issue_time).expect("it should be a normal value"); + + if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { + match remote_addr { + SocketAddr::V4(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp4Connect) + .await; + } + SocketAddr::V6(_) => { + udp_stats_event_sender + .send_event(udp_tracker_core::statistics::event::Event::Udp6Connect) + .await; + } + } + } + + connection_id +} + +#[cfg(test)] +mod tests { + + mod connect_request { + + use std::future; + use std::sync::Arc; + + use mockall::predicate::eq; + + use crate::packages::udp_tracker_core::connection_cookie::make; + use crate::packages::udp_tracker_core::services::connect::handle_connect; + use crate::packages::udp_tracker_core::services::tests::{ + sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv4_socket_address, sample_ipv6_remote_addr, + sample_ipv6_remote_addr_fingerprint, sample_issue_time, MockUdpStatsEventSender, + }; + use crate::packages::{self, udp_tracker_core}; + + #[tokio::test] + async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { + let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + let response = handle_connect(sample_ipv4_remote_addr(), &udp_stats_event_sender, sample_issue_time()).await; + + assert_eq!( + response, + make(sample_ipv4_remote_addr_fingerprint(), sample_issue_time()).unwrap() + ); + } + + #[tokio::test] + async fn a_connect_response_should_contain_a_new_connection_id() { + let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + let response = handle_connect(sample_ipv4_remote_addr(), &udp_stats_event_sender, 
sample_issue_time()).await; + + assert_eq!( + response, + make(sample_ipv4_remote_addr_fingerprint(), sample_issue_time()).unwrap(), + ); + } + + #[tokio::test] + async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { + let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + let response = handle_connect(sample_ipv6_remote_addr(), &udp_stats_event_sender, sample_issue_time()).await; + + assert_eq!( + response, + make(sample_ipv6_remote_addr_fingerprint(), sample_issue_time()).unwrap(), + ); + } + + #[tokio::test] + async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp4Connect)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + + let client_socket_address = sample_ipv4_socket_address(); + + handle_connect(client_socket_address, &udp_stats_event_sender, sample_issue_time()).await; + } + + #[tokio::test] + async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { + let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + udp_stats_event_sender_mock + .expect_send_event() + .with(eq(udp_tracker_core::statistics::event::Event::Udp6Connect)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + + handle_connect(sample_ipv6_remote_addr(), &udp_stats_event_sender, sample_issue_time()).await; + } + } +} diff --git a/src/packages/udp_tracker_core/services/mod.rs 
b/src/packages/udp_tracker_core/services/mod.rs index 776d2dfbf..5b222c4d9 100644 --- a/src/packages/udp_tracker_core/services/mod.rs +++ b/src/packages/udp_tracker_core/services/mod.rs @@ -1,2 +1,51 @@ pub mod announce; +pub mod connect; pub mod scrape; + +#[cfg(test)] +pub(crate) mod tests { + + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + + use futures::future::BoxFuture; + use mockall::mock; + use tokio::sync::mpsc::error::SendError; + + use crate::packages::udp_tracker_core; + use crate::packages::udp_tracker_core::connection_cookie::gen_remote_fingerprint; + + pub(crate) fn sample_ipv4_remote_addr() -> SocketAddr { + sample_ipv4_socket_address() + } + + pub(crate) fn sample_ipv4_remote_addr_fingerprint() -> u64 { + gen_remote_fingerprint(&sample_ipv4_socket_address()) + } + + pub(crate) fn sample_ipv6_remote_addr() -> SocketAddr { + sample_ipv6_socket_address() + } + + pub(crate) fn sample_ipv6_remote_addr_fingerprint() -> u64 { + gen_remote_fingerprint(&sample_ipv6_socket_address()) + } + + pub(crate) fn sample_ipv4_socket_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + } + + fn sample_ipv6_socket_address() -> SocketAddr { + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + } + + pub(crate) fn sample_issue_time() -> f64 { + 1_000_000_000_f64 + } + + mock! 
{ + pub(crate) UdpStatsEventSender {} + impl udp_tracker_core::statistics::event::sender::Sender for UdpStatsEventSender { + fn send_event(&self, event: udp_tracker_core::statistics::event::Event) -> BoxFuture<'static,Option > > > ; + } + } +} diff --git a/src/servers/udp/handlers/connect.rs b/src/servers/udp/handlers/connect.rs index 8275d36af..aae6a25f5 100644 --- a/src/servers/udp/handlers/connect.rs +++ b/src/servers/udp/handlers/connect.rs @@ -6,14 +6,9 @@ use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, ConnectionId, Respon use tracing::{instrument, Level}; use crate::packages::udp_tracker_core; -use crate::packages::udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; /// It handles the `Connect` request. Refer to [`Connect`](crate::servers::udp#connect) /// request for more information. -/// -/// # Errors -/// -/// This function does not ever return an error. #[instrument(fields(transaction_id), skip(opt_udp_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_connect( remote_addr: SocketAddr, @@ -22,27 +17,10 @@ pub async fn handle_connect( cookie_issue_time: f64, ) -> Response { tracing::Span::current().record("transaction_id", request.transaction_id.0.to_string()); - tracing::trace!("handle connect"); - // todo: move to connect service in udp_tracker_core - - let connection_id = make(gen_remote_fingerprint(&remote_addr), cookie_issue_time).expect("it should be a normal value"); - - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { - match remote_addr { - SocketAddr::V4(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp4Connect) - .await; - } - SocketAddr::V6(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp6Connect) - .await; - } - } - } + let connection_id = + udp_tracker_core::services::connect::handle_connect(remote_addr, opt_udp_stats_event_sender, cookie_issue_time).await; 
build_response(*request, connection_id) } From fdbe97c2dc33f72bb493e9edcfa0fa54a1ec2027 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Feb 2025 10:42:01 +0000 Subject: [PATCH 279/802] chore(deps): udpate dependencies ``` cargo update Updating crates.io index Locking 6 packages to latest compatible versions Updating clap v4.5.29 -> v4.5.30 Updating clap_builder v4.5.29 -> v4.5.30 Updating rand_core v0.9.0 -> v0.9.1 Updating tempfile v3.16.0 -> v3.17.1 Updating typenum v1.17.0 -> v1.18.0 Updating uuid v1.13.1 -> v1.13.2 ``` --- Cargo.lock | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 07c08ab04..0a2a4f9fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -970,9 +970,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.29" +version = "4.5.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acebd8ad879283633b343856142139f2da2317c96b05b4dd6181c61e2480184" +checksum = "92b7b18d71fad5313a1e320fa9897994228ce274b60faa4d694fe0ea89cd9e6d" dependencies = [ "clap_builder", "clap_derive", @@ -980,9 +980,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.29" +version = "4.5.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ba32cbda51c7e1dfd49acc1457ba1a7dec5b64fe360e828acb13ca8dc9c2f9" +checksum = "a35db2071778a7344791a4fb4f95308b5673d219dee3ae348b86642574ecc90c" dependencies = [ "anstream", "anstyle", @@ -3175,7 +3175,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" dependencies = [ "rand_chacha 0.9.0", - "rand_core 0.9.0", + "rand_core 0.9.1", "zerocopy 0.8.18", ] @@ -3196,7 +3196,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", - "rand_core 0.9.0", + "rand_core 0.9.1", ] 
[[package]] @@ -3210,9 +3210,9 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b08f3c9802962f7e1b25113931d94f43ed9725bebc59db9d0c3e9a23b67e15ff" +checksum = "a88e0da7a2c97baa202165137c158d0a2e824ac465d13d81046727b34cb247d3" dependencies = [ "getrandom 0.3.1", "zerocopy 0.8.18", @@ -4010,9 +4010,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.16.0" +version = "3.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38c246215d7d24f48ae091a2902398798e05d978b24315d6efbc00ede9a8bb91" +checksum = "22e5a0acb1f3f55f65cc4a866c361b2fb2a0ff6366785ae6fbb5f85df07ba230" dependencies = [ "cfg-if", "fastrand", @@ -4640,9 +4640,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.17.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" [[package]] name = "uncased" @@ -4703,9 +4703,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced87ca4be083373936a67f8de945faa23b6b42384bd5b64434850802c6dccd0" +checksum = "8c1f41ffb7cf259f1ecc2876861a17e7142e63ead296f671f81f6ae85903e0d6" dependencies = [ "getrandom 0.3.1", "rand 0.9.0", From 84cf581f8c7417f9670c2101bcaf9149a91aba4c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Feb 2025 11:46:08 +0000 Subject: [PATCH 280/802] refactor: [#1282] move BanService to udp-tracker-core package --- src/bootstrap/app.rs | 2 +- src/container.rs | 5 +++-- src/packages/tracker_api_core/statistics/services.rs | 7 ++++--- .../udp_tracker_core/services}/banning.rs | 0 src/packages/udp_tracker_core/services/mod.rs | 
1 + src/packages/udp_tracker_core/statistics/services.rs | 4 ++-- src/servers/apis/v1/context/stats/handlers.rs | 2 +- src/servers/udp/server/mod.rs | 1 - 8 files changed, 12 insertions(+), 10 deletions(-) rename src/{servers/udp/server => packages/udp_tracker_core/services}/banning.rs (100%) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 41023f2fa..b7ce8f21c 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -35,8 +35,8 @@ use tracing::instrument; use super::config::initialize_configuration; use crate::bootstrap; use crate::container::AppContainer; +use crate::packages::udp_tracker_core::services::banning::BanService; use crate::packages::{http_tracker_core, udp_tracker_core}; -use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; use crate::shared::crypto::ephemeral_instance_keys; use crate::shared::crypto::keys::{self, Keeper as _}; diff --git a/src/container.rs b/src/container.rs index 47cc39ed3..d62f8d985 100644 --- a/src/container.rs +++ b/src/container.rs @@ -14,8 +14,9 @@ use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, HttpApi, HttpTracker, UdpTracker}; -use crate::packages::{http_tracker_core, udp_tracker_core}; -use crate::servers::udp::server::banning::BanService; +use crate::packages::http_tracker_core; +use crate::packages::udp_tracker_core::services::banning::BanService; +use crate::packages::udp_tracker_core::{self}; pub struct AppContainer { pub core_config: Arc, diff --git a/src/packages/tracker_api_core/statistics/services.rs b/src/packages/tracker_api_core/statistics/services.rs index bb8e71ab8..15f976b52 100644 --- a/src/packages/tracker_api_core/statistics/services.rs +++ b/src/packages/tracker_api_core/statistics/services.rs @@ -5,8 +5,9 @@ use packages::tracker_api_core::statistics::metrics::Metrics; use tokio::sync::RwLock; use 
torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use crate::packages::{self, http_tracker_core, udp_tracker_core}; -use crate::servers::udp::server::banning::BanService; +use crate::packages::udp_tracker_core::services::banning::BanService; +use crate::packages::udp_tracker_core::{self}; +use crate::packages::{self, http_tracker_core}; /// All the metrics collected by the tracker. #[derive(Debug, PartialEq)] @@ -83,8 +84,8 @@ mod tests { use crate::packages::tracker_api_core::statistics::metrics::Metrics; use crate::packages::tracker_api_core::statistics::services::{get_metrics, TrackerMetrics}; + use crate::packages::udp_tracker_core::services::banning::BanService; use crate::packages::{http_tracker_core, udp_tracker_core}; - use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; pub fn tracker_configuration() -> Configuration { diff --git a/src/servers/udp/server/banning.rs b/src/packages/udp_tracker_core/services/banning.rs similarity index 100% rename from src/servers/udp/server/banning.rs rename to src/packages/udp_tracker_core/services/banning.rs diff --git a/src/packages/udp_tracker_core/services/mod.rs b/src/packages/udp_tracker_core/services/mod.rs index 5b222c4d9..5c7c760c8 100644 --- a/src/packages/udp_tracker_core/services/mod.rs +++ b/src/packages/udp_tracker_core/services/mod.rs @@ -1,4 +1,5 @@ pub mod announce; +pub mod banning; pub mod connect; pub mod scrape; diff --git a/src/packages/udp_tracker_core/statistics/services.rs b/src/packages/udp_tracker_core/statistics/services.rs index 80e1d8fb5..63279bc9a 100644 --- a/src/packages/udp_tracker_core/statistics/services.rs +++ b/src/packages/udp_tracker_core/statistics/services.rs @@ -45,7 +45,7 @@ use tokio::sync::RwLock; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use crate::packages; -use crate::servers::udp::server::banning::BanService; +use 
crate::packages::udp_tracker_core::services::banning::BanService; /// All the metrics collected by the tracker. #[derive(Debug, PartialEq)] @@ -111,9 +111,9 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::packages::udp_tracker_core; + use crate::packages::udp_tracker_core::services::banning::BanService; use crate::packages::udp_tracker_core::statistics; use crate::packages::udp_tracker_core::statistics::services::{get_metrics, TrackerMetrics}; - use crate::servers::udp::server::banning::BanService; use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; pub fn tracker_configuration() -> Configuration { diff --git a/src/servers/apis/v1/context/stats/handlers.rs b/src/servers/apis/v1/context/stats/handlers.rs index 820f39909..287bca5d1 100644 --- a/src/servers/apis/v1/context/stats/handlers.rs +++ b/src/servers/apis/v1/context/stats/handlers.rs @@ -11,8 +11,8 @@ use tokio::sync::RwLock; use super::responses::{metrics_response, stats_response}; use crate::packages::tracker_api_core::statistics::services::get_metrics; +use crate::packages::udp_tracker_core::services::banning::BanService; use crate::packages::{http_tracker_core, udp_tracker_core}; -use crate::servers::udp::server::banning::BanService; #[derive(Deserialize, Debug, Default)] #[serde(rename_all = "lowercase")] diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index 941f6b5cb..2be568c89 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -6,7 +6,6 @@ use thiserror::Error; use super::RawRequest; -pub mod banning; pub mod bound_socket; pub mod launcher; pub mod processor; From 1886593b465b2a7c4b4a51eee08b3f5adce38394 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Feb 2025 12:50:51 +0000 Subject: [PATCH 281/802] refactor: [#1282] extract udp-tracker-core package --- .github/workflows/deployment.yaml | 1 + Cargo.lock | 29 +- Cargo.toml | 5 +- packages/udp-tracker-core/Cargo.toml | 36 + 
packages/udp-tracker-core/LICENSE | 661 ++++++++++++++++++ packages/udp-tracker-core/README.md | 15 + .../src}/connection_cookie.rs | 6 +- .../src}/crypto/ephemeral_instance_keys.rs | 11 +- .../udp-tracker-core/src}/crypto/keys.rs | 24 +- .../udp-tracker-core/src}/crypto/mod.rs | 0 packages/udp-tracker-core/src/lib.rs | 13 + .../src}/services/announce.rs | 16 +- .../udp-tracker-core/src}/services/banning.rs | 2 +- .../udp-tracker-core/src}/services/connect.rs | 36 +- .../udp-tracker-core/src}/services/mod.rs | 8 +- .../udp-tracker-core/src}/services/scrape.rs | 14 +- .../src}/statistics/event/handler.rs | 14 +- .../src}/statistics/event/listener.rs | 2 +- .../src}/statistics/event/mod.rs | 0 .../src}/statistics/event/sender.rs | 4 +- .../src}/statistics/keeper.rs | 6 +- .../src}/statistics/metrics.rs | 0 .../udp-tracker-core/src}/statistics/mod.rs | 0 .../src}/statistics/repository.rs | 0 .../src}/statistics/services.rs | 26 +- .../udp-tracker-core/src}/statistics/setup.rs | 6 +- src/bootstrap/app.rs | 12 +- src/bootstrap/jobs/udp_tracker.rs | 2 +- src/console/ci/e2e/logs_parser.rs | 2 +- src/container.rs | 12 +- src/lib.rs | 3 - src/packages/mod.rs | 1 - .../tracker_api_core/statistics/services.rs | 14 +- src/packages/udp_tracker_core/mod.rs | 3 - src/servers/apis/v1/context/stats/handlers.rs | 6 +- src/servers/udp/error.rs | 5 +- src/servers/udp/handlers/announce.rs | 38 +- src/servers/udp/handlers/connect.rs | 26 +- src/servers/udp/handlers/error.rs | 15 +- src/servers/udp/handlers/mod.rs | 16 +- src/servers/udp/handlers/scrape.rs | 24 +- src/servers/udp/mod.rs | 4 +- src/servers/udp/server/bound_socket.rs | 3 +- src/servers/udp/server/launcher.rs | 18 +- src/servers/udp/server/processor.rs | 16 +- src/servers/udp/server/request_buffer.rs | 3 +- src/servers/udp/server/states.rs | 2 +- src/shared/mod.rs | 2 - tests/servers/udp/environment.rs | 4 +- 49 files changed, 937 insertions(+), 229 deletions(-) create mode 100644 packages/udp-tracker-core/Cargo.toml create 
mode 100644 packages/udp-tracker-core/LICENSE create mode 100644 packages/udp-tracker-core/README.md rename {src/packages/udp_tracker_core => packages/udp-tracker-core/src}/connection_cookie.rs (98%) rename {src/shared => packages/udp-tracker-core/src}/crypto/ephemeral_instance_keys.rs (69%) rename {src/shared => packages/udp-tracker-core/src}/crypto/keys.rs (78%) rename {src/shared => packages/udp-tracker-core/src}/crypto/mod.rs (100%) create mode 100644 packages/udp-tracker-core/src/lib.rs rename {src/packages/udp_tracker_core => packages/udp-tracker-core/src}/services/announce.rs (86%) rename {src/packages/udp_tracker_core => packages/udp-tracker-core/src}/services/banning.rs (98%) rename {src/packages/udp_tracker_core => packages/udp-tracker-core/src}/services/connect.rs (74%) rename {src/packages/udp_tracker_core => packages/udp-tracker-core/src}/services/mod.rs (74%) rename {src/packages/udp_tracker_core => packages/udp-tracker-core/src}/services/scrape.rs (83%) rename {src/packages/udp_tracker_core => packages/udp-tracker-core/src}/statistics/event/handler.rs (92%) rename {src/packages/udp_tracker_core => packages/udp-tracker-core/src}/statistics/event/listener.rs (79%) rename {src/packages/udp_tracker_core => packages/udp-tracker-core/src}/statistics/event/mod.rs (100%) rename {src/packages/udp_tracker_core => packages/udp-tracker-core/src}/statistics/event/sender.rs (79%) rename {src/packages/udp_tracker_core => packages/udp-tracker-core/src}/statistics/keeper.rs (90%) rename {src/packages/udp_tracker_core => packages/udp-tracker-core/src}/statistics/metrics.rs (100%) rename {src/packages/udp_tracker_core => packages/udp-tracker-core/src}/statistics/mod.rs (100%) rename {src/packages/udp_tracker_core => packages/udp-tracker-core/src}/statistics/repository.rs (100%) rename {src/packages/udp_tracker_core => packages/udp-tracker-core/src}/statistics/services.rs (81%) rename {src/packages/udp_tracker_core => packages/udp-tracker-core/src}/statistics/setup.rs 
(79%) delete mode 100644 src/packages/udp_tracker_core/mod.rs diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 328bd91bb..cd4887cbe 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -59,6 +59,7 @@ jobs: cargo publish -p bittorrent-tracker-client cargo publish -p bittorrent-tracker-core cargo publish -p bittorrent-udp-protocol + cargo publish -p bittorrent-udp-tracker-core cargo publish -p torrust-tracker cargo publish -p torrust-tracker-api-client cargo publish -p torrust-tracker-client diff --git a/Cargo.lock b/Cargo.lock index 0a2a4f9fe..2835cd5c1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -643,6 +643,30 @@ dependencies = [ "torrust-tracker-primitives", ] +[[package]] +name = "bittorrent-udp-tracker-core" +version = "3.0.0-develop" +dependencies = [ + "aquatic_udp_protocol", + "bittorrent-primitives", + "bittorrent-tracker-core", + "bittorrent-udp-protocol", + "bloom", + "blowfish", + "cipher", + "futures", + "lazy_static", + "mockall", + "rand 0.8.5", + "thiserror 2.0.11", + "tokio", + "torrust-tracker-configuration", + "torrust-tracker-primitives", + "torrust-tracker-test-helpers", + "tracing", + "zerocopy 0.7.35", +] + [[package]] name = "bitvec" version = "1.0.1" @@ -4310,12 +4334,9 @@ dependencies = [ "bittorrent-primitives", "bittorrent-tracker-client", "bittorrent-tracker-core", - "bittorrent-udp-protocol", - "bloom", - "blowfish", + "bittorrent-udp-tracker-core", "camino", "chrono", - "cipher", "clap", "crossbeam-skiplist", "dashmap", diff --git a/Cargo.toml b/Cargo.toml index 7337b49af..b72baea5b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,12 +43,9 @@ bittorrent-http-protocol = { version = "3.0.0-develop", path = "packages/http-pr bittorrent-primitives = "0.1.0" bittorrent-tracker-client = { version = "3.0.0-develop", path = "packages/tracker-client" } bittorrent-tracker-core = { version = "3.0.0-develop", path = "packages/tracker-core" } -bittorrent-udp-protocol = { 
version = "3.0.0-develop", path = "packages/udp-protocol" } -bloom = "0.3.2" -blowfish = "0" +bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "packages/udp-tracker-core" } camino = { version = "1", features = ["serde", "serde1"] } chrono = { version = "0", default-features = false, features = ["clock"] } -cipher = "0" clap = { version = "4", features = ["derive", "env"] } crossbeam-skiplist = "0" dashmap = "6" diff --git a/packages/udp-tracker-core/Cargo.toml b/packages/udp-tracker-core/Cargo.toml new file mode 100644 index 000000000..bfa840cc3 --- /dev/null +++ b/packages/udp-tracker-core/Cargo.toml @@ -0,0 +1,36 @@ +[package] +authors.workspace = true +description = "A library with the core functionality needed to implement a BitTorrent UDP tracker." +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +keywords = ["api", "bittorrent", "core", "library", "tracker"] +license.workspace = true +name = "bittorrent-udp-tracker-core" +publish.workspace = true +readme = "README.md" +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +aquatic_udp_protocol = "0" +bittorrent-primitives = "0.1.0" +bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } +bittorrent-udp-protocol = { version = "3.0.0-develop", path = "../udp-protocol" } +bloom = "0.3.2" +blowfish = "0" +cipher = "0" +futures = "0" +lazy_static = "1" +rand = "0" +thiserror = "2" +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +tracing = "0" +zerocopy = "0.7" + +[dev-dependencies] +mockall = "0" +torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } diff --git a/packages/udp-tracker-core/LICENSE b/packages/udp-tracker-core/LICENSE new 
file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/udp-tracker-core/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. 
+The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<https://www.gnu.org/licenses/>. diff --git a/packages/udp-tracker-core/README.md b/packages/udp-tracker-core/README.md new file mode 100644 index 000000000..625e5d011 --- /dev/null +++ b/packages/udp-tracker-core/README.md @@ -0,0 +1,15 @@ +# BitTorrent UDP Tracker Core library + +A library with the core functionality needed to implement a BitTorrent UDP tracker. + +You usually don’t need to use this library directly. Instead, you should use the [Torrust Tracker](https://github.com/torrust/torrust-tracker). If you want to build your own tracker, you can use this library as the core functionality. + +> **Disclaimer**: This library is actively under development. We’re currently extracting and refining common types from the [Torrust Tracker](https://github.com/torrust/torrust-tracker) to make them available to the BitTorrent community in Rust. While these types are functional, they are not yet ready for use in production or third-party projects. + +## Documentation + +[Crate documentation](https://docs.rs/bittorrent-udp-tracker-core). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). 
diff --git a/src/packages/udp_tracker_core/connection_cookie.rs b/packages/udp-tracker-core/src/connection_cookie.rs similarity index 98% rename from src/packages/udp_tracker_core/connection_cookie.rs rename to packages/udp-tracker-core/src/connection_cookie.rs index b9070c63a..31c116400 100644 --- a/src/packages/udp_tracker_core/connection_cookie.rs +++ b/packages/udp-tracker-core/src/connection_cookie.rs @@ -83,7 +83,7 @@ use thiserror::Error; use tracing::instrument; use zerocopy::AsBytes; -use crate::shared::crypto::keys::CipherArrayBlowfish; +use crate::crypto::keys::CipherArrayBlowfish; /// Error returned when there was an error with the connection cookie. #[derive(Error, Debug, Clone)] @@ -169,7 +169,7 @@ pub fn check(cookie: &Cookie, fingerprint: u64, valid_range: Range) -> Resu } #[must_use] -pub(crate) fn gen_remote_fingerprint(remote_addr: &SocketAddr) -> u64 { +pub fn gen_remote_fingerprint(remote_addr: &SocketAddr) -> u64 { let mut state = DefaultHasher::new(); remote_addr.hash(&mut state); state.finish() @@ -183,7 +183,7 @@ mod cookie_builder { pub type CookiePlainText = CipherArrayBlowfish; pub type CookieCipherText = CipherArrayBlowfish; - use crate::shared::crypto::keys::{CipherArrayBlowfish, Current, Keeper}; + use crate::crypto::keys::{CipherArrayBlowfish, Current, Keeper}; #[instrument()] pub(super) fn assemble(fingerprint: u64, issue_at: f64) -> CookiePlainText { diff --git a/src/shared/crypto/ephemeral_instance_keys.rs b/packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs similarity index 69% rename from src/shared/crypto/ephemeral_instance_keys.rs rename to packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs index df560c3f5..fcbf78288 100644 --- a/src/shared/crypto/ephemeral_instance_keys.rs +++ b/packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs @@ -15,10 +15,17 @@ pub type CipherArrayBlowfish = GenericArray() + }; /// The random cipher from the seed. 
- pub static ref RANDOM_CIPHER_BLOWFISH: CipherBlowfish = CipherBlowfish::new_from_slice(&Rng::random::(&mut ThreadRng::default())).expect("it could not generate key"); + pub static ref RANDOM_CIPHER_BLOWFISH: CipherBlowfish = { + let mut rng = ThreadRng::default(); + let seed: Seed = rng.gen(); + CipherBlowfish::new_from_slice(&seed).expect("it could not generate key") + }; /// The constant cipher for testing. pub static ref ZEROED_TEST_CIPHER_BLOWFISH: CipherBlowfish = CipherBlowfish::new_from_slice(&[0u8; 32]).expect("it could not generate key"); diff --git a/src/shared/crypto/keys.rs b/packages/udp-tracker-core/src/crypto/keys.rs similarity index 78% rename from src/shared/crypto/keys.rs rename to packages/udp-tracker-core/src/crypto/keys.rs index 60dc16660..f9a3e361d 100644 --- a/src/shared/crypto/keys.rs +++ b/packages/udp-tracker-core/src/crypto/keys.rs @@ -7,8 +7,8 @@ use self::detail_cipher::CURRENT_CIPHER; use self::detail_seed::CURRENT_SEED; -pub use crate::shared::crypto::ephemeral_instance_keys::CipherArrayBlowfish; -use crate::shared::crypto::ephemeral_instance_keys::{CipherBlowfish, Seed, RANDOM_CIPHER_BLOWFISH, RANDOM_SEED}; +pub use crate::crypto::ephemeral_instance_keys::CipherArrayBlowfish; +use crate::crypto::ephemeral_instance_keys::{CipherBlowfish, Seed, RANDOM_CIPHER_BLOWFISH, RANDOM_SEED}; /// This trait is for structures that can keep and provide a seed. 
pub trait Keeper { @@ -61,7 +61,7 @@ mod tests { use super::detail_seed::ZEROED_TEST_SEED; use super::{Current, Instance, Keeper}; - use crate::shared::crypto::ephemeral_instance_keys::{CipherBlowfish, Seed, ZEROED_TEST_CIPHER_BLOWFISH}; + use crate::crypto::ephemeral_instance_keys::{CipherBlowfish, Seed, ZEROED_TEST_CIPHER_BLOWFISH}; pub struct ZeroedTest; @@ -91,7 +91,7 @@ mod tests { } mod detail_seed { - use crate::shared::crypto::ephemeral_instance_keys::Seed; + use crate::crypto::ephemeral_instance_keys::Seed; #[allow(dead_code)] pub const ZEROED_TEST_SEED: Seed = [0u8; 32]; @@ -100,13 +100,13 @@ mod detail_seed { pub use ZEROED_TEST_SEED as CURRENT_SEED; #[cfg(not(test))] - pub use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as CURRENT_SEED; + pub use crate::crypto::ephemeral_instance_keys::RANDOM_SEED as CURRENT_SEED; #[cfg(test)] mod tests { - use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED; - use crate::shared::crypto::keys::detail_seed::ZEROED_TEST_SEED; - use crate::shared::crypto::keys::CURRENT_SEED; + use crate::crypto::ephemeral_instance_keys::RANDOM_SEED; + use crate::crypto::keys::detail_seed::ZEROED_TEST_SEED; + use crate::crypto::keys::CURRENT_SEED; #[test] fn it_should_have_a_zero_test_seed() { @@ -129,16 +129,16 @@ mod detail_seed { mod detail_cipher { #[allow(unused_imports)] #[cfg(not(test))] - pub use crate::shared::crypto::ephemeral_instance_keys::RANDOM_CIPHER_BLOWFISH as CURRENT_CIPHER; + pub use crate::crypto::ephemeral_instance_keys::RANDOM_CIPHER_BLOWFISH as CURRENT_CIPHER; #[cfg(test)] - pub use crate::shared::crypto::ephemeral_instance_keys::ZEROED_TEST_CIPHER_BLOWFISH as CURRENT_CIPHER; + pub use crate::crypto::ephemeral_instance_keys::ZEROED_TEST_CIPHER_BLOWFISH as CURRENT_CIPHER; #[cfg(test)] mod tests { use cipher::BlockEncrypt; - use crate::shared::crypto::ephemeral_instance_keys::{CipherArrayBlowfish, ZEROED_TEST_CIPHER_BLOWFISH}; - use crate::shared::crypto::keys::detail_cipher::CURRENT_CIPHER; 
+ use crate::crypto::ephemeral_instance_keys::{CipherArrayBlowfish, ZEROED_TEST_CIPHER_BLOWFISH}; + use crate::crypto::keys::detail_cipher::CURRENT_CIPHER; #[test] fn it_should_default_to_zeroed_seed_when_testing() { diff --git a/src/shared/crypto/mod.rs b/packages/udp-tracker-core/src/crypto/mod.rs similarity index 100% rename from src/shared/crypto/mod.rs rename to packages/udp-tracker-core/src/crypto/mod.rs diff --git a/packages/udp-tracker-core/src/lib.rs b/packages/udp-tracker-core/src/lib.rs new file mode 100644 index 000000000..8283e08c5 --- /dev/null +++ b/packages/udp-tracker-core/src/lib.rs @@ -0,0 +1,13 @@ +pub mod connection_cookie; +pub mod crypto; +pub mod services; +pub mod statistics; + +#[macro_use] +extern crate lazy_static; + +/// The maximum number of connection id errors per ip. Clients will be banned if +/// they exceed this limit. +pub const MAX_CONNECTION_ID_ERRORS_PER_IP: u32 = 10; + +pub const UDP_TRACKER_LOG_TARGET: &str = "UDP TRACKER"; diff --git a/src/packages/udp_tracker_core/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs similarity index 86% rename from src/packages/udp_tracker_core/services/announce.rs rename to packages/udp-tracker-core/src/services/announce.rs index a825d06ad..be47b9136 100644 --- a/src/packages/udp_tracker_core/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -20,8 +20,8 @@ use bittorrent_udp_protocol::peer_builder; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; -use crate::packages::udp_tracker_core::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieError}; -use crate::packages::udp_tracker_core::{self}; +use crate::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieError}; +use crate::statistics; /// Errors related to announce requests. 
#[derive(thiserror::Error, Debug, Clone)] @@ -73,7 +73,7 @@ pub async fn handle_announce( request: &AnnounceRequest, announce_handler: &Arc, whitelist_authorization: &Arc, - opt_udp_stats_event_sender: &Arc>>, + opt_udp_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { // todo: return a UDP response like the HTTP tracker instead of raw AnnounceData. @@ -105,12 +105,12 @@ pub async fn handle_announce( match original_peer_ip { IpAddr::V4(_) => { udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp4Announce) + .send_event(statistics::event::Event::Udp4Announce) .await; } IpAddr::V6(_) => { udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp6Announce) + .send_event(statistics::event::Event::Udp6Announce) .await; } } @@ -124,7 +124,7 @@ pub async fn handle_announce( /// It will return an error if the announce request fails. pub async fn invoke( announce_handler: Arc, - opt_udp_stats_event_sender: Arc>>, + opt_udp_stats_event_sender: Arc>>, info_hash: InfoHash, peer: &mut peer::Peer, peers_wanted: &PeersWanted, @@ -140,12 +140,12 @@ pub async fn invoke( match original_peer_ip { IpAddr::V4(_) => { udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp4Announce) + .send_event(statistics::event::Event::Udp4Announce) .await; } IpAddr::V6(_) => { udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp6Announce) + .send_event(statistics::event::Event::Udp6Announce) .await; } } diff --git a/src/packages/udp_tracker_core/services/banning.rs b/packages/udp-tracker-core/src/services/banning.rs similarity index 98% rename from src/packages/udp_tracker_core/services/banning.rs rename to packages/udp-tracker-core/src/services/banning.rs index d32dfa541..8f63dd804 100644 --- a/src/packages/udp_tracker_core/services/banning.rs +++ b/packages/udp-tracker-core/src/services/banning.rs @@ -21,7 +21,7 @@ use std::net::IpAddr; use 
bloom::{CountingBloomFilter, ASMS}; use tokio::time::Instant; -use crate::servers::udp::UDP_TRACKER_LOG_TARGET; +use crate::UDP_TRACKER_LOG_TARGET; pub struct BanService { max_connection_id_errors_per_ip: u32, diff --git a/src/packages/udp_tracker_core/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs similarity index 74% rename from src/packages/udp_tracker_core/services/connect.rs rename to packages/udp-tracker-core/src/services/connect.rs index 4cc8b0a3b..9cb419bbc 100644 --- a/src/packages/udp_tracker_core/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -6,15 +6,15 @@ use std::sync::Arc; use aquatic_udp_protocol::ConnectionId; -use crate::packages::udp_tracker_core; -use crate::packages::udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; +use crate::connection_cookie::{gen_remote_fingerprint, make}; +use crate::statistics; /// # Panics /// /// IT will panic if there was an error making the connection cookie. pub async fn handle_connect( remote_addr: SocketAddr, - opt_udp_stats_event_sender: &Arc>>, + opt_udp_stats_event_sender: &Arc>>, cookie_issue_time: f64, ) -> ConnectionId { // todo: return a UDP response like the HTTP tracker instead of raw ConnectionId. 
@@ -24,14 +24,10 @@ pub async fn handle_connect( if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { match remote_addr { SocketAddr::V4(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp4Connect) - .await; + udp_stats_event_sender.send_event(statistics::event::Event::Udp4Connect).await; } SocketAddr::V6(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp6Connect) - .await; + udp_stats_event_sender.send_event(statistics::event::Event::Udp6Connect).await; } } } @@ -49,17 +45,17 @@ mod tests { use mockall::predicate::eq; - use crate::packages::udp_tracker_core::connection_cookie::make; - use crate::packages::udp_tracker_core::services::connect::handle_connect; - use crate::packages::udp_tracker_core::services::tests::{ + use crate::connection_cookie::make; + use crate::services::connect::handle_connect; + use crate::services::tests::{ sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv4_socket_address, sample_ipv6_remote_addr, sample_ipv6_remote_addr_fingerprint, sample_issue_time, MockUdpStatsEventSender, }; - use crate::packages::{self, udp_tracker_core}; + use crate::statistics; #[tokio::test] async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let (udp_stats_event_sender, _udp_stats_repository) = statistics::setup::factory(false); let udp_stats_event_sender = Arc::new(udp_stats_event_sender); let response = handle_connect(sample_ipv4_remote_addr(), &udp_stats_event_sender, sample_issue_time()).await; @@ -72,7 +68,7 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id() { - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let (udp_stats_event_sender, 
_udp_stats_repository) = statistics::setup::factory(false); let udp_stats_event_sender = Arc::new(udp_stats_event_sender); let response = handle_connect(sample_ipv4_remote_addr(), &udp_stats_event_sender, sample_issue_time()).await; @@ -85,7 +81,7 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let (udp_stats_event_sender, _udp_stats_repository) = statistics::setup::factory(false); let udp_stats_event_sender = Arc::new(udp_stats_event_sender); let response = handle_connect(sample_ipv6_remote_addr(), &udp_stats_event_sender, sample_issue_time()).await; @@ -101,10 +97,10 @@ mod tests { let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp4Connect)) + .with(eq(statistics::event::Event::Udp4Connect)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = + let udp_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_stats_event_sender_mock))); let client_socket_address = sample_ipv4_socket_address(); @@ -117,10 +113,10 @@ mod tests { let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp6Connect)) + .with(eq(statistics::event::Event::Udp6Connect)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = + let udp_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_stats_event_sender_mock))); handle_connect(sample_ipv6_remote_addr(), &udp_stats_event_sender, sample_issue_time()).await; diff --git a/src/packages/udp_tracker_core/services/mod.rs b/packages/udp-tracker-core/src/services/mod.rs similarity index 74% rename from 
src/packages/udp_tracker_core/services/mod.rs rename to packages/udp-tracker-core/src/services/mod.rs index 5c7c760c8..0fcb612e4 100644 --- a/src/packages/udp_tracker_core/services/mod.rs +++ b/packages/udp-tracker-core/src/services/mod.rs @@ -12,8 +12,8 @@ pub(crate) mod tests { use mockall::mock; use tokio::sync::mpsc::error::SendError; - use crate::packages::udp_tracker_core; - use crate::packages::udp_tracker_core::connection_cookie::gen_remote_fingerprint; + use crate::connection_cookie::gen_remote_fingerprint; + use crate::statistics; pub(crate) fn sample_ipv4_remote_addr() -> SocketAddr { sample_ipv4_socket_address() @@ -45,8 +45,8 @@ pub(crate) mod tests { mock! { pub(crate) UdpStatsEventSender {} - impl udp_tracker_core::statistics::event::sender::Sender for UdpStatsEventSender { - fn send_event(&self, event: udp_tracker_core::statistics::event::Event) -> BoxFuture<'static,Option > > > ; + impl statistics::event::sender::Sender for UdpStatsEventSender { + fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; } } } diff --git a/src/packages/udp_tracker_core/services/scrape.rs b/packages/udp-tracker-core/src/services/scrape.rs similarity index 83% rename from src/packages/udp_tracker_core/services/scrape.rs rename to packages/udp-tracker-core/src/services/scrape.rs index 5beb54e9f..bec55afe3 100644 --- a/src/packages/udp_tracker_core/services/scrape.rs +++ b/packages/udp-tracker-core/src/services/scrape.rs @@ -17,8 +17,8 @@ use bittorrent_tracker_core::error::{ScrapeError, WhitelistError}; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use torrust_tracker_primitives::core::ScrapeData; -use crate::packages::udp_tracker_core; -use crate::packages::udp_tracker_core::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieError}; +use crate::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieError}; +use crate::statistics; /// Errors related to scrape requests. 
#[derive(thiserror::Error, Debug, Clone)] @@ -65,7 +65,7 @@ pub async fn handle_scrape( remote_addr: SocketAddr, request: &ScrapeRequest, scrape_handler: &Arc, - opt_udp_stats_event_sender: &Arc>>, + opt_udp_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { // todo: return a UDP response like the HTTP tracker instead of raw ScrapeData. @@ -84,14 +84,10 @@ pub async fn handle_scrape( if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { match remote_addr { SocketAddr::V4(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp4Scrape) - .await; + udp_stats_event_sender.send_event(statistics::event::Event::Udp4Scrape).await; } SocketAddr::V6(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp6Scrape) - .await; + udp_stats_event_sender.send_event(statistics::event::Event::Udp6Scrape).await; } } } diff --git a/src/packages/udp_tracker_core/statistics/event/handler.rs b/packages/udp-tracker-core/src/statistics/event/handler.rs similarity index 92% rename from src/packages/udp_tracker_core/statistics/event/handler.rs rename to packages/udp-tracker-core/src/statistics/event/handler.rs index d8fa049d0..91be32ad1 100644 --- a/src/packages/udp_tracker_core/statistics/event/handler.rs +++ b/packages/udp-tracker-core/src/statistics/event/handler.rs @@ -1,5 +1,5 @@ -use crate::packages::udp_tracker_core::statistics::event::{Event, UdpResponseKind}; -use crate::packages::udp_tracker_core::statistics::repository::Repository; +use crate::statistics::event::{Event, UdpResponseKind}; +use crate::statistics::repository::Repository; pub async fn handle_event(event: Event, stats_repository: &Repository) { match event { @@ -82,9 +82,9 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { #[cfg(test)] mod tests { - use crate::packages::udp_tracker_core::statistics::event::handler::handle_event; - use 
crate::packages::udp_tracker_core::statistics::event::Event; - use crate::packages::udp_tracker_core::statistics::repository::Repository; + use crate::statistics::event::handler::handle_event; + use crate::statistics::event::Event; + use crate::statistics::repository::Repository; #[tokio::test] async fn should_increase_the_udp4_connections_counter_when_it_receives_a_udp4_connect_event() { @@ -186,7 +186,7 @@ mod tests { handle_event( Event::Udp4Response { - kind: crate::packages::udp_tracker_core::statistics::event::UdpResponseKind::Announce, + kind: crate::statistics::event::UdpResponseKind::Announce, req_processing_time: std::time::Duration::from_secs(1), }, &stats_repository, @@ -226,7 +226,7 @@ mod tests { handle_event( Event::Udp6Response { - kind: crate::packages::udp_tracker_core::statistics::event::UdpResponseKind::Announce, + kind: crate::statistics::event::UdpResponseKind::Announce, req_processing_time: std::time::Duration::from_secs(1), }, &stats_repository, diff --git a/src/packages/udp_tracker_core/statistics/event/listener.rs b/packages/udp-tracker-core/src/statistics/event/listener.rs similarity index 79% rename from src/packages/udp_tracker_core/statistics/event/listener.rs rename to packages/udp-tracker-core/src/statistics/event/listener.rs index 6a84fbaa5..f1a2e25de 100644 --- a/src/packages/udp_tracker_core/statistics/event/listener.rs +++ b/packages/udp-tracker-core/src/statistics/event/listener.rs @@ -2,7 +2,7 @@ use tokio::sync::mpsc; use super::handler::handle_event; use super::Event; -use crate::packages::udp_tracker_core::statistics::repository::Repository; +use crate::statistics::repository::Repository; pub async fn dispatch_events(mut receiver: mpsc::Receiver, stats_repository: Repository) { while let Some(event) = receiver.recv().await { diff --git a/src/packages/udp_tracker_core/statistics/event/mod.rs b/packages/udp-tracker-core/src/statistics/event/mod.rs similarity index 100% rename from 
src/packages/udp_tracker_core/statistics/event/mod.rs rename to packages/udp-tracker-core/src/statistics/event/mod.rs diff --git a/src/packages/udp_tracker_core/statistics/event/sender.rs b/packages/udp-tracker-core/src/statistics/event/sender.rs similarity index 79% rename from src/packages/udp_tracker_core/statistics/event/sender.rs rename to packages/udp-tracker-core/src/statistics/event/sender.rs index 68e197eca..ca4b4e210 100644 --- a/src/packages/udp_tracker_core/statistics/event/sender.rs +++ b/packages/udp-tracker-core/src/statistics/event/sender.rs @@ -13,10 +13,10 @@ pub trait Sender: Sync + Send { fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; } -/// An [`statistics::EventSender`](crate::packages::udp_tracker_core::statistics::event::sender::Sender) implementation. +/// An [`statistics::EventSender`](crate::statistics::event::sender::Sender) implementation. /// /// It uses a channel sender to send the statistic events. The channel is created by a -/// [`statistics::Keeper`](crate::packages::udp_tracker_core::statistics::keeper::Keeper) +/// [`statistics::Keeper`](crate::statistics::keeper::Keeper) #[allow(clippy::module_name_repetitions)] pub struct ChannelSender { pub(crate) sender: mpsc::Sender, diff --git a/src/packages/udp_tracker_core/statistics/keeper.rs b/packages/udp-tracker-core/src/statistics/keeper.rs similarity index 90% rename from src/packages/udp_tracker_core/statistics/keeper.rs rename to packages/udp-tracker-core/src/statistics/keeper.rs index 9bd290145..dac7e7541 100644 --- a/src/packages/udp_tracker_core/statistics/keeper.rs +++ b/packages/udp-tracker-core/src/statistics/keeper.rs @@ -51,9 +51,9 @@ impl Keeper { #[cfg(test)] mod tests { - use crate::packages::udp_tracker_core::statistics::event::Event; - use crate::packages::udp_tracker_core::statistics::keeper::Keeper; - use crate::packages::udp_tracker_core::statistics::metrics::Metrics; + use crate::statistics::event::Event; + use crate::statistics::keeper::Keeper; + 
use crate::statistics::metrics::Metrics; #[tokio::test] async fn should_contain_the_tracker_statistics() { diff --git a/src/packages/udp_tracker_core/statistics/metrics.rs b/packages/udp-tracker-core/src/statistics/metrics.rs similarity index 100% rename from src/packages/udp_tracker_core/statistics/metrics.rs rename to packages/udp-tracker-core/src/statistics/metrics.rs diff --git a/src/packages/udp_tracker_core/statistics/mod.rs b/packages/udp-tracker-core/src/statistics/mod.rs similarity index 100% rename from src/packages/udp_tracker_core/statistics/mod.rs rename to packages/udp-tracker-core/src/statistics/mod.rs diff --git a/src/packages/udp_tracker_core/statistics/repository.rs b/packages/udp-tracker-core/src/statistics/repository.rs similarity index 100% rename from src/packages/udp_tracker_core/statistics/repository.rs rename to packages/udp-tracker-core/src/statistics/repository.rs diff --git a/src/packages/udp_tracker_core/statistics/services.rs b/packages/udp-tracker-core/src/statistics/services.rs similarity index 81% rename from src/packages/udp_tracker_core/statistics/services.rs rename to packages/udp-tracker-core/src/statistics/services.rs index 63279bc9a..486aaac06 100644 --- a/src/packages/udp_tracker_core/statistics/services.rs +++ b/packages/udp-tracker-core/src/statistics/services.rs @@ -2,14 +2,14 @@ //! //! It includes: //! -//! - A [`factory`](crate::packages::udp_tracker_core::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. -//! - A [`get_metrics`] service to get the tracker [`metrics`](crate::packages::udp_tracker_core::statistics::metrics::Metrics). +//! - A [`factory`](crate::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. +//! - A [`get_metrics`] service to get the tracker [`metrics`](crate::statistics::metrics::Metrics). //! //! Tracker metrics are collected using a Publisher-Subscribe pattern. //! //! 
The factory function builds two structs: //! -//! - An statistics event [`Sender`](crate::packages::udp_tracker_core::statistics::event::sender::Sender) +//! - An statistics event [`Sender`](crate::statistics::event::sender::Sender) //! - An statistics [`Repository`] //! //! ```text @@ -21,7 +21,7 @@ //! There is an event listener that is receiving all the events and processing them with an event handler. //! Then, the event handler updates the metrics depending on the received event. //! -//! For example, if you send the event [`Event::Udp4Connect`](crate::packages::udp_tracker_core::statistics::event::Event::Udp4Connect): +//! For example, if you send the event [`Event::Udp4Connect`](crate::statistics::event::Event::Udp4Connect): //! //! ```text //! let result = event_sender.send_event(Event::Udp4Connect).await; @@ -39,13 +39,12 @@ use std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use packages::udp_tracker_core::statistics::metrics::Metrics; -use packages::udp_tracker_core::statistics::repository::Repository; use tokio::sync::RwLock; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use crate::packages; -use crate::packages::udp_tracker_core::services::banning::BanService; +use crate::services::banning::BanService; +use crate::statistics::metrics::Metrics; +use crate::statistics::repository::Repository; /// All the metrics collected by the tracker. 
#[derive(Debug, PartialEq)] @@ -110,11 +109,9 @@ mod tests { use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_test_helpers::configuration; - use crate::packages::udp_tracker_core; - use crate::packages::udp_tracker_core::services::banning::BanService; - use crate::packages::udp_tracker_core::statistics; - use crate::packages::udp_tracker_core::statistics::services::{get_metrics, TrackerMetrics}; - use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; + use crate::services::banning::BanService; + use crate::statistics::services::{get_metrics, TrackerMetrics}; + use crate::{statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() @@ -127,8 +124,7 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let (_udp_stats_event_sender, udp_stats_repository) = - udp_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let (_udp_stats_event_sender, udp_stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let udp_stats_repository = Arc::new(udp_stats_repository); let tracker_metrics = get_metrics( diff --git a/src/packages/udp_tracker_core/statistics/setup.rs b/packages/udp-tracker-core/src/statistics/setup.rs similarity index 79% rename from src/packages/udp_tracker_core/statistics/setup.rs rename to packages/udp-tracker-core/src/statistics/setup.rs index c85c715a2..d3114a75e 100644 --- a/src/packages/udp_tracker_core/statistics/setup.rs +++ b/packages/udp-tracker-core/src/statistics/setup.rs @@ -1,14 +1,14 @@ //! Setup for the tracker statistics. //! //! The [`factory`] function builds the structs needed for handling the tracker metrics. 
-use crate::packages::udp_tracker_core::statistics; +use crate::statistics; /// It builds the structs needed for handling the tracker metrics. /// /// It returns: /// -/// - An statistics event [`Sender`](crate::packages::udp_tracker_core::statistics::event::sender::Sender) that allows you to send events related to statistics. -/// - An statistics [`Repository`](crate::packages::udp_tracker_core::statistics::repository::Repository) which is an in-memory repository for the tracker metrics. +/// - An statistics event [`Sender`](crate::statistics::event::sender::Sender) that allows you to send events related to statistics. +/// - An statistics [`Repository`](crate::statistics::repository::Repository) which is an in-memory repository for the tracker metrics. /// /// When the input argument `tracker_usage_statistics`is false the setup does not run the event listeners, consequently the statistics /// events are sent are received but not dispatched to the handler. diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index b7ce8f21c..9247f76bb 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -26,6 +26,10 @@ use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentT use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_tracker_core::whitelist::setup::initialize_whitelist_manager; +use bittorrent_udp_tracker_core::crypto::ephemeral_instance_keys; +use bittorrent_udp_tracker_core::crypto::keys::{self, Keeper as _}; +use bittorrent_udp_tracker_core::services::banning::BanService; +use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; use tokio::sync::RwLock; use torrust_tracker_clock::static_time; use torrust_tracker_configuration::validator::Validator; @@ -35,11 +39,7 @@ use tracing::instrument; use super::config::initialize_configuration; use crate::bootstrap; use crate::container::AppContainer; -use 
crate::packages::udp_tracker_core::services::banning::BanService; -use crate::packages::{http_tracker_core, udp_tracker_core}; -use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; -use crate::shared::crypto::ephemeral_instance_keys; -use crate::shared::crypto::keys::{self, Keeper as _}; +use crate::packages::http_tracker_core; /// It loads the configuration from the environment and builds app container. /// @@ -99,7 +99,7 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { // UDP stats let (udp_stats_event_sender, udp_stats_repository) = - udp_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); + bittorrent_udp_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); let udp_stats_event_sender = Arc::new(udp_stats_event_sender); let udp_stats_repository = Arc::new(udp_stats_repository); diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 387fdd6ae..03fe396d6 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -8,6 +8,7 @@ //! > for the configuration options. use std::sync::Arc; +use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use tokio::task::JoinHandle; use tracing::instrument; @@ -15,7 +16,6 @@ use crate::container::UdpTrackerContainer; use crate::servers::registar::ServiceRegistrationForm; use crate::servers::udp::server::spawner::Spawner; use crate::servers::udp::server::Server; -use crate::servers::udp::UDP_TRACKER_LOG_TARGET; /// It starts a new UDP server with the provided configuration. /// diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs index b39143c8f..8f7f6059d 100644 --- a/src/console/ci/e2e/logs_parser.rs +++ b/src/console/ci/e2e/logs_parser.rs @@ -1,11 +1,11 @@ //! Utilities to parse Torrust Tracker logs. 
+use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use regex::Regex; use serde::{Deserialize, Serialize}; use crate::servers::health_check_api::HEALTH_CHECK_API_LOG_TARGET; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; use crate::servers::logging::STARTED_ON; -use crate::servers::udp::UDP_TRACKER_LOG_TARGET; const INFO_THRESHOLD: &str = "INFO"; diff --git a/src/container.rs b/src/container.rs index d62f8d985..d4e46b116 100644 --- a/src/container.rs +++ b/src/container.rs @@ -11,12 +11,12 @@ use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentT use bittorrent_tracker_core::whitelist; use bittorrent_tracker_core::whitelist::manager::WhitelistManager; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; +use bittorrent_udp_tracker_core::services::banning::BanService; +use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, HttpApi, HttpTracker, UdpTracker}; use crate::packages::http_tracker_core; -use crate::packages::udp_tracker_core::services::banning::BanService; -use crate::packages::udp_tracker_core::{self}; pub struct AppContainer { pub core_config: Arc, @@ -29,9 +29,9 @@ pub struct AppContainer { pub whitelist_authorization: Arc, pub ban_service: Arc>, pub http_stats_event_sender: Arc>>, - pub udp_stats_event_sender: Arc>>, + pub udp_stats_event_sender: Arc>>, pub http_stats_repository: Arc, - pub udp_stats_repository: Arc, + pub udp_stats_repository: Arc, pub whitelist_manager: Arc, pub in_memory_torrent_repository: Arc, pub db_torrent_repository: Arc, @@ -44,7 +44,7 @@ pub struct UdpTrackerContainer { pub announce_handler: Arc, pub scrape_handler: Arc, pub whitelist_authorization: Arc, - pub udp_stats_event_sender: Arc>>, + pub udp_stats_event_sender: Arc>>, pub ban_service: Arc>, } @@ -96,7 +96,7 @@ pub struct HttpApiContainer { pub whitelist_manager: Arc, pub ban_service: Arc>, pub http_stats_repository: Arc, - pub udp_stats_repository: 
Arc, + pub udp_stats_repository: Arc, } impl HttpApiContainer { diff --git a/src/lib.rs b/src/lib.rs index b9ab402ab..210c88c14 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -498,9 +498,6 @@ pub mod packages; pub mod servers; pub mod shared; -#[macro_use] -extern crate lazy_static; - /// This code needs to be copied into each crate. /// Working version, for production. #[cfg(not(test))] diff --git a/src/packages/mod.rs b/src/packages/mod.rs index 453c3d533..f00f1ace0 100644 --- a/src/packages/mod.rs +++ b/src/packages/mod.rs @@ -3,4 +3,3 @@ //! It will be moved to the directory `packages`. pub mod http_tracker_core; pub mod tracker_api_core; -pub mod udp_tracker_core; diff --git a/src/packages/tracker_api_core/statistics/services.rs b/src/packages/tracker_api_core/statistics/services.rs index 15f976b52..d94ff5bf7 100644 --- a/src/packages/tracker_api_core/statistics/services.rs +++ b/src/packages/tracker_api_core/statistics/services.rs @@ -1,12 +1,12 @@ use std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use bittorrent_udp_tracker_core::services::banning::BanService; +use bittorrent_udp_tracker_core::{self, statistics}; use packages::tracker_api_core::statistics::metrics::Metrics; use tokio::sync::RwLock; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use crate::packages::udp_tracker_core::services::banning::BanService; -use crate::packages::udp_tracker_core::{self}; use crate::packages::{self, http_tracker_core}; /// All the metrics collected by the tracker. 
@@ -28,7 +28,7 @@ pub async fn get_metrics( in_memory_torrent_repository: Arc, ban_service: Arc>, http_stats_repository: Arc, - udp_stats_repository: Arc, + udp_stats_repository: Arc, ) -> TrackerMetrics { let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); @@ -77,16 +77,16 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::{self}; + use bittorrent_udp_tracker_core::services::banning::BanService; + use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; use tokio::sync::RwLock; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_test_helpers::configuration; + use crate::packages::http_tracker_core; use crate::packages::tracker_api_core::statistics::metrics::Metrics; use crate::packages::tracker_api_core::statistics::services::{get_metrics, TrackerMetrics}; - use crate::packages::udp_tracker_core::services::banning::BanService; - use crate::packages::{http_tracker_core, udp_tracker_core}; - use crate::servers::udp::server::launcher::MAX_CONNECTION_ID_ERRORS_PER_IP; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() @@ -106,7 +106,7 @@ mod tests { // UDP stats let (_udp_stats_event_sender, udp_stats_repository) = - udp_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + bittorrent_udp_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); let udp_stats_repository = Arc::new(udp_stats_repository); let tracker_metrics = get_metrics( diff --git a/src/packages/udp_tracker_core/mod.rs b/src/packages/udp_tracker_core/mod.rs deleted file mode 100644 index 1c93f811a..000000000 --- a/src/packages/udp_tracker_core/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod connection_cookie; -pub mod services; -pub mod statistics; diff --git 
a/src/servers/apis/v1/context/stats/handlers.rs b/src/servers/apis/v1/context/stats/handlers.rs index 287bca5d1..62379b6f4 100644 --- a/src/servers/apis/v1/context/stats/handlers.rs +++ b/src/servers/apis/v1/context/stats/handlers.rs @@ -6,13 +6,13 @@ use axum::extract::State; use axum::response::Response; use axum_extra::extract::Query; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use bittorrent_udp_tracker_core::services::banning::BanService; use serde::Deserialize; use tokio::sync::RwLock; use super::responses::{metrics_response, stats_response}; +use crate::packages::http_tracker_core; use crate::packages::tracker_api_core::statistics::services::get_metrics; -use crate::packages::udp_tracker_core::services::banning::BanService; -use crate::packages::{http_tracker_core, udp_tracker_core}; #[derive(Deserialize, Debug, Default)] #[serde(rename_all = "lowercase")] @@ -44,7 +44,7 @@ pub async fn get_stats_handler( Arc, Arc>, Arc, - Arc, + Arc, )>, params: Query, ) -> Response { diff --git a/src/servers/udp/error.rs b/src/servers/udp/error.rs index 9105ba0cb..93caf6853 100644 --- a/src/servers/udp/error.rs +++ b/src/servers/udp/error.rs @@ -2,13 +2,12 @@ use std::panic::Location; use aquatic_udp_protocol::{ConnectionId, RequestParseError}; +use bittorrent_udp_tracker_core::services::announce::UdpAnnounceError; +use bittorrent_udp_tracker_core::services::scrape::UdpScrapeError; use derive_more::derive::Display; use thiserror::Error; use torrust_tracker_located_error::LocatedError; -use crate::packages::udp_tracker_core::services::announce::UdpAnnounceError; -use crate::packages::udp_tracker_core::services::scrape::UdpScrapeError; - #[derive(Display, Debug)] #[display(":?")] pub struct ConnectionCookie(pub ConnectionId); diff --git a/src/servers/udp/handlers/announce.rs b/src/servers/udp/handlers/announce.rs index 1003b4041..66fc0ab42 100644 --- a/src/servers/udp/handlers/announce.rs +++ b/src/servers/udp/handlers/announce.rs 
@@ -10,12 +10,12 @@ use aquatic_udp_protocol::{ use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::whitelist; +use bittorrent_udp_tracker_core::{services, statistics}; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; use tracing::{instrument, Level}; use zerocopy::network_endian::I32; -use crate::packages::udp_tracker_core::{self}; use crate::servers::udp::error::Error; /// It handles the `Announce` request. Refer to [`Announce`](crate::servers::udp#announce) @@ -32,7 +32,7 @@ pub async fn handle_announce( core_config: &Arc, announce_handler: &Arc, whitelist_authorization: &Arc, - opt_udp_stats_event_sender: &Arc>>, + opt_udp_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { tracing::Span::current() @@ -42,7 +42,7 @@ pub async fn handle_announce( tracing::trace!("handle announce"); - let announce_data = udp_tracker_core::services::announce::handle_announce( + let announce_data = services::announce::handle_announce( remote_addr, request, announce_handler, @@ -128,8 +128,8 @@ mod tests { AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectionId, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, TransactionId, }; + use bittorrent_udp_tracker_core::connection_cookie::make; - use crate::packages::udp_tracker_core::connection_cookie::make; use crate::servers::udp::handlers::tests::{sample_ipv4_remote_addr_fingerprint, sample_issue_time}; struct AnnounceRequestBuilder { @@ -205,11 +205,11 @@ mod tests { use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist; + use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; + use bittorrent_udp_tracker_core::statistics; use mockall::predicate::eq; use 
torrust_tracker_configuration::Core; - use crate::packages::udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; - use crate::packages::{self, udp_tracker_core}; use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::{ @@ -366,7 +366,7 @@ mod tests { whitelist_authorization: Arc, ) -> Response { let (udp_stats_event_sender, _udp_stats_repository) = - packages::udp_tracker_core::statistics::setup::factory(false); + bittorrent_udp_tracker_core::statistics::setup::factory(false); let udp_stats_event_sender = Arc::new(udp_stats_event_sender); let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); @@ -414,10 +414,10 @@ mod tests { let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp4Announce)) + .with(eq(statistics::event::Event::Udp4Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = + let udp_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_stats_event_sender_mock))); let (core_tracker_services, _core_udp_tracker_services) = @@ -441,8 +441,8 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; - use crate::packages::udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::{ @@ -512,11 +512,11 @@ mod tests { use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use 
bittorrent_tracker_core::whitelist; + use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; + use bittorrent_udp_tracker_core::statistics; use mockall::predicate::eq; use torrust_tracker_configuration::Core; - use crate::packages::udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; - use crate::packages::{self, udp_tracker_core}; use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::{ @@ -677,7 +677,7 @@ mod tests { whitelist_authorization: Arc, ) -> Response { let (udp_stats_event_sender, _udp_stats_repository) = - packages::udp_tracker_core::statistics::setup::factory(false); + bittorrent_udp_tracker_core::statistics::setup::factory(false); let udp_stats_event_sender = Arc::new(udp_stats_event_sender); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -728,10 +728,10 @@ mod tests { let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp6Announce)) + .with(eq(statistics::event::Event::Udp6Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = + let udp_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_stats_event_sender_mock))); let (core_tracker_services, _core_udp_tracker_services) = @@ -768,10 +768,10 @@ mod tests { use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; + use bittorrent_udp_tracker_core::{self, statistics}; use mockall::predicate::eq; - use crate::packages::udp_tracker_core; - use 
crate::packages::udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::{ @@ -792,10 +792,10 @@ mod tests { let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp6Announce)) + .with(eq(statistics::event::Event::Udp6Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = + let udp_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_stats_event_sender_mock))); let announce_handler = Arc::new(AnnounceHandler::new( diff --git a/src/servers/udp/handlers/connect.rs b/src/servers/udp/handlers/connect.rs index aae6a25f5..b9209a115 100644 --- a/src/servers/udp/handlers/connect.rs +++ b/src/servers/udp/handlers/connect.rs @@ -3,24 +3,22 @@ use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, ConnectionId, Response}; +use bittorrent_udp_tracker_core::{services, statistics}; use tracing::{instrument, Level}; -use crate::packages::udp_tracker_core; - /// It handles the `Connect` request. Refer to [`Connect`](crate::servers::udp#connect) /// request for more information. 
#[instrument(fields(transaction_id), skip(opt_udp_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_connect( remote_addr: SocketAddr, request: &ConnectRequest, - opt_udp_stats_event_sender: &Arc>>, + opt_udp_stats_event_sender: &Arc>>, cookie_issue_time: f64, ) -> Response { tracing::Span::current().record("transaction_id", request.transaction_id.0.to_string()); tracing::trace!("handle connect"); - let connection_id = - udp_tracker_core::services::connect::handle_connect(remote_addr, opt_udp_stats_event_sender, cookie_issue_time).await; + let connection_id = services::connect::handle_connect(remote_addr, opt_udp_stats_event_sender, cookie_issue_time).await; build_response(*request, connection_id) } @@ -43,10 +41,10 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; + use bittorrent_udp_tracker_core::connection_cookie::make; + use bittorrent_udp_tracker_core::statistics; use mockall::predicate::eq; - use crate::packages::udp_tracker_core::connection_cookie::make; - use crate::packages::{self, udp_tracker_core}; use crate::servers::udp::handlers::handle_connect; use crate::servers::udp::handlers::tests::{ sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv4_socket_address, sample_ipv6_remote_addr, @@ -61,7 +59,7 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let (udp_stats_event_sender, _udp_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); let udp_stats_event_sender = Arc::new(udp_stats_event_sender); let request = ConnectRequest { @@ -87,7 +85,7 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id() { - let (udp_stats_event_sender, _udp_stats_repository) = 
packages::udp_tracker_core::statistics::setup::factory(false); + let (udp_stats_event_sender, _udp_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); let udp_stats_event_sender = Arc::new(udp_stats_event_sender); let request = ConnectRequest { @@ -113,7 +111,7 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let (udp_stats_event_sender, _udp_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); let udp_stats_event_sender = Arc::new(udp_stats_event_sender); let request = ConnectRequest { @@ -142,10 +140,10 @@ mod tests { let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp4Connect)) + .with(eq(statistics::event::Event::Udp4Connect)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = + let udp_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_stats_event_sender_mock))); let client_socket_address = sample_ipv4_socket_address(); @@ -164,10 +162,10 @@ mod tests { let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp6Connect)) + .with(eq(statistics::event::Event::Udp6Connect)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = + let udp_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_stats_event_sender_mock))); handle_connect( diff --git a/src/servers/udp/handlers/error.rs b/src/servers/udp/handlers/error.rs index 6cf273e78..443f36cc0 100644 --- a/src/servers/udp/handlers/error.rs +++ b/src/servers/udp/handlers/error.rs @@ -4,14 +4,13 @@ use std::ops::Range; use std::sync::Arc; 
use aquatic_udp_protocol::{ErrorResponse, RequestParseError, Response, TransactionId}; +use bittorrent_udp_tracker_core::connection_cookie::{check, gen_remote_fingerprint}; +use bittorrent_udp_tracker_core::{self, statistics, UDP_TRACKER_LOG_TARGET}; use tracing::{instrument, Level}; use uuid::Uuid; use zerocopy::network_endian::I32; -use crate::packages::udp_tracker_core; -use crate::packages::udp_tracker_core::connection_cookie::{check, gen_remote_fingerprint}; use crate::servers::udp::error::Error; -use crate::servers::udp::UDP_TRACKER_LOG_TARGET; #[allow(clippy::too_many_arguments)] #[instrument(fields(transaction_id), skip(opt_udp_stats_event_sender), ret(level = Level::TRACE))] @@ -19,7 +18,7 @@ pub async fn handle_error( remote_addr: SocketAddr, local_addr: SocketAddr, request_id: Uuid, - opt_udp_stats_event_sender: &Arc>>, + opt_udp_stats_event_sender: &Arc>>, cookie_valid_range: Range, e: &Error, transaction_id: Option, @@ -59,14 +58,10 @@ pub async fn handle_error( if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { match remote_addr { SocketAddr::V4(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp4Error) - .await; + udp_stats_event_sender.send_event(statistics::event::Event::Udp4Error).await; } SocketAddr::V6(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp6Error) - .await; + udp_stats_event_sender.send_event(statistics::event::Event::Udp6Error).await; } } } diff --git a/src/servers/udp/handlers/mod.rs b/src/servers/udp/handlers/mod.rs index e58497d4b..3d378b525 100644 --- a/src/servers/udp/handlers/mod.rs +++ b/src/servers/udp/handlers/mod.rs @@ -11,6 +11,7 @@ use std::time::Instant; use announce::handle_announce; use aquatic_udp_protocol::{Request, Response, TransactionId}; +use bittorrent_udp_tracker_core::services::announce::UdpAnnounceError; use connect::handle_connect; use error::handle_error; use scrape::handle_scrape; @@ -20,7 
+21,6 @@ use uuid::Uuid; use super::RawRequest; use crate::container::UdpTrackerContainer; -use crate::packages::udp_tracker_core::services::announce::UdpAnnounceError; use crate::servers::udp::error::Error; use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; use crate::CurrentClock; @@ -182,6 +182,8 @@ pub(crate) mod tests { use bittorrent_tracker_core::whitelist; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use bittorrent_udp_tracker_core::connection_cookie::gen_remote_fingerprint; + use bittorrent_udp_tracker_core::{self, statistics}; use futures::future::BoxFuture; use mockall::mock; use tokio::sync::mpsc::error::SendError; @@ -190,9 +192,7 @@ pub(crate) mod tests { use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; - use crate::packages::udp_tracker_core; - use crate::packages::udp_tracker_core::connection_cookie::gen_remote_fingerprint; - use crate::{packages, CurrentClock}; + use crate::CurrentClock; pub(crate) struct CoreTrackerServices { pub core_config: Arc, @@ -204,7 +204,7 @@ pub(crate) mod tests { } pub(crate) struct CoreUdpTrackerServices { - pub udp_stats_event_sender: Arc>>, + pub udp_stats_event_sender: Arc>>, } fn default_testing_tracker_configuration() -> Configuration { @@ -239,7 +239,7 @@ pub(crate) mod tests { )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let (udp_stats_event_sender, _udp_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); let udp_stats_event_sender = Arc::new(udp_stats_event_sender); ( @@ -357,8 +357,8 @@ pub(crate) mod tests { mock! 
{ pub(crate) UdpStatsEventSender {} - impl udp_tracker_core::statistics::event::sender::Sender for UdpStatsEventSender { - fn send_event(&self, event: udp_tracker_core::statistics::event::Event) -> BoxFuture<'static,Option > > > ; + impl statistics::event::sender::Sender for UdpStatsEventSender { + fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; } } } diff --git a/src/servers/udp/handlers/scrape.rs b/src/servers/udp/handlers/scrape.rs index b36eb92a0..aa7287951 100644 --- a/src/servers/udp/handlers/scrape.rs +++ b/src/servers/udp/handlers/scrape.rs @@ -7,11 +7,12 @@ use aquatic_udp_protocol::{ NumberOfDownloads, NumberOfPeers, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; +use bittorrent_udp_tracker_core::statistics::{self}; +use bittorrent_udp_tracker_core::{self, services}; use torrust_tracker_primitives::core::ScrapeData; use tracing::{instrument, Level}; use zerocopy::network_endian::I32; -use crate::packages::udp_tracker_core; use crate::servers::udp::error::Error; /// It handles the `Scrape` request. 
Refer to [`Scrape`](crate::servers::udp#scrape) @@ -25,7 +26,7 @@ pub async fn handle_scrape( remote_addr: SocketAddr, request: &ScrapeRequest, scrape_handler: &Arc, - opt_udp_stats_event_sender: &Arc>>, + opt_udp_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { tracing::Span::current() @@ -34,7 +35,7 @@ pub async fn handle_scrape( tracing::trace!("handle scrape"); - let scrape_data = udp_tracker_core::services::scrape::handle_scrape( + let scrape_data = services::scrape::handle_scrape( remote_addr, request, scrape_handler, @@ -86,9 +87,8 @@ mod tests { }; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; - use crate::packages; - use crate::packages::udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{ initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, @@ -169,7 +169,7 @@ mod tests { in_memory_torrent_repository: Arc, scrape_handler: Arc, ) -> Response { - let (udp_stats_event_sender, _udp_stats_repository) = packages::udp_tracker_core::statistics::setup::factory(false); + let (udp_stats_event_sender, _udp_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); let udp_stats_event_sender = Arc::new(udp_stats_event_sender); let remote_addr = sample_ipv4_remote_addr(); @@ -328,10 +328,10 @@ mod tests { use std::future; use std::sync::Arc; + use bittorrent_udp_tracker_core::statistics; use mockall::predicate::eq; use super::sample_scrape_request; - use crate::packages::udp_tracker_core; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, @@ 
-343,10 +343,10 @@ mod tests { let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp4Scrape)) + .with(eq(statistics::event::Event::Udp4Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = + let udp_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_stats_event_sender_mock))); let remote_addr = sample_ipv4_remote_addr(); @@ -370,10 +370,10 @@ mod tests { use std::future; use std::sync::Arc; + use bittorrent_udp_tracker_core::statistics; use mockall::predicate::eq; use super::sample_scrape_request; - use crate::packages::udp_tracker_core; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, @@ -385,10 +385,10 @@ mod tests { let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() - .with(eq(udp_tracker_core::statistics::event::Event::Udp6Scrape)) + .with(eq(statistics::event::Event::Udp6Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = + let udp_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_stats_event_sender_mock))); let remote_addr = sample_ipv6_remote_addr(); diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index 614db5bf6..1fcd49725 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -105,7 +105,7 @@ //! connection ID = hash(client IP + current time slot + secret seed) //! ``` //! -//! The BEP-15 recommends a two-minute time slot. Refer to [`connection_cookie`](crate::packages::udp_tracker_core::connection_cookie) +//! The BEP-15 recommends a two-minute time slot. Refer to [`connection_cookie`](bittorrent_udp_tracker_core::connection_cookie) //! 
for more information about the connection ID generation with this method. //! //! #### Connect Request @@ -641,8 +641,6 @@ pub mod error; pub mod handlers; pub mod server; -pub const UDP_TRACKER_LOG_TARGET: &str = "UDP TRACKER"; - /// Number of bytes. pub type Bytes = u64; /// The port the peer is listening on. diff --git a/src/servers/udp/server/bound_socket.rs b/src/servers/udp/server/bound_socket.rs index 658589aa6..988bfb67f 100644 --- a/src/servers/udp/server/bound_socket.rs +++ b/src/servers/udp/server/bound_socket.rs @@ -2,10 +2,9 @@ use std::fmt::Debug; use std::net::SocketAddr; use std::ops::Deref; +use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use url::Url; -use crate::servers::udp::UDP_TRACKER_LOG_TARGET; - /// Wrapper for Tokio [`UdpSocket`][`tokio::net::UdpSocket`] that is bound to a particular socket. pub struct BoundSocket { socket: tokio::net::UdpSocket, diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index e640749c6..fb0033624 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use std::time::Duration; use bittorrent_tracker_client::udp::client::check; +use bittorrent_udp_tracker_core::{self, statistics, UDP_TRACKER_LOG_TARGET}; use derive_more::Constructor; use futures_util::StreamExt; use tokio::select; @@ -13,18 +14,13 @@ use tracing::instrument; use super::request_buffer::ActiveRequests; use crate::bootstrap::jobs::Started; use crate::container::UdpTrackerContainer; -use crate::packages::udp_tracker_core; use crate::servers::logging::STARTED_ON; use crate::servers::registar::ServiceHealthCheckJob; use crate::servers::signals::{shutdown_signal_with_message, Halted}; use crate::servers::udp::server::bound_socket::BoundSocket; use crate::servers::udp::server::processor::Processor; use crate::servers::udp::server::receiver::Receiver; -use crate::servers::udp::UDP_TRACKER_LOG_TARGET; -/// The maximum number of connection id errors 
per ip. Clients will be banned if -/// they exceed this limit. -pub const MAX_CONNECTION_ID_ERRORS_PER_IP: u32 = 10; const IP_BANS_RESET_INTERVAL_IN_SECS: u64 = 3600; /// A UDP server instance launcher. @@ -165,14 +161,10 @@ impl Launcher { if let Some(udp_stats_event_sender) = udp_tracker_container.udp_stats_event_sender.as_deref() { match req.from.ip() { IpAddr::V4(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp4Request) - .await; + udp_stats_event_sender.send_event(statistics::event::Event::Udp4Request).await; } IpAddr::V6(_) => { - udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp6Request) - .await; + udp_stats_event_sender.send_event(statistics::event::Event::Udp6Request).await; } } } @@ -182,7 +174,7 @@ impl Launcher { if let Some(udp_stats_event_sender) = udp_tracker_container.udp_stats_event_sender.as_deref() { udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::UdpRequestBanned) + .send_event(statistics::event::Event::UdpRequestBanned) .await; } @@ -215,7 +207,7 @@ impl Launcher { if let Some(udp_stats_event_sender) = udp_tracker_container.udp_stats_event_sender.as_deref() { udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::UdpRequestAborted) + .send_event(statistics::event::Event::UdpRequestAborted) .await; } } diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index dc55833c2..af4c68770 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -4,12 +4,12 @@ use std::sync::Arc; use std::time::Duration; use aquatic_udp_protocol::Response; +use bittorrent_udp_tracker_core::{self, statistics}; use tokio::time::Instant; use tracing::{instrument, Level}; use super::bound_socket::BoundSocket; use crate::container::UdpTrackerContainer; -use crate::packages::udp_tracker_core; use crate::servers::udp::handlers::CookieTimeValues; use 
crate::servers::udp::{handlers, RawRequest}; @@ -60,12 +60,10 @@ impl Processor { }; let udp_response_kind = match &response { - Response::Connect(_) => udp_tracker_core::statistics::event::UdpResponseKind::Connect, - Response::AnnounceIpv4(_) | Response::AnnounceIpv6(_) => { - udp_tracker_core::statistics::event::UdpResponseKind::Announce - } - Response::Scrape(_) => udp_tracker_core::statistics::event::UdpResponseKind::Scrape, - Response::Error(_e) => udp_tracker_core::statistics::event::UdpResponseKind::Error, + Response::Connect(_) => statistics::event::UdpResponseKind::Connect, + Response::AnnounceIpv4(_) | Response::AnnounceIpv6(_) => statistics::event::UdpResponseKind::Announce, + Response::Scrape(_) => statistics::event::UdpResponseKind::Scrape, + Response::Error(_e) => statistics::event::UdpResponseKind::Error, }; let mut writer = Cursor::new(Vec::with_capacity(200)); @@ -87,7 +85,7 @@ impl Processor { match target.ip() { IpAddr::V4(_) => { udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp4Response { + .send_event(statistics::event::Event::Udp4Response { kind: udp_response_kind, req_processing_time, }) @@ -95,7 +93,7 @@ impl Processor { } IpAddr::V6(_) => { udp_stats_event_sender - .send_event(udp_tracker_core::statistics::event::Event::Udp6Response { + .send_event(statistics::event::Event::Udp6Response { kind: udp_response_kind, req_processing_time, }) diff --git a/src/servers/udp/server/request_buffer.rs b/src/servers/udp/server/request_buffer.rs index 03cb6040f..6e420306e 100644 --- a/src/servers/udp/server/request_buffer.rs +++ b/src/servers/udp/server/request_buffer.rs @@ -1,9 +1,8 @@ +use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use ringbuf::traits::{Consumer, Observer, Producer}; use ringbuf::StaticRb; use tokio::task::AbortHandle; -use crate::servers::udp::UDP_TRACKER_LOG_TARGET; - /// A ring buffer for managing active UDP request abort handles. 
/// /// The `ActiveRequests` struct maintains a fixed-size ring buffer of abort diff --git a/src/servers/udp/server/states.rs b/src/servers/udp/server/states.rs index abce9720a..c74c7f4db 100644 --- a/src/servers/udp/server/states.rs +++ b/src/servers/udp/server/states.rs @@ -3,6 +3,7 @@ use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; +use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use derive_more::derive::Display; use derive_more::Constructor; use tokio::task::JoinHandle; @@ -15,7 +16,6 @@ use crate::container::UdpTrackerContainer; use crate::servers::registar::{ServiceRegistration, ServiceRegistrationForm}; use crate::servers::signals::Halted; use crate::servers::udp::server::launcher::Launcher; -use crate::servers::udp::UDP_TRACKER_LOG_TARGET; /// A UDP server instance controller with no UDP instance running. #[allow(clippy::module_name_repetitions)] diff --git a/src/shared/mod.rs b/src/shared/mod.rs index 8c95effe1..3b4a46e67 100644 --- a/src/shared/mod.rs +++ b/src/shared/mod.rs @@ -1,6 +1,4 @@ //! Modules with generic logic used by several modules. //! //! - [`bit_torrent`]: `BitTorrent` protocol related logic. -//! - [`crypto`]: Encryption related logic. 
pub mod bit_torrent; -pub mod crypto; diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 24ce7bab2..7a6992583 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -4,10 +4,10 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::databases::Database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use bittorrent_udp_tracker_core::statistics; use torrust_tracker_configuration::{Configuration, DEFAULT_TIMEOUT}; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::container::UdpTrackerContainer; -use torrust_tracker_lib::packages::udp_tracker_core; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_lib::servers::udp::server::spawner::Spawner; use torrust_tracker_lib::servers::udp::server::states::{Running, Stopped}; @@ -22,7 +22,7 @@ where pub database: Arc>, pub in_memory_torrent_repository: Arc, - pub udp_stats_repository: Arc, + pub udp_stats_repository: Arc, pub registar: Registar, pub server: Server, From 8958609385e95b380fee464ccd98d356836e0722 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Feb 2025 15:57:40 +0000 Subject: [PATCH 282/802] refactor: [#1281] extract http-tracker-core package --- .github/workflows/deployment.yaml | 1 + Cargo.lock | 18 + Cargo.toml | 1 + packages/http-tracker-core/Cargo.toml | 29 + packages/http-tracker-core/LICENSE | 661 ++++++++++++++++++ packages/http-tracker-core/README.md | 15 + packages/http-tracker-core/src/lib.rs | 17 + .../src}/services/announce.rs | 39 +- .../http-tracker-core/src}/services/mod.rs | 0 .../http-tracker-core/src}/services/scrape.rs | 66 +- .../src}/statistics/event/handler.rs | 10 +- .../src}/statistics/event/listener.rs | 2 +- .../src}/statistics/event/mod.rs | 0 .../src}/statistics/event/sender.rs | 4 +- .../src}/statistics/keeper.rs | 6 +- 
.../src}/statistics/metrics.rs | 0 .../http-tracker-core/src}/statistics/mod.rs | 0 .../src}/statistics/repository.rs | 0 .../src}/statistics/services.rs | 18 +- .../src}/statistics/setup.rs | 6 +- src/bootstrap/app.rs | 3 +- src/container.rs | 10 +- src/packages/http_tracker_core/mod.rs | 2 - src/packages/mod.rs | 1 - .../tracker_api_core/statistics/services.rs | 7 +- src/servers/apis/v1/context/stats/handlers.rs | 3 +- src/servers/http/v1/handlers/announce.rs | 16 +- src/servers/http/v1/handlers/scrape.rs | 17 +- tests/servers/http/environment.rs | 3 +- 29 files changed, 836 insertions(+), 119 deletions(-) create mode 100644 packages/http-tracker-core/Cargo.toml create mode 100644 packages/http-tracker-core/LICENSE create mode 100644 packages/http-tracker-core/README.md create mode 100644 packages/http-tracker-core/src/lib.rs rename {src/packages/http_tracker_core => packages/http-tracker-core/src}/services/announce.rs (89%) rename {src/packages/http_tracker_core => packages/http-tracker-core/src}/services/mod.rs (100%) rename {src/packages/http_tracker_core => packages/http-tracker-core/src}/services/scrape.rs (84%) rename {src/packages/http_tracker_core => packages/http-tracker-core/src}/statistics/event/handler.rs (90%) rename {src/packages/http_tracker_core => packages/http-tracker-core/src}/statistics/event/listener.rs (79%) rename {src/packages/http_tracker_core => packages/http-tracker-core/src}/statistics/event/mod.rs (100%) rename {src/packages/http_tracker_core => packages/http-tracker-core/src}/statistics/event/sender.rs (79%) rename {src/packages/http_tracker_core => packages/http-tracker-core/src}/statistics/keeper.rs (90%) rename {src/packages/http_tracker_core => packages/http-tracker-core/src}/statistics/metrics.rs (100%) rename {src/packages/http_tracker_core => packages/http-tracker-core/src}/statistics/mod.rs (100%) rename {src/packages/http_tracker_core => packages/http-tracker-core/src}/statistics/repository.rs (100%) rename 
{src/packages/http_tracker_core => packages/http-tracker-core/src}/statistics/services.rs (80%) rename {src/packages/http_tracker_core => packages/http-tracker-core/src}/statistics/setup.rs (79%) delete mode 100644 src/packages/http_tracker_core/mod.rs diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index cd4887cbe..7b718bccf 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -56,6 +56,7 @@ jobs: CARGO_REGISTRY_TOKEN: "${{ secrets.TORRUST_UPDATE_CARGO_REGISTRY_TOKEN }}" run: | cargo publish -p bittorrent-http-protocol + cargo publish -p bittorrent-http-tracker-core cargo publish -p bittorrent-tracker-client cargo publish -p bittorrent-tracker-core cargo publish -p bittorrent-udp-protocol diff --git a/Cargo.lock b/Cargo.lock index 2835cd5c1..73f7dfb88 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -567,6 +567,23 @@ dependencies = [ "torrust-tracker-primitives", ] +[[package]] +name = "bittorrent-http-tracker-core" +version = "3.0.0-develop" +dependencies = [ + "aquatic_udp_protocol", + "bittorrent-http-protocol", + "bittorrent-primitives", + "bittorrent-tracker-core", + "futures", + "mockall", + "tokio", + "torrust-tracker-configuration", + "torrust-tracker-primitives", + "torrust-tracker-test-helpers", + "tracing", +] + [[package]] name = "bittorrent-primitives" version = "0.1.0" @@ -4331,6 +4348,7 @@ dependencies = [ "axum-extra", "axum-server", "bittorrent-http-protocol", + "bittorrent-http-tracker-core", "bittorrent-primitives", "bittorrent-tracker-client", "bittorrent-tracker-core", diff --git a/Cargo.toml b/Cargo.toml index b72baea5b..21c08a8b5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,6 +40,7 @@ axum-client-ip = "0" axum-extra = { version = "0", features = ["query"] } axum-server = { version = "0", features = ["tls-rustls-no-provider"] } bittorrent-http-protocol = { version = "3.0.0-develop", path = "packages/http-protocol" } +bittorrent-http-tracker-core = { version = 
"3.0.0-develop", path = "packages/http-tracker-core" } bittorrent-primitives = "0.1.0" bittorrent-tracker-client = { version = "3.0.0-develop", path = "packages/tracker-client" } bittorrent-tracker-core = { version = "3.0.0-develop", path = "packages/tracker-core" } diff --git a/packages/http-tracker-core/Cargo.toml b/packages/http-tracker-core/Cargo.toml new file mode 100644 index 000000000..a1ee18f66 --- /dev/null +++ b/packages/http-tracker-core/Cargo.toml @@ -0,0 +1,29 @@ +[package] +authors.workspace = true +description = "A library with the core functionality needed to implement a BitTorrent HTTP tracker." +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +keywords = ["api", "bittorrent", "core", "library", "tracker"] +license.workspace = true +name = "bittorrent-http-tracker-core" +publish.workspace = true +readme = "README.md" +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +aquatic_udp_protocol = "0" +bittorrent-http-protocol = { version = "3.0.0-develop", path = "../http-protocol" } +bittorrent-primitives = "0.1.0" +bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } +futures = "0" +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +tracing = "0" + +[dev-dependencies] +mockall = "0" +torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } diff --git a/packages/http-tracker-core/LICENSE b/packages/http-tracker-core/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/http-tracker-core/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. 
+ Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. 
+ + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. 
Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<https://www.gnu.org/licenses/>. diff --git a/packages/http-tracker-core/README.md b/packages/http-tracker-core/README.md new file mode 100644 index 000000000..0dd915c24 --- /dev/null +++ b/packages/http-tracker-core/README.md @@ -0,0 +1,15 @@ +# BitTorrent HTTP Tracker Core library + +A library with the core functionality needed to implement a BitTorrent HTTP tracker. + +You usually don’t need to use this library directly. Instead, you should use the [Torrust Tracker](https://github.com/torrust/torrust-tracker). If you want to build your own tracker, you can use this library as the core functionality. + +> **Disclaimer**: This library is actively under development. We’re currently extracting and refining common types from the [Torrust Tracker](https://github.com/torrust/torrust-tracker) to make them available to the BitTorrent community in Rust. While these types are functional, they are not yet ready for use in production or third-party projects. + +## Documentation + +[Crate documentation](https://docs.rs/bittorrent-http-tracker-core). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/http-tracker-core/src/lib.rs b/packages/http-tracker-core/src/lib.rs new file mode 100644 index 000000000..cb5306aa6 --- /dev/null +++ b/packages/http-tracker-core/src/lib.rs @@ -0,0 +1,17 @@ +pub mod services; +pub mod statistics; + +#[cfg(test)] +pub(crate) mod tests { + use bittorrent_primitives::info_hash::InfoHash; + + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. 
+ #[must_use] + pub fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") + } +} diff --git a/src/packages/http_tracker_core/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs similarity index 89% rename from src/packages/http_tracker_core/services/announce.rs rename to packages/http-tracker-core/src/services/announce.rs index 6c9cbec17..aff1fc1bd 100644 --- a/src/packages/http_tracker_core/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -21,7 +21,7 @@ use bittorrent_tracker_core::whitelist; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; -use crate::packages::http_tracker_core; +use crate::statistics; /// The HTTP tracker `announce` service. /// @@ -46,7 +46,7 @@ pub async fn handle_announce( announce_handler: &Arc, authentication_service: &Arc, whitelist_authorization: &Arc, - opt_http_stats_event_sender: &Arc>>, + opt_http_stats_event_sender: &Arc>>, announce_request: &Announce, client_ip_sources: &ClientIpSources, maybe_key: Option, @@ -95,12 +95,12 @@ pub async fn handle_announce( match original_peer_ip { IpAddr::V4(_) => { http_stats_event_sender - .send_event(http_tracker_core::statistics::event::Event::Tcp4Announce) + .send_event(statistics::event::Event::Tcp4Announce) .await; } IpAddr::V6(_) => { http_stats_event_sender - .send_event(http_tracker_core::statistics::event::Event::Tcp6Announce) + .send_event(statistics::event::Event::Tcp6Announce) .await; } } @@ -138,7 +138,7 @@ mod tests { } struct CoreHttpTrackerServices { - pub http_stats_event_sender: Arc>>, + pub http_stats_event_sender: Arc>>, } fn initialize_core_tracker_services() -> (CoreTrackerServices, CoreHttpTrackerServices) { @@ -163,8 +163,7 @@ mod tests { )); // HTTP stats - let (http_stats_event_sender, http_stats_repository) = - 
http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let (http_stats_event_sender, http_stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let http_stats_event_sender = Arc::new(http_stats_event_sender); let _http_stats_repository = Arc::new(http_stats_repository); @@ -229,13 +228,13 @@ mod tests { use mockall::mock; use tokio::sync::mpsc::error::SendError; - use crate::packages::http_tracker_core; - use crate::servers::http::test_helpers::tests::sample_info_hash; + use crate::statistics; + use crate::tests::sample_info_hash; mock! { HttpStatsEventSender {} - impl http_tracker_core::statistics::event::sender::Sender for HttpStatsEventSender { - fn send_event(&self, event: http_tracker_core::statistics::event::Event) -> BoxFuture<'static,Option > > > ; + impl statistics::event::sender::Sender for HttpStatsEventSender { + fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; } } @@ -252,12 +251,12 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; - use crate::packages::http_tracker_core; - use crate::packages::http_tracker_core::services::announce::handle_announce; - use crate::packages::http_tracker_core::services::announce::tests::{ + use crate::services::announce::handle_announce; + use crate::services::announce::tests::{ initialize_core_tracker_services, initialize_core_tracker_services_with_config, sample_announce_request_for_peer, sample_peer, MockHttpStatsEventSender, }; + use crate::statistics; #[tokio::test] async fn it_should_return_the_announce_data() { @@ -298,10 +297,10 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(http_tracker_core::statistics::event::Event::Tcp4Announce)) + .with(eq(statistics::event::Event::Tcp4Announce)) .times(1) .returning(|_| 
Box::pin(future::ready(Some(Ok(()))))); - let http_stats_event_sender: Arc>> = + let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let (core_tracker_services, mut core_http_tracker_services) = initialize_core_tracker_services(); @@ -349,10 +348,10 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(http_tracker_core::statistics::event::Event::Tcp4Announce)) + .with(eq(statistics::event::Event::Tcp4Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let http_stats_event_sender: Arc>> = + let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let (core_tracker_services, mut core_http_tracker_services) = @@ -383,10 +382,10 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(http_tracker_core::statistics::event::Event::Tcp6Announce)) + .with(eq(statistics::event::Event::Tcp6Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let http_stats_event_sender: Arc>> = + let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let (core_tracker_services, mut core_http_tracker_services) = initialize_core_tracker_services(); diff --git a/src/packages/http_tracker_core/services/mod.rs b/packages/http-tracker-core/src/services/mod.rs similarity index 100% rename from src/packages/http_tracker_core/services/mod.rs rename to packages/http-tracker-core/src/services/mod.rs diff --git a/src/packages/http_tracker_core/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs similarity index 84% rename from src/packages/http_tracker_core/services/scrape.rs rename to packages/http-tracker-core/src/services/scrape.rs index 7e3ea47fd..11011f16b 100644 --- a/src/packages/http_tracker_core/services/scrape.rs +++ 
b/packages/http-tracker-core/src/services/scrape.rs @@ -20,7 +20,7 @@ use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; -use crate::packages::http_tracker_core; +use crate::statistics; /// The HTTP tracker `scrape` service. /// @@ -43,7 +43,7 @@ pub async fn handle_scrape( core_config: &Arc, scrape_handler: &Arc, authentication_service: &Arc, - opt_http_stats_event_sender: &Arc>>, + opt_http_stats_event_sender: &Arc>>, scrape_request: &Scrape, client_ip_sources: &ClientIpSources, maybe_key: Option, @@ -70,9 +70,7 @@ pub async fn handle_scrape( }; if return_fake_scrape_data { - return Ok( - http_tracker_core::services::scrape::fake(opt_http_stats_event_sender, &scrape_request.info_hashes, &peer_ip).await, - ); + return Ok(fake(opt_http_stats_event_sender, &scrape_request.info_hashes, &peer_ip).await); } let scrape_data = scrape_handler.scrape(&scrape_request.info_hashes).await?; @@ -89,7 +87,7 @@ pub async fn handle_scrape( /// /// > **NOTICE**: tracker statistics are not updated in this case. 
pub async fn fake( - opt_http_stats_event_sender: &Arc>>, + opt_http_stats_event_sender: &Arc>>, info_hashes: &Vec, original_peer_ip: &IpAddr, ) -> ScrapeData { @@ -100,19 +98,15 @@ pub async fn fake( async fn send_scrape_event( original_peer_ip: &IpAddr, - opt_http_stats_event_sender: &Arc>>, + opt_http_stats_event_sender: &Arc>>, ) { if let Some(http_stats_event_sender) = opt_http_stats_event_sender.as_deref() { match original_peer_ip { IpAddr::V4(_) => { - http_stats_event_sender - .send_event(http_tracker_core::statistics::event::Event::Tcp4Scrape) - .await; + http_stats_event_sender.send_event(statistics::event::Event::Tcp4Scrape).await; } IpAddr::V6(_) => { - http_stats_event_sender - .send_event(http_tracker_core::statistics::event::Event::Tcp6Scrape) - .await; + http_stats_event_sender.send_event(statistics::event::Event::Tcp6Scrape).await; } } } @@ -142,8 +136,8 @@ mod tests { use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; - use crate::packages::http_tracker_core; - use crate::servers::http::test_helpers::tests::sample_info_hash; + use crate::statistics; + use crate::tests::sample_info_hash; struct Container { announce_handler: Arc, @@ -198,8 +192,8 @@ mod tests { mock! 
{ HttpStatsEventSender {} - impl http_tracker_core::statistics::event::sender::Sender for HttpStatsEventSender { - fn send_event(&self, event: http_tracker_core::statistics::event::Event) -> BoxFuture<'static,Option > > > ; + impl statistics::event::sender::Sender for HttpStatsEventSender { + fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; } } @@ -217,20 +211,19 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; - use crate::packages::http_tracker_core::services::scrape::handle_scrape; - use crate::packages::http_tracker_core::services::scrape::tests::{ + use crate::services::scrape::handle_scrape; + use crate::services::scrape::tests::{ initialize_services_with_configuration, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; - use crate::packages::{self, http_tracker_core}; - use crate::servers::http::test_helpers::tests::sample_info_hash; + use crate::statistics; + use crate::tests::sample_info_hash; #[tokio::test] async fn it_should_return_the_scrape_data_for_a_torrent() { let configuration = configuration::ephemeral_public(); let core_config = Arc::new(configuration.core.clone()); - let (http_stats_event_sender, _http_stats_repository) = - packages::http_tracker_core::statistics::setup::factory(false); + let (http_stats_event_sender, _http_stats_repository) = statistics::setup::factory(false); let http_stats_event_sender = Arc::new(http_stats_event_sender); let container = initialize_services_with_configuration(&configuration); @@ -288,10 +281,10 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(http_tracker_core::statistics::event::Event::Tcp4Scrape)) + .with(eq(statistics::event::Event::Tcp4Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let http_stats_event_sender: Arc>> = + let http_stats_event_sender: 
Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let container = initialize_services_with_configuration(&config); @@ -327,10 +320,10 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(http_tracker_core::statistics::event::Event::Tcp6Scrape)) + .with(eq(statistics::event::Event::Tcp6Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let http_stats_event_sender: Arc>> = + let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let container = initialize_services_with_configuration(&config); @@ -370,17 +363,16 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_primitives::core::ScrapeData; - use crate::packages::http_tracker_core::services::scrape::fake; - use crate::packages::http_tracker_core::services::scrape::tests::{ + use crate::services::scrape::fake; + use crate::services::scrape::tests::{ initialize_services_for_public_tracker, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; - use crate::packages::{self, http_tracker_core}; - use crate::servers::http::test_helpers::tests::sample_info_hash; + use crate::statistics; + use crate::tests::sample_info_hash; #[tokio::test] async fn it_should_always_return_the_zeroed_scrape_data_for_a_torrent() { - let (http_stats_event_sender, _http_stats_repository) = - packages::http_tracker_core::statistics::setup::factory(false); + let (http_stats_event_sender, _http_stats_repository) = statistics::setup::factory(false); let http_stats_event_sender = Arc::new(http_stats_event_sender); let container = initialize_services_for_public_tracker(); @@ -409,10 +401,10 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(http_tracker_core::statistics::event::Event::Tcp4Scrape)) + .with(eq(statistics::event::Event::Tcp4Scrape)) .times(1) 
.returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let http_stats_event_sender: Arc>> = + let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); @@ -425,10 +417,10 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(http_tracker_core::statistics::event::Event::Tcp6Scrape)) + .with(eq(statistics::event::Event::Tcp6Scrape)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let http_stats_event_sender: Arc>> = + let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); diff --git a/src/packages/http_tracker_core/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs similarity index 90% rename from src/packages/http_tracker_core/statistics/event/handler.rs rename to packages/http-tracker-core/src/statistics/event/handler.rs index caaf5d375..af323d06b 100644 --- a/src/packages/http_tracker_core/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -1,5 +1,5 @@ -use crate::packages::http_tracker_core::statistics::event::Event; -use crate::packages::http_tracker_core::statistics::repository::Repository; +use crate::statistics::event::Event; +use crate::statistics::repository::Repository; pub async fn handle_event(event: Event, stats_repository: &Repository) { match event { @@ -29,9 +29,9 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { #[cfg(test)] mod tests { - use crate::packages::http_tracker_core::statistics::event::handler::handle_event; - use crate::packages::http_tracker_core::statistics::event::Event; - use crate::packages::http_tracker_core::statistics::repository::Repository; + use 
crate::statistics::event::handler::handle_event; + use crate::statistics::event::Event; + use crate::statistics::repository::Repository; #[tokio::test] async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { diff --git a/src/packages/http_tracker_core/statistics/event/listener.rs b/packages/http-tracker-core/src/statistics/event/listener.rs similarity index 79% rename from src/packages/http_tracker_core/statistics/event/listener.rs rename to packages/http-tracker-core/src/statistics/event/listener.rs index ed574a36b..f1a2e25de 100644 --- a/src/packages/http_tracker_core/statistics/event/listener.rs +++ b/packages/http-tracker-core/src/statistics/event/listener.rs @@ -2,7 +2,7 @@ use tokio::sync::mpsc; use super::handler::handle_event; use super::Event; -use crate::packages::http_tracker_core::statistics::repository::Repository; +use crate::statistics::repository::Repository; pub async fn dispatch_events(mut receiver: mpsc::Receiver, stats_repository: Repository) { while let Some(event) = receiver.recv().await { diff --git a/src/packages/http_tracker_core/statistics/event/mod.rs b/packages/http-tracker-core/src/statistics/event/mod.rs similarity index 100% rename from src/packages/http_tracker_core/statistics/event/mod.rs rename to packages/http-tracker-core/src/statistics/event/mod.rs diff --git a/src/packages/http_tracker_core/statistics/event/sender.rs b/packages/http-tracker-core/src/statistics/event/sender.rs similarity index 79% rename from src/packages/http_tracker_core/statistics/event/sender.rs rename to packages/http-tracker-core/src/statistics/event/sender.rs index 279d50962..ca4b4e210 100644 --- a/src/packages/http_tracker_core/statistics/event/sender.rs +++ b/packages/http-tracker-core/src/statistics/event/sender.rs @@ -13,10 +13,10 @@ pub trait Sender: Sync + Send { fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; } -/// An 
[`statistics::EventSender`](crate::packages::http_tracker_core::statistics::event::sender::Sender) implementation. +/// An [`statistics::EventSender`](crate::statistics::event::sender::Sender) implementation. /// /// It uses a channel sender to send the statistic events. The channel is created by a -/// [`statistics::Keeper`](crate::packages::http_tracker_core::statistics::keeper::Keeper) +/// [`statistics::Keeper`](crate::statistics::keeper::Keeper) #[allow(clippy::module_name_repetitions)] pub struct ChannelSender { pub(crate) sender: mpsc::Sender, diff --git a/src/packages/http_tracker_core/statistics/keeper.rs b/packages/http-tracker-core/src/statistics/keeper.rs similarity index 90% rename from src/packages/http_tracker_core/statistics/keeper.rs rename to packages/http-tracker-core/src/statistics/keeper.rs index 01ae5e6b3..ae5c3276e 100644 --- a/src/packages/http_tracker_core/statistics/keeper.rs +++ b/packages/http-tracker-core/src/statistics/keeper.rs @@ -51,9 +51,9 @@ impl Keeper { #[cfg(test)] mod tests { - use crate::packages::http_tracker_core::statistics::event::Event; - use crate::packages::http_tracker_core::statistics::keeper::Keeper; - use crate::packages::http_tracker_core::statistics::metrics::Metrics; + use crate::statistics::event::Event; + use crate::statistics::keeper::Keeper; + use crate::statistics::metrics::Metrics; #[tokio::test] async fn should_contain_the_tracker_statistics() { diff --git a/src/packages/http_tracker_core/statistics/metrics.rs b/packages/http-tracker-core/src/statistics/metrics.rs similarity index 100% rename from src/packages/http_tracker_core/statistics/metrics.rs rename to packages/http-tracker-core/src/statistics/metrics.rs diff --git a/src/packages/http_tracker_core/statistics/mod.rs b/packages/http-tracker-core/src/statistics/mod.rs similarity index 100% rename from src/packages/http_tracker_core/statistics/mod.rs rename to packages/http-tracker-core/src/statistics/mod.rs diff --git 
a/src/packages/http_tracker_core/statistics/repository.rs b/packages/http-tracker-core/src/statistics/repository.rs similarity index 100% rename from src/packages/http_tracker_core/statistics/repository.rs rename to packages/http-tracker-core/src/statistics/repository.rs diff --git a/src/packages/http_tracker_core/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs similarity index 80% rename from src/packages/http_tracker_core/statistics/services.rs rename to packages/http-tracker-core/src/statistics/services.rs index 51065bf63..57806677e 100644 --- a/src/packages/http_tracker_core/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -2,14 +2,14 @@ //! //! It includes: //! -//! - A [`factory`](crate::packages::http_tracker_core::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. -//! - A [`get_metrics`] service to get the tracker [`metrics`](crate::packages::http_tracker_core::statistics::metrics::Metrics). +//! - A [`factory`](crate::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. +//! - A [`get_metrics`] service to get the tracker [`metrics`](crate::statistics::metrics::Metrics). //! //! Tracker metrics are collected using a Publisher-Subscribe pattern. //! //! The factory function builds two structs: //! -//! - An statistics event [`Sender`](crate::packages::http_tracker_core::statistics::event::sender::Sender) +//! - An statistics event [`Sender`](crate::statistics::event::sender::Sender) //! - An statistics [`Repository`] //! //! 
```text @@ -23,11 +23,10 @@ use std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use packages::http_tracker_core::statistics::metrics::Metrics; -use packages::http_tracker_core::statistics::repository::Repository; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use crate::packages; +use crate::statistics::metrics::Metrics; +use crate::statistics::repository::Repository; /// All the metrics collected by the tracker. #[derive(Debug, PartialEq)] @@ -76,8 +75,8 @@ mod tests { use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_test_helpers::configuration; - use crate::packages::http_tracker_core::statistics::services::{get_metrics, TrackerMetrics}; - use crate::packages::http_tracker_core::{self, statistics}; + use crate::statistics; + use crate::statistics::services::{get_metrics, TrackerMetrics}; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() @@ -89,8 +88,7 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let (_http_stats_event_sender, http_stats_repository) = - http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let (_http_stats_event_sender, http_stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let http_stats_repository = Arc::new(http_stats_repository); let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), http_stats_repository.clone()).await; diff --git a/src/packages/http_tracker_core/statistics/setup.rs b/packages/http-tracker-core/src/statistics/setup.rs similarity index 79% rename from src/packages/http_tracker_core/statistics/setup.rs rename to packages/http-tracker-core/src/statistics/setup.rs index 009f157d5..d3114a75e 100644 --- a/src/packages/http_tracker_core/statistics/setup.rs +++ b/packages/http-tracker-core/src/statistics/setup.rs @@ -1,14 +1,14 @@ //! 
Setup for the tracker statistics. //! //! The [`factory`] function builds the structs needed for handling the tracker metrics. -use crate::packages::http_tracker_core::statistics; +use crate::statistics; /// It builds the structs needed for handling the tracker metrics. /// /// It returns: /// -/// - An statistics event [`Sender`](crate::packages::http_tracker_core::statistics::event::sender::Sender) that allows you to send events related to statistics. -/// - An statistics [`Repository`](crate::packages::http_tracker_core::statistics::repository::Repository) which is an in-memory repository for the tracker metrics. +/// - An statistics event [`Sender`](crate::statistics::event::sender::Sender) that allows you to send events related to statistics. +/// - An statistics [`Repository`](crate::statistics::repository::Repository) which is an in-memory repository for the tracker metrics. /// /// When the input argument `tracker_usage_statistics`is false the setup does not run the event listeners, consequently the statistics /// events are sent are received but not dispatched to the handler. diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 9247f76bb..2f4ff0e94 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -39,7 +39,6 @@ use tracing::instrument; use super::config::initialize_configuration; use crate::bootstrap; use crate::container::AppContainer; -use crate::packages::http_tracker_core; /// It loads the configuration from the environment and builds app container. 
/// @@ -93,7 +92,7 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { // HTTP stats let (http_stats_event_sender, http_stats_repository) = - http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); + bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); let http_stats_event_sender = Arc::new(http_stats_event_sender); let http_stats_repository = Arc::new(http_stats_repository); diff --git a/src/container.rs b/src/container.rs index d4e46b116..0f4a840cf 100644 --- a/src/container.rs +++ b/src/container.rs @@ -16,8 +16,6 @@ use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, HttpApi, HttpTracker, UdpTracker}; -use crate::packages::http_tracker_core; - pub struct AppContainer { pub core_config: Arc, pub database: Arc>, @@ -28,9 +26,9 @@ pub struct AppContainer { pub in_memory_whitelist: Arc, pub whitelist_authorization: Arc, pub ban_service: Arc>, - pub http_stats_event_sender: Arc>>, + pub http_stats_event_sender: Arc>>, pub udp_stats_event_sender: Arc>>, - pub http_stats_repository: Arc, + pub http_stats_repository: Arc, pub udp_stats_repository: Arc, pub whitelist_manager: Arc, pub in_memory_torrent_repository: Arc, @@ -69,7 +67,7 @@ pub struct HttpTrackerContainer { pub announce_handler: Arc, pub scrape_handler: Arc, pub whitelist_authorization: Arc, - pub http_stats_event_sender: Arc>>, + pub http_stats_event_sender: Arc>>, pub authentication_service: Arc, } @@ -95,7 +93,7 @@ pub struct HttpApiContainer { pub keys_handler: Arc, pub whitelist_manager: Arc, pub ban_service: Arc>, - pub http_stats_repository: Arc, + pub http_stats_repository: Arc, pub udp_stats_repository: Arc, } diff --git a/src/packages/http_tracker_core/mod.rs b/src/packages/http_tracker_core/mod.rs deleted file mode 100644 index 4f3e54857..000000000 --- a/src/packages/http_tracker_core/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub 
mod services; -pub mod statistics; diff --git a/src/packages/mod.rs b/src/packages/mod.rs index f00f1ace0..7e43aa210 100644 --- a/src/packages/mod.rs +++ b/src/packages/mod.rs @@ -1,5 +1,4 @@ //! This module contains logic pending to be extracted into workspace packages. //! //! It will be moved to the directory `packages`. -pub mod http_tracker_core; pub mod tracker_api_core; diff --git a/src/packages/tracker_api_core/statistics/services.rs b/src/packages/tracker_api_core/statistics/services.rs index d94ff5bf7..bb03dd8ef 100644 --- a/src/packages/tracker_api_core/statistics/services.rs +++ b/src/packages/tracker_api_core/statistics/services.rs @@ -7,7 +7,7 @@ use packages::tracker_api_core::statistics::metrics::Metrics; use tokio::sync::RwLock; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use crate::packages::{self, http_tracker_core}; +use crate::packages::{self}; /// All the metrics collected by the tracker. #[derive(Debug, PartialEq)] @@ -27,7 +27,7 @@ pub struct TrackerMetrics { pub async fn get_metrics( in_memory_torrent_repository: Arc, ban_service: Arc>, - http_stats_repository: Arc, + http_stats_repository: Arc, udp_stats_repository: Arc, ) -> TrackerMetrics { let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); @@ -84,7 +84,6 @@ mod tests { use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_test_helpers::configuration; - use crate::packages::http_tracker_core; use crate::packages::tracker_api_core::statistics::metrics::Metrics; use crate::packages::tracker_api_core::statistics::services::{get_metrics, TrackerMetrics}; @@ -101,7 +100,7 @@ mod tests { // HTTP stats let (_http_stats_event_sender, http_stats_repository) = - http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); let http_stats_repository = Arc::new(http_stats_repository); // UDP stats diff 
--git a/src/servers/apis/v1/context/stats/handlers.rs b/src/servers/apis/v1/context/stats/handlers.rs index 62379b6f4..cfd266c49 100644 --- a/src/servers/apis/v1/context/stats/handlers.rs +++ b/src/servers/apis/v1/context/stats/handlers.rs @@ -11,7 +11,6 @@ use serde::Deserialize; use tokio::sync::RwLock; use super::responses::{metrics_response, stats_response}; -use crate::packages::http_tracker_core; use crate::packages::tracker_api_core::statistics::services::get_metrics; #[derive(Deserialize, Debug, Default)] @@ -43,7 +42,7 @@ pub async fn get_stats_handler( State(state): State<( Arc, Arc>, - Arc, + Arc, Arc, )>, params: Query, diff --git a/src/servers/http/v1/handlers/announce.rs b/src/servers/http/v1/handlers/announce.rs index 76f4e5134..5cd4595da 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/src/servers/http/v1/handlers/announce.rs @@ -21,7 +21,6 @@ use hyper::StatusCode; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; -use crate::packages::http_tracker_core; use crate::servers::http::v1::extractors::announce_request::ExtractRequest; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; @@ -36,7 +35,7 @@ pub async fn handle_without_key( Arc, Arc, Arc, - Arc>>, + Arc>>, )>, ExtractRequest(announce_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, @@ -66,7 +65,7 @@ pub async fn handle_with_key( Arc, Arc, Arc, - Arc>>, + Arc>>, )>, ExtractRequest(announce_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, @@ -97,7 +96,7 @@ async fn handle( announce_handler: &Arc, authentication_service: &Arc, whitelist_authorization: &Arc, - opt_http_stats_event_sender: &Arc>>, + opt_http_stats_event_sender: &Arc>>, announce_request: &Announce, client_ip_sources: &ClientIpSources, maybe_key: Option, @@ -126,12 
+125,12 @@ async fn handle_announce( announce_handler: &Arc, authentication_service: &Arc, whitelist_authorization: &Arc, - opt_http_stats_event_sender: &Arc>>, + opt_http_stats_event_sender: &Arc>>, announce_request: &Announce, client_ip_sources: &ClientIpSources, maybe_key: Option, ) -> Result { - http_tracker_core::services::announce::handle_announce( + bittorrent_http_tracker_core::services::announce::handle_announce( &core_config.clone(), &announce_handler.clone(), &authentication_service.clone(), @@ -200,7 +199,6 @@ mod tests { use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_test_helpers::configuration; - use crate::packages::http_tracker_core; use crate::servers::http::test_helpers::tests::sample_info_hash; struct CoreTrackerServices { @@ -211,7 +209,7 @@ mod tests { } struct CoreHttpTrackerServices { - pub http_stats_event_sender: Arc>>, + pub http_stats_event_sender: Arc>>, } fn initialize_private_tracker() -> (CoreTrackerServices, CoreHttpTrackerServices) { @@ -248,7 +246,7 @@ mod tests { // HTTP stats let (http_stats_event_sender, http_stats_repository) = - http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); let http_stats_event_sender = Arc::new(http_stats_event_sender); let _http_stats_repository = Arc::new(http_stats_repository); diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 946190e8f..ad344aa29 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -19,7 +19,6 @@ use hyper::StatusCode; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; -use crate::packages::http_tracker_core; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; use 
crate::servers::http::v1::extractors::scrape_request::ExtractRequest; @@ -33,7 +32,7 @@ pub async fn handle_without_key( Arc, Arc, Arc, - Arc>>, + Arc>>, )>, ExtractRequest(scrape_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, @@ -63,7 +62,7 @@ pub async fn handle_with_key( Arc, Arc, Arc, - Arc>>, + Arc>>, )>, ExtractRequest(scrape_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, @@ -88,7 +87,7 @@ async fn handle( core_config: &Arc, scrape_handler: &Arc, authentication_service: &Arc, - http_stats_event_sender: &Arc>>, + http_stats_event_sender: &Arc>>, scrape_request: &Scrape, client_ip_sources: &ClientIpSources, maybe_key: Option, @@ -116,12 +115,12 @@ async fn handle_scrape( core_config: &Arc, scrape_handler: &Arc, authentication_service: &Arc, - opt_http_stats_event_sender: &Arc>>, + opt_http_stats_event_sender: &Arc>>, scrape_request: &Scrape, client_ip_sources: &ClientIpSources, maybe_key: Option, ) -> Result { - http_tracker_core::services::scrape::handle_scrape( + bittorrent_http_tracker_core::services::scrape::handle_scrape( core_config, scrape_handler, authentication_service, @@ -158,8 +157,6 @@ mod tests { use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_test_helpers::configuration; - use crate::packages::http_tracker_core; - struct CoreTrackerServices { pub core_config: Arc, pub scrape_handler: Arc, @@ -167,7 +164,7 @@ mod tests { } struct CoreHttpTrackerServices { - pub http_stats_event_sender: Arc>>, + pub http_stats_event_sender: Arc>>, } fn initialize_private_tracker() -> (CoreTrackerServices, CoreHttpTrackerServices) { @@ -197,7 +194,7 @@ mod tests { // HTTP stats let (http_stats_event_sender, _http_stats_repository) = - http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); let http_stats_event_sender = 
Arc::new(http_stats_event_sender); ( diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 97ca13e95..6621bc6ee 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -10,7 +10,6 @@ use torrust_tracker_configuration::Configuration; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; use torrust_tracker_lib::container::HttpTrackerContainer; -use torrust_tracker_lib::packages::http_tracker_core; use torrust_tracker_lib::servers::http::server::{HttpServer, Launcher, Running, Stopped}; use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_primitives::peer; @@ -21,7 +20,7 @@ pub struct Environment { pub database: Arc>, pub in_memory_torrent_repository: Arc, pub keys_handler: Arc, - pub http_stats_repository: Arc, + pub http_stats_repository: Arc, pub whitelist_manager: Arc, pub registar: Registar, From 97d26294177f05d7110194758a90dfe4269dc528 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Feb 2025 17:04:47 +0000 Subject: [PATCH 283/802] refactor: [#1280] extract tracker-api-core package --- .github/workflows/deployment.yaml | 1 + Cargo.lock | 16 +- Cargo.toml | 1 + packages/tracker-api-core/Cargo.toml | 25 + packages/tracker-api-core/LICENSE | 661 ++++++++++++++++++ packages/tracker-api-core/README.md | 11 + .../tracker-api-core/src/lib.rs | 0 .../src}/statistics/metrics.rs | 0 .../tracker-api-core/src}/statistics/mod.rs | 0 .../src}/statistics/services.rs | 7 +- .../src/crypto/ephemeral_instance_keys.rs | 4 +- src/lib.rs | 1 - src/packages/mod.rs | 4 - src/servers/apis/v1/context/stats/handlers.rs | 2 +- .../apis/v1/context/stats/resources.rs | 8 +- .../apis/v1/context/stats/responses.rs | 2 +- 16 files changed, 724 insertions(+), 19 deletions(-) create mode 100644 packages/tracker-api-core/Cargo.toml create mode 100644 packages/tracker-api-core/LICENSE create mode 
100644 packages/tracker-api-core/README.md rename src/packages/tracker_api_core/mod.rs => packages/tracker-api-core/src/lib.rs (100%) rename {src/packages/tracker_api_core => packages/tracker-api-core/src}/statistics/metrics.rs (100%) rename {src/packages/tracker_api_core => packages/tracker-api-core/src}/statistics/mod.rs (100%) rename {src/packages/tracker_api_core => packages/tracker-api-core/src}/statistics/services.rs (95%) delete mode 100644 src/packages/mod.rs diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 7b718bccf..5b39c7f2f 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -63,6 +63,7 @@ jobs: cargo publish -p bittorrent-udp-tracker-core cargo publish -p torrust-tracker cargo publish -p torrust-tracker-api-client + cargo publish -p torrust-tracker-api-core cargo publish -p torrust-tracker-client cargo publish -p torrust-tracker-clock cargo publish -p torrust-tracker-configuration diff --git a/Cargo.lock b/Cargo.lock index 73f7dfb88..63c51dc19 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -674,7 +674,7 @@ dependencies = [ "futures", "lazy_static", "mockall", - "rand 0.8.5", + "rand 0.9.0", "thiserror 2.0.11", "tokio", "torrust-tracker-configuration", @@ -4387,6 +4387,7 @@ dependencies = [ "thiserror 2.0.11", "tokio", "torrust-tracker-api-client", + "torrust-tracker-api-core", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-located-error", @@ -4414,6 +4415,19 @@ dependencies = [ "uuid", ] +[[package]] +name = "torrust-tracker-api-core" +version = "3.0.0-develop" +dependencies = [ + "bittorrent-http-tracker-core", + "bittorrent-tracker-core", + "bittorrent-udp-tracker-core", + "tokio", + "torrust-tracker-configuration", + "torrust-tracker-primitives", + "torrust-tracker-test-helpers", +] + [[package]] name = "torrust-tracker-client" version = "3.0.0-develop" diff --git a/Cargo.toml b/Cargo.toml index 21c08a8b5..d31dcf9c4 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -76,6 +76,7 @@ serde_repr = "0" serde_with = { version = "3", features = ["json"] } thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-tracker-api-core = { version = "3.0.0-develop", path = "packages/tracker-api-core" } torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/configuration" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "packages/located-error" } diff --git a/packages/tracker-api-core/Cargo.toml b/packages/tracker-api-core/Cargo.toml new file mode 100644 index 000000000..4fc46ed04 --- /dev/null +++ b/packages/tracker-api-core/Cargo.toml @@ -0,0 +1,25 @@ +[package] +authors.workspace = true +description = "A library with the core functionality needed to implement a BitTorrent UDP tracker." +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +keywords = ["api", "bittorrent", "core", "library", "tracker"] +license.workspace = true +name = "torrust-tracker-api-core" +publish.workspace = true +readme = "README.md" +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "../http-tracker-core" } +bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } +bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "../udp-tracker-core" } +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } + +[dev-dependencies] +torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } diff --git a/packages/tracker-api-core/LICENSE 
b/packages/tracker-api-core/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/tracker-api-core/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. 
+The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/packages/tracker-api-core/README.md b/packages/tracker-api-core/README.md new file mode 100644 index 000000000..96bf17bf7 --- /dev/null +++ b/packages/tracker-api-core/README.md @@ -0,0 +1,11 @@ +# BitTorrent UDP Tracker Core library + +A library with the core functionality needed to implement the Torrust Tracker API + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-api-core). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/src/packages/tracker_api_core/mod.rs b/packages/tracker-api-core/src/lib.rs similarity index 100% rename from src/packages/tracker_api_core/mod.rs rename to packages/tracker-api-core/src/lib.rs diff --git a/src/packages/tracker_api_core/statistics/metrics.rs b/packages/tracker-api-core/src/statistics/metrics.rs similarity index 100% rename from src/packages/tracker_api_core/statistics/metrics.rs rename to packages/tracker-api-core/src/statistics/metrics.rs diff --git a/src/packages/tracker_api_core/statistics/mod.rs b/packages/tracker-api-core/src/statistics/mod.rs similarity index 100% rename from src/packages/tracker_api_core/statistics/mod.rs rename to packages/tracker-api-core/src/statistics/mod.rs diff --git a/src/packages/tracker_api_core/statistics/services.rs b/packages/tracker-api-core/src/statistics/services.rs similarity index 95% rename from src/packages/tracker_api_core/statistics/services.rs rename to packages/tracker-api-core/src/statistics/services.rs index bb03dd8ef..178c8ca0f 100644 --- 
a/src/packages/tracker_api_core/statistics/services.rs +++ b/packages/tracker-api-core/src/statistics/services.rs @@ -3,11 +3,10 @@ use std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self, statistics}; -use packages::tracker_api_core::statistics::metrics::Metrics; use tokio::sync::RwLock; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use crate::packages::{self}; +use crate::statistics::metrics::Metrics; /// All the metrics collected by the tracker. #[derive(Debug, PartialEq)] @@ -84,8 +83,8 @@ mod tests { use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_test_helpers::configuration; - use crate::packages::tracker_api_core::statistics::metrics::Metrics; - use crate::packages::tracker_api_core::statistics::services::{get_metrics, TrackerMetrics}; + use crate::statistics::metrics::Metrics; + use crate::statistics::services::{get_metrics, TrackerMetrics}; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() diff --git a/packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs b/packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs index fcbf78288..58ba70562 100644 --- a/packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs +++ b/packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs @@ -17,13 +17,13 @@ lazy_static! { /// The random static seed. pub static ref RANDOM_SEED: Seed = { let mut rng = ThreadRng::default(); - rng.gen::() + rng.random::() }; /// The random cipher from the seed. 
pub static ref RANDOM_CIPHER_BLOWFISH: CipherBlowfish = { let mut rng = ThreadRng::default(); - let seed: Seed = rng.gen(); + let seed: Seed = rng.random(); CipherBlowfish::new_from_slice(&seed).expect("it could not generate key") }; diff --git a/src/lib.rs b/src/lib.rs index 210c88c14..a864587c5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -494,7 +494,6 @@ pub mod app; pub mod bootstrap; pub mod console; pub mod container; -pub mod packages; pub mod servers; pub mod shared; diff --git a/src/packages/mod.rs b/src/packages/mod.rs deleted file mode 100644 index 7e43aa210..000000000 --- a/src/packages/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -//! This module contains logic pending to be extracted into workspace packages. -//! -//! It will be moved to the directory `packages`. -pub mod tracker_api_core; diff --git a/src/servers/apis/v1/context/stats/handlers.rs b/src/servers/apis/v1/context/stats/handlers.rs index cfd266c49..b8adfc3e3 100644 --- a/src/servers/apis/v1/context/stats/handlers.rs +++ b/src/servers/apis/v1/context/stats/handlers.rs @@ -9,9 +9,9 @@ use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepo use bittorrent_udp_tracker_core::services::banning::BanService; use serde::Deserialize; use tokio::sync::RwLock; +use torrust_tracker_api_core::statistics::services::get_metrics; use super::responses::{metrics_response, stats_response}; -use crate::packages::tracker_api_core::statistics::services::get_metrics; #[derive(Deserialize, Debug, Default)] #[serde(rename_all = "lowercase")] diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index 8477ca5cb..11169f31e 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -1,8 +1,7 @@ //! API resources for the [`stats`](crate::servers::apis::v1::context::stats) //! API context. 
use serde::{Deserialize, Serialize}; - -use crate::packages::tracker_api_core::statistics::services::TrackerMetrics; +use torrust_tracker_api_core::statistics::services::TrackerMetrics; /// It contains all the statistics generated by the tracker. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -118,12 +117,11 @@ impl From for Stats { #[cfg(test)] mod tests { - use packages::tracker_api_core::statistics::metrics::Metrics; + use torrust_tracker_api_core::statistics::metrics::Metrics; + use torrust_tracker_api_core::statistics::services::TrackerMetrics; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use super::Stats; - use crate::packages::tracker_api_core::statistics::services::TrackerMetrics; - use crate::packages::{self}; #[test] fn stats_resource_should_be_converted_from_tracker_metrics() { diff --git a/src/servers/apis/v1/context/stats/responses.rs b/src/servers/apis/v1/context/stats/responses.rs index 5a71c4235..0b4da778f 100644 --- a/src/servers/apis/v1/context/stats/responses.rs +++ b/src/servers/apis/v1/context/stats/responses.rs @@ -1,9 +1,9 @@ //! API responses for the [`stats`](crate::servers::apis::v1::context::stats) //! API context. use axum::response::{IntoResponse, Json, Response}; +use torrust_tracker_api_core::statistics::services::TrackerMetrics; use super::resources::Stats; -use crate::packages::tracker_api_core::statistics::services::TrackerMetrics; /// `200` response that contains the [`Stats`] resource as json. 
#[must_use] From 39d9706de3346c9c68a1e93d478688f2460e3216 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Feb 2025 07:19:38 +0000 Subject: [PATCH 284/802] refactor: [#1224] rename package to bittorrent-http-tracker-protocol --- .github/workflows/deployment.yaml | 2 +- Cargo.lock | 36 +++++++++---------- Cargo.toml | 2 +- packages/http-protocol/Cargo.toml | 2 +- packages/http-protocol/README.md | 2 +- .../http-protocol/src/percent_encoding.rs | 4 +-- packages/http-protocol/src/v1/query.rs | 8 ++--- .../http-protocol/src/v1/requests/announce.rs | 2 +- .../src/v1/responses/announce.rs | 4 +-- .../http-protocol/src/v1/responses/error.rs | 2 +- .../http-protocol/src/v1/responses/scrape.rs | 2 +- .../src/v1/services/peer_ip_resolver.rs | 4 +-- packages/http-tracker-core/Cargo.toml | 2 +- .../src/services/announce.rs | 10 +++--- .../http-tracker-core/src/services/scrape.rs | 10 +++--- src/servers/http/mod.rs | 26 +++++++------- .../http/v1/extractors/announce_request.rs | 14 ++++---- .../http/v1/extractors/authentication_key.rs | 6 ++-- .../http/v1/extractors/client_ip_sources.rs | 2 +- .../http/v1/extractors/scrape_request.rs | 14 ++++---- src/servers/http/v1/handlers/announce.rs | 16 ++++----- src/servers/http/v1/handlers/common/auth.rs | 2 +- .../http/v1/handlers/common/peer_ip.rs | 6 ++-- src/servers/http/v1/handlers/scrape.rs | 16 ++++----- 24 files changed, 97 insertions(+), 97 deletions(-) diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 5b39c7f2f..67c22dd78 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -55,8 +55,8 @@ jobs: env: CARGO_REGISTRY_TOKEN: "${{ secrets.TORRUST_UPDATE_CARGO_REGISTRY_TOKEN }}" run: | - cargo publish -p bittorrent-http-protocol cargo publish -p bittorrent-http-tracker-core + cargo publish -p bittorrent-http-tracker-protocol cargo publish -p bittorrent-tracker-client cargo publish -p bittorrent-tracker-core cargo publish -p bittorrent-udp-protocol 
diff --git a/Cargo.lock b/Cargo.lock index 63c51dc19..28d553519 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -548,40 +548,40 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" [[package]] -name = "bittorrent-http-protocol" +name = "bittorrent-http-tracker-core" version = "3.0.0-develop" dependencies = [ "aquatic_udp_protocol", + "bittorrent-http-tracker-protocol", "bittorrent-primitives", "bittorrent-tracker-core", - "derive_more", - "multimap", - "percent-encoding", - "serde", - "serde_bencode", - "thiserror 2.0.11", - "torrust-tracker-clock", + "futures", + "mockall", + "tokio", "torrust-tracker-configuration", - "torrust-tracker-contrib-bencode", - "torrust-tracker-located-error", "torrust-tracker-primitives", + "torrust-tracker-test-helpers", + "tracing", ] [[package]] -name = "bittorrent-http-tracker-core" +name = "bittorrent-http-tracker-protocol" version = "3.0.0-develop" dependencies = [ "aquatic_udp_protocol", - "bittorrent-http-protocol", "bittorrent-primitives", "bittorrent-tracker-core", - "futures", - "mockall", - "tokio", + "derive_more", + "multimap", + "percent-encoding", + "serde", + "serde_bencode", + "thiserror 2.0.11", + "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-contrib-bencode", + "torrust-tracker-located-error", "torrust-tracker-primitives", - "torrust-tracker-test-helpers", - "tracing", ] [[package]] @@ -4347,8 +4347,8 @@ dependencies = [ "axum-client-ip", "axum-extra", "axum-server", - "bittorrent-http-protocol", "bittorrent-http-tracker-core", + "bittorrent-http-tracker-protocol", "bittorrent-primitives", "bittorrent-tracker-client", "bittorrent-tracker-core", diff --git a/Cargo.toml b/Cargo.toml index d31dcf9c4..1fcd189da 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,7 +39,7 @@ axum = { version = "0", features = ["macros"] } axum-client-ip = "0" axum-extra = { version = "0", features = ["query"] } axum-server = 
{ version = "0", features = ["tls-rustls-no-provider"] } -bittorrent-http-protocol = { version = "3.0.0-develop", path = "packages/http-protocol" } +bittorrent-http-tracker-protocol = { version = "3.0.0-develop", path = "packages/http-protocol" } bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "packages/http-tracker-core" } bittorrent-primitives = "0.1.0" bittorrent-tracker-client = { version = "3.0.0-develop", path = "packages/tracker-client" } diff --git a/packages/http-protocol/Cargo.toml b/packages/http-protocol/Cargo.toml index e76094c1a..7445b37a1 100644 --- a/packages/http-protocol/Cargo.toml +++ b/packages/http-protocol/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "A library with the primitive types and functions for the BitTorrent HTTP tracker protocol." keywords = ["api", "library", "primitives"] -name = "bittorrent-http-protocol" +name = "bittorrent-http-tracker-protocol" readme = "README.md" authors.workspace = true diff --git a/packages/http-protocol/README.md b/packages/http-protocol/README.md index 62de968d9..5f0a31a78 100644 --- a/packages/http-protocol/README.md +++ b/packages/http-protocol/README.md @@ -4,7 +4,7 @@ A library with the primitive types and functions used by BitTorrent HTTP tracker ## Documentation -[Crate documentation](https://docs.rs/bittorrent-http-protocol). +[Crate documentation](https://docs.rs/bittorrent-http-tracker-protocol). 
## License diff --git a/packages/http-protocol/src/percent_encoding.rs b/packages/http-protocol/src/percent_encoding.rs index b54c89a04..e58bf94be 100644 --- a/packages/http-protocol/src/percent_encoding.rs +++ b/packages/http-protocol/src/percent_encoding.rs @@ -27,7 +27,7 @@ use torrust_tracker_primitives::peer; /// /// ```rust /// use std::str::FromStr; -/// use bittorrent_http_protocol::percent_encoding::percent_decode_info_hash; +/// use bittorrent_http_tracker_protocol::percent_encoding::percent_decode_info_hash; /// use bittorrent_primitives::info_hash::InfoHash; /// use torrust_tracker_primitives::peer; /// @@ -60,7 +60,7 @@ pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result().unwrap(); /// @@ -73,7 +73,7 @@ impl Query { /// Returns all the param values as a vector even if it has only one value. /// /// ```rust - /// use bittorrent_http_protocol::v1::query::Query; + /// use bittorrent_http_tracker_protocol::v1::query::Query; /// /// let query = "param1=value1".parse::().unwrap(); /// diff --git a/packages/http-protocol/src/v1/requests/announce.rs b/packages/http-protocol/src/v1/requests/announce.rs index 66f7a1227..036aa3048 100644 --- a/packages/http-protocol/src/v1/requests/announce.rs +++ b/packages/http-protocol/src/v1/requests/announce.rs @@ -34,7 +34,7 @@ const NUMWANT: &str = "numwant"; /// /// ```rust /// use aquatic_udp_protocol::{NumberOfBytes, PeerId}; -/// use bittorrent_http_protocol::v1::requests::announce::{Announce, Compact, Event}; +/// use bittorrent_http_tracker_protocol::v1::requests::announce::{Announce, Compact, Event}; /// use bittorrent_primitives::info_hash::InfoHash; /// /// let request = Announce { diff --git a/packages/http-protocol/src/v1/responses/announce.rs b/packages/http-protocol/src/v1/responses/announce.rs index df187fdd1..7175b019a 100644 --- a/packages/http-protocol/src/v1/responses/announce.rs +++ b/packages/http-protocol/src/v1/responses/announce.rs @@ -132,7 +132,7 @@ impl Into> for Compact { /// /// 
```rust /// use std::net::{IpAddr, Ipv4Addr}; -/// use bittorrent_http_protocol::v1::responses::announce::{Normal, NormalPeer}; +/// use bittorrent_http_tracker_protocol::v1::responses::announce::{Normal, NormalPeer}; /// /// let peer = NormalPeer { /// peer_id: *b"-qB00000000000000001", @@ -184,7 +184,7 @@ impl From<&NormalPeer> for BencodeMut<'_> { /// /// ```rust /// use std::net::{IpAddr, Ipv4Addr}; -/// use bittorrent_http_protocol::v1::responses::announce::{Compact, CompactPeer, CompactPeerData}; +/// use bittorrent_http_tracker_protocol::v1::responses::announce::{Compact, CompactPeer, CompactPeerData}; /// /// let peer = CompactPeer::V4(CompactPeerData { /// ip: Ipv4Addr::new(0x69, 0x69, 0x69, 0x69), // 105.105.105.105 diff --git a/packages/http-protocol/src/v1/responses/error.rs b/packages/http-protocol/src/v1/responses/error.rs index 30749f73a..8dc28e938 100644 --- a/packages/http-protocol/src/v1/responses/error.rs +++ b/packages/http-protocol/src/v1/responses/error.rs @@ -27,7 +27,7 @@ impl Error { /// Returns the bencoded representation of the `Error` struct. /// /// ```rust - /// use bittorrent_http_protocol::v1::responses::error::Error; + /// use bittorrent_http_tracker_protocol::v1::responses::error::Error; /// /// let err = Error { /// failure_reason: "error message".to_owned(), diff --git a/packages/http-protocol/src/v1/responses/scrape.rs b/packages/http-protocol/src/v1/responses/scrape.rs index 6b4dcc793..022735abc 100644 --- a/packages/http-protocol/src/v1/responses/scrape.rs +++ b/packages/http-protocol/src/v1/responses/scrape.rs @@ -9,7 +9,7 @@ use torrust_tracker_primitives::core::ScrapeData; /// The `Scrape` response for the HTTP tracker. 
/// /// ```rust -/// use bittorrent_http_protocol::v1::responses::scrape::Bencoded; +/// use bittorrent_http_tracker_protocol::v1::responses::scrape::Bencoded; /// use bittorrent_primitives::info_hash::InfoHash; /// use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; /// use torrust_tracker_primitives::core::ScrapeData; diff --git a/packages/http-protocol/src/v1/services/peer_ip_resolver.rs b/packages/http-protocol/src/v1/services/peer_ip_resolver.rs index f0ad6a83e..8e99b56d1 100644 --- a/packages/http-protocol/src/v1/services/peer_ip_resolver.rs +++ b/packages/http-protocol/src/v1/services/peer_ip_resolver.rs @@ -63,7 +63,7 @@ pub enum PeerIpResolutionError { /// use std::net::IpAddr; /// use std::str::FromStr; /// -/// use bittorrent_http_protocol::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; +/// use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; /// /// let on_reverse_proxy = true; /// @@ -85,7 +85,7 @@ pub enum PeerIpResolutionError { /// use std::net::IpAddr; /// use std::str::FromStr; /// -/// use bittorrent_http_protocol::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; +/// use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; /// /// let on_reverse_proxy = false; /// diff --git a/packages/http-tracker-core/Cargo.toml b/packages/http-tracker-core/Cargo.toml index a1ee18f66..bc6a3d1b3 100644 --- a/packages/http-tracker-core/Cargo.toml +++ b/packages/http-tracker-core/Cargo.toml @@ -15,7 +15,7 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" -bittorrent-http-protocol = { version = "3.0.0-develop", path = "../http-protocol" } +bittorrent-http-tracker-protocol = { version = "3.0.0-develop", path = "../http-protocol" } bittorrent-primitives = "0.1.0" bittorrent-tracker-core = { version = "3.0.0-develop", path = 
"../tracker-core" } futures = "0" diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index aff1fc1bd..ce34ee31c 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -11,9 +11,9 @@ use std::net::IpAddr; use std::panic::Location; use std::sync::Arc; -use bittorrent_http_protocol::v1::requests::announce::{peer_from_request, Announce}; -use bittorrent_http_protocol::v1::responses; -use bittorrent_http_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources}; +use bittorrent_http_tracker_protocol::v1::requests::announce::{peer_from_request, Announce}; +use bittorrent_http_tracker_protocol::v1::responses; +use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources}; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::authentication::{self, Key}; @@ -115,8 +115,8 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; - use bittorrent_http_protocol::v1::requests::announce::Announce; - use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; + use bittorrent_http_tracker_protocol::v1::requests::announce::Announce; + use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; use bittorrent_tracker_core::authentication::service::AuthenticationService; diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 11011f16b..686a849ea 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ 
-10,9 +10,9 @@ use std::net::IpAddr; use std::sync::Arc; -use bittorrent_http_protocol::v1::requests::scrape::Scrape; -use bittorrent_http_protocol::v1::responses; -use bittorrent_http_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources}; +use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; +use bittorrent_http_tracker_protocol::v1::responses; +use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources}; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::authentication::Key; @@ -203,8 +203,8 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::sync::Arc; - use bittorrent_http_protocol::v1::requests::scrape::Scrape; - use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; + use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; + use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; use bittorrent_tracker_core::announce_handler::PeersWanted; use mockall::predicate::eq; use torrust_tracker_primitives::core::ScrapeData; diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs index 6bc93992f..395f633cf 100644 --- a/src/servers/http/mod.rs +++ b/src/servers/http/mod.rs @@ -43,18 +43,18 @@ //! //! Parameter | Type | Description | Required | Default | Example //! ---|---|---|---|---|--- -//! [`info_hash`](bittorrent_http_protocol::v1::requests::announce::Announce::info_hash) | percent encoded of 20-byte array | The `Info Hash` of the torrent. | Yes | No | `%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00` +//! [`info_hash`](bittorrent_http_tracker_protocol::v1::requests::announce::Announce::info_hash) | percent encoded of 20-byte array | The `Info Hash` of the torrent. | Yes | No | `%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00` //! `peer_addr` | string |The IP address of the peer. 
| No | No | `2.137.87.41` -//! [`downloaded`](bittorrent_http_protocol::v1::requests::announce::Announce::downloaded) | positive integer |The number of bytes downloaded by the peer. | No | `0` | `0` -//! [`uploaded`](bittorrent_http_protocol::v1::requests::announce::Announce::uploaded) | positive integer | The number of bytes uploaded by the peer. | No | `0` | `0` -//! [`peer_id`](bittorrent_http_protocol::v1::requests::announce::Announce::peer_id) | percent encoded of 20-byte array | The ID of the peer. | Yes | No | `-qB00000000000000001` -//! [`port`](bittorrent_http_protocol::v1::requests::announce::Announce::port) | positive integer | The port used by the peer. | Yes | No | `17548` -//! [`left`](bittorrent_http_protocol::v1::requests::announce::Announce::left) | positive integer | The number of bytes pending to download. | No | `0` | `0` -//! [`event`](bittorrent_http_protocol::v1::requests::announce::Announce::event) | positive integer | The event that triggered the `Announce` request: `started`, `completed`, `stopped` | No | `None` | `completed` -//! [`compact`](bittorrent_http_protocol::v1::requests::announce::Announce::compact) | `0` or `1` | Whether the tracker should return a compact peer list. | No | `None` | `0` +//! [`downloaded`](bittorrent_http_tracker_protocol::v1::requests::announce::Announce::downloaded) | positive integer |The number of bytes downloaded by the peer. | No | `0` | `0` +//! [`uploaded`](bittorrent_http_tracker_protocol::v1::requests::announce::Announce::uploaded) | positive integer | The number of bytes uploaded by the peer. | No | `0` | `0` +//! [`peer_id`](bittorrent_http_tracker_protocol::v1::requests::announce::Announce::peer_id) | percent encoded of 20-byte array | The ID of the peer. | Yes | No | `-qB00000000000000001` +//! [`port`](bittorrent_http_tracker_protocol::v1::requests::announce::Announce::port) | positive integer | The port used by the peer. | Yes | No | `17548` +//! 
[`left`](bittorrent_http_tracker_protocol::v1::requests::announce::Announce::left) | positive integer | The number of bytes pending to download. | No | `0` | `0` +//! [`event`](bittorrent_http_tracker_protocol::v1::requests::announce::Announce::event) | positive integer | The event that triggered the `Announce` request: `started`, `completed`, `stopped` | No | `None` | `completed` +//! [`compact`](bittorrent_http_tracker_protocol::v1::requests::announce::Announce::compact) | `0` or `1` | Whether the tracker should return a compact peer list. | No | `None` | `0` //! `numwant` | positive integer | **Not implemented**. The maximum number of peers you want in the reply. | No | `50` | `50` //! -//! Refer to the [`Announce`](bittorrent_http_protocol::v1::requests::announce::Announce) +//! Refer to the [`Announce`](bittorrent_http_tracker_protocol::v1::requests::announce::Announce) //! request for more information about the parameters. //! //! > **NOTICE**: the [BEP 03](https://www.bittorrent.org/beps/bep_0003.html) @@ -152,7 +152,7 @@ //! 000000f0: 65 e //! ``` //! -//! Refer to the [`Normal`](bittorrent_http_protocol::v1::responses::announce::Normal), i.e. `Non-Compact` +//! Refer to the [`Normal`](bittorrent_http_tracker_protocol::v1::responses::announce::Normal), i.e. `Non-Compact` //! response for more information about the response. //! //! **Sample compact response** @@ -190,7 +190,7 @@ //! 0000070: 7065 pe //! ``` //! -//! Refer to the [`Compact`](bittorrent_http_protocol::v1::responses::announce::Compact) +//! Refer to the [`Compact`](bittorrent_http_tracker_protocol::v1::responses::announce::Compact) //! response for more information about the response. //! //! **Protocol** @@ -220,12 +220,12 @@ //! //! Parameter | Type | Description | Required | Default | Example //! ---|---|---|---|---|--- -//! [`info_hash`](bittorrent_http_protocol::v1::requests::scrape::Scrape::info_hashes) | percent encoded of 20-byte array | The `Info Hash` of the torrent. 
| Yes | No | `%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00` +//! [`info_hash`](bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape::info_hashes) | percent encoded of 20-byte array | The `Info Hash` of the torrent. | Yes | No | `%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00` //! //! > **NOTICE**: you can scrape multiple torrents at the same time by passing //! > multiple `info_hash` parameters. //! -//! Refer to the [`Scrape`](bittorrent_http_protocol::v1::requests::scrape::Scrape) +//! Refer to the [`Scrape`](bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape) //! request for more information about the parameters. //! //! **Sample scrape URL** diff --git a/src/servers/http/v1/extractors/announce_request.rs b/src/servers/http/v1/extractors/announce_request.rs index 74c9ab8c1..3265d04cd 100644 --- a/src/servers/http/v1/extractors/announce_request.rs +++ b/src/servers/http/v1/extractors/announce_request.rs @@ -4,10 +4,10 @@ //! It parses the query parameters returning an [`Announce`] //! request. //! -//! Refer to [`Announce`](bittorrent_http_protocol::v1::requests::announce) for more +//! Refer to [`Announce`](bittorrent_http_tracker_protocol::v1::requests::announce) for more //! information about the returned structure. //! -//! It returns a bencoded [`Error`](bittorrent_http_protocol::v1::responses::error) +//! It returns a bencoded [`Error`](bittorrent_http_tracker_protocol::v1::responses::error) //! response (`500`) if the query parameters are missing or invalid. //! //! 
**Sample announce request** @@ -33,9 +33,9 @@ use std::panic::Location; use axum::extract::FromRequestParts; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; -use bittorrent_http_protocol::v1::query::Query; -use bittorrent_http_protocol::v1::requests::announce::{Announce, ParseAnnounceQueryError}; -use bittorrent_http_protocol::v1::responses; +use bittorrent_http_tracker_protocol::v1::query::Query; +use bittorrent_http_tracker_protocol::v1::requests::announce::{Announce, ParseAnnounceQueryError}; +use bittorrent_http_tracker_protocol::v1::responses; use futures::FutureExt; use hyper::StatusCode; @@ -87,8 +87,8 @@ mod tests { use std::str::FromStr; use aquatic_udp_protocol::{NumberOfBytes, PeerId}; - use bittorrent_http_protocol::v1::requests::announce::{Announce, Compact, Event}; - use bittorrent_http_protocol::v1::responses::error::Error; + use bittorrent_http_tracker_protocol::v1::requests::announce::{Announce, Compact, Event}; + use bittorrent_http_tracker_protocol::v1::responses::error::Error; use bittorrent_primitives::info_hash::InfoHash; use super::extract_announce_from; diff --git a/src/servers/http/v1/extractors/authentication_key.rs b/src/servers/http/v1/extractors/authentication_key.rs index c99c7000a..da4cd2217 100644 --- a/src/servers/http/v1/extractors/authentication_key.rs +++ b/src/servers/http/v1/extractors/authentication_key.rs @@ -9,7 +9,7 @@ //! It's a wrapper for Axum `Path` extractor in order to return custom //! authentication errors. //! -//! It returns a bencoded [`Error`](bittorrent_http_protocol::v1::responses::error) +//! It returns a bencoded [`Error`](bittorrent_http_tracker_protocol::v1::responses::error) //! response (`500`) if the `key` parameter are missing or invalid. //! //! 
**Sample authentication error responses** @@ -49,7 +49,7 @@ use axum::extract::rejection::PathRejection; use axum::extract::{FromRequestParts, Path}; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; -use bittorrent_http_protocol::v1::responses; +use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_tracker_core::authentication::Key; use hyper::StatusCode; use serde::Deserialize; @@ -126,7 +126,7 @@ fn custom_error(rejection: &PathRejection) -> responses::error::Error { #[cfg(test)] mod tests { - use bittorrent_http_protocol::v1::responses::error::Error; + use bittorrent_http_tracker_protocol::v1::responses::error::Error; use super::parse_key; diff --git a/src/servers/http/v1/extractors/client_ip_sources.rs b/src/servers/http/v1/extractors/client_ip_sources.rs index 02265554e..8c7a2bf40 100644 --- a/src/servers/http/v1/extractors/client_ip_sources.rs +++ b/src/servers/http/v1/extractors/client_ip_sources.rs @@ -42,7 +42,7 @@ use axum::extract::{ConnectInfo, FromRequestParts}; use axum::http::request::Parts; use axum::response::Response; use axum_client_ip::RightmostXForwardedFor; -use bittorrent_http_protocol::v1::services::peer_ip_resolver::ClientIpSources; +use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; /// Extractor for the [`ClientIpSources`] /// struct. diff --git a/src/servers/http/v1/extractors/scrape_request.rs b/src/servers/http/v1/extractors/scrape_request.rs index bacd36169..66442da95 100644 --- a/src/servers/http/v1/extractors/scrape_request.rs +++ b/src/servers/http/v1/extractors/scrape_request.rs @@ -4,10 +4,10 @@ //! It parses the query parameters returning an [`Scrape`] //! request. //! -//! Refer to [`Scrape`](bittorrent_http_protocol::v1::requests::scrape) for more +//! Refer to [`Scrape`](bittorrent_http_tracker_protocol::v1::requests::scrape) for more //! information about the returned structure. //! -//! 
It returns a bencoded [`Error`](bittorrent_http_protocol::v1::responses::error) +//! It returns a bencoded [`Error`](bittorrent_http_tracker_protocol::v1::responses::error) //! response (`500`) if the query parameters are missing or invalid. //! //! **Sample scrape request** @@ -33,9 +33,9 @@ use std::panic::Location; use axum::extract::FromRequestParts; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; -use bittorrent_http_protocol::v1::query::Query; -use bittorrent_http_protocol::v1::requests::scrape::{ParseScrapeQueryError, Scrape}; -use bittorrent_http_protocol::v1::responses; +use bittorrent_http_tracker_protocol::v1::query::Query; +use bittorrent_http_tracker_protocol::v1::requests::scrape::{ParseScrapeQueryError, Scrape}; +use bittorrent_http_tracker_protocol::v1::responses; use futures::FutureExt; use hyper::StatusCode; @@ -86,8 +86,8 @@ fn extract_scrape_from(maybe_raw_query: Option<&str>) -> Result Date: Wed, 19 Feb 2025 07:25:35 +0000 Subject: [PATCH 285/802] refactor: [#1224] rename package to bittorrent-udp-tracker-protocol --- .github/workflows/deployment.yaml | 2 +- Cargo.lock | 20 +++++++++---------- packages/udp-protocol/Cargo.toml | 2 +- packages/udp-tracker-core/Cargo.toml | 2 +- .../udp-tracker-core/src/services/announce.rs | 2 +- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 67c22dd78..0e38f5cfe 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -59,8 +59,8 @@ jobs: cargo publish -p bittorrent-http-tracker-protocol cargo publish -p bittorrent-tracker-client cargo publish -p bittorrent-tracker-core - cargo publish -p bittorrent-udp-protocol cargo publish -p bittorrent-udp-tracker-core + cargo publish -p bittorrent-udp-tracker-protocol cargo publish -p torrust-tracker cargo publish -p torrust-tracker-api-client cargo publish -p torrust-tracker-api-core diff --git a/Cargo.lock b/Cargo.lock 
index 28d553519..788d60d68 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -651,15 +651,6 @@ dependencies = [ "url", ] -[[package]] -name = "bittorrent-udp-protocol" -version = "3.0.0-develop" -dependencies = [ - "aquatic_udp_protocol", - "torrust-tracker-clock", - "torrust-tracker-primitives", -] - [[package]] name = "bittorrent-udp-tracker-core" version = "3.0.0-develop" @@ -667,7 +658,7 @@ dependencies = [ "aquatic_udp_protocol", "bittorrent-primitives", "bittorrent-tracker-core", - "bittorrent-udp-protocol", + "bittorrent-udp-tracker-protocol", "bloom", "blowfish", "cipher", @@ -684,6 +675,15 @@ dependencies = [ "zerocopy 0.7.35", ] +[[package]] +name = "bittorrent-udp-tracker-protocol" +version = "3.0.0-develop" +dependencies = [ + "aquatic_udp_protocol", + "torrust-tracker-clock", + "torrust-tracker-primitives", +] + [[package]] name = "bitvec" version = "1.0.1" diff --git a/packages/udp-protocol/Cargo.toml b/packages/udp-protocol/Cargo.toml index 8f0f9fe98..31fd52af8 100644 --- a/packages/udp-protocol/Cargo.toml +++ b/packages/udp-protocol/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "A library with the primitive types and functions for the BitTorrent UDP tracker protocol." 
keywords = ["bittorrent", "library", "primitives", "udp"] -name = "bittorrent-udp-protocol" +name = "bittorrent-udp-tracker-protocol" readme = "README.md" authors.workspace = true diff --git a/packages/udp-tracker-core/Cargo.toml b/packages/udp-tracker-core/Cargo.toml index bfa840cc3..5f7622032 100644 --- a/packages/udp-tracker-core/Cargo.toml +++ b/packages/udp-tracker-core/Cargo.toml @@ -17,7 +17,7 @@ version.workspace = true aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } -bittorrent-udp-protocol = { version = "3.0.0-develop", path = "../udp-protocol" } +bittorrent-udp-tracker-protocol = { version = "3.0.0-develop", path = "../udp-protocol" } bloom = "0.3.2" blowfish = "0" cipher = "0" diff --git a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index be47b9136..b40162283 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -16,7 +16,7 @@ use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; use bittorrent_tracker_core::error::{AnnounceError, WhitelistError}; use bittorrent_tracker_core::whitelist; -use bittorrent_udp_protocol::peer_builder; +use bittorrent_udp_tracker_protocol::peer_builder; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; From 3daf3f175b82556027641ca2f2f46e3a7a9af756 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Feb 2025 08:51:01 +0000 Subject: [PATCH 286/802] refactor: [#1294] extract axum-server package --- .github/workflows/deployment.yaml | 1 + Cargo.lock | 16 + Cargo.toml | 3 +- packages/axum-server/Cargo.toml | 26 + packages/axum-server/LICENSE | 661 ++++++++++++++++++ packages/axum-server/README.md | 11 + .../axum-server/src}/custom_axum_server.rs | 0 packages/axum-server/src/lib.rs | 1 + 
src/servers/apis/server.rs | 2 +- src/servers/http/server.rs | 2 +- src/servers/mod.rs | 1 - 11 files changed, 720 insertions(+), 4 deletions(-) create mode 100644 packages/axum-server/Cargo.toml create mode 100644 packages/axum-server/LICENSE create mode 100644 packages/axum-server/README.md rename {src/servers => packages/axum-server/src}/custom_axum_server.rs (100%) create mode 100644 packages/axum-server/src/lib.rs diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 0e38f5cfe..7ced8af44 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -61,6 +61,7 @@ jobs: cargo publish -p bittorrent-tracker-core cargo publish -p bittorrent-udp-tracker-core cargo publish -p bittorrent-udp-tracker-protocol + cargo publish -p torrust-axum-server cargo publish -p torrust-tracker cargo publish -p torrust-tracker-api-client cargo publish -p torrust-tracker-api-core diff --git a/Cargo.lock b/Cargo.lock index 788d60d68..d1e1ac7b0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4337,6 +4337,20 @@ dependencies = [ "winnow", ] +[[package]] +name = "torrust-axum-server" +version = "3.0.0-develop" +dependencies = [ + "axum-server", + "futures-util", + "http-body", + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower 0.4.13", +] + [[package]] name = "torrust-tracker" version = "3.0.0-develop" @@ -4386,6 +4400,7 @@ dependencies = [ "serde_with", "thiserror 2.0.11", "tokio", + "torrust-axum-server", "torrust-tracker-api-client", "torrust-tracker-api-core", "torrust-tracker-clock", @@ -4549,6 +4564,7 @@ dependencies = [ "futures-util", "pin-project", "pin-project-lite", + "tokio", "tower-layer", "tower-service", "tracing", diff --git a/Cargo.toml b/Cargo.toml index 1fcd189da..4283baab5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,8 +39,8 @@ axum = { version = "0", features = ["macros"] } axum-client-ip = "0" axum-extra = { version = "0", features = ["query"] } axum-server = { version = "0", features = 
["tls-rustls-no-provider"] } -bittorrent-http-tracker-protocol = { version = "3.0.0-develop", path = "packages/http-protocol" } bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "packages/http-tracker-core" } +bittorrent-http-tracker-protocol = { version = "3.0.0-develop", path = "packages/http-protocol" } bittorrent-primitives = "0.1.0" bittorrent-tracker-client = { version = "3.0.0-develop", path = "packages/tracker-client" } bittorrent-tracker-core = { version = "3.0.0-develop", path = "packages/tracker-core" } @@ -76,6 +76,7 @@ serde_repr = "0" serde_with = { version = "3", features = ["json"] } thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-axum-server = { version = "3.0.0-develop", path = "packages/axum-server" } torrust-tracker-api-core = { version = "3.0.0-develop", path = "packages/tracker-api-core" } torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/configuration" } diff --git a/packages/axum-server/Cargo.toml b/packages/axum-server/Cargo.toml new file mode 100644 index 000000000..8a4b76998 --- /dev/null +++ b/packages/axum-server/Cargo.toml @@ -0,0 +1,26 @@ +[package] +authors.workspace = true +description = "A wrapper for the Axum server for Torrust HTTP servers to add timeouts." 
+documentation.workspace = true +edition.workspace = true +homepage.workspace = true +keywords = ["axum", "server", "torrust", "torrust", "wrapper"] +license.workspace = true +name = "torrust-axum-server" +publish.workspace = true +readme = "README.md" +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +axum-server = { version = "0", features = ["tls-rustls-no-provider"] } +futures-util = "0" +http-body = "1" +hyper = "1" +hyper-util = { version = "0", features = ["http1", "http2", "tokio"] } +pin-project-lite = "0" +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tower = { version = "0", features = ["timeout"] } + +[dev-dependencies] diff --git a/packages/axum-server/LICENSE b/packages/axum-server/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/axum-server/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. 
+ + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<https://www.gnu.org/licenses/>. diff --git a/packages/axum-server/README.md b/packages/axum-server/README.md new file mode 100644 index 000000000..d2f396915 --- /dev/null +++ b/packages/axum-server/README.md @@ -0,0 +1,11 @@ +# Torrust Axum Server + +A wrapper for the Axum server for Torrust HTTP servers to add timeouts. + +## Documentation + +[Crate documentation](https://docs.rs/torrust-axum-server). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/src/servers/custom_axum_server.rs b/packages/axum-server/src/custom_axum_server.rs similarity index 100% rename from src/servers/custom_axum_server.rs rename to packages/axum-server/src/custom_axum_server.rs diff --git a/packages/axum-server/src/lib.rs b/packages/axum-server/src/lib.rs new file mode 100644 index 000000000..ace51c184 --- /dev/null +++ b/packages/axum-server/src/lib.rs @@ -0,0 +1 @@ +pub mod custom_axum_server; \ No newline at end of file diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 7388a1851..7a8087215 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -33,6 +33,7 @@ use derive_more::Constructor; use futures::future::BoxFuture; use thiserror::Error; use tokio::sync::oneshot::{Receiver, Sender}; +use torrust_axum_server::custom_axum_server::{self, TimeoutAcceptor}; use torrust_tracker_configuration::AccessTokens; use tracing::{instrument, Level}; @@ -40,7 +41,6 @@ use super::routes::router; use crate::bootstrap::jobs::Started; use crate::container::HttpApiContainer; use crate::servers::apis::API_LOG_TARGET; -use
crate::servers::custom_axum_server::{self, TimeoutAcceptor}; use crate::servers::logging::STARTED_ON; use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; use crate::servers::signals::{graceful_shutdown, Halted}; diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 2355bedf9..3de40e0b0 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -7,12 +7,12 @@ use axum_server::Handle; use derive_more::Constructor; use futures::future::BoxFuture; use tokio::sync::oneshot::{Receiver, Sender}; +use torrust_axum_server::custom_axum_server::{self, TimeoutAcceptor}; use tracing::instrument; use super::v1::routes::router; use crate::bootstrap::jobs::Started; use crate::container::HttpTrackerContainer; -use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; use crate::servers::logging::STARTED_ON; use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; diff --git a/src/servers/mod.rs b/src/servers/mod.rs index 705a4728e..f9ed2d10c 100644 --- a/src/servers/mod.rs +++ b/src/servers/mod.rs @@ -1,6 +1,5 @@ //! Servers. Services that can be started and stopped. 
pub mod apis; -pub mod custom_axum_server; pub mod health_check_api; pub mod http; pub mod logging; From 0685f1a79e2854a377bb82246c11b72fa63eb25a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Feb 2025 09:12:57 +0000 Subject: [PATCH 287/802] refactor: [#1294] extract server-lib package --- .github/workflows/deployment.yaml | 1 + Cargo.lock | 16 +- Cargo.toml | 4 +- packages/axum-server/Cargo.toml | 4 +- packages/axum-server/src/lib.rs | 3 +- packages/axum-server/src/signals.rs | 21 + packages/server-lib/Cargo.toml | 22 + packages/server-lib/LICENSE | 661 ++++++++++++++++++ packages/server-lib/README.md | 11 + packages/server-lib/src/lib.rs | 3 + .../server-lib/src}/logging.rs | 0 .../server-lib/src}/registar.rs | 0 .../server-lib/src}/signals.rs | 19 - src/app.rs | 2 +- src/bootstrap/jobs/health_check_api.rs | 6 +- src/bootstrap/jobs/http_tracker.rs | 4 +- src/bootstrap/jobs/tracker_apis.rs | 4 +- src/bootstrap/jobs/udp_tracker.rs | 2 +- src/console/ci/e2e/logs_parser.rs | 2 +- src/servers/apis/routes.rs | 2 +- src/servers/apis/server.rs | 9 +- src/servers/health_check_api/handlers.rs | 2 +- src/servers/health_check_api/server.rs | 7 +- src/servers/http/server.rs | 9 +- src/servers/http/v1/routes.rs | 2 +- src/servers/mod.rs | 3 - src/servers/udp/server/launcher.rs | 6 +- src/servers/udp/server/mod.rs | 2 +- src/servers/udp/server/spawner.rs | 2 +- src/servers/udp/server/states.rs | 4 +- tests/servers/api/environment.rs | 2 +- tests/servers/health_check_api/contract.rs | 2 +- tests/servers/health_check_api/environment.rs | 4 +- tests/servers/http/environment.rs | 2 +- tests/servers/udp/environment.rs | 2 +- 35 files changed, 778 insertions(+), 67 deletions(-) create mode 100644 packages/axum-server/src/signals.rs create mode 100644 packages/server-lib/Cargo.toml create mode 100644 packages/server-lib/LICENSE create mode 100644 packages/server-lib/README.md create mode 100644 packages/server-lib/src/lib.rs rename {src/servers => 
packages/server-lib/src}/logging.rs (100%) rename {src/servers => packages/server-lib/src}/registar.rs (100%) rename {src/servers => packages/server-lib/src}/signals.rs (77%) diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 7ced8af44..901f9c878 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -62,6 +62,7 @@ jobs: cargo publish -p bittorrent-udp-tracker-core cargo publish -p bittorrent-udp-tracker-protocol cargo publish -p torrust-axum-server + cargo publish -p torrust-server-lib cargo publish -p torrust-tracker cargo publish -p torrust-tracker-api-client cargo publish -p torrust-tracker-api-core diff --git a/Cargo.lock b/Cargo.lock index d1e1ac7b0..a62a20619 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4348,7 +4348,19 @@ dependencies = [ "hyper-util", "pin-project-lite", "tokio", + "torrust-server-lib", "tower 0.4.13", + "tracing", +] + +[[package]] +name = "torrust-server-lib" +version = "3.0.0-develop" +dependencies = [ + "derive_more", + "tokio", + "tower-http", + "tracing", ] [[package]] @@ -4376,15 +4388,12 @@ dependencies = [ "figment", "futures", "futures-util", - "http-body", "hyper", - "hyper-util", "lazy_static", "local-ip-address", "mockall", "parking_lot", "percent-encoding", - "pin-project-lite", "r2d2", "r2d2_mysql", "r2d2_sqlite", @@ -4401,6 +4410,7 @@ dependencies = [ "thiserror 2.0.11", "tokio", "torrust-axum-server", + "torrust-server-lib", "torrust-tracker-api-client", "torrust-tracker-api-core", "torrust-tracker-clock", diff --git a/Cargo.toml b/Cargo.toml index 4283baab5..22df92b2d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -54,13 +54,10 @@ derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } figment = "0" futures = "0" futures-util = "0" -http-body = "1" hyper = "1" -hyper-util = { version = "0", features = ["http1", "http2", "tokio"] } lazy_static = "1" parking_lot = "0" percent-encoding = "2" -pin-project-lite = "0" r2d2 = "0" 
r2d2_mysql = "25" r2d2_sqlite = { version = "0", features = ["bundled"] } @@ -77,6 +74,7 @@ serde_with = { version = "3", features = ["json"] } thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-axum-server = { version = "3.0.0-develop", path = "packages/axum-server" } +torrust-server-lib = { version = "3.0.0-develop", path = "packages/server-lib" } torrust-tracker-api-core = { version = "3.0.0-develop", path = "packages/tracker-api-core" } torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/configuration" } diff --git a/packages/axum-server/Cargo.toml b/packages/axum-server/Cargo.toml index 8a4b76998..6604a0555 100644 --- a/packages/axum-server/Cargo.toml +++ b/packages/axum-server/Cargo.toml @@ -4,7 +4,7 @@ description = "A wrapper for the Axum server for Torrust HTTP servers to add tim documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["axum", "server", "torrust", "torrust", "wrapper"] +keywords = ["axum", "server", "torrust", "wrapper"] license.workspace = true name = "torrust-axum-server" publish.workspace = true @@ -21,6 +21,8 @@ hyper = "1" hyper-util = { version = "0", features = ["http1", "http2", "tokio"] } pin-project-lite = "0" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } tower = { version = "0", features = ["timeout"] } +tracing = "0" [dev-dependencies] diff --git a/packages/axum-server/src/lib.rs b/packages/axum-server/src/lib.rs index ace51c184..06de31d8c 100644 --- a/packages/axum-server/src/lib.rs +++ b/packages/axum-server/src/lib.rs @@ -1 +1,2 @@ -pub mod custom_axum_server; \ No newline at end of file +pub mod custom_axum_server; +pub mod signals; diff --git a/packages/axum-server/src/signals.rs 
b/packages/axum-server/src/signals.rs new file mode 100644 index 000000000..af69cbb6e --- /dev/null +++ b/packages/axum-server/src/signals.rs @@ -0,0 +1,21 @@ +use std::time::Duration; + +use tokio::time::sleep; +use torrust_server_lib::signals::{shutdown_signal_with_message, Halted}; +use tracing::instrument; + +#[instrument(skip(handle, rx_halt, message))] +pub async fn graceful_shutdown(handle: axum_server::Handle, rx_halt: tokio::sync::oneshot::Receiver<Halted>, message: String) { + shutdown_signal_with_message(rx_halt, message).await; + + tracing::debug!("Sending graceful shutdown signal"); + handle.graceful_shutdown(Some(Duration::from_secs(90))); + + println!("!! shuting down in 90 seconds !!"); + + loop { + sleep(Duration::from_secs(1)).await; + + tracing::info!("remaining alive connections: {}", handle.connection_count()); + } +} diff --git a/packages/server-lib/Cargo.toml b/packages/server-lib/Cargo.toml new file mode 100644 index 000000000..b0e196d64 --- /dev/null +++ b/packages/server-lib/Cargo.toml @@ -0,0 +1,22 @@ +[package] +authors.workspace = true +description = "Common functionality used in all Torrust HTTP servers." 
+documentation.workspace = true +edition.workspace = true +homepage.workspace = true +keywords = ["lib", "server", "torrust"] +license.workspace = true +name = "torrust-server-lib" +publish.workspace = true +readme = "README.md" +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } +tracing = "0" + +[dev-dependencies] diff --git a/packages/server-lib/LICENSE b/packages/server-lib/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/server-lib/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. 
+ + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/packages/server-lib/README.md b/packages/server-lib/README.md new file mode 100644 index 000000000..820225a00 --- /dev/null +++ b/packages/server-lib/README.md @@ -0,0 +1,11 @@ +# Torrust Server Lib + +Common functionality used in all Torrust HTTP servers. + +## Documentation + +[Crate documentation](https://docs.rs/torrust-axum-server). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/server-lib/src/lib.rs b/packages/server-lib/src/lib.rs new file mode 100644 index 000000000..324041822 --- /dev/null +++ b/packages/server-lib/src/lib.rs @@ -0,0 +1,3 @@ +pub mod logging; +pub mod registar; +pub mod signals; diff --git a/src/servers/logging.rs b/packages/server-lib/src/logging.rs similarity index 100% rename from src/servers/logging.rs rename to packages/server-lib/src/logging.rs diff --git a/src/servers/registar.rs b/packages/server-lib/src/registar.rs similarity index 100% rename from src/servers/registar.rs rename to packages/server-lib/src/registar.rs diff --git a/src/servers/signals.rs b/packages/server-lib/src/signals.rs similarity index 77% rename from src/servers/signals.rs rename to packages/server-lib/src/signals.rs index b83dd5213..b5cff03c1 100644 --- a/src/servers/signals.rs +++ b/packages/server-lib/src/signals.rs @@ -1,8 +1,5 @@ //! This module contains functions to handle signals. 
-use std::time::Duration; - use derive_more::Display; -use tokio::time::sleep; use tracing::instrument; /// This is the message that the "launcher" spawned task receives from the main @@ -68,19 +65,3 @@ pub async fn shutdown_signal_with_message(rx_halt: tokio::sync::oneshot::Receive tracing::info!("{message}"); } - -#[instrument(skip(handle, rx_halt, message))] -pub async fn graceful_shutdown(handle: axum_server::Handle, rx_halt: tokio::sync::oneshot::Receiver, message: String) { - shutdown_signal_with_message(rx_halt, message).await; - - tracing::debug!("Sending graceful shutdown signal"); - handle.graceful_shutdown(Some(Duration::from_secs(90))); - - println!("!! shuting down in 90 seconds !!"); - - loop { - sleep(Duration::from_secs(1)).await; - - tracing::info!("remaining alive connections: {}", handle.connection_count()); - } -} diff --git a/src/app.rs b/src/app.rs index ad7524372..c13414b3b 100644 --- a/src/app.rs +++ b/src/app.rs @@ -24,13 +24,13 @@ use std::sync::Arc; use tokio::task::JoinHandle; +use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::Configuration; use tracing::instrument; use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; use crate::container::{AppContainer, HttpApiContainer, HttpTrackerContainer, UdpTrackerContainer}; use crate::servers; -use crate::servers::registar::Registar; /// # Panics /// diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs index b6250efcc..95c3bfc24 100644 --- a/src/bootstrap/jobs/health_check_api.rs +++ b/src/bootstrap/jobs/health_check_api.rs @@ -16,14 +16,14 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; +use torrust_server_lib::logging::STARTED_ON; +use torrust_server_lib::registar::ServiceRegistry; +use torrust_server_lib::signals::Halted; use torrust_tracker_configuration::HealthCheckApi; use tracing::instrument; use super::Started; use crate::servers::health_check_api::{server, 
HEALTH_CHECK_API_LOG_TARGET}; -use crate::servers::logging::STARTED_ON; -use crate::servers::registar::ServiceRegistry; -use crate::servers::signals::Halted; /// This function starts a new Health Check API server with the provided /// configuration. diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 83cc0ae02..38aeb4028 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -15,13 +15,13 @@ use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use tokio::task::JoinHandle; +use torrust_server_lib::registar::ServiceRegistrationForm; use tracing::instrument; use super::make_rust_tls; use crate::container::HttpTrackerContainer; use crate::servers::http::server::{HttpServer, Launcher}; use crate::servers::http::Version; -use crate::servers::registar::ServiceRegistrationForm; /// It starts a new HTTP server with the provided configuration and version. /// @@ -78,13 +78,13 @@ async fn start_v1( mod tests { use std::sync::Arc; + use torrust_server_lib::registar::Registar; use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; use crate::bootstrap::jobs::http_tracker::start_job; use crate::container::HttpTrackerContainer; use crate::servers::http::Version; - use crate::servers::registar::Registar; #[tokio::test] async fn it_should_start_http_tracker() { diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index cee6cbae2..1f43ee67c 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -25,6 +25,7 @@ use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use tokio::task::JoinHandle; +use torrust_server_lib::registar::ServiceRegistrationForm; use torrust_tracker_configuration::AccessTokens; use tracing::instrument; @@ -32,7 +33,6 @@ use super::make_rust_tls; use crate::container::HttpApiContainer; use 
crate::servers::apis::server::{ApiServer, Launcher}; use crate::servers::apis::Version; -use crate::servers::registar::ServiceRegistrationForm; /// This is the message that the "launcher" spawned task sends to the main /// application process to notify the API server was successfully started. @@ -97,13 +97,13 @@ async fn start_v1( mod tests { use std::sync::Arc; + use torrust_server_lib::registar::Registar; use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; use crate::bootstrap::jobs::tracker_apis::start_job; use crate::container::HttpApiContainer; use crate::servers::apis::Version; - use crate::servers::registar::Registar; #[tokio::test] async fn it_should_start_http_tracker() { diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 03fe396d6..c97e239ce 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -10,10 +10,10 @@ use std::sync::Arc; use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use tokio::task::JoinHandle; +use torrust_server_lib::registar::ServiceRegistrationForm; use tracing::instrument; use crate::container::UdpTrackerContainer; -use crate::servers::registar::ServiceRegistrationForm; use crate::servers::udp::server::spawner::Spawner; use crate::servers::udp::server::Server; diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs index 8f7f6059d..dd2fbdb53 100644 --- a/src/console/ci/e2e/logs_parser.rs +++ b/src/console/ci/e2e/logs_parser.rs @@ -2,10 +2,10 @@ use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use regex::Regex; use serde::{Deserialize, Serialize}; +use torrust_server_lib::logging::STARTED_ON; use crate::servers::health_check_api::HEALTH_CHECK_API_LOG_TARGET; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; -use crate::servers::logging::STARTED_ON; const INFO_THRESHOLD: &str = "INFO"; diff --git a/src/servers/apis/routes.rs 
b/src/servers/apis/routes.rs index 137975259..f21c59207 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -15,6 +15,7 @@ use axum::response::Response; use axum::routing::get; use axum::{middleware, BoxError, Router}; use hyper::{Request, StatusCode}; +use torrust_server_lib::logging::Latency; use torrust_tracker_configuration::{AccessTokens, DEFAULT_TIMEOUT}; use tower::timeout::TimeoutLayer; use tower::ServiceBuilder; @@ -31,7 +32,6 @@ use super::v1::context::health_check::handlers::health_check_handler; use super::v1::middlewares::auth::State; use crate::container::HttpApiContainer; use crate::servers::apis::API_LOG_TARGET; -use crate::servers::logging::Latency; /// Add all API routes to the router. #[instrument(skip(http_api_container, access_tokens))] diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 7a8087215..47473d964 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -34,6 +34,10 @@ use futures::future::BoxFuture; use thiserror::Error; use tokio::sync::oneshot::{Receiver, Sender}; use torrust_axum_server::custom_axum_server::{self, TimeoutAcceptor}; +use torrust_axum_server::signals::graceful_shutdown; +use torrust_server_lib::logging::STARTED_ON; +use torrust_server_lib::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; +use torrust_server_lib::signals::Halted; use torrust_tracker_configuration::AccessTokens; use tracing::{instrument, Level}; @@ -41,9 +45,6 @@ use super::routes::router; use crate::bootstrap::jobs::Started; use crate::container::HttpApiContainer; use crate::servers::apis::API_LOG_TARGET; -use crate::servers::logging::STARTED_ON; -use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; -use crate::servers::signals::{graceful_shutdown, Halted}; /// Errors that can occur when starting or stopping the API server. 
#[derive(Debug, Error)] @@ -294,13 +295,13 @@ impl Launcher { mod tests { use std::sync::Arc; + use torrust_server_lib::registar::Registar; use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; use crate::bootstrap::jobs::make_rust_tls; use crate::container::HttpApiContainer; use crate::servers::apis::server::{ApiServer, Launcher}; - use crate::servers::registar::Registar; #[tokio::test] async fn it_should_be_able_to_start_and_stop() { diff --git a/src/servers/health_check_api/handlers.rs b/src/servers/health_check_api/handlers.rs index fe65e996b..0af2ab05d 100644 --- a/src/servers/health_check_api/handlers.rs +++ b/src/servers/health_check_api/handlers.rs @@ -2,11 +2,11 @@ use std::collections::VecDeque; use axum::extract::State; use axum::Json; +use torrust_server_lib::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistry}; use tracing::{instrument, Level}; use super::resources::{CheckReport, Report}; use super::responses; -use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistry}; /// Endpoint for container health check. 
/// diff --git a/src/servers/health_check_api/server.rs b/src/servers/health_check_api/server.rs index 42111f507..06dd5af65 100644 --- a/src/servers/health_check_api/server.rs +++ b/src/servers/health_check_api/server.rs @@ -14,6 +14,10 @@ use futures::Future; use hyper::Request; use serde_json::json; use tokio::sync::oneshot::{Receiver, Sender}; +use torrust_axum_server::signals::graceful_shutdown; +use torrust_server_lib::logging::Latency; +use torrust_server_lib::registar::ServiceRegistry; +use torrust_server_lib::signals::Halted; use tower_http::classify::ServerErrorsFailureClass; use tower_http::compression::CompressionLayer; use tower_http::propagate_header::PropagateHeaderLayer; @@ -25,9 +29,6 @@ use tracing::{instrument, Level, Span}; use crate::bootstrap::jobs::Started; use crate::servers::health_check_api::handlers::health_check_handler; use crate::servers::health_check_api::HEALTH_CHECK_API_LOG_TARGET; -use crate::servers::logging::Latency; -use crate::servers::registar::ServiceRegistry; -use crate::servers::signals::{graceful_shutdown, Halted}; /// Starts Health Check API server. 
/// diff --git a/src/servers/http/server.rs b/src/servers/http/server.rs index 3de40e0b0..25a8e5635 100644 --- a/src/servers/http/server.rs +++ b/src/servers/http/server.rs @@ -8,15 +8,16 @@ use derive_more::Constructor; use futures::future::BoxFuture; use tokio::sync::oneshot::{Receiver, Sender}; use torrust_axum_server::custom_axum_server::{self, TimeoutAcceptor}; +use torrust_axum_server::signals::graceful_shutdown; +use torrust_server_lib::logging::STARTED_ON; +use torrust_server_lib::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; +use torrust_server_lib::signals::Halted; use tracing::instrument; use super::v1::routes::router; use crate::bootstrap::jobs::Started; use crate::container::HttpTrackerContainer; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; -use crate::servers::logging::STARTED_ON; -use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; -use crate::servers::signals::{graceful_shutdown, Halted}; /// Error that can occur when starting or stopping the HTTP server. 
/// @@ -238,13 +239,13 @@ pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { mod tests { use std::sync::Arc; + use torrust_server_lib::registar::Registar; use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; use crate::bootstrap::jobs::make_rust_tls; use crate::container::HttpTrackerContainer; use crate::servers::http::server::{HttpServer, Launcher}; - use crate::servers::registar::Registar; #[tokio::test] async fn it_should_be_able_to_start_and_stop() { diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index 73f4e5f29..5f2d95a8e 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -10,6 +10,7 @@ use axum::routing::get; use axum::{BoxError, Router}; use axum_client_ip::SecureClientIpSource; use hyper::{Request, StatusCode}; +use torrust_server_lib::logging::Latency; use torrust_tracker_configuration::DEFAULT_TIMEOUT; use tower::timeout::TimeoutLayer; use tower::ServiceBuilder; @@ -24,7 +25,6 @@ use tracing::{instrument, Level, Span}; use super::handlers::{announce, health_check, scrape}; use crate::container::HttpTrackerContainer; use crate::servers::http::HTTP_TRACKER_LOG_TARGET; -use crate::servers::logging::Latency; /// It adds the routes to the router. 
/// diff --git a/src/servers/mod.rs b/src/servers/mod.rs index f9ed2d10c..eb5e5fee7 100644 --- a/src/servers/mod.rs +++ b/src/servers/mod.rs @@ -2,7 +2,4 @@ pub mod apis; pub mod health_check_api; pub mod http; -pub mod logging; -pub mod registar; -pub mod signals; pub mod udp; diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index fb0033624..f85972721 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -9,14 +9,14 @@ use futures_util::StreamExt; use tokio::select; use tokio::sync::oneshot; use tokio::time::interval; +use torrust_server_lib::logging::STARTED_ON; +use torrust_server_lib::registar::ServiceHealthCheckJob; +use torrust_server_lib::signals::{shutdown_signal_with_message, Halted}; use tracing::instrument; use super::request_buffer::ActiveRequests; use crate::bootstrap::jobs::Started; use crate::container::UdpTrackerContainer; -use crate::servers::logging::STARTED_ON; -use crate::servers::registar::ServiceHealthCheckJob; -use crate::servers::signals::{shutdown_signal_with_message, Halted}; use crate::servers::udp::server::bound_socket::BoundSocket; use crate::servers::udp::server::processor::Processor; use crate::servers::udp::server::receiver::Receiver; diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index 2be568c89..85940e853 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -57,13 +57,13 @@ mod tests { use std::sync::Arc; use std::time::Duration; + use torrust_server_lib::registar::Registar; use torrust_tracker_test_helpers::configuration::ephemeral_public; use super::spawner::Spawner; use super::Server; use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; use crate::container::UdpTrackerContainer; - use crate::servers::registar::Registar; #[tokio::test] async fn it_should_be_able_to_start_and_stop() { diff --git a/src/servers/udp/server/spawner.rs b/src/servers/udp/server/spawner.rs index 
88ce5a245..34437cdfb 100644 --- a/src/servers/udp/server/spawner.rs +++ b/src/servers/udp/server/spawner.rs @@ -7,11 +7,11 @@ use derive_more::derive::Display; use derive_more::Constructor; use tokio::sync::oneshot; use tokio::task::JoinHandle; +use torrust_server_lib::signals::Halted; use super::launcher::Launcher; use crate::bootstrap::jobs::Started; use crate::container::UdpTrackerContainer; -use crate::servers::signals::Halted; #[derive(Constructor, Copy, Clone, Debug, Display)] #[display("(with socket): {bind_to}")] diff --git a/src/servers/udp/server/states.rs b/src/servers/udp/server/states.rs index c74c7f4db..123d7f8a5 100644 --- a/src/servers/udp/server/states.rs +++ b/src/servers/udp/server/states.rs @@ -7,14 +7,14 @@ use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use derive_more::derive::Display; use derive_more::Constructor; use tokio::task::JoinHandle; +use torrust_server_lib::registar::{ServiceRegistration, ServiceRegistrationForm}; +use torrust_server_lib::signals::Halted; use tracing::{instrument, Level}; use super::spawner::Spawner; use super::{Server, UdpError}; use crate::bootstrap::jobs::Started; use crate::container::UdpTrackerContainer; -use crate::servers::registar::{ServiceRegistration, ServiceRegistrationForm}; -use crate::servers::signals::Halted; use crate::servers::udp::server::launcher::Launcher; /// A UDP server instance controller with no UDP instance running. 
diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 02d6465e1..cc7574895 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -6,13 +6,13 @@ use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::Database; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::executor::block_on; +use torrust_server_lib::registar::Registar; use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; use torrust_tracker_configuration::Configuration; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; use torrust_tracker_lib::container::HttpApiContainer; use torrust_tracker_lib::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; -use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_primitives::peer; pub struct Environment diff --git a/tests/servers/health_check_api/contract.rs b/tests/servers/health_check_api/contract.rs index 2c7efd547..bf38e05a7 100644 --- a/tests/servers/health_check_api/contract.rs +++ b/tests/servers/health_check_api/contract.rs @@ -1,5 +1,5 @@ +use torrust_server_lib::registar::Registar; use torrust_tracker_lib::servers::health_check_api::resources::{Report, Status}; -use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_test_helpers::configuration; use crate::common::logging; diff --git a/tests/servers/health_check_api/environment.rs b/tests/servers/health_check_api/environment.rs index 17d87d666..e364a52cb 100644 --- a/tests/servers/health_check_api/environment.rs +++ b/tests/servers/health_check_api/environment.rs @@ -3,11 +3,11 @@ use std::sync::Arc; use tokio::sync::oneshot::{self, Sender}; use tokio::task::JoinHandle; +use torrust_server_lib::registar::Registar; +use torrust_server_lib::signals::{self, Halted}; use 
torrust_tracker_configuration::HealthCheckApi; use torrust_tracker_lib::bootstrap::jobs::Started; use torrust_tracker_lib::servers::health_check_api::{server, HEALTH_CHECK_API_LOG_TARGET}; -use torrust_tracker_lib::servers::registar::Registar; -use torrust_tracker_lib::servers::signals::{self, Halted}; #[derive(Debug)] pub enum Error { diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 6621bc6ee..2584c51c7 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -6,12 +6,12 @@ use bittorrent_tracker_core::databases::Database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist::manager::WhitelistManager; use futures::executor::block_on; +use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::Configuration; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; use torrust_tracker_lib::container::HttpTrackerContainer; use torrust_tracker_lib::servers::http::server::{HttpServer, Launcher, Running, Stopped}; -use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_primitives::peer; pub struct Environment { diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 7a6992583..67e119bb4 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -5,10 +5,10 @@ use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::databases::Database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::statistics; +use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{Configuration, DEFAULT_TIMEOUT}; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; use 
torrust_tracker_lib::container::UdpTrackerContainer; -use torrust_tracker_lib::servers::registar::Registar; use torrust_tracker_lib::servers::udp::server::spawner::Spawner; use torrust_tracker_lib::servers::udp::server::states::{Running, Stopped}; use torrust_tracker_lib::servers::udp::server::Server; From 3b5cf862a5172aac9da53d421a6e9055faa101e3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Feb 2025 11:09:11 +0000 Subject: [PATCH 288/802] refactor: [#1283] extract axum-http-tracker-server package --- .github/workflows/deployment.yaml | 3 + Cargo.lock | 37 +- Cargo.toml | 4 +- packages/axum-http-tracker-server/Cargo.toml | 41 ++ packages/axum-http-tracker-server/LICENSE | 661 ++++++++++++++++++ packages/axum-http-tracker-server/README.md | 11 + .../axum-http-tracker-server/src/container.rs | 17 + .../axum-http-tracker-server/src/lib.rs | 1 + .../axum-http-tracker-server/src}/server.rs | 86 ++- .../src}/test_helpers.rs | 0 .../src}/v1/extractors/announce_request.rs | 2 +- .../src}/v1/extractors/authentication_key.rs | 2 +- .../src}/v1/extractors/client_ip_sources.rs | 0 .../src}/v1/extractors/mod.rs | 0 .../src}/v1/extractors/scrape_request.rs | 2 +- .../src}/v1/handlers/announce.rs | 24 +- .../src}/v1/handlers/common/auth.rs | 0 .../src}/v1/handlers/common/mod.rs | 0 .../src}/v1/handlers/common/peer_ip.rs | 0 .../src}/v1/handlers/health_check.rs | 0 .../src}/v1/handlers/mod.rs | 0 .../src}/v1/handlers/scrape.rs | 18 +- .../axum-http-tracker-server/src}/v1/mod.rs | 0 .../src}/v1/routes.rs | 2 +- packages/axum-server/Cargo.toml | 4 + packages/axum-server/src/lib.rs | 1 + packages/axum-server/src/tsl.rs | 85 +++ packages/server-lib/src/signals.rs | 8 + src/app.rs | 12 +- src/bootstrap/jobs/health_check_api.rs | 3 +- src/bootstrap/jobs/http_tracker.rs | 14 +- src/bootstrap/jobs/mod.rs | 94 --- src/bootstrap/jobs/tracker_apis.rs | 2 +- src/console/ci/e2e/logs_parser.rs | 2 +- src/container.rs | 41 +- src/lib.rs | 10 +- src/servers/apis/server.rs | 5 +- 
src/servers/health_check_api/server.rs | 3 +- src/servers/mod.rs | 1 - src/servers/udp/server/launcher.rs | 3 +- src/servers/udp/server/spawner.rs | 3 +- src/servers/udp/server/states.rs | 3 +- tests/servers/api/environment.rs | 2 +- tests/servers/health_check_api/environment.rs | 3 +- tests/servers/http/environment.rs | 6 +- tests/servers/http/mod.rs | 6 +- tests/servers/http/v1/contract.rs | 2 +- 47 files changed, 1017 insertions(+), 207 deletions(-) create mode 100644 packages/axum-http-tracker-server/Cargo.toml create mode 100644 packages/axum-http-tracker-server/LICENSE create mode 100644 packages/axum-http-tracker-server/README.md create mode 100644 packages/axum-http-tracker-server/src/container.rs rename src/servers/http/mod.rs => packages/axum-http-tracker-server/src/lib.rs (99%) rename {src/servers/http => packages/axum-http-tracker-server/src}/server.rs (73%) rename {src/servers/http => packages/axum-http-tracker-server/src}/test_helpers.rs (100%) rename {src/servers/http => packages/axum-http-tracker-server/src}/v1/extractors/announce_request.rs (98%) rename {src/servers/http => packages/axum-http-tracker-server/src}/v1/extractors/authentication_key.rs (98%) rename {src/servers/http => packages/axum-http-tracker-server/src}/v1/extractors/client_ip_sources.rs (100%) rename {src/servers/http => packages/axum-http-tracker-server/src}/v1/extractors/mod.rs (100%) rename {src/servers/http => packages/axum-http-tracker-server/src}/v1/extractors/scrape_request.rs (99%) rename {src/servers/http => packages/axum-http-tracker-server/src}/v1/handlers/announce.rs (94%) rename {src/servers/http => packages/axum-http-tracker-server/src}/v1/handlers/common/auth.rs (100%) rename {src/servers/http => packages/axum-http-tracker-server/src}/v1/handlers/common/mod.rs (100%) rename {src/servers/http => packages/axum-http-tracker-server/src}/v1/handlers/common/peer_ip.rs (100%) rename {src/servers/http => packages/axum-http-tracker-server/src}/v1/handlers/health_check.rs 
(100%) rename {src/servers/http => packages/axum-http-tracker-server/src}/v1/handlers/mod.rs (100%) rename {src/servers/http => packages/axum-http-tracker-server/src}/v1/handlers/scrape.rs (95%) rename {src/servers/http => packages/axum-http-tracker-server/src}/v1/mod.rs (100%) rename {src/servers/http => packages/axum-http-tracker-server/src}/v1/routes.rs (99%) create mode 100644 packages/axum-server/src/tsl.rs diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 901f9c878..296752df4 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -61,6 +61,7 @@ jobs: cargo publish -p bittorrent-tracker-core cargo publish -p bittorrent-udp-tracker-core cargo publish -p bittorrent-udp-tracker-protocol + cargo publish -p torrust-axum-http-tracker-server cargo publish -p torrust-axum-server cargo publish -p torrust-torrust-server-lib cargo publish -p torrust-tracker @@ -74,3 +75,5 @@ jobs: cargo publish -p torrust-tracker-primitives cargo publish -p torrust-tracker-test-helpers cargo publish -p torrust-tracker-torrent-repository + + diff --git a/Cargo.lock b/Cargo.lock index a62a20619..23092e31e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4337,18 +4337,51 @@ dependencies = [ "winnow", ] +[[package]] +name = "torrust-axum-http-tracker-server" +version = "3.0.0-develop" +dependencies = [ + "aquatic_udp_protocol", + "axum", + "axum-client-ip", + "axum-server", + "bittorrent-http-tracker-core", + "bittorrent-http-tracker-protocol", + "bittorrent-primitives", + "bittorrent-tracker-core", + "derive_more", + "futures", + "hyper", + "reqwest", + "serde", + "thiserror 2.0.11", + "tokio", + "torrust-axum-server", + "torrust-server-lib", + "torrust-tracker-configuration", + "torrust-tracker-primitives", + "torrust-tracker-test-helpers", + "tower 0.4.13", + "tower-http", + "tracing", +] + [[package]] name = "torrust-axum-server" version = "3.0.0-develop" dependencies = [ "axum-server", + "camino", "futures-util", 
"http-body", "hyper", "hyper-util", "pin-project-lite", + "thiserror 2.0.11", "tokio", "torrust-server-lib", + "torrust-tracker-configuration", + "torrust-tracker-located-error", "tower 0.4.13", "tracing", ] @@ -4370,16 +4403,13 @@ dependencies = [ "anyhow", "aquatic_udp_protocol", "axum", - "axum-client-ip", "axum-extra", "axum-server", "bittorrent-http-tracker-core", - "bittorrent-http-tracker-protocol", "bittorrent-primitives", "bittorrent-tracker-client", "bittorrent-tracker-core", "bittorrent-udp-tracker-core", - "camino", "chrono", "clap", "crossbeam-skiplist", @@ -4409,6 +4439,7 @@ dependencies = [ "serde_with", "thiserror 2.0.11", "tokio", + "torrust-axum-http-tracker-server", "torrust-axum-server", "torrust-server-lib", "torrust-tracker-api-client", diff --git a/Cargo.toml b/Cargo.toml index 22df92b2d..4f8854d34 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,16 +36,13 @@ version = "3.0.0-develop" anyhow = "1" aquatic_udp_protocol = "0" axum = { version = "0", features = ["macros"] } -axum-client-ip = "0" axum-extra = { version = "0", features = ["query"] } axum-server = { version = "0", features = ["tls-rustls-no-provider"] } bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "packages/http-tracker-core" } -bittorrent-http-tracker-protocol = { version = "3.0.0-develop", path = "packages/http-protocol" } bittorrent-primitives = "0.1.0" bittorrent-tracker-client = { version = "3.0.0-develop", path = "packages/tracker-client" } bittorrent-tracker-core = { version = "3.0.0-develop", path = "packages/tracker-core" } bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "packages/udp-tracker-core" } -camino = { version = "1", features = ["serde", "serde1"] } chrono = { version = "0", default-features = false, features = ["clock"] } clap = { version = "4", features = ["derive", "env"] } crossbeam-skiplist = "0" @@ -73,6 +70,7 @@ serde_repr = "0" serde_with = { version = "3", features = ["json"] } thiserror = "2" tokio = { version = 
"1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-axum-http-tracker-server = { version = "3.0.0-develop", path = "packages/axum-http-tracker-server" } torrust-axum-server = { version = "3.0.0-develop", path = "packages/axum-server" } torrust-server-lib = { version = "3.0.0-develop", path = "packages/server-lib" } torrust-tracker-api-core = { version = "3.0.0-develop", path = "packages/tracker-api-core" } diff --git a/packages/axum-http-tracker-server/Cargo.toml b/packages/axum-http-tracker-server/Cargo.toml new file mode 100644 index 000000000..b47ea23ce --- /dev/null +++ b/packages/axum-http-tracker-server/Cargo.toml @@ -0,0 +1,41 @@ +[package] +authors.workspace = true +description = "The Torrust Bittorrent HTTP tracker." +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +keywords = ["axum", "bittorrent", "http", "server", "torrust", "tracker"] +license.workspace = true +name = "torrust-axum-http-tracker-server" +publish.workspace = true +readme = "README.md" +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +aquatic_udp_protocol = "0" +axum = { version = "0", features = ["macros"] } +axum-client-ip = "0" +axum-server = { version = "0", features = ["tls-rustls-no-provider"] } +bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "../http-tracker-core" } +bittorrent-http-tracker-protocol = { version = "3.0.0-develop", path = "../http-protocol" } +bittorrent-primitives = "0.1.0" +bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } +derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } +futures = "0" +hyper = "1" +reqwest = { version = "0", features = ["json"] } +serde = { version = "1", features = ["derive"] } +thiserror = "2" +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-axum-server = { version = "3.0.0-develop", path = 
"../axum-server" } +torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } +torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +tower = { version = "0", features = ["timeout"] } +tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } +tracing = "0" + +[dev-dependencies] +torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } diff --git a/packages/axum-http-tracker-server/LICENSE b/packages/axum-http-tracker-server/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/axum-http-tracker-server/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". 
"Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<https://www.gnu.org/licenses/>. diff --git a/packages/axum-http-tracker-server/README.md b/packages/axum-http-tracker-server/README.md new file mode 100644 index 000000000..b7286d157 --- /dev/null +++ b/packages/axum-http-tracker-server/README.md @@ -0,0 +1,11 @@ +# Torrust Axum HTTP Tracker + +The Torrust Bittorrent HTTP tracker. + +## Documentation + +[Crate documentation](https://docs.rs/torrust-axum-server). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/axum-http-tracker-server/src/container.rs b/packages/axum-http-tracker-server/src/container.rs new file mode 100644 index 000000000..c20a8f28f --- /dev/null +++ b/packages/axum-http-tracker-server/src/container.rs @@ -0,0 +1,17 @@ +use std::sync::Arc; + +use bittorrent_tracker_core::announce_handler::AnnounceHandler; +use bittorrent_tracker_core::authentication::service::AuthenticationService; +use bittorrent_tracker_core::scrape_handler::ScrapeHandler; +use bittorrent_tracker_core::whitelist; +use torrust_tracker_configuration::{Core, HttpTracker}; + +pub struct HttpTrackerContainer { + pub core_config: Arc, + pub http_tracker_config: Arc, + pub announce_handler: Arc, + pub scrape_handler: Arc, + pub whitelist_authorization: Arc, + pub http_stats_event_sender: Arc>>, + pub authentication_service: Arc, +} diff --git a/src/servers/http/mod.rs b/packages/axum-http-tracker-server/src/lib.rs similarity index 99% rename from src/servers/http/mod.rs rename to packages/axum-http-tracker-server/src/lib.rs index 395f633cf..fd2aa8506 100644 --- a/src/servers/http/mod.rs +++ 
b/packages/axum-http-tracker-server/src/lib.rs @@ -305,6 +305,7 @@ //! - [Bencode to Json Online converter](https://chocobo1.github.io/bencode_online). use serde::{Deserialize, Serialize}; +pub mod container; pub mod server; pub mod test_helpers; pub mod v1; diff --git a/src/servers/http/server.rs b/packages/axum-http-tracker-server/src/server.rs similarity index 73% rename from src/servers/http/server.rs rename to packages/axum-http-tracker-server/src/server.rs index 25a8e5635..39969907b 100644 --- a/src/servers/http/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -11,13 +11,12 @@ use torrust_axum_server::custom_axum_server::{self, TimeoutAcceptor}; use torrust_axum_server::signals::graceful_shutdown; use torrust_server_lib::logging::STARTED_ON; use torrust_server_lib::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; -use torrust_server_lib::signals::Halted; +use torrust_server_lib::signals::{Halted, Started}; use tracing::instrument; use super::v1::routes::router; -use crate::bootstrap::jobs::Started; use crate::container::HttpTrackerContainer; -use crate::servers::http::HTTP_TRACKER_LOG_TARGET; +use crate::HTTP_TRACKER_LOG_TARGET; /// Error that can occur when starting or stopping the HTTP server. 
/// @@ -239,33 +238,92 @@ pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { mod tests { use std::sync::Arc; + use bittorrent_tracker_core::announce_handler::AnnounceHandler; + use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; + use bittorrent_tracker_core::authentication::service; + use bittorrent_tracker_core::databases::setup::initialize_database; + use bittorrent_tracker_core::scrape_handler::ScrapeHandler; + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; + use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; + use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; - use crate::bootstrap::jobs::make_rust_tls; use crate::container::HttpTrackerContainer; - use crate::servers::http::server::{HttpServer, Launcher}; + use crate::server::{HttpServer, Launcher}; + + pub fn initialize_container(configuration: &Configuration) -> HttpTrackerContainer { + let core_config = Arc::new(configuration.core.clone()); + + let http_trackers = configuration + .http_trackers + .clone() + .expect("missing HTTP trackers configuration"); + + let http_tracker_config = &http_trackers[0]; + + let http_tracker_config = Arc::new(http_tracker_config.clone()); + + // HTTP stats + let (http_stats_event_sender, _http_stats_repository) = + bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); + let http_stats_event_sender = Arc::new(http_stats_event_sender); + + let database = 
initialize_database(&configuration.core); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&configuration.core, &in_memory_whitelist.clone())); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication_service = Arc::new(service::AuthenticationService::new( + &configuration.core, + &in_memory_key_repository, + )); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + + let announce_handler = Arc::new(AnnounceHandler::new( + &configuration.core, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + HttpTrackerContainer { + core_config, + http_tracker_config, + announce_handler, + scrape_handler, + whitelist_authorization, + http_stats_event_sender, + authentication_service, + } + } #[tokio::test] async fn it_should_be_able_to_start_and_stop() { - let cfg = Arc::new(ephemeral_public()); + let configuration = Arc::new(ephemeral_public()); - initialize_global_services(&cfg); + let http_trackers = configuration + .http_trackers + .clone() + .expect("missing HTTP trackers configuration"); - let app_container = Arc::new(initialize_app_container(&cfg)); - - let http_trackers = cfg.http_trackers.clone().expect("missing HTTP trackers configuration"); let http_tracker_config = &http_trackers[0]; + + //initialize_global_services(&cfg); // not needed for this test + + let http_tracker_container = Arc::new(initialize_container(&configuration)); + let bind_to = http_tracker_config.bind_address; let tls = make_rust_tls(&http_tracker_config.tsl_config) .await .map(|tls| tls.expect("tls config failed")); - let http_tracker_config = Arc::new(http_tracker_config.clone()); - let 
http_tracker_container = Arc::new(HttpTrackerContainer::from_app_container(&http_tracker_config, &app_container)); - let register = &Registar::default(); let stopped = HttpServer::new(Launcher::new(bind_to, tls)); diff --git a/src/servers/http/test_helpers.rs b/packages/axum-http-tracker-server/src/test_helpers.rs similarity index 100% rename from src/servers/http/test_helpers.rs rename to packages/axum-http-tracker-server/src/test_helpers.rs diff --git a/src/servers/http/v1/extractors/announce_request.rs b/packages/axum-http-tracker-server/src/v1/extractors/announce_request.rs similarity index 98% rename from src/servers/http/v1/extractors/announce_request.rs rename to packages/axum-http-tracker-server/src/v1/extractors/announce_request.rs index 3265d04cd..57001a47e 100644 --- a/src/servers/http/v1/extractors/announce_request.rs +++ b/packages/axum-http-tracker-server/src/v1/extractors/announce_request.rs @@ -109,7 +109,7 @@ mod tests { assert_eq!( announce, Announce { - info_hash: InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(), + info_hash: InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(), // DevSkim: ignore DS173237 peer_id: PeerId(*b"-qB00000000000000001"), port: 17548, downloaded: Some(NumberOfBytes::new(0)), diff --git a/src/servers/http/v1/extractors/authentication_key.rs b/packages/axum-http-tracker-server/src/v1/extractors/authentication_key.rs similarity index 98% rename from src/servers/http/v1/extractors/authentication_key.rs rename to packages/axum-http-tracker-server/src/v1/extractors/authentication_key.rs index da4cd2217..89781f48b 100644 --- a/src/servers/http/v1/extractors/authentication_key.rs +++ b/packages/axum-http-tracker-server/src/v1/extractors/authentication_key.rs @@ -54,7 +54,7 @@ use bittorrent_tracker_core::authentication::Key; use hyper::StatusCode; use serde::Deserialize; -use crate::servers::http::v1::handlers::common::auth; +use crate::v1::handlers::common::auth; /// Extractor for the 
[`Key`] struct. pub struct Extract(pub Key); diff --git a/src/servers/http/v1/extractors/client_ip_sources.rs b/packages/axum-http-tracker-server/src/v1/extractors/client_ip_sources.rs similarity index 100% rename from src/servers/http/v1/extractors/client_ip_sources.rs rename to packages/axum-http-tracker-server/src/v1/extractors/client_ip_sources.rs diff --git a/src/servers/http/v1/extractors/mod.rs b/packages/axum-http-tracker-server/src/v1/extractors/mod.rs similarity index 100% rename from src/servers/http/v1/extractors/mod.rs rename to packages/axum-http-tracker-server/src/v1/extractors/mod.rs diff --git a/src/servers/http/v1/extractors/scrape_request.rs b/packages/axum-http-tracker-server/src/v1/extractors/scrape_request.rs similarity index 99% rename from src/servers/http/v1/extractors/scrape_request.rs rename to packages/axum-http-tracker-server/src/v1/extractors/scrape_request.rs index 66442da95..33a998ff2 100644 --- a/src/servers/http/v1/extractors/scrape_request.rs +++ b/packages/axum-http-tracker-server/src/v1/extractors/scrape_request.rs @@ -100,7 +100,7 @@ mod tests { fn test_info_hash() -> TestInfoHash { TestInfoHash { bencoded: "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0".to_owned(), - value: InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(), + value: InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(), // DevSkim: ignore DS173237 } } diff --git a/src/servers/http/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs similarity index 94% rename from src/servers/http/v1/handlers/announce.rs rename to packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 1aa062faa..43122f8bd 100644 --- a/src/servers/http/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -21,9 +21,9 @@ use hyper::StatusCode; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; -use 
crate::servers::http::v1::extractors::announce_request::ExtractRequest; -use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; -use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; +use crate::v1::extractors::announce_request::ExtractRequest; +use crate::v1::extractors::authentication_key::Extract as ExtractKey; +use crate::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; /// It handles the `announce` request when the HTTP tracker does not require /// authentication (no PATH `key` parameter required). @@ -199,7 +199,7 @@ mod tests { use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_test_helpers::configuration; - use crate::servers::http::test_helpers::tests::sample_info_hash; + use crate::test_helpers::tests::sample_info_hash; struct CoreTrackerServices { pub core_config: Arc, @@ -296,8 +296,8 @@ mod tests { use bittorrent_tracker_core::authentication; use super::{initialize_private_tracker, sample_announce_request, sample_client_ip_sources}; - use crate::servers::http::v1::handlers::announce::handle_announce; - use crate::servers::http::v1::handlers::announce::tests::assert_error_response; + use crate::v1::handlers::announce::handle_announce; + use crate::v1::handlers::announce::tests::assert_error_response; #[tokio::test] async fn it_should_fail_when_the_authentication_key_is_missing() { @@ -352,8 +352,8 @@ mod tests { mod with_tracker_in_listed_mode { use super::{initialize_listed_tracker, sample_announce_request, sample_client_ip_sources}; - use crate::servers::http::v1::handlers::announce::handle_announce; - use crate::servers::http::v1::handlers::announce::tests::assert_error_response; + use crate::v1::handlers::announce::handle_announce; + use crate::v1::handlers::announce::tests::assert_error_response; #[tokio::test] async fn it_should_fail_when_the_announced_torrent_is_not_whitelisted() { @@ -389,8 +389,8 @@ mod tests { use 
bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; use super::{initialize_tracker_on_reverse_proxy, sample_announce_request}; - use crate::servers::http::v1::handlers::announce::handle_announce; - use crate::servers::http::v1::handlers::announce::tests::assert_error_response; + use crate::v1::handlers::announce::handle_announce; + use crate::v1::handlers::announce::tests::assert_error_response; #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { @@ -426,8 +426,8 @@ mod tests { use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; use super::{initialize_tracker_not_on_reverse_proxy, sample_announce_request}; - use crate::servers::http::v1::handlers::announce::handle_announce; - use crate::servers::http::v1::handlers::announce::tests::assert_error_response; + use crate::v1::handlers::announce::handle_announce; + use crate::v1::handlers::announce::tests::assert_error_response; #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { diff --git a/src/servers/http/v1/handlers/common/auth.rs b/packages/axum-http-tracker-server/src/v1/handlers/common/auth.rs similarity index 100% rename from src/servers/http/v1/handlers/common/auth.rs rename to packages/axum-http-tracker-server/src/v1/handlers/common/auth.rs diff --git a/src/servers/http/v1/handlers/common/mod.rs b/packages/axum-http-tracker-server/src/v1/handlers/common/mod.rs similarity index 100% rename from src/servers/http/v1/handlers/common/mod.rs rename to packages/axum-http-tracker-server/src/v1/handlers/common/mod.rs diff --git a/src/servers/http/v1/handlers/common/peer_ip.rs b/packages/axum-http-tracker-server/src/v1/handlers/common/peer_ip.rs similarity index 100% rename from src/servers/http/v1/handlers/common/peer_ip.rs rename to packages/axum-http-tracker-server/src/v1/handlers/common/peer_ip.rs diff --git 
a/src/servers/http/v1/handlers/health_check.rs b/packages/axum-http-tracker-server/src/v1/handlers/health_check.rs similarity index 100% rename from src/servers/http/v1/handlers/health_check.rs rename to packages/axum-http-tracker-server/src/v1/handlers/health_check.rs diff --git a/src/servers/http/v1/handlers/mod.rs b/packages/axum-http-tracker-server/src/v1/handlers/mod.rs similarity index 100% rename from src/servers/http/v1/handlers/mod.rs rename to packages/axum-http-tracker-server/src/v1/handlers/mod.rs diff --git a/src/servers/http/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs similarity index 95% rename from src/servers/http/v1/handlers/scrape.rs rename to packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index 02a5b3136..a4e20cc6f 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -19,9 +19,9 @@ use hyper::StatusCode; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; -use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; -use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; -use crate::servers::http::v1::extractors::scrape_request::ExtractRequest; +use crate::v1::extractors::authentication_key::Extract as ExtractKey; +use crate::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; +use crate::v1::extractors::scrape_request::ExtractRequest; /// It handles the `scrape` request when the HTTP tracker is configured /// to run in `public` mode. 
@@ -234,7 +234,7 @@ mod tests { use torrust_tracker_primitives::core::ScrapeData; use super::{initialize_private_tracker, sample_client_ip_sources, sample_scrape_request}; - use crate::servers::http::v1::handlers::scrape::handle_scrape; + use crate::v1::handlers::scrape::handle_scrape; #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_missing() { @@ -291,7 +291,7 @@ mod tests { use torrust_tracker_primitives::core::ScrapeData; use super::{initialize_listed_tracker, sample_client_ip_sources, sample_scrape_request}; - use crate::servers::http::v1::handlers::scrape::handle_scrape; + use crate::v1::handlers::scrape::handle_scrape; #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_torrent_is_not_whitelisted() { @@ -322,8 +322,8 @@ mod tests { use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; use super::{initialize_tracker_on_reverse_proxy, sample_scrape_request}; - use crate::servers::http::v1::handlers::scrape::handle_scrape; - use crate::servers::http::v1::handlers::scrape::tests::assert_error_response; + use crate::v1::handlers::scrape::handle_scrape; + use crate::v1::handlers::scrape::tests::assert_error_response; #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { @@ -358,8 +358,8 @@ mod tests { use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; use super::{initialize_tracker_not_on_reverse_proxy, sample_scrape_request}; - use crate::servers::http::v1::handlers::scrape::handle_scrape; - use crate::servers::http::v1::handlers::scrape::tests::assert_error_response; + use crate::v1::handlers::scrape::handle_scrape; + use crate::v1::handlers::scrape::tests::assert_error_response; #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { diff --git a/src/servers/http/v1/mod.rs b/packages/axum-http-tracker-server/src/v1/mod.rs 
similarity index 100% rename from src/servers/http/v1/mod.rs rename to packages/axum-http-tracker-server/src/v1/mod.rs diff --git a/src/servers/http/v1/routes.rs b/packages/axum-http-tracker-server/src/v1/routes.rs similarity index 99% rename from src/servers/http/v1/routes.rs rename to packages/axum-http-tracker-server/src/v1/routes.rs index 5f2d95a8e..2d530f633 100644 --- a/src/servers/http/v1/routes.rs +++ b/packages/axum-http-tracker-server/src/v1/routes.rs @@ -24,7 +24,7 @@ use tracing::{instrument, Level, Span}; use super::handlers::{announce, health_check, scrape}; use crate::container::HttpTrackerContainer; -use crate::servers::http::HTTP_TRACKER_LOG_TARGET; +use crate::HTTP_TRACKER_LOG_TARGET; /// It adds the routes to the router. /// diff --git a/packages/axum-server/Cargo.toml b/packages/axum-server/Cargo.toml index 6604a0555..a60bab885 100644 --- a/packages/axum-server/Cargo.toml +++ b/packages/axum-server/Cargo.toml @@ -15,13 +15,17 @@ version.workspace = true [dependencies] axum-server = { version = "0", features = ["tls-rustls-no-provider"] } +camino = { version = "1", features = ["serde", "serde1"] } futures-util = "0" http-body = "1" hyper = "1" hyper-util = { version = "0", features = ["http1", "http2", "tokio"] } pin-project-lite = "0" +thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } +torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } tower = { version = "0", features = ["timeout"] } tracing = "0" diff --git a/packages/axum-server/src/lib.rs b/packages/axum-server/src/lib.rs index 06de31d8c..88bf25f19 100644 --- a/packages/axum-server/src/lib.rs +++ b/packages/axum-server/src/lib.rs @@ -1,2 +1,3 @@ pub mod custom_axum_server; pub mod signals; +pub mod tsl; diff --git 
a/packages/axum-server/src/tsl.rs b/packages/axum-server/src/tsl.rs new file mode 100644 index 000000000..5d68b5b4c --- /dev/null +++ b/packages/axum-server/src/tsl.rs @@ -0,0 +1,85 @@ +use std::panic::Location; +use std::sync::Arc; + +use axum_server::tls_rustls::RustlsConfig; +use thiserror::Error; +use torrust_tracker_configuration::TslConfig; +use torrust_tracker_located_error::{DynError, LocatedError}; +use tracing::instrument; + +/// Error returned by the Bootstrap Process. +#[derive(Error, Debug)] +pub enum Error { + /// Enabled tls but missing config. + #[error("tls config missing")] + MissingTlsConfig { location: &'static Location<'static> }, + + /// Unable to parse tls Config. + #[error("bad tls config: {source}")] + BadTlsConfig { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, +} + +#[instrument(skip(opt_tsl_config))] +pub async fn make_rust_tls(opt_tsl_config: &Option) -> Option> { + match opt_tsl_config { + Some(tsl_config) => { + let cert = tsl_config.ssl_cert_path.clone(); + let key = tsl_config.ssl_key_path.clone(); + + if !cert.exists() || !key.exists() { + return Some(Err(Error::MissingTlsConfig { + location: Location::caller(), + })); + } + + tracing::info!("Using https: cert path: {cert}."); + tracing::info!("Using https: key path: {key}."); + + Some( + RustlsConfig::from_pem_file(cert, key) + .await + .map_err(|err| Error::BadTlsConfig { + source: (Arc::new(err) as DynError).into(), + }), + ) + } + None => None, + } +} + +#[cfg(test)] +mod tests { + + use camino::Utf8PathBuf; + use torrust_tracker_configuration::TslConfig; + + use super::{make_rust_tls, Error}; + + #[tokio::test] + async fn it_should_error_on_bad_tls_config() { + let err = make_rust_tls(&Some(TslConfig { + ssl_cert_path: Utf8PathBuf::from("bad cert path"), + ssl_key_path: Utf8PathBuf::from("bad key path"), + })) + .await + .expect("tls_was_enabled") + .expect_err("bad_cert_and_key_files"); + + assert!(matches!(err, Error::MissingTlsConfig { location: 
_ })); + } + + #[tokio::test] + async fn it_should_error_on_missing_cert_or_key_paths() { + let err = make_rust_tls(&Some(TslConfig { + ssl_cert_path: Utf8PathBuf::from(""), + ssl_key_path: Utf8PathBuf::from(""), + })) + .await + .expect("tls_was_enabled") + .expect_err("missing_config"); + + assert!(matches!(err, Error::MissingTlsConfig { location: _ })); + } +} diff --git a/packages/server-lib/src/signals.rs b/packages/server-lib/src/signals.rs index b5cff03c1..63f7554c8 100644 --- a/packages/server-lib/src/signals.rs +++ b/packages/server-lib/src/signals.rs @@ -2,6 +2,14 @@ use derive_more::Display; use tracing::instrument; +/// This is the message that the "launcher" spawned task sends to the main +/// application process to notify the service was successfully started. +/// +#[derive(Debug)] +pub struct Started { + pub address: std::net::SocketAddr, +} + /// This is the message that the "launcher" spawned task receives from the main /// application process to notify the service to shutdown. 
/// diff --git a/src/app.rs b/src/app.rs index c13414b3b..2f712cf3a 100644 --- a/src/app.rs +++ b/src/app.rs @@ -29,7 +29,7 @@ use torrust_tracker_configuration::Configuration; use tracing::instrument; use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; -use crate::container::{AppContainer, HttpApiContainer, HttpTrackerContainer, UdpTrackerContainer}; +use crate::container::{AppContainer, HttpApiContainer, UdpTrackerContainer}; use crate::servers; /// # Panics @@ -92,10 +92,14 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> if let Some(http_trackers) = &config.http_trackers { for http_tracker_config in http_trackers { let http_tracker_config = Arc::new(http_tracker_config.clone()); - let http_tracker_container = Arc::new(HttpTrackerContainer::from_app_container(&http_tracker_config, app_container)); + let http_tracker_container = Arc::new(app_container.http_tracker_container(&http_tracker_config)); - if let Some(job) = - http_tracker::start_job(http_tracker_container, registar.give_form(), servers::http::Version::V1).await + if let Some(job) = http_tracker::start_job( + http_tracker_container, + registar.give_form(), + torrust_axum_http_tracker_server::Version::V1, + ) + .await { jobs.push(job); } diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs index 95c3bfc24..d64ca0073 100644 --- a/src/bootstrap/jobs/health_check_api.rs +++ b/src/bootstrap/jobs/health_check_api.rs @@ -18,11 +18,10 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_server_lib::logging::STARTED_ON; use torrust_server_lib::registar::ServiceRegistry; -use torrust_server_lib::signals::Halted; +use torrust_server_lib::signals::{Halted, Started}; use torrust_tracker_configuration::HealthCheckApi; use tracing::instrument; -use super::Started; use crate::servers::health_check_api::{server, HEALTH_CHECK_API_LOG_TARGET}; /// This function starts a new Health Check API 
server with the provided diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 38aeb4028..2052bf50b 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -15,14 +15,13 @@ use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use tokio::task::JoinHandle; +use torrust_axum_http_tracker_server::container::HttpTrackerContainer; +use torrust_axum_http_tracker_server::server::{HttpServer, Launcher}; +use torrust_axum_http_tracker_server::Version; +use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::ServiceRegistrationForm; use tracing::instrument; -use super::make_rust_tls; -use crate::container::HttpTrackerContainer; -use crate::servers::http::server::{HttpServer, Launcher}; -use crate::servers::http::Version; - /// It starts a new HTTP server with the provided configuration and version. /// /// Right now there is only one version but in the future we could support more than one HTTP tracker version at the same time. 
@@ -78,13 +77,12 @@ async fn start_v1( mod tests { use std::sync::Arc; + use torrust_axum_http_tracker_server::Version; use torrust_server_lib::registar::Registar; use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; use crate::bootstrap::jobs::http_tracker::start_job; - use crate::container::HttpTrackerContainer; - use crate::servers::http::Version; #[tokio::test] async fn it_should_start_http_tracker() { @@ -96,7 +94,7 @@ mod tests { let app_container = Arc::new(initialize_app_container(&cfg)); - let http_tracker_container = Arc::new(HttpTrackerContainer::from_app_container(&http_tracker_config, &app_container)); + let http_tracker_container = Arc::new(app_container.http_tracker_container(&http_tracker_config)); let version = Version::V1; diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index 6e18ec3ba..8c85ba45b 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -11,97 +11,3 @@ pub mod http_tracker; pub mod torrent_cleanup; pub mod tracker_apis; pub mod udp_tracker; - -/// This is the message that the "launcher" spawned task sends to the main -/// application process to notify the service was successfully started. 
-/// -#[derive(Debug)] -pub struct Started { - pub address: std::net::SocketAddr, -} - -#[instrument(skip(opt_tsl_config))] -pub async fn make_rust_tls(opt_tsl_config: &Option) -> Option> { - match opt_tsl_config { - Some(tsl_config) => { - let cert = tsl_config.ssl_cert_path.clone(); - let key = tsl_config.ssl_key_path.clone(); - - if !cert.exists() || !key.exists() { - return Some(Err(Error::MissingTlsConfig { - location: Location::caller(), - })); - } - - tracing::info!("Using https: cert path: {cert}."); - tracing::info!("Using https: key path: {key}."); - - Some( - RustlsConfig::from_pem_file(cert, key) - .await - .map_err(|err| Error::BadTlsConfig { - source: (Arc::new(err) as DynError).into(), - }), - ) - } - None => None, - } -} - -#[cfg(test)] -mod tests { - - use camino::Utf8PathBuf; - use torrust_tracker_configuration::TslConfig; - - use super::{make_rust_tls, Error}; - - #[tokio::test] - async fn it_should_error_on_bad_tls_config() { - let err = make_rust_tls(&Some(TslConfig { - ssl_cert_path: Utf8PathBuf::from("bad cert path"), - ssl_key_path: Utf8PathBuf::from("bad key path"), - })) - .await - .expect("tls_was_enabled") - .expect_err("bad_cert_and_key_files"); - - assert!(matches!(err, Error::MissingTlsConfig { location: _ })); - } - - #[tokio::test] - async fn it_should_error_on_missing_cert_or_key_paths() { - let err = make_rust_tls(&Some(TslConfig { - ssl_cert_path: Utf8PathBuf::from(""), - ssl_key_path: Utf8PathBuf::from(""), - })) - .await - .expect("tls_was_enabled") - .expect_err("missing_config"); - - assert!(matches!(err, Error::MissingTlsConfig { location: _ })); - } -} - -use std::panic::Location; -use std::sync::Arc; - -use axum_server::tls_rustls::RustlsConfig; -use thiserror::Error; -use torrust_tracker_configuration::TslConfig; -use torrust_tracker_located_error::{DynError, LocatedError}; -use tracing::instrument; - -/// Error returned by the Bootstrap Process. 
-#[derive(Error, Debug)] -pub enum Error { - /// Enabled tls but missing config. - #[error("tls config missing")] - MissingTlsConfig { location: &'static Location<'static> }, - - /// Unable to parse tls Config. - #[error("bad tls config: {source}")] - BadTlsConfig { - source: LocatedError<'static, dyn std::error::Error + Send + Sync>, - }, -} diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 1f43ee67c..df736f23f 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -25,11 +25,11 @@ use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use tokio::task::JoinHandle; +use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::ServiceRegistrationForm; use torrust_tracker_configuration::AccessTokens; use tracing::instrument; -use super::make_rust_tls; use crate::container::HttpApiContainer; use crate::servers::apis::server::{ApiServer, Launcher}; use crate::servers::apis::Version; diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs index dd2fbdb53..fdbe5d9c0 100644 --- a/src/console/ci/e2e/logs_parser.rs +++ b/src/console/ci/e2e/logs_parser.rs @@ -2,10 +2,10 @@ use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use regex::Regex; use serde::{Deserialize, Serialize}; +use torrust_axum_http_tracker_server::HTTP_TRACKER_LOG_TARGET; use torrust_server_lib::logging::STARTED_ON; use crate::servers::health_check_api::HEALTH_CHECK_API_LOG_TARGET; -use crate::servers::http::HTTP_TRACKER_LOG_TARGET; const INFO_THRESHOLD: &str = "INFO"; diff --git a/src/container.rs b/src/container.rs index 0f4a840cf..57e24334b 100644 --- a/src/container.rs +++ b/src/container.rs @@ -14,6 +14,7 @@ use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; +use 
torrust_axum_http_tracker_server::container::HttpTrackerContainer; use torrust_tracker_configuration::{Core, HttpApi, HttpTracker, UdpTracker}; pub struct AppContainer { @@ -36,6 +37,21 @@ pub struct AppContainer { pub torrents_manager: Arc, } +impl AppContainer { + #[must_use] + pub fn http_tracker_container(&self, http_tracker_config: &Arc) -> HttpTrackerContainer { + HttpTrackerContainer { + http_tracker_config: http_tracker_config.clone(), + core_config: self.core_config.clone(), + announce_handler: self.announce_handler.clone(), + scrape_handler: self.scrape_handler.clone(), + whitelist_authorization: self.whitelist_authorization.clone(), + http_stats_event_sender: self.http_stats_event_sender.clone(), + authentication_service: self.authentication_service.clone(), + } + } +} + pub struct UdpTrackerContainer { pub core_config: Arc, pub udp_tracker_config: Arc, @@ -61,31 +77,6 @@ impl UdpTrackerContainer { } } -pub struct HttpTrackerContainer { - pub core_config: Arc, - pub http_tracker_config: Arc, - pub announce_handler: Arc, - pub scrape_handler: Arc, - pub whitelist_authorization: Arc, - pub http_stats_event_sender: Arc>>, - pub authentication_service: Arc, -} - -impl HttpTrackerContainer { - #[must_use] - pub fn from_app_container(http_tracker_config: &Arc, app_container: &Arc) -> Self { - Self { - http_tracker_config: http_tracker_config.clone(), - core_config: app_container.core_config.clone(), - announce_handler: app_container.announce_handler.clone(), - scrape_handler: app_container.scrape_handler.clone(), - whitelist_authorization: app_container.whitelist_authorization.clone(), - http_stats_event_sender: app_container.http_stats_event_sender.clone(), - authentication_service: app_container.authentication_service.clone(), - } - } -} - pub struct HttpApiContainer { pub core_config: Arc, pub http_api_config: Arc, diff --git a/src/lib.rs b/src/lib.rs index a864587c5..4f552ab34 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -57,7 +57,7 @@ //! //! 
- A REST [`API`](crate::servers::apis) //! - One or more [`UDP`](crate::servers::udp) trackers -//! - One or more [`HTTP`](crate::servers::http) trackers +//! - One or more [`HTTP`](torrust_axum_http_tracker_server) trackers //! //! # Installation //! @@ -124,7 +124,7 @@ //! By default the tracker uses `SQLite` and the database file name `sqlite3.db`. //! //! You only need the `tls` directory in case you are setting up SSL for the HTTP tracker or the tracker API. -//! Visit [`HTTP`](crate::servers::http) or [`API`](crate::servers::apis) if you want to know how you can use HTTPS. +//! Visit [`HTTP`](torrust_axum_http_tracker_server) or [`API`](crate::servers::apis) if you want to know how you can use HTTPS. //! //! ## Install from sources //! @@ -301,7 +301,7 @@ //! bind_address = "0.0.0.0:7070" //! ``` //! -//! Refer to the [`HTTP`](crate::servers::http) documentation for more information about the [`HTTP`](crate::servers::http) tracker. +//! Refer to the [`HTTP`](torrust_axum_http_tracker_server) documentation for more information about the [`HTTP`](torrust_axum_http_tracker_server) tracker. //! //! ### Announce //! @@ -408,7 +408,7 @@ //! - The core tracker [`core`] //! - The tracker REST [`API`](crate::servers::apis) //! - The [`UDP`](crate::servers::udp) tracker -//! - The [`HTTP`](crate::servers::http) tracker +//! - The [`HTTP`](torrust_axum_http_tracker_server) tracker //! //! ![Torrust Tracker Components](https://raw.githubusercontent.com/torrust/torrust-tracker/main/docs/media/torrust-tracker-components.png) //! @@ -452,7 +452,7 @@ //! //! HTTP tracker was the original tracker specification defined on the [BEP 3]((https://www.bittorrent.org/beps/bep_0003.html)). //! -//! See [`HTTP`](crate::servers::http) for more details on the HTTP tracker. +//! See [`HTTP`](torrust_axum_http_tracker_server) for more details on the HTTP tracker. //! //! You can find more information about UDP tracker on: //! 
diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 47473d964..187969f8d 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -37,12 +37,11 @@ use torrust_axum_server::custom_axum_server::{self, TimeoutAcceptor}; use torrust_axum_server::signals::graceful_shutdown; use torrust_server_lib::logging::STARTED_ON; use torrust_server_lib::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; -use torrust_server_lib::signals::Halted; +use torrust_server_lib::signals::{Halted, Started}; use torrust_tracker_configuration::AccessTokens; use tracing::{instrument, Level}; use super::routes::router; -use crate::bootstrap::jobs::Started; use crate::container::HttpApiContainer; use crate::servers::apis::API_LOG_TARGET; @@ -295,11 +294,11 @@ impl Launcher { mod tests { use std::sync::Arc; + use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; - use crate::bootstrap::jobs::make_rust_tls; use crate::container::HttpApiContainer; use crate::servers::apis::server::{ApiServer, Launcher}; diff --git a/src/servers/health_check_api/server.rs b/src/servers/health_check_api/server.rs index 06dd5af65..6f468b98e 100644 --- a/src/servers/health_check_api/server.rs +++ b/src/servers/health_check_api/server.rs @@ -17,7 +17,7 @@ use tokio::sync::oneshot::{Receiver, Sender}; use torrust_axum_server::signals::graceful_shutdown; use torrust_server_lib::logging::Latency; use torrust_server_lib::registar::ServiceRegistry; -use torrust_server_lib::signals::Halted; +use torrust_server_lib::signals::{Halted, Started}; use tower_http::classify::ServerErrorsFailureClass; use tower_http::compression::CompressionLayer; use tower_http::propagate_header::PropagateHeaderLayer; @@ -26,7 +26,6 @@ use tower_http::trace::{DefaultMakeSpan, TraceLayer}; use 
tower_http::LatencyUnit; use tracing::{instrument, Level, Span}; -use crate::bootstrap::jobs::Started; use crate::servers::health_check_api::handlers::health_check_handler; use crate::servers::health_check_api::HEALTH_CHECK_API_LOG_TARGET; diff --git a/src/servers/mod.rs b/src/servers/mod.rs index eb5e5fee7..037179ba8 100644 --- a/src/servers/mod.rs +++ b/src/servers/mod.rs @@ -1,5 +1,4 @@ //! Servers. Services that can be started and stopped. pub mod apis; pub mod health_check_api; -pub mod http; pub mod udp; diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index f85972721..dbf0d5693 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -11,11 +11,10 @@ use tokio::sync::oneshot; use tokio::time::interval; use torrust_server_lib::logging::STARTED_ON; use torrust_server_lib::registar::ServiceHealthCheckJob; -use torrust_server_lib::signals::{shutdown_signal_with_message, Halted}; +use torrust_server_lib::signals::{shutdown_signal_with_message, Halted, Started}; use tracing::instrument; use super::request_buffer::ActiveRequests; -use crate::bootstrap::jobs::Started; use crate::container::UdpTrackerContainer; use crate::servers::udp::server::bound_socket::BoundSocket; use crate::servers::udp::server::processor::Processor; diff --git a/src/servers/udp/server/spawner.rs b/src/servers/udp/server/spawner.rs index 34437cdfb..fd85a57c9 100644 --- a/src/servers/udp/server/spawner.rs +++ b/src/servers/udp/server/spawner.rs @@ -7,10 +7,9 @@ use derive_more::derive::Display; use derive_more::Constructor; use tokio::sync::oneshot; use tokio::task::JoinHandle; -use torrust_server_lib::signals::Halted; +use torrust_server_lib::signals::{Halted, Started}; use super::launcher::Launcher; -use crate::bootstrap::jobs::Started; use crate::container::UdpTrackerContainer; #[derive(Constructor, Copy, Clone, Debug, Display)] diff --git a/src/servers/udp/server/states.rs b/src/servers/udp/server/states.rs index 
123d7f8a5..ae499acf7 100644 --- a/src/servers/udp/server/states.rs +++ b/src/servers/udp/server/states.rs @@ -8,12 +8,11 @@ use derive_more::derive::Display; use derive_more::Constructor; use tokio::task::JoinHandle; use torrust_server_lib::registar::{ServiceRegistration, ServiceRegistrationForm}; -use torrust_server_lib::signals::Halted; +use torrust_server_lib::signals::{Halted, Started}; use tracing::{instrument, Level}; use super::spawner::Spawner; use super::{Server, UdpError}; -use crate::bootstrap::jobs::Started; use crate::container::UdpTrackerContainer; use crate::servers::udp::server::launcher::Launcher; diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index cc7574895..b899c9f02 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -6,11 +6,11 @@ use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::Database; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::executor::block_on; +use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; use torrust_tracker_configuration::Configuration; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; -use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; use torrust_tracker_lib::container::HttpApiContainer; use torrust_tracker_lib::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; use torrust_tracker_primitives::peer; diff --git a/tests/servers/health_check_api/environment.rs b/tests/servers/health_check_api/environment.rs index e364a52cb..b83240767 100644 --- a/tests/servers/health_check_api/environment.rs +++ b/tests/servers/health_check_api/environment.rs @@ -4,9 +4,8 @@ use std::sync::Arc; use tokio::sync::oneshot::{self, Sender}; use tokio::task::JoinHandle; use 
torrust_server_lib::registar::Registar; -use torrust_server_lib::signals::{self, Halted}; +use torrust_server_lib::signals::{self, Halted, Started}; use torrust_tracker_configuration::HealthCheckApi; -use torrust_tracker_lib::bootstrap::jobs::Started; use torrust_tracker_lib::servers::health_check_api::{server, HEALTH_CHECK_API_LOG_TARGET}; #[derive(Debug)] diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 2584c51c7..4afb262d7 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -6,12 +6,12 @@ use bittorrent_tracker_core::databases::Database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist::manager::WhitelistManager; use futures::executor::block_on; +use torrust_axum_http_tracker_server::container::HttpTrackerContainer; +use torrust_axum_http_tracker_server::server::{HttpServer, Launcher, Running, Stopped}; +use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::Configuration; use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; -use torrust_tracker_lib::bootstrap::jobs::make_rust_tls; -use torrust_tracker_lib::container::HttpTrackerContainer; -use torrust_tracker_lib::servers::http::server::{HttpServer, Launcher, Running, Stopped}; use torrust_tracker_primitives::peer; pub struct Environment { diff --git a/tests/servers/http/mod.rs b/tests/servers/http/mod.rs index adcdcbf5e..37d4dcd3d 100644 --- a/tests/servers/http/mod.rs +++ b/tests/servers/http/mod.rs @@ -5,10 +5,10 @@ pub mod requests; pub mod responses; pub mod v1; -pub type Started = environment::Environment; - use percent_encoding::NON_ALPHANUMERIC; -use torrust_tracker_lib::servers::http::server; +use torrust_axum_http_tracker_server::server; + +pub type Started = environment::Environment; pub type ByteArray20 = [u8; 20]; diff --git 
a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index bab969403..1931544b9 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -14,7 +14,7 @@ async fn environment_should_be_started_and_stopped() { mod for_all_config_modes { - use torrust_tracker_lib::servers::http::v1::handlers::health_check::{Report, Status}; + use torrust_axum_http_tracker_server::v1::handlers::health_check::{Report, Status}; use torrust_tracker_test_helpers::configuration; use crate::common::logging; From 2b72ae07ce3dbf3073c5755178663fbdc25148d0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Feb 2025 11:16:36 +0000 Subject: [PATCH 289/802] refactor: [#1283] move test helpers to main test mod in package --- packages/axum-http-tracker-server/src/lib.rs | 19 ++++++++++++++++++- .../src/test_helpers.rs | 16 ---------------- .../src/v1/handlers/announce.rs | 2 +- 3 files changed, 19 insertions(+), 18 deletions(-) delete mode 100644 packages/axum-http-tracker-server/src/test_helpers.rs diff --git a/packages/axum-http-tracker-server/src/lib.rs b/packages/axum-http-tracker-server/src/lib.rs index fd2aa8506..3d9f6d1b7 100644 --- a/packages/axum-http-tracker-server/src/lib.rs +++ b/packages/axum-http-tracker-server/src/lib.rs @@ -307,7 +307,6 @@ use serde::{Deserialize, Serialize}; pub mod container; pub mod server; -pub mod test_helpers; pub mod v1; pub const HTTP_TRACKER_LOG_TARGET: &str = "HTTP TRACKER"; @@ -318,3 +317,21 @@ pub enum Version { /// The `v1` version of the HTTP tracker. V1, } + +#[cfg(test)] +pub(crate) mod tests { + + pub(crate) mod helpers { + use bittorrent_primitives::info_hash::InfoHash; + + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. 
+ #[must_use] + pub fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") + } + } +} diff --git a/packages/axum-http-tracker-server/src/test_helpers.rs b/packages/axum-http-tracker-server/src/test_helpers.rs deleted file mode 100644 index 8c3020c52..000000000 --- a/packages/axum-http-tracker-server/src/test_helpers.rs +++ /dev/null @@ -1,16 +0,0 @@ -//! Some generic test helpers functions. - -#[cfg(test)] -pub(crate) mod tests { - use bittorrent_primitives::info_hash::InfoHash; - - /// # Panics - /// - /// Will panic if the string representation of the info hash is not a valid info hash. - #[must_use] - pub fn sample_info_hash() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 - .parse::() - .expect("String should be a valid info hash") - } -} diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 43122f8bd..f8f551253 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -199,7 +199,7 @@ mod tests { use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_test_helpers::configuration; - use crate::test_helpers::tests::sample_info_hash; + use crate::tests::helpers::sample_info_hash; struct CoreTrackerServices { pub core_config: Arc, From 3e81d3ec9361bde2c15a63d4287e6bb42fda6c70 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Feb 2025 12:20:49 +0000 Subject: [PATCH 290/802] refactor: [#1287] extract axum-health-check-api-server package --- .github/workflows/deployment.yaml | 3 +- Cargo.lock | 18 + Cargo.toml | 1 + cSpell.json | 1 + .../axum-health-check-api-server/Cargo.toml | 29 + packages/axum-health-check-api-server/LICENSE | 661 ++++++++++++++++++ .../axum-health-check-api-server/README.md | 49 ++ 
.../src}/handlers.rs | 0 .../axum-health-check-api-server/src/lib.rs | 0 .../src}/resources.rs | 0 .../src}/responses.rs | 0 .../src}/server.rs | 4 +- src/bootstrap/jobs/health_check_api.rs | 3 +- src/console/ci/e2e/logs_parser.rs | 3 +- src/servers/mod.rs | 1 - tests/servers/health_check_api/contract.rs | 8 +- tests/servers/health_check_api/environment.rs | 2 +- 17 files changed, 769 insertions(+), 14 deletions(-) create mode 100644 packages/axum-health-check-api-server/Cargo.toml create mode 100644 packages/axum-health-check-api-server/LICENSE create mode 100644 packages/axum-health-check-api-server/README.md rename {src/servers/health_check_api => packages/axum-health-check-api-server/src}/handlers.rs (100%) rename src/servers/health_check_api/mod.rs => packages/axum-health-check-api-server/src/lib.rs (100%) rename {src/servers/health_check_api => packages/axum-health-check-api-server/src}/resources.rs (100%) rename {src/servers/health_check_api => packages/axum-health-check-api-server/src}/responses.rs (100%) rename {src/servers/health_check_api => packages/axum-health-check-api-server/src}/server.rs (97%) diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 296752df4..5aca88ac4 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -61,6 +61,7 @@ jobs: cargo publish -p bittorrent-tracker-core cargo publish -p bittorrent-udp-tracker-core cargo publish -p bittorrent-udp-tracker-protocol + cargo publish -p torrust-axum-health-check-api-server cargo publish -p torrust-axum-http-tracker-server cargo publish -p torrust-axum-server cargo publish -p torrust-torrust-server-lib @@ -75,5 +76,3 @@ jobs: cargo publish -p torrust-tracker-primitives cargo publish -p torrust-tracker-test-helpers cargo publish -p torrust-tracker-torrent-repository - - diff --git a/Cargo.lock b/Cargo.lock index 23092e31e..1c30e5128 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4337,6 +4337,23 @@ dependencies = [ "winnow", ] 
+[[package]] +name = "torrust-axum-health-check-api-server" +version = "3.0.0-develop" +dependencies = [ + "axum", + "axum-server", + "futures", + "hyper", + "serde", + "serde_json", + "tokio", + "torrust-axum-server", + "torrust-server-lib", + "tower-http", + "tracing", +] + [[package]] name = "torrust-axum-http-tracker-server" version = "3.0.0-develop" @@ -4439,6 +4456,7 @@ dependencies = [ "serde_with", "thiserror 2.0.11", "tokio", + "torrust-axum-health-check-api-server", "torrust-axum-http-tracker-server", "torrust-axum-server", "torrust-server-lib", diff --git a/Cargo.toml b/Cargo.toml index 4f8854d34..20d7c00dd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -70,6 +70,7 @@ serde_repr = "0" serde_with = { version = "3", features = ["json"] } thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-axum-health-check-api-server = { version = "3.0.0-develop", path = "packages/axum-health-check-api-server" } torrust-axum-http-tracker-server = { version = "3.0.0-develop", path = "packages/axum-http-tracker-server" } torrust-axum-server = { version = "3.0.0-develop", path = "packages/axum-server" } torrust-server-lib = { version = "3.0.0-develop", path = "packages/server-lib" } diff --git a/cSpell.json b/cSpell.json index b1e9a5e95..e067df932 100644 --- a/cSpell.json +++ b/cSpell.json @@ -63,6 +63,7 @@ "gecos", "Grcov", "hasher", + "healthcheck", "heaptrack", "hexlify", "hlocalhost", diff --git a/packages/axum-health-check-api-server/Cargo.toml b/packages/axum-health-check-api-server/Cargo.toml new file mode 100644 index 000000000..37e49f9e7 --- /dev/null +++ b/packages/axum-health-check-api-server/Cargo.toml @@ -0,0 +1,29 @@ +[package] +authors.workspace = true +description = "The Torrust Bittorrent HTTP tracker." 
+documentation.workspace = true +edition.workspace = true +homepage.workspace = true +keywords = ["axum", "bittorrent", "healthcheck", "http", "server", "torrust", "tracker"] +license.workspace = true +name = "torrust-axum-health-check-api-server" +publish.workspace = true +readme = "README.md" +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +axum = { version = "0", features = ["macros"] } +axum-server = { version = "0", features = ["tls-rustls-no-provider"] } +futures = "0" +hyper = "1" +serde = { version = "1", features = ["derive"] } +serde_json = { version = "1", features = ["preserve_order"] } +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-axum-server = { version = "3.0.0-develop", path = "../axum-server" } +torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } +tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } +tracing = "0" + +[dev-dependencies] diff --git a/packages/axum-health-check-api-server/LICENSE b/packages/axum-health-check-api-server/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/axum-health-check-api-server/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. 
By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. 
This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. 
However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. 
+ + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/packages/axum-health-check-api-server/README.md b/packages/axum-health-check-api-server/README.md new file mode 100644 index 000000000..d4c6b4f0b --- /dev/null +++ b/packages/axum-health-check-api-server/README.md @@ -0,0 +1,49 @@ +# Torrust Axum HTTP Tracker + +The Torrust Tracker Health Check API. + +The Torrust tracker container starts a local HTTP server on port 1313 to check all services. + +It's used for the container health check. + +URL: + +Example response: + +```json +{ + "status": "Ok", + "message": "", + "details": [ + { + "binding": "0.0.0.0:6969", + "info": "checking the udp tracker health check at: 0.0.0.0:6969", + "result": { + "Ok": "Connected" + } + }, + { + "binding": "0.0.0.0:1212", + "info": "checking api health check at: http://0.0.0.0:1212/api/health_check", + "result": { + "Ok": "200 OK" + } + }, + { + "binding": "0.0.0.0:7070", + "info": "checking http tracker health check at: http://0.0.0.0:7070/health_check", + "result": { + "Ok": "200 OK" + } + } + ] +} +``` + +## Documentation + +[Crate documentation](https://docs.rs/torrust-axum-health-check-api-server). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). 
diff --git a/src/servers/health_check_api/handlers.rs b/packages/axum-health-check-api-server/src/handlers.rs similarity index 100% rename from src/servers/health_check_api/handlers.rs rename to packages/axum-health-check-api-server/src/handlers.rs diff --git a/src/servers/health_check_api/mod.rs b/packages/axum-health-check-api-server/src/lib.rs similarity index 100% rename from src/servers/health_check_api/mod.rs rename to packages/axum-health-check-api-server/src/lib.rs diff --git a/src/servers/health_check_api/resources.rs b/packages/axum-health-check-api-server/src/resources.rs similarity index 100% rename from src/servers/health_check_api/resources.rs rename to packages/axum-health-check-api-server/src/resources.rs diff --git a/src/servers/health_check_api/responses.rs b/packages/axum-health-check-api-server/src/responses.rs similarity index 100% rename from src/servers/health_check_api/responses.rs rename to packages/axum-health-check-api-server/src/responses.rs diff --git a/src/servers/health_check_api/server.rs b/packages/axum-health-check-api-server/src/server.rs similarity index 97% rename from src/servers/health_check_api/server.rs rename to packages/axum-health-check-api-server/src/server.rs index 6f468b98e..733fec3a0 100644 --- a/src/servers/health_check_api/server.rs +++ b/packages/axum-health-check-api-server/src/server.rs @@ -26,8 +26,8 @@ use tower_http::trace::{DefaultMakeSpan, TraceLayer}; use tower_http::LatencyUnit; use tracing::{instrument, Level, Span}; -use crate::servers::health_check_api::handlers::health_check_handler; -use crate::servers::health_check_api::HEALTH_CHECK_API_LOG_TARGET; +use crate::handlers::health_check_handler; +use crate::HEALTH_CHECK_API_LOG_TARGET; /// Starts Health Check API server. 
/// diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs index d64ca0073..5d342a7f0 100644 --- a/src/bootstrap/jobs/health_check_api.rs +++ b/src/bootstrap/jobs/health_check_api.rs @@ -16,14 +16,13 @@ use tokio::sync::oneshot; use tokio::task::JoinHandle; +use torrust_axum_health_check_api_server::{server, HEALTH_CHECK_API_LOG_TARGET}; use torrust_server_lib::logging::STARTED_ON; use torrust_server_lib::registar::ServiceRegistry; use torrust_server_lib::signals::{Halted, Started}; use torrust_tracker_configuration::HealthCheckApi; use tracing::instrument; -use crate::servers::health_check_api::{server, HEALTH_CHECK_API_LOG_TARGET}; - /// This function starts a new Health Check API server with the provided /// configuration. /// diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs index fdbe5d9c0..c406fa7a5 100644 --- a/src/console/ci/e2e/logs_parser.rs +++ b/src/console/ci/e2e/logs_parser.rs @@ -2,11 +2,10 @@ use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use regex::Regex; use serde::{Deserialize, Serialize}; +use torrust_axum_health_check_api_server::HEALTH_CHECK_API_LOG_TARGET; use torrust_axum_http_tracker_server::HTTP_TRACKER_LOG_TARGET; use torrust_server_lib::logging::STARTED_ON; -use crate::servers::health_check_api::HEALTH_CHECK_API_LOG_TARGET; - const INFO_THRESHOLD: &str = "INFO"; #[derive(Serialize, Deserialize, Debug, Default)] diff --git a/src/servers/mod.rs b/src/servers/mod.rs index 037179ba8..8dea8a10d 100644 --- a/src/servers/mod.rs +++ b/src/servers/mod.rs @@ -1,4 +1,3 @@ //! Servers. Services that can be started and stopped. 
pub mod apis; -pub mod health_check_api; pub mod udp; diff --git a/tests/servers/health_check_api/contract.rs b/tests/servers/health_check_api/contract.rs index bf38e05a7..bde3e9f5d 100644 --- a/tests/servers/health_check_api/contract.rs +++ b/tests/servers/health_check_api/contract.rs @@ -1,5 +1,5 @@ +use torrust_axum_health_check_api_server::resources::{Report, Status}; use torrust_server_lib::registar::Registar; -use torrust_tracker_lib::servers::health_check_api::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; use crate::common::logging; @@ -32,7 +32,7 @@ async fn health_check_endpoint_should_return_status_ok_when_there_is_no_services mod api { use std::sync::Arc; - use torrust_tracker_lib::servers::health_check_api::resources::{Report, Status}; + use torrust_axum_health_check_api_server::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; use crate::common::logging; @@ -142,7 +142,7 @@ mod api { mod http { use std::sync::Arc; - use torrust_tracker_lib::servers::health_check_api::resources::{Report, Status}; + use torrust_axum_health_check_api_server::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; use crate::common::logging; @@ -251,7 +251,7 @@ mod http { mod udp { use std::sync::Arc; - use torrust_tracker_lib::servers::health_check_api::resources::{Report, Status}; + use torrust_axum_health_check_api_server::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; use crate::common::logging; diff --git a/tests/servers/health_check_api/environment.rs b/tests/servers/health_check_api/environment.rs index b83240767..f8c1209cd 100644 --- a/tests/servers/health_check_api/environment.rs +++ b/tests/servers/health_check_api/environment.rs @@ -3,10 +3,10 @@ use std::sync::Arc; use tokio::sync::oneshot::{self, Sender}; use tokio::task::JoinHandle; +use torrust_axum_health_check_api_server::{server, HEALTH_CHECK_API_LOG_TARGET}; use 
torrust_server_lib::registar::Registar; use torrust_server_lib::signals::{self, Halted, Started}; use torrust_tracker_configuration::HealthCheckApi; -use torrust_tracker_lib::servers::health_check_api::{server, HEALTH_CHECK_API_LOG_TARGET}; #[derive(Debug)] pub enum Error { From 39dfbdc631bd250d98fe3e51eab9146efb73fa78 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Feb 2025 12:59:27 +0000 Subject: [PATCH 291/802] chore(deps): udpate dependencies ```output cargo update Updating crates.io index Locking 2 packages to latest compatible versions Updating h2 v0.4.7 -> v0.4.8 Updating unicode-ident v1.0.16 -> v1.0.17 ``` --- Cargo.lock | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1c30e5128..d0fb7a7d9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1784,9 +1784,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" +checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" dependencies = [ "atomic-waker", "bytes", @@ -4378,7 +4378,7 @@ dependencies = [ "torrust-tracker-configuration", "torrust-tracker-primitives", "torrust-tracker-test-helpers", - "tower 0.4.13", + "tower 0.5.2", "tower-http", "tracing", ] @@ -4399,7 +4399,7 @@ dependencies = [ "torrust-server-lib", "torrust-tracker-configuration", "torrust-tracker-located-error", - "tower 0.4.13", + "tower 0.5.2", "tracing", ] @@ -4623,7 +4623,6 @@ dependencies = [ "futures-util", "pin-project", "pin-project-lite", - "tokio", "tower-layer", "tower-service", "tracing", @@ -4783,9 +4782,9 @@ dependencies = [ [[package]] name = "unicode-ident" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" +checksum = 
"00e2473a93778eb0bad35909dff6a10d28e63f792f16ed15e404fca9d5eeedbe" [[package]] name = "unicode-xid" From d4ec44e2cc62a27b0858911e3a392aa18ab56e42 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 20 Feb 2025 07:58:21 +0000 Subject: [PATCH 292/802] chore(deps): udpate dependencies ``` Updating crates.io index Locking 21 packages to latest compatible versions Updating anyhow v1.0.95 -> v1.0.96 Adding bitflags v1.3.2 Updating bollard v0.16.1 -> v0.18.1 Updating bollard-stubs v1.44.0-rc.2 -> v1.47.1-rc.27.3.1 Adding core-foundation v0.10.0 Removing dirs v5.0.1 Removing dirs-sys v0.4.1 Adding etcetera v0.8.0 Adding filetime v0.2.25 Removing hyper-rustls v0.26.0 Adding hyperlocal v0.9.1 Removing hyperlocal-next v0.9.0 Updating native-tls v0.2.13 -> v0.2.14 Removing option-ext v0.2.0 Adding redox_syscall v0.3.5 Removing redox_users v0.4.6 Removing rustls v0.22.4 Updating rustls-native-certs v0.7.3 -> v0.8.1 Adding security-framework v3.2.0 Updating serde v1.0.217 -> v1.0.218 Updating serde_derive v1.0.217 -> v1.0.218 Updating serde_json v1.0.138 -> v1.0.139 Updating testcontainers v0.17.0 -> v0.23.3 Removing tokio-rustls v0.25.0 Adding tokio-tar v0.3.1 Updating winnow v0.7.2 -> v0.7.3 Adding xattr v1.4.0 Updating zerocopy v0.8.18 -> v0.8.20 Updating zerocopy-derive v0.8.18 -> v0.8.20 ``` --- Cargo.lock | 285 ++++++++++++++++--------------- packages/tracker-core/Cargo.toml | 18 +- 2 files changed, 153 insertions(+), 150 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d0fb7a7d9..71ac6b225 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -131,9 +131,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.95" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" +checksum = "6b964d184e89d9b6b67dd2715bc8e74cf3107fb2b529990c90cf517326150bf4" [[package]] name = "aquatic_peer_id" @@ -462,11 +462,11 @@ dependencies = [ "hyper", "hyper-util", 
"pin-project-lite", - "rustls 0.23.23", + "rustls", "rustls-pemfile", "rustls-pki-types", "tokio", - "tokio-rustls 0.26.1", + "tokio-rustls", "tower 0.4.13", "tower-service", ] @@ -523,7 +523,7 @@ version = "0.71.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" dependencies = [ - "bitflags", + "bitflags 2.8.0", "cexpr", "clang-sys", "itertools 0.13.0", @@ -541,6 +541,12 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02b4ff8b16e6076c3e14220b39fbc1fabb6737522281a388998046859400895f" +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + [[package]] name = "bitflags" version = "2.8.0" @@ -739,9 +745,9 @@ dependencies = [ [[package]] name = "bollard" -version = "0.16.1" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aed08d3adb6ebe0eff737115056652670ae290f177759aac19c30456135f94c" +checksum = "97ccca1260af6a459d75994ad5acc1651bcabcbdbc41467cc9786519ab854c30" dependencies = [ "base64 0.22.1", "bollard-stubs", @@ -754,12 +760,12 @@ dependencies = [ "http-body-util", "hyper", "hyper-named-pipe", - "hyper-rustls 0.26.0", + "hyper-rustls", "hyper-util", - "hyperlocal-next", + "hyperlocal", "log", "pin-project-lite", - "rustls 0.22.4", + "rustls", "rustls-native-certs", "rustls-pemfile", "rustls-pki-types", @@ -768,7 +774,7 @@ dependencies = [ "serde_json", "serde_repr", "serde_urlencoded", - "thiserror 1.0.69", + "thiserror 2.0.11", "tokio", "tokio-util", "tower-service", @@ -778,9 +784,9 @@ dependencies = [ [[package]] name = "bollard-stubs" -version = "1.44.0-rc.2" +version = "1.47.1-rc.27.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "709d9aa1c37abb89d40f19f5d0ad6f0d88cb1581264e571c9350fc5bb89cf1c5" 
+checksum = "3f179cfbddb6e77a5472703d4b30436bff32929c0aa8a9008ecf23d1d3cdd0da" dependencies = [ "serde", "serde_repr", @@ -1096,6 +1102,16 @@ dependencies = [ "libc", ] +[[package]] +name = "core-foundation" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -1341,27 +1357,6 @@ dependencies = [ "crypto-common", ] -[[package]] -name = "dirs" -version = "5.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" -dependencies = [ - "dirs-sys", -] - -[[package]] -name = "dirs-sys" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" -dependencies = [ - "libc", - "option-ext", - "redox_users", - "windows-sys 0.48.0", -] - [[package]] name = "displaydoc" version = "0.2.5" @@ -1431,6 +1426,17 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "etcetera" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" +dependencies = [ + "cfg-if", + "home", + "windows-sys 0.48.0", +] + [[package]] name = "event-listener" version = "2.5.3" @@ -1492,6 +1498,18 @@ dependencies = [ "version_check", ] +[[package]] +name = "filetime" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" +dependencies = [ + "cfg-if", + "libc", + "libredox", + "windows-sys 0.59.0", +] + [[package]] name = "flate2" version = "1.0.35" @@ -1961,25 +1979,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "hyper-rustls" -version = "0.26.0" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" -dependencies = [ - "futures-util", - "http", - "hyper", - "hyper-util", - "log", - "rustls 0.22.4", - "rustls-native-certs", - "rustls-pki-types", - "tokio", - "tokio-rustls 0.25.0", - "tower-service", -] - [[package]] name = "hyper-rustls" version = "0.27.5" @@ -1990,10 +1989,10 @@ dependencies = [ "http", "hyper", "hyper-util", - "rustls 0.23.23", + "rustls", "rustls-pki-types", "tokio", - "tokio-rustls 0.26.1", + "tokio-rustls", "tower-service", ] @@ -2033,10 +2032,10 @@ dependencies = [ ] [[package]] -name = "hyperlocal-next" -version = "0.9.0" +name = "hyperlocal" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acf569d43fa9848e510358c07b80f4adf34084ddc28c6a4a651ee8474c070dcc" +checksum = "986c5ce3b994526b3cd75578e62554abd09f0899d6206de48b3e96ab34ccc8c7" dependencies = [ "hex", "http-body-util", @@ -2370,8 +2369,9 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags", + "bitflags 2.8.0", "libc", + "redox_syscall 0.5.8", ] [[package]] @@ -2579,7 +2579,7 @@ dependencies = [ "base64 0.21.7", "bigdecimal", "bindgen", - "bitflags", + "bitflags 2.8.0", "bitvec", "btoi", "byteorder", @@ -2620,9 +2620,9 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dab59f8e050d5df8e4dd87d9206fb6f65a483e20ac9fda365ade4fab353196c" +checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" dependencies = [ "libc", "log", @@ -2630,7 +2630,7 @@ dependencies = [ "openssl-probe", "openssl-sys", "schannel", - "security-framework", + "security-framework 2.11.1", "security-framework-sys", "tempfile", ] @@ -2747,7 +2747,7 @@ version 
= "0.10.71" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e14130c6a98cd258fdcb0fb6d744152343ff729cbfcb28c656a9d12b999fbcd" dependencies = [ - "bitflags", + "bitflags 2.8.0", "cfg-if", "foreign-types", "libc", @@ -2785,12 +2785,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "option-ext" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" - [[package]] name = "overload" version = "0.1.1" @@ -2821,7 +2815,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.5.8", "smallvec", "windows-targets 0.52.6", ] @@ -3217,7 +3211,7 @@ checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.1", - "zerocopy 0.8.18", + "zerocopy 0.8.20", ] [[package]] @@ -3256,7 +3250,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a88e0da7a2c97baa202165137c158d0a2e824ac465d13d81046727b34cb247d3" dependencies = [ "getrandom 0.3.1", - "zerocopy 0.8.18", + "zerocopy 0.8.20", ] [[package]] @@ -3281,22 +3275,20 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.8" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" +checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" dependencies = [ - "bitflags", + "bitflags 1.3.2", ] [[package]] -name = "redox_users" -version = "0.4.6" +name = "redox_syscall" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ - "getrandom 0.2.15", - "libredox", - 
"thiserror 1.0.69", + "bitflags 2.8.0", ] [[package]] @@ -3359,7 +3351,7 @@ dependencies = [ "http-body", "http-body-util", "hyper", - "hyper-rustls 0.27.5", + "hyper-rustls", "hyper-tls", "hyper-util", "ipnet", @@ -3476,7 +3468,7 @@ version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c6d5e5acb6f6129fe3f7ba0a7fc77bca1942cb568535e18e7bc40262baf3110" dependencies = [ - "bitflags", + "bitflags 2.8.0", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -3527,27 +3519,13 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags", + "bitflags 2.8.0", "errno", "libc", "linux-raw-sys", "windows-sys 0.59.0", ] -[[package]] -name = "rustls" -version = "0.22.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" -dependencies = [ - "log", - "ring", - "rustls-pki-types", - "rustls-webpki", - "subtle", - "zeroize", -] - [[package]] name = "rustls" version = "0.23.23" @@ -3555,6 +3533,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" dependencies = [ "once_cell", + "ring", "rustls-pki-types", "rustls-webpki", "subtle", @@ -3563,15 +3542,14 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.7.3" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" +checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" dependencies = [ "openssl-probe", - "rustls-pemfile", "rustls-pki-types", "schannel", - "security-framework", + "security-framework 3.2.0", ] [[package]] @@ -3663,8 +3641,21 @@ version = "2.11.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags", - "core-foundation", + "bitflags 2.8.0", + "core-foundation 0.9.4", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" +dependencies = [ + "bitflags 2.8.0", + "core-foundation 0.10.0", "core-foundation-sys", "libc", "security-framework-sys", @@ -3688,9 +3679,9 @@ checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" [[package]] name = "serde" -version = "1.0.217" +version = "1.0.218" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" +checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60" dependencies = [ "serde_derive", ] @@ -3716,9 +3707,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.217" +version = "1.0.218" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" +checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b" dependencies = [ "proc-macro2", "quote", @@ -3740,9 +3731,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.138" +version = "1.0.139" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" +checksum = "44f86c3acccc9c65b153fe1b85a3be07fe5515274ec9f0653b4a0875731c72a6" dependencies = [ "indexmap 2.7.1", "itoa", @@ -4011,8 +4002,8 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" 
dependencies = [ - "bitflags", - "core-foundation", + "bitflags 2.8.0", + "core-foundation 0.9.4", "system-configuration-sys", ] @@ -4080,26 +4071,29 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "testcontainers" -version = "0.17.0" +version = "0.23.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "025e0ac563d543e0354d984540e749859a83dbe5c0afb8d458dc48d91cef2d6a" +checksum = "59a4f01f39bb10fc2a5ab23eb0d888b1e2bb168c157f61a1b98e6c501c639c74" dependencies = [ "async-trait", "bollard", "bollard-stubs", "bytes", - "dirs", "docker_credential", + "either", + "etcetera", "futures", "log", "memchr", "parse-display", + "pin-project-lite", "serde", "serde_json", "serde_with", - "thiserror 1.0.69", + "thiserror 2.0.11", "tokio", "tokio-stream", + "tokio-tar", "tokio-util", "url", ] @@ -4258,24 +4252,13 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-rustls" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" -dependencies = [ - "rustls 0.22.4", - "rustls-pki-types", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.26.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" dependencies = [ - "rustls 0.23.23", + "rustls", "tokio", ] @@ -4290,6 +4273,21 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-tar" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d5714c010ca3e5c27114c1cdeb9d14641ace49874aa5626d7149e47aedace75" +dependencies = [ + "filetime", + "futures-core", + "libc", + "redox_syscall 0.3.5", + "tokio", + "tokio-stream", + "xattr", +] + [[package]] name = "tokio-util" version = "0.7.13" @@ -4651,7 +4649,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "async-compression", - "bitflags", + "bitflags 2.8.0", "bytes", "futures-core", "http", @@ -5197,9 +5195,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59690dea168f2198d1a3b0cac23b8063efcd11012f10ae4698f284808c8ef603" +checksum = "0e7f4ea97f6f78012141bcdb6a216b2609f0979ada50b20ca5b52dde2eac2bb1" dependencies = [ "memchr", ] @@ -5210,7 +5208,7 @@ version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" dependencies = [ - "bitflags", + "bitflags 2.8.0", ] [[package]] @@ -5234,6 +5232,17 @@ dependencies = [ "tap", ] +[[package]] +name = "xattr" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e105d177a3871454f754b33bb0ee637ecaaac997446375fd3e5d43a2ed00c909" +dependencies = [ + "libc", + "linux-raw-sys", + "rustix", +] + [[package]] name = "yansi" version = "1.0.1" @@ -5276,11 +5285,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.18" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79386d31a42a4996e3336b0919ddb90f81112af416270cff95b5f5af22b839c2" +checksum = "dde3bb8c68a8f3f1ed4ac9221aad6b10cece3e60a8e2ea54a6a2dec806d0084c" dependencies = [ - "zerocopy-derive 0.8.18", + "zerocopy-derive 0.8.20", ] [[package]] @@ -5296,9 +5305,9 @@ dependencies = [ [[package]] name = "zerocopy-derive" -version = "0.8.18" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76331675d372f91bf8d17e13afbd5fe639200b73d01f0fc748bb059f9cca2db7" +checksum = "eea57037071898bf96a6da35fd626f4f27e9cee3ead2a6c703cf09d472b2e700" dependencies = [ "proc-macro2", "quote", diff 
--git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml index 46807a534..5a830051e 100644 --- a/packages/tracker-core/Cargo.toml +++ b/packages/tracker-core/Cargo.toml @@ -1,14 +1,14 @@ [package] -description = "A library with the core functionality needed to implement a BitTorrent tracker." -keywords = ["api", "bittorrent", "core", "library", "tracker"] -name = "bittorrent-tracker-core" -readme = "README.md" authors.workspace = true +description = "A library with the core functionality needed to implement a BitTorrent tracker." documentation.workspace = true edition.workspace = true homepage.workspace = true +keywords = ["api", "bittorrent", "core", "library", "tracker"] license.workspace = true +name = "bittorrent-tracker-core" publish.workspace = true +readme = "README.md" repository.workspace = true rust-version.workspace = true version.workspace = true @@ -26,13 +26,7 @@ rand = "0" serde = { version = "1", features = ["derive"] } serde_json = { version = "1", features = ["preserve_order"] } thiserror = "2" -tokio = { version = "1", features = [ - "macros", - "net", - "rt-multi-thread", - "signal", - "sync", -] } +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } @@ -43,7 +37,7 @@ tracing = "0" [dev-dependencies] local-ip-address = "0" mockall = "0" +testcontainers = "0" torrust-tracker-api-client = { version = "3.0.0-develop", path = "../tracker-api-client" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } -testcontainers = "0.17.0" url = "2.5.4" From 66c70d98e9613d9f0e4d7b5c7d29c3ba824cfc54 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 20 Feb 2025 08:17:44 +0000 Subject: [PATCH 293/802] chore(deps): bump 
testcontainers from 0.17.0 to 0.23.3 --- packages/tracker-core/src/databases/driver/mysql.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index 624e34c9b..6f7deb2b9 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -268,6 +268,7 @@ impl Database for Mysql { mod tests { use std::sync::Arc; + use testcontainers::core::IntoContainerPort; /* We run a MySQL container and run all the tests against the same container and database. @@ -285,7 +286,7 @@ mod tests { If we increase the number of methods or the number or drivers. */ use testcontainers::runners::AsyncRunner; - use testcontainers::{ContainerAsync, GenericImage}; + use testcontainers::{ContainerAsync, GenericImage, ImageExt}; use torrust_tracker_configuration::Core; use super::Mysql; @@ -298,12 +299,12 @@ mod tests { impl StoppedMysqlContainer { async fn run(self, config: &MysqlConfiguration) -> Result> { let container = GenericImage::new("mysql", "8.0") + .with_exposed_port(config.internal_port.tcp()) + // todo: this does not work + //.with_wait_for(WaitFor::message_on_stdout("ready for connections")) .with_env_var("MYSQL_ROOT_PASSWORD", config.db_root_password.clone()) .with_env_var("MYSQL_DATABASE", config.database.clone()) .with_env_var("MYSQL_ROOT_HOST", "%") - .with_exposed_port(config.internal_port) - // todo: this doesn't work - //.with_wait_for(WaitFor::message_on_stdout("ready for connections")) .start() .await?; From 6e74f5f255d82d85c0a4f525be4626f3dc73c2f9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 20 Feb 2025 08:20:28 +0000 Subject: [PATCH 294/802] chore(deps): bump derive_more from 1.0.0 to 2.0.1 --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- packages/axum-http-tracker-server/Cargo.toml | 2 +- packages/configuration/Cargo.toml | 2 +- packages/http-protocol/Cargo.toml | 2 +- 
packages/primitives/Cargo.toml | 2 +- packages/server-lib/Cargo.toml | 2 +- packages/tracker-client/Cargo.toml | 2 +- packages/tracker-core/Cargo.toml | 2 +- 9 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 71ac6b225..5779f2ff9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1317,18 +1317,18 @@ dependencies = [ [[package]] name = "derive_more" -version = "1.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" dependencies = [ "derive_more-impl", ] [[package]] name = "derive_more-impl" -version = "1.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index 20d7c00dd..dadd39ccf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,7 +47,7 @@ chrono = { version = "0", default-features = false, features = ["clock"] } clap = { version = "4", features = ["derive", "env"] } crossbeam-skiplist = "0" dashmap = "6" -derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } figment = "0" futures = "0" futures-util = "0" diff --git a/packages/axum-http-tracker-server/Cargo.toml b/packages/axum-http-tracker-server/Cargo.toml index b47ea23ce..ae038cb7b 100644 --- a/packages/axum-http-tracker-server/Cargo.toml +++ b/packages/axum-http-tracker-server/Cargo.toml @@ -22,7 +22,7 @@ bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "../http-trac bittorrent-http-tracker-protocol = { version = "3.0.0-develop", path = "../http-protocol" } bittorrent-primitives = "0.1.0" 
bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } -derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } futures = "0" hyper = "1" reqwest = { version = "0", features = ["json"] } diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index 05789b882..da04f29cd 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -16,7 +16,7 @@ version.workspace = true [dependencies] camino = { version = "1", features = ["serde", "serde1"] } -derive_more = { version = "1", features = ["constructor", "display"] } +derive_more = { version = "2", features = ["constructor", "display"] } figment = { version = "0", features = ["env", "test", "toml"] } serde = { version = "1", features = ["derive"] } serde_json = { version = "1", features = ["preserve_order"] } diff --git a/packages/http-protocol/Cargo.toml b/packages/http-protocol/Cargo.toml index 7445b37a1..7803fe78e 100644 --- a/packages/http-protocol/Cargo.toml +++ b/packages/http-protocol/Cargo.toml @@ -18,7 +18,7 @@ version.workspace = true aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } -derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } multimap = "0" percent-encoding = "2" serde = { version = "1", features = ["derive"] } diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index b83886385..1396d8bc8 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -18,7 +18,7 @@ version.workspace = true aquatic_udp_protocol = "0" binascii = "0" bittorrent-primitives = "0.1.0" -derive_more = { version = "1", features = ["constructor"] } +derive_more = { version = "2", features = ["constructor"] } 
serde = { version = "1", features = ["derive"] } tdyne-peer-id = "1" tdyne-peer-id-registry = "0" diff --git a/packages/server-lib/Cargo.toml b/packages/server-lib/Cargo.toml index b0e196d64..b8514fbf4 100644 --- a/packages/server-lib/Cargo.toml +++ b/packages/server-lib/Cargo.toml @@ -14,7 +14,7 @@ rust-version.workspace = true version.workspace = true [dependencies] -derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } tracing = "0" diff --git a/packages/tracker-client/Cargo.toml b/packages/tracker-client/Cargo.toml index 67a4c767a..ef5cccaa2 100644 --- a/packages/tracker-client/Cargo.toml +++ b/packages/tracker-client/Cargo.toml @@ -17,7 +17,7 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" -derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } hyper = "1" percent-encoding = "2" reqwest = { version = "0", features = ["json"] } diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml index 5a830051e..731ee900d 100644 --- a/packages/tracker-core/Cargo.toml +++ b/packages/tracker-core/Cargo.toml @@ -17,7 +17,7 @@ version.workspace = true aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" chrono = { version = "0", default-features = false, features = ["clock"] } -derive_more = { version = "1", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } mockall = "0" r2d2 = "0" r2d2_mysql = "25" From bc95fc4c841674d837064099fedf040e059a20b8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 20 Feb 2025 
16:08:57 +0000 Subject: [PATCH 295/802] docs: remove code-review comment We decided not to change it. - The returned value is simpler. - We force the initial peer to change so there is no confusion about what was the final announced peer. --- packages/tracker-core/src/announce_handler.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index cd2073857..cb48a321a 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -162,10 +162,6 @@ impl AnnounceHandler { remote_client_ip: &IpAddr, peers_wanted: &PeersWanted, ) -> Result { - // code-review: maybe instead of mutating the peer we could just return - // a tuple with the new peer and the announce data: (Peer, AnnounceData). - // It could even be a different struct: `StoredPeer` or `PublicPeer`. - self.whitelist_authorization.authorize(info_hash).await?; tracing::debug!("Before: {peer:?}"); From d48272f53fad95240660657c31b2ad47b5c38ea4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 20 Feb 2025 17:43:55 +0000 Subject: [PATCH 296/802] chore: [#1303] remove explicit declaration of workspace members from Cargo.toml For packages included in the main Cargo.toml file. The application works anyway because _all [path dependencies](https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html#specifying-path-dependencies) residing in the workspace directory automatically become members_. See https://doc.rust-lang.org/cargo/reference/workspaces.html#the-members-and-exclude-fields. We have to keep `console/tracker-client` becuase it's not included directly in the main Cargo.toml as a dependency. 
--- Cargo.toml | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index dadd39ccf..d3b194ed9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -108,18 +108,7 @@ torrust-tracker-api-client = { version = "3.0.0-develop", path = "packages/track torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "packages/test-helpers" } [workspace] -members = [ - "console/tracker-client", - "contrib/bencode", - "packages/configuration", - "packages/located-error", - "packages/primitives", - "packages/test-helpers", - "packages/torrent-repository", - "packages/tracker-api-client", - "packages/tracker-client", - "packages/tracker-core", -] +members = ["console/tracker-client"] [profile.dev] debug = 1 From 265da2d565bd34c90e9fc00302e3291c351e8e83 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 09:50:37 +0000 Subject: [PATCH 297/802] refactor: [#1298] inject only logging config in logging setup --- packages/configuration/src/lib.rs | 1 + src/bootstrap/app.rs | 2 +- src/bootstrap/logging.rs | 6 +++--- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 7e384297d..8a4d3f81e 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -37,6 +37,7 @@ pub const ENV_VAR_CONFIG_TOML_PATH: &str = "TORRUST_TRACKER_CONFIG_TOML_PATH"; pub type Configuration = v2_0_0::Configuration; pub type Core = v2_0_0::core::Core; +pub type Logging = v2_0_0::logging::Logging; pub type HealthCheckApi = v2_0_0::health_check_api::HealthCheckApi; pub type HttpApi = v2_0_0::tracker_api::HttpApi; pub type HttpTracker = v2_0_0::http_tracker::HttpTracker; diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 2f4ff0e94..420f1a981 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -182,5 +182,5 @@ pub fn initialize_static() { /// See [the logging setup](crate::bootstrap::logging::setup) for more info about 
logging. #[instrument(skip(config))] pub fn initialize_logging(config: &Configuration) { - bootstrap::logging::setup(config); + bootstrap::logging::setup(&config.logging); } diff --git a/src/bootstrap/logging.rs b/src/bootstrap/logging.rs index d7a100aed..ab66822a1 100644 --- a/src/bootstrap/logging.rs +++ b/src/bootstrap/logging.rs @@ -13,15 +13,15 @@ //! Refer to the [configuration crate documentation](https://docs.rs/torrust-tracker-configuration) to know how to change log settings. use std::sync::Once; -use torrust_tracker_configuration::{Configuration, Threshold}; +use torrust_tracker_configuration::{Logging, Threshold}; use tracing::level_filters::LevelFilter; static INIT: Once = Once::new(); /// It redirects the log info to the standard output with the log threshold /// defined in the configuration. -pub fn setup(cfg: &Configuration) { - let tracing_level = map_to_tracing_level_filter(&cfg.logging.threshold); +pub fn setup(cfg: &Logging) { + let tracing_level = map_to_tracing_level_filter(&cfg.threshold); if tracing_level == LevelFilter::OFF { return; From 1bd5f0a1c76cc4cf4ef5a980508ac3ae4c65cb58 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 09:58:50 +0000 Subject: [PATCH 298/802] refactor: [#1298] move loggin setup to configration package It will be used in other workspace packages. 
--- Cargo.lock | 2 ++ packages/configuration/Cargo.toml | 2 ++ packages/configuration/src/lib.rs | 1 + .../configuration/src}/logging.rs | 3 ++- src/bootstrap/app.rs | 13 ++++++------- src/bootstrap/mod.rs | 1 - tests/common/logging.rs | 2 +- 7 files changed, 14 insertions(+), 10 deletions(-) rename {src/bootstrap => packages/configuration/src}/logging.rs (97%) diff --git a/Cargo.lock b/Cargo.lock index 5779f2ff9..981ffeba3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4547,6 +4547,8 @@ dependencies = [ "thiserror 2.0.11", "toml", "torrust-tracker-located-error", + "tracing", + "tracing-subscriber", "url", "uuid", ] diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index da04f29cd..e213f7c0c 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -24,6 +24,8 @@ serde_with = "3" thiserror = "2" toml = "0" torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } +tracing = "0" +tracing-subscriber = { version = "0", features = ["json"] } url = "2" [dev-dependencies] diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 8a4d3f81e..d12020b8c 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -4,6 +4,7 @@ //! Torrust Tracker, which is a `BitTorrent` tracker server. //! //! The current version for configuration is [`v2_0_0`]. +pub mod logging; pub mod v2_0_0; pub mod validator; diff --git a/src/bootstrap/logging.rs b/packages/configuration/src/logging.rs similarity index 97% rename from src/bootstrap/logging.rs rename to packages/configuration/src/logging.rs index ab66822a1..b8db27b8c 100644 --- a/src/bootstrap/logging.rs +++ b/packages/configuration/src/logging.rs @@ -13,9 +13,10 @@ //! Refer to the [configuration crate documentation](https://docs.rs/torrust-tracker-configuration) to know how to change log settings. 
use std::sync::Once; -use torrust_tracker_configuration::{Logging, Threshold}; use tracing::level_filters::LevelFilter; +use crate::{Logging, Threshold}; + static INIT: Once = Once::new(); /// It redirects the log info to the standard output with the log threshold diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 420f1a981..977447752 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -33,11 +33,10 @@ use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; use tokio::sync::RwLock; use torrust_tracker_clock::static_time; use torrust_tracker_configuration::validator::Validator; -use torrust_tracker_configuration::Configuration; +use torrust_tracker_configuration::{logging, Configuration, Logging}; use tracing::instrument; use super::config::initialize_configuration; -use crate::bootstrap; use crate::container::AppContainer; /// It loads the configuration from the environment and builds app container. @@ -82,7 +81,7 @@ pub fn check_seed() { #[instrument(skip())] pub fn initialize_global_services(configuration: &Configuration) { initialize_static(); - initialize_logging(configuration); + initialize_logging(&configuration.logging); } /// It initializes the IoC Container. @@ -179,8 +178,8 @@ pub fn initialize_static() { /// It initializes the log threshold, format and channel. /// -/// See [the logging setup](crate::bootstrap::logging::setup) for more info about logging. -#[instrument(skip(config))] -pub fn initialize_logging(config: &Configuration) { - bootstrap::logging::setup(&config.logging); +/// See [the logging setup](torrust_tracker_configuration::logging::setup) for more info about logging. 
+#[instrument(skip(logging_config))] +pub fn initialize_logging(logging_config: &Logging) { + logging::setup(logging_config); } diff --git a/src/bootstrap/mod.rs b/src/bootstrap/mod.rs index 22044aafd..2f7909043 100644 --- a/src/bootstrap/mod.rs +++ b/src/bootstrap/mod.rs @@ -8,4 +8,3 @@ pub mod app; pub mod config; pub mod jobs; -pub mod logging; diff --git a/tests/common/logging.rs b/tests/common/logging.rs index f04dcdc7d..564074f3e 100644 --- a/tests/common/logging.rs +++ b/tests/common/logging.rs @@ -3,7 +3,7 @@ use std::collections::VecDeque; use std::io; use std::sync::{Mutex, MutexGuard, Once, OnceLock}; -use torrust_tracker_lib::bootstrap::logging::TraceStyle; +use torrust_tracker_configuration::logging::TraceStyle; use tracing::level_filters::LevelFilter; use tracing_subscriber::fmt::MakeWriter; From 78002d7b357d4d3c2722f5e5c1cf45b32a8db643 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 10:49:14 +0000 Subject: [PATCH 299/802] refactor: [#1298] remove dependency on global app container in packages To be able to move more code from the main mod to workspace packages. Packages cannot depend on global stuff. 
--- .../axum-http-tracker-server/src/container.rs | 45 +++++++ src/app.rs | 6 +- src/bootstrap/app.rs | 77 ++++++++++- src/bootstrap/jobs/http_tracker.rs | 8 +- src/bootstrap/jobs/tracker_apis.rs | 8 +- src/container.rs | 58 ++++---- src/servers/apis/server.rs | 8 +- src/servers/udp/server/mod.rs | 40 ++++-- tests/servers/api/environment.rs | 104 ++++++++++++--- tests/servers/http/environment.rs | 125 ++++++++++++++---- tests/servers/udp/environment.rs | 102 +++++++++++--- 11 files changed, 454 insertions(+), 127 deletions(-) diff --git a/packages/axum-http-tracker-server/src/container.rs b/packages/axum-http-tracker-server/src/container.rs index c20a8f28f..339c25778 100644 --- a/packages/axum-http-tracker-server/src/container.rs +++ b/packages/axum-http-tracker-server/src/container.rs @@ -1,9 +1,15 @@ use std::sync::Arc; use bittorrent_tracker_core::announce_handler::AnnounceHandler; +use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; use bittorrent_tracker_core::authentication::service::AuthenticationService; +use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; +use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist; +use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; +use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use torrust_tracker_configuration::{Core, HttpTracker}; pub struct HttpTrackerContainer { @@ -15,3 +21,42 @@ pub struct HttpTrackerContainer { pub http_stats_event_sender: Arc>>, pub authentication_service: Arc, } + +#[must_use] +pub fn initialize_http_tracker_container( + core_config: &Arc, + http_tracker_config: &Arc, +) -> Arc { + // HTTP stats + let (http_stats_event_sender, _http_stats_repository) = 
+ bittorrent_http_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); + let http_stats_event_sender = Arc::new(http_stats_event_sender); + + let database = initialize_database(core_config); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(core_config, &in_memory_whitelist.clone())); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication_service = Arc::new(AuthenticationService::new(core_config, &in_memory_key_repository)); + + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + + let announce_handler = Arc::new(AnnounceHandler::new( + core_config, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + Arc::new(HttpTrackerContainer { + http_tracker_config: http_tracker_config.clone(), + core_config: core_config.clone(), + announce_handler: announce_handler.clone(), + scrape_handler: scrape_handler.clone(), + whitelist_authorization: whitelist_authorization.clone(), + http_stats_event_sender: http_stats_event_sender.clone(), + authentication_service: authentication_service.clone(), + }) +} diff --git a/src/app.rs b/src/app.rs index 2f712cf3a..3d2da4ff5 100644 --- a/src/app.rs +++ b/src/app.rs @@ -29,7 +29,7 @@ use torrust_tracker_configuration::Configuration; use tracing::instrument; use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; -use crate::container::{AppContainer, HttpApiContainer, UdpTrackerContainer}; +use crate::container::AppContainer; use crate::servers; /// # Panics @@ -79,7 +79,7 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> ); } else { let 
udp_tracker_config = Arc::new(udp_tracker_config.clone()); - let udp_tracker_container = Arc::new(UdpTrackerContainer::from_app_container(&udp_tracker_config, app_container)); + let udp_tracker_container = Arc::new(app_container.udp_tracker_container(&udp_tracker_config)); jobs.push(udp_tracker::start_job(udp_tracker_container, registar.give_form()).await); } @@ -111,7 +111,7 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> // Start HTTP API if let Some(http_api_config) = &config.http_api { let http_api_config = Arc::new(http_api_config.clone()); - let http_api_container = Arc::new(HttpApiContainer::from_app_container(&http_api_config, app_container)); + let http_api_container = Arc::new(app_container.http_api_container(&http_api_config)); if let Some(job) = tracker_apis::start_job(http_api_container, registar.give_form(), servers::apis::Version::V1).await { jobs.push(job); diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 977447752..6be7d0aa2 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -33,11 +33,11 @@ use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; use tokio::sync::RwLock; use torrust_tracker_clock::static_time; use torrust_tracker_configuration::validator::Validator; -use torrust_tracker_configuration::{logging, Configuration, Logging}; +use torrust_tracker_configuration::{logging, Configuration, Core, HttpApi, Logging, UdpTracker}; use tracing::instrument; use super::config::initialize_configuration; -use crate::container::AppContainer; +use crate::container::{AppContainer, HttpApiContainer, UdpTrackerContainer}; /// It loads the configuration from the environment and builds app container. 
/// @@ -155,6 +155,79 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { } } +#[must_use] +pub fn initialize_http_api_container(core_config: &Arc, http_api_config: &Arc) -> Arc { + // HTTP stats + let (_http_stats_event_sender, http_stats_repository) = + bittorrent_http_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); + let http_stats_repository = Arc::new(http_stats_repository); + + // UDP stats + let (_udp_stats_event_sender, udp_stats_repository) = + bittorrent_udp_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); + let udp_stats_repository = Arc::new(udp_stats_repository); + + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let database = initialize_database(core_config); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let keys_handler = Arc::new(KeysHandler::new( + &db_key_repository.clone(), + &in_memory_key_repository.clone(), + )); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + Arc::new(HttpApiContainer { + http_api_config: http_api_config.clone(), + core_config: core_config.clone(), + in_memory_torrent_repository: in_memory_torrent_repository.clone(), + keys_handler: keys_handler.clone(), + whitelist_manager: whitelist_manager.clone(), + ban_service: ban_service.clone(), + http_stats_repository: http_stats_repository.clone(), + udp_stats_repository: udp_stats_repository.clone(), + }) +} + +#[must_use] +pub fn initialize_udt_tracker_container( + core_config: &Arc, + udp_tracker_config: &Arc, +) -> Arc { + // UDP stats + let (udp_stats_event_sender, _udp_stats_repository) = + 
bittorrent_udp_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let database = initialize_database(core_config); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(core_config, &in_memory_whitelist.clone())); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + + let announce_handler = Arc::new(AnnounceHandler::new( + core_config, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + Arc::new(UdpTrackerContainer { + udp_tracker_config: udp_tracker_config.clone(), + core_config: core_config.clone(), + announce_handler: announce_handler.clone(), + scrape_handler: scrape_handler.clone(), + whitelist_authorization: whitelist_authorization.clone(), + udp_stats_event_sender: udp_stats_event_sender.clone(), + ban_service: ban_service.clone(), + }) +} + /// It initializes the application static values. 
/// /// These values are accessible throughout the entire application: diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 2052bf50b..471f74b8b 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -77,24 +77,24 @@ async fn start_v1( mod tests { use std::sync::Arc; + use torrust_axum_http_tracker_server::container::initialize_http_tracker_container; use torrust_axum_http_tracker_server::Version; use torrust_server_lib::registar::Registar; use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; + use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::http_tracker::start_job; #[tokio::test] async fn it_should_start_http_tracker() { let cfg = Arc::new(ephemeral_public()); + let core_config = Arc::new(cfg.core.clone()); let http_tracker = cfg.http_trackers.clone().expect("missing HTTP tracker configuration"); let http_tracker_config = Arc::new(http_tracker[0].clone()); initialize_global_services(&cfg); - let app_container = Arc::new(initialize_app_container(&cfg)); - - let http_tracker_container = Arc::new(app_container.http_tracker_container(&http_tracker_config)); + let http_tracker_container = initialize_http_tracker_container(&core_config, &http_tracker_config); let version = Version::V1; diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index df736f23f..8a36d74dc 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -100,21 +100,19 @@ mod tests { use torrust_server_lib::registar::Registar; use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; + use crate::bootstrap::app::{initialize_global_services, initialize_http_api_container}; use crate::bootstrap::jobs::tracker_apis::start_job; - use 
crate::container::HttpApiContainer; use crate::servers::apis::Version; #[tokio::test] async fn it_should_start_http_tracker() { let cfg = Arc::new(ephemeral_public()); + let core_config = Arc::new(cfg.core.clone()); let http_api_config = Arc::new(cfg.http_api.clone().unwrap()); initialize_global_services(&cfg); - let app_container = Arc::new(initialize_app_container(&cfg)); - - let http_api_container = Arc::new(HttpApiContainer::from_app_container(&http_api_config, &app_container)); + let http_api_container = initialize_http_api_container(&core_config, &http_api_config); let version = Version::V1; diff --git a/src/container.rs b/src/container.rs index 57e24334b..881f50d2d 100644 --- a/src/container.rs +++ b/src/container.rs @@ -50,6 +50,33 @@ impl AppContainer { authentication_service: self.authentication_service.clone(), } } + + #[must_use] + pub fn udp_tracker_container(&self, udp_tracker_config: &Arc) -> UdpTrackerContainer { + UdpTrackerContainer { + udp_tracker_config: udp_tracker_config.clone(), + core_config: self.core_config.clone(), + announce_handler: self.announce_handler.clone(), + scrape_handler: self.scrape_handler.clone(), + whitelist_authorization: self.whitelist_authorization.clone(), + udp_stats_event_sender: self.udp_stats_event_sender.clone(), + ban_service: self.ban_service.clone(), + } + } + + #[must_use] + pub fn http_api_container(&self, http_api_config: &Arc) -> HttpApiContainer { + HttpApiContainer { + http_api_config: http_api_config.clone(), + core_config: self.core_config.clone(), + in_memory_torrent_repository: self.in_memory_torrent_repository.clone(), + keys_handler: self.keys_handler.clone(), + whitelist_manager: self.whitelist_manager.clone(), + ban_service: self.ban_service.clone(), + http_stats_repository: self.http_stats_repository.clone(), + udp_stats_repository: self.udp_stats_repository.clone(), + } + } } pub struct UdpTrackerContainer { @@ -62,21 +89,6 @@ pub struct UdpTrackerContainer { pub ban_service: Arc>, } -impl 
UdpTrackerContainer { - #[must_use] - pub fn from_app_container(udp_tracker_config: &Arc, app_container: &Arc) -> Self { - Self { - udp_tracker_config: udp_tracker_config.clone(), - core_config: app_container.core_config.clone(), - announce_handler: app_container.announce_handler.clone(), - scrape_handler: app_container.scrape_handler.clone(), - whitelist_authorization: app_container.whitelist_authorization.clone(), - udp_stats_event_sender: app_container.udp_stats_event_sender.clone(), - ban_service: app_container.ban_service.clone(), - } - } -} - pub struct HttpApiContainer { pub core_config: Arc, pub http_api_config: Arc, @@ -87,19 +99,3 @@ pub struct HttpApiContainer { pub http_stats_repository: Arc, pub udp_stats_repository: Arc, } - -impl HttpApiContainer { - #[must_use] - pub fn from_app_container(http_api_config: &Arc, app_container: &Arc) -> Self { - Self { - http_api_config: http_api_config.clone(), - core_config: app_container.core_config.clone(), - in_memory_torrent_repository: app_container.in_memory_torrent_repository.clone(), - keys_handler: app_container.keys_handler.clone(), - whitelist_manager: app_container.whitelist_manager.clone(), - ban_service: app_container.ban_service.clone(), - http_stats_repository: app_container.http_stats_repository.clone(), - udp_stats_repository: app_container.udp_stats_repository.clone(), - } - } -} diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 187969f8d..cf2c2e96b 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -298,19 +298,17 @@ mod tests { use torrust_server_lib::registar::Registar; use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; - use crate::container::HttpApiContainer; + use crate::bootstrap::app::{initialize_global_services, initialize_http_api_container}; use crate::servers::apis::server::{ApiServer, Launcher}; #[tokio::test] async fn 
it_should_be_able_to_start_and_stop() { let cfg = Arc::new(ephemeral_public()); + let core_config = Arc::new(cfg.core.clone()); let http_api_config = Arc::new(cfg.http_api.clone().unwrap()); initialize_global_services(&cfg); - let app_container = Arc::new(initialize_app_container(&cfg)); - let bind_to = http_api_config.bind_address; let tls = make_rust_tls(&http_api_config.tsl_config) @@ -323,7 +321,7 @@ mod tests { let register = &Registar::default(); - let http_api_container = Arc::new(HttpApiContainer::from_app_container(&http_api_config, &app_container)); + let http_api_container = initialize_http_api_container(&core_config, &http_api_config); let started = stopped .start(http_api_container, register.give_form(), access_tokens) diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index 85940e853..d40f3a97f 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -62,17 +62,23 @@ mod tests { use super::spawner::Spawner; use super::Server; - use crate::bootstrap::app::{initialize_app_container, initialize_global_services}; - use crate::container::UdpTrackerContainer; + use crate::bootstrap::app::{initialize_global_services, initialize_udt_tracker_container}; #[tokio::test] async fn it_should_be_able_to_start_and_stop() { let cfg = Arc::new(ephemeral_public()); + let core_config = Arc::new(cfg.core.clone()); + let udp_tracker_config = Arc::new( + cfg.udp_trackers + .clone() + .expect("no UDP services array config provided") + .first() + .expect("no UDP test service config provided") + .clone(), + ); initialize_global_services(&cfg); - let app_container = Arc::new(initialize_app_container(&cfg)); - let udp_trackers = cfg.udp_trackers.clone().expect("missing UDP trackers configuration"); let config = &udp_trackers[0]; let bind_to = config.bind_address; @@ -80,8 +86,7 @@ mod tests { let stopped = Server::new(Spawner::new(bind_to)); - let udp_tracker_config = Arc::new(config.clone()); - let udp_tracker_container = 
Arc::new(UdpTrackerContainer::from_app_container(&udp_tracker_config, &app_container)); + let udp_tracker_container = initialize_udt_tracker_container(&core_config, &udp_tracker_config); let started = stopped .start(udp_tracker_container, register.give_form(), config.cookie_lifetime) @@ -98,22 +103,31 @@ mod tests { #[tokio::test] async fn it_should_be_able_to_start_and_stop_with_wait() { let cfg = Arc::new(ephemeral_public()); + let core_config = Arc::new(cfg.core.clone()); + let udp_tracker_config = Arc::new( + cfg.udp_trackers + .clone() + .expect("no UDP services array config provided") + .first() + .expect("no UDP test service config provided") + .clone(), + ); initialize_global_services(&cfg); - let app_container = Arc::new(initialize_app_container(&cfg)); - - let config = cfg.udp_trackers.as_ref().unwrap().first().unwrap(); - let bind_to = config.bind_address; + let bind_to = udp_tracker_config.bind_address; let register = &Registar::default(); let stopped = Server::new(Spawner::new(bind_to)); - let udp_tracker_config = Arc::new(config.clone()); - let udp_tracker_container = Arc::new(UdpTrackerContainer::from_app_container(&udp_tracker_config, &app_container)); + let udp_tracker_container = initialize_udt_tracker_container(&core_config, &udp_tracker_config); let started = stopped - .start(udp_tracker_container, register.give_form(), config.cookie_lifetime) + .start( + udp_tracker_container, + register.give_form(), + udp_tracker_config.cookie_lifetime, + ) .await .expect("it should start the server"); diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index b899c9f02..7cf088568 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -2,15 +2,24 @@ use std::net::SocketAddr; use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::authentication::handler::KeysHandler; +use 
bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; +use bittorrent_tracker_core::authentication::key::repository::persisted::DatabaseKeyRepository; use bittorrent_tracker_core::authentication::service::AuthenticationService; +use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::databases::Database; +use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; +use bittorrent_tracker_core::whitelist::setup::initialize_whitelist_manager; +use bittorrent_udp_tracker_core::services::banning::BanService; +use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; use futures::executor::block_on; +use tokio::sync::RwLock; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; -use torrust_tracker_configuration::Configuration; -use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; +use torrust_tracker_configuration::{Configuration, HttpApi}; +use torrust_tracker_lib::bootstrap::app::initialize_global_services; use torrust_tracker_lib::container::HttpApiContainer; use torrust_tracker_lib::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; use torrust_tracker_primitives::peer; @@ -46,33 +55,20 @@ impl Environment { pub fn new(configuration: &Arc) -> Self { initialize_global_services(configuration); - let app_container = initialize_app_container(configuration); + let env_container = EnvContainer::initialize(configuration); - let http_api_config = Arc::new(configuration.http_api.clone().expect("missing API configuration")); + let bind_to = env_container.http_api_config.bind_address; - let bind_to = http_api_config.bind_address; - - let tls = block_on(make_rust_tls(&http_api_config.tsl_config)).map(|tls| 
tls.expect("tls config failed")); + let tls = block_on(make_rust_tls(&env_container.http_api_config.tsl_config)).map(|tls| tls.expect("tls config failed")); let server = ApiServer::new(Launcher::new(bind_to, tls)); - let http_api_container = Arc::new(HttpApiContainer { - http_api_config: http_api_config.clone(), - core_config: app_container.core_config.clone(), - in_memory_torrent_repository: app_container.in_memory_torrent_repository.clone(), - keys_handler: app_container.keys_handler.clone(), - whitelist_manager: app_container.whitelist_manager.clone(), - ban_service: app_container.ban_service.clone(), - http_stats_repository: app_container.http_stats_repository.clone(), - udp_stats_repository: app_container.udp_stats_repository.clone(), - }); - Self { - http_api_container, + http_api_container: env_container.http_api_container, - database: app_container.database.clone(), - authentication_service: app_container.authentication_service.clone(), - in_memory_whitelist: app_container.in_memory_whitelist.clone(), + database: env_container.database.clone(), + authentication_service: env_container.authentication_service.clone(), + in_memory_whitelist: env_container.in_memory_whitelist.clone(), registar: Registar::default(), server, @@ -130,3 +126,67 @@ impl Environment { self.server.state.local_addr } } + +pub struct EnvContainer { + pub http_api_config: Arc, + pub http_api_container: Arc, + pub database: Arc>, + pub authentication_service: Arc, + pub in_memory_whitelist: Arc, +} + +impl EnvContainer { + pub fn initialize(configuration: &Configuration) -> Self { + let core_config = Arc::new(configuration.core.clone()); + let http_api_config = Arc::new( + configuration + .http_api + .clone() + .expect("missing HTTP API configuration") + .clone(), + ); + + // HTTP stats + let (_http_stats_event_sender, http_stats_repository) = + bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); + let http_stats_repository = 
Arc::new(http_stats_repository); + + // UDP stats + let (_udp_stats_event_sender, udp_stats_repository) = + bittorrent_udp_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); + let udp_stats_repository = Arc::new(udp_stats_repository); + + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let database = initialize_database(&configuration.core); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + + let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication_service = Arc::new(AuthenticationService::new(&configuration.core, &in_memory_key_repository)); + let keys_handler = Arc::new(KeysHandler::new( + &db_key_repository.clone(), + &in_memory_key_repository.clone(), + )); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + let http_api_container = Arc::new(HttpApiContainer { + http_api_config: http_api_config.clone(), + core_config: core_config.clone(), + in_memory_torrent_repository: in_memory_torrent_repository.clone(), + keys_handler: keys_handler.clone(), + whitelist_manager: whitelist_manager.clone(), + ban_service: ban_service.clone(), + http_stats_repository: http_stats_repository.clone(), + udp_stats_repository: udp_stats_repository.clone(), + }); + + Self { + http_api_config, + http_api_container, + database, + authentication_service, + in_memory_whitelist, + } + } +} diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 4afb262d7..a0164eccc 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -1,17 +1,27 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::announce_handler::AnnounceHandler; use 
bittorrent_tracker_core::authentication::handler::KeysHandler; +use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; +use bittorrent_tracker_core::authentication::key::repository::persisted::DatabaseKeyRepository; +use bittorrent_tracker_core::authentication::service::AuthenticationService; +use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::databases::Database; +use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::manager::WhitelistManager; +use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; +use bittorrent_tracker_core::whitelist::setup::initialize_whitelist_manager; use futures::executor::block_on; use torrust_axum_http_tracker_server::container::HttpTrackerContainer; use torrust_axum_http_tracker_server::server::{HttpServer, Launcher, Running, Stopped}; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; -use torrust_tracker_configuration::Configuration; -use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; +use torrust_tracker_configuration::{Configuration, Core, HttpTracker}; +use torrust_tracker_lib::bootstrap::app::initialize_global_services; use torrust_tracker_primitives::peer; pub struct Environment { @@ -39,38 +49,33 @@ impl Environment { pub fn new(configuration: &Arc) -> Self { initialize_global_services(configuration); - let app_container = initialize_app_container(configuration); + let env_container = EnvContainer::initialize(configuration); - let http_tracker = configuration - .http_trackers - .clone() - .expect("missing HTTP tracker 
configuration"); - let http_tracker_config = Arc::new(http_tracker[0].clone()); - - let bind_to = http_tracker_config.bind_address; + let bind_to = env_container.http_tracker_config.bind_address; - let tls = block_on(make_rust_tls(&http_tracker_config.tsl_config)).map(|tls| tls.expect("tls config failed")); + let tls = + block_on(make_rust_tls(&env_container.http_tracker_config.tsl_config)).map(|tls| tls.expect("tls config failed")); let server = HttpServer::new(Launcher::new(bind_to, tls)); let http_tracker_container = Arc::new(HttpTrackerContainer { - core_config: app_container.core_config.clone(), - http_tracker_config: http_tracker_config.clone(), - announce_handler: app_container.announce_handler.clone(), - scrape_handler: app_container.scrape_handler.clone(), - whitelist_authorization: app_container.whitelist_authorization.clone(), - http_stats_event_sender: app_container.http_stats_event_sender.clone(), - authentication_service: app_container.authentication_service.clone(), + core_config: env_container.core_config.clone(), + http_tracker_config: env_container.http_tracker_config.clone(), + announce_handler: env_container.http_tracker_container.announce_handler.clone(), + scrape_handler: env_container.http_tracker_container.scrape_handler.clone(), + whitelist_authorization: env_container.http_tracker_container.whitelist_authorization.clone(), + http_stats_event_sender: env_container.http_tracker_container.http_stats_event_sender.clone(), + authentication_service: env_container.http_tracker_container.authentication_service.clone(), }); Self { http_tracker_container, - database: app_container.database.clone(), - in_memory_torrent_repository: app_container.in_memory_torrent_repository.clone(), - keys_handler: app_container.keys_handler.clone(), - http_stats_repository: app_container.http_stats_repository.clone(), - whitelist_manager: app_container.whitelist_manager.clone(), + database: env_container.database.clone(), + in_memory_torrent_repository: 
env_container.in_memory_torrent_repository.clone(), + keys_handler: env_container.keys_handler.clone(), + http_stats_repository: env_container.http_stats_repository.clone(), + whitelist_manager: env_container.whitelist_manager.clone(), registar: Registar::default(), server, @@ -122,3 +127,77 @@ impl Environment { &self.server.state.binding } } + +pub struct EnvContainer { + pub core_config: Arc, + pub http_tracker_config: Arc, + pub http_tracker_container: Arc, + + pub database: Arc>, + pub in_memory_torrent_repository: Arc, + pub keys_handler: Arc, + pub http_stats_repository: Arc, + pub whitelist_manager: Arc, +} + +impl EnvContainer { + pub fn initialize(configuration: &Configuration) -> Self { + let core_config = Arc::new(configuration.core.clone()); + let http_tracker_config = configuration + .http_trackers + .clone() + .expect("missing HTTP tracker configuration"); + let http_tracker_config = Arc::new(http_tracker_config[0].clone()); + + // HTTP stats + let (http_stats_event_sender, http_stats_repository) = + bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); + let http_stats_event_sender = Arc::new(http_stats_event_sender); + let http_stats_repository = Arc::new(http_stats_repository); + + let database = initialize_database(&configuration.core); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&configuration.core, &in_memory_whitelist.clone())); + let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication_service = Arc::new(AuthenticationService::new(&configuration.core, &in_memory_key_repository)); + let keys_handler = Arc::new(KeysHandler::new( + &db_key_repository.clone(), + 
&in_memory_key_repository.clone(), + )); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + + let announce_handler = Arc::new(AnnounceHandler::new( + &configuration.core, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + let http_tracker_container = Arc::new(HttpTrackerContainer { + http_tracker_config: http_tracker_config.clone(), + core_config: core_config.clone(), + announce_handler: announce_handler.clone(), + scrape_handler: scrape_handler.clone(), + whitelist_authorization: whitelist_authorization.clone(), + http_stats_event_sender: http_stats_event_sender.clone(), + authentication_service: authentication_service.clone(), + }); + + Self { + core_config, + http_tracker_config, + http_tracker_container, + + database, + in_memory_torrent_repository, + keys_handler, + http_stats_repository, + whitelist_manager, + } + } +} diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 67e119bb4..241623732 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -2,12 +2,20 @@ use std::net::SocketAddr; use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::announce_handler::AnnounceHandler; +use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::databases::Database; +use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_udp_tracker_core::statistics; +use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; +use 
bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; +use bittorrent_udp_tracker_core::services::banning::BanService; +use bittorrent_udp_tracker_core::{statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; +use tokio::sync::RwLock; use torrust_server_lib::registar::Registar; -use torrust_tracker_configuration::{Configuration, DEFAULT_TIMEOUT}; -use torrust_tracker_lib::bootstrap::app::{initialize_app_container, initialize_global_services}; +use torrust_tracker_configuration::{Configuration, Core, UdpTracker, DEFAULT_TIMEOUT}; +use torrust_tracker_lib::bootstrap::app::initialize_global_services; use torrust_tracker_lib::container::UdpTrackerContainer; use torrust_tracker_lib::servers::udp::server::spawner::Spawner; use torrust_tracker_lib::servers::udp::server::states::{Running, Stopped}; @@ -44,32 +52,28 @@ impl Environment { pub fn new(configuration: &Arc) -> Self { initialize_global_services(configuration); - let app_container = initialize_app_container(configuration); + let env_container = EnvContainer::initialize(configuration); - let udp_tracker_configurations = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); - - let udp_tracker_config = Arc::new(udp_tracker_configurations[0].clone()); - - let bind_to = udp_tracker_config.bind_address; + let bind_to = env_container.udp_tracker_config.bind_address; let server = Server::new(Spawner::new(bind_to)); let udp_tracker_container = Arc::new(UdpTrackerContainer { - udp_tracker_config: udp_tracker_config.clone(), - core_config: app_container.core_config.clone(), - announce_handler: app_container.announce_handler.clone(), - scrape_handler: app_container.scrape_handler.clone(), - whitelist_authorization: app_container.whitelist_authorization.clone(), - udp_stats_event_sender: app_container.udp_stats_event_sender.clone(), - ban_service: app_container.ban_service.clone(), + udp_tracker_config: env_container.udp_tracker_config.clone(), + core_config: 
env_container.core_config.clone(), + announce_handler: env_container.udp_tracker_container.announce_handler.clone(), + scrape_handler: env_container.udp_tracker_container.scrape_handler.clone(), + whitelist_authorization: env_container.udp_tracker_container.whitelist_authorization.clone(), + udp_stats_event_sender: env_container.udp_tracker_container.udp_stats_event_sender.clone(), + ban_service: env_container.udp_tracker_container.ban_service.clone(), }); Self { udp_tracker_container, - database: app_container.database.clone(), - in_memory_torrent_repository: app_container.in_memory_torrent_repository.clone(), - udp_stats_repository: app_container.udp_stats_repository.clone(), + database: env_container.database.clone(), + in_memory_torrent_repository: env_container.in_memory_torrent_repository.clone(), + udp_stats_repository: env_container.udp_stats_repository.clone(), registar: Registar::default(), server, @@ -127,6 +131,66 @@ impl Environment { } } +pub struct EnvContainer { + pub core_config: Arc, + pub udp_tracker_config: Arc, + pub udp_tracker_container: Arc, + + pub database: Arc>, + pub in_memory_torrent_repository: Arc, + pub udp_stats_repository: Arc, +} + +impl EnvContainer { + pub fn initialize(configuration: &Configuration) -> Self { + let core_config = Arc::new(configuration.core.clone()); + let udp_tracker_configurations = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); + let udp_tracker_config = Arc::new(udp_tracker_configurations[0].clone()); + + // UDP stats + let (udp_stats_event_sender, udp_stats_repository) = + bittorrent_udp_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + let udp_stats_repository = Arc::new(udp_stats_repository); + + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let database = initialize_database(&configuration.core); + let in_memory_whitelist = 
Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&configuration.core, &in_memory_whitelist.clone())); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + + let announce_handler = Arc::new(AnnounceHandler::new( + &configuration.core, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + let udp_tracker_container = Arc::new(UdpTrackerContainer { + udp_tracker_config: udp_tracker_config.clone(), + core_config: core_config.clone(), + announce_handler: announce_handler.clone(), + scrape_handler: scrape_handler.clone(), + whitelist_authorization: whitelist_authorization.clone(), + udp_stats_event_sender: udp_stats_event_sender.clone(), + ban_service: ban_service.clone(), + }); + + Self { + core_config, + udp_tracker_config, + udp_tracker_container, + + database, + in_memory_torrent_repository, + udp_stats_repository, + } + } +} + #[cfg(test)] mod tests { use std::time::Duration; From 9ba5cdd35daa2019e73189bf3fd3fff6bf64e16c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 11:49:26 +0000 Subject: [PATCH 300/802] refactor: [#1298] inine function --- src/bootstrap/app.rs | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 6be7d0aa2..bbc7fd7bc 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -33,7 +33,7 @@ use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; use tokio::sync::RwLock; use torrust_tracker_clock::static_time; use torrust_tracker_configuration::validator::Validator; -use torrust_tracker_configuration::{logging, Configuration, Core, HttpApi, Logging, UdpTracker}; +use 
torrust_tracker_configuration::{logging, Configuration, Core, HttpApi, UdpTracker}; use tracing::instrument; use super::config::initialize_configuration; @@ -81,7 +81,7 @@ pub fn check_seed() { #[instrument(skip())] pub fn initialize_global_services(configuration: &Configuration) { initialize_static(); - initialize_logging(&configuration.logging); + logging::setup(&configuration.logging); } /// It initializes the IoC Container. @@ -248,11 +248,3 @@ pub fn initialize_static() { // Initialize the Zeroed Cipher lazy_static::initialize(&ephemeral_instance_keys::ZEROED_TEST_CIPHER_BLOWFISH); } - -/// It initializes the log threshold, format and channel. -/// -/// See [the logging setup](torrust_tracker_configuration::logging::setup) for more info about logging. -#[instrument(skip(logging_config))] -pub fn initialize_logging(logging_config: &Logging) { - logging::setup(logging_config); -} From bdec261cfac6a252dfdc846b73e4183808b2e6f8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 11:57:23 +0000 Subject: [PATCH 301/802] refactor: [#1298] move functions to container mod --- src/bootstrap/app.rs | 166 +---------------------------- src/bootstrap/jobs/tracker_apis.rs | 3 +- src/container.rs | 151 +++++++++++++++++++++++++- src/servers/apis/server.rs | 3 +- src/servers/udp/server/mod.rs | 3 +- 5 files changed, 157 insertions(+), 169 deletions(-) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index bbc7fd7bc..ec09edd51 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -11,33 +11,15 @@ //! 2. Initialize static variables. //! 3. Initialize logging. //! 4. Initialize the domain tracker. 
-use std::sync::Arc; - -use bittorrent_tracker_core::announce_handler::AnnounceHandler; -use bittorrent_tracker_core::authentication::handler::KeysHandler; -use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; -use bittorrent_tracker_core::authentication::key::repository::persisted::DatabaseKeyRepository; -use bittorrent_tracker_core::authentication::service; -use bittorrent_tracker_core::databases::setup::initialize_database; -use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_tracker_core::torrent::manager::TorrentsManager; -use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; -use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; -use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; -use bittorrent_tracker_core::whitelist::setup::initialize_whitelist_manager; use bittorrent_udp_tracker_core::crypto::ephemeral_instance_keys; use bittorrent_udp_tracker_core::crypto::keys::{self, Keeper as _}; -use bittorrent_udp_tracker_core::services::banning::BanService; -use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; -use tokio::sync::RwLock; use torrust_tracker_clock::static_time; use torrust_tracker_configuration::validator::Validator; -use torrust_tracker_configuration::{logging, Configuration, Core, HttpApi, UdpTracker}; +use torrust_tracker_configuration::{logging, Configuration}; use tracing::instrument; use super::config::initialize_configuration; -use crate::container::{AppContainer, HttpApiContainer, UdpTrackerContainer}; +use crate::container::{initialize_app_container, AppContainer}; /// It loads the configuration from the environment and builds app container. 
/// @@ -84,150 +66,6 @@ pub fn initialize_global_services(configuration: &Configuration) { logging::setup(&configuration.logging); } -/// It initializes the IoC Container. -#[instrument(skip())] -pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { - let core_config = Arc::new(configuration.core.clone()); - - // HTTP stats - let (http_stats_event_sender, http_stats_repository) = - bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); - let http_stats_event_sender = Arc::new(http_stats_event_sender); - let http_stats_repository = Arc::new(http_stats_repository); - - // UDP stats - let (udp_stats_event_sender, udp_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - let udp_stats_repository = Arc::new(udp_stats_repository); - - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let database = initialize_database(&configuration.core); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&configuration.core, &in_memory_whitelist.clone())); - let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); - let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(service::AuthenticationService::new( - &configuration.core, - &in_memory_key_repository, - )); - let keys_handler = Arc::new(KeysHandler::new( - &db_key_repository.clone(), - &in_memory_key_repository.clone(), - )); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - - let 
torrents_manager = Arc::new(TorrentsManager::new( - &configuration.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let announce_handler = Arc::new(AnnounceHandler::new( - &configuration.core, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - AppContainer { - core_config, - database, - announce_handler, - scrape_handler, - keys_handler, - authentication_service, - in_memory_whitelist, - whitelist_authorization, - ban_service, - http_stats_event_sender, - udp_stats_event_sender, - http_stats_repository, - udp_stats_repository, - whitelist_manager, - in_memory_torrent_repository, - db_torrent_repository, - torrents_manager, - } -} - -#[must_use] -pub fn initialize_http_api_container(core_config: &Arc, http_api_config: &Arc) -> Arc { - // HTTP stats - let (_http_stats_event_sender, http_stats_repository) = - bittorrent_http_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); - let http_stats_repository = Arc::new(http_stats_repository); - - // UDP stats - let (_udp_stats_event_sender, udp_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); - let udp_stats_repository = Arc::new(udp_stats_repository); - - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let database = initialize_database(core_config); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); - let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let keys_handler = Arc::new(KeysHandler::new( - &db_key_repository.clone(), - &in_memory_key_repository.clone(), - )); - let 
in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - Arc::new(HttpApiContainer { - http_api_config: http_api_config.clone(), - core_config: core_config.clone(), - in_memory_torrent_repository: in_memory_torrent_repository.clone(), - keys_handler: keys_handler.clone(), - whitelist_manager: whitelist_manager.clone(), - ban_service: ban_service.clone(), - http_stats_repository: http_stats_repository.clone(), - udp_stats_repository: udp_stats_repository.clone(), - }) -} - -#[must_use] -pub fn initialize_udt_tracker_container( - core_config: &Arc, - udp_tracker_config: &Arc, -) -> Arc { - // UDP stats - let (udp_stats_event_sender, _udp_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let database = initialize_database(core_config); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(WhitelistAuthorization::new(core_config, &in_memory_whitelist.clone())); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - - let announce_handler = Arc::new(AnnounceHandler::new( - core_config, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - Arc::new(UdpTrackerContainer { - udp_tracker_config: udp_tracker_config.clone(), - core_config: core_config.clone(), - announce_handler: announce_handler.clone(), - scrape_handler: scrape_handler.clone(), - whitelist_authorization: whitelist_authorization.clone(), - udp_stats_event_sender: udp_stats_event_sender.clone(), - ban_service: ban_service.clone(), 
- }) -} - /// It initializes the application static values. /// /// These values are accessible throughout the entire application: diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 8a36d74dc..66152905a 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -100,8 +100,9 @@ mod tests { use torrust_server_lib::registar::Registar; use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::{initialize_global_services, initialize_http_api_container}; + use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::tracker_apis::start_job; + use crate::container::initialize_http_api_container; use crate::servers::apis::Version; #[tokio::test] diff --git a/src/container.rs b/src/container.rs index 881f50d2d..87f001b65 100644 --- a/src/container.rs +++ b/src/container.rs @@ -2,20 +2,26 @@ use std::sync::Arc; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::authentication::handler::KeysHandler; +use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; +use bittorrent_tracker_core::authentication::key::repository::persisted::DatabaseKeyRepository; use bittorrent_tracker_core::authentication::service::AuthenticationService; +use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::databases::Database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use bittorrent_tracker_core::torrent::manager::TorrentsManager; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist; +use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::manager::WhitelistManager; use 
bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; +use bittorrent_tracker_core::whitelist::setup::initialize_whitelist_manager; use bittorrent_udp_tracker_core::services::banning::BanService; -use bittorrent_udp_tracker_core::{self}; +use bittorrent_udp_tracker_core::{self, MAX_CONNECTION_ID_ERRORS_PER_IP}; use tokio::sync::RwLock; use torrust_axum_http_tracker_server::container::HttpTrackerContainer; -use torrust_tracker_configuration::{Core, HttpApi, HttpTracker, UdpTracker}; +use torrust_tracker_configuration::{Configuration, Core, HttpApi, HttpTracker, UdpTracker}; +use tracing::instrument; pub struct AppContainer { pub core_config: Arc, @@ -99,3 +105,144 @@ pub struct HttpApiContainer { pub http_stats_repository: Arc, pub udp_stats_repository: Arc, } + +/// It initializes the IoC Container. +#[instrument(skip())] +pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { + let core_config = Arc::new(configuration.core.clone()); + + // HTTP stats + let (http_stats_event_sender, http_stats_repository) = + bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); + let http_stats_event_sender = Arc::new(http_stats_event_sender); + let http_stats_repository = Arc::new(http_stats_repository); + + // UDP stats + let (udp_stats_event_sender, udp_stats_repository) = + bittorrent_udp_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + let udp_stats_repository = Arc::new(udp_stats_repository); + + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let database = initialize_database(&configuration.core); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&configuration.core, &in_memory_whitelist.clone())); + let whitelist_manager = 
initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication_service = Arc::new(AuthenticationService::new(&configuration.core, &in_memory_key_repository)); + let keys_handler = Arc::new(KeysHandler::new( + &db_key_repository.clone(), + &in_memory_key_repository.clone(), + )); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + + let torrents_manager = Arc::new(TorrentsManager::new( + &configuration.core, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let announce_handler = Arc::new(AnnounceHandler::new( + &configuration.core, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + AppContainer { + core_config, + database, + announce_handler, + scrape_handler, + keys_handler, + authentication_service, + in_memory_whitelist, + whitelist_authorization, + ban_service, + http_stats_event_sender, + udp_stats_event_sender, + http_stats_repository, + udp_stats_repository, + whitelist_manager, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + } +} + +#[must_use] +pub fn initialize_http_api_container(core_config: &Arc, http_api_config: &Arc) -> Arc { + // HTTP stats + let (_http_stats_event_sender, http_stats_repository) = + bittorrent_http_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); + let http_stats_repository = Arc::new(http_stats_repository); + + // UDP stats + let (_udp_stats_event_sender, udp_stats_repository) = + bittorrent_udp_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); + let 
udp_stats_repository = Arc::new(udp_stats_repository); + + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let database = initialize_database(core_config); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let keys_handler = Arc::new(KeysHandler::new( + &db_key_repository.clone(), + &in_memory_key_repository.clone(), + )); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + + Arc::new(HttpApiContainer { + http_api_config: http_api_config.clone(), + core_config: core_config.clone(), + in_memory_torrent_repository: in_memory_torrent_repository.clone(), + keys_handler: keys_handler.clone(), + whitelist_manager: whitelist_manager.clone(), + ban_service: ban_service.clone(), + http_stats_repository: http_stats_repository.clone(), + udp_stats_repository: udp_stats_repository.clone(), + }) +} + +#[must_use] +pub fn initialize_udt_tracker_container( + core_config: &Arc, + udp_tracker_config: &Arc, +) -> Arc { + // UDP stats + let (udp_stats_event_sender, _udp_stats_repository) = + bittorrent_udp_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let database = initialize_database(core_config); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(core_config, &in_memory_whitelist.clone())); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = 
Arc::new(DatabasePersistentTorrentRepository::new(&database)); + + let announce_handler = Arc::new(AnnounceHandler::new( + core_config, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + Arc::new(UdpTrackerContainer { + udp_tracker_config: udp_tracker_config.clone(), + core_config: core_config.clone(), + announce_handler: announce_handler.clone(), + scrape_handler: scrape_handler.clone(), + whitelist_authorization: whitelist_authorization.clone(), + udp_stats_event_sender: udp_stats_event_sender.clone(), + ban_service: ban_service.clone(), + }) +} diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index cf2c2e96b..42f16ab77 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -298,7 +298,8 @@ mod tests { use torrust_server_lib::registar::Registar; use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::{initialize_global_services, initialize_http_api_container}; + use crate::bootstrap::app::initialize_global_services; + use crate::container::initialize_http_api_container; use crate::servers::apis::server::{ApiServer, Launcher}; #[tokio::test] diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index d40f3a97f..8e14bc8be 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -62,7 +62,8 @@ mod tests { use super::spawner::Spawner; use super::Server; - use crate::bootstrap::app::{initialize_global_services, initialize_udt_tracker_container}; + use crate::bootstrap::app::initialize_global_services; + use crate::container::initialize_udt_tracker_container; #[tokio::test] async fn it_should_be_able_to_start_and_stop() { From 9f18c6e498f6f3324fb20ea4b42e7b84ac1ddc64 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 12:27:15 +0000 Subject: [PATCH 302/802] refactor: [#1298] 
extract TrackerCoreContainer --- packages/tracker-core/src/container.rs | 84 +++++++++++++++++++ packages/tracker-core/src/lib.rs | 1 + src/container.rs | 111 +++++++------------------ 3 files changed, 117 insertions(+), 79 deletions(-) create mode 100644 packages/tracker-core/src/container.rs diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs new file mode 100644 index 000000000..9f4d23802 --- /dev/null +++ b/packages/tracker-core/src/container.rs @@ -0,0 +1,84 @@ +use std::sync::Arc; + +use torrust_tracker_configuration::Core; + +use crate::announce_handler::AnnounceHandler; +use crate::authentication::handler::KeysHandler; +use crate::authentication::key::repository::in_memory::InMemoryKeyRepository; +use crate::authentication::key::repository::persisted::DatabaseKeyRepository; +use crate::authentication::service::AuthenticationService; +use crate::databases::setup::initialize_database; +use crate::databases::Database; +use crate::scrape_handler::ScrapeHandler; +use crate::torrent::manager::TorrentsManager; +use crate::torrent::repository::in_memory::InMemoryTorrentRepository; +use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use crate::whitelist; +use crate::whitelist::authorization::WhitelistAuthorization; +use crate::whitelist::manager::WhitelistManager; +use crate::whitelist::repository::in_memory::InMemoryWhitelist; +use crate::whitelist::setup::initialize_whitelist_manager; + +pub struct TrackerCoreContainer { + pub core_config: Arc, + pub database: Arc>, + pub announce_handler: Arc, + pub scrape_handler: Arc, + pub keys_handler: Arc, + pub authentication_service: Arc, + pub in_memory_whitelist: Arc, + pub whitelist_authorization: Arc, + pub whitelist_manager: Arc, + pub in_memory_torrent_repository: Arc, + pub db_torrent_repository: Arc, + pub torrents_manager: Arc, +} + +impl TrackerCoreContainer { + #[must_use] + pub fn initialize(core_config: &Arc) -> Self { + let database = 
initialize_database(core_config); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(core_config, &in_memory_whitelist.clone())); + let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); + let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication_service = Arc::new(AuthenticationService::new(core_config, &in_memory_key_repository)); + let keys_handler = Arc::new(KeysHandler::new( + &db_key_repository.clone(), + &in_memory_key_repository.clone(), + )); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + + let torrents_manager = Arc::new(TorrentsManager::new( + core_config, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let announce_handler = Arc::new(AnnounceHandler::new( + core_config, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + Self { + core_config: core_config.clone(), + database, + announce_handler, + scrape_handler, + keys_handler, + authentication_service, + in_memory_whitelist, + whitelist_authorization, + whitelist_manager, + in_memory_torrent_repository, + db_torrent_repository, + torrents_manager, + } + } +} diff --git a/packages/tracker-core/src/lib.rs b/packages/tracker-core/src/lib.rs index 8e73fe027..0107fb443 100644 --- a/packages/tracker-core/src/lib.rs +++ b/packages/tracker-core/src/lib.rs @@ -120,6 +120,7 @@ //! Please refer to the [`whitelist`] documentation. 
pub mod announce_handler; pub mod authentication; +pub mod container; pub mod databases; pub mod error; pub mod scrape_handler; diff --git a/src/container.rs b/src/container.rs index 87f001b65..22443aa56 100644 --- a/src/container.rs +++ b/src/container.rs @@ -2,20 +2,16 @@ use std::sync::Arc; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::authentication::handler::KeysHandler; -use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; -use bittorrent_tracker_core::authentication::key::repository::persisted::DatabaseKeyRepository; use bittorrent_tracker_core::authentication::service::AuthenticationService; -use bittorrent_tracker_core::databases::setup::initialize_database; +use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_tracker_core::databases::Database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use bittorrent_tracker_core::torrent::manager::TorrentsManager; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist; -use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::manager::WhitelistManager; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; -use bittorrent_tracker_core::whitelist::setup::initialize_whitelist_manager; use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self, MAX_CONNECTION_ID_ERRORS_PER_IP}; use tokio::sync::RwLock; @@ -24,6 +20,7 @@ use torrust_tracker_configuration::{Configuration, Core, HttpApi, HttpTracker, U use tracing::instrument; pub struct AppContainer { + // Tracker Core Services pub core_config: Arc, pub database: Arc>, pub announce_handler: Arc, @@ -32,15 +29,17 @@ pub struct AppContainer { pub 
authentication_service: Arc, pub in_memory_whitelist: Arc, pub whitelist_authorization: Arc, - pub ban_service: Arc>, - pub http_stats_event_sender: Arc>>, - pub udp_stats_event_sender: Arc>>, - pub http_stats_repository: Arc, - pub udp_stats_repository: Arc, pub whitelist_manager: Arc, pub in_memory_torrent_repository: Arc, pub db_torrent_repository: Arc, pub torrents_manager: Arc, + // UDP Tracker Core Services + pub ban_service: Arc>, + pub udp_stats_event_sender: Arc>>, + // HTTP Tracker Core Services + pub http_stats_event_sender: Arc>>, + pub http_stats_repository: Arc, + pub udp_stats_repository: Arc, } impl AppContainer { @@ -111,6 +110,8 @@ pub struct HttpApiContainer { pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { let core_config = Arc::new(configuration.core.clone()); + let tracker_core_container = TrackerCoreContainer::initialize(&core_config); + // HTTP stats let (http_stats_event_sender, http_stats_repository) = bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); @@ -124,58 +125,32 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { let udp_stats_repository = Arc::new(udp_stats_repository); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let database = initialize_database(&configuration.core); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&configuration.core, &in_memory_whitelist.clone())); - let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); - let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(AuthenticationService::new(&configuration.core, &in_memory_key_repository)); - let keys_handler = Arc::new(KeysHandler::new( 
- &db_key_repository.clone(), - &in_memory_key_repository.clone(), - )); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - - let torrents_manager = Arc::new(TorrentsManager::new( - &configuration.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let announce_handler = Arc::new(AnnounceHandler::new( - &configuration.core, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); AppContainer { core_config, - database, - announce_handler, - scrape_handler, - keys_handler, - authentication_service, - in_memory_whitelist, - whitelist_authorization, + database: tracker_core_container.database, + announce_handler: tracker_core_container.announce_handler, + scrape_handler: tracker_core_container.scrape_handler, + keys_handler: tracker_core_container.keys_handler, + authentication_service: tracker_core_container.authentication_service, + in_memory_whitelist: tracker_core_container.in_memory_whitelist, + whitelist_authorization: tracker_core_container.whitelist_authorization, + whitelist_manager: tracker_core_container.whitelist_manager, + in_memory_torrent_repository: tracker_core_container.in_memory_torrent_repository, + db_torrent_repository: tracker_core_container.db_torrent_repository, + torrents_manager: tracker_core_container.torrents_manager, ban_service, http_stats_event_sender, udp_stats_event_sender, http_stats_repository, udp_stats_repository, - whitelist_manager, - in_memory_torrent_repository, - db_torrent_repository, - torrents_manager, } } #[must_use] pub fn initialize_http_api_container(core_config: &Arc, http_api_config: &Arc) -> Arc { + let tracker_core_container = TrackerCoreContainer::initialize(core_config); + // HTTP stats let (_http_stats_event_sender, 
http_stats_repository) = bittorrent_http_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); @@ -187,23 +162,13 @@ pub fn initialize_http_api_container(core_config: &Arc, http_api_config: & let udp_stats_repository = Arc::new(udp_stats_repository); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let database = initialize_database(core_config); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); - let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let keys_handler = Arc::new(KeysHandler::new( - &db_key_repository.clone(), - &in_memory_key_repository.clone(), - )); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); Arc::new(HttpApiContainer { http_api_config: http_api_config.clone(), core_config: core_config.clone(), - in_memory_torrent_repository: in_memory_torrent_repository.clone(), - keys_handler: keys_handler.clone(), - whitelist_manager: whitelist_manager.clone(), + in_memory_torrent_repository: tracker_core_container.in_memory_torrent_repository.clone(), + keys_handler: tracker_core_container.keys_handler.clone(), + whitelist_manager: tracker_core_container.whitelist_manager.clone(), ban_service: ban_service.clone(), http_stats_repository: http_stats_repository.clone(), udp_stats_repository: udp_stats_repository.clone(), @@ -215,33 +180,21 @@ pub fn initialize_udt_tracker_container( core_config: &Arc, udp_tracker_config: &Arc, ) -> Arc { + let tracker_core_container = TrackerCoreContainer::initialize(core_config); + // UDP stats let (udp_stats_event_sender, _udp_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); let udp_stats_event_sender = Arc::new(udp_stats_event_sender); 
let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let database = initialize_database(core_config); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(WhitelistAuthorization::new(core_config, &in_memory_whitelist.clone())); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - - let announce_handler = Arc::new(AnnounceHandler::new( - core_config, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); Arc::new(UdpTrackerContainer { udp_tracker_config: udp_tracker_config.clone(), core_config: core_config.clone(), - announce_handler: announce_handler.clone(), - scrape_handler: scrape_handler.clone(), - whitelist_authorization: whitelist_authorization.clone(), + announce_handler: tracker_core_container.announce_handler.clone(), + scrape_handler: tracker_core_container.scrape_handler.clone(), + whitelist_authorization: tracker_core_container.whitelist_authorization.clone(), udp_stats_event_sender: udp_stats_event_sender.clone(), ban_service: ban_service.clone(), }) From d1520430ff4e44d16e1cadf24fb7f94d6e27f410 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 13:50:11 +0000 Subject: [PATCH 303/802] refactor: [#1298] mmove HttpTrackerContainer to http-tracker-core --- packages/axum-http-tracker-server/src/lib.rs | 1 - packages/axum-http-tracker-server/src/server.rs | 4 ++-- packages/axum-http-tracker-server/src/v1/routes.rs | 2 +- .../src/container.rs | 7 ++++--- packages/http-tracker-core/src/lib.rs | 1 + src/bootstrap/jobs/http_tracker.rs | 4 ++-- src/container.rs | 2 +- tests/servers/http/environment.rs | 2 +- 8 files changed, 12 insertions(+), 11 deletions(-) rename 
packages/{axum-http-tracker-server => http-tracker-core}/src/container.rs (90%) diff --git a/packages/axum-http-tracker-server/src/lib.rs b/packages/axum-http-tracker-server/src/lib.rs index 3d9f6d1b7..a8823b868 100644 --- a/packages/axum-http-tracker-server/src/lib.rs +++ b/packages/axum-http-tracker-server/src/lib.rs @@ -305,7 +305,6 @@ //! - [Bencode to Json Online converter](https://chocobo1.github.io/bencode_online). use serde::{Deserialize, Serialize}; -pub mod container; pub mod server; pub mod v1; diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 39969907b..615335aba 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -4,6 +4,7 @@ use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; +use bittorrent_http_tracker_core::container::HttpTrackerContainer; use derive_more::Constructor; use futures::future::BoxFuture; use tokio::sync::oneshot::{Receiver, Sender}; @@ -15,7 +16,6 @@ use torrust_server_lib::signals::{Halted, Started}; use tracing::instrument; use super::v1::routes::router; -use crate::container::HttpTrackerContainer; use crate::HTTP_TRACKER_LOG_TARGET; /// Error that can occur when starting or stopping the HTTP server. 
@@ -238,6 +238,7 @@ pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { mod tests { use std::sync::Arc; + use bittorrent_http_tracker_core::container::HttpTrackerContainer; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; use bittorrent_tracker_core::authentication::service; @@ -252,7 +253,6 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::container::HttpTrackerContainer; use crate::server::{HttpServer, Launcher}; pub fn initialize_container(configuration: &Configuration) -> HttpTrackerContainer { diff --git a/packages/axum-http-tracker-server/src/v1/routes.rs b/packages/axum-http-tracker-server/src/v1/routes.rs index 2d530f633..7e4b7c922 100644 --- a/packages/axum-http-tracker-server/src/v1/routes.rs +++ b/packages/axum-http-tracker-server/src/v1/routes.rs @@ -9,6 +9,7 @@ use axum::response::Response; use axum::routing::get; use axum::{BoxError, Router}; use axum_client_ip::SecureClientIpSource; +use bittorrent_http_tracker_core::container::HttpTrackerContainer; use hyper::{Request, StatusCode}; use torrust_server_lib::logging::Latency; use torrust_tracker_configuration::DEFAULT_TIMEOUT; @@ -23,7 +24,6 @@ use tower_http::LatencyUnit; use tracing::{instrument, Level, Span}; use super::handlers::{announce, health_check, scrape}; -use crate::container::HttpTrackerContainer; use crate::HTTP_TRACKER_LOG_TARGET; /// It adds the routes to the router. 
diff --git a/packages/axum-http-tracker-server/src/container.rs b/packages/http-tracker-core/src/container.rs similarity index 90% rename from packages/axum-http-tracker-server/src/container.rs rename to packages/http-tracker-core/src/container.rs index 339c25778..b952a5853 100644 --- a/packages/axum-http-tracker-server/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -12,13 +12,15 @@ use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use torrust_tracker_configuration::{Core, HttpTracker}; +use crate::statistics; + pub struct HttpTrackerContainer { pub core_config: Arc, pub http_tracker_config: Arc, pub announce_handler: Arc, pub scrape_handler: Arc, pub whitelist_authorization: Arc, - pub http_stats_event_sender: Arc>>, + pub http_stats_event_sender: Arc>>, pub authentication_service: Arc, } @@ -28,8 +30,7 @@ pub fn initialize_http_tracker_container( http_tracker_config: &Arc, ) -> Arc { // HTTP stats - let (http_stats_event_sender, _http_stats_repository) = - bittorrent_http_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); + let (http_stats_event_sender, _http_stats_repository) = statistics::setup::factory(core_config.tracker_usage_statistics); let http_stats_event_sender = Arc::new(http_stats_event_sender); let database = initialize_database(core_config); diff --git a/packages/http-tracker-core/src/lib.rs b/packages/http-tracker-core/src/lib.rs index cb5306aa6..b42b99f8e 100644 --- a/packages/http-tracker-core/src/lib.rs +++ b/packages/http-tracker-core/src/lib.rs @@ -1,3 +1,4 @@ +pub mod container; pub mod services; pub mod statistics; diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 471f74b8b..3febf60f0 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -14,8 +14,8 @@ use std::net::SocketAddr; use std::sync::Arc; use 
axum_server::tls_rustls::RustlsConfig; +use bittorrent_http_tracker_core::container::HttpTrackerContainer; use tokio::task::JoinHandle; -use torrust_axum_http_tracker_server::container::HttpTrackerContainer; use torrust_axum_http_tracker_server::server::{HttpServer, Launcher}; use torrust_axum_http_tracker_server::Version; use torrust_axum_server::tsl::make_rust_tls; @@ -77,7 +77,7 @@ async fn start_v1( mod tests { use std::sync::Arc; - use torrust_axum_http_tracker_server::container::initialize_http_tracker_container; + use bittorrent_http_tracker_core::container::initialize_http_tracker_container; use torrust_axum_http_tracker_server::Version; use torrust_server_lib::registar::Registar; use torrust_tracker_test_helpers::configuration::ephemeral_public; diff --git a/src/container.rs b/src/container.rs index 22443aa56..692036220 100644 --- a/src/container.rs +++ b/src/container.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use bittorrent_http_tracker_core::container::HttpTrackerContainer; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::authentication::handler::KeysHandler; use bittorrent_tracker_core::authentication::service::AuthenticationService; @@ -15,7 +16,6 @@ use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self, MAX_CONNECTION_ID_ERRORS_PER_IP}; use tokio::sync::RwLock; -use torrust_axum_http_tracker_server::container::HttpTrackerContainer; use torrust_tracker_configuration::{Configuration, Core, HttpApi, HttpTracker, UdpTracker}; use tracing::instrument; diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index a0164eccc..fe1a2374b 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use bittorrent_http_tracker_core::container::HttpTrackerContainer; use 
bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::authentication::handler::KeysHandler; @@ -16,7 +17,6 @@ use bittorrent_tracker_core::whitelist::manager::WhitelistManager; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_tracker_core::whitelist::setup::initialize_whitelist_manager; use futures::executor::block_on; -use torrust_axum_http_tracker_server::container::HttpTrackerContainer; use torrust_axum_http_tracker_server::server::{HttpServer, Launcher, Running, Stopped}; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; From 06a29cd5856c3f1dad8c2313ee512b60e4141fd6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 13:52:21 +0000 Subject: [PATCH 304/802] refactor: [#1298] convert fn into static method --- packages/http-tracker-core/src/container.rs | 71 ++++++++++----------- src/bootstrap/jobs/http_tracker.rs | 4 +- 2 files changed, 37 insertions(+), 38 deletions(-) diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index b952a5853..eb19d8334 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -24,40 +24,39 @@ pub struct HttpTrackerContainer { pub authentication_service: Arc, } -#[must_use] -pub fn initialize_http_tracker_container( - core_config: &Arc, - http_tracker_config: &Arc, -) -> Arc { - // HTTP stats - let (http_stats_event_sender, _http_stats_repository) = statistics::setup::factory(core_config.tracker_usage_statistics); - let http_stats_event_sender = Arc::new(http_stats_event_sender); - - let database = initialize_database(core_config); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(WhitelistAuthorization::new(core_config, &in_memory_whitelist.clone())); - let in_memory_key_repository = 
Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(AuthenticationService::new(core_config, &in_memory_key_repository)); - - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - - let announce_handler = Arc::new(AnnounceHandler::new( - core_config, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - Arc::new(HttpTrackerContainer { - http_tracker_config: http_tracker_config.clone(), - core_config: core_config.clone(), - announce_handler: announce_handler.clone(), - scrape_handler: scrape_handler.clone(), - whitelist_authorization: whitelist_authorization.clone(), - http_stats_event_sender: http_stats_event_sender.clone(), - authentication_service: authentication_service.clone(), - }) +impl HttpTrackerContainer { + #[must_use] + pub fn initialize(core_config: &Arc, http_tracker_config: &Arc) -> Arc { + // HTTP stats + let (http_stats_event_sender, _http_stats_repository) = statistics::setup::factory(core_config.tracker_usage_statistics); + let http_stats_event_sender = Arc::new(http_stats_event_sender); + + let database = initialize_database(core_config); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(core_config, &in_memory_whitelist.clone())); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication_service = Arc::new(AuthenticationService::new(core_config, &in_memory_key_repository)); + + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + + let announce_handler = Arc::new(AnnounceHandler::new( + 
core_config, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + + Arc::new(Self { + http_tracker_config: http_tracker_config.clone(), + core_config: core_config.clone(), + announce_handler: announce_handler.clone(), + scrape_handler: scrape_handler.clone(), + whitelist_authorization: whitelist_authorization.clone(), + http_stats_event_sender: http_stats_event_sender.clone(), + authentication_service: authentication_service.clone(), + }) + } } diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 3febf60f0..6a8c6d84c 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -77,7 +77,7 @@ async fn start_v1( mod tests { use std::sync::Arc; - use bittorrent_http_tracker_core::container::initialize_http_tracker_container; + use bittorrent_http_tracker_core::container::HttpTrackerContainer; use torrust_axum_http_tracker_server::Version; use torrust_server_lib::registar::Registar; use torrust_tracker_test_helpers::configuration::ephemeral_public; @@ -94,7 +94,7 @@ mod tests { initialize_global_services(&cfg); - let http_tracker_container = initialize_http_tracker_container(&core_config, &http_tracker_config); + let http_tracker_container = HttpTrackerContainer::initialize(&core_config, &http_tracker_config); let version = Version::V1; From b562f8d19487125c6c8629a9960477a8b8408486 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 14:46:45 +0000 Subject: [PATCH 305/802] refacotr: [#1298] remove duplicate code --- .../axum-http-tracker-server/src/server.rs | 9 +- packages/http-tracker-core/src/container.rs | 67 +++++---- src/container.rs | 6 +- tests/servers/http/environment.rs | 139 +++--------------- tests/servers/http/v1/contract.rs | 108 ++++++++------ 5 files changed, 137 insertions(+), 192 deletions(-) diff --git 
a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 615335aba..749f57e02 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -268,9 +268,10 @@ mod tests { let http_tracker_config = Arc::new(http_tracker_config.clone()); // HTTP stats - let (http_stats_event_sender, _http_stats_repository) = + let (http_stats_event_sender, http_stats_repository) = bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); let http_stats_event_sender = Arc::new(http_stats_event_sender); + let http_stats_repository = Arc::new(http_stats_repository); let database = initialize_database(&configuration.core); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); @@ -294,12 +295,14 @@ mod tests { HttpTrackerContainer { core_config, - http_tracker_config, announce_handler, scrape_handler, whitelist_authorization, - http_stats_event_sender, authentication_service, + + http_tracker_config, + http_stats_event_sender, + http_stats_repository, } } diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index eb19d8334..c9945f3b1 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -1,62 +1,73 @@ use std::sync::Arc; use bittorrent_tracker_core::announce_handler::AnnounceHandler; -use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; use bittorrent_tracker_core::authentication::service::AuthenticationService; -use bittorrent_tracker_core::databases::setup::initialize_database; +use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; 
use bittorrent_tracker_core::whitelist; -use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; -use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use torrust_tracker_configuration::{Core, HttpTracker}; use crate::statistics; pub struct HttpTrackerContainer { + // todo: replace with TrackerCoreContainer pub core_config: Arc, - pub http_tracker_config: Arc, pub announce_handler: Arc, pub scrape_handler: Arc, pub whitelist_authorization: Arc, - pub http_stats_event_sender: Arc>>, pub authentication_service: Arc, + + pub http_tracker_config: Arc, + pub http_stats_event_sender: Arc>>, + pub http_stats_repository: Arc, } impl HttpTrackerContainer { #[must_use] - pub fn initialize(core_config: &Arc, http_tracker_config: &Arc) -> Arc { + pub fn initialize_from( + tracker_core_container: &Arc, + http_tracker_config: &Arc, + ) -> Arc { // HTTP stats - let (http_stats_event_sender, _http_stats_repository) = statistics::setup::factory(core_config.tracker_usage_statistics); + let (http_stats_event_sender, http_stats_repository) = + statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); let http_stats_event_sender = Arc::new(http_stats_event_sender); + let http_stats_repository = Arc::new(http_stats_repository); + + Arc::new(Self { + http_tracker_config: http_tracker_config.clone(), - let database = initialize_database(core_config); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(WhitelistAuthorization::new(core_config, &in_memory_whitelist.clone())); - let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(AuthenticationService::new(core_config, &in_memory_key_repository)); + core_config: tracker_core_container.core_config.clone(), + announce_handler: tracker_core_container.announce_handler.clone(), + scrape_handler: tracker_core_container.scrape_handler.clone(), + 
whitelist_authorization: tracker_core_container.whitelist_authorization.clone(), + authentication_service: tracker_core_container.authentication_service.clone(), - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + http_stats_event_sender: http_stats_event_sender.clone(), + http_stats_repository: http_stats_repository.clone(), + }) + } - let announce_handler = Arc::new(AnnounceHandler::new( - core_config, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - )); + #[must_use] + pub fn initialize(core_config: &Arc, http_tracker_config: &Arc) -> Arc { + let tracker_core_container = TrackerCoreContainer::initialize(core_config); - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + // HTTP stats + let (http_stats_event_sender, http_stats_repository) = statistics::setup::factory(core_config.tracker_usage_statistics); + let http_stats_event_sender = Arc::new(http_stats_event_sender); + let http_stats_repository = Arc::new(http_stats_repository); Arc::new(Self { http_tracker_config: http_tracker_config.clone(), + core_config: core_config.clone(), - announce_handler: announce_handler.clone(), - scrape_handler: scrape_handler.clone(), - whitelist_authorization: whitelist_authorization.clone(), + announce_handler: tracker_core_container.announce_handler.clone(), + scrape_handler: tracker_core_container.scrape_handler.clone(), + whitelist_authorization: tracker_core_container.whitelist_authorization.clone(), + authentication_service: tracker_core_container.authentication_service.clone(), + http_stats_event_sender: http_stats_event_sender.clone(), - authentication_service: authentication_service.clone(), + http_stats_repository: http_stats_repository.clone(), }) } } diff --git a/src/container.rs b/src/container.rs index 692036220..5d76a20f3 100644 --- 
a/src/container.rs +++ b/src/container.rs @@ -46,13 +46,15 @@ impl AppContainer { #[must_use] pub fn http_tracker_container(&self, http_tracker_config: &Arc) -> HttpTrackerContainer { HttpTrackerContainer { - http_tracker_config: http_tracker_config.clone(), core_config: self.core_config.clone(), announce_handler: self.announce_handler.clone(), scrape_handler: self.scrape_handler.clone(), whitelist_authorization: self.whitelist_authorization.clone(), - http_stats_event_sender: self.http_stats_event_sender.clone(), authentication_service: self.authentication_service.clone(), + + http_tracker_config: http_tracker_config.clone(), + http_stats_event_sender: self.http_stats_event_sender.clone(), + http_stats_repository: self.http_stats_repository.clone(), } } diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index fe1a2374b..209430d25 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -2,37 +2,17 @@ use std::sync::Arc; use bittorrent_http_tracker_core::container::HttpTrackerContainer; use bittorrent_primitives::info_hash::InfoHash; -use bittorrent_tracker_core::announce_handler::AnnounceHandler; -use bittorrent_tracker_core::authentication::handler::KeysHandler; -use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; -use bittorrent_tracker_core::authentication::key::repository::persisted::DatabaseKeyRepository; -use bittorrent_tracker_core::authentication::service::AuthenticationService; -use bittorrent_tracker_core::databases::setup::initialize_database; -use bittorrent_tracker_core::databases::Database; -use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; -use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; -use 
bittorrent_tracker_core::whitelist::manager::WhitelistManager; -use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; -use bittorrent_tracker_core::whitelist::setup::initialize_whitelist_manager; +use bittorrent_tracker_core::container::TrackerCoreContainer; use futures::executor::block_on; use torrust_axum_http_tracker_server::server::{HttpServer, Launcher, Running, Stopped}; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; -use torrust_tracker_configuration::{Configuration, Core, HttpTracker}; +use torrust_tracker_configuration::Configuration; use torrust_tracker_lib::bootstrap::app::initialize_global_services; use torrust_tracker_primitives::peer; pub struct Environment { - pub http_tracker_container: Arc, - - pub database: Arc>, - pub in_memory_torrent_repository: Arc, - pub keys_handler: Arc, - pub http_stats_repository: Arc, - pub whitelist_manager: Arc, - + pub container: Arc, pub registar: Registar, pub server: HttpServer, } @@ -40,7 +20,11 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let () = self.in_memory_torrent_repository.upsert_peer(info_hash, peer); + let () = self + .container + .tracker_core_container + .in_memory_torrent_repository + .upsert_peer(info_hash, peer); } } @@ -49,34 +33,19 @@ impl Environment { pub fn new(configuration: &Arc) -> Self { initialize_global_services(configuration); - let env_container = EnvContainer::initialize(configuration); + let container = Arc::new(EnvContainer::initialize(configuration)); - let bind_to = env_container.http_tracker_config.bind_address; + let bind_to = container.http_tracker_container.http_tracker_config.bind_address; - let tls = - block_on(make_rust_tls(&env_container.http_tracker_config.tsl_config)).map(|tls| tls.expect("tls config failed")); + let tls = block_on(make_rust_tls( + 
&container.http_tracker_container.http_tracker_config.tsl_config, + )) + .map(|tls| tls.expect("tls config failed")); let server = HttpServer::new(Launcher::new(bind_to, tls)); - let http_tracker_container = Arc::new(HttpTrackerContainer { - core_config: env_container.core_config.clone(), - http_tracker_config: env_container.http_tracker_config.clone(), - announce_handler: env_container.http_tracker_container.announce_handler.clone(), - scrape_handler: env_container.http_tracker_container.scrape_handler.clone(), - whitelist_authorization: env_container.http_tracker_container.whitelist_authorization.clone(), - http_stats_event_sender: env_container.http_tracker_container.http_stats_event_sender.clone(), - authentication_service: env_container.http_tracker_container.authentication_service.clone(), - }); - Self { - http_tracker_container, - - database: env_container.database.clone(), - in_memory_torrent_repository: env_container.in_memory_torrent_repository.clone(), - keys_handler: env_container.keys_handler.clone(), - http_stats_repository: env_container.http_stats_repository.clone(), - whitelist_manager: env_container.whitelist_manager.clone(), - + container, registar: Registar::default(), server, } @@ -85,18 +54,11 @@ impl Environment { #[allow(dead_code)] pub async fn start(self) -> Environment { Environment { - http_tracker_container: self.http_tracker_container.clone(), - - database: self.database.clone(), - in_memory_torrent_repository: self.in_memory_torrent_repository.clone(), - keys_handler: self.keys_handler.clone(), - http_stats_repository: self.http_stats_repository.clone(), - whitelist_manager: self.whitelist_manager.clone(), - + container: self.container.clone(), registar: self.registar.clone(), server: self .server - .start(self.http_tracker_container, self.registar.give_form()) + .start(self.container.http_tracker_container.clone(), self.registar.give_form()) .await .unwrap(), } @@ -110,14 +72,7 @@ impl Environment { pub async fn stop(self) -> 
Environment { Environment { - http_tracker_container: self.http_tracker_container, - - database: self.database, - in_memory_torrent_repository: self.in_memory_torrent_repository, - keys_handler: self.keys_handler, - http_stats_repository: self.http_stats_repository, - whitelist_manager: self.whitelist_manager, - + container: self.container, registar: Registar::default(), server: self.server.stop().await.unwrap(), } @@ -129,15 +84,8 @@ impl Environment { } pub struct EnvContainer { - pub core_config: Arc, - pub http_tracker_config: Arc, + pub tracker_core_container: Arc, pub http_tracker_container: Arc, - - pub database: Arc>, - pub in_memory_torrent_repository: Arc, - pub keys_handler: Arc, - pub http_stats_repository: Arc, - pub whitelist_manager: Arc, } impl EnvContainer { @@ -149,55 +97,12 @@ impl EnvContainer { .expect("missing HTTP tracker configuration"); let http_tracker_config = Arc::new(http_tracker_config[0].clone()); - // HTTP stats - let (http_stats_event_sender, http_stats_repository) = - bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); - let http_stats_event_sender = Arc::new(http_stats_event_sender); - let http_stats_repository = Arc::new(http_stats_repository); - - let database = initialize_database(&configuration.core); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&configuration.core, &in_memory_whitelist.clone())); - let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let db_key_repository = Arc::new(DatabaseKeyRepository::new(&database)); - let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(AuthenticationService::new(&configuration.core, &in_memory_key_repository)); - let keys_handler = Arc::new(KeysHandler::new( - &db_key_repository.clone(), - &in_memory_key_repository.clone(), - )); - let 
in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - - let announce_handler = Arc::new(AnnounceHandler::new( - &configuration.core, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - let http_tracker_container = Arc::new(HttpTrackerContainer { - http_tracker_config: http_tracker_config.clone(), - core_config: core_config.clone(), - announce_handler: announce_handler.clone(), - scrape_handler: scrape_handler.clone(), - whitelist_authorization: whitelist_authorization.clone(), - http_stats_event_sender: http_stats_event_sender.clone(), - authentication_service: authentication_service.clone(), - }); + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); + let http_tracker_container = HttpTrackerContainer::initialize_from(&tracker_core_container, &http_tracker_config); Self { - core_config, - http_tracker_config, + tracker_core_container, http_tracker_container, - - database, - in_memory_torrent_repository, - keys_handler, - http_stats_repository, - whitelist_manager, } } } diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 1931544b9..d7a09cd2d 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -444,12 +444,12 @@ mod for_all_config_modes { let response = Client::new(*env.bind_address()) .announce( &QueryBuilder::default() - .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) + .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) // DevSkim: ignore DS173237 .query(), ) .await; - let announce_policy = env.http_tracker_container.core_config.announce_policy; + let announce_policy = 
env.container.tracker_core_container.core_config.announce_policy; assert_announce_response( response, @@ -472,7 +472,7 @@ mod for_all_config_modes { let env = Started::new(&configuration::ephemeral_public().into()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 // Peer 1 let previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); @@ -490,7 +490,7 @@ mod for_all_config_modes { ) .await; - let announce_policy = env.http_tracker_container.core_config.announce_policy; + let announce_policy = env.container.tracker_core_container.core_config.announce_policy; // It should only contain the previously announced peer assert_announce_response( @@ -514,7 +514,7 @@ mod for_all_config_modes { let env = Started::new(&configuration::ephemeral_public().into()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 // Announce a peer using IPV4 let peer_using_ipv4 = PeerBuilder::default() @@ -543,7 +543,7 @@ mod for_all_config_modes { ) .await; - let announce_policy = env.http_tracker_container.core_config.announce_policy; + let announce_policy = env.container.tracker_core_container.core_config.announce_policy; // The newly announced peer is not included on the response peer list, // but all the previously announced peers should be included regardless the IP version they are using. 
@@ -568,7 +568,7 @@ mod for_all_config_modes { let env = Started::new(&configuration::ephemeral_public().into()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 let peer = PeerBuilder::default().build(); // Add a peer @@ -597,7 +597,7 @@ mod for_all_config_modes { let env = Started::new(&configuration::ephemeral_public().into()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 // Peer 1 let previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); @@ -638,7 +638,7 @@ mod for_all_config_modes { let env = Started::new(&configuration::ephemeral_public().into()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 // Peer 1 let previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); @@ -680,7 +680,7 @@ mod for_all_config_modes { .announce(&QueryBuilder::default().query()) .await; - let stats = env.http_stats_repository.get_stats().await; + let stats = env.container.http_tracker_container.http_stats_repository.get_stats().await; assert_eq!(stats.tcp4_connections_handled, 1); @@ -706,7 +706,7 @@ mod for_all_config_modes { .announce(&QueryBuilder::default().query()) .await; - let stats = env.http_stats_repository.get_stats().await; + let stats = env.container.http_tracker_container.http_stats_repository.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 1); @@ -731,7 +731,7 @@ mod for_all_config_modes { ) .await; - let stats = 
env.http_stats_repository.get_stats().await; + let stats = env.container.http_tracker_container.http_stats_repository.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 0); @@ -750,7 +750,7 @@ mod for_all_config_modes { .announce(&QueryBuilder::default().query()) .await; - let stats = env.http_stats_repository.get_stats().await; + let stats = env.container.http_tracker_container.http_stats_repository.get_stats().await; assert_eq!(stats.tcp4_announces_handled, 1); @@ -776,7 +776,7 @@ mod for_all_config_modes { .announce(&QueryBuilder::default().query()) .await; - let stats = env.http_stats_repository.get_stats().await; + let stats = env.container.http_tracker_container.http_stats_repository.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 1); @@ -801,7 +801,7 @@ mod for_all_config_modes { ) .await; - let stats = env.http_stats_repository.get_stats().await; + let stats = env.container.http_tracker_container.http_stats_repository.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 0); @@ -816,7 +816,7 @@ mod for_all_config_modes { let env = Started::new(&configuration::ephemeral_public().into()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 let client_ip = local_ip().unwrap(); let announce_query = QueryBuilder::default() @@ -831,7 +831,11 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = env.in_memory_torrent_repository.get_torrent_peers(&info_hash); + let peers = env + .container + .tracker_core_container + .in_memory_torrent_repository + .get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), client_ip); @@ -853,7 +857,7 @@ mod for_all_config_modes { let env = Started::new(&configuration::ephemeral_with_external_ip(IpAddr::from_str("2.137.87.41").unwrap()).into()).await; - let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = loopback_ip; @@ -869,12 +873,16 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = env.in_memory_torrent_repository.get_torrent_peers(&info_hash); + let peers = env + .container + .tracker_core_container + .in_memory_torrent_repository + .get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!( peer_addr.ip(), - env.http_tracker_container.core_config.net.external_ip.unwrap() + env.container.tracker_core_container.core_config.net.external_ip.unwrap() ); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); @@ -898,7 +906,7 @@ mod for_all_config_modes { ) .await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = loopback_ip; @@ -914,12 +922,16 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = env.in_memory_torrent_repository.get_torrent_peers(&info_hash); + let peers = env + .container + .tracker_core_container + .in_memory_torrent_repository + .get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!( peer_addr.ip(), - env.http_tracker_container.core_config.net.external_ip.unwrap() + env.container.tracker_core_container.core_config.net.external_ip.unwrap() ); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); @@ -939,7 +951,7 @@ mod for_all_config_modes { let env = Started::new(&configuration::ephemeral_with_reverse_proxy().into()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 let announce_query = QueryBuilder::default().with_info_hash(&info_hash).query(); @@ -957,7 +969,11 @@ mod for_all_config_modes { assert_eq!(status, StatusCode::OK); } - let peers = env.in_memory_torrent_repository.get_torrent_peers(&info_hash); + let peers = env + .container + .tracker_core_container + .in_memory_torrent_repository + .get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); @@ -1034,7 +1050,7 @@ mod for_all_config_modes { let env = Started::new(&configuration::ephemeral_public().into()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 env.add_torrent_peer( &info_hash, @@ -1074,7 +1090,7 @@ mod for_all_config_modes { let env = Started::new(&configuration::ephemeral_public().into()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 env.add_torrent_peer( &info_hash, @@ -1114,7 +1130,7 @@ mod for_all_config_modes { let env = Started::new(&configuration::ephemeral_public().into()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 let response = Client::new(*env.bind_address()) .scrape( @@ -1135,8 +1151,8 @@ mod for_all_config_modes { let env = Started::new(&configuration::ephemeral_public().into()).await; - let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let info_hash2 = 
InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); + let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 + let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); // DevSkim: ignore DS173237 let response = Client::new(*env.bind_address()) .scrape( @@ -1163,7 +1179,7 @@ mod for_all_config_modes { let env = Started::new(&configuration::ephemeral_public().into()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 Client::new(*env.bind_address()) .scrape( @@ -1173,7 +1189,7 @@ mod for_all_config_modes { ) .await; - let stats = env.http_stats_repository.get_stats().await; + let stats = env.container.http_tracker_container.http_stats_repository.get_stats().await; assert_eq!(stats.tcp4_scrapes_handled, 1); @@ -1195,7 +1211,7 @@ mod for_all_config_modes { let env = Started::new(&configuration::ephemeral_ipv6().into()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 Client::bind(*env.bind_address(), IpAddr::from_str("::1").unwrap()) .scrape( @@ -1205,7 +1221,7 @@ mod for_all_config_modes { ) .await; - let stats = env.http_stats_repository.get_stats().await; + let stats = env.container.http_tracker_container.http_stats_repository.get_stats().await; assert_eq!(stats.tcp6_scrapes_handled, 1); @@ -1265,9 +1281,11 @@ mod configured_as_whitelisted { let env = Started::new(&configuration::ephemeral_listed().into()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 - 
env.whitelist_manager + env.container + .tracker_core_container + .whitelist_manager .add_torrent_to_whitelist(&info_hash) .await .expect("should add the torrent to the whitelist"); @@ -1339,7 +1357,7 @@ mod configured_as_whitelisted { let env = Started::new(&configuration::ephemeral_listed().into()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 env.add_torrent_peer( &info_hash, @@ -1349,7 +1367,9 @@ mod configured_as_whitelisted { .build(), ); - env.whitelist_manager + env.container + .tracker_core_container + .whitelist_manager .add_torrent_to_whitelist(&info_hash) .await .expect("should add the torrent to the whitelist"); @@ -1403,6 +1423,8 @@ mod configured_as_private { let env = Started::new(&configuration::ephemeral_private().into()).await; let expiring_key = env + .container + .tracker_core_container .keys_handler .generate_expiring_peer_key(Some(Duration::from_secs(60))) .await @@ -1423,7 +1445,7 @@ mod configured_as_private { let env = Started::new(&configuration::ephemeral_private().into()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 let response = Client::new(*env.bind_address()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) @@ -1510,7 +1532,7 @@ mod configured_as_private { let env = Started::new(&configuration::ephemeral_private().into()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 env.add_torrent_peer( &info_hash, @@ -1541,7 +1563,7 @@ mod configured_as_private { let env = 
Started::new(&configuration::ephemeral_private().into()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 env.add_torrent_peer( &info_hash, @@ -1552,6 +1574,8 @@ mod configured_as_private { ); let expiring_key = env + .container + .tracker_core_container .keys_handler .generate_expiring_peer_key(Some(Duration::from_secs(60))) .await @@ -1590,7 +1614,7 @@ mod configured_as_private { let env = Started::new(&configuration::ephemeral_private().into()).await; - let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); + let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 env.add_torrent_peer( &info_hash, From 1eca6a3293cdaec2ada6ba945fec856e061bde1f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 14:49:06 +0000 Subject: [PATCH 306/802] refactor: [#1298] rename struct and field --- .../axum-http-tracker-server/src/server.rs | 12 ++-- .../axum-http-tracker-server/src/v1/routes.rs | 4 +- packages/http-tracker-core/src/container.rs | 4 +- src/bootstrap/jobs/http_tracker.rs | 10 ++-- src/container.rs | 6 +- tests/servers/http/environment.rs | 14 ++--- tests/servers/http/v1/contract.rs | 56 ++++++++++++++++--- 7 files changed, 73 insertions(+), 33 deletions(-) diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 749f57e02..a5cd3bb74 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; -use bittorrent_http_tracker_core::container::HttpTrackerContainer; +use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use derive_more::Constructor; use 
futures::future::BoxFuture; use tokio::sync::oneshot::{Receiver, Sender}; @@ -45,7 +45,7 @@ impl Launcher { #[instrument(skip(self, http_tracker_container, tx_start, rx_halt))] fn start( &self, - http_tracker_container: Arc, + http_tracker_container: Arc, tx_start: Sender, rx_halt: Receiver, ) -> BoxFuture<'static, ()> { @@ -160,7 +160,7 @@ impl HttpServer { /// back to the main thread. pub async fn start( self, - http_tracker_container: Arc, + http_tracker_container: Arc, form: ServiceRegistrationForm, ) -> Result, Error> { let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); @@ -238,7 +238,7 @@ pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { mod tests { use std::sync::Arc; - use bittorrent_http_tracker_core::container::HttpTrackerContainer; + use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; use bittorrent_tracker_core::authentication::service; @@ -255,7 +255,7 @@ mod tests { use crate::server::{HttpServer, Launcher}; - pub fn initialize_container(configuration: &Configuration) -> HttpTrackerContainer { + pub fn initialize_container(configuration: &Configuration) -> HttpTrackerCoreContainer { let core_config = Arc::new(configuration.core.clone()); let http_trackers = configuration @@ -293,7 +293,7 @@ mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - HttpTrackerContainer { + HttpTrackerCoreContainer { core_config, announce_handler, scrape_handler, diff --git a/packages/axum-http-tracker-server/src/v1/routes.rs b/packages/axum-http-tracker-server/src/v1/routes.rs index 7e4b7c922..7a96f6014 100644 --- a/packages/axum-http-tracker-server/src/v1/routes.rs +++ b/packages/axum-http-tracker-server/src/v1/routes.rs @@ -9,7 +9,7 @@ use axum::response::Response; use axum::routing::get; use 
axum::{BoxError, Router}; use axum_client_ip::SecureClientIpSource; -use bittorrent_http_tracker_core::container::HttpTrackerContainer; +use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use hyper::{Request, StatusCode}; use torrust_server_lib::logging::Latency; use torrust_tracker_configuration::DEFAULT_TIMEOUT; @@ -31,7 +31,7 @@ use crate::HTTP_TRACKER_LOG_TARGET; /// > **NOTICE**: it's added a layer to get the client IP from the connection /// > info. The tracker could use the connection info to get the client IP. #[instrument(skip(http_tracker_container, server_socket_addr))] -pub fn router(http_tracker_container: Arc, server_socket_addr: SocketAddr) -> Router { +pub fn router(http_tracker_container: Arc, server_socket_addr: SocketAddr) -> Router { Router::new() // Health check .route("/health_check", get(health_check::handler)) diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index c9945f3b1..5ac82000a 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -9,7 +9,7 @@ use torrust_tracker_configuration::{Core, HttpTracker}; use crate::statistics; -pub struct HttpTrackerContainer { +pub struct HttpTrackerCoreContainer { // todo: replace with TrackerCoreContainer pub core_config: Arc, pub announce_handler: Arc, @@ -22,7 +22,7 @@ pub struct HttpTrackerContainer { pub http_stats_repository: Arc, } -impl HttpTrackerContainer { +impl HttpTrackerCoreContainer { #[must_use] pub fn initialize_from( tracker_core_container: &Arc, diff --git a/src/bootstrap/jobs/http_tracker.rs b/src/bootstrap/jobs/http_tracker.rs index 6a8c6d84c..013031395 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -14,7 +14,7 @@ use std::net::SocketAddr; use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; -use bittorrent_http_tracker_core::container::HttpTrackerContainer; +use 
bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use tokio::task::JoinHandle; use torrust_axum_http_tracker_server::server::{HttpServer, Launcher}; use torrust_axum_http_tracker_server::Version; @@ -32,7 +32,7 @@ use tracing::instrument; /// It would panic if the `config::HttpTracker` struct would contain inappropriate values. #[instrument(skip(http_tracker_container, form))] pub async fn start_job( - http_tracker_container: Arc, + http_tracker_container: Arc, form: ServiceRegistrationForm, version: Version, ) -> Option> { @@ -52,7 +52,7 @@ pub async fn start_job( async fn start_v1( socket: SocketAddr, tls: Option, - http_tracker_container: Arc, + http_tracker_container: Arc, form: ServiceRegistrationForm, ) -> JoinHandle<()> { let server = HttpServer::new(Launcher::new(socket, tls)) @@ -77,7 +77,7 @@ async fn start_v1( mod tests { use std::sync::Arc; - use bittorrent_http_tracker_core::container::HttpTrackerContainer; + use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use torrust_axum_http_tracker_server::Version; use torrust_server_lib::registar::Registar; use torrust_tracker_test_helpers::configuration::ephemeral_public; @@ -94,7 +94,7 @@ mod tests { initialize_global_services(&cfg); - let http_tracker_container = HttpTrackerContainer::initialize(&core_config, &http_tracker_config); + let http_tracker_container = HttpTrackerCoreContainer::initialize(&core_config, &http_tracker_config); let version = Version::V1; diff --git a/src/container.rs b/src/container.rs index 5d76a20f3..b8e2c5d9a 100644 --- a/src/container.rs +++ b/src/container.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use bittorrent_http_tracker_core::container::HttpTrackerContainer; +use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::authentication::handler::KeysHandler; use bittorrent_tracker_core::authentication::service::AuthenticationService; @@ 
-44,8 +44,8 @@ pub struct AppContainer { impl AppContainer { #[must_use] - pub fn http_tracker_container(&self, http_tracker_config: &Arc) -> HttpTrackerContainer { - HttpTrackerContainer { + pub fn http_tracker_container(&self, http_tracker_config: &Arc) -> HttpTrackerCoreContainer { + HttpTrackerCoreContainer { core_config: self.core_config.clone(), announce_handler: self.announce_handler.clone(), scrape_handler: self.scrape_handler.clone(), diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index 209430d25..e77cc38aa 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use bittorrent_http_tracker_core::container::HttpTrackerContainer; +use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::container::TrackerCoreContainer; use futures::executor::block_on; @@ -35,10 +35,10 @@ impl Environment { let container = Arc::new(EnvContainer::initialize(configuration)); - let bind_to = container.http_tracker_container.http_tracker_config.bind_address; + let bind_to = container.http_tracker_core_container.http_tracker_config.bind_address; let tls = block_on(make_rust_tls( - &container.http_tracker_container.http_tracker_config.tsl_config, + &container.http_tracker_core_container.http_tracker_config.tsl_config, )) .map(|tls| tls.expect("tls config failed")); @@ -58,7 +58,7 @@ impl Environment { registar: self.registar.clone(), server: self .server - .start(self.container.http_tracker_container.clone(), self.registar.give_form()) + .start(self.container.http_tracker_core_container.clone(), self.registar.give_form()) .await .unwrap(), } @@ -85,7 +85,7 @@ impl Environment { pub struct EnvContainer { pub tracker_core_container: Arc, - pub http_tracker_container: Arc, + pub http_tracker_core_container: Arc, } impl EnvContainer { @@ -98,11 +98,11 @@ impl EnvContainer { let 
http_tracker_config = Arc::new(http_tracker_config[0].clone()); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); - let http_tracker_container = HttpTrackerContainer::initialize_from(&tracker_core_container, &http_tracker_config); + let http_tracker_container = HttpTrackerCoreContainer::initialize_from(&tracker_core_container, &http_tracker_config); Self { tracker_core_container, - http_tracker_container, + http_tracker_core_container: http_tracker_container, } } } diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index d7a09cd2d..084766593 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -680,7 +680,12 @@ mod for_all_config_modes { .announce(&QueryBuilder::default().query()) .await; - let stats = env.container.http_tracker_container.http_stats_repository.get_stats().await; + let stats = env + .container + .http_tracker_core_container + .http_stats_repository + .get_stats() + .await; assert_eq!(stats.tcp4_connections_handled, 1); @@ -706,7 +711,12 @@ mod for_all_config_modes { .announce(&QueryBuilder::default().query()) .await; - let stats = env.container.http_tracker_container.http_stats_repository.get_stats().await; + let stats = env + .container + .http_tracker_core_container + .http_stats_repository + .get_stats() + .await; assert_eq!(stats.tcp6_connections_handled, 1); @@ -731,7 +741,12 @@ mod for_all_config_modes { ) .await; - let stats = env.container.http_tracker_container.http_stats_repository.get_stats().await; + let stats = env + .container + .http_tracker_core_container + .http_stats_repository + .get_stats() + .await; assert_eq!(stats.tcp6_connections_handled, 0); @@ -750,7 +765,12 @@ mod for_all_config_modes { .announce(&QueryBuilder::default().query()) .await; - let stats = env.container.http_tracker_container.http_stats_repository.get_stats().await; + let stats = env + .container + .http_tracker_core_container + .http_stats_repository + 
.get_stats() + .await; assert_eq!(stats.tcp4_announces_handled, 1); @@ -776,7 +796,12 @@ mod for_all_config_modes { .announce(&QueryBuilder::default().query()) .await; - let stats = env.container.http_tracker_container.http_stats_repository.get_stats().await; + let stats = env + .container + .http_tracker_core_container + .http_stats_repository + .get_stats() + .await; assert_eq!(stats.tcp6_announces_handled, 1); @@ -801,7 +826,12 @@ mod for_all_config_modes { ) .await; - let stats = env.container.http_tracker_container.http_stats_repository.get_stats().await; + let stats = env + .container + .http_tracker_core_container + .http_stats_repository + .get_stats() + .await; assert_eq!(stats.tcp6_announces_handled, 0); @@ -1189,7 +1219,12 @@ mod for_all_config_modes { ) .await; - let stats = env.container.http_tracker_container.http_stats_repository.get_stats().await; + let stats = env + .container + .http_tracker_core_container + .http_stats_repository + .get_stats() + .await; assert_eq!(stats.tcp4_scrapes_handled, 1); @@ -1221,7 +1256,12 @@ mod for_all_config_modes { ) .await; - let stats = env.container.http_tracker_container.http_stats_repository.get_stats().await; + let stats = env + .container + .http_tracker_core_container + .http_stats_repository + .get_stats() + .await; assert_eq!(stats.tcp6_scrapes_handled, 1); From 786000bc3ac4b1c88d57c3cc1a65c113f00fdb1f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 16:07:36 +0000 Subject: [PATCH 307/802] refactor: [#1298] remove duplicate code --- packages/http-tracker-core/src/container.rs | 31 ++++----------------- 1 file changed, 6 insertions(+), 25 deletions(-) diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 5ac82000a..0fc313a38 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -23,49 +23,30 @@ pub struct HttpTrackerCoreContainer { } impl HttpTrackerCoreContainer { + #[must_use] + 
pub fn initialize(core_config: &Arc, http_tracker_config: &Arc) -> Arc { + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(core_config)); + Self::initialize_from(&tracker_core_container, http_tracker_config) + } + #[must_use] pub fn initialize_from( tracker_core_container: &Arc, http_tracker_config: &Arc, ) -> Arc { - // HTTP stats let (http_stats_event_sender, http_stats_repository) = statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); let http_stats_event_sender = Arc::new(http_stats_event_sender); let http_stats_repository = Arc::new(http_stats_repository); Arc::new(Self { - http_tracker_config: http_tracker_config.clone(), - core_config: tracker_core_container.core_config.clone(), announce_handler: tracker_core_container.announce_handler.clone(), scrape_handler: tracker_core_container.scrape_handler.clone(), whitelist_authorization: tracker_core_container.whitelist_authorization.clone(), authentication_service: tracker_core_container.authentication_service.clone(), - http_stats_event_sender: http_stats_event_sender.clone(), - http_stats_repository: http_stats_repository.clone(), - }) - } - - #[must_use] - pub fn initialize(core_config: &Arc, http_tracker_config: &Arc) -> Arc { - let tracker_core_container = TrackerCoreContainer::initialize(core_config); - - // HTTP stats - let (http_stats_event_sender, http_stats_repository) = statistics::setup::factory(core_config.tracker_usage_statistics); - let http_stats_event_sender = Arc::new(http_stats_event_sender); - let http_stats_repository = Arc::new(http_stats_repository); - - Arc::new(Self { http_tracker_config: http_tracker_config.clone(), - - core_config: core_config.clone(), - announce_handler: tracker_core_container.announce_handler.clone(), - scrape_handler: tracker_core_container.scrape_handler.clone(), - whitelist_authorization: tracker_core_container.whitelist_authorization.clone(), - authentication_service: 
tracker_core_container.authentication_service.clone(), - http_stats_event_sender: http_stats_event_sender.clone(), http_stats_repository: http_stats_repository.clone(), }) From de75a3267dc1d33e8fcf522e532ce1da580209eb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 16:14:59 +0000 Subject: [PATCH 308/802] refactor: [#1298] move UdpTrackerContainer to udp-tracker-core package --- packages/udp-tracker-core/src/container.rs | 45 ++++++++++++++++++++++ packages/udp-tracker-core/src/lib.rs | 1 + src/bootstrap/jobs/udp_tracker.rs | 2 +- src/container.rs | 38 ++---------------- src/servers/udp/handlers/mod.rs | 2 +- src/servers/udp/server/launcher.rs | 2 +- src/servers/udp/server/mod.rs | 2 +- src/servers/udp/server/processor.rs | 2 +- src/servers/udp/server/spawner.rs | 2 +- src/servers/udp/server/states.rs | 2 +- tests/servers/udp/environment.rs | 2 +- 11 files changed, 57 insertions(+), 43 deletions(-) create mode 100644 packages/udp-tracker-core/src/container.rs diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs new file mode 100644 index 000000000..51ae6e40c --- /dev/null +++ b/packages/udp-tracker-core/src/container.rs @@ -0,0 +1,45 @@ +use std::sync::Arc; + +use bittorrent_tracker_core::announce_handler::AnnounceHandler; +use bittorrent_tracker_core::container::TrackerCoreContainer; +use bittorrent_tracker_core::scrape_handler::ScrapeHandler; +use bittorrent_tracker_core::whitelist; +use tokio::sync::RwLock; +use torrust_tracker_configuration::{Core, UdpTracker}; + +use crate::services::banning::BanService; +use crate::{statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; + +pub struct UdpTrackerContainer { + pub core_config: Arc, + pub udp_tracker_config: Arc, + pub announce_handler: Arc, + pub scrape_handler: Arc, + pub whitelist_authorization: Arc, + pub udp_stats_event_sender: Arc>>, + pub ban_service: Arc>, +} + +#[must_use] +pub fn initialize_udt_tracker_container( + core_config: &Arc, + 
udp_tracker_config: &Arc, +) -> Arc { + let tracker_core_container = TrackerCoreContainer::initialize(core_config); + + // UDP stats + let (udp_stats_event_sender, _udp_stats_repository) = statistics::setup::factory(core_config.tracker_usage_statistics); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + + Arc::new(UdpTrackerContainer { + udp_tracker_config: udp_tracker_config.clone(), + core_config: core_config.clone(), + announce_handler: tracker_core_container.announce_handler.clone(), + scrape_handler: tracker_core_container.scrape_handler.clone(), + whitelist_authorization: tracker_core_container.whitelist_authorization.clone(), + udp_stats_event_sender: udp_stats_event_sender.clone(), + ban_service: ban_service.clone(), + }) +} diff --git a/packages/udp-tracker-core/src/lib.rs b/packages/udp-tracker-core/src/lib.rs index 8283e08c5..f649cbeaf 100644 --- a/packages/udp-tracker-core/src/lib.rs +++ b/packages/udp-tracker-core/src/lib.rs @@ -1,4 +1,5 @@ pub mod connection_cookie; +pub mod container; pub mod crypto; pub mod services; pub mod statistics; diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index c97e239ce..8a2fc4412 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -8,12 +8,12 @@ //! > for the configuration options. 
use std::sync::Arc; +use bittorrent_udp_tracker_core::container::UdpTrackerContainer; use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use tokio::task::JoinHandle; use torrust_server_lib::registar::ServiceRegistrationForm; use tracing::instrument; -use crate::container::UdpTrackerContainer; use crate::servers::udp::server::spawner::Spawner; use crate::servers::udp::server::Server; diff --git a/src/container.rs b/src/container.rs index b8e2c5d9a..ee26247f7 100644 --- a/src/container.rs +++ b/src/container.rs @@ -13,6 +13,7 @@ use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentT use bittorrent_tracker_core::whitelist; use bittorrent_tracker_core::whitelist::manager::WhitelistManager; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; +use bittorrent_udp_tracker_core::container::UdpTrackerContainer; use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self, MAX_CONNECTION_ID_ERRORS_PER_IP}; use tokio::sync::RwLock; @@ -33,9 +34,11 @@ pub struct AppContainer { pub in_memory_torrent_repository: Arc, pub db_torrent_repository: Arc, pub torrents_manager: Arc, + // UDP Tracker Core Services pub ban_service: Arc>, pub udp_stats_event_sender: Arc>>, + // HTTP Tracker Core Services pub http_stats_event_sender: Arc>>, pub http_stats_repository: Arc, @@ -86,16 +89,6 @@ impl AppContainer { } } -pub struct UdpTrackerContainer { - pub core_config: Arc, - pub udp_tracker_config: Arc, - pub announce_handler: Arc, - pub scrape_handler: Arc, - pub whitelist_authorization: Arc, - pub udp_stats_event_sender: Arc>>, - pub ban_service: Arc>, -} - pub struct HttpApiContainer { pub core_config: Arc, pub http_api_config: Arc, @@ -176,28 +169,3 @@ pub fn initialize_http_api_container(core_config: &Arc, http_api_config: & udp_stats_repository: udp_stats_repository.clone(), }) } - -#[must_use] -pub fn initialize_udt_tracker_container( - core_config: &Arc, - udp_tracker_config: &Arc, 
-) -> Arc { - let tracker_core_container = TrackerCoreContainer::initialize(core_config); - - // UDP stats - let (udp_stats_event_sender, _udp_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - - Arc::new(UdpTrackerContainer { - udp_tracker_config: udp_tracker_config.clone(), - core_config: core_config.clone(), - announce_handler: tracker_core_container.announce_handler.clone(), - scrape_handler: tracker_core_container.scrape_handler.clone(), - whitelist_authorization: tracker_core_container.whitelist_authorization.clone(), - udp_stats_event_sender: udp_stats_event_sender.clone(), - ban_service: ban_service.clone(), - }) -} diff --git a/src/servers/udp/handlers/mod.rs b/src/servers/udp/handlers/mod.rs index 3d378b525..08959be35 100644 --- a/src/servers/udp/handlers/mod.rs +++ b/src/servers/udp/handlers/mod.rs @@ -11,6 +11,7 @@ use std::time::Instant; use announce::handle_announce; use aquatic_udp_protocol::{Request, Response, TransactionId}; +use bittorrent_udp_tracker_core::container::UdpTrackerContainer; use bittorrent_udp_tracker_core::services::announce::UdpAnnounceError; use connect::handle_connect; use error::handle_error; @@ -20,7 +21,6 @@ use tracing::{instrument, Level}; use uuid::Uuid; use super::RawRequest; -use crate::container::UdpTrackerContainer; use crate::servers::udp::error::Error; use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; use crate::CurrentClock; diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index dbf0d5693..d180d121c 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use std::time::Duration; use bittorrent_tracker_client::udp::client::check; +use 
bittorrent_udp_tracker_core::container::UdpTrackerContainer; use bittorrent_udp_tracker_core::{self, statistics, UDP_TRACKER_LOG_TARGET}; use derive_more::Constructor; use futures_util::StreamExt; @@ -15,7 +16,6 @@ use torrust_server_lib::signals::{shutdown_signal_with_message, Halted, Started} use tracing::instrument; use super::request_buffer::ActiveRequests; -use crate::container::UdpTrackerContainer; use crate::servers::udp::server::bound_socket::BoundSocket; use crate::servers::udp::server::processor::Processor; use crate::servers::udp::server::receiver::Receiver; diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index 8e14bc8be..e4b3297c7 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -57,13 +57,13 @@ mod tests { use std::sync::Arc; use std::time::Duration; + use bittorrent_udp_tracker_core::container::initialize_udt_tracker_container; use torrust_server_lib::registar::Registar; use torrust_tracker_test_helpers::configuration::ephemeral_public; use super::spawner::Spawner; use super::Server; use crate::bootstrap::app::initialize_global_services; - use crate::container::initialize_udt_tracker_container; #[tokio::test] async fn it_should_be_able_to_start_and_stop() { diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index af4c68770..21679a6fc 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -4,12 +4,12 @@ use std::sync::Arc; use std::time::Duration; use aquatic_udp_protocol::Response; +use bittorrent_udp_tracker_core::container::UdpTrackerContainer; use bittorrent_udp_tracker_core::{self, statistics}; use tokio::time::Instant; use tracing::{instrument, Level}; use super::bound_socket::BoundSocket; -use crate::container::UdpTrackerContainer; use crate::servers::udp::handlers::CookieTimeValues; use crate::servers::udp::{handlers, RawRequest}; diff --git a/src/servers/udp/server/spawner.rs b/src/servers/udp/server/spawner.rs 
index fd85a57c9..30acd36e9 100644 --- a/src/servers/udp/server/spawner.rs +++ b/src/servers/udp/server/spawner.rs @@ -3,6 +3,7 @@ use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; +use bittorrent_udp_tracker_core::container::UdpTrackerContainer; use derive_more::derive::Display; use derive_more::Constructor; use tokio::sync::oneshot; @@ -10,7 +11,6 @@ use tokio::task::JoinHandle; use torrust_server_lib::signals::{Halted, Started}; use super::launcher::Launcher; -use crate::container::UdpTrackerContainer; #[derive(Constructor, Copy, Clone, Debug, Display)] #[display("(with socket): {bind_to}")] diff --git a/src/servers/udp/server/states.rs b/src/servers/udp/server/states.rs index ae499acf7..4923d90b0 100644 --- a/src/servers/udp/server/states.rs +++ b/src/servers/udp/server/states.rs @@ -3,6 +3,7 @@ use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; +use bittorrent_udp_tracker_core::container::UdpTrackerContainer; use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use derive_more::derive::Display; use derive_more::Constructor; @@ -13,7 +14,6 @@ use tracing::{instrument, Level}; use super::spawner::Spawner; use super::{Server, UdpError}; -use crate::container::UdpTrackerContainer; use crate::servers::udp::server::launcher::Launcher; /// A UDP server instance controller with no UDP instance running. 
diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 241623732..e3d354ee4 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -10,13 +10,13 @@ use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepo use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; +use bittorrent_udp_tracker_core::container::UdpTrackerContainer; use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; use tokio::sync::RwLock; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{Configuration, Core, UdpTracker, DEFAULT_TIMEOUT}; use torrust_tracker_lib::bootstrap::app::initialize_global_services; -use torrust_tracker_lib::container::UdpTrackerContainer; use torrust_tracker_lib::servers::udp::server::spawner::Spawner; use torrust_tracker_lib::servers::udp::server::states::{Running, Stopped}; use torrust_tracker_lib::servers::udp::server::Server; From 5c1fd97093f82d347972b93e72c76a7eff772a4a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 16:17:15 +0000 Subject: [PATCH 309/802] refactor: [#1298] rename struct to UdpTrackerCoreContainer --- packages/udp-tracker-core/src/container.rs | 7 +++---- src/bootstrap/jobs/udp_tracker.rs | 4 ++-- src/container.rs | 6 +++--- src/servers/udp/handlers/mod.rs | 6 +++--- src/servers/udp/server/launcher.rs | 6 +++--- src/servers/udp/server/processor.rs | 6 +++--- src/servers/udp/server/spawner.rs | 4 ++-- src/servers/udp/server/states.rs | 4 ++-- tests/servers/udp/environment.rs | 22 +++++++++++----------- 9 files changed, 32 insertions(+), 33 deletions(-) diff --git a/packages/udp-tracker-core/src/container.rs 
b/packages/udp-tracker-core/src/container.rs index 51ae6e40c..e7b01835b 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -10,7 +10,7 @@ use torrust_tracker_configuration::{Core, UdpTracker}; use crate::services::banning::BanService; use crate::{statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; -pub struct UdpTrackerContainer { +pub struct UdpTrackerCoreContainer { pub core_config: Arc, pub udp_tracker_config: Arc, pub announce_handler: Arc, @@ -24,16 +24,15 @@ pub struct UdpTrackerContainer { pub fn initialize_udt_tracker_container( core_config: &Arc, udp_tracker_config: &Arc, -) -> Arc { +) -> Arc { let tracker_core_container = TrackerCoreContainer::initialize(core_config); - // UDP stats let (udp_stats_event_sender, _udp_stats_repository) = statistics::setup::factory(core_config.tracker_usage_statistics); let udp_stats_event_sender = Arc::new(udp_stats_event_sender); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - Arc::new(UdpTrackerContainer { + Arc::new(UdpTrackerCoreContainer { udp_tracker_config: udp_tracker_config.clone(), core_config: core_config.clone(), announce_handler: tracker_core_container.announce_handler.clone(), diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 8a2fc4412..89b2a38be 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -8,7 +8,7 @@ //! > for the configuration options. 
use std::sync::Arc; -use bittorrent_udp_tracker_core::container::UdpTrackerContainer; +use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use tokio::task::JoinHandle; use torrust_server_lib::registar::ServiceRegistrationForm; @@ -29,7 +29,7 @@ use crate::servers::udp::server::Server; #[must_use] #[allow(clippy::async_yields_async)] #[instrument(skip(udp_tracker_container, form))] -pub async fn start_job(udp_tracker_container: Arc, form: ServiceRegistrationForm) -> JoinHandle<()> { +pub async fn start_job(udp_tracker_container: Arc, form: ServiceRegistrationForm) -> JoinHandle<()> { let bind_to = udp_tracker_container.udp_tracker_config.bind_address; let cookie_lifetime = udp_tracker_container.udp_tracker_config.cookie_lifetime; diff --git a/src/container.rs b/src/container.rs index ee26247f7..921c1624f 100644 --- a/src/container.rs +++ b/src/container.rs @@ -13,7 +13,7 @@ use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentT use bittorrent_tracker_core::whitelist; use bittorrent_tracker_core::whitelist::manager::WhitelistManager; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; -use bittorrent_udp_tracker_core::container::UdpTrackerContainer; +use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self, MAX_CONNECTION_ID_ERRORS_PER_IP}; use tokio::sync::RwLock; @@ -62,8 +62,8 @@ impl AppContainer { } #[must_use] - pub fn udp_tracker_container(&self, udp_tracker_config: &Arc) -> UdpTrackerContainer { - UdpTrackerContainer { + pub fn udp_tracker_container(&self, udp_tracker_config: &Arc) -> UdpTrackerCoreContainer { + UdpTrackerCoreContainer { udp_tracker_config: udp_tracker_config.clone(), core_config: self.core_config.clone(), announce_handler: self.announce_handler.clone(), diff --git a/src/servers/udp/handlers/mod.rs 
b/src/servers/udp/handlers/mod.rs index 08959be35..bc876bced 100644 --- a/src/servers/udp/handlers/mod.rs +++ b/src/servers/udp/handlers/mod.rs @@ -11,7 +11,7 @@ use std::time::Instant; use announce::handle_announce; use aquatic_udp_protocol::{Request, Response, TransactionId}; -use bittorrent_udp_tracker_core::container::UdpTrackerContainer; +use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::services::announce::UdpAnnounceError; use connect::handle_connect; use error::handle_error; @@ -55,7 +55,7 @@ impl CookieTimeValues { #[instrument(fields(request_id), skip(udp_request, udp_tracker_container, cookie_time_values), ret(level = Level::TRACE))] pub(crate) async fn handle_packet( udp_request: RawRequest, - udp_tracker_container: Arc, + udp_tracker_container: Arc, local_addr: SocketAddr, cookie_time_values: CookieTimeValues, ) -> Response { @@ -128,7 +128,7 @@ pub(crate) async fn handle_packet( pub async fn handle_request( request: Request, remote_addr: SocketAddr, - udp_tracker_container: Arc, + udp_tracker_container: Arc, cookie_time_values: CookieTimeValues, ) -> Result { tracing::trace!("handle request"); diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs index d180d121c..d66ad8d37 100644 --- a/src/servers/udp/server/launcher.rs +++ b/src/servers/udp/server/launcher.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use std::time::Duration; use bittorrent_tracker_client::udp::client::check; -use bittorrent_udp_tracker_core::container::UdpTrackerContainer; +use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::{self, statistics, UDP_TRACKER_LOG_TARGET}; use derive_more::Constructor; use futures_util::StreamExt; @@ -36,7 +36,7 @@ impl Launcher { /// It panics if the udp server is loaded when the tracker is private. 
#[instrument(skip(udp_tracker_container, bind_to, tx_start, rx_halt))] pub async fn run_with_graceful_shutdown( - udp_tracker_container: Arc, + udp_tracker_container: Arc, bind_to: SocketAddr, cookie_lifetime: Duration, tx_start: oneshot::Sender, @@ -114,7 +114,7 @@ impl Launcher { #[instrument(skip(receiver, udp_tracker_container))] async fn run_udp_server_main( mut receiver: Receiver, - udp_tracker_container: Arc, + udp_tracker_container: Arc, cookie_lifetime: Duration, ) { let active_requests = &mut ActiveRequests::default(); diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs index 21679a6fc..157f3ecfe 100644 --- a/src/servers/udp/server/processor.rs +++ b/src/servers/udp/server/processor.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use std::time::Duration; use aquatic_udp_protocol::Response; -use bittorrent_udp_tracker_core::container::UdpTrackerContainer; +use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::{self, statistics}; use tokio::time::Instant; use tracing::{instrument, Level}; @@ -15,12 +15,12 @@ use crate::servers::udp::{handlers, RawRequest}; pub struct Processor { socket: Arc, - udp_tracker_container: Arc, + udp_tracker_container: Arc, cookie_lifetime: f64, } impl Processor { - pub fn new(socket: Arc, udp_tracker_container: Arc, cookie_lifetime: f64) -> Self { + pub fn new(socket: Arc, udp_tracker_container: Arc, cookie_lifetime: f64) -> Self { Self { socket, udp_tracker_container, diff --git a/src/servers/udp/server/spawner.rs b/src/servers/udp/server/spawner.rs index 30acd36e9..6c1f9a48e 100644 --- a/src/servers/udp/server/spawner.rs +++ b/src/servers/udp/server/spawner.rs @@ -3,7 +3,7 @@ use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; -use bittorrent_udp_tracker_core::container::UdpTrackerContainer; +use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use derive_more::derive::Display; use derive_more::Constructor; use 
tokio::sync::oneshot; @@ -27,7 +27,7 @@ impl Spawner { #[must_use] pub fn spawn_launcher( &self, - udp_tracker_container: Arc, + udp_tracker_container: Arc, cookie_lifetime: Duration, tx_start: oneshot::Sender, rx_halt: oneshot::Receiver, diff --git a/src/servers/udp/server/states.rs b/src/servers/udp/server/states.rs index 4923d90b0..3501aebf1 100644 --- a/src/servers/udp/server/states.rs +++ b/src/servers/udp/server/states.rs @@ -3,7 +3,7 @@ use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; -use bittorrent_udp_tracker_core::container::UdpTrackerContainer; +use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use derive_more::derive::Display; use derive_more::Constructor; @@ -63,7 +63,7 @@ impl Server { #[instrument(skip(self, udp_tracker_container, form), err, ret(Display, level = Level::INFO))] pub async fn start( self, - udp_tracker_container: Arc, + udp_tracker_container: Arc, form: ServiceRegistrationForm, cookie_lifetime: Duration, ) -> Result, std::io::Error> { diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index e3d354ee4..47260fedb 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -10,7 +10,7 @@ use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepo use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; -use bittorrent_udp_tracker_core::container::UdpTrackerContainer; +use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; use tokio::sync::RwLock; @@ -26,7 +26,7 @@ pub struct Environment where S: std::fmt::Debug 
+ std::fmt::Display, { - pub udp_tracker_container: Arc, + pub udp_tracker_container: Arc, pub database: Arc>, pub in_memory_torrent_repository: Arc, @@ -58,14 +58,14 @@ impl Environment { let server = Server::new(Spawner::new(bind_to)); - let udp_tracker_container = Arc::new(UdpTrackerContainer { + let udp_tracker_container = Arc::new(UdpTrackerCoreContainer { udp_tracker_config: env_container.udp_tracker_config.clone(), core_config: env_container.core_config.clone(), - announce_handler: env_container.udp_tracker_container.announce_handler.clone(), - scrape_handler: env_container.udp_tracker_container.scrape_handler.clone(), - whitelist_authorization: env_container.udp_tracker_container.whitelist_authorization.clone(), - udp_stats_event_sender: env_container.udp_tracker_container.udp_stats_event_sender.clone(), - ban_service: env_container.udp_tracker_container.ban_service.clone(), + announce_handler: env_container.udp_tracker_core_container.announce_handler.clone(), + scrape_handler: env_container.udp_tracker_core_container.scrape_handler.clone(), + whitelist_authorization: env_container.udp_tracker_core_container.whitelist_authorization.clone(), + udp_stats_event_sender: env_container.udp_tracker_core_container.udp_stats_event_sender.clone(), + ban_service: env_container.udp_tracker_core_container.ban_service.clone(), }); Self { @@ -134,7 +134,7 @@ impl Environment { pub struct EnvContainer { pub core_config: Arc, pub udp_tracker_config: Arc, - pub udp_tracker_container: Arc, + pub udp_tracker_core_container: Arc, pub database: Arc>, pub in_memory_torrent_repository: Arc, @@ -169,7 +169,7 @@ impl EnvContainer { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - let udp_tracker_container = Arc::new(UdpTrackerContainer { + let udp_tracker_container = Arc::new(UdpTrackerCoreContainer { udp_tracker_config: udp_tracker_config.clone(), core_config: core_config.clone(), announce_handler: 
announce_handler.clone(), @@ -182,7 +182,7 @@ impl EnvContainer { Self { core_config, udp_tracker_config, - udp_tracker_container, + udp_tracker_core_container: udp_tracker_container, database, in_memory_torrent_repository, From 292a7ebe105d5d5339e7bfd1b8dc7451768048f6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 16:28:03 +0000 Subject: [PATCH 310/802] refactor: [#1298] convert fn into static method --- packages/udp-tracker-core/src/container.rs | 57 +++++++++++++--------- src/container.rs | 4 +- src/servers/udp/server/mod.rs | 6 +-- tests/servers/udp/environment.rs | 8 ++- 4 files changed, 47 insertions(+), 28 deletions(-) diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index e7b01835b..62378e0af 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -11,34 +11,47 @@ use crate::services::banning::BanService; use crate::{statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; pub struct UdpTrackerCoreContainer { + // todo: replace with TrackerCoreContainer pub core_config: Arc, - pub udp_tracker_config: Arc, pub announce_handler: Arc, pub scrape_handler: Arc, pub whitelist_authorization: Arc, + + pub udp_tracker_config: Arc, pub udp_stats_event_sender: Arc>>, + pub udp_stats_repository: Arc, pub ban_service: Arc>, } -#[must_use] -pub fn initialize_udt_tracker_container( - core_config: &Arc, - udp_tracker_config: &Arc, -) -> Arc { - let tracker_core_container = TrackerCoreContainer::initialize(core_config); - - let (udp_stats_event_sender, _udp_stats_repository) = statistics::setup::factory(core_config.tracker_usage_statistics); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - - Arc::new(UdpTrackerCoreContainer { - udp_tracker_config: udp_tracker_config.clone(), - core_config: core_config.clone(), - announce_handler: 
tracker_core_container.announce_handler.clone(), - scrape_handler: tracker_core_container.scrape_handler.clone(), - whitelist_authorization: tracker_core_container.whitelist_authorization.clone(), - udp_stats_event_sender: udp_stats_event_sender.clone(), - ban_service: ban_service.clone(), - }) +impl UdpTrackerCoreContainer { + #[must_use] + pub fn initialize(core_config: &Arc, udp_tracker_config: &Arc) -> Arc { + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(core_config)); + Self::initialize_from(&tracker_core_container, udp_tracker_config) + } + + #[must_use] + pub fn initialize_from( + tracker_core_container: &Arc, + udp_tracker_config: &Arc, + ) -> Arc { + let (udp_stats_event_sender, udp_stats_repository) = + statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + let udp_stats_repository = Arc::new(udp_stats_repository); + + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + + Arc::new(UdpTrackerCoreContainer { + core_config: tracker_core_container.core_config.clone(), + announce_handler: tracker_core_container.announce_handler.clone(), + scrape_handler: tracker_core_container.scrape_handler.clone(), + whitelist_authorization: tracker_core_container.whitelist_authorization.clone(), + + udp_tracker_config: udp_tracker_config.clone(), + udp_stats_event_sender: udp_stats_event_sender.clone(), + udp_stats_repository: udp_stats_repository.clone(), + ban_service: ban_service.clone(), + }) + } } diff --git a/src/container.rs b/src/container.rs index 921c1624f..e61d070d6 100644 --- a/src/container.rs +++ b/src/container.rs @@ -64,12 +64,14 @@ impl AppContainer { #[must_use] pub fn udp_tracker_container(&self, udp_tracker_config: &Arc) -> UdpTrackerCoreContainer { UdpTrackerCoreContainer { - udp_tracker_config: udp_tracker_config.clone(), core_config: self.core_config.clone(), announce_handler: 
self.announce_handler.clone(), scrape_handler: self.scrape_handler.clone(), whitelist_authorization: self.whitelist_authorization.clone(), + + udp_tracker_config: udp_tracker_config.clone(), udp_stats_event_sender: self.udp_stats_event_sender.clone(), + udp_stats_repository: self.udp_stats_repository.clone(), ban_service: self.ban_service.clone(), } } diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index e4b3297c7..a328c45ce 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -57,7 +57,7 @@ mod tests { use std::sync::Arc; use std::time::Duration; - use bittorrent_udp_tracker_core::container::initialize_udt_tracker_container; + use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use torrust_server_lib::registar::Registar; use torrust_tracker_test_helpers::configuration::ephemeral_public; @@ -87,7 +87,7 @@ mod tests { let stopped = Server::new(Spawner::new(bind_to)); - let udp_tracker_container = initialize_udt_tracker_container(&core_config, &udp_tracker_config); + let udp_tracker_container = UdpTrackerCoreContainer::initialize(&core_config, &udp_tracker_config); let started = stopped .start(udp_tracker_container, register.give_form(), config.cookie_lifetime) @@ -121,7 +121,7 @@ mod tests { let stopped = Server::new(Spawner::new(bind_to)); - let udp_tracker_container = initialize_udt_tracker_container(&core_config, &udp_tracker_config); + let udp_tracker_container = UdpTrackerCoreContainer::initialize(&core_config, &udp_tracker_config); let started = stopped .start( diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 47260fedb..0b2d1c79c 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -59,12 +59,14 @@ impl Environment { let server = Server::new(Spawner::new(bind_to)); let udp_tracker_container = Arc::new(UdpTrackerCoreContainer { - udp_tracker_config: env_container.udp_tracker_config.clone(), core_config: 
env_container.core_config.clone(), announce_handler: env_container.udp_tracker_core_container.announce_handler.clone(), scrape_handler: env_container.udp_tracker_core_container.scrape_handler.clone(), whitelist_authorization: env_container.udp_tracker_core_container.whitelist_authorization.clone(), + + udp_tracker_config: env_container.udp_tracker_config.clone(), udp_stats_event_sender: env_container.udp_tracker_core_container.udp_stats_event_sender.clone(), + udp_stats_repository: env_container.udp_tracker_core_container.udp_stats_repository.clone(), ban_service: env_container.udp_tracker_core_container.ban_service.clone(), }); @@ -170,12 +172,14 @@ impl EnvContainer { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); let udp_tracker_container = Arc::new(UdpTrackerCoreContainer { - udp_tracker_config: udp_tracker_config.clone(), core_config: core_config.clone(), announce_handler: announce_handler.clone(), scrape_handler: scrape_handler.clone(), whitelist_authorization: whitelist_authorization.clone(), + + udp_tracker_config: udp_tracker_config.clone(), udp_stats_event_sender: udp_stats_event_sender.clone(), + udp_stats_repository: udp_stats_repository.clone(), ban_service: ban_service.clone(), }); From be05211b02b960974ca2ccb384beb8b809cbc0bb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 16:37:45 +0000 Subject: [PATCH 311/802] refactor: [#1298] remove duplicate code --- tests/servers/udp/contract.rs | 18 ++++- tests/servers/udp/environment.rs | 123 ++++++------------------------- 2 files changed, 39 insertions(+), 102 deletions(-) diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index f6e0589f8..f0f647443 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -229,7 +229,7 @@ mod receiving_an_announce_request { logging::setup(); let env = Started::new(&configuration::ephemeral().into()).await; - let ban_service = 
env.udp_tracker_container.ban_service.clone(); + let ban_service = env.container.udp_tracker_core_container.ban_service.clone(); let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { Ok(udp_tracker_client) => udp_tracker_client, @@ -270,7 +270,13 @@ mod receiving_an_announce_request { info_hash, ); - let udp_requests_banned_before = env.udp_stats_repository.get_stats().await.udp_requests_banned; + let udp_requests_banned_before = env + .container + .udp_tracker_core_container + .udp_stats_repository + .get_stats() + .await + .udp_requests_banned; // This should return a timeout error match client.send(announce_request.into()).await { @@ -280,7 +286,13 @@ mod receiving_an_announce_request { assert!(client.receive().await.is_err()); - let udp_requests_banned_after = env.udp_stats_repository.get_stats().await.udp_requests_banned; + let udp_requests_banned_after = env + .container + .udp_tracker_core_container + .udp_stats_repository + .get_stats() + .await + .udp_requests_banned; let udp_banned_ips_total_after = ban_service.read().await.get_banned_ips_total(); // UDP counter for banned requests should be increased by 1 diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 0b2d1c79c..8afaffc15 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -2,20 +2,10 @@ use std::net::SocketAddr; use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; -use bittorrent_tracker_core::announce_handler::AnnounceHandler; -use bittorrent_tracker_core::databases::setup::initialize_database; -use bittorrent_tracker_core::databases::Database; -use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; -use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; -use 
bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; +use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; -use bittorrent_udp_tracker_core::services::banning::BanService; -use bittorrent_udp_tracker_core::{statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; -use tokio::sync::RwLock; use torrust_server_lib::registar::Registar; -use torrust_tracker_configuration::{Configuration, Core, UdpTracker, DEFAULT_TIMEOUT}; +use torrust_tracker_configuration::{Configuration, DEFAULT_TIMEOUT}; use torrust_tracker_lib::bootstrap::app::initialize_global_services; use torrust_tracker_lib::servers::udp::server::spawner::Spawner; use torrust_tracker_lib::servers::udp::server::states::{Running, Stopped}; @@ -26,12 +16,7 @@ pub struct Environment where S: std::fmt::Debug + std::fmt::Display, { - pub udp_tracker_container: Arc, - - pub database: Arc>, - pub in_memory_torrent_repository: Arc, - pub udp_stats_repository: Arc, - + pub container: Arc, pub registar: Registar, pub server: Server, } @@ -43,7 +28,11 @@ where /// Add a torrent to the tracker #[allow(dead_code)] pub fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let () = self.in_memory_torrent_repository.upsert_peer(info_hash, peer); + let () = self + .container + .tracker_core_container + .in_memory_torrent_repository + .upsert_peer(info_hash, peer); } } @@ -52,31 +41,14 @@ impl Environment { pub fn new(configuration: &Arc) -> Self { initialize_global_services(configuration); - let env_container = EnvContainer::initialize(configuration); + let container = Arc::new(EnvContainer::initialize(configuration)); - let bind_to = env_container.udp_tracker_config.bind_address; + let bind_to = container.udp_tracker_core_container.udp_tracker_config.bind_address; let server = Server::new(Spawner::new(bind_to)); - let udp_tracker_container = Arc::new(UdpTrackerCoreContainer { - core_config: env_container.core_config.clone(), 
- announce_handler: env_container.udp_tracker_core_container.announce_handler.clone(), - scrape_handler: env_container.udp_tracker_core_container.scrape_handler.clone(), - whitelist_authorization: env_container.udp_tracker_core_container.whitelist_authorization.clone(), - - udp_tracker_config: env_container.udp_tracker_config.clone(), - udp_stats_event_sender: env_container.udp_tracker_core_container.udp_stats_event_sender.clone(), - udp_stats_repository: env_container.udp_tracker_core_container.udp_stats_repository.clone(), - ban_service: env_container.udp_tracker_core_container.ban_service.clone(), - }); - Self { - udp_tracker_container, - - database: env_container.database.clone(), - in_memory_torrent_repository: env_container.in_memory_torrent_repository.clone(), - udp_stats_repository: env_container.udp_stats_repository.clone(), - + container, registar: Registar::default(), server, } @@ -84,19 +56,18 @@ impl Environment { #[allow(dead_code)] pub async fn start(self) -> Environment { - let cookie_lifetime = self.udp_tracker_container.udp_tracker_config.cookie_lifetime; + let cookie_lifetime = self.container.udp_tracker_core_container.udp_tracker_config.cookie_lifetime; Environment { - udp_tracker_container: self.udp_tracker_container.clone(), - - database: self.database.clone(), - in_memory_torrent_repository: self.in_memory_torrent_repository.clone(), - udp_stats_repository: self.udp_stats_repository.clone(), - + container: self.container.clone(), registar: self.registar.clone(), server: self .server - .start(self.udp_tracker_container, self.registar.give_form(), cookie_lifetime) + .start( + self.container.udp_tracker_core_container.clone(), + self.registar.give_form(), + cookie_lifetime, + ) .await .unwrap(), } @@ -117,12 +88,7 @@ impl Environment { .expect("it should stop the environment within the timeout"); Environment { - udp_tracker_container: self.udp_tracker_container, - - database: self.database, - in_memory_torrent_repository: 
self.in_memory_torrent_repository, - udp_stats_repository: self.udp_stats_repository, - + container: self.container, registar: Registar::default(), server: stopped.expect("it stop the udp tracker service"), } @@ -134,13 +100,8 @@ impl Environment { } pub struct EnvContainer { - pub core_config: Arc, - pub udp_tracker_config: Arc, + pub tracker_core_container: Arc, pub udp_tracker_core_container: Arc, - - pub database: Arc>, - pub in_memory_torrent_repository: Arc, - pub udp_stats_repository: Arc, } impl EnvContainer { @@ -149,48 +110,12 @@ impl EnvContainer { let udp_tracker_configurations = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); let udp_tracker_config = Arc::new(udp_tracker_configurations[0].clone()); - // UDP stats - let (udp_stats_event_sender, udp_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - let udp_stats_repository = Arc::new(udp_stats_repository); - - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let database = initialize_database(&configuration.core); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&configuration.core, &in_memory_whitelist.clone())); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - - let announce_handler = Arc::new(AnnounceHandler::new( - &configuration.core, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - let udp_tracker_container = Arc::new(UdpTrackerCoreContainer { - core_config: core_config.clone(), - announce_handler: 
announce_handler.clone(), - scrape_handler: scrape_handler.clone(), - whitelist_authorization: whitelist_authorization.clone(), - - udp_tracker_config: udp_tracker_config.clone(), - udp_stats_event_sender: udp_stats_event_sender.clone(), - udp_stats_repository: udp_stats_repository.clone(), - ban_service: ban_service.clone(), - }); + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); + let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from(&tracker_core_container, &udp_tracker_config); Self { - core_config, - udp_tracker_config, - udp_tracker_core_container: udp_tracker_container, - - database, - in_memory_torrent_repository, - udp_stats_repository, + tracker_core_container, + udp_tracker_core_container, } } } From c8ec781290a064f3c976ae4ef95542aed685fb65 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 16:43:26 +0000 Subject: [PATCH 312/802] refactor: [#1298] move HttpApiContainer to tracker-api-core package --- packages/tracker-api-core/src/container.rs | 49 +++++++++++++++++++++ packages/tracker-api-core/src/lib.rs | 1 + src/bootstrap/jobs/tracker_apis.rs | 4 +- src/container.rs | 40 +---------------- src/servers/apis/routes.rs | 2 +- src/servers/apis/server.rs | 4 +- src/servers/apis/v1/context/stats/routes.rs | 2 +- src/servers/apis/v1/routes.rs | 2 +- tests/servers/api/environment.rs | 2 +- 9 files changed, 59 insertions(+), 47 deletions(-) create mode 100644 packages/tracker-api-core/src/container.rs diff --git a/packages/tracker-api-core/src/container.rs b/packages/tracker-api-core/src/container.rs new file mode 100644 index 000000000..9a45008cf --- /dev/null +++ b/packages/tracker-api-core/src/container.rs @@ -0,0 +1,49 @@ +use std::sync::Arc; + +use bittorrent_tracker_core::authentication::handler::KeysHandler; +use bittorrent_tracker_core::container::TrackerCoreContainer; +use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use 
bittorrent_tracker_core::whitelist::manager::WhitelistManager; +use bittorrent_udp_tracker_core::services::banning::BanService; +use bittorrent_udp_tracker_core::{self, MAX_CONNECTION_ID_ERRORS_PER_IP}; +use tokio::sync::RwLock; +use torrust_tracker_configuration::{Core, HttpApi}; + +pub struct HttpApiContainer { + pub core_config: Arc, + pub http_api_config: Arc, + pub in_memory_torrent_repository: Arc, + pub keys_handler: Arc, + pub whitelist_manager: Arc, + pub ban_service: Arc>, + pub http_stats_repository: Arc, + pub udp_stats_repository: Arc, +} + +#[must_use] +pub fn initialize_http_api_container(core_config: &Arc, http_api_config: &Arc) -> Arc { + let tracker_core_container = TrackerCoreContainer::initialize(core_config); + + // HTTP stats + let (_http_stats_event_sender, http_stats_repository) = + bittorrent_http_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); + let http_stats_repository = Arc::new(http_stats_repository); + + // UDP stats + let (_udp_stats_event_sender, udp_stats_repository) = + bittorrent_udp_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); + let udp_stats_repository = Arc::new(udp_stats_repository); + + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + + Arc::new(HttpApiContainer { + http_api_config: http_api_config.clone(), + core_config: core_config.clone(), + in_memory_torrent_repository: tracker_core_container.in_memory_torrent_repository.clone(), + keys_handler: tracker_core_container.keys_handler.clone(), + whitelist_manager: tracker_core_container.whitelist_manager.clone(), + ban_service: ban_service.clone(), + http_stats_repository: http_stats_repository.clone(), + udp_stats_repository: udp_stats_repository.clone(), + }) +} diff --git a/packages/tracker-api-core/src/lib.rs b/packages/tracker-api-core/src/lib.rs index 3449ec7b4..ddf1d9afd 100644 --- a/packages/tracker-api-core/src/lib.rs +++ 
b/packages/tracker-api-core/src/lib.rs @@ -1 +1,2 @@ +pub mod container; pub mod statistics; diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 66152905a..fa32ce925 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -27,10 +27,10 @@ use axum_server::tls_rustls::RustlsConfig; use tokio::task::JoinHandle; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::ServiceRegistrationForm; +use torrust_tracker_api_core::container::HttpApiContainer; use torrust_tracker_configuration::AccessTokens; use tracing::instrument; -use crate::container::HttpApiContainer; use crate::servers::apis::server::{ApiServer, Launcher}; use crate::servers::apis::Version; @@ -98,11 +98,11 @@ mod tests { use std::sync::Arc; use torrust_server_lib::registar::Registar; + use torrust_tracker_api_core::container::initialize_http_api_container; use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::tracker_apis::start_job; - use crate::container::initialize_http_api_container; use crate::servers::apis::Version; #[tokio::test] diff --git a/src/container.rs b/src/container.rs index e61d070d6..ed9688935 100644 --- a/src/container.rs +++ b/src/container.rs @@ -17,6 +17,7 @@ use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self, MAX_CONNECTION_ID_ERRORS_PER_IP}; use tokio::sync::RwLock; +use torrust_tracker_api_core::container::HttpApiContainer; use torrust_tracker_configuration::{Configuration, Core, HttpApi, HttpTracker, UdpTracker}; use tracing::instrument; @@ -91,17 +92,6 @@ impl AppContainer { } } -pub struct HttpApiContainer { - pub core_config: Arc, - pub http_api_config: Arc, - pub in_memory_torrent_repository: Arc, - pub keys_handler: Arc, - pub whitelist_manager: Arc, - pub 
ban_service: Arc>, - pub http_stats_repository: Arc, - pub udp_stats_repository: Arc, -} - /// It initializes the IoC Container. #[instrument(skip())] pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { @@ -143,31 +133,3 @@ pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { udp_stats_repository, } } - -#[must_use] -pub fn initialize_http_api_container(core_config: &Arc, http_api_config: &Arc) -> Arc { - let tracker_core_container = TrackerCoreContainer::initialize(core_config); - - // HTTP stats - let (_http_stats_event_sender, http_stats_repository) = - bittorrent_http_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); - let http_stats_repository = Arc::new(http_stats_repository); - - // UDP stats - let (_udp_stats_event_sender, udp_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); - let udp_stats_repository = Arc::new(udp_stats_repository); - - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - - Arc::new(HttpApiContainer { - http_api_config: http_api_config.clone(), - core_config: core_config.clone(), - in_memory_torrent_repository: tracker_core_container.in_memory_torrent_repository.clone(), - keys_handler: tracker_core_container.keys_handler.clone(), - whitelist_manager: tracker_core_container.whitelist_manager.clone(), - ban_service: ban_service.clone(), - http_stats_repository: http_stats_repository.clone(), - udp_stats_repository: udp_stats_repository.clone(), - }) -} diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index f21c59207..558d02913 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -16,6 +16,7 @@ use axum::routing::get; use axum::{middleware, BoxError, Router}; use hyper::{Request, StatusCode}; use torrust_server_lib::logging::Latency; +use torrust_tracker_api_core::container::HttpApiContainer; use 
torrust_tracker_configuration::{AccessTokens, DEFAULT_TIMEOUT}; use tower::timeout::TimeoutLayer; use tower::ServiceBuilder; @@ -30,7 +31,6 @@ use tracing::{instrument, Level, Span}; use super::v1; use super::v1::context::health_check::handlers::health_check_handler; use super::v1::middlewares::auth::State; -use crate::container::HttpApiContainer; use crate::servers::apis::API_LOG_TARGET; /// Add all API routes to the router. diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 42f16ab77..637ec3b2b 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -38,11 +38,11 @@ use torrust_axum_server::signals::graceful_shutdown; use torrust_server_lib::logging::STARTED_ON; use torrust_server_lib::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; use torrust_server_lib::signals::{Halted, Started}; +use torrust_tracker_api_core::container::HttpApiContainer; use torrust_tracker_configuration::AccessTokens; use tracing::{instrument, Level}; use super::routes::router; -use crate::container::HttpApiContainer; use crate::servers::apis::API_LOG_TARGET; /// Errors that can occur when starting or stopping the API server. 
@@ -296,10 +296,10 @@ mod tests { use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; + use torrust_tracker_api_core::container::initialize_http_api_container; use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::bootstrap::app::initialize_global_services; - use crate::container::initialize_http_api_container; use crate::servers::apis::server::{ApiServer, Launcher}; #[tokio::test] diff --git a/src/servers/apis/v1/context/stats/routes.rs b/src/servers/apis/v1/context/stats/routes.rs index e660005ec..aa723f7ec 100644 --- a/src/servers/apis/v1/context/stats/routes.rs +++ b/src/servers/apis/v1/context/stats/routes.rs @@ -7,9 +7,9 @@ use std::sync::Arc; use axum::routing::get; use axum::Router; +use torrust_tracker_api_core::container::HttpApiContainer; use super::handlers::get_stats_handler; -use crate::container::HttpApiContainer; /// It adds the routes to the router for the [`stats`](crate::servers::apis::v1::context::stats) API context. pub fn add(prefix: &str, router: Router, http_api_container: &Arc) -> Router { diff --git a/src/servers/apis/v1/routes.rs b/src/servers/apis/v1/routes.rs index e593cb140..7ea01c685 100644 --- a/src/servers/apis/v1/routes.rs +++ b/src/servers/apis/v1/routes.rs @@ -2,9 +2,9 @@ use std::sync::Arc; use axum::Router; +use torrust_tracker_api_core::container::HttpApiContainer; use super::context::{auth_key, stats, torrent, whitelist}; -use crate::container::HttpApiContainer; /// Add the routes for the v1 API. 
pub fn add(prefix: &str, router: Router, http_api_container: &Arc) -> Router { diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 7cf088568..066ad6fff 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -18,9 +18,9 @@ use tokio::sync::RwLock; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; +use torrust_tracker_api_core::container::HttpApiContainer; use torrust_tracker_configuration::{Configuration, HttpApi}; use torrust_tracker_lib::bootstrap::app::initialize_global_services; -use torrust_tracker_lib::container::HttpApiContainer; use torrust_tracker_lib::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; use torrust_tracker_primitives::peer; From 3052ebe2d4bb9e1fbbe4502ac4355ee99071c393 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 16:48:31 +0000 Subject: [PATCH 313/802] refactor: [#1298] convert fn into static method --- packages/tracker-api-core/src/container.rs | 46 +++++++++++----------- src/bootstrap/jobs/tracker_apis.rs | 4 +- src/servers/apis/server.rs | 4 +- 3 files changed, 28 insertions(+), 26 deletions(-) diff --git a/packages/tracker-api-core/src/container.rs b/packages/tracker-api-core/src/container.rs index 9a45008cf..6dd2d80b1 100644 --- a/packages/tracker-api-core/src/container.rs +++ b/packages/tracker-api-core/src/container.rs @@ -20,30 +20,32 @@ pub struct HttpApiContainer { pub udp_stats_repository: Arc, } -#[must_use] -pub fn initialize_http_api_container(core_config: &Arc, http_api_config: &Arc) -> Arc { - let tracker_core_container = TrackerCoreContainer::initialize(core_config); +impl HttpApiContainer { + #[must_use] + pub fn initialize(core_config: &Arc, http_api_config: &Arc) -> Arc { + let tracker_core_container = TrackerCoreContainer::initialize(core_config); - // HTTP stats - let (_http_stats_event_sender, 
http_stats_repository) = - bittorrent_http_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); - let http_stats_repository = Arc::new(http_stats_repository); + // HTTP stats + let (_http_stats_event_sender, http_stats_repository) = + bittorrent_http_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); + let http_stats_repository = Arc::new(http_stats_repository); - // UDP stats - let (_udp_stats_event_sender, udp_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); - let udp_stats_repository = Arc::new(udp_stats_repository); + // UDP stats + let (_udp_stats_event_sender, udp_stats_repository) = + bittorrent_udp_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); + let udp_stats_repository = Arc::new(udp_stats_repository); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - Arc::new(HttpApiContainer { - http_api_config: http_api_config.clone(), - core_config: core_config.clone(), - in_memory_torrent_repository: tracker_core_container.in_memory_torrent_repository.clone(), - keys_handler: tracker_core_container.keys_handler.clone(), - whitelist_manager: tracker_core_container.whitelist_manager.clone(), - ban_service: ban_service.clone(), - http_stats_repository: http_stats_repository.clone(), - udp_stats_repository: udp_stats_repository.clone(), - }) + Arc::new(HttpApiContainer { + http_api_config: http_api_config.clone(), + core_config: core_config.clone(), + in_memory_torrent_repository: tracker_core_container.in_memory_torrent_repository.clone(), + keys_handler: tracker_core_container.keys_handler.clone(), + whitelist_manager: tracker_core_container.whitelist_manager.clone(), + ban_service: ban_service.clone(), + http_stats_repository: http_stats_repository.clone(), + 
udp_stats_repository: udp_stats_repository.clone(), + }) + } } diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index fa32ce925..82dcccb00 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -98,7 +98,7 @@ mod tests { use std::sync::Arc; use torrust_server_lib::registar::Registar; - use torrust_tracker_api_core::container::initialize_http_api_container; + use torrust_tracker_api_core::container::HttpApiContainer; use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::bootstrap::app::initialize_global_services; @@ -113,7 +113,7 @@ mod tests { initialize_global_services(&cfg); - let http_api_container = initialize_http_api_container(&core_config, &http_api_config); + let http_api_container = HttpApiContainer::initialize(&core_config, &http_api_config); let version = Version::V1; diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 637ec3b2b..20e350d78 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -296,7 +296,7 @@ mod tests { use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; - use torrust_tracker_api_core::container::initialize_http_api_container; + use torrust_tracker_api_core::container::HttpApiContainer; use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::bootstrap::app::initialize_global_services; @@ -322,7 +322,7 @@ mod tests { let register = &Registar::default(); - let http_api_container = initialize_http_api_container(&core_config, &http_api_config); + let http_api_container = HttpApiContainer::initialize(&core_config, &http_api_config); let started = stopped .start(http_api_container, register.give_form(), access_tokens) From 3ce6fb21a4ae9d267d788193a4696c7add0276eb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 16:57:43 +0000 Subject: [PATCH 314/802] refactor: [#1298] reorganize code --- 
packages/tracker-api-core/src/container.rs | 32 ++++++++++++++++------ 1 file changed, 24 insertions(+), 8 deletions(-) diff --git a/packages/tracker-api-core/src/container.rs b/packages/tracker-api-core/src/container.rs index 6dd2d80b1..505b5d7d8 100644 --- a/packages/tracker-api-core/src/container.rs +++ b/packages/tracker-api-core/src/container.rs @@ -10,42 +10,58 @@ use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, HttpApi}; pub struct HttpApiContainer { + // todo: replace with TrackerCoreContainer pub core_config: Arc, - pub http_api_config: Arc, pub in_memory_torrent_repository: Arc, pub keys_handler: Arc, pub whitelist_manager: Arc, - pub ban_service: Arc>, + + // todo: replace with HttpTrackerCoreContainer pub http_stats_repository: Arc, + + // todo: replace with UdpTrackerCoreContainer + pub ban_service: Arc>, pub udp_stats_repository: Arc, + + pub http_api_config: Arc, } impl HttpApiContainer { #[must_use] pub fn initialize(core_config: &Arc, http_api_config: &Arc) -> Arc { - let tracker_core_container = TrackerCoreContainer::initialize(core_config); + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(core_config)); + Self::initialize_from(&tracker_core_container, http_api_config) + } + #[must_use] + pub fn initialize_from( + tracker_core_container: &Arc, + http_api_config: &Arc, + ) -> Arc { // HTTP stats let (_http_stats_event_sender, http_stats_repository) = - bittorrent_http_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); + bittorrent_http_tracker_core::statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); let http_stats_repository = Arc::new(http_stats_repository); // UDP stats let (_udp_stats_event_sender, udp_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(core_config.tracker_usage_statistics); + bittorrent_udp_tracker_core::statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); let 
udp_stats_repository = Arc::new(udp_stats_repository); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); Arc::new(HttpApiContainer { - http_api_config: http_api_config.clone(), - core_config: core_config.clone(), + core_config: tracker_core_container.core_config.clone(), in_memory_torrent_repository: tracker_core_container.in_memory_torrent_repository.clone(), keys_handler: tracker_core_container.keys_handler.clone(), whitelist_manager: tracker_core_container.whitelist_manager.clone(), - ban_service: ban_service.clone(), + http_stats_repository: http_stats_repository.clone(), + + ban_service: ban_service.clone(), udp_stats_repository: udp_stats_repository.clone(), + + http_api_config: http_api_config.clone(), }) } } From 0a44d01a0367fe0ff23110c7b063e6e9559d6e32 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 17:29:31 +0000 Subject: [PATCH 315/802] refactor: [#1298] remove duplicate code --- packages/tracker-api-core/src/container.rs | 51 +++--- src/bootstrap/jobs/tracker_apis.rs | 21 ++- src/container.rs | 6 +- src/servers/apis/routes.rs | 4 +- src/servers/apis/server.rs | 17 +- src/servers/apis/v1/context/stats/routes.rs | 4 +- src/servers/apis/v1/routes.rs | 4 +- tests/servers/api/environment.rs | 150 +++++++----------- .../api/v1/contract/context/auth_key.rs | 35 ++-- .../api/v1/contract/context/whitelist.rs | 36 +++-- 10 files changed, 173 insertions(+), 155 deletions(-) diff --git a/packages/tracker-api-core/src/container.rs b/packages/tracker-api-core/src/container.rs index 505b5d7d8..6a650b052 100644 --- a/packages/tracker-api-core/src/container.rs +++ b/packages/tracker-api-core/src/container.rs @@ -1,15 +1,17 @@ use std::sync::Arc; +use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use bittorrent_tracker_core::authentication::handler::KeysHandler; use bittorrent_tracker_core::container::TrackerCoreContainer; use 
bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist::manager::WhitelistManager; +use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::services::banning::BanService; -use bittorrent_udp_tracker_core::{self, MAX_CONNECTION_ID_ERRORS_PER_IP}; +use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; -use torrust_tracker_configuration::{Core, HttpApi}; +use torrust_tracker_configuration::{Core, HttpApi, HttpTracker, UdpTracker}; -pub struct HttpApiContainer { +pub struct TrackerHttpApiCoreContainer { // todo: replace with TrackerCoreContainer pub core_config: Arc, pub in_memory_torrent_repository: Arc, @@ -26,40 +28,43 @@ pub struct HttpApiContainer { pub http_api_config: Arc, } -impl HttpApiContainer { +impl TrackerHttpApiCoreContainer { #[must_use] - pub fn initialize(core_config: &Arc, http_api_config: &Arc) -> Arc { + pub fn initialize( + core_config: &Arc, + http_tracker_config: &Arc, + udp_tracker_config: &Arc, + http_api_config: &Arc, + ) -> Arc { let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(core_config)); - Self::initialize_from(&tracker_core_container, http_api_config) + let http_tracker_core_container = HttpTrackerCoreContainer::initialize_from(&tracker_core_container, http_tracker_config); + let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from(&tracker_core_container, udp_tracker_config); + + Self::initialize_from( + &tracker_core_container, + &http_tracker_core_container, + &udp_tracker_core_container, + http_api_config, + ) } #[must_use] pub fn initialize_from( tracker_core_container: &Arc, + http_tracker_core_container: &Arc, + udp_tracker_core_container: &Arc, http_api_config: &Arc, - ) -> Arc { - // HTTP stats - let (_http_stats_event_sender, http_stats_repository) = - 
bittorrent_http_tracker_core::statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); - let http_stats_repository = Arc::new(http_stats_repository); - - // UDP stats - let (_udp_stats_event_sender, udp_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); - let udp_stats_repository = Arc::new(udp_stats_repository); - - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - - Arc::new(HttpApiContainer { + ) -> Arc { + Arc::new(TrackerHttpApiCoreContainer { core_config: tracker_core_container.core_config.clone(), in_memory_torrent_repository: tracker_core_container.in_memory_torrent_repository.clone(), keys_handler: tracker_core_container.keys_handler.clone(), whitelist_manager: tracker_core_container.whitelist_manager.clone(), - http_stats_repository: http_stats_repository.clone(), + http_stats_repository: http_tracker_core_container.http_stats_repository.clone(), - ban_service: ban_service.clone(), - udp_stats_repository: udp_stats_repository.clone(), + ban_service: udp_tracker_core_container.ban_service.clone(), + udp_stats_repository: udp_tracker_core_container.udp_stats_repository.clone(), http_api_config: http_api_config.clone(), }) diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 82dcccb00..458d25367 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -27,7 +27,7 @@ use axum_server::tls_rustls::RustlsConfig; use tokio::task::JoinHandle; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::ServiceRegistrationForm; -use torrust_tracker_api_core::container::HttpApiContainer; +use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_tracker_configuration::AccessTokens; use tracing::instrument; @@ -56,7 +56,7 @@ pub struct ApiServerJobStarted(); /// 
#[instrument(skip(http_api_container, form))] pub async fn start_job( - http_api_container: Arc, + http_api_container: Arc, form: ServiceRegistrationForm, version: Version, ) -> Option> { @@ -78,7 +78,7 @@ pub async fn start_job( async fn start_v1( socket: SocketAddr, tls: Option, - http_api_container: Arc, + http_api_container: Arc, form: ServiceRegistrationForm, access_tokens: Arc, ) -> JoinHandle<()> { @@ -98,7 +98,7 @@ mod tests { use std::sync::Arc; use torrust_server_lib::registar::Registar; - use torrust_tracker_api_core::container::HttpApiContainer; + use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::bootstrap::app::initialize_global_services; @@ -108,12 +108,21 @@ mod tests { #[tokio::test] async fn it_should_start_http_tracker() { let cfg = Arc::new(ephemeral_public()); + let core_config = Arc::new(cfg.core.clone()); - let http_api_config = Arc::new(cfg.http_api.clone().unwrap()); + + let http_tracker_config = cfg.http_trackers.clone().expect("missing HTTP tracker configuration"); + let http_tracker_config = Arc::new(http_tracker_config[0].clone()); + + let udp_tracker_configurations = cfg.udp_trackers.clone().expect("missing UDP tracker configuration"); + let udp_tracker_config = Arc::new(udp_tracker_configurations[0].clone()); + + let http_api_config = Arc::new(cfg.http_api.clone().expect("missing HTTP API configuration").clone()); initialize_global_services(&cfg); - let http_api_container = HttpApiContainer::initialize(&core_config, &http_api_config); + let http_api_container = + TrackerHttpApiCoreContainer::initialize(&core_config, &http_tracker_config, &udp_tracker_config, &http_api_config); let version = Version::V1; diff --git a/src/container.rs b/src/container.rs index ed9688935..2175c112f 100644 --- a/src/container.rs +++ b/src/container.rs @@ -17,7 +17,7 @@ use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use 
bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self, MAX_CONNECTION_ID_ERRORS_PER_IP}; use tokio::sync::RwLock; -use torrust_tracker_api_core::container::HttpApiContainer; +use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_tracker_configuration::{Configuration, Core, HttpApi, HttpTracker, UdpTracker}; use tracing::instrument; @@ -78,8 +78,8 @@ impl AppContainer { } #[must_use] - pub fn http_api_container(&self, http_api_config: &Arc) -> HttpApiContainer { - HttpApiContainer { + pub fn http_api_container(&self, http_api_config: &Arc) -> TrackerHttpApiCoreContainer { + TrackerHttpApiCoreContainer { http_api_config: http_api_config.clone(), core_config: self.core_config.clone(), in_memory_torrent_repository: self.in_memory_torrent_repository.clone(), diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index 558d02913..64f6f0cb8 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -16,7 +16,7 @@ use axum::routing::get; use axum::{middleware, BoxError, Router}; use hyper::{Request, StatusCode}; use torrust_server_lib::logging::Latency; -use torrust_tracker_api_core::container::HttpApiContainer; +use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_tracker_configuration::{AccessTokens, DEFAULT_TIMEOUT}; use tower::timeout::TimeoutLayer; use tower::ServiceBuilder; @@ -36,7 +36,7 @@ use crate::servers::apis::API_LOG_TARGET; /// Add all API routes to the router. 
#[instrument(skip(http_api_container, access_tokens))] pub fn router( - http_api_container: Arc, + http_api_container: Arc, access_tokens: Arc, server_socket_addr: SocketAddr, ) -> Router { diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 20e350d78..df78bf7dc 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -38,7 +38,7 @@ use torrust_axum_server::signals::graceful_shutdown; use torrust_server_lib::logging::STARTED_ON; use torrust_server_lib::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; use torrust_server_lib::signals::{Halted, Started}; -use torrust_tracker_api_core::container::HttpApiContainer; +use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_tracker_configuration::AccessTokens; use tracing::{instrument, Level}; @@ -125,7 +125,7 @@ impl ApiServer { #[instrument(skip(self, http_api_container, form, access_tokens), err, ret(Display, level = Level::INFO))] pub async fn start( self, - http_api_container: Arc, + http_api_container: Arc, form: ServiceRegistrationForm, access_tokens: Arc, ) -> Result, Error> { @@ -238,7 +238,7 @@ impl Launcher { #[instrument(skip(self, http_api_container, access_tokens, tx_start, rx_halt))] pub fn start( &self, - http_api_container: Arc, + http_api_container: Arc, access_tokens: Arc, tx_start: Sender, rx_halt: Receiver, @@ -296,7 +296,7 @@ mod tests { use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; - use torrust_tracker_api_core::container::HttpApiContainer; + use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::bootstrap::app::initialize_global_services; @@ -306,7 +306,11 @@ mod tests { async fn it_should_be_able_to_start_and_stop() { let cfg = Arc::new(ephemeral_public()); let core_config = Arc::new(cfg.core.clone()); - let http_api_config = Arc::new(cfg.http_api.clone().unwrap()); 
+ let http_tracker_config = cfg.http_trackers.clone().expect("missing HTTP tracker configuration"); + let http_tracker_config = Arc::new(http_tracker_config[0].clone()); + let udp_tracker_configurations = cfg.udp_trackers.clone().expect("missing UDP tracker configuration"); + let udp_tracker_config = Arc::new(udp_tracker_configurations[0].clone()); + let http_api_config = Arc::new(cfg.http_api.clone().expect("missing HTTP API configuration").clone()); initialize_global_services(&cfg); @@ -322,7 +326,8 @@ mod tests { let register = &Registar::default(); - let http_api_container = HttpApiContainer::initialize(&core_config, &http_api_config); + let http_api_container = + TrackerHttpApiCoreContainer::initialize(&core_config, &http_tracker_config, &udp_tracker_config, &http_api_config); let started = stopped .start(http_api_container, register.give_form(), access_tokens) diff --git a/src/servers/apis/v1/context/stats/routes.rs b/src/servers/apis/v1/context/stats/routes.rs index aa723f7ec..df198eba6 100644 --- a/src/servers/apis/v1/context/stats/routes.rs +++ b/src/servers/apis/v1/context/stats/routes.rs @@ -7,12 +7,12 @@ use std::sync::Arc; use axum::routing::get; use axum::Router; -use torrust_tracker_api_core::container::HttpApiContainer; +use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use super::handlers::get_stats_handler; /// It adds the routes to the router for the [`stats`](crate::servers::apis::v1::context::stats) API context. 
-pub fn add(prefix: &str, router: Router, http_api_container: &Arc) -> Router { +pub fn add(prefix: &str, router: Router, http_api_container: &Arc) -> Router { router.route( &format!("{prefix}/stats"), get(get_stats_handler).with_state(( diff --git a/src/servers/apis/v1/routes.rs b/src/servers/apis/v1/routes.rs index 7ea01c685..90596f0e7 100644 --- a/src/servers/apis/v1/routes.rs +++ b/src/servers/apis/v1/routes.rs @@ -2,12 +2,12 @@ use std::sync::Arc; use axum::Router; -use torrust_tracker_api_core::container::HttpApiContainer; +use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use super::context::{auth_key, stats, torrent, whitelist}; /// Add the routes for the v1 API. -pub fn add(prefix: &str, router: Router, http_api_container: &Arc) -> Router { +pub fn add(prefix: &str, router: Router, http_api_container: &Arc) -> Router { let v1_prefix = format!("{prefix}/v1"); let router = auth_key::routes::add(&v1_prefix, router, &http_api_container.keys_handler.clone()); diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 066ad6fff..90c58c821 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -1,25 +1,16 @@ use std::net::SocketAddr; use std::sync::Arc; +use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use bittorrent_primitives::info_hash::InfoHash; -use bittorrent_tracker_core::authentication::handler::KeysHandler; -use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; -use bittorrent_tracker_core::authentication::key::repository::persisted::DatabaseKeyRepository; -use bittorrent_tracker_core::authentication::service::AuthenticationService; -use bittorrent_tracker_core::databases::setup::initialize_database; -use bittorrent_tracker_core::databases::Database; -use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use 
bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; -use bittorrent_tracker_core::whitelist::setup::initialize_whitelist_manager; -use bittorrent_udp_tracker_core::services::banning::BanService; -use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; +use bittorrent_tracker_core::container::TrackerCoreContainer; +use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use futures::executor::block_on; -use tokio::sync::RwLock; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; -use torrust_tracker_api_core::container::HttpApiContainer; -use torrust_tracker_configuration::{Configuration, HttpApi}; +use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; +use torrust_tracker_configuration::Configuration; use torrust_tracker_lib::bootstrap::app::initialize_global_services; use torrust_tracker_lib::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; use torrust_tracker_primitives::peer; @@ -28,12 +19,7 @@ pub struct Environment where S: std::fmt::Debug + std::fmt::Display, { - pub http_api_container: Arc, - - pub database: Arc>, - pub authentication_service: Arc, - pub in_memory_whitelist: Arc, - + pub container: Arc, pub registar: Registar, pub server: ApiServer, } @@ -45,7 +31,8 @@ where /// Add a torrent to the tracker pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { let () = self - .http_api_container + .container + .tracker_core_container .in_memory_torrent_repository .upsert_peer(info_hash, peer); } @@ -55,40 +42,43 @@ impl Environment { pub fn new(configuration: &Arc) -> Self { initialize_global_services(configuration); - let env_container = EnvContainer::initialize(configuration); + let container = Arc::new(EnvContainer::initialize(configuration)); - let bind_to = env_container.http_api_config.bind_address; + let bind_to = 
container.tracker_http_api_core_container.http_api_config.bind_address; - let tls = block_on(make_rust_tls(&env_container.http_api_config.tsl_config)).map(|tls| tls.expect("tls config failed")); + let tls = block_on(make_rust_tls( + &container.tracker_http_api_core_container.http_api_config.tsl_config, + )) + .map(|tls| tls.expect("tls config failed")); let server = ApiServer::new(Launcher::new(bind_to, tls)); Self { - http_api_container: env_container.http_api_container, - - database: env_container.database.clone(), - authentication_service: env_container.authentication_service.clone(), - in_memory_whitelist: env_container.in_memory_whitelist.clone(), - + container, registar: Registar::default(), server, } } pub async fn start(self) -> Environment { - let access_tokens = Arc::new(self.http_api_container.http_api_config.access_tokens.clone()); + let access_tokens = Arc::new( + self.container + .tracker_http_api_core_container + .http_api_config + .access_tokens + .clone(), + ); Environment { - http_api_container: self.http_api_container.clone(), - - database: self.database.clone(), - authentication_service: self.authentication_service.clone(), - in_memory_whitelist: self.in_memory_whitelist.clone(), - + container: self.container.clone(), registar: self.registar.clone(), server: self .server - .start(self.http_api_container, self.registar.give_form(), access_tokens) + .start( + self.container.tracker_http_api_core_container.clone(), + self.registar.give_form(), + access_tokens, + ) .await .unwrap(), } @@ -102,12 +92,7 @@ impl Environment { pub async fn stop(self) -> Environment { Environment { - http_api_container: self.http_api_container, - - database: self.database, - authentication_service: self.authentication_service, - in_memory_whitelist: self.in_memory_whitelist, - + container: self.container, registar: Registar::default(), server: self.server.stop().await.unwrap(), } @@ -118,7 +103,13 @@ impl Environment { ConnectionInfo { origin, - api_token: 
self.http_api_container.http_api_config.access_tokens.get("admin").cloned(), + api_token: self + .container + .tracker_http_api_core_container + .http_api_config + .access_tokens + .get("admin") + .cloned(), } } @@ -128,16 +119,23 @@ impl Environment { } pub struct EnvContainer { - pub http_api_config: Arc, - pub http_api_container: Arc, - pub database: Arc>, - pub authentication_service: Arc, - pub in_memory_whitelist: Arc, + pub tracker_core_container: Arc, + pub tracker_http_api_core_container: Arc, } impl EnvContainer { pub fn initialize(configuration: &Configuration) -> Self { let core_config = Arc::new(configuration.core.clone()); + + let http_tracker_config = configuration + .http_trackers + .clone() + .expect("missing HTTP tracker configuration"); + let http_tracker_config = Arc::new(http_tracker_config[0].clone()); + + let udp_tracker_configurations = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); + let udp_tracker_config = Arc::new(udp_tracker_configurations[0].clone()); + let http_api_config = Arc::new( configuration .http_api @@ -146,47 +144,21 @@ impl EnvContainer { .clone(), ); - // HTTP stats - let (_http_stats_event_sender, http_stats_repository) = - bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); - let http_stats_repository = Arc::new(http_stats_repository); - - // UDP stats - let (_udp_stats_event_sender, udp_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); - let udp_stats_repository = Arc::new(udp_stats_repository); - - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let database = initialize_database(&configuration.core); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - - let whitelist_manager = initialize_whitelist_manager(database.clone(), in_memory_whitelist.clone()); - let db_key_repository = 
Arc::new(DatabaseKeyRepository::new(&database)); - let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(AuthenticationService::new(&configuration.core, &in_memory_key_repository)); - let keys_handler = Arc::new(KeysHandler::new( - &db_key_repository.clone(), - &in_memory_key_repository.clone(), - )); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let http_api_container = Arc::new(HttpApiContainer { - http_api_config: http_api_config.clone(), - core_config: core_config.clone(), - in_memory_torrent_repository: in_memory_torrent_repository.clone(), - keys_handler: keys_handler.clone(), - whitelist_manager: whitelist_manager.clone(), - ban_service: ban_service.clone(), - http_stats_repository: http_stats_repository.clone(), - udp_stats_repository: udp_stats_repository.clone(), - }); + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); + let http_tracker_core_container = + HttpTrackerCoreContainer::initialize_from(&tracker_core_container, &http_tracker_config); + let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from(&tracker_core_container, &udp_tracker_config); + + let tracker_http_api_core_container = TrackerHttpApiCoreContainer::initialize_from( + &tracker_core_container, + &http_tracker_core_container, + &udp_tracker_core_container, + &http_api_config, + ); Self { - http_api_config, - http_api_container, - database, - authentication_service, - in_memory_whitelist, + tracker_core_container, + tracker_http_api_core_container, } } } diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs index ab9bfaf3e..bc6d38bae 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -36,6 +36,8 @@ async fn should_allow_generating_a_new_random_auth_key() { let auth_key_resource = 
assert_auth_key_utf8(response).await; assert!(env + .container + .tracker_core_container .authentication_service .authenticate(&auth_key_resource.key.parse::().unwrap()) .await @@ -65,6 +67,8 @@ async fn should_allow_uploading_a_preexisting_auth_key() { let auth_key_resource = assert_auth_key_utf8(response).await; assert!(env + .container + .tracker_core_container .authentication_service .authenticate(&auth_key_resource.key.parse::().unwrap()) .await @@ -126,7 +130,7 @@ async fn should_fail_when_the_auth_key_cannot_be_generated() { let env = Started::new(&configuration::ephemeral().into()).await; - force_database_error(&env.database); + force_database_error(&env.container.tracker_core_container.database); let request_id = Uuid::new_v4(); @@ -158,7 +162,8 @@ async fn should_allow_deleting_an_auth_key() { let seconds_valid = 60; let auth_key = env - .http_api_container + .container + .tracker_core_container .keys_handler .generate_expiring_peer_key(Some(Duration::from_secs(seconds_valid))) .await @@ -293,13 +298,14 @@ async fn should_fail_when_the_auth_key_cannot_be_deleted() { let seconds_valid = 60; let auth_key = env - .http_api_container + .container + .tracker_core_container .keys_handler .generate_expiring_peer_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); - force_database_error(&env.database); + force_database_error(&env.container.tracker_core_container.database); let request_id = Uuid::new_v4(); @@ -327,7 +333,8 @@ async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { // Generate new auth key let auth_key = env - .http_api_container + .container + .tracker_core_container .keys_handler .generate_expiring_peer_key(Some(Duration::from_secs(seconds_valid))) .await @@ -348,7 +355,8 @@ async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { // Generate new auth key let auth_key = env - .http_api_container + .container + .tracker_core_container .keys_handler 
.generate_expiring_peer_key(Some(Duration::from_secs(seconds_valid))) .await @@ -377,7 +385,8 @@ async fn should_allow_reloading_keys() { let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; - env.http_api_container + env.container + .tracker_core_container .keys_handler .generate_expiring_peer_key(Some(Duration::from_secs(seconds_valid))) .await @@ -403,13 +412,14 @@ async fn should_fail_when_keys_cannot_be_reloaded() { let request_id = Uuid::new_v4(); let seconds_valid = 60; - env.http_api_container + env.container + .tracker_core_container .keys_handler .generate_expiring_peer_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); - force_database_error(&env.database); + force_database_error(&env.container.tracker_core_container.database); let response = Client::new(env.get_connection_info()) .reload_keys(Some(headers_with_request_id(request_id))) @@ -432,7 +442,8 @@ async fn should_not_allow_reloading_keys_for_unauthenticated_users() { let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; - env.http_api_container + env.container + .tracker_core_container .keys_handler .generate_expiring_peer_key(Some(Duration::from_secs(seconds_valid))) .await @@ -497,6 +508,8 @@ mod deprecated_generate_key_endpoint { let auth_key_resource = assert_auth_key_utf8(response).await; assert!(env + .container + .tracker_core_container .authentication_service .authenticate(&auth_key_resource.key.parse::().unwrap()) .await @@ -563,7 +576,7 @@ mod deprecated_generate_key_endpoint { let env = Started::new(&configuration::ephemeral().into()).await; - force_database_error(&env.database); + force_database_error(&env.container.tracker_core_container.database); let request_id = Uuid::new_v4(); let seconds_valid = 60; diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs index ca359650f..6742da4d8 100644 --- 
a/tests/servers/api/v1/contract/context/whitelist.rs +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -31,7 +31,9 @@ async fn should_allow_whitelisting_a_torrent() { assert_ok(response).await; assert!( - env.in_memory_whitelist + env.container + .tracker_core_container + .in_memory_whitelist .contains(&InfoHash::from_str(&info_hash).unwrap()) .await ); @@ -111,7 +113,7 @@ async fn should_fail_when_the_torrent_cannot_be_whitelisted() { let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 - force_database_error(&env.database); + force_database_error(&env.container.tracker_core_container.database); let request_id = Uuid::new_v4(); @@ -167,7 +169,8 @@ async fn should_allow_removing_a_torrent_from_the_whitelist() { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - env.http_api_container + env.container + .tracker_core_container .whitelist_manager .add_torrent_to_whitelist(&info_hash) .await @@ -180,7 +183,13 @@ async fn should_allow_removing_a_torrent_from_the_whitelist() { .await; assert_ok(response).await; - assert!(!env.in_memory_whitelist.contains(&info_hash).await); + assert!( + !env.container + .tracker_core_container + .in_memory_whitelist + .contains(&info_hash) + .await + ); env.stop().await; } @@ -241,13 +250,14 @@ async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - env.http_api_container + env.container + .tracker_core_container .whitelist_manager .add_torrent_to_whitelist(&info_hash) .await .unwrap(); - force_database_error(&env.database); + force_database_error(&env.container.tracker_core_container.database); let request_id = Uuid::new_v4(); @@ -274,7 +284,8 @@ async fn 
should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthentica let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - env.http_api_container + env.container + .tracker_core_container .whitelist_manager .add_torrent_to_whitelist(&info_hash) .await @@ -293,7 +304,8 @@ async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthentica "Expected logs to contain: ERROR ... API ... request_id={request_id}" ); - env.http_api_container + env.container + .tracker_core_container .whitelist_manager .add_torrent_to_whitelist(&info_hash) .await @@ -323,7 +335,8 @@ async fn should_allow_reload_the_whitelist_from_the_database() { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - env.http_api_container + env.container + .tracker_core_container .whitelist_manager .add_torrent_to_whitelist(&info_hash) .await @@ -358,13 +371,14 @@ async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - env.http_api_container + env.container + .tracker_core_container .whitelist_manager .add_torrent_to_whitelist(&info_hash) .await .unwrap(); - force_database_error(&env.database); + force_database_error(&env.container.tracker_core_container.database); let request_id = Uuid::new_v4(); From 4b250c375a8707cf9509253f180d71c763d69ca0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 17:33:48 +0000 Subject: [PATCH 316/802] refactor: [#1298] convert fn into static method --- src/bootstrap/app.rs | 4 +-- src/container.rs | 83 ++++++++++++++++++++++---------------------- 2 files changed, 43 insertions(+), 44 deletions(-) diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index ec09edd51..04f638a8c 100644 --- 
a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -19,7 +19,7 @@ use torrust_tracker_configuration::{logging, Configuration}; use tracing::instrument; use super::config::initialize_configuration; -use crate::container::{initialize_app_container, AppContainer}; +use crate::container::AppContainer; /// It loads the configuration from the environment and builds app container. /// @@ -42,7 +42,7 @@ pub fn setup() -> (Configuration, AppContainer) { tracing::info!("Configuration:\n{}", configuration.clone().mask_secrets().to_json()); - let app_container = initialize_app_container(&configuration); + let app_container = AppContainer::initialize(&configuration); (configuration, app_container) } diff --git a/src/container.rs b/src/container.rs index 2175c112f..495a59b09 100644 --- a/src/container.rs +++ b/src/container.rs @@ -47,6 +47,47 @@ pub struct AppContainer { } impl AppContainer { + #[instrument(skip())] + pub fn initialize(configuration: &Configuration) -> AppContainer { + let core_config = Arc::new(configuration.core.clone()); + + let tracker_core_container = TrackerCoreContainer::initialize(&core_config); + + // HTTP stats + let (http_stats_event_sender, http_stats_repository) = + bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); + let http_stats_event_sender = Arc::new(http_stats_event_sender); + let http_stats_repository = Arc::new(http_stats_repository); + + // UDP stats + let (udp_stats_event_sender, udp_stats_repository) = + bittorrent_udp_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); + let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + let udp_stats_repository = Arc::new(udp_stats_repository); + + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + + AppContainer { + core_config, + database: tracker_core_container.database, + announce_handler: tracker_core_container.announce_handler, + scrape_handler: 
tracker_core_container.scrape_handler, + keys_handler: tracker_core_container.keys_handler, + authentication_service: tracker_core_container.authentication_service, + in_memory_whitelist: tracker_core_container.in_memory_whitelist, + whitelist_authorization: tracker_core_container.whitelist_authorization, + whitelist_manager: tracker_core_container.whitelist_manager, + in_memory_torrent_repository: tracker_core_container.in_memory_torrent_repository, + db_torrent_repository: tracker_core_container.db_torrent_repository, + torrents_manager: tracker_core_container.torrents_manager, + ban_service, + http_stats_event_sender, + udp_stats_event_sender, + http_stats_repository, + udp_stats_repository, + } + } + #[must_use] pub fn http_tracker_container(&self, http_tracker_config: &Arc) -> HttpTrackerCoreContainer { HttpTrackerCoreContainer { @@ -91,45 +132,3 @@ impl AppContainer { } } } - -/// It initializes the IoC Container. -#[instrument(skip())] -pub fn initialize_app_container(configuration: &Configuration) -> AppContainer { - let core_config = Arc::new(configuration.core.clone()); - - let tracker_core_container = TrackerCoreContainer::initialize(&core_config); - - // HTTP stats - let (http_stats_event_sender, http_stats_repository) = - bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); - let http_stats_event_sender = Arc::new(http_stats_event_sender); - let http_stats_repository = Arc::new(http_stats_repository); - - // UDP stats - let (udp_stats_event_sender, udp_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - let udp_stats_repository = Arc::new(udp_stats_repository); - - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - - AppContainer { - core_config, - database: tracker_core_container.database, - announce_handler: 
tracker_core_container.announce_handler, - scrape_handler: tracker_core_container.scrape_handler, - keys_handler: tracker_core_container.keys_handler, - authentication_service: tracker_core_container.authentication_service, - in_memory_whitelist: tracker_core_container.in_memory_whitelist, - whitelist_authorization: tracker_core_container.whitelist_authorization, - whitelist_manager: tracker_core_container.whitelist_manager, - in_memory_torrent_repository: tracker_core_container.in_memory_torrent_repository, - db_torrent_repository: tracker_core_container.db_torrent_repository, - torrents_manager: tracker_core_container.torrents_manager, - ban_service, - http_stats_event_sender, - udp_stats_event_sender, - http_stats_repository, - udp_stats_repository, - } -} From 56ae9f3447e0bb26804c0228867d20680ab3715f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Feb 2025 17:35:29 +0000 Subject: [PATCH 317/802] refactor: [#1298] rename method --- src/app.rs | 2 +- src/container.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/app.rs b/src/app.rs index 3d2da4ff5..e94db66e3 100644 --- a/src/app.rs +++ b/src/app.rs @@ -111,7 +111,7 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> // Start HTTP API if let Some(http_api_config) = &config.http_api { let http_api_config = Arc::new(http_api_config.clone()); - let http_api_container = Arc::new(app_container.http_api_container(&http_api_config)); + let http_api_container = Arc::new(app_container.tracker_http_api_container(&http_api_config)); if let Some(job) = tracker_apis::start_job(http_api_container, registar.give_form(), servers::apis::Version::V1).await { jobs.push(job); diff --git a/src/container.rs b/src/container.rs index 495a59b09..6f6d9013d 100644 --- a/src/container.rs +++ b/src/container.rs @@ -119,7 +119,7 @@ impl AppContainer { } #[must_use] - pub fn http_api_container(&self, http_api_config: &Arc) -> TrackerHttpApiCoreContainer { + pub fn 
tracker_http_api_container(&self, http_api_config: &Arc) -> TrackerHttpApiCoreContainer { TrackerHttpApiCoreContainer { http_api_config: http_api_config.clone(), core_config: self.core_config.clone(), From 025f1008a24486cfb1ece13d2d68a9810239071d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 24 Feb 2025 10:13:56 +0000 Subject: [PATCH 318/802] refactor: [#1311] move static initialization to the module where the static values are used This will alllow to initizlize static values in other packages, not only the main tracker app. --- Cargo.lock | 2 +- Cargo.toml | 1 - packages/clock/Cargo.toml | 1 + packages/clock/src/lib.rs | 16 +++++++++++++++- packages/udp-tracker-core/src/lib.rs | 16 ++++++++++++++++ src/bootstrap/app.rs | 18 ++++-------------- 6 files changed, 37 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 981ffeba3..bb2688681 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4434,7 +4434,6 @@ dependencies = [ "futures", "futures-util", "hyper", - "lazy_static", "local-ip-address", "mockall", "parking_lot", @@ -4532,6 +4531,7 @@ dependencies = [ "chrono", "lazy_static", "torrust-tracker-primitives", + "tracing", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index d3b194ed9..fbd81423d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,7 +52,6 @@ figment = "0" futures = "0" futures-util = "0" hyper = "1" -lazy_static = "1" parking_lot = "0" percent-encoding = "2" r2d2 = "0" diff --git a/packages/clock/Cargo.toml b/packages/clock/Cargo.toml index 2ede678d9..3bd00d2b0 100644 --- a/packages/clock/Cargo.toml +++ b/packages/clock/Cargo.toml @@ -18,6 +18,7 @@ version.workspace = true [dependencies] chrono = { version = "0", default-features = false, features = ["clock"] } lazy_static = "1" +tracing = "0" torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } diff --git a/packages/clock/src/lib.rs b/packages/clock/src/lib.rs index b7d20620c..ff0527714 100644 --- a/packages/clock/src/lib.rs +++ 
b/packages/clock/src/lib.rs @@ -22,7 +22,6 @@ //! > **NOTICE**: the timestamp does not depend on the time zone. That gives you //! > the ability to use the clock regardless of the underlying system time zone //! > configuration. See [Unix time Wikipedia entry](https://en.wikipedia.org/wiki/Unix_time). - pub mod clock; pub mod conv; pub mod static_time; @@ -30,6 +29,8 @@ pub mod static_time; #[macro_use] extern crate lazy_static; +use tracing::instrument; + /// This code needs to be copied into each crate. /// Working version, for production. #[cfg(not(test))] @@ -40,3 +41,16 @@ pub(crate) type CurrentClock = clock::Working; #[cfg(test)] #[allow(dead_code)] pub(crate) type CurrentClock = clock::Stopped; + +/// It initializes the application static values. +/// +/// These values are accessible throughout the entire application: +/// +/// - The time when the application started. +/// - An ephemeral instance random seed. This seed is used for encryption and +/// it's changed when the main application process is restarted. +#[instrument(skip())] +pub fn initialize_static() { + // Set the time of Torrust app starting + lazy_static::initialize(&static_time::TIME_AT_APP_START); +} diff --git a/packages/udp-tracker-core/src/lib.rs b/packages/udp-tracker-core/src/lib.rs index f649cbeaf..5aa714d35 100644 --- a/packages/udp-tracker-core/src/lib.rs +++ b/packages/udp-tracker-core/src/lib.rs @@ -4,6 +4,9 @@ pub mod crypto; pub mod services; pub mod statistics; +use crypto::ephemeral_instance_keys; +use tracing::instrument; + #[macro_use] extern crate lazy_static; @@ -12,3 +15,16 @@ extern crate lazy_static; pub const MAX_CONNECTION_ID_ERRORS_PER_IP: u32 = 10; pub const UDP_TRACKER_LOG_TARGET: &str = "UDP TRACKER"; + +/// It initializes the static values. 
+#[instrument(skip())] +pub fn initialize_static() { + // Initialize the Ephemeral Instance Random Seed + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); + + // Initialize the Ephemeral Instance Random Cipher + lazy_static::initialize(&ephemeral_instance_keys::RANDOM_CIPHER_BLOWFISH); + + // Initialize the Zeroed Cipher + lazy_static::initialize(&ephemeral_instance_keys::ZEROED_TEST_CIPHER_BLOWFISH); +} diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 04f638a8c..bcf000dfd 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -11,9 +11,7 @@ //! 2. Initialize static variables. //! 3. Initialize logging. //! 4. Initialize the domain tracker. -use bittorrent_udp_tracker_core::crypto::ephemeral_instance_keys; use bittorrent_udp_tracker_core::crypto::keys::{self, Keeper as _}; -use torrust_tracker_clock::static_time; use torrust_tracker_configuration::validator::Validator; use torrust_tracker_configuration::{logging, Configuration}; use tracing::instrument; @@ -71,18 +69,10 @@ pub fn initialize_global_services(configuration: &Configuration) { /// These values are accessible throughout the entire application: /// /// - The time when the application started. -/// - An ephemeral instance random seed. This seed is used for encryption and it's changed when the main application process is restarted. +/// - An ephemeral instance random seed. This seed is used for encryption and +/// it's changed when the main application process is restarted. 
#[instrument(skip())] pub fn initialize_static() { - // Set the time of Torrust app starting - lazy_static::initialize(&static_time::TIME_AT_APP_START); - - // Initialize the Ephemeral Instance Random Seed - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_SEED); - - // Initialize the Ephemeral Instance Random Cipher - lazy_static::initialize(&ephemeral_instance_keys::RANDOM_CIPHER_BLOWFISH); - - // Initialize the Zeroed Cipher - lazy_static::initialize(&ephemeral_instance_keys::ZEROED_TEST_CIPHER_BLOWFISH); + torrust_tracker_clock::initialize_static(); + bittorrent_udp_tracker_core::initialize_static(); } From d6e7a92bedb448ddc43bcbd878014c6e948d9a72 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 24 Feb 2025 11:05:20 +0000 Subject: [PATCH 319/802] refactor: [#1311] initialize statics in each package To avoid dependency on the main app. This will allow moving code to workspace packages. --- Cargo.lock | 1 + packages/axum-http-tracker-server/Cargo.toml | 1 + packages/axum-http-tracker-server/src/server.rs | 13 +++++++++++-- src/servers/apis/server.rs | 12 +++++++++++- src/servers/udp/server/mod.rs | 12 +++++++++++- tests/servers/api/environment.rs | 13 +++++++++++-- tests/servers/http/environment.rs | 12 ++++++++++-- tests/servers/udp/environment.rs | 13 +++++++++++-- 8 files changed, 67 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bb2688681..2c6127740 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4373,6 +4373,7 @@ dependencies = [ "tokio", "torrust-axum-server", "torrust-server-lib", + "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-primitives", "torrust-tracker-test-helpers", diff --git a/packages/axum-http-tracker-server/Cargo.toml b/packages/axum-http-tracker-server/Cargo.toml index ae038cb7b..98c807a92 100644 --- a/packages/axum-http-tracker-server/Cargo.toml +++ b/packages/axum-http-tracker-server/Cargo.toml @@ -38,4 +38,5 @@ tower-http = { version = "0", features = ["compression-full", 
"cors", "propagate tracing = "0" [dev-dependencies] +torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index a5cd3bb74..4cf5afc13 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -250,7 +250,7 @@ mod tests { use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; - use torrust_tracker_configuration::Configuration; + use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::server::{HttpServer, Launcher}; @@ -306,6 +306,15 @@ mod tests { } } + fn initialize_global_services(configuration: &Configuration) { + initialize_static(); + logging::setup(&configuration.logging); + } + + fn initialize_static() { + torrust_tracker_clock::initialize_static(); + } + #[tokio::test] async fn it_should_be_able_to_start_and_stop() { let configuration = Arc::new(ephemeral_public()); @@ -317,7 +326,7 @@ mod tests { let http_tracker_config = &http_trackers[0]; - //initialize_global_services(&cfg); // not needed for this test + initialize_global_services(&configuration); let http_tracker_container = Arc::new(initialize_container(&configuration)); diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index df78bf7dc..4c3484ded 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -297,11 +297,21 @@ mod tests { use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; + use torrust_tracker_configuration::{logging, Configuration}; use 
torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::bootstrap::app::initialize_global_services; use crate::servers::apis::server::{ApiServer, Launcher}; + fn initialize_global_services(configuration: &Configuration) { + initialize_static(); + logging::setup(&configuration.logging); + } + + fn initialize_static() { + torrust_tracker_clock::initialize_static(); + bittorrent_udp_tracker_core::initialize_static(); + } + #[tokio::test] async fn it_should_be_able_to_start_and_stop() { let cfg = Arc::new(ephemeral_public()); diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs index a328c45ce..1ab79b6fe 100644 --- a/src/servers/udp/server/mod.rs +++ b/src/servers/udp/server/mod.rs @@ -59,11 +59,21 @@ mod tests { use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use torrust_server_lib::registar::Registar; + use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_test_helpers::configuration::ephemeral_public; use super::spawner::Spawner; use super::Server; - use crate::bootstrap::app::initialize_global_services; + + fn initialize_global_services(configuration: &Configuration) { + initialize_static(); + logging::setup(&configuration.logging); + } + + fn initialize_static() { + torrust_tracker_clock::initialize_static(); + bittorrent_udp_tracker_core::initialize_static(); + } #[tokio::test] async fn it_should_be_able_to_start_and_stop() { diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 90c58c821..5534a99a9 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -10,8 +10,7 @@ use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; -use torrust_tracker_configuration::Configuration; -use 
torrust_tracker_lib::bootstrap::app::initialize_global_services; +use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_lib::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; use torrust_tracker_primitives::peer; @@ -162,3 +161,13 @@ impl EnvContainer { } } } + +fn initialize_global_services(configuration: &Configuration) { + initialize_static(); + logging::setup(&configuration.logging); +} + +fn initialize_static() { + torrust_tracker_clock::initialize_static(); + bittorrent_udp_tracker_core::initialize_static(); +} diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs index e77cc38aa..f79d42b36 100644 --- a/tests/servers/http/environment.rs +++ b/tests/servers/http/environment.rs @@ -7,8 +7,7 @@ use futures::executor::block_on; use torrust_axum_http_tracker_server::server::{HttpServer, Launcher, Running, Stopped}; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; -use torrust_tracker_configuration::Configuration; -use torrust_tracker_lib::bootstrap::app::initialize_global_services; +use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; pub struct Environment { @@ -106,3 +105,12 @@ impl EnvContainer { } } } + +fn initialize_global_services(configuration: &Configuration) { + initialize_static(); + logging::setup(&configuration.logging); +} + +fn initialize_static() { + torrust_tracker_clock::initialize_static(); +} diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index 8afaffc15..c53f7a723 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -5,8 +5,7 @@ use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use torrust_server_lib::registar::Registar; -use torrust_tracker_configuration::{Configuration, DEFAULT_TIMEOUT}; -use 
torrust_tracker_lib::bootstrap::app::initialize_global_services; +use torrust_tracker_configuration::{logging, Configuration, DEFAULT_TIMEOUT}; use torrust_tracker_lib::servers::udp::server::spawner::Spawner; use torrust_tracker_lib::servers::udp::server::states::{Running, Stopped}; use torrust_tracker_lib::servers::udp::server::Server; @@ -120,6 +119,16 @@ impl EnvContainer { } } +fn initialize_global_services(configuration: &Configuration) { + initialize_static(); + logging::setup(&configuration.logging); +} + +fn initialize_static() { + torrust_tracker_clock::initialize_static(); + bittorrent_udp_tracker_core::initialize_static(); +} + #[cfg(test)] mod tests { use std::time::Duration; From b071900a8bb2d67e0c9dc792d4725c10f1fe9cbf Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 24 Feb 2025 11:59:09 +0000 Subject: [PATCH 320/802] docs: fix crate docs URL --- packages/axum-http-tracker-server/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/axum-http-tracker-server/README.md b/packages/axum-http-tracker-server/README.md index b7286d157..b109a08c1 100644 --- a/packages/axum-http-tracker-server/README.md +++ b/packages/axum-http-tracker-server/README.md @@ -4,7 +4,7 @@ The Torrust Bittorrent HTTP tracker. ## Documentation -[Crate documentation](https://docs.rs/torrust-axum-server). +[Crate documentation](https://docs.rs/torrust-axum-http-tracker-server). 
## License From ec6b968be3b0132a861250e0fa98b71d714287a2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 24 Feb 2025 12:37:46 +0000 Subject: [PATCH 321/802] refactor: [#1284] extract udp-tracker-server package --- .github/workflows/deployment.yaml | 1 + Cargo.lock | 33 +- Cargo.toml | 4 +- packages/axum-http-tracker-server/src/lib.rs | 2 +- .../src/v1/handlers/announce.rs | 3 - .../src/v1/handlers/mod.rs | 3 - .../src/v1/handlers/scrape.rs | 3 - .../axum-http-tracker-server/src/v1/mod.rs | 3 - packages/tracker-core/src/lib.rs | 14 + packages/udp-tracker-server/Cargo.toml | 41 ++ packages/udp-tracker-server/LICENSE | 661 ++++++++++++++++++ packages/udp-tracker-server/README.md | 11 + .../udp-tracker-server/src}/error.rs | 0 .../src}/handlers/announce.rs | 31 +- .../src}/handlers/connect.rs | 7 +- .../udp-tracker-server/src}/handlers/error.rs | 2 +- .../udp-tracker-server/src}/handlers/mod.rs | 6 +- .../src}/handlers/scrape.rs | 29 +- .../udp-tracker-server/src/lib.rs | 18 +- .../src}/server/bound_socket.rs | 0 .../src}/server/launcher.rs | 6 +- .../udp-tracker-server/src}/server/mod.rs | 0 .../src}/server/processor.rs | 4 +- .../src}/server/receiver.rs | 2 +- .../src}/server/request_buffer.rs | 0 .../udp-tracker-server/src}/server/spawner.rs | 0 .../udp-tracker-server/src}/server/states.rs | 2 +- src/bootstrap/jobs/udp_tracker.rs | 5 +- src/lib.rs | 8 +- src/servers/mod.rs | 1 - src/shared/bit_torrent/common.rs | 13 - src/shared/bit_torrent/tracker/udp/mod.rs | 3 - tests/servers/health_check_api/contract.rs | 22 +- tests/servers/udp/contract.rs | 2 +- tests/servers/udp/environment.rs | 6 +- tests/servers/udp/mod.rs | 4 +- 36 files changed, 842 insertions(+), 108 deletions(-) create mode 100644 packages/udp-tracker-server/Cargo.toml create mode 100644 packages/udp-tracker-server/LICENSE create mode 100644 packages/udp-tracker-server/README.md rename {src/servers/udp => packages/udp-tracker-server/src}/error.rs (100%) rename {src/servers/udp => 
packages/udp-tracker-server/src}/handlers/announce.rs (96%) rename {src/servers/udp => packages/udp-tracker-server/src}/handlers/connect.rs (96%) rename {src/servers/udp => packages/udp-tracker-server/src}/handlers/error.rs (98%) rename {src/servers/udp => packages/udp-tracker-server/src}/handlers/mod.rs (98%) rename {src/servers/udp => packages/udp-tracker-server/src}/handlers/scrape.rs (93%) rename src/servers/udp/mod.rs => packages/udp-tracker-server/src/lib.rs (98%) rename {src/servers/udp => packages/udp-tracker-server/src}/server/bound_socket.rs (100%) rename {src/servers/udp => packages/udp-tracker-server/src}/server/launcher.rs (98%) rename {src/servers/udp => packages/udp-tracker-server/src}/server/mod.rs (100%) rename {src/servers/udp => packages/udp-tracker-server/src}/server/processor.rs (97%) rename {src/servers/udp => packages/udp-tracker-server/src}/server/receiver.rs (95%) rename {src/servers/udp => packages/udp-tracker-server/src}/server/request_buffer.rs (100%) rename {src/servers/udp => packages/udp-tracker-server/src}/server/spawner.rs (100%) rename {src/servers/udp => packages/udp-tracker-server/src}/server/states.rs (98%) diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 5aca88ac4..259a97728 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -76,3 +76,4 @@ jobs: cargo publish -p torrust-tracker-primitives cargo publish -p torrust-tracker-test-helpers cargo publish -p torrust-tracker-torrent-repository + cargo publish -p torrust-udp-tracker-server diff --git a/Cargo.lock b/Cargo.lock index 2c6127740..22cdc002a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4433,7 +4433,6 @@ dependencies = [ "derive_more", "figment", "futures", - "futures-util", "hyper", "local-ip-address", "mockall", @@ -4445,7 +4444,6 @@ dependencies = [ "rand 0.9.0", "regex", "reqwest", - "ringbuf", "serde", "serde_bencode", "serde_bytes", @@ -4462,10 +4460,10 @@ dependencies = [ 
"torrust-tracker-api-core", "torrust-tracker-clock", "torrust-tracker-configuration", - "torrust-tracker-located-error", "torrust-tracker-primitives", "torrust-tracker-test-helpers", "torrust-tracker-torrent-repository", + "torrust-udp-tracker-server", "tower 0.5.2", "tower-http", "tracing", @@ -4614,6 +4612,35 @@ dependencies = [ "zerocopy 0.7.35", ] +[[package]] +name = "torrust-udp-tracker-server" +version = "3.0.0-develop" +dependencies = [ + "aquatic_udp_protocol", + "bittorrent-primitives", + "bittorrent-tracker-client", + "bittorrent-tracker-core", + "bittorrent-udp-tracker-core", + "derive_more", + "futures", + "futures-util", + "local-ip-address", + "mockall", + "ringbuf", + "thiserror 2.0.11", + "tokio", + "torrust-server-lib", + "torrust-tracker-clock", + "torrust-tracker-configuration", + "torrust-tracker-located-error", + "torrust-tracker-primitives", + "torrust-tracker-test-helpers", + "tracing", + "url", + "uuid", + "zerocopy 0.7.35", +] + [[package]] name = "tower" version = "0.4.13" diff --git a/Cargo.toml b/Cargo.toml index fbd81423d..d8c739440 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,7 +50,6 @@ dashmap = "6" derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } figment = "0" futures = "0" -futures-util = "0" hyper = "1" parking_lot = "0" percent-encoding = "2" @@ -60,7 +59,6 @@ r2d2_sqlite = { version = "0", features = ["bundled"] } rand = "0" regex = "1" reqwest = { version = "0", features = ["json"] } -ringbuf = "0" serde = { version = "1", features = ["derive"] } serde_bencode = "0" serde_bytes = "0" @@ -76,9 +74,9 @@ torrust-server-lib = { version = "3.0.0-develop", path = "packages/server-lib" } torrust-tracker-api-core = { version = "3.0.0-develop", path = "packages/tracker-api-core" } torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/configuration" } -torrust-tracker-located-error = { version = 
"3.0.0-develop", path = "packages/located-error" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "packages/primitives" } torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "packages/torrent-repository" } +torrust-udp-tracker-server = { version = "3.0.0-develop", path = "packages/udp-tracker-server" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } tracing = "0" diff --git a/packages/axum-http-tracker-server/src/lib.rs b/packages/axum-http-tracker-server/src/lib.rs index a8823b868..7f6bec892 100644 --- a/packages/axum-http-tracker-server/src/lib.rs +++ b/packages/axum-http-tracker-server/src/lib.rs @@ -238,7 +238,7 @@ //! `info_hash` parameters: `info_hash=%81%00%0...00%00%00&info_hash=%82%00%0...00%00%00` //! //! > **NOTICE**: the maximum number of torrents you can scrape at the same time -//! > is `74`. Defined with a hardcoded const [`MAX_SCRAPE_TORRENTS`](crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS). +//! > is `74`. Defined with a hardcoded const [`MAX_SCRAPE_TORRENTS`](torrust_udp_tracker_server::MAX_SCRAPE_TORRENTS). //! //! **Sample response** //! diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index f8f551253..98b2d374c 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -1,8 +1,5 @@ //! Axum [`handlers`](axum#handlers) for the `announce` requests. //! -//! Refer to [HTTP server](crate::servers::http) for more information about the -//! `announce` request. -//! //! The handlers perform the authentication and authorization of the request, //! and resolve the client IP address. 
use std::sync::Arc; diff --git a/packages/axum-http-tracker-server/src/v1/handlers/mod.rs b/packages/axum-http-tracker-server/src/v1/handlers/mod.rs index f9305cf20..ce58e09b3 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/mod.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/mod.rs @@ -1,7 +1,4 @@ //! Axum [`handlers`](axum#handlers) for the HTTP server. -//! -//! Refer to the generic [HTTP server documentation](crate::servers::http) for -//! more information about the HTTP tracker. pub mod announce; pub mod common; pub mod health_check; diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index a4e20cc6f..59549128a 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -1,8 +1,5 @@ //! Axum [`handlers`](axum#handlers) for the `announce` requests. //! -//! Refer to [HTTP server](crate::servers::http) for more information about the -//! `scrape` request. -//! //! The handlers perform the authentication and authorization of the request, //! and resolve the client IP address. use std::sync::Arc; diff --git a/packages/axum-http-tracker-server/src/v1/mod.rs b/packages/axum-http-tracker-server/src/v1/mod.rs index 6e9530cb0..7b1b15138 100644 --- a/packages/axum-http-tracker-server/src/v1/mod.rs +++ b/packages/axum-http-tracker-server/src/v1/mod.rs @@ -1,7 +1,4 @@ //! HTTP server implementation for the `v1` API. -//! -//! Refer to the generic [HTTP server documentation](crate::servers::http) for -//! more information about the endpoints and their usage. 
pub mod extractors; pub mod handlers; pub mod routes; diff --git a/packages/tracker-core/src/lib.rs b/packages/tracker-core/src/lib.rs index 0107fb443..d9da9b9e7 100644 --- a/packages/tracker-core/src/lib.rs +++ b/packages/tracker-core/src/lib.rs @@ -131,6 +131,20 @@ pub mod peer_tests; pub mod test_helpers; use torrust_tracker_clock::clock; + +/// The maximum number of torrents that can be returned in an `scrape` response. +/// +/// The [BEP 15. UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html) +/// defines this limit: +/// +/// "Up to about 74 torrents can be scraped at once. A full scrape can't be done +/// with this protocol." +/// +/// The [BEP 48. Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) +/// does not specifically mention this limit, but the limit is being used for +/// both the UDP and HTTP trackers since it's applied at the domain level. +pub const MAX_SCRAPE_TORRENTS: u8 = 74; + /// This code needs to be copied into each crate. /// Working version, for production. #[cfg(not(test))] diff --git a/packages/udp-tracker-server/Cargo.toml b/packages/udp-tracker-server/Cargo.toml new file mode 100644 index 000000000..7ebba677f --- /dev/null +++ b/packages/udp-tracker-server/Cargo.toml @@ -0,0 +1,41 @@ +[package] +authors.workspace = true +description = "The Torrust Bittorrent UDP tracker." 
+documentation.workspace = true +edition.workspace = true +homepage.workspace = true +keywords = ["axum", "bittorrent", "server", "torrust", "tracker", "udp"] +license.workspace = true +name = "torrust-udp-tracker-server" +publish.workspace = true +readme = "README.md" +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +aquatic_udp_protocol = "0" +bittorrent-primitives = "0.1.0" +bittorrent-tracker-client = { version = "3.0.0-develop", path = "../tracker-client" } +bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } +bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "../udp-tracker-core" } +derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } +futures = "0" +futures-util = "0" +ringbuf = "0" +thiserror = "2" +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } +torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } +torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +tracing = "0" +url = { version = "2", features = ["serde"] } +uuid = { version = "1", features = ["v4"] } +zerocopy = "0.7" + +[dev-dependencies] +local-ip-address = "0" +mockall = "0" +torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } diff --git a/packages/udp-tracker-server/LICENSE b/packages/udp-tracker-server/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/udp-tracker-server/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. 
+ Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. 
+ + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. 
Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/packages/udp-tracker-server/README.md b/packages/udp-tracker-server/README.md new file mode 100644 index 000000000..bdf147104 --- /dev/null +++ b/packages/udp-tracker-server/README.md @@ -0,0 +1,11 @@ +# Torrust UDP Tracker + +The Torrust Bittorrent UDP tracker. + +## Documentation + +[Crate documentation](https://docs.rs/torrust-udp-tracker-server). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/src/servers/udp/error.rs b/packages/udp-tracker-server/src/error.rs similarity index 100% rename from src/servers/udp/error.rs rename to packages/udp-tracker-server/src/error.rs diff --git a/src/servers/udp/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs similarity index 96% rename from src/servers/udp/handlers/announce.rs rename to packages/udp-tracker-server/src/handlers/announce.rs index 66fc0ab42..7e3b8e7dd 100644 --- a/src/servers/udp/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -16,10 +16,9 @@ use torrust_tracker_primitives::core::AnnounceData; use tracing::{instrument, Level}; use zerocopy::network_endian::I32; -use crate::servers::udp::error::Error; +use crate::error::Error; -/// It handles the `Announce` request. Refer to [`Announce`](crate::servers::udp#announce) -/// request for more information. +/// It handles the `Announce` request. 
/// /// # Errors /// @@ -130,7 +129,7 @@ mod tests { }; use bittorrent_udp_tracker_core::connection_cookie::make; - use crate::servers::udp::handlers::tests::{sample_ipv4_remote_addr_fingerprint, sample_issue_time}; + use crate::handlers::tests::{sample_ipv4_remote_addr_fingerprint, sample_issue_time}; struct AnnounceRequestBuilder { request: AnnounceRequest, @@ -210,9 +209,9 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_configuration::Core; - use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; - use crate::servers::udp::handlers::handle_announce; - use crate::servers::udp::handlers::tests::{ + use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; + use crate::handlers::handle_announce; + use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_socket_address, sample_issue_time, MockUdpStatsEventSender, TorrentPeerBuilder, @@ -443,9 +442,9 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; - use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; - use crate::servers::udp::handlers::handle_announce; - use crate::servers::udp::handlers::tests::{ + use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; + use crate::handlers::handle_announce; + use crate::handlers::tests::{ initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_issue_time, TorrentPeerBuilder, }; @@ -517,9 +516,9 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_configuration::Core; - use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; - use crate::servers::udp::handlers::handle_announce; - use 
crate::servers::udp::handlers::tests::{ + use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; + use crate::handlers::handle_announce; + use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, sample_issue_time, MockUdpStatsEventSender, TorrentPeerBuilder, @@ -772,9 +771,9 @@ mod tests { use bittorrent_udp_tracker_core::{self, statistics}; use mockall::predicate::eq; - use crate::servers::udp::handlers::announce::tests::announce_request::AnnounceRequestBuilder; - use crate::servers::udp::handlers::handle_announce; - use crate::servers::udp::handlers::tests::{ + use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; + use crate::handlers::handle_announce; + use crate::handlers::tests::{ sample_cookie_valid_range, sample_issue_time, MockUdpStatsEventSender, TrackerConfigurationBuilder, }; diff --git a/src/servers/udp/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs similarity index 96% rename from src/servers/udp/handlers/connect.rs rename to packages/udp-tracker-server/src/handlers/connect.rs index b9209a115..d1c3a05d8 100644 --- a/src/servers/udp/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -6,8 +6,7 @@ use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, ConnectionId, Respon use bittorrent_udp_tracker_core::{services, statistics}; use tracing::{instrument, Level}; -/// It handles the `Connect` request. Refer to [`Connect`](crate::servers::udp#connect) -/// request for more information. +/// It handles the `Connect` request. 
#[instrument(fields(transaction_id), skip(opt_udp_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_connect( remote_addr: SocketAddr, @@ -45,8 +44,8 @@ mod tests { use bittorrent_udp_tracker_core::statistics; use mockall::predicate::eq; - use crate::servers::udp::handlers::handle_connect; - use crate::servers::udp::handlers::tests::{ + use crate::handlers::handle_connect; + use crate::handlers::tests::{ sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv4_socket_address, sample_ipv6_remote_addr, sample_ipv6_remote_addr_fingerprint, sample_issue_time, MockUdpStatsEventSender, }; diff --git a/src/servers/udp/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs similarity index 98% rename from src/servers/udp/handlers/error.rs rename to packages/udp-tracker-server/src/handlers/error.rs index 443f36cc0..4f2457126 100644 --- a/src/servers/udp/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -10,7 +10,7 @@ use tracing::{instrument, Level}; use uuid::Uuid; use zerocopy::network_endian::I32; -use crate::servers::udp::error::Error; +use crate::error::Error; #[allow(clippy::too_many_arguments)] #[instrument(fields(transaction_id), skip(opt_udp_stats_event_sender), ret(level = Level::TRACE))] diff --git a/src/servers/udp/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs similarity index 98% rename from src/servers/udp/handlers/mod.rs rename to packages/udp-tracker-server/src/handlers/mod.rs index bc876bced..5d7fdb3b3 100644 --- a/src/servers/udp/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -11,18 +11,18 @@ use std::time::Instant; use announce::handle_announce; use aquatic_udp_protocol::{Request, Response, TransactionId}; +use bittorrent_tracker_core::MAX_SCRAPE_TORRENTS; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::services::announce::UdpAnnounceError; use connect::handle_connect; use 
error::handle_error; use scrape::handle_scrape; -use torrust_tracker_clock::clock::Time as _; +use torrust_tracker_clock::clock::Time; use tracing::{instrument, Level}; use uuid::Uuid; use super::RawRequest; -use crate::servers::udp::error::Error; -use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; +use crate::error::Error; use crate::CurrentClock; #[derive(Debug, Clone, PartialEq)] diff --git a/src/servers/udp/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs similarity index 93% rename from src/servers/udp/handlers/scrape.rs rename to packages/udp-tracker-server/src/handlers/scrape.rs index aa7287951..de98b5f6d 100644 --- a/src/servers/udp/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -13,10 +13,9 @@ use torrust_tracker_primitives::core::ScrapeData; use tracing::{instrument, Level}; use zerocopy::network_endian::I32; -use crate::servers::udp::error::Error; +use crate::error::Error; -/// It handles the `Scrape` request. Refer to [`Scrape`](crate::servers::udp#scrape) -/// request for more information. +/// It handles the `Scrape` request. 
/// /// # Errors /// @@ -89,8 +88,8 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; - use crate::servers::udp::handlers::handle_scrape; - use crate::servers::udp::handlers::tests::{ + use crate::handlers::handle_scrape; + use crate::handlers::tests::{ initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, sample_issue_time, TorrentPeerBuilder, }; @@ -200,10 +199,8 @@ mod tests { mod with_a_public_tracker { use aquatic_udp_protocol::{NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; - use crate::servers::udp::handlers::scrape::tests::scrape_request::{ - add_a_sample_seeder_and_scrape, match_scrape_response, - }; - use crate::servers::udp::handlers::tests::initialize_core_tracker_services_for_public_tracker; + use crate::handlers::scrape::tests::scrape_request::{add_a_sample_seeder_and_scrape, match_scrape_response}; + use crate::handlers::tests::initialize_core_tracker_services_for_public_tracker; #[tokio::test] async fn should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { @@ -230,11 +227,11 @@ mod tests { mod with_a_whitelisted_tracker { use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; - use crate::servers::udp::handlers::handle_scrape; - use crate::servers::udp::handlers::scrape::tests::scrape_request::{ + use crate::handlers::handle_scrape; + use crate::handlers::scrape::tests::scrape_request::{ add_a_seeder, build_scrape_request, match_scrape_response, zeroed_torrent_statistics, }; - use crate::servers::udp::handlers::tests::{ + use crate::handlers::tests::{ initialize_core_tracker_services_for_listed_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, }; @@ -332,8 +329,8 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; - use 
crate::servers::udp::handlers::handle_scrape; - use crate::servers::udp::handlers::tests::{ + use crate::handlers::handle_scrape; + use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, sample_ipv4_remote_addr, MockUdpStatsEventSender, }; @@ -374,8 +371,8 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; - use crate::servers::udp::handlers::handle_scrape; - use crate::servers::udp::handlers::tests::{ + use crate::handlers::handle_scrape; + use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, sample_ipv6_remote_addr, MockUdpStatsEventSender, }; diff --git a/src/servers/udp/mod.rs b/packages/udp-tracker-server/src/lib.rs similarity index 98% rename from src/servers/udp/mod.rs rename to packages/udp-tracker-server/src/lib.rs index 1fcd49725..a07f2e665 100644 --- a/src/servers/udp/mod.rs +++ b/packages/udp-tracker-server/src/lib.rs @@ -475,7 +475,7 @@ //! //! > **NOTICE**: up to about 74 torrents can be scraped at once. A full scrape //! > can't be done with this protocol. This is a limitation of the UDP protocol. -//! > Defined with a hardcoded const [`MAX_SCRAPE_TORRENTS`](crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS). +//! > Defined with a hardcoded const [`MAX_SCRAPE_TORRENTS`](torrust_udp_tracker_server::MAX_SCRAPE_TORRENTS). //! > Refer to [issue 262](https://github.com/torrust/torrust-tracker/issues/262) //! > for more information about this limitation. //! @@ -637,10 +637,26 @@ use std::net::SocketAddr; +use torrust_tracker_clock::clock; + pub mod error; pub mod handlers; pub mod server; +/// The maximum number of bytes in a UDP packet. +pub const MAX_PACKET_SIZE: usize = 1496; + +/// This code needs to be copied into each crate. +/// Working version, for production. 
+#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; + /// Number of bytes. pub type Bytes = u64; /// The port the peer is listening on. diff --git a/src/servers/udp/server/bound_socket.rs b/packages/udp-tracker-server/src/server/bound_socket.rs similarity index 100% rename from src/servers/udp/server/bound_socket.rs rename to packages/udp-tracker-server/src/server/bound_socket.rs diff --git a/src/servers/udp/server/launcher.rs b/packages/udp-tracker-server/src/server/launcher.rs similarity index 98% rename from src/servers/udp/server/launcher.rs rename to packages/udp-tracker-server/src/server/launcher.rs index d66ad8d37..12d9c740c 100644 --- a/src/servers/udp/server/launcher.rs +++ b/packages/udp-tracker-server/src/server/launcher.rs @@ -16,9 +16,9 @@ use torrust_server_lib::signals::{shutdown_signal_with_message, Halted, Started} use tracing::instrument; use super::request_buffer::ActiveRequests; -use crate::servers::udp::server::bound_socket::BoundSocket; -use crate::servers::udp::server::processor::Processor; -use crate::servers::udp::server::receiver::Receiver; +use crate::server::bound_socket::BoundSocket; +use crate::server::processor::Processor; +use crate::server::receiver::Receiver; const IP_BANS_RESET_INTERVAL_IN_SECS: u64 = 3600; diff --git a/src/servers/udp/server/mod.rs b/packages/udp-tracker-server/src/server/mod.rs similarity index 100% rename from src/servers/udp/server/mod.rs rename to packages/udp-tracker-server/src/server/mod.rs diff --git a/src/servers/udp/server/processor.rs b/packages/udp-tracker-server/src/server/processor.rs similarity index 97% rename from src/servers/udp/server/processor.rs rename to packages/udp-tracker-server/src/server/processor.rs index 157f3ecfe..a933fdd17 100644 --- a/src/servers/udp/server/processor.rs +++ b/packages/udp-tracker-server/src/server/processor.rs @@ 
-10,8 +10,8 @@ use tokio::time::Instant; use tracing::{instrument, Level}; use super::bound_socket::BoundSocket; -use crate::servers::udp::handlers::CookieTimeValues; -use crate::servers::udp::{handlers, RawRequest}; +use crate::handlers::CookieTimeValues; +use crate::{handlers, RawRequest}; pub struct Processor { socket: Arc, diff --git a/src/servers/udp/server/receiver.rs b/packages/udp-tracker-server/src/server/receiver.rs similarity index 95% rename from src/servers/udp/server/receiver.rs rename to packages/udp-tracker-server/src/server/receiver.rs index 0176930a4..89fbed081 100644 --- a/src/servers/udp/server/receiver.rs +++ b/packages/udp-tracker-server/src/server/receiver.rs @@ -8,7 +8,7 @@ use futures::Stream; use super::bound_socket::BoundSocket; use super::RawRequest; -use crate::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; +use crate::MAX_PACKET_SIZE; pub struct Receiver { pub socket: Arc, diff --git a/src/servers/udp/server/request_buffer.rs b/packages/udp-tracker-server/src/server/request_buffer.rs similarity index 100% rename from src/servers/udp/server/request_buffer.rs rename to packages/udp-tracker-server/src/server/request_buffer.rs diff --git a/src/servers/udp/server/spawner.rs b/packages/udp-tracker-server/src/server/spawner.rs similarity index 100% rename from src/servers/udp/server/spawner.rs rename to packages/udp-tracker-server/src/server/spawner.rs diff --git a/src/servers/udp/server/states.rs b/packages/udp-tracker-server/src/server/states.rs similarity index 98% rename from src/servers/udp/server/states.rs rename to packages/udp-tracker-server/src/server/states.rs index 3501aebf1..fc700ea40 100644 --- a/src/servers/udp/server/states.rs +++ b/packages/udp-tracker-server/src/server/states.rs @@ -14,7 +14,7 @@ use tracing::{instrument, Level}; use super::spawner::Spawner; use super::{Server, UdpError}; -use crate::servers::udp::server::launcher::Launcher; +use crate::server::launcher::Launcher; /// A UDP server instance controller 
with no UDP instance running. #[allow(clippy::module_name_repetitions)] diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 89b2a38be..0276de1d3 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -12,11 +12,10 @@ use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use tokio::task::JoinHandle; use torrust_server_lib::registar::ServiceRegistrationForm; +use torrust_udp_tracker_server::server::spawner::Spawner; +use torrust_udp_tracker_server::server::Server; use tracing::instrument; -use crate::servers::udp::server::spawner::Spawner; -use crate::servers::udp::server::Server; - /// It starts a new UDP server with the provided configuration. /// /// It spawns a new asynchronous task for the new UDP server. diff --git a/src/lib.rs b/src/lib.rs index 4f552ab34..fa18fd7c8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -56,7 +56,7 @@ //! From the end-user perspective the Torrust Tracker exposes three different services. //! //! - A REST [`API`](crate::servers::apis) -//! - One or more [`UDP`](crate::servers::udp) trackers +//! - One or more [`UDP`](torrust_udp_tracker_server) trackers //! - One or more [`HTTP`](torrust_axum_http_tracker_server) trackers //! //! # Installation @@ -395,7 +395,7 @@ //! bind_address = "0.0.0.0:6969" //! ``` //! -//! Refer to the [`UDP`](crate::servers::udp) documentation for more information about the [`UDP`](crate::servers::udp) tracker. +//! Refer to the [`UDP`](torrust_udp_tracker_server) documentation for more information about the [`UDP`](torrust_udp_tracker_server) tracker. //! //! If you want to know more about the UDP tracker protocol: //! @@ -407,7 +407,7 @@ //! //! - The core tracker [`core`] //! - The tracker REST [`API`](crate::servers::apis) -//! - The [`UDP`](crate::servers::udp) tracker +//! - The [`UDP`](torrust_udp_tracker_server) tracker //! 
- The [`HTTP`](torrust_axum_http_tracker_server) tracker //! //! ![Torrust Tracker Components](https://raw.githubusercontent.com/torrust/torrust-tracker/main/docs/media/torrust-tracker-components.png) @@ -446,7 +446,7 @@ //! - [Wikipedia: UDP tracker](https://en.wikipedia.org/wiki/UDP_tracker) //! - [BEP 15: UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html) //! -//! See [`UDP`](crate::servers::udp) for more details on the UDP tracker. +//! See [`UDP`](torrust_udp_tracker_server) for more details on the UDP tracker. //! //! ## HTTP tracker //! diff --git a/src/servers/mod.rs b/src/servers/mod.rs index 8dea8a10d..de756162d 100644 --- a/src/servers/mod.rs +++ b/src/servers/mod.rs @@ -1,3 +1,2 @@ //! Servers. Services that can be started and stopped. pub mod apis; -pub mod udp; diff --git a/src/shared/bit_torrent/common.rs b/src/shared/bit_torrent/common.rs index 0364071c6..c954655e2 100644 --- a/src/shared/bit_torrent/common.rs +++ b/src/shared/bit_torrent/common.rs @@ -1,16 +1,3 @@ //! `BitTorrent` protocol primitive types //! //! [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) - -/// The maximum number of torrents that can be returned in an `scrape` response. -/// -/// The [BEP 15. UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html) -/// defines this limit: -/// -/// "Up to about 74 torrents can be scraped at once. A full scrape can't be done -/// with this protocol." -/// -/// The [BEP 48. Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) -/// does not specifically mention this limit, but the limit is being used for -/// both the UDP and HTTP trackers since it's applied at the domain level. 
-pub const MAX_SCRAPE_TORRENTS: u8 = 74; diff --git a/src/shared/bit_torrent/tracker/udp/mod.rs b/src/shared/bit_torrent/tracker/udp/mod.rs index 1ceb8a08b..eb38d99fd 100644 --- a/src/shared/bit_torrent/tracker/udp/mod.rs +++ b/src/shared/bit_torrent/tracker/udp/mod.rs @@ -1,6 +1,3 @@ -/// The maximum number of bytes in a UDP packet. -pub const MAX_PACKET_SIZE: usize = 1496; - /// A magic 64-bit integer constant defined in the protocol that is used to /// identify the protocol. pub const PROTOCOL_ID: i64 = 0x0417_2710_1980; diff --git a/tests/servers/health_check_api/contract.rs b/tests/servers/health_check_api/contract.rs index bde3e9f5d..553b68902 100644 --- a/tests/servers/health_check_api/contract.rs +++ b/tests/servers/health_check_api/contract.rs @@ -14,7 +14,7 @@ async fn health_check_endpoint_should_return_status_ok_when_there_is_no_services let env = Started::new(&configuration.health_check_api.into(), Registar::default()).await; - let response = get(&format!("http://{}/health_check", env.state.binding)).await; + let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); @@ -54,7 +54,7 @@ mod api { let config = configuration.health_check_api.clone(); let env = Started::new(&config.into(), registar).await; - let response = get(&format!("http://{}/health_check", env.state.binding)).await; + let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); @@ -76,7 +76,7 @@ mod api { assert_eq!( details.info, format!( - "checking api health check at: http://{}/api/health_check", + "checking api health check at: http://{}/api/health_check", // DevSkim: ignore DS137138 service.bind_address() ) ); @@ -105,7 +105,7 @@ mod api { let config = 
configuration.health_check_api.clone(); let env = Started::new(&config.into(), registar).await; - let response = get(&format!("http://{}/health_check", env.state.binding)).await; + let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); @@ -131,7 +131,7 @@ mod api { ); assert_eq!( details.info, - format!("checking api health check at: http://{binding}/api/health_check") + format!("checking api health check at: http://{binding}/api/health_check") // DevSkim: ignore DS137138 ); env.stop().await.expect("it should stop the service"); @@ -164,7 +164,7 @@ mod http { let config = configuration.health_check_api.clone(); let env = Started::new(&config.into(), registar).await; - let response = get(&format!("http://{}/health_check", env.state.binding)).await; + let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); @@ -185,7 +185,7 @@ mod http { assert_eq!( details.info, format!( - "checking http tracker health check at: http://{}/health_check", + "checking http tracker health check at: http://{}/health_check", // DevSkim: ignore DS137138 service.bind_address() ) ); @@ -214,7 +214,7 @@ mod http { let config = configuration.health_check_api.clone(); let env = Started::new(&config.into(), registar).await; - let response = get(&format!("http://{}/health_check", env.state.binding)).await; + let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); @@ -240,7 +240,7 @@ mod http { ); assert_eq!( details.info, - format!("checking http tracker health check at: 
http://{binding}/health_check") + format!("checking http tracker health check at: http://{binding}/health_check") // DevSkim: ignore DS137138 ); env.stop().await.expect("it should stop the service"); @@ -273,7 +273,7 @@ mod udp { let config = configuration.health_check_api.clone(); let env = Started::new(&config.into(), registar).await; - let response = get(&format!("http://{}/health_check", env.state.binding)).await; + let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); @@ -320,7 +320,7 @@ mod udp { let config = configuration.health_check_api.clone(); let env = Started::new(&config.into(), registar).await; - let response = get(&format!("http://{}/health_check", env.state.binding)).await; + let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 assert_eq!(response.status(), 200); assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); diff --git a/tests/servers/udp/contract.rs b/tests/servers/udp/contract.rs index f0f647443..78a511bd4 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -8,8 +8,8 @@ use core::panic; use aquatic_udp_protocol::{ConnectRequest, ConnectionId, Response, TransactionId}; use bittorrent_tracker_client::udp::client::UdpTrackerClient; use torrust_tracker_configuration::DEFAULT_TIMEOUT; -use torrust_tracker_lib::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; use torrust_tracker_test_helpers::configuration; +use torrust_udp_tracker_server::MAX_PACKET_SIZE; use crate::common::logging; use crate::servers::udp::asserts::get_error_response_message; diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs index c53f7a723..7d91fe535 100644 --- a/tests/servers/udp/environment.rs +++ b/tests/servers/udp/environment.rs @@ -6,10 +6,10 @@ use 
bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration, DEFAULT_TIMEOUT}; -use torrust_tracker_lib::servers::udp::server::spawner::Spawner; -use torrust_tracker_lib::servers::udp::server::states::{Running, Stopped}; -use torrust_tracker_lib::servers::udp::server::Server; use torrust_tracker_primitives::peer; +use torrust_udp_tracker_server::server::spawner::Spawner; +use torrust_udp_tracker_server::server::states::{Running, Stopped}; +use torrust_udp_tracker_server::server::Server; pub struct Environment where diff --git a/tests/servers/udp/mod.rs b/tests/servers/udp/mod.rs index 4a89b667a..c52115081 100644 --- a/tests/servers/udp/mod.rs +++ b/tests/servers/udp/mod.rs @@ -1,7 +1,7 @@ -use torrust_tracker_lib::servers::udp::server::states::Running; - pub mod asserts; pub mod contract; pub mod environment; +use torrust_udp_tracker_server::server::states::Running; + pub type Started = environment::Environment; From 35c686574973dca96b51c6cd545410395b3c2a8e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 24 Feb 2025 12:45:29 +0000 Subject: [PATCH 322/802] refactor: remove unused code --- src/lib.rs | 1 - src/shared/bit_torrent/common.rs | 3 - src/shared/bit_torrent/mod.rs | 71 ----------------------- src/shared/bit_torrent/tracker/mod.rs | 1 - src/shared/bit_torrent/tracker/udp/mod.rs | 3 - src/shared/mod.rs | 4 -- 6 files changed, 83 deletions(-) delete mode 100644 src/shared/bit_torrent/common.rs delete mode 100644 src/shared/bit_torrent/mod.rs delete mode 100644 src/shared/bit_torrent/tracker/mod.rs delete mode 100644 src/shared/bit_torrent/tracker/udp/mod.rs delete mode 100644 src/shared/mod.rs diff --git a/src/lib.rs b/src/lib.rs index fa18fd7c8..100a297fe 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -495,7 +495,6 @@ pub mod bootstrap; pub mod console; pub mod container; pub mod servers; 
-pub mod shared; /// This code needs to be copied into each crate. /// Working version, for production. diff --git a/src/shared/bit_torrent/common.rs b/src/shared/bit_torrent/common.rs deleted file mode 100644 index c954655e2..000000000 --- a/src/shared/bit_torrent/common.rs +++ /dev/null @@ -1,3 +0,0 @@ -//! `BitTorrent` protocol primitive types -//! -//! [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) diff --git a/src/shared/bit_torrent/mod.rs b/src/shared/bit_torrent/mod.rs deleted file mode 100644 index 7d6b12f09..000000000 --- a/src/shared/bit_torrent/mod.rs +++ /dev/null @@ -1,71 +0,0 @@ -//! Common code for the `BitTorrent` protocol. -//! -//! # Glossary -//! -//! - [Announce](#announce) -//! - [Info Hash](#info-hash) -//! - [Leecher](#leechers) -//! - [Peer ID](#peer-id) -//! - [Peer List](#peer-list) -//! - [Peer](#peer) -//! - [Scrape](#scrape) -//! - [Seeders](#seeders) -//! - [Swarm](#swarm) -//! - [Tracker](#tracker) -//! -//! Glossary of `BitTorrent` terms. -//! -//! # Announce -//! -//! A request to the tracker to announce the presence of a peer. -//! -//! ## Info Hash -//! -//! A unique identifier for a torrent. -//! -//! ## Leecher -//! -//! Peers that are only downloading data. -//! -//! ## Peer ID -//! -//! A unique identifier for a peer. -//! -//! ## Peer List -//! -//! A list of peers that are downloading a torrent. -//! -//! ## Peer -//! -//! A client that is downloading or uploading a torrent. -//! -//! ## Scrape -//! -//! A request to the tracker to get information about a torrent. -//! -//! ## Seeder -//! -//! Peers that are only uploading data. -//! -//! ## Swarm -//! -//! A group of peers that are downloading the same torrent. -//! -//! ## Tracker -//! -//! A server that keeps track of peers that are downloading a torrent. -//! -//! # Links -//! -//! Description | Link -//! ---|--- -//! `BitTorrent.org`. 
A forum for developers to exchange ideas about the direction of the `BitTorrent` protocol | -//! Wikipedia entry for Glossary of `BitTorrent` term | -//! `BitTorrent` Specification Wiki | -//! Vuze Wiki. A `BitTorrent` client implementation | -//! `libtorrent`. Complete C++ bittorrent implementation| -//! UDP Tracker Protocol docs by `libtorrent` | -//! Percent Encoding spec | -//!Bencode & bdecode in your browser | -pub mod common; -pub mod tracker; diff --git a/src/shared/bit_torrent/tracker/mod.rs b/src/shared/bit_torrent/tracker/mod.rs deleted file mode 100644 index 7e5aaa137..000000000 --- a/src/shared/bit_torrent/tracker/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod udp; diff --git a/src/shared/bit_torrent/tracker/udp/mod.rs b/src/shared/bit_torrent/tracker/udp/mod.rs deleted file mode 100644 index eb38d99fd..000000000 --- a/src/shared/bit_torrent/tracker/udp/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -/// A magic 64-bit integer constant defined in the protocol that is used to -/// identify the protocol. -pub const PROTOCOL_ID: i64 = 0x0417_2710_1980; diff --git a/src/shared/mod.rs b/src/shared/mod.rs deleted file mode 100644 index 3b4a46e67..000000000 --- a/src/shared/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -//! Modules with generic logic used by several modules. -//! -//! - [`bit_torrent`]: `BitTorrent` protocol related logic. 
-pub mod bit_torrent; From aa415bdc2ad7f1cfac110ee9a6bb86e2f7b07a9e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 24 Feb 2025 16:21:05 +0000 Subject: [PATCH 323/802] refactor: [#1285] extract axum-tracker-api-server --- .github/workflows/deployment.yaml | 1 + Cargo.lock | 45 +- Cargo.toml | 9 +- packages/axum-tracker-api-server/Cargo.toml | 48 ++ packages/axum-tracker-api-server/LICENSE | 661 ++++++++++++++++++ packages/axum-tracker-api-server/README.md | 11 + .../axum-tracker-api-server/src/lib.rs | 16 +- .../axum-tracker-api-server/src}/routes.rs | 2 +- .../axum-tracker-api-server/src}/server.rs | 4 +- .../src}/v1/context/auth_key/forms.rs | 0 .../src}/v1/context/auth_key/handlers.rs | 18 +- .../src}/v1/context/auth_key/mod.rs | 2 +- .../src}/v1/context/auth_key/resources.rs | 2 +- .../src}/v1/context/auth_key/responses.rs | 6 +- .../src}/v1/context/auth_key/routes.rs | 6 +- .../src}/v1/context/health_check/handlers.rs | 2 +- .../src}/v1/context/health_check/mod.rs | 2 +- .../src}/v1/context/health_check/resources.rs | 2 +- .../src}/v1/context/mod.rs | 0 .../src}/v1/context/stats/handlers.rs | 4 +- .../src}/v1/context/stats/mod.rs | 2 +- .../src}/v1/context/stats/resources.rs | 2 +- .../src}/v1/context/stats/responses.rs | 2 +- .../src}/v1/context/stats/routes.rs | 6 +- .../src}/v1/context/torrent/handlers.rs | 14 +- .../src}/v1/context/torrent/mod.rs | 4 +- .../src/v1/context/torrent/resources/mod.rs | 4 + .../src}/v1/context/torrent/resources/peer.rs | 0 .../v1/context/torrent/resources/torrent.rs | 6 +- .../src}/v1/context/torrent/responses.rs | 2 +- .../src}/v1/context/torrent/routes.rs | 6 +- .../src}/v1/context/whitelist/handlers.rs | 18 +- .../src}/v1/context/whitelist/mod.rs | 0 .../src}/v1/context/whitelist/responses.rs | 4 +- .../src}/v1/context/whitelist/routes.rs | 6 +- .../src}/v1/middlewares/auth.rs | 2 +- .../src}/v1/middlewares/mod.rs | 0 .../axum-tracker-api-server/src}/v1/mod.rs | 10 +- .../src}/v1/responses.rs | 0 
.../axum-tracker-api-server/src}/v1/routes.rs | 0 src/app.rs | 9 +- src/bootstrap/jobs/tracker_apis.rs | 7 +- src/lib.rs | 15 +- .../apis/v1/context/torrent/resources/mod.rs | 4 - src/servers/mod.rs | 2 - tests/servers/api/environment.rs | 2 +- tests/servers/api/mod.rs | 2 +- tests/servers/api/v1/asserts.rs | 6 +- .../api/v1/contract/context/health_check.rs | 2 +- .../servers/api/v1/contract/context/stats.rs | 2 +- .../api/v1/contract/context/torrent.rs | 4 +- 51 files changed, 870 insertions(+), 114 deletions(-) create mode 100644 packages/axum-tracker-api-server/Cargo.toml create mode 100644 packages/axum-tracker-api-server/LICENSE create mode 100644 packages/axum-tracker-api-server/README.md rename src/servers/apis/mod.rs => packages/axum-tracker-api-server/src/lib.rs (93%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/routes.rs (99%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/server.rs (99%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/auth_key/forms.rs (100%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/auth_key/handlers.rs (83%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/auth_key/mod.rs (97%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/auth_key/resources.rs (97%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/auth_key/responses.rs (87%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/auth_key/routes.rs (87%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/health_check/handlers.rs (71%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/health_check/mod.rs (85%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/health_check/resources.rs (74%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/mod.rs (100%) rename 
{src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/stats/handlers.rs (90%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/stats/mod.rs (93%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/stats/resources.rs (99%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/stats/responses.rs (98%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/stats/routes.rs (70%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/torrent/handlers.rs (88%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/torrent/mod.rs (92%) create mode 100644 packages/axum-tracker-api-server/src/v1/context/torrent/resources/mod.rs rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/torrent/resources/peer.rs (100%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/torrent/resources/torrent.rs (95%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/torrent/responses.rs (90%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/torrent/routes.rs (77%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/whitelist/handlers.rs (72%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/whitelist/mod.rs (100%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/whitelist/responses.rs (84%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/context/whitelist/routes.rs (82%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/middlewares/auth.rs (97%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/middlewares/mod.rs (100%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/mod.rs (58%) rename {src/servers/apis => packages/axum-tracker-api-server/src}/v1/responses.rs (100%) rename 
{src/servers/apis => packages/axum-tracker-api-server/src}/v1/routes.rs (100%) delete mode 100644 src/servers/apis/v1/context/torrent/resources/mod.rs delete mode 100644 src/servers/mod.rs diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 259a97728..e492d7490 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -64,6 +64,7 @@ jobs: cargo publish -p torrust-axum-health-check-api-server cargo publish -p torrust-axum-http-tracker-server cargo publish -p torrust-axum-server + cargo publish -p torrust-axum-tracker-api-server cargo publish -p torrust-torrust-server-lib cargo publish -p torrust-tracker cargo publish -p torrust-tracker-api-client diff --git a/Cargo.lock b/Cargo.lock index 22cdc002a..a46aa30b1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4402,6 +4402,42 @@ dependencies = [ "tracing", ] +[[package]] +name = "torrust-axum-tracker-api-server" +version = "3.0.0-develop" +dependencies = [ + "aquatic_udp_protocol", + "axum", + "axum-extra", + "axum-server", + "bittorrent-http-tracker-core", + "bittorrent-primitives", + "bittorrent-tracker-core", + "bittorrent-udp-tracker-core", + "derive_more", + "futures", + "hyper", + "local-ip-address", + "mockall", + "reqwest", + "serde", + "serde_json", + "serde_with", + "thiserror 2.0.11", + "tokio", + "torrust-axum-server", + "torrust-server-lib", + "torrust-tracker-api-client", + "torrust-tracker-api-core", + "torrust-tracker-clock", + "torrust-tracker-configuration", + "torrust-tracker-primitives", + "torrust-tracker-test-helpers", + "tower 0.5.2", + "tower-http", + "tracing", +] + [[package]] name = "torrust-server-lib" version = "3.0.0-develop" @@ -4418,8 +4454,6 @@ version = "3.0.0-develop" dependencies = [ "anyhow", "aquatic_udp_protocol", - "axum", - "axum-extra", "axum-server", "bittorrent-http-tracker-core", "bittorrent-primitives", @@ -4430,10 +4464,8 @@ dependencies = [ "clap", "crossbeam-skiplist", "dashmap", - "derive_more", "figment", 
"futures", - "hyper", "local-ip-address", "mockall", "parking_lot", @@ -4449,12 +4481,11 @@ dependencies = [ "serde_bytes", "serde_json", "serde_repr", - "serde_with", - "thiserror 2.0.11", "tokio", "torrust-axum-health-check-api-server", "torrust-axum-http-tracker-server", "torrust-axum-server", + "torrust-axum-tracker-api-server", "torrust-server-lib", "torrust-tracker-api-client", "torrust-tracker-api-core", @@ -4464,8 +4495,6 @@ dependencies = [ "torrust-tracker-test-helpers", "torrust-tracker-torrent-repository", "torrust-udp-tracker-server", - "tower 0.5.2", - "tower-http", "tracing", "tracing-subscriber", "url", diff --git a/Cargo.toml b/Cargo.toml index d8c739440..92d0aa5dc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,8 +35,6 @@ version = "3.0.0-develop" [dependencies] anyhow = "1" aquatic_udp_protocol = "0" -axum = { version = "0", features = ["macros"] } -axum-extra = { version = "0", features = ["query"] } axum-server = { version = "0", features = ["tls-rustls-no-provider"] } bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "packages/http-tracker-core" } bittorrent-primitives = "0.1.0" @@ -47,10 +45,8 @@ chrono = { version = "0", default-features = false, features = ["clock"] } clap = { version = "4", features = ["derive", "env"] } crossbeam-skiplist = "0" dashmap = "6" -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } figment = "0" futures = "0" -hyper = "1" parking_lot = "0" percent-encoding = "2" r2d2 = "0" @@ -64,12 +60,11 @@ serde_bencode = "0" serde_bytes = "0" serde_json = { version = "1", features = ["preserve_order"] } serde_repr = "0" -serde_with = { version = "3", features = ["json"] } -thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-axum-health-check-api-server = { version = "3.0.0-develop", path = "packages/axum-health-check-api-server" } torrust-axum-http-tracker-server = { version = "3.0.0-develop", path = 
"packages/axum-http-tracker-server" } torrust-axum-server = { version = "3.0.0-develop", path = "packages/axum-server" } +torrust-axum-tracker-api-server = { version = "3.0.0-develop", path = "packages/axum-tracker-api-server" } torrust-server-lib = { version = "3.0.0-develop", path = "packages/server-lib" } torrust-tracker-api-core = { version = "3.0.0-develop", path = "packages/tracker-api-core" } torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } @@ -77,8 +72,6 @@ torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/co torrust-tracker-primitives = { version = "3.0.0-develop", path = "packages/primitives" } torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "packages/torrent-repository" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "packages/udp-tracker-server" } -tower = { version = "0", features = ["timeout"] } -tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } tracing = "0" tracing-subscriber = { version = "0", features = ["json"] } url = { version = "2", features = ["serde"] } diff --git a/packages/axum-tracker-api-server/Cargo.toml b/packages/axum-tracker-api-server/Cargo.toml new file mode 100644 index 000000000..7c7455fc4 --- /dev/null +++ b/packages/axum-tracker-api-server/Cargo.toml @@ -0,0 +1,48 @@ +[package] +authors.workspace = true +description = "The Torrust Tracker API." 
+documentation.workspace = true +edition.workspace = true +homepage.workspace = true +keywords = ["axum", "bittorrent", "http", "server", "torrust", "tracker"] +license.workspace = true +name = "torrust-axum-tracker-api-server" +publish.workspace = true +readme = "README.md" +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +aquatic_udp_protocol = "0" +axum = { version = "0", features = ["macros"] } +axum-extra = { version = "0", features = ["query"] } +axum-server = { version = "0", features = ["tls-rustls-no-provider"] } +bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "../http-tracker-core" } +bittorrent-primitives = "0.1.0" +bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } +bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "../udp-tracker-core" } +derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } +futures = "0" +hyper = "1" +reqwest = { version = "0", features = ["json"] } +serde = { version = "1", features = ["derive"] } +serde_json = { version = "1", features = ["preserve_order"] } +serde_with = { version = "3", features = ["json"] } +thiserror = "2" +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-axum-server = { version = "3.0.0-develop", path = "../axum-server" } +torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } +torrust-tracker-api-core = { version = "3.0.0-develop", path = "../tracker-api-core" } +torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } +torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +tower = { version = "0", features = ["timeout"] } +tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } +tracing = "0" + 
+[dev-dependencies] +local-ip-address = "0" +mockall = "0" +torrust-tracker-api-client = { version = "3.0.0-develop", path = "../tracker-api-client" } +torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } diff --git a/packages/axum-tracker-api-server/LICENSE b/packages/axum-tracker-api-server/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/axum-tracker-api-server/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. 
+ + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. 
The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/packages/axum-tracker-api-server/README.md b/packages/axum-tracker-api-server/README.md new file mode 100644 index 000000000..6a0415828 --- /dev/null +++ b/packages/axum-tracker-api-server/README.md @@ -0,0 +1,11 @@ +# Torrust Tracker API + +The Torrust Tracker Rest API. + +## Documentation + +[Crate documentation](https://docs.rs/torrust-axum-tracker-api-server). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/src/servers/apis/mod.rs b/packages/axum-tracker-api-server/src/lib.rs similarity index 93% rename from src/servers/apis/mod.rs rename to packages/axum-tracker-api-server/src/lib.rs index 0451b46c0..c3591908e 100644 --- a/src/servers/apis/mod.rs +++ b/packages/axum-tracker-api-server/src/lib.rs @@ -60,7 +60,7 @@ //! ``` //! //! The response will be a JSON object. For example, the [tracker statistics -//! endpoint](crate::servers::apis::v1::context::stats#get-tracker-statistics): +//! endpoint](crate::v1::context::stats#get-tracker-statistics): //! //! ```json //! { @@ -101,7 +101,7 @@ //! //! Refer to [`torrust-tracker-configuration`](torrust_tracker_configuration) //! for more information about the API configuration and to the -//! [`auth`](crate::servers::apis::v1::middlewares::auth) middleware for more +//! [`auth`](crate::v1::middlewares::auth) middleware for more //! information about the authentication process. //! //! 
# Setup SSL (optional) @@ -158,6 +158,18 @@ pub mod server; pub mod v1; use serde::{Deserialize, Serialize}; +use torrust_tracker_clock::clock; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; pub const API_LOG_TARGET: &str = "API"; diff --git a/src/servers/apis/routes.rs b/packages/axum-tracker-api-server/src/routes.rs similarity index 99% rename from src/servers/apis/routes.rs rename to packages/axum-tracker-api-server/src/routes.rs index 64f6f0cb8..492e0dc37 100644 --- a/src/servers/apis/routes.rs +++ b/packages/axum-tracker-api-server/src/routes.rs @@ -31,7 +31,7 @@ use tracing::{instrument, Level, Span}; use super::v1; use super::v1::context::health_check::handlers::health_check_handler; use super::v1::middlewares::auth::State; -use crate::servers::apis::API_LOG_TARGET; +use crate::API_LOG_TARGET; /// Add all API routes to the router. #[instrument(skip(http_api_container, access_tokens))] diff --git a/src/servers/apis/server.rs b/packages/axum-tracker-api-server/src/server.rs similarity index 99% rename from src/servers/apis/server.rs rename to packages/axum-tracker-api-server/src/server.rs index 4c3484ded..65d8ca27a 100644 --- a/src/servers/apis/server.rs +++ b/packages/axum-tracker-api-server/src/server.rs @@ -43,7 +43,7 @@ use torrust_tracker_configuration::AccessTokens; use tracing::{instrument, Level}; use super::routes::router; -use crate::servers::apis::API_LOG_TARGET; +use crate::API_LOG_TARGET; /// Errors that can occur when starting or stopping the API server. 
#[derive(Debug, Error)] @@ -300,7 +300,7 @@ mod tests { use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::servers::apis::server::{ApiServer, Launcher}; + use crate::server::{ApiServer, Launcher}; fn initialize_global_services(configuration: &Configuration) { initialize_static(); diff --git a/src/servers/apis/v1/context/auth_key/forms.rs b/packages/axum-tracker-api-server/src/v1/context/auth_key/forms.rs similarity index 100% rename from src/servers/apis/v1/context/auth_key/forms.rs rename to packages/axum-tracker-api-server/src/v1/context/auth_key/forms.rs diff --git a/src/servers/apis/v1/context/auth_key/handlers.rs b/packages/axum-tracker-api-server/src/v1/context/auth_key/handlers.rs similarity index 83% rename from src/servers/apis/v1/context/auth_key/handlers.rs rename to packages/axum-tracker-api-server/src/v1/context/auth_key/handlers.rs index c8d4c25b0..10530287c 100644 --- a/src/servers/apis/v1/context/auth_key/handlers.rs +++ b/packages/axum-tracker-api-server/src/v1/context/auth_key/handlers.rs @@ -1,4 +1,4 @@ -//! API handlers for the [`auth_key`](crate::servers::apis::v1::context::auth_key) API context. +//! API handlers for the [`auth_key`](crate::v1::context::auth_key) API context. use std::str::FromStr; use std::sync::Arc; use std::time::Duration; @@ -14,8 +14,8 @@ use super::responses::{ auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, failed_to_reload_keys_response, invalid_auth_key_duration_response, invalid_auth_key_response, }; -use crate::servers::apis::v1::context::auth_key::resources::AuthKey; -use crate::servers::apis::v1::responses::{invalid_auth_key_param_response, ok_response}; +use crate::v1::context::auth_key::resources::AuthKey; +use crate::v1::responses::{invalid_auth_key_param_response, ok_response}; /// It handles the request to add a new authentication key. 
/// @@ -28,7 +28,7 @@ use crate::servers::apis::v1::responses::{invalid_auth_key_param_response, ok_re /// - `500` with serialized error in debug format. If the key couldn't be /// generated. /// -/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#generate-a-new-authentication-key) +/// Refer to the [API endpoint documentation](crate::v1::context::auth_key#generate-a-new-authentication-key) /// for more information about this endpoint. pub async fn add_auth_key_handler( State(keys_handler): State>, @@ -61,7 +61,7 @@ pub async fn add_auth_key_handler( /// - `500` with serialized error in debug format. If the key couldn't be /// generated. /// -/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#generate-a-new-authentication-key) +/// Refer to the [API endpoint documentation](crate::v1::context::auth_key#generate-a-new-authentication-key) /// for more information about this endpoint. /// /// This endpoint has been deprecated. Use [`add_auth_key_handler`]. @@ -101,12 +101,12 @@ pub struct KeyParam(String); /// /// It returns two types of responses: /// -/// - `200` with an json [`ActionStatus::Ok`](crate::servers::apis::v1::responses::ActionStatus::Ok) +/// - `200` with an json [`ActionStatus::Ok`](crate::v1::responses::ActionStatus::Ok) /// response. If the key was deleted successfully. /// - `500` with serialized error in debug format. If the key couldn't be /// deleted. /// -/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#delete-an-authentication-key) +/// Refer to the [API endpoint documentation](crate::v1::context::auth_key#delete-an-authentication-key) /// for more information about this endpoint. 
pub async fn delete_auth_key_handler( State(keys_handler): State>, @@ -126,12 +126,12 @@ pub async fn delete_auth_key_handler( /// /// It returns two types of responses: /// -/// - `200` with an json [`ActionStatus::Ok`](crate::servers::apis::v1::responses::ActionStatus::Ok) +/// - `200` with an json [`ActionStatus::Ok`](crate::v1::responses::ActionStatus::Ok) /// response. If the keys were successfully reloaded. /// - `500` with serialized error in debug format. If the they couldn't be /// reloaded. /// -/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#reload-authentication-keys) +/// Refer to the [API endpoint documentation](crate::v1::context::auth_key#reload-authentication-keys) /// for more information about this endpoint. pub async fn reload_keys_handler(State(keys_handler): State>) -> Response { match keys_handler.load_peer_keys_from_database().await { diff --git a/src/servers/apis/v1/context/auth_key/mod.rs b/packages/axum-tracker-api-server/src/v1/context/auth_key/mod.rs similarity index 97% rename from src/servers/apis/v1/context/auth_key/mod.rs rename to packages/axum-tracker-api-server/src/v1/context/auth_key/mod.rs index b4112f21f..0a3937ef2 100644 --- a/src/servers/apis/v1/context/auth_key/mod.rs +++ b/packages/axum-tracker-api-server/src/v1/context/auth_key/mod.rs @@ -64,7 +64,7 @@ //! //! **Resource** //! -//! Refer to the API [`AuthKey`](crate::servers::apis::v1::context::auth_key::resources::AuthKey) +//! Refer to the API [`AuthKey`](crate::v1::context::auth_key::resources::AuthKey) //! resource for more information about the response attributes. //! //! 
# Delete an authentication key diff --git a/src/servers/apis/v1/context/auth_key/resources.rs b/packages/axum-tracker-api-server/src/v1/context/auth_key/resources.rs similarity index 97% rename from src/servers/apis/v1/context/auth_key/resources.rs rename to packages/axum-tracker-api-server/src/v1/context/auth_key/resources.rs index 8f5b4d309..357f1c365 100644 --- a/src/servers/apis/v1/context/auth_key/resources.rs +++ b/packages/axum-tracker-api-server/src/v1/context/auth_key/resources.rs @@ -1,4 +1,4 @@ -//! API resources for the [`auth_key`](crate::servers::apis::v1::context::auth_key) API context. +//! API resources for the [`auth_key`](crate::v1::context::auth_key) API context. use bittorrent_tracker_core::authentication::{self, Key}; use serde::{Deserialize, Serialize}; diff --git a/src/servers/apis/v1/context/auth_key/responses.rs b/packages/axum-tracker-api-server/src/v1/context/auth_key/responses.rs similarity index 87% rename from src/servers/apis/v1/context/auth_key/responses.rs rename to packages/axum-tracker-api-server/src/v1/context/auth_key/responses.rs index 4905d9adc..8a0503703 100644 --- a/src/servers/apis/v1/context/auth_key/responses.rs +++ b/packages/axum-tracker-api-server/src/v1/context/auth_key/responses.rs @@ -1,11 +1,11 @@ -//! API responses for the [`auth_key`](crate::servers::apis::v1::context::auth_key) API context. +//! API responses for the [`auth_key`](crate::v1::context::auth_key) API context. use std::error::Error; use axum::http::{header, StatusCode}; use axum::response::{IntoResponse, Response}; -use crate::servers::apis::v1::context::auth_key::resources::AuthKey; -use crate::servers::apis::v1::responses::{bad_request_response, unhandled_rejection_response}; +use crate::v1::context::auth_key::resources::AuthKey; +use crate::v1::responses::{bad_request_response, unhandled_rejection_response}; /// `200` response that contains the `AuthKey` resource as json. 
/// diff --git a/src/servers/apis/v1/context/auth_key/routes.rs b/packages/axum-tracker-api-server/src/v1/context/auth_key/routes.rs similarity index 87% rename from src/servers/apis/v1/context/auth_key/routes.rs rename to packages/axum-tracker-api-server/src/v1/context/auth_key/routes.rs index 623fb3459..64a0c1f11 100644 --- a/src/servers/apis/v1/context/auth_key/routes.rs +++ b/packages/axum-tracker-api-server/src/v1/context/auth_key/routes.rs @@ -1,11 +1,11 @@ -//! API routes for the [`auth_key`](crate::servers::apis::v1::context::auth_key) +//! API routes for the [`auth_key`](crate::v1::context::auth_key) //! API context. //! //! - `POST /key/:seconds_valid` //! - `DELETE /key/:key` //! - `GET /keys/reload` //! -//! Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key). +//! Refer to the [API endpoint documentation](crate::v1::context::auth_key). use std::sync::Arc; use axum::routing::{get, post}; @@ -14,7 +14,7 @@ use bittorrent_tracker_core::authentication::handler::KeysHandler; use super::handlers::{add_auth_key_handler, delete_auth_key_handler, generate_auth_key_handler, reload_keys_handler}; -/// It adds the routes to the router for the [`auth_key`](crate::servers::apis::v1::context::auth_key) API context. +/// It adds the routes to the router for the [`auth_key`](crate::v1::context::auth_key) API context. pub fn add(prefix: &str, router: Router, keys_handler: &Arc) -> Router { // Keys router diff --git a/src/servers/apis/v1/context/health_check/handlers.rs b/packages/axum-tracker-api-server/src/v1/context/health_check/handlers.rs similarity index 71% rename from src/servers/apis/v1/context/health_check/handlers.rs rename to packages/axum-tracker-api-server/src/v1/context/health_check/handlers.rs index bfbeab549..dfcad1f56 100644 --- a/src/servers/apis/v1/context/health_check/handlers.rs +++ b/packages/axum-tracker-api-server/src/v1/context/health_check/handlers.rs @@ -1,4 +1,4 @@ -//! 
API handlers for the [`stats`](crate::servers::apis::v1::context::health_check) +//! API handlers for the [`stats`](crate::v1::context::health_check) //! API context. use axum::Json; diff --git a/src/servers/apis/v1/context/health_check/mod.rs b/packages/axum-tracker-api-server/src/v1/context/health_check/mod.rs similarity index 85% rename from src/servers/apis/v1/context/health_check/mod.rs rename to packages/axum-tracker-api-server/src/v1/context/health_check/mod.rs index b73849511..6b1a1475f 100644 --- a/src/servers/apis/v1/context/health_check/mod.rs +++ b/packages/axum-tracker-api-server/src/v1/context/health_check/mod.rs @@ -28,7 +28,7 @@ //! //! **Resource** //! -//! Refer to the API [`Stats`](crate::servers::apis::v1::context::health_check::resources::Report) +//! Refer to the API [`Stats`](crate::context::health_check::resources::Report) //! resource for more information about the response attributes. pub mod handlers; pub mod resources; diff --git a/src/servers/apis/v1/context/health_check/resources.rs b/packages/axum-tracker-api-server/src/v1/context/health_check/resources.rs similarity index 74% rename from src/servers/apis/v1/context/health_check/resources.rs rename to packages/axum-tracker-api-server/src/v1/context/health_check/resources.rs index 9830e643c..5ea5871f8 100644 --- a/src/servers/apis/v1/context/health_check/resources.rs +++ b/packages/axum-tracker-api-server/src/v1/context/health_check/resources.rs @@ -1,4 +1,4 @@ -//! API resources for the [`stats`](crate::servers::apis::v1::context::health_check) +//! API resources for the [`stats`](crate::v1::context::health_check) //! API context. 
use serde::{Deserialize, Serialize}; diff --git a/src/servers/apis/v1/context/mod.rs b/packages/axum-tracker-api-server/src/v1/context/mod.rs similarity index 100% rename from src/servers/apis/v1/context/mod.rs rename to packages/axum-tracker-api-server/src/v1/context/mod.rs diff --git a/src/servers/apis/v1/context/stats/handlers.rs b/packages/axum-tracker-api-server/src/v1/context/stats/handlers.rs similarity index 90% rename from src/servers/apis/v1/context/stats/handlers.rs rename to packages/axum-tracker-api-server/src/v1/context/stats/handlers.rs index b8adfc3e3..e0149cb23 100644 --- a/src/servers/apis/v1/context/stats/handlers.rs +++ b/packages/axum-tracker-api-server/src/v1/context/stats/handlers.rs @@ -1,4 +1,4 @@ -//! API handlers for the [`stats`](crate::servers::apis::v1::context::stats) +//! API handlers for the [`stats`](crate::v1::context::stats) //! API context. use std::sync::Arc; @@ -35,7 +35,7 @@ pub struct QueryParams { /// You can add the GET parameter `format=prometheus` to get the stats in /// Prometheus Text Exposition Format. /// -/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::stats#get-tracker-statistics) +/// Refer to the [API endpoint documentation](crate::v1::context::stats#get-tracker-statistics) /// for more information about this endpoint. #[allow(clippy::type_complexity)] pub async fn get_stats_handler( diff --git a/src/servers/apis/v1/context/stats/mod.rs b/packages/axum-tracker-api-server/src/v1/context/stats/mod.rs similarity index 93% rename from src/servers/apis/v1/context/stats/mod.rs rename to packages/axum-tracker-api-server/src/v1/context/stats/mod.rs index 80f37f73f..5c6b0a39c 100644 --- a/src/servers/apis/v1/context/stats/mod.rs +++ b/packages/axum-tracker-api-server/src/v1/context/stats/mod.rs @@ -44,7 +44,7 @@ //! //! **Resource** //! -//! Refer to the API [`Stats`](crate::servers::apis::v1::context::stats::resources::Stats) +//! 
Refer to the API [`Stats`](crate::v1::context::stats::resources::Stats) //! resource for more information about the response attributes. pub mod handlers; pub mod resources; diff --git a/src/servers/apis/v1/context/stats/resources.rs b/packages/axum-tracker-api-server/src/v1/context/stats/resources.rs similarity index 99% rename from src/servers/apis/v1/context/stats/resources.rs rename to packages/axum-tracker-api-server/src/v1/context/stats/resources.rs index 11169f31e..f27050e22 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/packages/axum-tracker-api-server/src/v1/context/stats/resources.rs @@ -1,4 +1,4 @@ -//! API resources for the [`stats`](crate::servers::apis::v1::context::stats) +//! API resources for the [`stats`](crate::v1::context::stats) //! API context. use serde::{Deserialize, Serialize}; use torrust_tracker_api_core::statistics::services::TrackerMetrics; diff --git a/src/servers/apis/v1/context/stats/responses.rs b/packages/axum-tracker-api-server/src/v1/context/stats/responses.rs similarity index 98% rename from src/servers/apis/v1/context/stats/responses.rs rename to packages/axum-tracker-api-server/src/v1/context/stats/responses.rs index 0b4da778f..f68c1e062 100644 --- a/src/servers/apis/v1/context/stats/responses.rs +++ b/packages/axum-tracker-api-server/src/v1/context/stats/responses.rs @@ -1,4 +1,4 @@ -//! API responses for the [`stats`](crate::servers::apis::v1::context::stats) +//! API responses for the [`stats`](crate::v1::context::stats) //! API context. 
use axum::response::{IntoResponse, Json, Response}; use torrust_tracker_api_core::statistics::services::TrackerMetrics; diff --git a/src/servers/apis/v1/context/stats/routes.rs b/packages/axum-tracker-api-server/src/v1/context/stats/routes.rs similarity index 70% rename from src/servers/apis/v1/context/stats/routes.rs rename to packages/axum-tracker-api-server/src/v1/context/stats/routes.rs index df198eba6..e73de8625 100644 --- a/src/servers/apis/v1/context/stats/routes.rs +++ b/packages/axum-tracker-api-server/src/v1/context/stats/routes.rs @@ -1,8 +1,8 @@ -//! API routes for the [`stats`](crate::servers::apis::v1::context::stats) API context. +//! API routes for the [`stats`](crate::v1::context::stats) API context. //! //! - `GET /stats` //! -//! Refer to the [API endpoint documentation](crate::servers::apis::v1::context::stats). +//! Refer to the [API endpoint documentation](crate::v1::context::stats). use std::sync::Arc; use axum::routing::get; @@ -11,7 +11,7 @@ use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use super::handlers::get_stats_handler; -/// It adds the routes to the router for the [`stats`](crate::servers::apis::v1::context::stats) API context. +/// It adds the routes to the router for the [`stats`](crate::v1::context::stats) API context. pub fn add(prefix: &str, router: Router, http_api_container: &Arc) -> Router { router.route( &format!("{prefix}/stats"), diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/packages/axum-tracker-api-server/src/v1/context/torrent/handlers.rs similarity index 88% rename from src/servers/apis/v1/context/torrent/handlers.rs rename to packages/axum-tracker-api-server/src/v1/context/torrent/handlers.rs index ce80d8fee..613abbdeb 100644 --- a/src/servers/apis/v1/context/torrent/handlers.rs +++ b/packages/axum-tracker-api-server/src/v1/context/torrent/handlers.rs @@ -1,4 +1,4 @@ -//! API handlers for the [`torrent`](crate::servers::apis::v1::context::torrent) +//! 
API handlers for the [`torrent`](crate::v1::context::torrent) //! API context. use std::fmt; use std::str::FromStr; @@ -15,17 +15,17 @@ use thiserror::Error; use torrust_tracker_primitives::pagination::Pagination; use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; -use crate::servers::apis::v1::responses::invalid_info_hash_param_response; -use crate::servers::apis::InfoHashParam; +use crate::v1::responses::invalid_info_hash_param_response; +use crate::InfoHashParam; /// It handles the request to get the torrent data. /// /// It returns: /// -/// - `200` response with a json [`Torrent`](crate::servers::apis::v1::context::torrent::resources::torrent::Torrent). +/// - `200` response with a json [`Torrent`](crate::v1::context::torrent::resources::torrent::Torrent). /// - `500` with serialized error in debug format if the torrent is not known. /// -/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::torrent#get-a-torrent) +/// Refer to the [API endpoint documentation](crate::v1::context::torrent#get-a-torrent) /// for more information about this endpoint. pub async fn get_torrent_handler( State(in_memory_torrent_repository): State>, @@ -74,9 +74,9 @@ pub struct QueryParams { /// It handles the request to get a list of torrents. /// -/// It returns a `200` response with a json array with [`crate::servers::apis::v1::context::torrent::resources::torrent::ListItem`] resources. +/// It returns a `200` response with a json array with [`crate::v1::context::torrent::resources::torrent::ListItem`] resources. /// -/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::torrent#list-torrents) +/// Refer to the [API endpoint documentation](crate::v1::context::torrent#list-torrents) /// for more information about this endpoint. 
pub async fn get_torrents_handler( State(in_memory_torrent_repository): State>, diff --git a/src/servers/apis/v1/context/torrent/mod.rs b/packages/axum-tracker-api-server/src/v1/context/torrent/mod.rs similarity index 92% rename from src/servers/apis/v1/context/torrent/mod.rs rename to packages/axum-tracker-api-server/src/v1/context/torrent/mod.rs index 1658e1748..1a62fef25 100644 --- a/src/servers/apis/v1/context/torrent/mod.rs +++ b/packages/axum-tracker-api-server/src/v1/context/torrent/mod.rs @@ -62,7 +62,7 @@ //! //! **Resource** //! -//! Refer to the API [`Torrent`](crate::servers::apis::v1::context::torrent::resources::torrent::Torrent) +//! Refer to the API [`Torrent`](crate::v1::context::torrent::resources::torrent::Torrent) //! resource for more information about the response attributes. //! //! # List torrents @@ -102,7 +102,7 @@ //! //! **Resource** //! -//! Refer to the API [`ListItem`](crate::servers::apis::v1::context::torrent::resources::torrent::ListItem) +//! Refer to the API [`ListItem`](crate::v1::context::torrent::resources::torrent::ListItem) //! resource for more information about the attributes for a single item in the //! response. //! diff --git a/packages/axum-tracker-api-server/src/v1/context/torrent/resources/mod.rs b/packages/axum-tracker-api-server/src/v1/context/torrent/resources/mod.rs new file mode 100644 index 000000000..8e31036d3 --- /dev/null +++ b/packages/axum-tracker-api-server/src/v1/context/torrent/resources/mod.rs @@ -0,0 +1,4 @@ +//! API resources for the [`torrent`](crate::v1::context::torrent) +//! API context. 
+pub mod peer; +pub mod torrent; diff --git a/src/servers/apis/v1/context/torrent/resources/peer.rs b/packages/axum-tracker-api-server/src/v1/context/torrent/resources/peer.rs similarity index 100% rename from src/servers/apis/v1/context/torrent/resources/peer.rs rename to packages/axum-tracker-api-server/src/v1/context/torrent/resources/peer.rs diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs b/packages/axum-tracker-api-server/src/v1/context/torrent/resources/torrent.rs similarity index 95% rename from src/servers/apis/v1/context/torrent/resources/torrent.rs rename to packages/axum-tracker-api-server/src/v1/context/torrent/resources/torrent.rs index 5e4da5c16..1753b60b9 100644 --- a/src/servers/apis/v1/context/torrent/resources/torrent.rs +++ b/packages/axum-tracker-api-server/src/v1/context/torrent/resources/torrent.rs @@ -21,7 +21,7 @@ pub struct Torrent { /// The torrent's leechers counter. Active peers that are downloading the /// torrent. pub leechers: u64, - /// The torrent's peers. See [`Peer`](crate::servers::apis::v1::context::torrent::resources::peer::Peer). + /// The torrent's peers. See [`Peer`](crate::v1::context::torrent::resources::peer::Peer). 
#[serde(skip_serializing_if = "Option::is_none")] pub peers: Option>, } @@ -102,8 +102,8 @@ mod tests { use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use super::Torrent; - use crate::servers::apis::v1::context::torrent::resources::peer::Peer; - use crate::servers::apis::v1::context::torrent::resources::torrent::ListItem; + use crate::v1::context::torrent::resources::peer::Peer; + use crate::v1::context::torrent::resources::torrent::ListItem; fn sample_peer() -> peer::Peer { peer::Peer { diff --git a/src/servers/apis/v1/context/torrent/responses.rs b/packages/axum-tracker-api-server/src/v1/context/torrent/responses.rs similarity index 90% rename from src/servers/apis/v1/context/torrent/responses.rs rename to packages/axum-tracker-api-server/src/v1/context/torrent/responses.rs index cd359247b..e498c6c59 100644 --- a/src/servers/apis/v1/context/torrent/responses.rs +++ b/packages/axum-tracker-api-server/src/v1/context/torrent/responses.rs @@ -1,4 +1,4 @@ -//! API responses for the [`torrent`](crate::servers::apis::v1::context::torrent) +//! API responses for the [`torrent`](crate::v1::context::torrent) //! API context. use axum::response::{IntoResponse, Json, Response}; use bittorrent_tracker_core::torrent::services::{BasicInfo, Info}; diff --git a/src/servers/apis/v1/context/torrent/routes.rs b/packages/axum-tracker-api-server/src/v1/context/torrent/routes.rs similarity index 77% rename from src/servers/apis/v1/context/torrent/routes.rs rename to packages/axum-tracker-api-server/src/v1/context/torrent/routes.rs index 615bd8d51..678fe7783 100644 --- a/src/servers/apis/v1/context/torrent/routes.rs +++ b/packages/axum-tracker-api-server/src/v1/context/torrent/routes.rs @@ -1,9 +1,9 @@ -//! API routes for the [`torrent`](crate::servers::apis::v1::context::torrent) API context. +//! API routes for the [`torrent`](crate::v1::context::torrent) API context. //! //! - `GET /torrent/:info_hash` //! - `GET /torrents` //! -//! 
Refer to the [API endpoint documentation](crate::servers::apis::v1::context::torrent). +//! Refer to the [API endpoint documentation](crate::v1::context::torrent). use std::sync::Arc; use axum::routing::get; @@ -12,7 +12,7 @@ use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepo use super::handlers::{get_torrent_handler, get_torrents_handler}; -/// It adds the routes to the router for the [`torrent`](crate::servers::apis::v1::context::torrent) API context. +/// It adds the routes to the router for the [`torrent`](crate::v1::context::torrent) API context. pub fn add(prefix: &str, router: Router, in_memory_torrent_repository: &Arc) -> Router { // Torrents router diff --git a/src/servers/apis/v1/context/whitelist/handlers.rs b/packages/axum-tracker-api-server/src/v1/context/whitelist/handlers.rs similarity index 72% rename from src/servers/apis/v1/context/whitelist/handlers.rs rename to packages/axum-tracker-api-server/src/v1/context/whitelist/handlers.rs index e33a215f2..bafa8aaff 100644 --- a/src/servers/apis/v1/context/whitelist/handlers.rs +++ b/packages/axum-tracker-api-server/src/v1/context/whitelist/handlers.rs @@ -1,4 +1,4 @@ -//! API handlers for the [`whitelist`](crate::servers::apis::v1::context::whitelist) +//! API handlers for the [`whitelist`](crate::v1::context::whitelist) //! API context. use std::str::FromStr; use std::sync::Arc; @@ -11,17 +11,17 @@ use bittorrent_tracker_core::whitelist::manager::WhitelistManager; use super::responses::{ failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, }; -use crate::servers::apis::v1::responses::{invalid_info_hash_param_response, ok_response}; -use crate::servers::apis::InfoHashParam; +use crate::v1::responses::{invalid_info_hash_param_response, ok_response}; +use crate::InfoHashParam; /// It handles the request to add a torrent to the whitelist. 
/// /// It returns: /// -/// - `200` response with a [`ActionStatus::Ok`](crate::servers::apis::v1::responses::ActionStatus::Ok) in json. +/// - `200` response with a [`ActionStatus::Ok`](crate::v1::responses::ActionStatus::Ok) in json. /// - `500` with serialized error in debug format if the torrent couldn't be whitelisted. /// -/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::whitelist#add-a-torrent-to-the-whitelist) +/// Refer to the [API endpoint documentation](crate::v1::context::whitelist#add-a-torrent-to-the-whitelist) /// for more information about this endpoint. pub async fn add_torrent_to_whitelist_handler( State(whitelist_manager): State>, @@ -40,11 +40,11 @@ pub async fn add_torrent_to_whitelist_handler( /// /// It returns: /// -/// - `200` response with a [`ActionStatus::Ok`](crate::servers::apis::v1::responses::ActionStatus::Ok) in json. +/// - `200` response with a [`ActionStatus::Ok`](crate::v1::responses::ActionStatus::Ok) in json. /// - `500` with serialized error in debug format if the torrent couldn't be /// removed from the whitelisted. /// -/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::whitelist#remove-a-torrent-from-the-whitelist) +/// Refer to the [API endpoint documentation](crate::v1::context::whitelist#remove-a-torrent-from-the-whitelist) /// for more information about this endpoint. pub async fn remove_torrent_from_whitelist_handler( State(whitelist_manager): State>, @@ -63,11 +63,11 @@ pub async fn remove_torrent_from_whitelist_handler( /// /// It returns: /// -/// - `200` response with a [`ActionStatus::Ok`](crate::servers::apis::v1::responses::ActionStatus::Ok) in json. +/// - `200` response with a [`ActionStatus::Ok`](crate::v1::responses::ActionStatus::Ok) in json. /// - `500` with serialized error in debug format if the torrent whitelist /// couldn't be reloaded from the database. 
/// -/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::whitelist#reload-the-whitelist) +/// Refer to the [API endpoint documentation](crate::v1::context::whitelist#reload-the-whitelist) /// for more information about this endpoint. pub async fn reload_whitelist_handler(State(whitelist_manager): State>) -> Response { match whitelist_manager.load_whitelist_from_database().await { diff --git a/src/servers/apis/v1/context/whitelist/mod.rs b/packages/axum-tracker-api-server/src/v1/context/whitelist/mod.rs similarity index 100% rename from src/servers/apis/v1/context/whitelist/mod.rs rename to packages/axum-tracker-api-server/src/v1/context/whitelist/mod.rs diff --git a/src/servers/apis/v1/context/whitelist/responses.rs b/packages/axum-tracker-api-server/src/v1/context/whitelist/responses.rs similarity index 84% rename from src/servers/apis/v1/context/whitelist/responses.rs rename to packages/axum-tracker-api-server/src/v1/context/whitelist/responses.rs index ce901c2f0..1e4d66f7f 100644 --- a/src/servers/apis/v1/context/whitelist/responses.rs +++ b/packages/axum-tracker-api-server/src/v1/context/whitelist/responses.rs @@ -1,10 +1,10 @@ -//! API responses for the [`whitelist`](crate::servers::apis::v1::context::whitelist) +//! API responses for the [`whitelist`](crate::v1::context::whitelist) //! API context. use std::error::Error; use axum::response::Response; -use crate::servers::apis::v1::responses::unhandled_rejection_response; +use crate::v1::responses::unhandled_rejection_response; /// `500` error response when a torrent cannot be removed from the whitelist. 
#[must_use] diff --git a/src/servers/apis/v1/context/whitelist/routes.rs b/packages/axum-tracker-api-server/src/v1/context/whitelist/routes.rs similarity index 82% rename from src/servers/apis/v1/context/whitelist/routes.rs rename to packages/axum-tracker-api-server/src/v1/context/whitelist/routes.rs index 316193cd6..c99b008b3 100644 --- a/src/servers/apis/v1/context/whitelist/routes.rs +++ b/packages/axum-tracker-api-server/src/v1/context/whitelist/routes.rs @@ -1,10 +1,10 @@ -//! API routes for the [`whitelist`](crate::servers::apis::v1::context::whitelist) API context. +//! API routes for the [`whitelist`](crate::v1::context::whitelist) API context. //! //! - `POST /whitelist/:info_hash` //! - `DELETE /whitelist/:info_hash` //! - `GET /whitelist/reload` //! -//! Refer to the [API endpoint documentation](crate::servers::apis::v1::context::torrent). +//! Refer to the [API endpoint documentation](crate::v1::context::torrent). use std::sync::Arc; use axum::routing::{delete, get, post}; @@ -13,7 +13,7 @@ use bittorrent_tracker_core::whitelist::manager::WhitelistManager; use super::handlers::{add_torrent_to_whitelist_handler, reload_whitelist_handler, remove_torrent_from_whitelist_handler}; -/// It adds the routes to the router for the [`whitelist`](crate::servers::apis::v1::context::whitelist) API context. +/// It adds the routes to the router for the [`whitelist`](crate::v1::context::whitelist) API context. 
pub fn add(prefix: &str, router: Router, whitelist_manager: &Arc) -> Router { let prefix = format!("{prefix}/whitelist"); diff --git a/src/servers/apis/v1/middlewares/auth.rs b/packages/axum-tracker-api-server/src/v1/middlewares/auth.rs similarity index 97% rename from src/servers/apis/v1/middlewares/auth.rs rename to packages/axum-tracker-api-server/src/v1/middlewares/auth.rs index 58219c7ca..2ec046bed 100644 --- a/src/servers/apis/v1/middlewares/auth.rs +++ b/packages/axum-tracker-api-server/src/v1/middlewares/auth.rs @@ -30,7 +30,7 @@ use axum::response::{IntoResponse, Response}; use serde::Deserialize; use torrust_tracker_configuration::AccessTokens; -use crate::servers::apis::v1::responses::unhandled_rejection_response; +use crate::v1::responses::unhandled_rejection_response; /// Container for the `token` extracted from the query params. #[derive(Deserialize, Debug)] diff --git a/src/servers/apis/v1/middlewares/mod.rs b/packages/axum-tracker-api-server/src/v1/middlewares/mod.rs similarity index 100% rename from src/servers/apis/v1/middlewares/mod.rs rename to packages/axum-tracker-api-server/src/v1/middlewares/mod.rs diff --git a/src/servers/apis/v1/mod.rs b/packages/axum-tracker-api-server/src/v1/mod.rs similarity index 58% rename from src/servers/apis/v1/mod.rs rename to packages/axum-tracker-api-server/src/v1/mod.rs index 372ae0ff9..7910d7d4d 100644 --- a/src/servers/apis/v1/mod.rs +++ b/packages/axum-tracker-api-server/src/v1/mod.rs @@ -4,17 +4,17 @@ //! //! Context | Description | Version //! ---|---|--- -//! `Stats` | Tracker statistics | [`v1`](crate::servers::apis::v1::context::stats) -//! `Torrents` | Torrents | [`v1`](crate::servers::apis::v1::context::torrent) -//! `Whitelist` | Torrents whitelist | [`v1`](crate::servers::apis::v1::context::whitelist) -//! `Authentication keys` | Authentication keys | [`v1`](crate::servers::apis::v1::context::auth_key) +//! `Stats` | Tracker statistics | [`v1`](crate::v1::context::stats) +//! 
`Torrents` | Torrents | [`v1`](crate::v1::context::torrent) +//! `Whitelist` | Torrents whitelist | [`v1`](crate::v1::context::whitelist) +//! `Authentication keys` | Authentication keys | [`v1`](crate::v1::context::auth_key) //! //! > **NOTICE**: //! - The authentication keys are only used by the HTTP tracker. //! - The whitelist is only used when the tracker is running in `listed` or //! `private_listed` mode. //! -//! Refer to the [authentication middleware](crate::servers::apis::v1::middlewares::auth) +//! Refer to the [authentication middleware](crate::v1::middlewares::auth) //! for more information about the authentication process. pub mod context; pub mod middlewares; diff --git a/src/servers/apis/v1/responses.rs b/packages/axum-tracker-api-server/src/v1/responses.rs similarity index 100% rename from src/servers/apis/v1/responses.rs rename to packages/axum-tracker-api-server/src/v1/responses.rs diff --git a/src/servers/apis/v1/routes.rs b/packages/axum-tracker-api-server/src/v1/routes.rs similarity index 100% rename from src/servers/apis/v1/routes.rs rename to packages/axum-tracker-api-server/src/v1/routes.rs diff --git a/src/app.rs b/src/app.rs index e94db66e3..27ffe7a4a 100644 --- a/src/app.rs +++ b/src/app.rs @@ -30,7 +30,6 @@ use tracing::instrument; use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; use crate::container::AppContainer; -use crate::servers; /// # Panics /// @@ -113,7 +112,13 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> let http_api_config = Arc::new(http_api_config.clone()); let http_api_container = Arc::new(app_container.tracker_http_api_container(&http_api_config)); - if let Some(job) = tracker_apis::start_job(http_api_container, registar.give_form(), servers::apis::Version::V1).await { + if let Some(job) = tracker_apis::start_job( + http_api_container, + registar.give_form(), + torrust_axum_tracker_api_server::Version::V1, + ) + .await + { jobs.push(job); } 
} else { diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 458d25367..93850d65e 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -26,14 +26,13 @@ use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use tokio::task::JoinHandle; use torrust_axum_server::tsl::make_rust_tls; +use torrust_axum_tracker_api_server::server::{ApiServer, Launcher}; +use torrust_axum_tracker_api_server::Version; use torrust_server_lib::registar::ServiceRegistrationForm; use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_tracker_configuration::AccessTokens; use tracing::instrument; -use crate::servers::apis::server::{ApiServer, Launcher}; -use crate::servers::apis::Version; - /// This is the message that the "launcher" spawned task sends to the main /// application process to notify the API server was successfully started. /// @@ -97,13 +96,13 @@ async fn start_v1( mod tests { use std::sync::Arc; + use torrust_axum_tracker_api_server::Version; use torrust_server_lib::registar::Registar; use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::bootstrap::app::initialize_global_services; use crate::bootstrap::jobs::tracker_apis::start_job; - use crate::servers::apis::Version; #[tokio::test] async fn it_should_start_http_tracker() { diff --git a/src/lib.rs b/src/lib.rs index 100a297fe..5f05df8b2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -55,7 +55,7 @@ //! //! From the end-user perspective the Torrust Tracker exposes three different services. //! -//! - A REST [`API`](crate::servers::apis) +//! - A REST [`API`](torrust_axum_tracker_api_server) //! - One or more [`UDP`](torrust_udp_tracker_server) trackers //! - One or more [`HTTP`](torrust_axum_http_tracker_server) trackers //! @@ -124,7 +124,7 @@ //! By default the tracker uses `SQLite` and the database file name `sqlite3.db`. //! 
//! You only need the `tls` directory in case you are setting up SSL for the HTTP tracker or the tracker API. -//! Visit [`HTTP`](torrust_axum_http_tracker_server) or [`API`](crate::servers::apis) if you want to know how you can use HTTPS. +//! Visit [`HTTP`](torrust_axum_http_tracker_server) or [`API`](torrust_axum_tracker_api_server) if you want to know how you can use HTTPS. //! //! ## Install from sources //! @@ -280,7 +280,7 @@ //! } //! ``` //! -//! Refer to the [`API`](crate::servers::apis) documentation for more information about the [`API`](crate::servers::apis) endpoints. +//! Refer to the [`API`](torrust_axum_tracker_api_server) documentation for more information about the [`API`](torrust_axum_tracker_api_server) endpoints. //! //! ## HTTP tracker //! @@ -359,7 +359,7 @@ //! //! If the tracker is running in `private` or `private_listed` mode you will need to provide a valid authentication key. //! -//! Right now the only way to add new keys is via the REST [`API`](crate::servers::apis). The endpoint `POST /api/vi/key/:duration_in_seconds` +//! Right now the only way to add new keys is via the REST [`API`](torrust_axum_tracker_api_server). The endpoint `POST /api/vi/key/:duration_in_seconds` //! will return an expiring key that will be valid for `duration_in_seconds` seconds. //! //! Using `curl` you can create a 2-minute valid auth key: @@ -379,7 +379,7 @@ //! ``` //! //! You can also use the Torrust Tracker together with the [Torrust Index](https://github.com/torrust/torrust-index). If that's the case, -//! the Index will create the keys by using the tracker [API](crate::servers::apis). +//! the Index will create the keys by using the tracker [API](torrust_axum_tracker_api_server). //! //! ## UDP tracker //! @@ -406,7 +406,7 @@ //! Torrust Tracker has four main components: //! //! - The core tracker [`core`] -//! - The tracker REST [`API`](crate::servers::apis) +//! - The tracker REST [`API`](torrust_axum_tracker_api_server) //! 
- The [`UDP`](torrust_udp_tracker_server) tracker //! - The [`HTTP`](torrust_axum_http_tracker_server) tracker //! @@ -434,7 +434,7 @@ //! - Torrents: to get peers for a torrent //! - Whitelist: to handle the torrent whitelist when the tracker runs on `listed` or `private_listed` mode //! -//! See [`API`](crate::servers::apis) for more details on the REST API. +//! See [`API`](torrust_axum_tracker_api_server) for more details on the REST API. //! //! ## UDP tracker //! @@ -494,7 +494,6 @@ pub mod app; pub mod bootstrap; pub mod console; pub mod container; -pub mod servers; /// This code needs to be copied into each crate. /// Working version, for production. diff --git a/src/servers/apis/v1/context/torrent/resources/mod.rs b/src/servers/apis/v1/context/torrent/resources/mod.rs deleted file mode 100644 index a6dbff726..000000000 --- a/src/servers/apis/v1/context/torrent/resources/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -//! API resources for the [`torrent`](crate::servers::apis::v1::context::torrent) -//! API context. -pub mod peer; -pub mod torrent; diff --git a/src/servers/mod.rs b/src/servers/mod.rs deleted file mode 100644 index de756162d..000000000 --- a/src/servers/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -//! Servers. Services that can be started and stopped. 
-pub mod apis; diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs index 5534a99a9..b9373b533 100644 --- a/tests/servers/api/environment.rs +++ b/tests/servers/api/environment.rs @@ -7,11 +7,11 @@ use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use futures::executor::block_on; use torrust_axum_server::tsl::make_rust_tls; +use torrust_axum_tracker_api_server::server::{ApiServer, Launcher, Running, Stopped}; use torrust_server_lib::registar::Registar; use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_tracker_configuration::{logging, Configuration}; -use torrust_tracker_lib::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; use torrust_tracker_primitives::peer; pub struct Environment diff --git a/tests/servers/api/mod.rs b/tests/servers/api/mod.rs index 8f5f6d016..1176d8a6b 100644 --- a/tests/servers/api/mod.rs +++ b/tests/servers/api/mod.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use bittorrent_tracker_core::databases::Database; -use torrust_tracker_lib::servers::apis::server; +use torrust_axum_tracker_api_server::server; pub mod connection_info; pub mod environment; diff --git a/tests/servers/api/v1/asserts.rs b/tests/servers/api/v1/asserts.rs index b56144d3f..c1a06594a 100644 --- a/tests/servers/api/v1/asserts.rs +++ b/tests/servers/api/v1/asserts.rs @@ -1,9 +1,9 @@ // code-review: should we use macros to return the exact line where the assert fails? 
use reqwest::Response; -use torrust_tracker_lib::servers::apis::v1::context::auth_key::resources::AuthKey; -use torrust_tracker_lib::servers::apis::v1::context::stats::resources::Stats; -use torrust_tracker_lib::servers::apis::v1::context::torrent::resources::torrent::{ListItem, Torrent}; +use torrust_axum_tracker_api_server::v1::context::auth_key::resources::AuthKey; +use torrust_axum_tracker_api_server::v1::context::stats::resources::Stats; +use torrust_axum_tracker_api_server::v1::context::torrent::resources::torrent::{ListItem, Torrent}; // Resource responses diff --git a/tests/servers/api/v1/contract/context/health_check.rs b/tests/servers/api/v1/contract/context/health_check.rs index 4d37917fc..b0812dc8c 100644 --- a/tests/servers/api/v1/contract/context/health_check.rs +++ b/tests/servers/api/v1/contract/context/health_check.rs @@ -1,5 +1,5 @@ +use torrust_axum_tracker_api_server::v1::context::health_check::resources::{Report, Status}; use torrust_tracker_api_client::v1::client::get; -use torrust_tracker_lib::servers::apis::v1::context::health_check::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; use url::Url; diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index 55d3cd869..3d8e6481c 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -1,8 +1,8 @@ use std::str::FromStr; use bittorrent_primitives::info_hash::InfoHash; +use torrust_axum_tracker_api_server::v1::context::stats::resources::Stats; use torrust_tracker_api_client::v1::client::{headers_with_request_id, Client}; -use torrust_tracker_lib::servers::apis::v1::context::stats::resources::Stats; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use uuid::Uuid; diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index 
8aa408173..c2d5bfbaf 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -1,10 +1,10 @@ use std::str::FromStr; use bittorrent_primitives::info_hash::InfoHash; +use torrust_axum_tracker_api_server::v1::context::torrent::resources::peer::Peer; +use torrust_axum_tracker_api_server::v1::context::torrent::resources::torrent::{self, Torrent}; use torrust_tracker_api_client::common::http::{Query, QueryParam}; use torrust_tracker_api_client::v1::client::{headers_with_request_id, Client}; -use torrust_tracker_lib::servers::apis::v1::context::torrent::resources::peer::Peer; -use torrust_tracker_lib::servers::apis::v1::context::torrent::resources::torrent::{self, Torrent}; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; use uuid::Uuid; From 229f9ee0de0416794b7a26aa1bbe1c0aefb6bdd2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 24 Feb 2025 18:50:24 +0000 Subject: [PATCH 324/802] refactor: [#1320] move code from axum-http-tracker-server pkg to http-protocol pkg --- Cargo.lock | 1 - packages/axum-http-tracker-server/Cargo.toml | 1 - .../src/v1/extractors/authentication_key.rs | 4 +- .../src/v1/handlers/common/auth.rs | 40 ------------------- .../src/v1/handlers/common/mod.rs | 3 -- .../src/v1/handlers/common/peer_ip.rs | 30 -------------- .../src/v1/handlers/mod.rs | 1 - packages/http-protocol/src/v1/auth.rs | 17 ++++++++ packages/http-protocol/src/v1/mod.rs | 1 + .../http-protocol/src/v1/responses/error.rs | 28 +++++++++++++ 10 files changed, 47 insertions(+), 79 deletions(-) delete mode 100644 packages/axum-http-tracker-server/src/v1/handlers/common/auth.rs delete mode 100644 packages/axum-http-tracker-server/src/v1/handlers/common/mod.rs delete mode 100644 packages/axum-http-tracker-server/src/v1/handlers/common/peer_ip.rs create mode 100644 packages/http-protocol/src/v1/auth.rs diff --git a/Cargo.lock b/Cargo.lock index a46aa30b1..cead25a49 
100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4369,7 +4369,6 @@ dependencies = [ "hyper", "reqwest", "serde", - "thiserror 2.0.11", "tokio", "torrust-axum-server", "torrust-server-lib", diff --git a/packages/axum-http-tracker-server/Cargo.toml b/packages/axum-http-tracker-server/Cargo.toml index 98c807a92..abb419e4a 100644 --- a/packages/axum-http-tracker-server/Cargo.toml +++ b/packages/axum-http-tracker-server/Cargo.toml @@ -27,7 +27,6 @@ futures = "0" hyper = "1" reqwest = { version = "0", features = ["json"] } serde = { version = "1", features = ["derive"] } -thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-axum-server = { version = "3.0.0-develop", path = "../axum-server" } torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } diff --git a/packages/axum-http-tracker-server/src/v1/extractors/authentication_key.rs b/packages/axum-http-tracker-server/src/v1/extractors/authentication_key.rs index 89781f48b..7dca7f42e 100644 --- a/packages/axum-http-tracker-server/src/v1/extractors/authentication_key.rs +++ b/packages/axum-http-tracker-server/src/v1/extractors/authentication_key.rs @@ -49,13 +49,11 @@ use axum::extract::rejection::PathRejection; use axum::extract::{FromRequestParts, Path}; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; -use bittorrent_http_tracker_protocol::v1::responses; +use bittorrent_http_tracker_protocol::v1::{auth, responses}; use bittorrent_tracker_core::authentication::Key; use hyper::StatusCode; use serde::Deserialize; -use crate::v1::handlers::common::auth; - /// Extractor for the [`Key`] struct. 
pub struct Extract(pub Key); diff --git a/packages/axum-http-tracker-server/src/v1/handlers/common/auth.rs b/packages/axum-http-tracker-server/src/v1/handlers/common/auth.rs deleted file mode 100644 index fe1dddd7d..000000000 --- a/packages/axum-http-tracker-server/src/v1/handlers/common/auth.rs +++ /dev/null @@ -1,40 +0,0 @@ -//! HTTP server authentication error and conversion to -//! [`responses::error::Error`] -//! response. -use std::panic::Location; - -use bittorrent_http_tracker_protocol::v1::responses; -use bittorrent_tracker_core::authentication; -use thiserror::Error; - -/// Authentication error. -/// -/// When the tracker is private, the authentication key is required in the URL -/// path. These are the possible errors that can occur when extracting the key -/// from the URL path. -#[derive(Debug, Error)] -pub enum Error { - #[error("Invalid format for authentication key param. Error in {location}")] - InvalidKeyFormat { location: &'static Location<'static> }, - - #[error("Cannot extract authentication key param from URL path. Error in {location}")] - CannotExtractKeyParam { location: &'static Location<'static> }, -} - -impl From for responses::error::Error { - fn from(err: Error) -> Self { - responses::error::Error { - failure_reason: format!("Tracker authentication error: {err}"), - } - } -} - -#[must_use] -pub fn map_auth_error_to_error_response(err: &authentication::Error) -> responses::error::Error { - // code_review: this could not been implemented with the trait: - // impl From for responses::error::Error - // Consider moving the trait implementation to the http-protocol package. 
- responses::error::Error { - failure_reason: format!("Tracker authentication error: {err}"), - } -} diff --git a/packages/axum-http-tracker-server/src/v1/handlers/common/mod.rs b/packages/axum-http-tracker-server/src/v1/handlers/common/mod.rs deleted file mode 100644 index 30eaf37b7..000000000 --- a/packages/axum-http-tracker-server/src/v1/handlers/common/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -//! Common logic for HTTP handlers. -pub mod auth; -pub mod peer_ip; diff --git a/packages/axum-http-tracker-server/src/v1/handlers/common/peer_ip.rs b/packages/axum-http-tracker-server/src/v1/handlers/common/peer_ip.rs deleted file mode 100644 index 8d51f9817..000000000 --- a/packages/axum-http-tracker-server/src/v1/handlers/common/peer_ip.rs +++ /dev/null @@ -1,30 +0,0 @@ -//! Logic to convert peer IP resolution errors into responses. -//! -//! The HTTP tracker may fail to resolve the peer IP address. This module -//! contains the logic to convert those -//! [`PeerIpResolutionError`](bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::PeerIpResolutionError) -//! errors into responses. - -#[cfg(test)] -mod tests { - use std::panic::Location; - - use bittorrent_http_tracker_protocol::v1::responses; - use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::PeerIpResolutionError; - - fn assert_error_response(error: &responses::error::Error, error_message: &str) { - assert!( - error.failure_reason.contains(error_message), - "Error response does not contain message: '{error_message}'. 
Error: {error:?}" - ); - } - - #[test] - fn it_should_map_a_peer_ip_resolution_error_into_an_error_response() { - let response = responses::error::Error::from(PeerIpResolutionError::MissingRightMostXForwardedForIp { - location: Location::caller(), - }); - - assert_error_response(&response, "Error resolving peer IP"); - } -} diff --git a/packages/axum-http-tracker-server/src/v1/handlers/mod.rs b/packages/axum-http-tracker-server/src/v1/handlers/mod.rs index ce58e09b3..785213696 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/mod.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/mod.rs @@ -1,5 +1,4 @@ //! Axum [`handlers`](axum#handlers) for the HTTP server. pub mod announce; -pub mod common; pub mod health_check; pub mod scrape; diff --git a/packages/http-protocol/src/v1/auth.rs b/packages/http-protocol/src/v1/auth.rs new file mode 100644 index 000000000..ad2e59e53 --- /dev/null +++ b/packages/http-protocol/src/v1/auth.rs @@ -0,0 +1,17 @@ +use std::panic::Location; + +use thiserror::Error; + +/// Authentication error. +/// +/// When the tracker is private, the authentication key is required in the URL +/// path. These are the possible errors that can occur when extracting the key +/// from the URL path. +#[derive(Debug, Error)] +pub enum Error { + #[error("Invalid format for authentication key param. Error in {location}")] + InvalidKeyFormat { location: &'static Location<'static> }, + + #[error("Cannot extract authentication key param from URL path. 
Error in {location}")] + CannotExtractKeyParam { location: &'static Location<'static> }, +} diff --git a/packages/http-protocol/src/v1/mod.rs b/packages/http-protocol/src/v1/mod.rs index d52ba7609..6de653e66 100644 --- a/packages/http-protocol/src/v1/mod.rs +++ b/packages/http-protocol/src/v1/mod.rs @@ -1,3 +1,4 @@ +pub mod auth; pub mod query; pub mod requests; pub mod responses; diff --git a/packages/http-protocol/src/v1/responses/error.rs b/packages/http-protocol/src/v1/responses/error.rs index 8dc28e938..2e7a36d0a 100644 --- a/packages/http-protocol/src/v1/responses/error.rs +++ b/packages/http-protocol/src/v1/responses/error.rs @@ -13,6 +13,7 @@ //! > code. use serde::Serialize; +use crate::v1::auth; use crate::v1::services::peer_ip_resolver::PeerIpResolutionError; /// `Error` response for the HTTP tracker. @@ -47,6 +48,14 @@ impl Error { } } +impl From for Error { + fn from(err: auth::Error) -> Self { + Self { + failure_reason: format!("Tracker authentication error: {err}"), + } + } +} + impl From for Error { fn from(err: PeerIpResolutionError) -> Self { Self { @@ -89,8 +98,11 @@ impl From for Error { #[cfg(test)] mod tests { + use std::panic::Location; use super::Error; + use crate::v1::responses; + use crate::v1::services::peer_ip_resolver::PeerIpResolutionError; #[test] fn http_tracker_errors_can_be_bencoded() { @@ -100,4 +112,20 @@ mod tests { assert_eq!(err.write(), "d14:failure reason13:error messagee"); // cspell:disable-line } + + fn assert_error_response(error: &responses::error::Error, error_message: &str) { + assert!( + error.failure_reason.contains(error_message), + "Error response does not contain message: '{error_message}'. 
Error: {error:?}" + ); + } + + #[test] + fn it_should_map_a_peer_ip_resolution_error_into_an_error_response() { + let response = responses::error::Error::from(PeerIpResolutionError::MissingRightMostXForwardedForIp { + location: Location::caller(), + }); + + assert_error_response(&response, "Error resolving peer IP"); + } } From a841e03e98d7f27d241ea67a925455c449aaed4c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Feb 2025 08:54:29 +0000 Subject: [PATCH 325/802] refactor: [#1316] move health check API integration tests to pkg It moved the integration tests for the Health Check API from the main lib to the `axum-health-check-api-server`. Some tests have not been moved because they depend on other server packages. They basically use the test "environments" from other servers which are not publicly exposed yet. They are only used in integration tests. We will move those environments to the corresponding server so they can be used in other packages to run the servers. --- Cargo.lock | 7 + .../axum-health-check-api-server/Cargo.toml | 5 + .../src}/environment.rs | 25 ++- .../axum-health-check-api-server/src/lib.rs | 1 + .../tests/integration.rs | 19 +++ .../tests/server/client.rs | 5 + .../tests/server/contract.rs | 29 ++++ .../tests/server/mod.rs | 2 + packages/test-helpers/Cargo.toml | 2 + packages/test-helpers/src/lib.rs | 1 + packages/test-helpers/src/logging.rs | 156 ++++++++++++++++++ tests/servers/health_check_api/contract.rs | 37 +---- tests/servers/health_check_api/mod.rs | 3 - 13 files changed, 250 insertions(+), 42 deletions(-) rename {tests/servers/health_check_api => packages/axum-health-check-api-server/src}/environment.rs (81%) create mode 100644 packages/axum-health-check-api-server/tests/integration.rs create mode 100644 packages/axum-health-check-api-server/tests/server/client.rs create mode 100644 packages/axum-health-check-api-server/tests/server/contract.rs create mode 100644 packages/axum-health-check-api-server/tests/server/mod.rs create 
mode 100644 packages/test-helpers/src/logging.rs diff --git a/Cargo.lock b/Cargo.lock index cead25a49..4c85cf407 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4343,13 +4343,18 @@ dependencies = [ "axum-server", "futures", "hyper", + "reqwest", "serde", "serde_json", "tokio", "torrust-axum-server", "torrust-server-lib", + "torrust-tracker-clock", + "torrust-tracker-configuration", + "torrust-tracker-test-helpers", "tower-http", "tracing", + "tracing-subscriber", ] [[package]] @@ -4618,6 +4623,8 @@ version = "3.0.0-develop" dependencies = [ "rand 0.9.0", "torrust-tracker-configuration", + "tracing", + "tracing-subscriber", ] [[package]] diff --git a/packages/axum-health-check-api-server/Cargo.toml b/packages/axum-health-check-api-server/Cargo.toml index 37e49f9e7..17c269aae 100644 --- a/packages/axum-health-check-api-server/Cargo.toml +++ b/packages/axum-health-check-api-server/Cargo.toml @@ -23,7 +23,12 @@ serde_json = { version = "1", features = ["preserve_order"] } tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-axum-server = { version = "3.0.0-develop", path = "../axum-server" } torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } +torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } tracing = "0" [dev-dependencies] +reqwest = { version = "0", features = ["json"] } +torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } +torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } +tracing-subscriber = { version = "0", features = ["json"] } diff --git a/tests/servers/health_check_api/environment.rs b/packages/axum-health-check-api-server/src/environment.rs similarity index 81% rename from tests/servers/health_check_api/environment.rs rename to packages/axum-health-check-api-server/src/environment.rs 
index f8c1209cd..c1fb0547a 100644 --- a/tests/servers/health_check_api/environment.rs +++ b/packages/axum-health-check-api-server/src/environment.rs @@ -3,11 +3,14 @@ use std::sync::Arc; use tokio::sync::oneshot::{self, Sender}; use tokio::task::JoinHandle; -use torrust_axum_health_check_api_server::{server, HEALTH_CHECK_API_LOG_TARGET}; use torrust_server_lib::registar::Registar; -use torrust_server_lib::signals::{self, Halted, Started}; +use torrust_server_lib::signals::{self, Halted as SignalHalted, Started as SignalStarted}; use torrust_tracker_configuration::HealthCheckApi; +use crate::{server, HEALTH_CHECK_API_LOG_TARGET}; + +pub type Started = Environment; + #[derive(Debug)] pub enum Error { #[allow(dead_code)] @@ -30,6 +33,7 @@ pub struct Environment { } impl Environment { + #[must_use] pub fn new(config: &Arc, registar: Registar) -> Self { let bind_to = config.bind_address; @@ -41,9 +45,13 @@ impl Environment { /// Start the test environment for the Health Check API. /// It runs the API server. + /// + /// # Panics + /// + /// Will panic if it cannot start the service in a spawned task. pub async fn start(self) -> Environment { - let (tx_start, rx_start) = oneshot::channel::(); - let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); + let (tx_start, rx_start) = oneshot::channel::(); + let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); let register = self.registar.entries(); @@ -81,10 +89,17 @@ impl Environment { Environment::::new(config, registar).start().await } + /// # Errors + /// + /// Will return an error if it cannot send the halt signal. + /// + /// # Panics + /// + /// Will panic if it cannot shutdown the service. 
pub async fn stop(self) -> Result, Error> { self.state .halt_task - .send(Halted::Normal) + .send(SignalHalted::Normal) .map_err(|e| Error::Error(e.to_string()))?; let bind_to = self.state.task.await.expect("it should shutdown the service"); diff --git a/packages/axum-health-check-api-server/src/lib.rs b/packages/axum-health-check-api-server/src/lib.rs index 24c5232c8..6a3b4b34d 100644 --- a/packages/axum-health-check-api-server/src/lib.rs +++ b/packages/axum-health-check-api-server/src/lib.rs @@ -1,3 +1,4 @@ +pub mod environment; pub mod handlers; pub mod resources; pub mod responses; diff --git a/packages/axum-health-check-api-server/tests/integration.rs b/packages/axum-health-check-api-server/tests/integration.rs new file mode 100644 index 000000000..13ca963a3 --- /dev/null +++ b/packages/axum-health-check-api-server/tests/integration.rs @@ -0,0 +1,19 @@ +//! Integration tests. +//! +//! ```text +//! cargo test --test integration +//! ``` +mod server; + +use torrust_tracker_clock::clock; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. 
+#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; diff --git a/packages/axum-health-check-api-server/tests/server/client.rs b/packages/axum-health-check-api-server/tests/server/client.rs new file mode 100644 index 000000000..3d8bdc7d6 --- /dev/null +++ b/packages/axum-health-check-api-server/tests/server/client.rs @@ -0,0 +1,5 @@ +use reqwest::Response; + +pub async fn get(path: &str) -> Response { + reqwest::Client::builder().build().unwrap().get(path).send().await.unwrap() +} diff --git a/packages/axum-health-check-api-server/tests/server/contract.rs b/packages/axum-health-check-api-server/tests/server/contract.rs new file mode 100644 index 000000000..2bd5d292e --- /dev/null +++ b/packages/axum-health-check-api-server/tests/server/contract.rs @@ -0,0 +1,29 @@ +use torrust_axum_health_check_api_server::environment::Started; +use torrust_axum_health_check_api_server::resources::{Report, Status}; +use torrust_server_lib::registar::Registar; +use torrust_tracker_test_helpers::{configuration, logging}; + +use crate::server::client::get; + +#[tokio::test] +async fn health_check_endpoint_should_return_status_ok_when_there_is_no_services_registered() { + logging::setup(); + + let configuration = configuration::ephemeral_with_no_services(); + + let env = Started::new(&configuration.health_check_api.into(), Registar::default()).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report = response + .json::() + .await + .expect("it should be able to get the report as json"); + + assert_eq!(report.status, Status::None); + + env.stop().await.expect("it should stop the service"); +} diff --git a/packages/axum-health-check-api-server/tests/server/mod.rs b/packages/axum-health-check-api-server/tests/server/mod.rs new file mode 100644 index 
000000000..2676be6f9 --- /dev/null +++ b/packages/axum-health-check-api-server/tests/server/mod.rs @@ -0,0 +1,2 @@ +pub mod client; +pub mod contract; diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index ad291d209..3495c314a 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -17,3 +17,5 @@ version.workspace = true [dependencies] rand = "0" torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +tracing = "0" +tracing-subscriber = { version = "0", features = ["json"] } diff --git a/packages/test-helpers/src/lib.rs b/packages/test-helpers/src/lib.rs index e66ea2adc..bd67ca770 100644 --- a/packages/test-helpers/src/lib.rs +++ b/packages/test-helpers/src/lib.rs @@ -2,4 +2,5 @@ //! //! A collection of functions and types to help with testing the tracker server. pub mod configuration; +pub mod logging; pub mod random; diff --git a/packages/test-helpers/src/logging.rs b/packages/test-helpers/src/logging.rs new file mode 100644 index 000000000..564074f3e --- /dev/null +++ b/packages/test-helpers/src/logging.rs @@ -0,0 +1,156 @@ +//! Setup for logging in tests. +use std::collections::VecDeque; +use std::io; +use std::sync::{Mutex, MutexGuard, Once, OnceLock}; + +use torrust_tracker_configuration::logging::TraceStyle; +use tracing::level_filters::LevelFilter; +use tracing_subscriber::fmt::MakeWriter; + +static INIT: Once = Once::new(); + +/// A global buffer containing the latest lines captured from logs. 
+#[doc(hidden)] +pub fn captured_logs_buffer() -> &'static Mutex { + static CAPTURED_LOGS_GLOBAL_BUFFER: OnceLock> = OnceLock::new(); + CAPTURED_LOGS_GLOBAL_BUFFER.get_or_init(|| Mutex::new(CircularBuffer::new(10000, 200))) +} + +pub fn setup() { + INIT.call_once(|| { + tracing_init(LevelFilter::ERROR, &TraceStyle::Default); + }); +} + +fn tracing_init(level_filter: LevelFilter, style: &TraceStyle) { + let mock_writer = LogCapturer::new(captured_logs_buffer()); + + let builder = tracing_subscriber::fmt() + .with_max_level(level_filter) + .with_ansi(true) + .with_test_writer() + .with_writer(mock_writer); + + let () = match style { + TraceStyle::Default => builder.init(), + TraceStyle::Pretty(display_filename) => builder.pretty().with_file(*display_filename).init(), + TraceStyle::Compact => builder.compact().init(), + TraceStyle::Json => builder.json().init(), + }; + + tracing::info!("Logging initialized"); +} + +/// It returns true is there is a log line containing all the texts passed. +/// +/// # Panics +/// +/// Will panic if it can't get the lock for the global buffer or convert it into +/// a vec. +#[must_use] +#[allow(dead_code)] +pub fn logs_contains_a_line_with(texts: &[&str]) -> bool { + // code-review: we can search directly in the buffer instead of converting + // the buffer into a string but that would slow down the tests because + // cloning should be faster that locking the buffer for searching. + // Because the buffer is not big. + let logs = String::from_utf8(captured_logs_buffer().lock().unwrap().as_vec()).unwrap(); + + for line in logs.split('\n') { + if contains(line, texts) { + return true; + } + } + + false +} + +#[allow(dead_code)] +fn contains(text: &str, texts: &[&str]) -> bool { + texts.iter().all(|&word| text.contains(word)) +} + +/// A tracing writer which captures the latests logs lines into a buffer. +/// It's used to capture the logs in the tests. 
+#[derive(Debug)] +pub struct LogCapturer<'a> { + logs: &'a Mutex, +} + +impl<'a> LogCapturer<'a> { + pub fn new(buf: &'a Mutex) -> Self { + Self { logs: buf } + } + + fn buf(&self) -> io::Result> { + self.logs.lock().map_err(|_| io::Error::from(io::ErrorKind::Other)) + } +} + +impl io::Write for LogCapturer<'_> { + fn write(&mut self, buf: &[u8]) -> io::Result { + print!("{}", String::from_utf8(buf.to_vec()).unwrap()); + + let mut target = self.buf()?; + + target.write(buf) + } + + fn flush(&mut self) -> io::Result<()> { + self.buf()?.flush() + } +} + +impl MakeWriter<'_> for LogCapturer<'_> { + type Writer = Self; + + fn make_writer(&self) -> Self::Writer { + LogCapturer::new(self.logs) + } +} + +#[derive(Debug)] +pub struct CircularBuffer { + max_size: usize, + buffer: VecDeque, +} + +impl CircularBuffer { + #[must_use] + pub fn new(max_lines: usize, average_line_size: usize) -> Self { + Self { + max_size: max_lines * average_line_size, + buffer: VecDeque::with_capacity(max_lines * average_line_size), + } + } + + /// # Errors + /// + /// Won't return any error. + #[allow(clippy::unnecessary_wraps)] + pub fn write(&mut self, buf: &[u8]) -> io::Result { + for &byte in buf { + if self.buffer.len() == self.max_size { + // Remove oldest byte to make space + self.buffer.pop_front(); + } + self.buffer.push_back(byte); + } + + Ok(buf.len()) + } + + /// # Errors + /// + /// Won't return any error. 
+ #[allow(clippy::unnecessary_wraps)] + #[allow(clippy::unused_self)] + pub fn flush(&mut self) -> io::Result<()> { + Ok(()) + } + + #[must_use] + pub fn as_vec(&self) -> Vec { + self.buffer.iter().copied().collect() + } +} diff --git a/tests/servers/health_check_api/contract.rs b/tests/servers/health_check_api/contract.rs index 553b68902..ab6b0d48e 100644 --- a/tests/servers/health_check_api/contract.rs +++ b/tests/servers/health_check_api/contract.rs @@ -1,44 +1,13 @@ -use torrust_axum_health_check_api_server::resources::{Report, Status}; -use torrust_server_lib::registar::Registar; -use torrust_tracker_test_helpers::configuration; - -use crate::common::logging; -use crate::servers::health_check_api::client::get; -use crate::servers::health_check_api::Started; - -#[tokio::test] -async fn health_check_endpoint_should_return_status_ok_when_there_is_no_services_registered() { - logging::setup(); - - let configuration = configuration::ephemeral_with_no_services(); - - let env = Started::new(&configuration.health_check_api.into(), Registar::default()).await; - - let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 - - assert_eq!(response.status(), 200); - assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); - - let report = response - .json::() - .await - .expect("it should be able to get the report as json"); - - assert_eq!(report.status, Status::None); - - env.stop().await.expect("it should stop the service"); -} - mod api { use std::sync::Arc; + use torrust_axum_health_check_api_server::environment::Started; use torrust_axum_health_check_api_server::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; use crate::common::logging; use crate::servers::api; use crate::servers::health_check_api::client::get; - use crate::servers::health_check_api::Started; #[tokio::test] pub(crate) async fn it_should_return_good_health_for_api_service() { @@ -142,12 +111,12 @@ 
mod api { mod http { use std::sync::Arc; + use torrust_axum_health_check_api_server::environment::Started; use torrust_axum_health_check_api_server::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; use crate::common::logging; use crate::servers::health_check_api::client::get; - use crate::servers::health_check_api::Started; use crate::servers::http; #[tokio::test] @@ -251,12 +220,12 @@ mod http { mod udp { use std::sync::Arc; + use torrust_axum_health_check_api_server::environment::Started; use torrust_axum_health_check_api_server::resources::{Report, Status}; use torrust_tracker_test_helpers::configuration; use crate::common::logging; use crate::servers::health_check_api::client::get; - use crate::servers::health_check_api::Started; use crate::servers::udp; #[tokio::test] diff --git a/tests/servers/health_check_api/mod.rs b/tests/servers/health_check_api/mod.rs index 9e15c5f62..2676be6f9 100644 --- a/tests/servers/health_check_api/mod.rs +++ b/tests/servers/health_check_api/mod.rs @@ -1,5 +1,2 @@ pub mod client; pub mod contract; -pub mod environment; - -pub type Started = environment::Environment; From 08efc3bc8e2e67823f32821268f9ecaee4145b24 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Feb 2025 09:57:51 +0000 Subject: [PATCH 326/802] refactor: [#1316] move tracker API integration tests to pkg --- Cargo.lock | 3 +- Cargo.toml | 1 - packages/axum-tracker-api-server/Cargo.toml | 3 ++ .../src}/environment.rs | 30 ++++++++++++++++- packages/axum-tracker-api-server/src/lib.rs | 1 + .../tests/common/fixtures.rs | 11 +++++++ .../tests/common/mod.rs | 1 + .../tests/integration.rs | 20 ++++++++++++ .../tests/server}/connection_info.rs | 0 .../tests/server}/mod.rs | 10 ++---- .../tests/server}/v1/asserts.rs | 0 .../server}/v1/contract/authentication.rs | 8 ++--- .../server}/v1/contract/context/auth_key.rs | 22 +++++++------ .../v1/contract/context/health_check.rs | 6 ++-- .../tests/server}/v1/contract/context/mod.rs | 0 
.../server}/v1/contract/context/stats.rs | 10 +++--- .../server}/v1/contract/context/torrent.rs | 14 ++++---- .../server}/v1/contract/context/whitelist.rs | 15 ++++----- .../tests/server}/v1/contract/fixtures.rs | 0 .../tests/server}/v1/contract/mod.rs | 1 - .../tests/server}/v1/mod.rs | 0 .../servers/api/v1/contract/configuration.rs | 32 ------------------- tests/servers/health_check_api/contract.rs | 5 ++- tests/servers/mod.rs | 3 +- 24 files changed, 109 insertions(+), 87 deletions(-) rename {tests/servers/api => packages/axum-tracker-api-server/src}/environment.rs (86%) create mode 100644 packages/axum-tracker-api-server/tests/common/fixtures.rs create mode 100644 packages/axum-tracker-api-server/tests/common/mod.rs create mode 100644 packages/axum-tracker-api-server/tests/integration.rs rename {tests/servers/api => packages/axum-tracker-api-server/tests/server}/connection_info.rs (100%) rename {tests/servers/api => packages/axum-tracker-api-server/tests/server}/mod.rs (78%) rename {tests/servers/api => packages/axum-tracker-api-server/tests/server}/v1/asserts.rs (100%) rename {tests/servers/api => packages/axum-tracker-api-server/tests/server}/v1/contract/authentication.rs (92%) rename {tests/servers/api => packages/axum-tracker-api-server/tests/server}/v1/contract/context/auth_key.rs (95%) rename {tests/servers/api => packages/axum-tracker-api-server/tests/server}/v1/contract/context/health_check.rs (86%) rename {tests/servers/api => packages/axum-tracker-api-server/tests/server}/v1/contract/context/mod.rs (100%) rename {tests/servers/api => packages/axum-tracker-api-server/tests/server}/v1/contract/context/stats.rs (89%) rename {tests/servers/api => packages/axum-tracker-api-server/tests/server}/v1/contract/context/torrent.rs (96%) rename {tests/servers/api => packages/axum-tracker-api-server/tests/server}/v1/contract/context/whitelist.rs (96%) rename {tests/servers/api => packages/axum-tracker-api-server/tests/server}/v1/contract/fixtures.rs (100%) rename 
{tests/servers/api => packages/axum-tracker-api-server/tests/server}/v1/contract/mod.rs (71%) rename {tests/servers/api => packages/axum-tracker-api-server/tests/server}/v1/mod.rs (100%) delete mode 100644 tests/servers/api/v1/contract/configuration.rs diff --git a/Cargo.lock b/Cargo.lock index 4c85cf407..c753c7bc2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4440,6 +4440,8 @@ dependencies = [ "tower 0.5.2", "tower-http", "tracing", + "url", + "uuid", ] [[package]] @@ -4501,7 +4503,6 @@ dependencies = [ "torrust-udp-tracker-server", "tracing", "tracing-subscriber", - "url", "uuid", "zerocopy 0.7.35", ] diff --git a/Cargo.toml b/Cargo.toml index 92d0aa5dc..184fe38ed 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -74,7 +74,6 @@ torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "packag torrust-udp-tracker-server = { version = "3.0.0-develop", path = "packages/udp-tracker-server" } tracing = "0" tracing-subscriber = { version = "0", features = ["json"] } -url = { version = "2", features = ["serde"] } uuid = { version = "1", features = ["v4"] } zerocopy = "0.7" diff --git a/packages/axum-tracker-api-server/Cargo.toml b/packages/axum-tracker-api-server/Cargo.toml index 7c7455fc4..480ee2a54 100644 --- a/packages/axum-tracker-api-server/Cargo.toml +++ b/packages/axum-tracker-api-server/Cargo.toml @@ -33,6 +33,7 @@ thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-axum-server = { version = "3.0.0-develop", path = "../axum-server" } torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } +torrust-tracker-api-client = { version = "3.0.0-develop", path = "../tracker-api-client" } torrust-tracker-api-core = { version = "3.0.0-develop", path = "../tracker-api-core" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } @@ -46,3 +47,5 @@ local-ip-address = "0" mockall 
= "0" torrust-tracker-api-client = { version = "3.0.0-develop", path = "../tracker-api-client" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } +url = { version = "2", features = ["serde"] } +uuid = { version = "1", features = ["v4"] } diff --git a/tests/servers/api/environment.rs b/packages/axum-tracker-api-server/src/environment.rs similarity index 86% rename from tests/servers/api/environment.rs rename to packages/axum-tracker-api-server/src/environment.rs index b9373b533..f6d6fb4e4 100644 --- a/tests/servers/api/environment.rs +++ b/packages/axum-tracker-api-server/src/environment.rs @@ -7,13 +7,16 @@ use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use futures::executor::block_on; use torrust_axum_server::tsl::make_rust_tls; -use torrust_axum_tracker_api_server::server::{ApiServer, Launcher, Running, Stopped}; use torrust_server_lib::registar::Registar; use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; +use crate::server::{ApiServer, Launcher, Running, Stopped}; + +pub type Started = Environment; + pub struct Environment where S: std::fmt::Debug + std::fmt::Display, @@ -38,6 +41,11 @@ where } impl Environment { + /// # Panics + /// + /// Will panic if it cannot make the TSL configuration from the provided + /// configuration. + #[must_use] pub fn new(configuration: &Arc) -> Self { initialize_global_services(configuration); @@ -59,6 +67,9 @@ impl Environment { } } + /// # Panics + /// + /// Will panic if the server cannot be started. 
pub async fn start(self) -> Environment { let access_tokens = Arc::new( self.container @@ -89,6 +100,9 @@ impl Environment { Environment::::new(configuration).start().await } + /// # Panics + /// + /// Will panic if the server cannot be stopped. pub async fn stop(self) -> Environment { Environment { container: self.container, @@ -97,6 +111,11 @@ impl Environment { } } + /// # Panics + /// + /// Will panic if it cannot build the origin for the connection info from the + /// server local socket address. + #[must_use] pub fn get_connection_info(&self) -> ConnectionInfo { let origin = Origin::new(&format!("http://{}/", self.server.state.local_addr)).unwrap(); // DevSkim: ignore DS137138 @@ -112,6 +131,7 @@ impl Environment { } } + #[must_use] pub fn bind_address(&self) -> SocketAddr { self.server.state.local_addr } @@ -123,6 +143,14 @@ pub struct EnvContainer { } impl EnvContainer { + /// # Panics + /// + /// Will panic if: + /// + /// - The configuration does not contain a HTTP tracker configuration. + /// - The configuration does not contain a UDP tracker configuration. + /// - The configuration does not contain a HTTP API configuration. + #[must_use] pub fn initialize(configuration: &Configuration) -> Self { let core_config = Arc::new(configuration.core.clone()); diff --git a/packages/axum-tracker-api-server/src/lib.rs b/packages/axum-tracker-api-server/src/lib.rs index c3591908e..0ed026654 100644 --- a/packages/axum-tracker-api-server/src/lib.rs +++ b/packages/axum-tracker-api-server/src/lib.rs @@ -153,6 +153,7 @@ //! > **NOTICE**: we are using [curl](https://curl.se/) in the API examples. //! > And you have to use quotes around the URL in order to avoid unexpected //! > errors. For example: `curl "http://127.0.0.1:1212/api/v1/stats?token=MyAccessToken"`. 
+pub mod environment; pub mod routes; pub mod server; pub mod v1; diff --git a/packages/axum-tracker-api-server/tests/common/fixtures.rs b/packages/axum-tracker-api-server/tests/common/fixtures.rs new file mode 100644 index 000000000..4589ea2ce --- /dev/null +++ b/packages/axum-tracker-api-server/tests/common/fixtures.rs @@ -0,0 +1,11 @@ +pub fn invalid_info_hashes() -> Vec { + [ + "0".to_string(), + "-1".to_string(), + "1.1".to_string(), + "INVALID INFOHASH".to_string(), + "9c38422213e30bff212b30c360d26f9a0213642".to_string(), // 39-char length instead of 40. DevSkim: ignore DS173237 + "9c38422213e30bff212b30c360d26f9a0213642&".to_string(), // Invalid char + ] + .to_vec() +} diff --git a/packages/axum-tracker-api-server/tests/common/mod.rs b/packages/axum-tracker-api-server/tests/common/mod.rs new file mode 100644 index 000000000..d066349cc --- /dev/null +++ b/packages/axum-tracker-api-server/tests/common/mod.rs @@ -0,0 +1 @@ +pub mod fixtures; diff --git a/packages/axum-tracker-api-server/tests/integration.rs b/packages/axum-tracker-api-server/tests/integration.rs new file mode 100644 index 000000000..878ac203d --- /dev/null +++ b/packages/axum-tracker-api-server/tests/integration.rs @@ -0,0 +1,20 @@ +//! Integration tests. +//! +//! ```text +//! cargo test --test integration +//! ``` + +use torrust_tracker_clock::clock; +mod common; +mod server; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. 
+#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; diff --git a/tests/servers/api/connection_info.rs b/packages/axum-tracker-api-server/tests/server/connection_info.rs similarity index 100% rename from tests/servers/api/connection_info.rs rename to packages/axum-tracker-api-server/tests/server/connection_info.rs diff --git a/tests/servers/api/mod.rs b/packages/axum-tracker-api-server/tests/server/mod.rs similarity index 78% rename from tests/servers/api/mod.rs rename to packages/axum-tracker-api-server/tests/server/mod.rs index 1176d8a6b..9dea49a4c 100644 --- a/tests/servers/api/mod.rs +++ b/packages/axum-tracker-api-server/tests/server/mod.rs @@ -1,13 +1,9 @@ -use std::sync::Arc; - -use bittorrent_tracker_core::databases::Database; -use torrust_axum_tracker_api_server::server; - pub mod connection_info; -pub mod environment; pub mod v1; -pub type Started = environment::Environment; +use std::sync::Arc; + +use bittorrent_tracker_core::databases::Database; /// It forces a database error by dropping all tables. That makes all queries /// fail. 
diff --git a/tests/servers/api/v1/asserts.rs b/packages/axum-tracker-api-server/tests/server/v1/asserts.rs similarity index 100% rename from tests/servers/api/v1/asserts.rs rename to packages/axum-tracker-api-server/tests/server/v1/asserts.rs diff --git a/tests/servers/api/v1/contract/authentication.rs b/packages/axum-tracker-api-server/tests/server/v1/contract/authentication.rs similarity index 92% rename from tests/servers/api/v1/contract/authentication.rs rename to packages/axum-tracker-api-server/tests/server/v1/contract/authentication.rs index 6cb1e52b9..5acb25a3c 100644 --- a/tests/servers/api/v1/contract/authentication.rs +++ b/packages/axum-tracker-api-server/tests/server/v1/contract/authentication.rs @@ -1,11 +1,11 @@ +use torrust_axum_tracker_api_server::environment::Started; use torrust_tracker_api_client::common::http::{Query, QueryParam}; use torrust_tracker_api_client::v1::client::{headers_with_request_id, Client}; -use torrust_tracker_test_helpers::configuration; +use torrust_tracker_test_helpers::logging::logs_contains_a_line_with; +use torrust_tracker_test_helpers::{configuration, logging}; use uuid::Uuid; -use crate::common::logging::{self, logs_contains_a_line_with}; -use crate::servers::api::v1::asserts::{assert_token_not_valid, assert_unauthorized}; -use crate::servers::api::Started; +use crate::server::v1::asserts::{assert_token_not_valid, assert_unauthorized}; #[tokio::test] async fn should_authenticate_requests_by_using_a_token_query_param() { diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/packages/axum-tracker-api-server/tests/server/v1/contract/context/auth_key.rs similarity index 95% rename from tests/servers/api/v1/contract/context/auth_key.rs rename to packages/axum-tracker-api-server/tests/server/v1/contract/context/auth_key.rs index bc6d38bae..92e4b59fe 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/packages/axum-tracker-api-server/tests/server/v1/contract/context/auth_key.rs @@ -2,18 +2,19 @@ 
use std::time::Duration; use bittorrent_tracker_core::authentication::Key; use serde::Serialize; +use torrust_axum_tracker_api_server::environment::Started; use torrust_tracker_api_client::v1::client::{headers_with_request_id, AddKeyForm, Client}; -use torrust_tracker_test_helpers::configuration; +use torrust_tracker_test_helpers::logging::logs_contains_a_line_with; +use torrust_tracker_test_helpers::{configuration, logging}; use uuid::Uuid; -use crate::common::logging::{self, logs_contains_a_line_with}; -use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::servers::api::v1::asserts::{ +use crate::server::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::server::force_database_error; +use crate::server::v1::asserts::{ assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, assert_invalid_auth_key_get_param, assert_invalid_auth_key_post_param, assert_ok, assert_token_not_valid, assert_unauthorized, assert_unprocessable_auth_key_duration_param, }; -use crate::servers::api::{force_database_error, Started}; #[tokio::test] async fn should_allow_generating_a_new_random_auth_key() { @@ -481,17 +482,18 @@ async fn should_not_allow_reloading_keys_for_unauthenticated_users() { mod deprecated_generate_key_endpoint { use bittorrent_tracker_core::authentication::Key; + use torrust_axum_tracker_api_server::environment::Started; use torrust_tracker_api_client::v1::client::{headers_with_request_id, Client}; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker_test_helpers::logging::logs_contains_a_line_with; + use torrust_tracker_test_helpers::{configuration, logging}; use uuid::Uuid; - use crate::common::logging::{self, logs_contains_a_line_with}; - use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; - use crate::servers::api::v1::asserts::{ + use 
crate::server::connection_info::{connection_with_invalid_token, connection_with_no_token}; + use crate::server::force_database_error; + use crate::server::v1::asserts::{ assert_auth_key_utf8, assert_failed_to_generate_key, assert_invalid_key_duration_param, assert_token_not_valid, assert_unauthorized, }; - use crate::servers::api::{force_database_error, Started}; #[tokio::test] async fn should_allow_generating_a_new_auth_key() { diff --git a/tests/servers/api/v1/contract/context/health_check.rs b/packages/axum-tracker-api-server/tests/server/v1/contract/context/health_check.rs similarity index 86% rename from tests/servers/api/v1/contract/context/health_check.rs rename to packages/axum-tracker-api-server/tests/server/v1/contract/context/health_check.rs index b0812dc8c..d543422d3 100644 --- a/tests/servers/api/v1/contract/context/health_check.rs +++ b/packages/axum-tracker-api-server/tests/server/v1/contract/context/health_check.rs @@ -1,11 +1,9 @@ +use torrust_axum_tracker_api_server::environment::Started; use torrust_axum_tracker_api_server::v1::context::health_check::resources::{Report, Status}; use torrust_tracker_api_client::v1::client::get; -use torrust_tracker_test_helpers::configuration; +use torrust_tracker_test_helpers::{configuration, logging}; use url::Url; -use crate::common::logging; -use crate::servers::api::Started; - #[tokio::test] async fn health_check_endpoint_should_return_status_ok_if_api_is_running() { logging::setup(); diff --git a/tests/servers/api/v1/contract/context/mod.rs b/packages/axum-tracker-api-server/tests/server/v1/contract/context/mod.rs similarity index 100% rename from tests/servers/api/v1/contract/context/mod.rs rename to packages/axum-tracker-api-server/tests/server/v1/contract/context/mod.rs diff --git a/tests/servers/api/v1/contract/context/stats.rs b/packages/axum-tracker-api-server/tests/server/v1/contract/context/stats.rs similarity index 89% rename from tests/servers/api/v1/contract/context/stats.rs rename to 
packages/axum-tracker-api-server/tests/server/v1/contract/context/stats.rs index 3d8e6481c..179e5c555 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/packages/axum-tracker-api-server/tests/server/v1/contract/context/stats.rs @@ -1,16 +1,16 @@ use std::str::FromStr; use bittorrent_primitives::info_hash::InfoHash; +use torrust_axum_tracker_api_server::environment::Started; use torrust_axum_tracker_api_server::v1::context::stats::resources::Stats; use torrust_tracker_api_client::v1::client::{headers_with_request_id, Client}; use torrust_tracker_primitives::peer::fixture::PeerBuilder; -use torrust_tracker_test_helpers::configuration; +use torrust_tracker_test_helpers::logging::logs_contains_a_line_with; +use torrust_tracker_test_helpers::{configuration, logging}; use uuid::Uuid; -use crate::common::logging::{self, logs_contains_a_line_with}; -use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::servers::api::v1::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; -use crate::servers::api::Started; +use crate::server::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::server::v1::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; #[tokio::test] async fn should_allow_getting_tracker_statistics() { diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/packages/axum-tracker-api-server/tests/server/v1/contract/context/torrent.rs similarity index 96% rename from tests/servers/api/v1/contract/context/torrent.rs rename to packages/axum-tracker-api-server/tests/server/v1/contract/context/torrent.rs index c2d5bfbaf..d77147f38 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/packages/axum-tracker-api-server/tests/server/v1/contract/context/torrent.rs @@ -1,24 +1,22 @@ use std::str::FromStr; use bittorrent_primitives::info_hash::InfoHash; +use torrust_axum_tracker_api_server::environment::Started; use 
torrust_axum_tracker_api_server::v1::context::torrent::resources::peer::Peer; use torrust_axum_tracker_api_server::v1::context::torrent::resources::torrent::{self, Torrent}; use torrust_tracker_api_client::common::http::{Query, QueryParam}; use torrust_tracker_api_client::v1::client::{headers_with_request_id, Client}; use torrust_tracker_primitives::peer::fixture::PeerBuilder; -use torrust_tracker_test_helpers::configuration; +use torrust_tracker_test_helpers::logging::logs_contains_a_line_with; +use torrust_tracker_test_helpers::{configuration, logging}; use uuid::Uuid; -use crate::common::logging::{self, logs_contains_a_line_with}; -use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::servers::api::v1::asserts::{ +use crate::server::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::server::v1::asserts::{ assert_bad_request, assert_invalid_infohash_param, assert_not_found, assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, }; -use crate::servers::api::v1::contract::fixtures::{ - invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found, -}; -use crate::servers::api::Started; +use crate::server::v1::contract::fixtures::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; #[tokio::test] async fn should_allow_getting_all_torrents() { diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/packages/axum-tracker-api-server/tests/server/v1/contract/context/whitelist.rs similarity index 96% rename from tests/servers/api/v1/contract/context/whitelist.rs rename to packages/axum-tracker-api-server/tests/server/v1/contract/context/whitelist.rs index 6742da4d8..e41b74f45 100644 --- a/tests/servers/api/v1/contract/context/whitelist.rs +++ b/packages/axum-tracker-api-server/tests/server/v1/contract/context/whitelist.rs @@ -1,20 +1,19 @@ use 
std::str::FromStr; use bittorrent_primitives::info_hash::InfoHash; +use torrust_axum_tracker_api_server::environment::Started; use torrust_tracker_api_client::v1::client::{headers_with_request_id, Client}; -use torrust_tracker_test_helpers::configuration; +use torrust_tracker_test_helpers::logging::logs_contains_a_line_with; +use torrust_tracker_test_helpers::{configuration, logging}; use uuid::Uuid; -use crate::common::logging::{self, logs_contains_a_line_with}; -use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::servers::api::v1::asserts::{ +use crate::server::connection_info::{connection_with_invalid_token, connection_with_no_token}; +use crate::server::force_database_error; +use crate::server::v1::asserts::{ assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, assert_invalid_infohash_param, assert_not_found, assert_ok, assert_token_not_valid, assert_unauthorized, }; -use crate::servers::api::v1::contract::fixtures::{ - invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found, -}; -use crate::servers::api::{force_database_error, Started}; +use crate::server::v1::contract::fixtures::{invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found}; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { diff --git a/tests/servers/api/v1/contract/fixtures.rs b/packages/axum-tracker-api-server/tests/server/v1/contract/fixtures.rs similarity index 100% rename from tests/servers/api/v1/contract/fixtures.rs rename to packages/axum-tracker-api-server/tests/server/v1/contract/fixtures.rs diff --git a/tests/servers/api/v1/contract/mod.rs b/packages/axum-tracker-api-server/tests/server/v1/contract/mod.rs similarity index 71% rename from tests/servers/api/v1/contract/mod.rs rename to packages/axum-tracker-api-server/tests/server/v1/contract/mod.rs index 38b4a2b37..2a3f78afd 100644 --- 
a/tests/servers/api/v1/contract/mod.rs +++ b/packages/axum-tracker-api-server/tests/server/v1/contract/mod.rs @@ -1,4 +1,3 @@ pub mod authentication; -pub mod configuration; pub mod context; pub mod fixtures; diff --git a/tests/servers/api/v1/mod.rs b/packages/axum-tracker-api-server/tests/server/v1/mod.rs similarity index 100% rename from tests/servers/api/v1/mod.rs rename to packages/axum-tracker-api-server/tests/server/v1/mod.rs diff --git a/tests/servers/api/v1/contract/configuration.rs b/tests/servers/api/v1/contract/configuration.rs deleted file mode 100644 index 91aa138a8..000000000 --- a/tests/servers/api/v1/contract/configuration.rs +++ /dev/null @@ -1,32 +0,0 @@ -// use std::sync::Arc; - -// use axum_server::tls_rustls::RustlsConfig; -// use futures::executor::block_on; -// use torrust_tracker_test_helpers::configuration; - -// use crate::common::app::setup_with_configuration; -// use crate::servers::api::environment::stopped_environment; - -#[tokio::test] -#[ignore] -#[should_panic = "Could not receive bind_address."] -async fn should_fail_with_ssl_enabled_and_bad_ssl_config() { - // let tracker = setup_with_configuration(&Arc::new(configuration::ephemeral())); - - // let config = tracker.config.http_api.clone(); - - // let bind_to = config - // .bind_address - // .parse::() - // .expect("Tracker API bind_address invalid."); - - // let tls = - // if let (true, Some(cert), Some(key)) = (&true, &Some("bad cert path".to_string()), &Some("bad cert path".to_string())) { - // Some(block_on(RustlsConfig::from_pem_file(cert, key)).expect("Could not read tls cert.")) - // } else { - // None - // }; - - // let env = new_stopped(tracker, bind_to, tls); - // env.start().await; -} diff --git a/tests/servers/health_check_api/contract.rs b/tests/servers/health_check_api/contract.rs index ab6b0d48e..f42d62223 100644 --- a/tests/servers/health_check_api/contract.rs +++ b/tests/servers/health_check_api/contract.rs @@ -6,7 +6,6 @@ mod api { use 
torrust_tracker_test_helpers::configuration; use crate::common::logging; - use crate::servers::api; use crate::servers::health_check_api::client::get; #[tokio::test] @@ -15,7 +14,7 @@ mod api { let configuration = Arc::new(configuration::ephemeral()); - let service = api::Started::new(&configuration).await; + let service = torrust_axum_tracker_api_server::environment::Started::new(&configuration).await; let registar = service.registar.clone(); @@ -62,7 +61,7 @@ mod api { let configuration = Arc::new(configuration::ephemeral()); - let service = api::Started::new(&configuration).await; + let service = torrust_axum_tracker_api_server::environment::Started::new(&configuration).await; let binding = service.bind_address(); diff --git a/tests/servers/mod.rs b/tests/servers/mod.rs index 65e9a665b..d5eb4e916 100644 --- a/tests/servers/mod.rs +++ b/tests/servers/mod.rs @@ -1,4 +1,3 @@ -mod api; -pub mod health_check_api; +mod health_check_api; mod http; mod udp; From 92505b9c10a0f39cb6a8c1410e374285fe9a1f39 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Feb 2025 11:16:55 +0000 Subject: [PATCH 327/802] refactor: [#1316] UDP tracker integration tests to pkg --- Cargo.lock | 2 +- Cargo.toml | 1 - packages/udp-tracker-server/Cargo.toml | 1 + .../udp-tracker-server/src}/environment.rs | 31 ++++++++++---- packages/udp-tracker-server/src/lib.rs | 8 ++-- .../tests/common/fixtures.rs | 17 ++++++++ .../udp-tracker-server/tests/common/mod.rs | 2 + .../udp-tracker-server/tests/common/udp.rs | 41 +++++++++++++++++++ .../udp-tracker-server/tests/integration.rs | 20 +++++++++ .../tests/server}/asserts.rs | 0 .../tests/server}/contract.rs | 41 ++++++++----------- .../udp-tracker-server/tests/server/mod.rs | 2 + tests/common/fixtures.rs | 7 ---- tests/servers/health_check_api/contract.rs | 5 +-- tests/servers/mod.rs | 1 - tests/servers/udp/mod.rs | 7 ---- 16 files changed, 131 insertions(+), 55 deletions(-) rename {tests/servers/udp => 
packages/udp-tracker-server/src}/environment.rs (85%) create mode 100644 packages/udp-tracker-server/tests/common/fixtures.rs create mode 100644 packages/udp-tracker-server/tests/common/mod.rs create mode 100644 packages/udp-tracker-server/tests/common/udp.rs create mode 100644 packages/udp-tracker-server/tests/integration.rs rename {tests/servers/udp => packages/udp-tracker-server/tests/server}/asserts.rs (100%) rename {tests/servers/udp => packages/udp-tracker-server/tests/server}/contract.rs (88%) create mode 100644 packages/udp-tracker-server/tests/server/mod.rs delete mode 100644 tests/servers/udp/mod.rs diff --git a/Cargo.lock b/Cargo.lock index c753c7bc2..30c29b905 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4463,7 +4463,6 @@ dependencies = [ "axum-server", "bittorrent-http-tracker-core", "bittorrent-primitives", - "bittorrent-tracker-client", "bittorrent-tracker-core", "bittorrent-udp-tracker-core", "chrono", @@ -4662,6 +4661,7 @@ dependencies = [ "futures-util", "local-ip-address", "mockall", + "rand 0.8.5", "ringbuf", "thiserror 2.0.11", "tokio", diff --git a/Cargo.toml b/Cargo.toml index 184fe38ed..5b063bd14 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,7 +38,6 @@ aquatic_udp_protocol = "0" axum-server = { version = "0", features = ["tls-rustls-no-provider"] } bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "packages/http-tracker-core" } bittorrent-primitives = "0.1.0" -bittorrent-tracker-client = { version = "3.0.0-develop", path = "packages/tracker-client" } bittorrent-tracker-core = { version = "3.0.0-develop", path = "packages/tracker-core" } bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "packages/udp-tracker-core" } chrono = { version = "0", default-features = false, features = ["clock"] } diff --git a/packages/udp-tracker-server/Cargo.toml b/packages/udp-tracker-server/Cargo.toml index 7ebba677f..f8fcd2def 100644 --- a/packages/udp-tracker-server/Cargo.toml +++ b/packages/udp-tracker-server/Cargo.toml @@ 
-38,4 +38,5 @@ zerocopy = "0.7" [dev-dependencies] local-ip-address = "0" mockall = "0" +rand = "0" torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } diff --git a/tests/servers/udp/environment.rs b/packages/udp-tracker-server/src/environment.rs similarity index 85% rename from tests/servers/udp/environment.rs rename to packages/udp-tracker-server/src/environment.rs index 7d91fe535..0ab3bdea1 100644 --- a/tests/servers/udp/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -7,9 +7,12 @@ use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration, DEFAULT_TIMEOUT}; use torrust_tracker_primitives::peer; -use torrust_udp_tracker_server::server::spawner::Spawner; -use torrust_udp_tracker_server::server::states::{Running, Stopped}; -use torrust_udp_tracker_server::server::Server; + +use crate::server::spawner::Spawner; +use crate::server::states::{Running, Stopped}; +use crate::server::Server; + +pub type Started = Environment; pub struct Environment where @@ -37,6 +40,7 @@ where impl Environment { #[allow(dead_code)] + #[must_use] pub fn new(configuration: &Arc) -> Self { initialize_global_services(configuration); @@ -53,6 +57,9 @@ impl Environment { } } + /// # Panics + /// + /// Will panic if it cannot start the server. #[allow(dead_code)] pub async fn start(self) -> Environment { let cookie_lifetime = self.container.udp_tracker_core_container.udp_tracker_config.cookie_lifetime; @@ -74,12 +81,18 @@ impl Environment { } impl Environment { + /// # Panics + /// + /// Will panic if it cannot start the server within the timeout. 
pub async fn new(configuration: &Arc) -> Self { tokio::time::timeout(DEFAULT_TIMEOUT, Environment::::new(configuration).start()) .await .expect("it should create an environment within the timeout") } + /// # Panics + /// + /// Will panic if it cannot stop the service within the timeout. #[allow(dead_code)] pub async fn stop(self) -> Environment { let stopped = tokio::time::timeout(DEFAULT_TIMEOUT, self.server.stop()) @@ -89,10 +102,11 @@ impl Environment { Environment { container: self.container, registar: Registar::default(), - server: stopped.expect("it stop the udp tracker service"), + server: stopped.expect("it should stop the udp tracker service"), } } + #[must_use] pub fn bind_address(&self) -> SocketAddr { self.server.state.local_addr } @@ -104,6 +118,10 @@ pub struct EnvContainer { } impl EnvContainer { + /// # Panics + /// + /// Will panic if the configuration is missing the UDP tracker configuration. + #[must_use] pub fn initialize(configuration: &Configuration) -> Self { let core_config = Arc::new(configuration.core.clone()); let udp_tracker_configurations = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); @@ -134,10 +152,9 @@ mod tests { use std::time::Duration; use tokio::time::sleep; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker_test_helpers::{configuration, logging}; - use crate::common::logging; - use crate::servers::udp::Started; + use crate::environment::Started; #[tokio::test] async fn it_should_make_and_stop_udp_server() { diff --git a/packages/udp-tracker-server/src/lib.rs b/packages/udp-tracker-server/src/lib.rs index a07f2e665..8e3cf503b 100644 --- a/packages/udp-tracker-server/src/lib.rs +++ b/packages/udp-tracker-server/src/lib.rs @@ -634,15 +634,15 @@ //! documentation by [Arvid Norberg](https://github.com/arvidn) was very //! supportive in the development of this documentation. Some descriptions were //! 
taken from the [libtorrent](https://www.rasterbar.com/products/libtorrent/udp_tracker_protocol.html). +pub mod environment; +pub mod error; +pub mod handlers; +pub mod server; use std::net::SocketAddr; use torrust_tracker_clock::clock; -pub mod error; -pub mod handlers; -pub mod server; - /// The maximum number of bytes in a UDP packet. pub const MAX_PACKET_SIZE: usize = 1496; diff --git a/packages/udp-tracker-server/tests/common/fixtures.rs b/packages/udp-tracker-server/tests/common/fixtures.rs new file mode 100644 index 000000000..477314398 --- /dev/null +++ b/packages/udp-tracker-server/tests/common/fixtures.rs @@ -0,0 +1,17 @@ +use aquatic_udp_protocol::TransactionId; +use bittorrent_primitives::info_hash::InfoHash; +use rand::prelude::*; + +/// Returns a random info hash. +pub fn random_info_hash() -> InfoHash { + let mut rng = rand::thread_rng(); + let random_bytes: [u8; 20] = rng.gen(); + + InfoHash::from_bytes(&random_bytes) +} + +/// Returns a random transaction id. +pub fn random_transaction_id() -> TransactionId { + let random_value = rand::thread_rng().gen(); + TransactionId::new(random_value) +} diff --git a/packages/udp-tracker-server/tests/common/mod.rs b/packages/udp-tracker-server/tests/common/mod.rs new file mode 100644 index 000000000..d327fd14f --- /dev/null +++ b/packages/udp-tracker-server/tests/common/mod.rs @@ -0,0 +1,2 @@ +pub mod fixtures; +pub mod udp; diff --git a/packages/udp-tracker-server/tests/common/udp.rs b/packages/udp-tracker-server/tests/common/udp.rs new file mode 100644 index 000000000..3d84e2b97 --- /dev/null +++ b/packages/udp-tracker-server/tests/common/udp.rs @@ -0,0 +1,41 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use tokio::net::UdpSocket; + +/// A generic UDP client +pub struct Client { + pub socket: Arc, +} + +impl Client { + #[allow(dead_code)] + pub async fn connected(remote_socket_addr: &SocketAddr, local_socket_addr: &SocketAddr) -> Client { + let client = Client::bind(local_socket_addr).await; + 
client.connect(remote_socket_addr).await; + client + } + + pub async fn bind(local_socket_addr: &SocketAddr) -> Self { + let socket = UdpSocket::bind(local_socket_addr).await.unwrap(); + Self { + socket: Arc::new(socket), + } + } + + pub async fn connect(&self, remote_address: &SocketAddr) { + self.socket.connect(remote_address).await.unwrap(); + } + + #[allow(dead_code)] + pub async fn send(&self, bytes: &[u8]) -> usize { + self.socket.writable().await.unwrap(); + self.socket.send(bytes).await.unwrap() + } + + #[allow(dead_code)] + pub async fn receive(&self, bytes: &mut [u8]) -> usize { + self.socket.readable().await.unwrap(); + self.socket.recv(bytes).await.unwrap() + } +} diff --git a/packages/udp-tracker-server/tests/integration.rs b/packages/udp-tracker-server/tests/integration.rs new file mode 100644 index 000000000..70b3aeb89 --- /dev/null +++ b/packages/udp-tracker-server/tests/integration.rs @@ -0,0 +1,20 @@ +//! Integration tests. +//! +//! ```text +//! cargo test --test integration +//! ``` +mod common; +mod server; + +use torrust_tracker_clock::clock; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. 
+#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; diff --git a/tests/servers/udp/asserts.rs b/packages/udp-tracker-server/tests/server/asserts.rs similarity index 100% rename from tests/servers/udp/asserts.rs rename to packages/udp-tracker-server/tests/server/asserts.rs diff --git a/tests/servers/udp/contract.rs b/packages/udp-tracker-server/tests/server/contract.rs similarity index 88% rename from tests/servers/udp/contract.rs rename to packages/udp-tracker-server/tests/server/contract.rs index 78a511bd4..d2da552a2 100644 --- a/tests/servers/udp/contract.rs +++ b/packages/udp-tracker-server/tests/server/contract.rs @@ -8,12 +8,10 @@ use core::panic; use aquatic_udp_protocol::{ConnectRequest, ConnectionId, Response, TransactionId}; use bittorrent_tracker_client::udp::client::UdpTrackerClient; use torrust_tracker_configuration::DEFAULT_TIMEOUT; -use torrust_tracker_test_helpers::configuration; +use torrust_tracker_test_helpers::{configuration, logging}; use torrust_udp_tracker_server::MAX_PACKET_SIZE; -use crate::common::logging; -use crate::servers::udp::asserts::get_error_response_message; -use crate::servers::udp::Started; +use crate::server::asserts::get_error_response_message; fn empty_udp_request() -> [u8; MAX_PACKET_SIZE] { [0; MAX_PACKET_SIZE] @@ -42,7 +40,7 @@ async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrac async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { logging::setup(); - let env = Started::new(&configuration::ephemeral().into()).await; + let env = torrust_udp_tracker_server::environment::Started::new(&configuration::ephemeral().into()).await; let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { Ok(udp_client) => udp_client, @@ -70,17 +68,15 @@ mod receiving_a_connection_request { use aquatic_udp_protocol::{ConnectRequest, TransactionId}; use bittorrent_tracker_client::udp::client::UdpTrackerClient; use 
torrust_tracker_configuration::DEFAULT_TIMEOUT; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker_test_helpers::{configuration, logging}; - use crate::common::logging; - use crate::servers::udp::asserts::is_connect_response; - use crate::servers::udp::Started; + use crate::server::asserts::is_connect_response; #[tokio::test] async fn should_return_a_connect_response() { logging::setup(); - let env = Started::new(&configuration::ephemeral().into()).await; + let env = torrust_udp_tracker_server::environment::Started::new(&configuration::ephemeral().into()).await; let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { Ok(udp_tracker_client) => udp_tracker_client, @@ -116,13 +112,12 @@ mod receiving_an_announce_request { }; use bittorrent_tracker_client::udp::client::UdpTrackerClient; use torrust_tracker_configuration::DEFAULT_TIMEOUT; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker_test_helpers::logging::logs_contains_a_line_with; + use torrust_tracker_test_helpers::{configuration, logging}; use crate::common::fixtures::{random_info_hash, random_transaction_id}; - use crate::common::logging::{self, logs_contains_a_line_with}; - use crate::servers::udp::asserts::is_ipv4_announce_response; - use crate::servers::udp::contract::send_connection_request; - use crate::servers::udp::Started; + use crate::server::asserts::is_ipv4_announce_response; + use crate::server::contract::send_connection_request; pub async fn assert_send_and_get_announce( tx_id: TransactionId, @@ -181,7 +176,7 @@ mod receiving_an_announce_request { async fn should_return_an_announce_response() { logging::setup(); - let env = Started::new(&configuration::ephemeral().into()).await; + let env = torrust_udp_tracker_server::environment::Started::new(&configuration::ephemeral().into()).await; let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { Ok(udp_tracker_client) => udp_tracker_client, @@ -203,7 
+198,7 @@ mod receiving_an_announce_request { async fn should_return_many_announce_response() { logging::setup(); - let env = Started::new(&configuration::ephemeral().into()).await; + let env = torrust_udp_tracker_server::environment::Started::new(&configuration::ephemeral().into()).await; let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { Ok(udp_tracker_client) => udp_tracker_client, @@ -228,7 +223,7 @@ mod receiving_an_announce_request { async fn should_ban_the_client_ip_if_it_sends_more_than_10_requests_with_a_cookie_value_not_normal() { logging::setup(); - let env = Started::new(&configuration::ephemeral().into()).await; + let env = torrust_udp_tracker_server::environment::Started::new(&configuration::ephemeral().into()).await; let ban_service = env.container.udp_tracker_core_container.ban_service.clone(); let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { @@ -309,18 +304,16 @@ mod receiving_an_scrape_request { use aquatic_udp_protocol::{ConnectionId, InfoHash, ScrapeRequest, TransactionId}; use bittorrent_tracker_client::udp::client::UdpTrackerClient; use torrust_tracker_configuration::DEFAULT_TIMEOUT; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker_test_helpers::{configuration, logging}; - use crate::common::logging; - use crate::servers::udp::asserts::is_scrape_response; - use crate::servers::udp::contract::send_connection_request; - use crate::servers::udp::Started; + use crate::server::asserts::is_scrape_response; + use crate::server::contract::send_connection_request; #[tokio::test] async fn should_return_a_scrape_response() { logging::setup(); - let env = Started::new(&configuration::ephemeral().into()).await; + let env = torrust_udp_tracker_server::environment::Started::new(&configuration::ephemeral().into()).await; let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { Ok(udp_tracker_client) => udp_tracker_client, diff --git 
a/packages/udp-tracker-server/tests/server/mod.rs b/packages/udp-tracker-server/tests/server/mod.rs new file mode 100644 index 000000000..e2db6b4ce --- /dev/null +++ b/packages/udp-tracker-server/tests/server/mod.rs @@ -0,0 +1,2 @@ +pub mod asserts; +pub mod contract; diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index 1dd85ba2d..fa6884425 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -1,4 +1,3 @@ -use aquatic_udp_protocol::TransactionId; use bittorrent_primitives::info_hash::InfoHash; #[allow(dead_code)] @@ -21,9 +20,3 @@ pub fn random_info_hash() -> InfoHash { InfoHash::from_bytes(&random_bytes) } - -/// Returns a random transaction id. -pub fn random_transaction_id() -> TransactionId { - let random_value = rand::Rng::random::(&mut rand::rng()); - TransactionId::new(random_value) -} diff --git a/tests/servers/health_check_api/contract.rs b/tests/servers/health_check_api/contract.rs index f42d62223..875510db3 100644 --- a/tests/servers/health_check_api/contract.rs +++ b/tests/servers/health_check_api/contract.rs @@ -225,7 +225,6 @@ mod udp { use crate::common::logging; use crate::servers::health_check_api::client::get; - use crate::servers::udp; #[tokio::test] pub(crate) async fn it_should_return_good_health_for_udp_service() { @@ -233,7 +232,7 @@ mod udp { let configuration = Arc::new(configuration::ephemeral()); - let service = udp::Started::new(&configuration).await; + let service = torrust_udp_tracker_server::environment::Started::new(&configuration).await; let registar = service.registar.clone(); @@ -276,7 +275,7 @@ mod udp { let configuration = Arc::new(configuration::ephemeral()); - let service = udp::Started::new(&configuration).await; + let service = torrust_udp_tracker_server::environment::Started::new(&configuration).await; let binding = service.bind_address(); diff --git a/tests/servers/mod.rs b/tests/servers/mod.rs index d5eb4e916..627073101 100644 --- a/tests/servers/mod.rs +++ b/tests/servers/mod.rs @@ 
-1,3 +1,2 @@ mod health_check_api; mod http; -mod udp; diff --git a/tests/servers/udp/mod.rs b/tests/servers/udp/mod.rs deleted file mode 100644 index c52115081..000000000 --- a/tests/servers/udp/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -pub mod asserts; -pub mod contract; -pub mod environment; - -use torrust_udp_tracker_server::server::states::Running; - -pub type Started = environment::Environment; From 1aef0c193c3c2443751e944feedfc4fd51bb13e2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Feb 2025 11:47:09 +0000 Subject: [PATCH 328/802] refactor: [#1316] move HTTP tracker integration tests to pkg --- Cargo.lock | 16 +-- Cargo.toml | 8 -- packages/axum-http-tracker-server/Cargo.toml | 9 ++ .../src}/environment.rs | 20 +++- packages/axum-http-tracker-server/src/lib.rs | 5 +- .../tests}/common/fixtures.rs | 6 +- .../tests/common/http.rs | 54 ++++++++++ .../tests/common/mod.rs | 2 + .../tests/integration.rs | 20 ++++ .../tests/server}/asserts.rs | 2 +- .../tests/server}/client.rs | 0 .../tests/server}/mod.rs | 4 - .../tests/server}/requests/announce.rs | 2 +- .../tests/server}/requests/mod.rs | 0 .../tests/server}/requests/scrape.rs | 2 +- .../tests/server}/responses/announce.rs | 0 .../tests/server}/responses/error.rs | 0 .../tests/server}/responses/mod.rs | 0 .../tests/server}/responses/scrape.rs | 2 +- .../tests/server}/v1/contract.rs | 100 +++++++++--------- .../tests/server}/v1/mod.rs | 0 tests/common/mod.rs | 1 - tests/servers/health_check_api/contract.rs | 5 +- tests/servers/http/connection_info.rs | 16 --- tests/servers/mod.rs | 1 - 25 files changed, 172 insertions(+), 103 deletions(-) rename {tests/servers/http => packages/axum-http-tracker-server/src}/environment.rs (87%) rename {tests => packages/axum-http-tracker-server/tests}/common/fixtures.rs (83%) create mode 100644 packages/axum-http-tracker-server/tests/common/http.rs create mode 100644 packages/axum-http-tracker-server/tests/common/mod.rs create mode 100644 
packages/axum-http-tracker-server/tests/integration.rs rename {tests/servers/http => packages/axum-http-tracker-server/tests/server}/asserts.rs (99%) rename {tests/servers/http => packages/axum-http-tracker-server/tests/server}/client.rs (100%) rename {tests/servers/http => packages/axum-http-tracker-server/tests/server}/mod.rs (82%) rename {tests/servers/http => packages/axum-http-tracker-server/tests/server}/requests/announce.rs (99%) rename {tests/servers/http => packages/axum-http-tracker-server/tests/server}/requests/mod.rs (100%) rename {tests/servers/http => packages/axum-http-tracker-server/tests/server}/requests/scrape.rs (97%) rename {tests/servers/http => packages/axum-http-tracker-server/tests/server}/responses/announce.rs (100%) rename {tests/servers/http => packages/axum-http-tracker-server/tests/server}/responses/error.rs (100%) rename {tests/servers/http => packages/axum-http-tracker-server/tests/server}/responses/mod.rs (100%) rename {tests/servers/http => packages/axum-http-tracker-server/tests/server}/responses/scrape.rs (99%) rename {tests/servers/http => packages/axum-http-tracker-server/tests/server}/v1/contract.rs (94%) rename {tests/servers/http => packages/axum-http-tracker-server/tests/server}/v1/mod.rs (100%) delete mode 100644 tests/servers/http/connection_info.rs diff --git a/Cargo.lock b/Cargo.lock index 30c29b905..8cc377fbb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4372,8 +4372,14 @@ dependencies = [ "derive_more", "futures", "hyper", + "local-ip-address", + "percent-encoding", + "rand 0.8.5", "reqwest", "serde", + "serde_bencode", + "serde_bytes", + "serde_repr", "tokio", "torrust-axum-server", "torrust-server-lib", @@ -4384,6 +4390,8 @@ dependencies = [ "tower 0.5.2", "tower-http", "tracing", + "uuid", + "zerocopy 0.7.35", ] [[package]] @@ -4459,10 +4467,8 @@ name = "torrust-tracker" version = "3.0.0-develop" dependencies = [ "anyhow", - "aquatic_udp_protocol", "axum-server", "bittorrent-http-tracker-core", - 
"bittorrent-primitives", "bittorrent-tracker-core", "bittorrent-udp-tracker-core", "chrono", @@ -4474,7 +4480,6 @@ dependencies = [ "local-ip-address", "mockall", "parking_lot", - "percent-encoding", "r2d2", "r2d2_mysql", "r2d2_sqlite", @@ -4482,10 +4487,8 @@ dependencies = [ "regex", "reqwest", "serde", - "serde_bencode", "serde_bytes", "serde_json", - "serde_repr", "tokio", "torrust-axum-health-check-api-server", "torrust-axum-http-tracker-server", @@ -4496,14 +4499,11 @@ dependencies = [ "torrust-tracker-api-core", "torrust-tracker-clock", "torrust-tracker-configuration", - "torrust-tracker-primitives", "torrust-tracker-test-helpers", "torrust-tracker-torrent-repository", "torrust-udp-tracker-server", "tracing", "tracing-subscriber", - "uuid", - "zerocopy 0.7.35", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 5b063bd14..40ddeca09 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,10 +34,8 @@ version = "3.0.0-develop" [dependencies] anyhow = "1" -aquatic_udp_protocol = "0" axum-server = { version = "0", features = ["tls-rustls-no-provider"] } bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "packages/http-tracker-core" } -bittorrent-primitives = "0.1.0" bittorrent-tracker-core = { version = "3.0.0-develop", path = "packages/tracker-core" } bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "packages/udp-tracker-core" } chrono = { version = "0", default-features = false, features = ["clock"] } @@ -47,7 +45,6 @@ dashmap = "6" figment = "0" futures = "0" parking_lot = "0" -percent-encoding = "2" r2d2 = "0" r2d2_mysql = "25" r2d2_sqlite = { version = "0", features = ["bundled"] } @@ -55,10 +52,8 @@ rand = "0" regex = "1" reqwest = { version = "0", features = ["json"] } serde = { version = "1", features = ["derive"] } -serde_bencode = "0" serde_bytes = "0" serde_json = { version = "1", features = ["preserve_order"] } -serde_repr = "0" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } 
torrust-axum-health-check-api-server = { version = "3.0.0-develop", path = "packages/axum-health-check-api-server" } torrust-axum-http-tracker-server = { version = "3.0.0-develop", path = "packages/axum-http-tracker-server" } @@ -68,13 +63,10 @@ torrust-server-lib = { version = "3.0.0-develop", path = "packages/server-lib" } torrust-tracker-api-core = { version = "3.0.0-develop", path = "packages/tracker-api-core" } torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/configuration" } -torrust-tracker-primitives = { version = "3.0.0-develop", path = "packages/primitives" } torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "packages/torrent-repository" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "packages/udp-tracker-server" } tracing = "0" tracing-subscriber = { version = "0", features = ["json"] } -uuid = { version = "1", features = ["v4"] } -zerocopy = "0.7" [package.metadata.cargo-machete] ignored = [ diff --git a/packages/axum-http-tracker-server/Cargo.toml b/packages/axum-http-tracker-server/Cargo.toml index abb419e4a..0c64ee986 100644 --- a/packages/axum-http-tracker-server/Cargo.toml +++ b/packages/axum-http-tracker-server/Cargo.toml @@ -30,6 +30,7 @@ serde = { version = "1", features = ["derive"] } tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-axum-server = { version = "3.0.0-develop", path = "../axum-server" } torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } +torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } tower = { version = "0", features = ["timeout"] } @@ -37,5 +38,13 @@ tower-http = { version = "0", features = ["compression-full", 
"cors", "propagate tracing = "0" [dev-dependencies] +local-ip-address = "0" +percent-encoding = "2" +rand = "0" +serde_bencode = "0" +serde_bytes = "0" +serde_repr = "0" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } +uuid = { version = "1", features = ["v4"] } +zerocopy = "0.7" diff --git a/tests/servers/http/environment.rs b/packages/axum-http-tracker-server/src/environment.rs similarity index 87% rename from tests/servers/http/environment.rs rename to packages/axum-http-tracker-server/src/environment.rs index f79d42b36..45cc276fd 100644 --- a/tests/servers/http/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -4,12 +4,15 @@ use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::container::TrackerCoreContainer; use futures::executor::block_on; -use torrust_axum_http_tracker_server::server::{HttpServer, Launcher, Running, Stopped}; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; +use crate::server::{HttpServer, Launcher, Running, Stopped}; + +pub type Started = Environment; + pub struct Environment { pub container: Arc, pub registar: Registar, @@ -28,7 +31,11 @@ impl Environment { } impl Environment { + /// # Panics + /// + /// Will panic if it fails to make the TSL config from the configuration. #[allow(dead_code)] + #[must_use] pub fn new(configuration: &Arc) -> Self { initialize_global_services(configuration); @@ -50,6 +57,9 @@ impl Environment { } } + /// # Panics + /// + /// Will panic if the server fails to start. 
#[allow(dead_code)] pub async fn start(self) -> Environment { Environment { @@ -69,6 +79,9 @@ impl Environment { Environment::::new(configuration).start().await } + /// # Panics + /// + /// Will panic if the server fails to stop. pub async fn stop(self) -> Environment { Environment { container: self.container, @@ -77,6 +90,7 @@ impl Environment { } } + #[must_use] pub fn bind_address(&self) -> &std::net::SocketAddr { &self.server.state.binding } @@ -88,6 +102,10 @@ pub struct EnvContainer { } impl EnvContainer { + /// # Panics + /// + /// Will panic if the configuration is missing the HTTP tracker configuration. + #[must_use] pub fn initialize(configuration: &Configuration) -> Self { let core_config = Arc::new(configuration.core.clone()); let http_tracker_config = configuration diff --git a/packages/axum-http-tracker-server/src/lib.rs b/packages/axum-http-tracker-server/src/lib.rs index 7f6bec892..2bb6978b7 100644 --- a/packages/axum-http-tracker-server/src/lib.rs +++ b/packages/axum-http-tracker-server/src/lib.rs @@ -303,11 +303,12 @@ //! //! - [Bencode](https://en.wikipedia.org/wiki/Bencode). //! - [Bencode to Json Online converter](https://chocobo1.github.io/bencode_online). -use serde::{Deserialize, Serialize}; - +pub mod environment; pub mod server; pub mod v1; +use serde::{Deserialize, Serialize}; + pub const HTTP_TRACKER_LOG_TARGET: &str = "HTTP TRACKER"; /// The version of the HTTP tracker. 
diff --git a/tests/common/fixtures.rs b/packages/axum-http-tracker-server/tests/common/fixtures.rs similarity index 83% rename from tests/common/fixtures.rs rename to packages/axum-http-tracker-server/tests/common/fixtures.rs index fa6884425..995079adf 100644 --- a/tests/common/fixtures.rs +++ b/packages/axum-http-tracker-server/tests/common/fixtures.rs @@ -1,6 +1,6 @@ use bittorrent_primitives::info_hash::InfoHash; +use rand::prelude::*; -#[allow(dead_code)] pub fn invalid_info_hashes() -> Vec { [ "0".to_string(), @@ -15,8 +15,8 @@ pub fn invalid_info_hashes() -> Vec { /// Returns a random info hash. pub fn random_info_hash() -> InfoHash { - let mut rng = rand::rng(); - let random_bytes: [u8; 20] = rand::Rng::random(&mut rng); + let mut rng = rand::thread_rng(); + let random_bytes: [u8; 20] = rng.gen(); InfoHash::from_bytes(&random_bytes) } diff --git a/packages/axum-http-tracker-server/tests/common/http.rs b/packages/axum-http-tracker-server/tests/common/http.rs new file mode 100644 index 000000000..d682027fd --- /dev/null +++ b/packages/axum-http-tracker-server/tests/common/http.rs @@ -0,0 +1,54 @@ +pub type ReqwestQuery = Vec; +pub type ReqwestQueryParam = (String, String); + +/// URL Query component +#[derive(Default, Debug)] +pub struct Query { + params: Vec, +} + +impl Query { + pub fn empty() -> Self { + Self { params: vec![] } + } + + pub fn params(params: Vec) -> Self { + Self { params } + } + + pub fn add_param(&mut self, param: QueryParam) { + self.params.push(param); + } +} + +impl From for ReqwestQuery { + fn from(url_search_params: Query) -> Self { + url_search_params + .params + .iter() + .map(|param| ReqwestQueryParam::from((*param).clone())) + .collect() + } +} + +/// URL query param +#[derive(Clone, Debug)] +pub struct QueryParam { + name: String, + value: String, +} + +impl QueryParam { + pub fn new(name: &str, value: &str) -> Self { + Self { + name: name.to_string(), + value: value.to_string(), + } + } +} + +impl From for ReqwestQueryParam { + 
fn from(param: QueryParam) -> Self { + (param.name, param.value) + } +} diff --git a/packages/axum-http-tracker-server/tests/common/mod.rs b/packages/axum-http-tracker-server/tests/common/mod.rs new file mode 100644 index 000000000..810620359 --- /dev/null +++ b/packages/axum-http-tracker-server/tests/common/mod.rs @@ -0,0 +1,2 @@ +pub mod fixtures; +pub mod http; diff --git a/packages/axum-http-tracker-server/tests/integration.rs b/packages/axum-http-tracker-server/tests/integration.rs new file mode 100644 index 000000000..70b3aeb89 --- /dev/null +++ b/packages/axum-http-tracker-server/tests/integration.rs @@ -0,0 +1,20 @@ +//! Integration tests. +//! +//! ```text +//! cargo test --test integration +//! ``` +mod common; +mod server; + +use torrust_tracker_clock::clock; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; diff --git a/tests/servers/http/asserts.rs b/packages/axum-http-tracker-server/tests/server/asserts.rs similarity index 99% rename from tests/servers/http/asserts.rs rename to packages/axum-http-tracker-server/tests/server/asserts.rs index a68a1896e..7173aa8a9 100644 --- a/tests/servers/http/asserts.rs +++ b/packages/axum-http-tracker-server/tests/server/asserts.rs @@ -4,7 +4,7 @@ use reqwest::Response; use super::responses::announce::{Announce, Compact, DeserializedCompact}; use super::responses::scrape; -use crate::servers::http::responses::error::Error; +use crate::server::responses::error::Error; pub fn assert_bencoded_error(response_text: &String, expected_failure_reason: &str, location: &'static Location<'static>) { let error_failure_reason = serde_bencode::from_str::(response_text) diff --git a/tests/servers/http/client.rs b/packages/axum-http-tracker-server/tests/server/client.rs similarity index 
100% rename from tests/servers/http/client.rs rename to packages/axum-http-tracker-server/tests/server/client.rs diff --git a/tests/servers/http/mod.rs b/packages/axum-http-tracker-server/tests/server/mod.rs similarity index 82% rename from tests/servers/http/mod.rs rename to packages/axum-http-tracker-server/tests/server/mod.rs index 37d4dcd3d..31b48b2f0 100644 --- a/tests/servers/http/mod.rs +++ b/packages/axum-http-tracker-server/tests/server/mod.rs @@ -1,14 +1,10 @@ pub mod asserts; pub mod client; -pub mod environment; pub mod requests; pub mod responses; pub mod v1; use percent_encoding::NON_ALPHANUMERIC; -use torrust_axum_http_tracker_server::server; - -pub type Started = environment::Environment; pub type ByteArray20 = [u8; 20]; diff --git a/tests/servers/http/requests/announce.rs b/packages/axum-http-tracker-server/tests/server/requests/announce.rs similarity index 99% rename from tests/servers/http/requests/announce.rs rename to packages/axum-http-tracker-server/tests/server/requests/announce.rs index 740c86d38..0775de7e4 100644 --- a/tests/servers/http/requests/announce.rs +++ b/packages/axum-http-tracker-server/tests/server/requests/announce.rs @@ -6,7 +6,7 @@ use aquatic_udp_protocol::PeerId; use bittorrent_primitives::info_hash::InfoHash; use serde_repr::Serialize_repr; -use crate::servers::http::{percent_encode_byte_array, ByteArray20}; +use crate::server::{percent_encode_byte_array, ByteArray20}; pub struct Query { pub info_hash: ByteArray20, diff --git a/tests/servers/http/requests/mod.rs b/packages/axum-http-tracker-server/tests/server/requests/mod.rs similarity index 100% rename from tests/servers/http/requests/mod.rs rename to packages/axum-http-tracker-server/tests/server/requests/mod.rs diff --git a/tests/servers/http/requests/scrape.rs b/packages/axum-http-tracker-server/tests/server/requests/scrape.rs similarity index 97% rename from tests/servers/http/requests/scrape.rs rename to 
packages/axum-http-tracker-server/tests/server/requests/scrape.rs index ecef541f1..afd8cfbe3 100644 --- a/tests/servers/http/requests/scrape.rs +++ b/packages/axum-http-tracker-server/tests/server/requests/scrape.rs @@ -3,7 +3,7 @@ use std::str::FromStr; use bittorrent_primitives::info_hash::InfoHash; -use crate::servers::http::{percent_encode_byte_array, ByteArray20}; +use crate::server::{percent_encode_byte_array, ByteArray20}; pub struct Query { pub info_hash: Vec, diff --git a/tests/servers/http/responses/announce.rs b/packages/axum-http-tracker-server/tests/server/responses/announce.rs similarity index 100% rename from tests/servers/http/responses/announce.rs rename to packages/axum-http-tracker-server/tests/server/responses/announce.rs diff --git a/tests/servers/http/responses/error.rs b/packages/axum-http-tracker-server/tests/server/responses/error.rs similarity index 100% rename from tests/servers/http/responses/error.rs rename to packages/axum-http-tracker-server/tests/server/responses/error.rs diff --git a/tests/servers/http/responses/mod.rs b/packages/axum-http-tracker-server/tests/server/responses/mod.rs similarity index 100% rename from tests/servers/http/responses/mod.rs rename to packages/axum-http-tracker-server/tests/server/responses/mod.rs diff --git a/tests/servers/http/responses/scrape.rs b/packages/axum-http-tracker-server/tests/server/responses/scrape.rs similarity index 99% rename from tests/servers/http/responses/scrape.rs rename to packages/axum-http-tracker-server/tests/server/responses/scrape.rs index fc741cbf4..5de15c731 100644 --- a/tests/servers/http/responses/scrape.rs +++ b/packages/axum-http-tracker-server/tests/server/responses/scrape.rs @@ -4,7 +4,7 @@ use std::str; use serde::{Deserialize, Serialize}; use serde_bencode::value::Value; -use crate::servers::http::{ByteArray20, InfoHash}; +use crate::server::{ByteArray20, InfoHash}; #[derive(Debug, PartialEq, Default)] pub struct Response { diff --git 
a/tests/servers/http/v1/contract.rs b/packages/axum-http-tracker-server/tests/server/v1/contract.rs similarity index 94% rename from tests/servers/http/v1/contract.rs rename to packages/axum-http-tracker-server/tests/server/v1/contract.rs index 084766593..b62920234 100644 --- a/tests/servers/http/v1/contract.rs +++ b/packages/axum-http-tracker-server/tests/server/v1/contract.rs @@ -1,7 +1,5 @@ -use torrust_tracker_test_helpers::configuration; - -use crate::common::logging; -use crate::servers::http::Started; +use torrust_axum_http_tracker_server::environment::Started; +use torrust_tracker_test_helpers::{configuration, logging}; #[tokio::test] async fn environment_should_be_started_and_stopped() { @@ -14,12 +12,11 @@ async fn environment_should_be_started_and_stopped() { mod for_all_config_modes { + use torrust_axum_http_tracker_server::environment::Started; use torrust_axum_http_tracker_server::v1::handlers::health_check::{Report, Status}; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker_test_helpers::{configuration, logging}; - use crate::common::logging; - use crate::servers::http::client::Client; - use crate::servers::http::Started; + use crate::server::client::Client; #[tokio::test] async fn health_check_endpoint_should_return_ok_if_the_http_tracker_is_running() { @@ -37,13 +34,12 @@ mod for_all_config_modes { } mod and_running_on_reverse_proxy { - use torrust_tracker_test_helpers::configuration; + use torrust_axum_http_tracker_server::environment::Started; + use torrust_tracker_test_helpers::{configuration, logging}; - use crate::common::logging; - use crate::servers::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; - use crate::servers::http::client::Client; - use crate::servers::http::requests::announce::QueryBuilder; - use crate::servers::http::Started; + use crate::server::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; + use 
crate::server::client::Client; + use crate::server::requests::announce::QueryBuilder; #[tokio::test] async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { @@ -102,20 +98,20 @@ mod for_all_config_modes { use local_ip_address::local_ip; use reqwest::{Response, StatusCode}; use tokio::net::TcpListener; + use torrust_axum_http_tracker_server::environment::Started; use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker_test_helpers::{configuration, logging}; use crate::common::fixtures::invalid_info_hashes; - use crate::common::logging; - use crate::servers::http::asserts::{ + use crate::server::asserts::{ assert_announce_response, assert_bad_announce_request_error_response, assert_cannot_parse_query_param_error_response, assert_cannot_parse_query_params_error_response, assert_compact_announce_response, assert_empty_announce_response, assert_is_announce_response, assert_missing_query_params_for_announce_request_error_response, }; - use crate::servers::http::client::Client; - use crate::servers::http::requests::announce::{Compact, QueryBuilder}; - use crate::servers::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; - use crate::servers::http::{responses, Started}; + use crate::server::client::Client; + use crate::server::requests::announce::{Compact, QueryBuilder}; + use crate::server::responses; + use crate::server::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; #[tokio::test] async fn it_should_start_and_stop() { @@ -1028,19 +1024,19 @@ mod for_all_config_modes { use aquatic_udp_protocol::PeerId; use bittorrent_primitives::info_hash::InfoHash; use tokio::net::TcpListener; + use torrust_axum_http_tracker_server::environment::Started; use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use torrust_tracker_test_helpers::configuration; + use 
torrust_tracker_test_helpers::{configuration, logging}; use crate::common::fixtures::invalid_info_hashes; - use crate::common::logging; - use crate::servers::http::asserts::{ + use crate::server::asserts::{ assert_cannot_parse_query_params_error_response, assert_missing_query_params_for_scrape_request_error_response, assert_scrape_response, }; - use crate::servers::http::client::Client; - use crate::servers::http::requests::scrape::QueryBuilder; - use crate::servers::http::responses::scrape::{self, File, ResponseBuilder}; - use crate::servers::http::{requests, Started}; + use crate::server::client::Client; + use crate::server::requests; + use crate::server::requests::scrape::QueryBuilder; + use crate::server::responses::scrape::{self, File, ResponseBuilder}; #[tokio::test] #[allow(dead_code)] @@ -1278,15 +1274,15 @@ mod configured_as_whitelisted { use std::str::FromStr; use bittorrent_primitives::info_hash::InfoHash; - use torrust_tracker_test_helpers::configuration; + use torrust_axum_http_tracker_server::environment::Started; + use torrust_tracker_test_helpers::logging::logs_contains_a_line_with; + use torrust_tracker_test_helpers::{configuration, logging}; use uuid::Uuid; use crate::common::fixtures::random_info_hash; - use crate::common::logging::{self, logs_contains_a_line_with}; - use crate::servers::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; - use crate::servers::http::client::Client; - use crate::servers::http::requests::announce::QueryBuilder; - use crate::servers::http::Started; + use crate::server::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; + use crate::server::client::Client; + use crate::server::requests::announce::QueryBuilder; #[tokio::test] async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { @@ -1345,15 +1341,16 @@ mod configured_as_whitelisted { use aquatic_udp_protocol::PeerId; use bittorrent_primitives::info_hash::InfoHash; + use 
torrust_axum_http_tracker_server::environment::Started; use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker_test_helpers::logging::logs_contains_a_line_with; + use torrust_tracker_test_helpers::{configuration, logging}; use crate::common::fixtures::random_info_hash; - use crate::common::logging::{self, logs_contains_a_line_with}; - use crate::servers::http::asserts::assert_scrape_response; - use crate::servers::http::client::Client; - use crate::servers::http::responses::scrape::{File, ResponseBuilder}; - use crate::servers::http::{requests, Started}; + use crate::server::asserts::assert_scrape_response; + use crate::server::client::Client; + use crate::server::requests; + use crate::server::responses::scrape::{File, ResponseBuilder}; #[tokio::test] async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { @@ -1448,13 +1445,12 @@ mod configured_as_private { use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::authentication::Key; - use torrust_tracker_test_helpers::configuration; + use torrust_axum_http_tracker_server::environment::Started; + use torrust_tracker_test_helpers::{configuration, logging}; - use crate::common::logging; - use crate::servers::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; - use crate::servers::http::client::Client; - use crate::servers::http::requests::announce::QueryBuilder; - use crate::servers::http::Started; + use crate::server::asserts::{assert_authentication_error_response, assert_is_announce_response}; + use crate::server::client::Client; + use crate::server::requests::announce::QueryBuilder; #[tokio::test] async fn should_respond_to_authenticated_peers() { @@ -1540,14 +1536,14 @@ mod configured_as_private { use aquatic_udp_protocol::PeerId; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::authentication::Key; + use 
torrust_axum_http_tracker_server::environment::Started; use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker_test_helpers::{configuration, logging}; - use crate::common::logging; - use crate::servers::http::asserts::{assert_authentication_error_response, assert_scrape_response}; - use crate::servers::http::client::Client; - use crate::servers::http::responses::scrape::{File, ResponseBuilder}; - use crate::servers::http::{requests, Started}; + use crate::server::asserts::{assert_authentication_error_response, assert_scrape_response}; + use crate::server::client::Client; + use crate::server::requests; + use crate::server::responses::scrape::{File, ResponseBuilder}; #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { diff --git a/tests/servers/http/v1/mod.rs b/packages/axum-http-tracker-server/tests/server/v1/mod.rs similarity index 100% rename from tests/servers/http/v1/mod.rs rename to packages/axum-http-tracker-server/tests/server/v1/mod.rs diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 9589ccb1e..c6777573d 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,5 +1,4 @@ pub mod clock; -pub mod fixtures; pub mod http; pub mod logging; pub mod udp; diff --git a/tests/servers/health_check_api/contract.rs b/tests/servers/health_check_api/contract.rs index 875510db3..473d52812 100644 --- a/tests/servers/health_check_api/contract.rs +++ b/tests/servers/health_check_api/contract.rs @@ -116,7 +116,6 @@ mod http { use crate::common::logging; use crate::servers::health_check_api::client::get; - use crate::servers::http; #[tokio::test] pub(crate) async fn it_should_return_good_health_for_http_service() { @@ -124,7 +123,7 @@ mod http { let configuration = Arc::new(configuration::ephemeral()); - let service = http::Started::new(&configuration).await; + let service = torrust_axum_http_tracker_server::environment::Started::new(&configuration).await; 
let registar = service.registar.clone(); @@ -170,7 +169,7 @@ mod http { let configuration = Arc::new(configuration::ephemeral()); - let service = http::Started::new(&configuration).await; + let service = torrust_axum_http_tracker_server::environment::Started::new(&configuration).await; let binding = *service.bind_address(); diff --git a/tests/servers/http/connection_info.rs b/tests/servers/http/connection_info.rs deleted file mode 100644 index 91486a3a7..000000000 --- a/tests/servers/http/connection_info.rs +++ /dev/null @@ -1,16 +0,0 @@ -use bittorrent_tracker_core::authentication::Key; - -#[derive(Clone, Debug)] -pub struct ConnectionInfo { - pub bind_address: String, - pub key: Option, -} - -impl ConnectionInfo { - pub fn anonymous(bind_address: &str) -> Self { - Self { - bind_address: bind_address.to_string(), - key: None, - } - } -} diff --git a/tests/servers/mod.rs b/tests/servers/mod.rs index 627073101..5aa096824 100644 --- a/tests/servers/mod.rs +++ b/tests/servers/mod.rs @@ -1,2 +1 @@ mod health_check_api; -mod http; From f783dbddf1859ba123ee31df12a84a00bbc88313 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Feb 2025 12:16:07 +0000 Subject: [PATCH 329/802] refactor: [#1316] move the rest of the Health Check API integration tests to the server pkg It's now possible becuase test environments have been exposed in their server packages. 
--- Cargo.lock | 4 + .../axum-health-check-api-server/Cargo.toml | 4 + .../tests/server/contract.rs | 309 +++++++++++++++++ tests/common/logging.rs | 156 --------- tests/common/mod.rs | 1 - tests/integration.rs | 1 - tests/servers/health_check_api/client.rs | 5 - tests/servers/health_check_api/contract.rs | 311 ------------------ tests/servers/health_check_api/mod.rs | 2 - tests/servers/mod.rs | 1 - 10 files changed, 317 insertions(+), 477 deletions(-) delete mode 100644 tests/common/logging.rs delete mode 100644 tests/servers/health_check_api/client.rs delete mode 100644 tests/servers/health_check_api/contract.rs delete mode 100644 tests/servers/health_check_api/mod.rs delete mode 100644 tests/servers/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 8cc377fbb..512383460 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4347,11 +4347,15 @@ dependencies = [ "serde", "serde_json", "tokio", + "torrust-axum-health-check-api-server", + "torrust-axum-http-tracker-server", "torrust-axum-server", + "torrust-axum-tracker-api-server", "torrust-server-lib", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-test-helpers", + "torrust-udp-tracker-server", "tower-http", "tracing", "tracing-subscriber", diff --git a/packages/axum-health-check-api-server/Cargo.toml b/packages/axum-health-check-api-server/Cargo.toml index 17c269aae..928393bee 100644 --- a/packages/axum-health-check-api-server/Cargo.toml +++ b/packages/axum-health-check-api-server/Cargo.toml @@ -29,6 +29,10 @@ tracing = "0" [dev-dependencies] reqwest = { version = "0", features = ["json"] } +torrust-axum-health-check-api-server = { version = "3.0.0-develop", path = "../axum-health-check-api-server" } +torrust-axum-http-tracker-server = { version = "3.0.0-develop", path = "../axum-http-tracker-server" } +torrust-axum-tracker-api-server = { version = "3.0.0-develop", path = "../axum-tracker-api-server" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } 
torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } +torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } tracing-subscriber = { version = "0", features = ["json"] } diff --git a/packages/axum-health-check-api-server/tests/server/contract.rs b/packages/axum-health-check-api-server/tests/server/contract.rs index 2bd5d292e..96a03cca4 100644 --- a/packages/axum-health-check-api-server/tests/server/contract.rs +++ b/packages/axum-health-check-api-server/tests/server/contract.rs @@ -27,3 +27,312 @@ async fn health_check_endpoint_should_return_status_ok_when_there_is_no_services env.stop().await.expect("it should stop the service"); } + +mod api { + use std::sync::Arc; + + use torrust_axum_health_check_api_server::environment::Started; + use torrust_axum_health_check_api_server::resources::{Report, Status}; + use torrust_tracker_test_helpers::{configuration, logging}; + + use crate::server::client::get; + + #[tokio::test] + pub(crate) async fn it_should_return_good_health_for_api_service() { + logging::setup(); + + let configuration = Arc::new(configuration::ephemeral()); + + let service = torrust_axum_tracker_api_server::environment::Started::new(&configuration).await; + + let registar = service.registar.clone(); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Ok); + assert_eq!(report.message, String::new()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, service.bind_address()); + 
+ assert_eq!(details.result, Ok("200 OK".to_string())); + + assert_eq!( + details.info, + format!( + "checking api health check at: http://{}/api/health_check", // DevSkim: ignore DS137138 + service.bind_address() + ) + ); + + env.stop().await.expect("it should stop the service"); + } + + service.stop().await; + } + + #[tokio::test] + pub(crate) async fn it_should_return_error_when_api_service_was_stopped_after_registration() { + logging::setup(); + + let configuration = Arc::new(configuration::ephemeral()); + + let service = torrust_axum_tracker_api_server::environment::Started::new(&configuration).await; + + let binding = service.bind_address(); + + let registar = service.registar.clone(); + + service.server.stop().await.expect("it should stop udp server"); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Error); + assert_eq!(report.message, "health check failed".to_string()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, binding); + assert!( + details + .result + .as_ref() + .is_err_and(|e| e.contains("error sending request for url")), + "Expected to contain, \"error sending request for url\", but have message \"{:?}\".", + details.result + ); + assert_eq!( + details.info, + format!("checking api health check at: http://{binding}/api/health_check") // DevSkim: ignore DS137138 + ); + + env.stop().await.expect("it should stop the service"); + } + } +} + +mod http { + use std::sync::Arc; + + use 
torrust_axum_health_check_api_server::environment::Started; + use torrust_axum_health_check_api_server::resources::{Report, Status}; + use torrust_tracker_test_helpers::{configuration, logging}; + + use crate::server::client::get; + + #[tokio::test] + pub(crate) async fn it_should_return_good_health_for_http_service() { + logging::setup(); + + let configuration = Arc::new(configuration::ephemeral()); + + let service = torrust_axum_http_tracker_server::environment::Started::new(&configuration).await; + + let registar = service.registar.clone(); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Ok); + assert_eq!(report.message, String::new()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, *service.bind_address()); + assert_eq!(details.result, Ok("200 OK".to_string())); + + assert_eq!( + details.info, + format!( + "checking http tracker health check at: http://{}/health_check", // DevSkim: ignore DS137138 + service.bind_address() + ) + ); + + env.stop().await.expect("it should stop the service"); + } + + service.stop().await; + } + + #[tokio::test] + pub(crate) async fn it_should_return_error_when_http_service_was_stopped_after_registration() { + logging::setup(); + + let configuration = Arc::new(configuration::ephemeral()); + + let service = torrust_axum_http_tracker_server::environment::Started::new(&configuration).await; + + let binding = *service.bind_address(); + + let registar = service.registar.clone(); + + service.server.stop().await.expect("it 
should stop udp server"); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Error); + assert_eq!(report.message, "health check failed".to_string()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, binding); + assert!( + details + .result + .as_ref() + .is_err_and(|e| e.contains("error sending request for url")), + "Expected to contain, \"error sending request for url\", but have message \"{:?}\".", + details.result + ); + assert_eq!( + details.info, + format!("checking http tracker health check at: http://{binding}/health_check") // DevSkim: ignore DS137138 + ); + + env.stop().await.expect("it should stop the service"); + } + } +} + +mod udp { + use std::sync::Arc; + + use torrust_axum_health_check_api_server::environment::Started; + use torrust_axum_health_check_api_server::resources::{Report, Status}; + use torrust_tracker_test_helpers::{configuration, logging}; + + use crate::server::client::get; + + #[tokio::test] + pub(crate) async fn it_should_return_good_health_for_udp_service() { + logging::setup(); + + let configuration = Arc::new(configuration::ephemeral()); + + let service = torrust_udp_tracker_server::environment::Started::new(&configuration).await; + + let registar = service.registar.clone(); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 + + 
assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Ok); + assert_eq!(report.message, String::new()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, service.bind_address()); + assert_eq!(details.result, Ok("Connected".to_string())); + + assert_eq!( + details.info, + format!("checking the udp tracker health check at: {}", service.bind_address()) + ); + + env.stop().await.expect("it should stop the service"); + } + + service.stop().await; + } + + #[tokio::test] + pub(crate) async fn it_should_return_error_when_udp_service_was_stopped_after_registration() { + logging::setup(); + + let configuration = Arc::new(configuration::ephemeral()); + + let service = torrust_udp_tracker_server::environment::Started::new(&configuration).await; + + let binding = service.bind_address(); + + let registar = service.registar.clone(); + + service.server.stop().await.expect("it should stop udp server"); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Error); + assert_eq!(report.message, "health check failed".to_string()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, binding); + assert_eq!(details.result, Err("Timed Out".to_string())); + assert_eq!(details.info, format!("checking the udp 
tracker health check at: {binding}")); + + env.stop().await.expect("it should stop the service"); + } + } +} diff --git a/tests/common/logging.rs b/tests/common/logging.rs deleted file mode 100644 index 564074f3e..000000000 --- a/tests/common/logging.rs +++ /dev/null @@ -1,156 +0,0 @@ -//! Setup for logging in tests. -use std::collections::VecDeque; -use std::io; -use std::sync::{Mutex, MutexGuard, Once, OnceLock}; - -use torrust_tracker_configuration::logging::TraceStyle; -use tracing::level_filters::LevelFilter; -use tracing_subscriber::fmt::MakeWriter; - -static INIT: Once = Once::new(); - -/// A global buffer containing the latest lines captured from logs. -#[doc(hidden)] -pub fn captured_logs_buffer() -> &'static Mutex { - static CAPTURED_LOGS_GLOBAL_BUFFER: OnceLock> = OnceLock::new(); - CAPTURED_LOGS_GLOBAL_BUFFER.get_or_init(|| Mutex::new(CircularBuffer::new(10000, 200))) -} - -pub fn setup() { - INIT.call_once(|| { - tracing_init(LevelFilter::ERROR, &TraceStyle::Default); - }); -} - -fn tracing_init(level_filter: LevelFilter, style: &TraceStyle) { - let mock_writer = LogCapturer::new(captured_logs_buffer()); - - let builder = tracing_subscriber::fmt() - .with_max_level(level_filter) - .with_ansi(true) - .with_test_writer() - .with_writer(mock_writer); - - let () = match style { - TraceStyle::Default => builder.init(), - TraceStyle::Pretty(display_filename) => builder.pretty().with_file(*display_filename).init(), - TraceStyle::Compact => builder.compact().init(), - TraceStyle::Json => builder.json().init(), - }; - - tracing::info!("Logging initialized"); -} - -/// It returns true is there is a log line containing all the texts passed. -/// -/// # Panics -/// -/// Will panic if it can't get the lock for the global buffer or convert it into -/// a vec. 
-#[must_use] -#[allow(dead_code)] -pub fn logs_contains_a_line_with(texts: &[&str]) -> bool { - // code-review: we can search directly in the buffer instead of converting - // the buffer into a string but that would slow down the tests because - // cloning should be faster that locking the buffer for searching. - // Because the buffer is not big. - let logs = String::from_utf8(captured_logs_buffer().lock().unwrap().as_vec()).unwrap(); - - for line in logs.split('\n') { - if contains(line, texts) { - return true; - } - } - - false -} - -#[allow(dead_code)] -fn contains(text: &str, texts: &[&str]) -> bool { - texts.iter().all(|&word| text.contains(word)) -} - -/// A tracing writer which captures the latests logs lines into a buffer. -/// It's used to capture the logs in the tests. -#[derive(Debug)] -pub struct LogCapturer<'a> { - logs: &'a Mutex, -} - -impl<'a> LogCapturer<'a> { - pub fn new(buf: &'a Mutex) -> Self { - Self { logs: buf } - } - - fn buf(&self) -> io::Result> { - self.logs.lock().map_err(|_| io::Error::from(io::ErrorKind::Other)) - } -} - -impl io::Write for LogCapturer<'_> { - fn write(&mut self, buf: &[u8]) -> io::Result { - print!("{}", String::from_utf8(buf.to_vec()).unwrap()); - - let mut target = self.buf()?; - - target.write(buf) - } - - fn flush(&mut self) -> io::Result<()> { - self.buf()?.flush() - } -} - -impl MakeWriter<'_> for LogCapturer<'_> { - type Writer = Self; - - fn make_writer(&self) -> Self::Writer { - LogCapturer::new(self.logs) - } -} - -#[derive(Debug)] -pub struct CircularBuffer { - max_size: usize, - buffer: VecDeque, -} - -impl CircularBuffer { - #[must_use] - pub fn new(max_lines: usize, average_line_size: usize) -> Self { - Self { - max_size: max_lines * average_line_size, - buffer: VecDeque::with_capacity(max_lines * average_line_size), - } - } - - /// # Errors - /// - /// Won't return any error. 
- #[allow(clippy::unnecessary_wraps)] - pub fn write(&mut self, buf: &[u8]) -> io::Result { - for &byte in buf { - if self.buffer.len() == self.max_size { - // Remove oldest byte to make space - self.buffer.pop_front(); - } - self.buffer.push_back(byte); - } - - Ok(buf.len()) - } - - /// # Errors - /// - /// Won't return any error. - #[allow(clippy::unnecessary_wraps)] - #[allow(clippy::unused_self)] - pub fn flush(&mut self) -> io::Result<()> { - Ok(()) - } - - #[must_use] - pub fn as_vec(&self) -> Vec { - self.buffer.iter().copied().collect() - } -} diff --git a/tests/common/mod.rs b/tests/common/mod.rs index c6777573d..6cb94892b 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,4 +1,3 @@ pub mod clock; pub mod http; -pub mod logging; pub mod udp; diff --git a/tests/integration.rs b/tests/integration.rs index 8e3d46826..18414db89 100644 --- a/tests/integration.rs +++ b/tests/integration.rs @@ -6,7 +6,6 @@ use torrust_tracker_clock::clock; mod common; -mod servers; /// This code needs to be copied into each crate. /// Working version, for production. 
diff --git a/tests/servers/health_check_api/client.rs b/tests/servers/health_check_api/client.rs deleted file mode 100644 index 3d8bdc7d6..000000000 --- a/tests/servers/health_check_api/client.rs +++ /dev/null @@ -1,5 +0,0 @@ -use reqwest::Response; - -pub async fn get(path: &str) -> Response { - reqwest::Client::builder().build().unwrap().get(path).send().await.unwrap() -} diff --git a/tests/servers/health_check_api/contract.rs b/tests/servers/health_check_api/contract.rs deleted file mode 100644 index 473d52812..000000000 --- a/tests/servers/health_check_api/contract.rs +++ /dev/null @@ -1,311 +0,0 @@ -mod api { - use std::sync::Arc; - - use torrust_axum_health_check_api_server::environment::Started; - use torrust_axum_health_check_api_server::resources::{Report, Status}; - use torrust_tracker_test_helpers::configuration; - - use crate::common::logging; - use crate::servers::health_check_api::client::get; - - #[tokio::test] - pub(crate) async fn it_should_return_good_health_for_api_service() { - logging::setup(); - - let configuration = Arc::new(configuration::ephemeral()); - - let service = torrust_axum_tracker_api_server::environment::Started::new(&configuration).await; - - let registar = service.registar.clone(); - - { - let config = configuration.health_check_api.clone(); - let env = Started::new(&config.into(), registar).await; - - let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 - - assert_eq!(response.status(), 200); - assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); - - let report: Report = response - .json() - .await - .expect("it should be able to get the report from the json"); - - assert_eq!(report.status, Status::Ok); - assert_eq!(report.message, String::new()); - - let details = report.details.first().expect("it should have some details"); - - assert_eq!(details.binding, service.bind_address()); - - assert_eq!(details.result, Ok("200 OK".to_string())); - - 
assert_eq!( - details.info, - format!( - "checking api health check at: http://{}/api/health_check", // DevSkim: ignore DS137138 - service.bind_address() - ) - ); - - env.stop().await.expect("it should stop the service"); - } - - service.stop().await; - } - - #[tokio::test] - pub(crate) async fn it_should_return_error_when_api_service_was_stopped_after_registration() { - logging::setup(); - - let configuration = Arc::new(configuration::ephemeral()); - - let service = torrust_axum_tracker_api_server::environment::Started::new(&configuration).await; - - let binding = service.bind_address(); - - let registar = service.registar.clone(); - - service.server.stop().await.expect("it should stop udp server"); - - { - let config = configuration.health_check_api.clone(); - let env = Started::new(&config.into(), registar).await; - - let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 - - assert_eq!(response.status(), 200); - assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); - - let report: Report = response - .json() - .await - .expect("it should be able to get the report from the json"); - - assert_eq!(report.status, Status::Error); - assert_eq!(report.message, "health check failed".to_string()); - - let details = report.details.first().expect("it should have some details"); - - assert_eq!(details.binding, binding); - assert!( - details - .result - .as_ref() - .is_err_and(|e| e.contains("error sending request for url")), - "Expected to contain, \"error sending request for url\", but have message \"{:?}\".", - details.result - ); - assert_eq!( - details.info, - format!("checking api health check at: http://{binding}/api/health_check") // DevSkim: ignore DS137138 - ); - - env.stop().await.expect("it should stop the service"); - } - } -} - -mod http { - use std::sync::Arc; - - use torrust_axum_health_check_api_server::environment::Started; - use 
torrust_axum_health_check_api_server::resources::{Report, Status}; - use torrust_tracker_test_helpers::configuration; - - use crate::common::logging; - use crate::servers::health_check_api::client::get; - - #[tokio::test] - pub(crate) async fn it_should_return_good_health_for_http_service() { - logging::setup(); - - let configuration = Arc::new(configuration::ephemeral()); - - let service = torrust_axum_http_tracker_server::environment::Started::new(&configuration).await; - - let registar = service.registar.clone(); - - { - let config = configuration.health_check_api.clone(); - let env = Started::new(&config.into(), registar).await; - - let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 - - assert_eq!(response.status(), 200); - assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); - - let report: Report = response - .json() - .await - .expect("it should be able to get the report from the json"); - - assert_eq!(report.status, Status::Ok); - assert_eq!(report.message, String::new()); - - let details = report.details.first().expect("it should have some details"); - - assert_eq!(details.binding, *service.bind_address()); - assert_eq!(details.result, Ok("200 OK".to_string())); - - assert_eq!( - details.info, - format!( - "checking http tracker health check at: http://{}/health_check", // DevSkim: ignore DS137138 - service.bind_address() - ) - ); - - env.stop().await.expect("it should stop the service"); - } - - service.stop().await; - } - - #[tokio::test] - pub(crate) async fn it_should_return_error_when_http_service_was_stopped_after_registration() { - logging::setup(); - - let configuration = Arc::new(configuration::ephemeral()); - - let service = torrust_axum_http_tracker_server::environment::Started::new(&configuration).await; - - let binding = *service.bind_address(); - - let registar = service.registar.clone(); - - service.server.stop().await.expect("it should stop udp server"); - 
- { - let config = configuration.health_check_api.clone(); - let env = Started::new(&config.into(), registar).await; - - let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 - - assert_eq!(response.status(), 200); - assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); - - let report: Report = response - .json() - .await - .expect("it should be able to get the report from the json"); - - assert_eq!(report.status, Status::Error); - assert_eq!(report.message, "health check failed".to_string()); - - let details = report.details.first().expect("it should have some details"); - - assert_eq!(details.binding, binding); - assert!( - details - .result - .as_ref() - .is_err_and(|e| e.contains("error sending request for url")), - "Expected to contain, \"error sending request for url\", but have message \"{:?}\".", - details.result - ); - assert_eq!( - details.info, - format!("checking http tracker health check at: http://{binding}/health_check") // DevSkim: ignore DS137138 - ); - - env.stop().await.expect("it should stop the service"); - } - } -} - -mod udp { - use std::sync::Arc; - - use torrust_axum_health_check_api_server::environment::Started; - use torrust_axum_health_check_api_server::resources::{Report, Status}; - use torrust_tracker_test_helpers::configuration; - - use crate::common::logging; - use crate::servers::health_check_api::client::get; - - #[tokio::test] - pub(crate) async fn it_should_return_good_health_for_udp_service() { - logging::setup(); - - let configuration = Arc::new(configuration::ephemeral()); - - let service = torrust_udp_tracker_server::environment::Started::new(&configuration).await; - - let registar = service.registar.clone(); - - { - let config = configuration.health_check_api.clone(); - let env = Started::new(&config.into(), registar).await; - - let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 - - 
assert_eq!(response.status(), 200); - assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); - - let report: Report = response - .json() - .await - .expect("it should be able to get the report from the json"); - - assert_eq!(report.status, Status::Ok); - assert_eq!(report.message, String::new()); - - let details = report.details.first().expect("it should have some details"); - - assert_eq!(details.binding, service.bind_address()); - assert_eq!(details.result, Ok("Connected".to_string())); - - assert_eq!( - details.info, - format!("checking the udp tracker health check at: {}", service.bind_address()) - ); - - env.stop().await.expect("it should stop the service"); - } - - service.stop().await; - } - - #[tokio::test] - pub(crate) async fn it_should_return_error_when_udp_service_was_stopped_after_registration() { - logging::setup(); - - let configuration = Arc::new(configuration::ephemeral()); - - let service = torrust_udp_tracker_server::environment::Started::new(&configuration).await; - - let binding = service.bind_address(); - - let registar = service.registar.clone(); - - service.server.stop().await.expect("it should stop udp server"); - - { - let config = configuration.health_check_api.clone(); - let env = Started::new(&config.into(), registar).await; - - let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 - - assert_eq!(response.status(), 200); - assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); - - let report: Report = response - .json() - .await - .expect("it should be able to get the report from the json"); - - assert_eq!(report.status, Status::Error); - assert_eq!(report.message, "health check failed".to_string()); - - let details = report.details.first().expect("it should have some details"); - - assert_eq!(details.binding, binding); - assert_eq!(details.result, Err("Timed Out".to_string())); - assert_eq!(details.info, format!("checking the udp 
tracker health check at: {binding}")); - - env.stop().await.expect("it should stop the service"); - } - } -} diff --git a/tests/servers/health_check_api/mod.rs b/tests/servers/health_check_api/mod.rs deleted file mode 100644 index 2676be6f9..000000000 --- a/tests/servers/health_check_api/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod client; -pub mod contract; diff --git a/tests/servers/mod.rs b/tests/servers/mod.rs deleted file mode 100644 index 5aa096824..000000000 --- a/tests/servers/mod.rs +++ /dev/null @@ -1 +0,0 @@ -mod health_check_api; From 60c582646f879427c5ae2d0b2a955eec754f9b44 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Feb 2025 12:18:34 +0000 Subject: [PATCH 330/802] refactor: remove duplicate test --- tests/common/clock.rs | 16 ---------------- tests/common/mod.rs | 1 - 2 files changed, 17 deletions(-) delete mode 100644 tests/common/clock.rs diff --git a/tests/common/clock.rs b/tests/common/clock.rs deleted file mode 100644 index 5d94bb83d..000000000 --- a/tests/common/clock.rs +++ /dev/null @@ -1,16 +0,0 @@ -use std::time::Duration; - -use torrust_tracker_clock::clock::Time; - -use crate::CurrentClock; - -#[test] -fn it_should_use_stopped_time_for_testing() { - assert_eq!(CurrentClock::dbg_clock_type(), "Stopped".to_owned()); - - let time = CurrentClock::now(); - std::thread::sleep(Duration::from_millis(50)); - let time_2 = CurrentClock::now(); - - assert_eq!(time, time_2); -} diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 6cb94892b..b08eaa622 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,3 +1,2 @@ -pub mod clock; pub mod http; pub mod udp; From c34c8bc077df6fa0d8907ff206a6661f6b51bb58 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Feb 2025 12:19:23 +0000 Subject: [PATCH 331/802] refactor: [#1316] remove unsued code Integration tests have been moved to their respective server packages. 
--- tests/common/http.rs | 54 -------------------------------------------- tests/common/mod.rs | 2 -- tests/common/udp.rs | 41 --------------------------------- tests/integration.rs | 19 ---------------- 4 files changed, 116 deletions(-) delete mode 100644 tests/common/http.rs delete mode 100644 tests/common/mod.rs delete mode 100644 tests/common/udp.rs delete mode 100644 tests/integration.rs diff --git a/tests/common/http.rs b/tests/common/http.rs deleted file mode 100644 index d682027fd..000000000 --- a/tests/common/http.rs +++ /dev/null @@ -1,54 +0,0 @@ -pub type ReqwestQuery = Vec; -pub type ReqwestQueryParam = (String, String); - -/// URL Query component -#[derive(Default, Debug)] -pub struct Query { - params: Vec, -} - -impl Query { - pub fn empty() -> Self { - Self { params: vec![] } - } - - pub fn params(params: Vec) -> Self { - Self { params } - } - - pub fn add_param(&mut self, param: QueryParam) { - self.params.push(param); - } -} - -impl From for ReqwestQuery { - fn from(url_search_params: Query) -> Self { - url_search_params - .params - .iter() - .map(|param| ReqwestQueryParam::from((*param).clone())) - .collect() - } -} - -/// URL query param -#[derive(Clone, Debug)] -pub struct QueryParam { - name: String, - value: String, -} - -impl QueryParam { - pub fn new(name: &str, value: &str) -> Self { - Self { - name: name.to_string(), - value: value.to_string(), - } - } -} - -impl From for ReqwestQueryParam { - fn from(param: QueryParam) -> Self { - (param.name, param.value) - } -} diff --git a/tests/common/mod.rs b/tests/common/mod.rs deleted file mode 100644 index b08eaa622..000000000 --- a/tests/common/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod http; -pub mod udp; diff --git a/tests/common/udp.rs b/tests/common/udp.rs deleted file mode 100644 index 3d84e2b97..000000000 --- a/tests/common/udp.rs +++ /dev/null @@ -1,41 +0,0 @@ -use std::net::SocketAddr; -use std::sync::Arc; - -use tokio::net::UdpSocket; - -/// A generic UDP client -pub struct Client { - 
pub socket: Arc, -} - -impl Client { - #[allow(dead_code)] - pub async fn connected(remote_socket_addr: &SocketAddr, local_socket_addr: &SocketAddr) -> Client { - let client = Client::bind(local_socket_addr).await; - client.connect(remote_socket_addr).await; - client - } - - pub async fn bind(local_socket_addr: &SocketAddr) -> Self { - let socket = UdpSocket::bind(local_socket_addr).await.unwrap(); - Self { - socket: Arc::new(socket), - } - } - - pub async fn connect(&self, remote_address: &SocketAddr) { - self.socket.connect(remote_address).await.unwrap(); - } - - #[allow(dead_code)] - pub async fn send(&self, bytes: &[u8]) -> usize { - self.socket.writable().await.unwrap(); - self.socket.send(bytes).await.unwrap() - } - - #[allow(dead_code)] - pub async fn receive(&self, bytes: &mut [u8]) -> usize { - self.socket.readable().await.unwrap(); - self.socket.recv(bytes).await.unwrap() - } -} diff --git a/tests/integration.rs b/tests/integration.rs deleted file mode 100644 index 18414db89..000000000 --- a/tests/integration.rs +++ /dev/null @@ -1,19 +0,0 @@ -//! Integration tests. -//! -//! ```text -//! cargo test --test integration -//! ``` - -use torrust_tracker_clock::clock; -mod common; - -/// This code needs to be copied into each crate. -/// Working version, for production. -#[cfg(not(test))] -#[allow(dead_code)] -pub(crate) type CurrentClock = clock::Working; - -/// Stopped version, for testing. 
-#[cfg(test)] -#[allow(dead_code)] -pub(crate) type CurrentClock = clock::Stopped; From f233dd2dac8ca00394b31ff8b7e34a42628fbe7a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Feb 2025 12:23:50 +0000 Subject: [PATCH 332/802] refactor: remove unsued dependencies from the main Cargo.toml --- Cargo.lock | 9 --------- Cargo.toml | 22 ---------------------- 2 files changed, 31 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 512383460..966b987a0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4477,21 +4477,13 @@ dependencies = [ "bittorrent-udp-tracker-core", "chrono", "clap", - "crossbeam-skiplist", - "dashmap", - "figment", "futures", "local-ip-address", "mockall", - "parking_lot", - "r2d2", - "r2d2_mysql", - "r2d2_sqlite", "rand 0.9.0", "regex", "reqwest", "serde", - "serde_bytes", "serde_json", "tokio", "torrust-axum-health-check-api-server", @@ -4504,7 +4496,6 @@ dependencies = [ "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-test-helpers", - "torrust-tracker-torrent-repository", "torrust-udp-tracker-server", "tracing", "tracing-subscriber", diff --git a/Cargo.toml b/Cargo.toml index 40ddeca09..346817e27 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,19 +40,11 @@ bittorrent-tracker-core = { version = "3.0.0-develop", path = "packages/tracker- bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "packages/udp-tracker-core" } chrono = { version = "0", default-features = false, features = ["clock"] } clap = { version = "4", features = ["derive", "env"] } -crossbeam-skiplist = "0" -dashmap = "6" -figment = "0" futures = "0" -parking_lot = "0" -r2d2 = "0" -r2d2_mysql = "25" -r2d2_sqlite = { version = "0", features = ["bundled"] } rand = "0" regex = "1" reqwest = { version = "0", features = ["json"] } serde = { version = "1", features = ["derive"] } -serde_bytes = "0" serde_json = { version = "1", features = ["preserve_order"] } tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", 
"sync"] } torrust-axum-health-check-api-server = { version = "3.0.0-develop", path = "packages/axum-health-check-api-server" } @@ -63,24 +55,10 @@ torrust-server-lib = { version = "3.0.0-develop", path = "packages/server-lib" } torrust-tracker-api-core = { version = "3.0.0-develop", path = "packages/tracker-api-core" } torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/configuration" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "packages/torrent-repository" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "packages/udp-tracker-server" } tracing = "0" tracing-subscriber = { version = "0", features = ["json"] } -[package.metadata.cargo-machete] -ignored = [ - "crossbeam-skiplist", - "dashmap", - "figment", - "parking_lot", - "r2d2", - "r2d2_mysql", - "r2d2_sqlite", - "serde_bytes", - "torrust-tracker-torrent-repository", -] - [dev-dependencies] local-ip-address = "0" mockall = "0" From 3f5080338b245dd48d306e3d793c8e2979cfd931 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Feb 2025 13:11:37 +0000 Subject: [PATCH 333/802] tests: scaffolding for integration tests --- tests/integration.rs | 22 +++++++++++++++++++++ tests/servers/health_check_api.rs | 32 +++++++++++++++++++++++++++++++ tests/servers/mod.rs | 1 + 3 files changed, 55 insertions(+) create mode 100644 tests/integration.rs create mode 100644 tests/servers/health_check_api.rs create mode 100644 tests/servers/mod.rs diff --git a/tests/integration.rs b/tests/integration.rs new file mode 100644 index 000000000..6a139e047 --- /dev/null +++ b/tests/integration.rs @@ -0,0 +1,22 @@ +//! Scaffolding for integration tests. +//! +//! ```text +//! cargo test --test integration +//! ``` +mod servers; + +// todo: there is only one test example that was copied from other package. +// We have to add tests for the whole app. 
+ +use torrust_tracker_clock::clock; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; diff --git a/tests/servers/health_check_api.rs b/tests/servers/health_check_api.rs new file mode 100644 index 000000000..0e66014da --- /dev/null +++ b/tests/servers/health_check_api.rs @@ -0,0 +1,32 @@ +use reqwest::Response; +use torrust_axum_health_check_api_server::environment::Started; +use torrust_axum_health_check_api_server::resources::{Report, Status}; +use torrust_server_lib::registar::Registar; +use torrust_tracker_test_helpers::{configuration, logging}; + +pub async fn get(path: &str) -> Response { + reqwest::Client::builder().build().unwrap().get(path).send().await.unwrap() +} + +#[tokio::test] +async fn the_health_check_endpoint_should_return_status_ok_when_there_is_not_any_service_registered() { + logging::setup(); + + let configuration = configuration::ephemeral_with_no_services(); + + let env = Started::new(&configuration.health_check_api.into(), Registar::default()).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report = response + .json::() + .await + .expect("it should be able to get the report as json"); + + assert_eq!(report.status, Status::None); + + env.stop().await.expect("it should stop the service"); +} diff --git a/tests/servers/mod.rs b/tests/servers/mod.rs new file mode 100644 index 000000000..7aeefeec4 --- /dev/null +++ b/tests/servers/mod.rs @@ -0,0 +1 @@ +pub mod health_check_api; From 0c2be36c4b6184b87aff63d2a0abd9a5ce043301 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Feb 2025 16:31:57 
+0000 Subject: [PATCH 334/802] refactor: [#1318] return error instead of response from http announce handler --- Cargo.lock | 1 + .../src/v1/handlers/announce.rs | 52 ++++++++++++--- .../tests/server/asserts.rs | 10 +++ .../tests/server/v1/contract.rs | 8 ++- .../src/v1/services/peer_ip_resolver.rs | 2 +- packages/http-tracker-core/Cargo.toml | 1 + .../src/services/announce.rs | 65 +++++++++++++++++-- .../src/authentication/key/mod.rs | 2 +- packages/tracker-core/src/error.rs | 35 ++++++++++ 9 files changed, 155 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 966b987a0..83afae727 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -563,6 +563,7 @@ dependencies = [ "bittorrent-tracker-core", "futures", "mockall", + "thiserror 2.0.11", "tokio", "torrust-tracker-configuration", "torrust-tracker-primitives", diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 98b2d374c..7855c8172 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -7,6 +7,7 @@ use std::sync::Arc; use aquatic_udp_protocol::AnnounceEvent; use axum::extract::State; use axum::response::{IntoResponse, Response}; +use bittorrent_http_tracker_core::services::announce::HttpAnnounceError; use bittorrent_http_tracker_protocol::v1::requests::announce::{Announce, Compact, Event}; use bittorrent_http_tracker_protocol::v1::responses::{self}; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; @@ -111,7 +112,12 @@ async fn handle( .await { Ok(announce_data) => announce_data, - Err(error) => return (StatusCode::OK, error.write()).into_response(), + Err(error) => { + let error_response = responses::error::Error { + failure_reason: error.to_string(), + }; + return (StatusCode::OK, error_response.write()).into_response(); + } }; build_response(announce_request, announce_data) } @@ 
-126,7 +132,7 @@ async fn handle_announce( announce_request: &Announce, client_ip_sources: &ClientIpSources, maybe_key: Option, -) -> Result { +) -> Result { bittorrent_http_tracker_core::services::announce::handle_announce( &core_config.clone(), &announce_handler.clone(), @@ -290,6 +296,7 @@ mod tests { use std::str::FromStr; + use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_tracker_core::authentication; use super::{initialize_private_tracker, sample_announce_request, sample_client_ip_sources}; @@ -315,7 +322,14 @@ mod tests { .await .unwrap_err(); - assert_error_response(&response, "Tracker authentication error: Missing authentication key"); + let error_response = responses::error::Error { + failure_reason: response.to_string(), + }; + + assert_error_response( + &error_response, + "Tracker core error: Tracker core authentication error: Missing authentication key", + ); } #[tokio::test] @@ -339,15 +353,21 @@ mod tests { .await .unwrap_err(); + let error_response = responses::error::Error { + failure_reason: response.to_string(), + }; + assert_error_response( - &response, - "Tracker authentication error: Failed to read key: YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ", + &error_response, + "Tracker core error: Tracker core authentication error: Failed to read key: YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ", ); } } mod with_tracker_in_listed_mode { + use bittorrent_http_tracker_protocol::v1::responses; + use super::{initialize_listed_tracker, sample_announce_request, sample_client_ip_sources}; use crate::v1::handlers::announce::handle_announce; use crate::v1::handlers::announce::tests::assert_error_response; @@ -371,10 +391,14 @@ mod tests { .await .unwrap_err(); + let error_response = responses::error::Error { + failure_reason: response.to_string(), + }; + assert_error_response( - &response, + &error_response, &format!( - "Tracker whitelist error: The torrent: {}, is not whitelisted", + "Tracker core error: Tracker core whitelist error: The torrent: {}, is not 
whitelisted", announce_request.info_hash ), ); @@ -383,6 +407,7 @@ mod tests { mod with_tracker_on_reverse_proxy { + use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; use super::{initialize_tracker_on_reverse_proxy, sample_announce_request}; @@ -411,8 +436,12 @@ mod tests { .await .unwrap_err(); + let error_response = responses::error::Error { + failure_reason: response.to_string(), + }; + assert_error_response( - &response, + &error_response, "Error resolving peer IP: missing or invalid the right most X-Forwarded-For IP", ); } @@ -420,6 +449,7 @@ mod tests { mod with_tracker_not_on_reverse_proxy { + use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; use super::{initialize_tracker_not_on_reverse_proxy, sample_announce_request}; @@ -448,8 +478,12 @@ mod tests { .await .unwrap_err(); + let error_response = responses::error::Error { + failure_reason: response.to_string(), + }; + assert_error_response( - &response, + &error_response, "Error resolving peer IP: cannot get the client IP from the connection info", ); } diff --git a/packages/axum-http-tracker-server/tests/server/asserts.rs b/packages/axum-http-tracker-server/tests/server/asserts.rs index 7173aa8a9..7ab8d93e5 100644 --- a/packages/axum-http-tracker-server/tests/server/asserts.rs +++ b/packages/axum-http-tracker-server/tests/server/asserts.rs @@ -147,3 +147,13 @@ pub async fn assert_authentication_error_response(response: Response) { Location::caller(), ); } + +pub async fn assert_tracker_core_authentication_error_response(response: Response) { + assert_eq!(response.status(), 200); + + assert_bencoded_error( + &response.text().await.unwrap(), + "Tracker core error: Tracker core authentication error", + Location::caller(), + ); +} diff --git a/packages/axum-http-tracker-server/tests/server/v1/contract.rs 
b/packages/axum-http-tracker-server/tests/server/v1/contract.rs index b62920234..992793022 100644 --- a/packages/axum-http-tracker-server/tests/server/v1/contract.rs +++ b/packages/axum-http-tracker-server/tests/server/v1/contract.rs @@ -1448,7 +1448,9 @@ mod configured_as_private { use torrust_axum_http_tracker_server::environment::Started; use torrust_tracker_test_helpers::{configuration, logging}; - use crate::server::asserts::{assert_authentication_error_response, assert_is_announce_response}; + use crate::server::asserts::{ + assert_authentication_error_response, assert_is_announce_response, assert_tracker_core_authentication_error_response, + }; use crate::server::client::Client; use crate::server::requests::announce::QueryBuilder; @@ -1487,7 +1489,7 @@ mod configured_as_private { .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; - assert_authentication_error_response(response).await; + assert_tracker_core_authentication_error_response(response).await; env.stop().await; } @@ -1522,7 +1524,7 @@ mod configured_as_private { .announce(&QueryBuilder::default().query()) .await; - assert_authentication_error_response(response).await; + assert_tracker_core_authentication_error_response(response).await; env.stop().await; } diff --git a/packages/http-protocol/src/v1/services/peer_ip_resolver.rs b/packages/http-protocol/src/v1/services/peer_ip_resolver.rs index 8e99b56d1..bea93f1ba 100644 --- a/packages/http-protocol/src/v1/services/peer_ip_resolver.rs +++ b/packages/http-protocol/src/v1/services/peer_ip_resolver.rs @@ -36,7 +36,7 @@ pub struct ClientIpSources { } /// The error that can occur when resolving the peer IP. 
-#[derive(Error, Debug)] +#[derive(Error, Debug, Clone)] pub enum PeerIpResolutionError { /// The peer IP cannot be obtained because the tracker is configured as a /// reverse proxy but the `X-Forwarded-For` HTTP header is missing or diff --git a/packages/http-tracker-core/Cargo.toml b/packages/http-tracker-core/Cargo.toml index bc6a3d1b3..1e0bcff28 100644 --- a/packages/http-tracker-core/Cargo.toml +++ b/packages/http-tracker-core/Cargo.toml @@ -19,6 +19,7 @@ bittorrent-http-tracker-protocol = { version = "3.0.0-develop", path = "../http- bittorrent-primitives = "0.1.0" bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } futures = "0" +thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index ce34ee31c..2f530c654 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -12,17 +12,67 @@ use std::panic::Location; use std::sync::Arc; use bittorrent_http_tracker_protocol::v1::requests::announce::{peer_from_request, Announce}; -use bittorrent_http_tracker_protocol::v1::responses; -use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources}; +use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources, PeerIpResolutionError}; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::authentication::{self, Key}; +use bittorrent_tracker_core::error::{AnnounceError, TrackerCoreError, WhitelistError}; use bittorrent_tracker_core::whitelist; use 
torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; use crate::statistics; +/// Errors related to announce requests. +#[derive(thiserror::Error, Debug, Clone)] +pub enum HttpAnnounceError { + #[error("Error resolving peer IP: {source}")] + PeerIpResolutionError { source: PeerIpResolutionError }, + + #[error("Tracker core error: {source}")] + TrackerCoreError { source: TrackerCoreError }, +} + +impl From for HttpAnnounceError { + fn from(peer_ip_resolution_error: PeerIpResolutionError) -> Self { + Self::PeerIpResolutionError { + source: peer_ip_resolution_error, + } + } +} + +impl From for HttpAnnounceError { + fn from(tracker_core_error: TrackerCoreError) -> Self { + Self::TrackerCoreError { + source: tracker_core_error, + } + } +} + +impl From for HttpAnnounceError { + fn from(announce_error: AnnounceError) -> Self { + Self::TrackerCoreError { + source: announce_error.into(), + } + } +} + +impl From for HttpAnnounceError { + fn from(whitelist_error: WhitelistError) -> Self { + Self::TrackerCoreError { + source: whitelist_error.into(), + } + } +} + +impl From for HttpAnnounceError { + fn from(whitelist_error: authentication::key::Error) -> Self { + Self::TrackerCoreError { + source: whitelist_error.into(), + } + } +} + /// The HTTP tracker `announce` service. 
/// /// The service sends an statistics event that increments: @@ -50,7 +100,7 @@ pub async fn handle_announce( announce_request: &Announce, client_ip_sources: &ClientIpSources, maybe_key: Option, -) -> Result { +) -> Result { // Authentication if core_config.private { match maybe_key { @@ -59,9 +109,10 @@ pub async fn handle_announce( Err(error) => return Err(error.into()), }, None => { - return Err(responses::error::Error::from(authentication::key::Error::MissingAuthKey { + return Err(authentication::key::Error::MissingAuthKey { location: Location::caller(), - })) + } + .into()) } } } @@ -69,12 +120,12 @@ pub async fn handle_announce( // Authorization match whitelist_authorization.authorize(&announce_request.info_hash).await { Ok(()) => (), - Err(error) => return Err(responses::error::Error::from(error)), + Err(error) => return Err(error.into()), } let peer_ip = match peer_ip_resolver::invoke(core_config.net.on_reverse_proxy, client_ip_sources) { Ok(peer_ip) => peer_ip, - Err(error) => return Err(responses::error::Error::from(error)), + Err(error) => return Err(error.into()), }; let mut peer = peer_from_request(announce_request, &peer_ip); diff --git a/packages/tracker-core/src/authentication/key/mod.rs b/packages/tracker-core/src/authentication/key/mod.rs index efc734356..44bbd0688 100644 --- a/packages/tracker-core/src/authentication/key/mod.rs +++ b/packages/tracker-core/src/authentication/key/mod.rs @@ -166,7 +166,7 @@ pub fn verify_key_expiration(auth_key: &PeerKey) -> Result<(), Error> { /// Verification error. Error returned when an [`PeerKey`] cannot be /// verified with the [`crate::authentication::key::verify_key_expiration`] function. -#[derive(Debug, Error)] +#[derive(Debug, Error, Clone)] #[allow(dead_code)] pub enum Error { /// Wraps an underlying error encountered during key verification. 
diff --git a/packages/tracker-core/src/error.rs b/packages/tracker-core/src/error.rs index fed076ffa..f2d763233 100644 --- a/packages/tracker-core/src/error.rs +++ b/packages/tracker-core/src/error.rs @@ -14,6 +14,41 @@ use torrust_tracker_located_error::LocatedError; use super::authentication::key::ParseKeyError; use super::databases; +use crate::authentication; + +/// Wrapper for all errors returned by the tracker core. +#[derive(thiserror::Error, Debug, Clone)] +pub enum TrackerCoreError { + /// Error returned when there was an error with the tracker core announce handler. + #[error("Tracker core announce error: {source}")] + AnnounceError { source: AnnounceError }, + + /// Error returned when there was an error with the tracker core whitelist. + #[error("Tracker core whitelist error: {source}")] + WhitelistError { source: WhitelistError }, + + /// Error returned when there was an error with the authentication in the tracker core. + #[error("Tracker core authentication error: {source}")] + AuthenticationError { source: authentication::key::Error }, +} + +impl From for TrackerCoreError { + fn from(announce_error: AnnounceError) -> Self { + Self::AnnounceError { source: announce_error } + } +} + +impl From for TrackerCoreError { + fn from(whitelist_error: WhitelistError) -> Self { + Self::WhitelistError { source: whitelist_error } + } +} + +impl From for TrackerCoreError { + fn from(whitelist_error: authentication::key::Error) -> Self { + Self::AuthenticationError { source: whitelist_error } + } +} /// Errors related to announce requests. 
#[derive(thiserror::Error, Debug, Clone)] From df4f7827e5b99ad3ad1e3fe6498850dce4c7731a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Feb 2025 17:03:05 +0000 Subject: [PATCH 335/802] refactor: [#1318] return error instead of response from http scrape handler --- .../src/v1/handlers/scrape.rs | 24 ++++++-- .../http-tracker-core/src/services/scrape.rs | 60 +++++++++++++++++-- packages/tracker-core/src/error.rs | 10 ++++ 3 files changed, 85 insertions(+), 9 deletions(-) diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index 59549128a..00046a618 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -6,6 +6,7 @@ use std::sync::Arc; use axum::extract::State; use axum::response::{IntoResponse, Response}; +use bittorrent_http_tracker_core::services::scrape::HttpScrapeError; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; @@ -101,7 +102,12 @@ async fn handle( .await { Ok(scrape_data) => scrape_data, - Err(error) => return (StatusCode::OK, error.write()).into_response(), + Err(error) => { + let error_response = responses::error::Error { + failure_reason: error.to_string(), + }; + return (StatusCode::OK, error_response.write()).into_response(); + } }; build_response(scrape_data) @@ -116,7 +122,7 @@ async fn handle_scrape( scrape_request: &Scrape, client_ip_sources: &ClientIpSources, maybe_key: Option, -) -> Result { +) -> Result { bittorrent_http_tracker_core::services::scrape::handle_scrape( core_config, scrape_handler, @@ -316,6 +322,7 @@ mod tests { mod with_tracker_on_reverse_proxy { + use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; use 
super::{initialize_tracker_on_reverse_proxy, sample_scrape_request}; @@ -343,8 +350,12 @@ mod tests { .await .unwrap_err(); + let error_response = responses::error::Error { + failure_reason: response.to_string(), + }; + assert_error_response( - &response, + &error_response, "Error resolving peer IP: missing or invalid the right most X-Forwarded-For IP", ); } @@ -352,6 +363,7 @@ mod tests { mod with_tracker_not_on_reverse_proxy { + use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; use super::{initialize_tracker_not_on_reverse_proxy, sample_scrape_request}; @@ -379,8 +391,12 @@ mod tests { .await .unwrap_err(); + let error_response = responses::error::Error { + failure_reason: response.to_string(), + }; + assert_error_response( - &response, + &error_response, "Error resolving peer IP: cannot get the client IP from the connection info", ); } diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 686a849ea..394f285ee 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -11,17 +11,67 @@ use std::net::IpAddr; use std::sync::Arc; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; -use bittorrent_http_tracker_protocol::v1::responses; -use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources}; +use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources, PeerIpResolutionError}; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::authentication::service::AuthenticationService; -use bittorrent_tracker_core::authentication::Key; +use bittorrent_tracker_core::authentication::{self, Key}; +use bittorrent_tracker_core::error::{ScrapeError, TrackerCoreError, WhitelistError}; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use 
torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; use crate::statistics; +/// Errors related to announce requests. +#[derive(thiserror::Error, Debug, Clone)] +pub enum HttpScrapeError { + #[error("Error resolving peer IP: {source}")] + PeerIpResolutionError { source: PeerIpResolutionError }, + + #[error("Tracker core error: {source}")] + TrackerCoreError { source: TrackerCoreError }, +} + +impl From for HttpScrapeError { + fn from(peer_ip_resolution_error: PeerIpResolutionError) -> Self { + Self::PeerIpResolutionError { + source: peer_ip_resolution_error, + } + } +} + +impl From for HttpScrapeError { + fn from(tracker_core_error: TrackerCoreError) -> Self { + Self::TrackerCoreError { + source: tracker_core_error, + } + } +} + +impl From for HttpScrapeError { + fn from(announce_error: ScrapeError) -> Self { + Self::TrackerCoreError { + source: announce_error.into(), + } + } +} + +impl From for HttpScrapeError { + fn from(whitelist_error: WhitelistError) -> Self { + Self::TrackerCoreError { + source: whitelist_error.into(), + } + } +} + +impl From for HttpScrapeError { + fn from(whitelist_error: authentication::key::Error) -> Self { + Self::TrackerCoreError { + source: whitelist_error.into(), + } + } +} + /// The HTTP tracker `scrape` service. 
/// /// The service sends an statistics event that increments: @@ -47,7 +97,7 @@ pub async fn handle_scrape( scrape_request: &Scrape, client_ip_sources: &ClientIpSources, maybe_key: Option, -) -> Result { +) -> Result { // Authentication let return_fake_scrape_data = if core_config.private { match maybe_key { @@ -66,7 +116,7 @@ pub async fn handle_scrape( let peer_ip = match peer_ip_resolver::invoke(core_config.net.on_reverse_proxy, client_ip_sources) { Ok(peer_ip) => peer_ip, - Err(error) => return Err(responses::error::Error::from(error)), + Err(error) => return Err(error.into()), }; if return_fake_scrape_data { diff --git a/packages/tracker-core/src/error.rs b/packages/tracker-core/src/error.rs index f2d763233..0b94483eb 100644 --- a/packages/tracker-core/src/error.rs +++ b/packages/tracker-core/src/error.rs @@ -23,6 +23,10 @@ pub enum TrackerCoreError { #[error("Tracker core announce error: {source}")] AnnounceError { source: AnnounceError }, + /// Error returned when there was an error with the tracker core scrape handler. + #[error("Tracker core scrape error: {source}")] + ScrapeError { source: ScrapeError }, + /// Error returned when there was an error with the tracker core whitelist. #[error("Tracker core whitelist error: {source}")] WhitelistError { source: WhitelistError }, @@ -38,6 +42,12 @@ impl From for TrackerCoreError { } } +impl From for TrackerCoreError { + fn from(scrape_error: ScrapeError) -> Self { + Self::ScrapeError { source: scrape_error } + } +} + impl From for TrackerCoreError { fn from(whitelist_error: WhitelistError) -> Self { Self::WhitelistError { source: whitelist_error } From 3f55b9d61c44517145f86271116af872e0591dcf Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 26 Feb 2025 12:11:07 +0000 Subject: [PATCH 336/802] refactor: [#1319] add UDP server events We will splot UDP stats events into: - UDP core events - UDP server event This is step 1 in the refactor: - Step 1. Create UDP server events. - Step 2. 
Remove UDP server events from core events. --- packages/udp-tracker-server/src/lib.rs | 1 + .../src/statistics/event/handler.rs | 184 ++++++++++++++++++ .../src/statistics/event/listener.rs | 11 ++ .../src/statistics/event/mod.rs | 43 ++++ .../src/statistics/event/sender.rs | 29 +++ .../src/statistics/keeper.rs | 81 ++++++++ .../src/statistics/metrics.rs | 60 ++++++ .../udp-tracker-server/src/statistics/mod.rs | 6 + .../src/statistics/repository.rs | 173 ++++++++++++++++ .../src/statistics/services.rs | 146 ++++++++++++++ .../src/statistics/setup.rs | 54 +++++ 11 files changed, 788 insertions(+) create mode 100644 packages/udp-tracker-server/src/statistics/event/handler.rs create mode 100644 packages/udp-tracker-server/src/statistics/event/listener.rs create mode 100644 packages/udp-tracker-server/src/statistics/event/mod.rs create mode 100644 packages/udp-tracker-server/src/statistics/event/sender.rs create mode 100644 packages/udp-tracker-server/src/statistics/keeper.rs create mode 100644 packages/udp-tracker-server/src/statistics/metrics.rs create mode 100644 packages/udp-tracker-server/src/statistics/mod.rs create mode 100644 packages/udp-tracker-server/src/statistics/repository.rs create mode 100644 packages/udp-tracker-server/src/statistics/services.rs create mode 100644 packages/udp-tracker-server/src/statistics/setup.rs diff --git a/packages/udp-tracker-server/src/lib.rs b/packages/udp-tracker-server/src/lib.rs index 8e3cf503b..e02011a8b 100644 --- a/packages/udp-tracker-server/src/lib.rs +++ b/packages/udp-tracker-server/src/lib.rs @@ -638,6 +638,7 @@ pub mod environment; pub mod error; pub mod handlers; pub mod server; +pub mod statistics; use std::net::SocketAddr; diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs new file mode 100644 index 000000000..731f678a1 --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -0,0 +1,184 @@ +use 
crate::statistics::event::{Event, UdpResponseKind}; +use crate::statistics::repository::Repository; + +pub async fn handle_event(event: Event, stats_repository: &Repository) { + match event { + // UDP + Event::UdpRequestAborted => { + stats_repository.increase_udp_requests_aborted().await; + } + Event::UdpRequestBanned => { + stats_repository.increase_udp_requests_banned().await; + } + + // UDP4 + Event::Udp4Request { kind } => { + stats_repository.increase_udp4_requests().await; + match kind { + UdpResponseKind::Connect => { + stats_repository.increase_udp4_connections().await; + } + UdpResponseKind::Announce => { + stats_repository.increase_udp4_announces().await; + } + UdpResponseKind::Scrape => { + stats_repository.increase_udp4_scrapes().await; + } + UdpResponseKind::Error => {} + } + } + Event::Udp4Response { + kind, + req_processing_time, + } => { + stats_repository.increase_udp4_responses().await; + + match kind { + UdpResponseKind::Connect => { + stats_repository + .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) + .await; + } + UdpResponseKind::Announce => { + stats_repository + .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) + .await; + } + UdpResponseKind::Scrape => { + stats_repository + .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) + .await; + } + UdpResponseKind::Error => {} + } + } + Event::Udp4Error => { + stats_repository.increase_udp4_errors().await; + } + + // UDP6 + Event::Udp6Request => { + stats_repository.increase_udp6_requests().await; + } + Event::Udp6Response { + kind: _, + req_processing_time: _, + } => { + stats_repository.increase_udp6_responses().await; + } + Event::Udp6Error => { + stats_repository.increase_udp6_errors().await; + } + } + + tracing::debug!("stats: {:?}", stats_repository.get_stats().await); +} + +#[cfg(test)] +mod tests { + use crate::statistics::event::handler::handle_event; + use crate::statistics::event::{Event, UdpResponseKind}; + use 
crate::statistics::repository::Repository; + + #[tokio::test] + async fn should_increase_the_udp_abort_counter_when_it_receives_a_udp_abort_event() { + let stats_repository = Repository::new(); + + handle_event(Event::UdpRequestAborted, &stats_repository).await; + let stats = stats_repository.get_stats().await; + assert_eq!(stats.udp_requests_aborted, 1); + } + #[tokio::test] + async fn should_increase_the_udp_ban_counter_when_it_receives_a_udp_banned_event() { + let stats_repository = Repository::new(); + + handle_event(Event::UdpRequestBanned, &stats_repository).await; + let stats = stats_repository.get_stats().await; + assert_eq!(stats.udp_requests_banned, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_requests_counter_when_it_receives_a_udp4_request_event() { + let stats_repository = Repository::new(); + + handle_event( + Event::Udp4Request { + kind: UdpResponseKind::Connect, + }, + &stats_repository, + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_requests, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_responses_counter_when_it_receives_a_udp4_response_event() { + let stats_repository = Repository::new(); + + handle_event( + Event::Udp4Response { + kind: crate::statistics::event::UdpResponseKind::Announce, + req_processing_time: std::time::Duration::from_secs(1), + }, + &stats_repository, + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_responses, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_errors_counter_when_it_receives_a_udp4_error_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Udp4Error, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_errors_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_requests_counter_when_it_receives_a_udp6_request_event() { + let stats_repository = Repository::new(); + + 
handle_event(Event::Udp6Request, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_requests, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_response_counter_when_it_receives_a_udp6_response_event() { + let stats_repository = Repository::new(); + + handle_event( + Event::Udp6Response { + kind: crate::statistics::event::UdpResponseKind::Announce, + req_processing_time: std::time::Duration::from_secs(1), + }, + &stats_repository, + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_responses, 1); + } + #[tokio::test] + async fn should_increase_the_udp6_errors_counter_when_it_receives_a_udp6_error_event() { + let stats_repository = Repository::new(); + + handle_event(Event::Udp6Error, &stats_repository).await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_errors_handled, 1); + } +} diff --git a/packages/udp-tracker-server/src/statistics/event/listener.rs b/packages/udp-tracker-server/src/statistics/event/listener.rs new file mode 100644 index 000000000..f1a2e25de --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/listener.rs @@ -0,0 +1,11 @@ +use tokio::sync::mpsc; + +use super::handler::handle_event; +use super::Event; +use crate::statistics::repository::Repository; + +pub async fn dispatch_events(mut receiver: mpsc::Receiver, stats_repository: Repository) { + while let Some(event) = receiver.recv().await { + handle_event(event, &stats_repository).await; + } +} diff --git a/packages/udp-tracker-server/src/statistics/event/mod.rs b/packages/udp-tracker-server/src/statistics/event/mod.rs new file mode 100644 index 000000000..4f66862d6 --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/mod.rs @@ -0,0 +1,43 @@ +use std::time::Duration; + +pub mod handler; +pub mod listener; +pub mod sender; + +/// An statistics event. It is used to collect tracker metrics. 
+/// +/// - `Tcp` prefix means the event was triggered by the HTTP tracker +/// - `Udp` prefix means the event was triggered by the UDP tracker +/// - `4` or `6` prefixes means the IP version used by the peer +/// - Finally the event suffix is the type of request: `announce`, `scrape` or `connection` +/// +/// > NOTE: HTTP trackers do not use `connection` requests. +#[derive(Debug, PartialEq, Eq)] +pub enum Event { + // code-review: consider one single event for request type with data: Event::Announce { scheme: HTTPorUDP, ip_version: V4orV6 } + // Attributes are enums too. + UdpRequestAborted, + UdpRequestBanned, + Udp4Request { + kind: UdpResponseKind, + }, + Udp4Response { + kind: UdpResponseKind, + req_processing_time: Duration, + }, + Udp4Error, + Udp6Request, + Udp6Response { + kind: UdpResponseKind, + req_processing_time: Duration, + }, + Udp6Error, +} + +#[derive(Debug, PartialEq, Eq)] +pub enum UdpResponseKind { + Connect, + Announce, + Scrape, + Error, +} diff --git a/packages/udp-tracker-server/src/statistics/event/sender.rs b/packages/udp-tracker-server/src/statistics/event/sender.rs new file mode 100644 index 000000000..ca4b4e210 --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/sender.rs @@ -0,0 +1,29 @@ +use futures::future::BoxFuture; +use futures::FutureExt; +#[cfg(test)] +use mockall::{automock, predicate::str}; +use tokio::sync::mpsc; +use tokio::sync::mpsc::error::SendError; + +use super::Event; + +/// A trait to allow sending statistics events +#[cfg_attr(test, automock)] +pub trait Sender: Sync + Send { + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; +} + +/// An [`statistics::EventSender`](crate::statistics::event::sender::Sender) implementation. +/// +/// It uses a channel sender to send the statistic events. 
The channel is created by a +/// [`statistics::Keeper`](crate::statistics::keeper::Keeper) +#[allow(clippy::module_name_repetitions)] +pub struct ChannelSender { + pub(crate) sender: mpsc::Sender, +} + +impl Sender for ChannelSender { + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { + async move { Some(self.sender.send(event).await) }.boxed() + } +} diff --git a/packages/udp-tracker-server/src/statistics/keeper.rs b/packages/udp-tracker-server/src/statistics/keeper.rs new file mode 100644 index 000000000..e805a7eea --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/keeper.rs @@ -0,0 +1,81 @@ +use tokio::sync::mpsc; + +use super::event::listener::dispatch_events; +use super::event::sender::{ChannelSender, Sender}; +use super::event::Event; +use super::repository::Repository; + +const CHANNEL_BUFFER_SIZE: usize = 65_535; + +/// The service responsible for keeping tracker metrics (listening to statistics events and handle them). +/// +/// It actively listen to new statistics events. When it receives a new event +/// it accordingly increases the counters. 
+pub struct Keeper { + pub repository: Repository, +} + +impl Default for Keeper { + fn default() -> Self { + Self::new() + } +} + +impl Keeper { + #[must_use] + pub fn new() -> Self { + Self { + repository: Repository::new(), + } + } + + #[must_use] + pub fn new_active_instance() -> (Box, Repository) { + let mut stats_tracker = Self::new(); + + let stats_event_sender = stats_tracker.run_event_listener(); + + (stats_event_sender, stats_tracker.repository) + } + + pub fn run_event_listener(&mut self) -> Box { + let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); + + let stats_repository = self.repository.clone(); + + tokio::spawn(async move { dispatch_events(receiver, stats_repository).await }); + + Box::new(ChannelSender { sender }) + } +} + +#[cfg(test)] +mod tests { + use crate::statistics::event::{Event, UdpResponseKind}; + use crate::statistics::keeper::Keeper; + use crate::statistics::metrics::Metrics; + + #[tokio::test] + async fn should_contain_the_tracker_statistics() { + let stats_tracker = Keeper::new(); + + let stats = stats_tracker.repository.get_stats().await; + + assert_eq!(stats.udp4_requests, Metrics::default().udp4_requests); + } + + #[tokio::test] + async fn should_create_an_event_sender_to_send_statistical_events() { + let mut stats_tracker = Keeper::new(); + + let event_sender = stats_tracker.run_event_listener(); + + let result = event_sender + .send_event(Event::Udp4Request { + kind: UdpResponseKind::Connect, + }) + .await; + + assert!(result.is_some()); + } +} diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs new file mode 100644 index 000000000..cce618d74 --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -0,0 +1,60 @@ +/// Metrics collected by the UDP tracker server. +#[derive(Debug, PartialEq, Default)] +pub struct Metrics { + // UDP + /// Total number of UDP (UDP tracker) requests aborted. 
+ pub udp_requests_aborted: u64, + + /// Total number of UDP (UDP tracker) requests banned. + pub udp_requests_banned: u64, + + /// Total number of banned IPs. + pub udp_banned_ips_total: u64, + + /// Average rounded time spent processing UDP connect requests. + pub udp_avg_connect_processing_time_ns: u64, + + /// Average rounded time spent processing UDP announce requests. + pub udp_avg_announce_processing_time_ns: u64, + + /// Average rounded time spent processing UDP scrape requests. + pub udp_avg_scrape_processing_time_ns: u64, + + // UDPv4 + /// Total number of UDP (UDP tracker) requests from IPv4 peers. + pub udp4_requests: u64, + + /// Total number of UDP (UDP tracker) connections from IPv4 peers. + pub udp4_connections_handled: u64, + + /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. + pub udp4_announces_handled: u64, + + /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. + pub udp4_scrapes_handled: u64, + + /// Total number of UDP (UDP tracker) responses from IPv4 peers. + pub udp4_responses: u64, + + /// Total number of UDP (UDP tracker) `error` requests from IPv4 peers. + pub udp4_errors_handled: u64, + + // UDPv6 + /// Total number of UDP (UDP tracker) requests from IPv6 peers. + pub udp6_requests: u64, + + /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. + pub udp6_connections_handled: u64, + + /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. + pub udp6_announces_handled: u64, + + /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. + pub udp6_scrapes_handled: u64, + + /// Total number of UDP (UDP tracker) responses from IPv6 peers. + pub udp6_responses: u64, + + /// Total number of UDP (UDP tracker) `error` requests from IPv6 peers. 
+ pub udp6_errors_handled: u64, +} diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs new file mode 100644 index 000000000..939a41061 --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -0,0 +1,6 @@ +pub mod event; +pub mod keeper; +pub mod metrics; +pub mod repository; +pub mod services; +pub mod setup; diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs new file mode 100644 index 000000000..22e793036 --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -0,0 +1,173 @@ +use std::sync::Arc; +use std::time::Duration; + +use tokio::sync::{RwLock, RwLockReadGuard}; + +use super::metrics::Metrics; + +/// A repository for the tracker metrics. +#[derive(Clone)] +pub struct Repository { + pub stats: Arc>, +} + +impl Default for Repository { + fn default() -> Self { + Self::new() + } +} + +impl Repository { + #[must_use] + pub fn new() -> Self { + Self { + stats: Arc::new(RwLock::new(Metrics::default())), + } + } + + pub async fn get_stats(&self) -> RwLockReadGuard<'_, Metrics> { + self.stats.read().await + } + + pub async fn increase_udp_requests_aborted(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp_requests_aborted += 1; + drop(stats_lock); + } + + pub async fn increase_udp_requests_banned(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp_requests_banned += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_requests(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_requests += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_announces_handled 
+= 1; + drop(stats_lock); + } + + pub async fn increase_udp4_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_scrapes_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_responses(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_responses += 1; + drop(stats_lock); + } + + pub async fn increase_udp4_errors(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp4_errors_handled += 1; + drop(stats_lock); + } + + #[allow(clippy::cast_precision_loss)] + #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_sign_loss)] + pub async fn recalculate_udp_avg_connect_processing_time_ns(&self, req_processing_time: Duration) { + let mut stats_lock = self.stats.write().await; + + let req_processing_time = req_processing_time.as_nanos() as f64; + let udp_connections_handled = (stats_lock.udp4_connections_handled + stats_lock.udp6_connections_handled) as f64; + + let previous_avg = stats_lock.udp_avg_connect_processing_time_ns; + + // Moving average: https://en.wikipedia.org/wiki/Moving_average + let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_connections_handled; + + stats_lock.udp_avg_connect_processing_time_ns = new_avg.ceil() as u64; + + drop(stats_lock); + } + + #[allow(clippy::cast_precision_loss)] + #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_sign_loss)] + pub async fn recalculate_udp_avg_announce_processing_time_ns(&self, req_processing_time: Duration) { + let mut stats_lock = self.stats.write().await; + + let req_processing_time = req_processing_time.as_nanos() as f64; + + let udp_announces_handled = (stats_lock.udp4_announces_handled + stats_lock.udp6_announces_handled) as f64; + + let previous_avg = stats_lock.udp_avg_announce_processing_time_ns; + + // Moving average: https://en.wikipedia.org/wiki/Moving_average + let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / 
udp_announces_handled; + + stats_lock.udp_avg_announce_processing_time_ns = new_avg.ceil() as u64; + + drop(stats_lock); + } + + #[allow(clippy::cast_precision_loss)] + #[allow(clippy::cast_possible_truncation)] + #[allow(clippy::cast_sign_loss)] + pub async fn recalculate_udp_avg_scrape_processing_time_ns(&self, req_processing_time: Duration) { + let mut stats_lock = self.stats.write().await; + + let req_processing_time = req_processing_time.as_nanos() as f64; + let udp_scrapes_handled = (stats_lock.udp4_scrapes_handled + stats_lock.udp6_scrapes_handled) as f64; + + let previous_avg = stats_lock.udp_avg_scrape_processing_time_ns; + + // Moving average: https://en.wikipedia.org/wiki/Moving_average + let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_scrapes_handled; + + stats_lock.udp_avg_scrape_processing_time_ns = new_avg.ceil() as u64; + + drop(stats_lock); + } + + pub async fn increase_udp6_requests(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_requests += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_connections(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_connections_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_announces(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_announces_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_scrapes(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_scrapes_handled += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_responses(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_responses += 1; + drop(stats_lock); + } + + pub async fn increase_udp6_errors(&self) { + let mut stats_lock = self.stats.write().await; + stats_lock.udp6_errors_handled += 1; + drop(stats_lock); + } +} diff --git a/packages/udp-tracker-server/src/statistics/services.rs 
b/packages/udp-tracker-server/src/statistics/services.rs new file mode 100644 index 000000000..d34bd3c8a --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/services.rs @@ -0,0 +1,146 @@ +//! Statistics services. +//! +//! It includes: +//! +//! - A [`factory`](crate::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. +//! - A [`get_metrics`] service to get the tracker [`metrics`](crate::statistics::metrics::Metrics). +//! +//! Tracker metrics are collected using a Publisher-Subscribe pattern. +//! +//! The factory function builds two structs: +//! +//! - An statistics event [`Sender`](crate::statistics::event::sender::Sender) +//! - An statistics [`Repository`] +//! +//! ```text +//! let (stats_event_sender, stats_repository) = factory(tracker_usage_statistics); +//! ``` +//! +//! The statistics repository is responsible for storing the metrics in memory. +//! The statistics event sender allows sending events related to metrics. +//! There is an event listener that is receiving all the events and processing them with an event handler. +//! Then, the event handler updates the metrics depending on the received event. +//! +//! For example, if you send the event [`Event::Udp4Connect`](crate::statistics::event::Event::Udp4Connect): +//! +//! ```text +//! let result = event_sender.send_event(Event::Udp4Connect).await; +//! ``` +//! +//! Eventually the counter for UDP connections from IPv4 peers will be increased. +//! +//! ```rust,no_run +//! pub struct Metrics { +//! // ... +//! pub udp4_connections_handled: u64, // This will be incremented +//! // ... +//! } +//! 
``` +use std::sync::Arc; + +use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use bittorrent_udp_tracker_core::services::banning::BanService; +use tokio::sync::RwLock; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + +use crate::statistics::metrics::Metrics; +use crate::statistics::repository::Repository; + +/// All the metrics collected by the tracker. +#[derive(Debug, PartialEq)] +pub struct TrackerMetrics { + /// Domain level metrics. + /// + /// General metrics for all torrents (number of seeders, leechers, etcetera) + pub torrents_metrics: TorrentsMetrics, + + /// Application level metrics. Usage statistics/metrics. + /// + /// Metrics about how the tracker is been used (number of udp announce requests, etcetera) + pub protocol_metrics: Metrics, +} + +/// It returns all the [`TrackerMetrics`] +pub async fn get_metrics( + in_memory_torrent_repository: Arc, + ban_service: Arc>, + stats_repository: Arc, +) -> TrackerMetrics { + let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); + let stats = stats_repository.get_stats().await; + let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); + + TrackerMetrics { + torrents_metrics, + protocol_metrics: Metrics { + // UDP + udp_requests_aborted: stats.udp_requests_aborted, + udp_requests_banned: stats.udp_requests_banned, + udp_banned_ips_total: udp_banned_ips_total as u64, + udp_avg_connect_processing_time_ns: stats.udp_avg_connect_processing_time_ns, + udp_avg_announce_processing_time_ns: stats.udp_avg_announce_processing_time_ns, + udp_avg_scrape_processing_time_ns: stats.udp_avg_scrape_processing_time_ns, + // UDPv4 + udp4_requests: stats.udp4_requests, + udp4_connections_handled: stats.udp4_connections_handled, + udp4_announces_handled: stats.udp4_announces_handled, + udp4_scrapes_handled: stats.udp4_scrapes_handled, + udp4_responses: stats.udp4_responses, + udp4_errors_handled: stats.udp4_errors_handled, + // UDPv6 
+ udp6_requests: stats.udp6_requests, + udp6_connections_handled: stats.udp6_connections_handled, + udp6_announces_handled: stats.udp6_announces_handled, + udp6_scrapes_handled: stats.udp6_scrapes_handled, + udp6_responses: stats.udp6_responses, + udp6_errors_handled: stats.udp6_errors_handled, + }, + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::{self}; + use bittorrent_udp_tracker_core::services::banning::BanService; + use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; + use tokio::sync::RwLock; + use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + use torrust_tracker_test_helpers::configuration; + + use crate::statistics; + use crate::statistics::services::{get_metrics, TrackerMetrics}; + + pub fn tracker_configuration() -> Configuration { + configuration::ephemeral() + } + + #[tokio::test] + async fn the_statistics_service_should_return_the_tracker_metrics() { + let config = tracker_configuration(); + + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + + let (_udp_stats_event_sender, udp_stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + let udp_stats_repository = Arc::new(udp_stats_repository); + + let tracker_metrics = get_metrics( + in_memory_torrent_repository.clone(), + ban_service.clone(), + udp_stats_repository.clone(), + ) + .await; + + assert_eq!( + tracker_metrics, + TrackerMetrics { + torrents_metrics: TorrentsMetrics::default(), + protocol_metrics: statistics::metrics::Metrics::default(), + } + ); + } +} diff --git a/packages/udp-tracker-server/src/statistics/setup.rs b/packages/udp-tracker-server/src/statistics/setup.rs new file mode 100644 index 000000000..d3114a75e --- 
/dev/null +++ b/packages/udp-tracker-server/src/statistics/setup.rs @@ -0,0 +1,54 @@ +//! Setup for the tracker statistics. +//! +//! The [`factory`] function builds the structs needed for handling the tracker metrics. +use crate::statistics; + +/// It builds the structs needed for handling the tracker metrics. +/// +/// It returns: +/// +/// - An statistics event [`Sender`](crate::statistics::event::sender::Sender) that allows you to send events related to statistics. +/// - An statistics [`Repository`](crate::statistics::repository::Repository) which is an in-memory repository for the tracker metrics. +/// +/// When the input argument `tracker_usage_statistics`is false the setup does not run the event listeners, consequently the statistics +/// events are sent are received but not dispatched to the handler. +#[must_use] +pub fn factory( + tracker_usage_statistics: bool, +) -> ( + Option>, + statistics::repository::Repository, +) { + let mut stats_event_sender = None; + + let mut stats_tracker = statistics::keeper::Keeper::new(); + + if tracker_usage_statistics { + stats_event_sender = Some(stats_tracker.run_event_listener()); + } + + (stats_event_sender, stats_tracker.repository) +} + +#[cfg(test)] +mod test { + use super::factory; + + #[tokio::test] + async fn should_not_send_any_event_when_statistics_are_disabled() { + let tracker_usage_statistics = false; + + let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); + + assert!(stats_event_sender.is_none()); + } + + #[tokio::test] + async fn should_send_events_when_statistics_are_enabled() { + let tracker_usage_statistics = true; + + let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); + + assert!(stats_event_sender.is_some()); + } +} From b8d2f762a4a7b23bebe70f31f4dca172b022e9a0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 26 Feb 2025 18:27:22 +0000 Subject: [PATCH 337/802] refactor: [#1319] remove UDP server events from UDP tracker core package Some 
events were moved from the `udp-tracker-core` package to the `udp-tracker-server` package. This commits remmoves the unused events from the `udp-tracker-core`. --- Cargo.lock | 2 + packages/axum-tracker-api-server/Cargo.toml | 1 + .../src/environment.rs | 3 + .../src/v1/context/stats/handlers.rs | 10 +- .../src/v1/context/stats/routes.rs | 3 +- packages/tracker-api-core/Cargo.toml | 1 + packages/tracker-api-core/src/container.rs | 13 +- .../src/statistics/services.rs | 53 +++-- packages/udp-tracker-core/src/container.rs | 14 +- .../udp-tracker-core/src/services/connect.rs | 18 +- .../src/statistics/event/handler.rs | 149 +------------- .../src/statistics/event/mod.rs | 24 --- .../src/statistics/metrics.rs | 39 ---- .../src/statistics/repository.rs | 107 ---------- .../src/statistics/services.rs | 34 +--- packages/udp-tracker-server/src/container.rs | 25 +++ .../udp-tracker-server/src/environment.rs | 5 + .../src/handlers/announce.rs | 192 +++++++++++++----- .../src/handlers/connect.rs | 120 ++++++++--- .../udp-tracker-server/src/handlers/error.rs | 17 +- .../udp-tracker-server/src/handlers/mod.rs | 93 ++++++--- .../udp-tracker-server/src/handlers/scrape.rs | 127 ++++++++---- packages/udp-tracker-server/src/lib.rs | 1 + .../udp-tracker-server/src/server/launcher.rs | 58 ++++-- packages/udp-tracker-server/src/server/mod.rs | 17 +- .../src/server/processor.rs | 29 ++- .../udp-tracker-server/src/server/spawner.rs | 15 +- .../udp-tracker-server/src/server/states.rs | 17 +- .../src/statistics/event/handler.rs | 52 ++--- .../src/statistics/event/mod.rs | 10 +- .../src/statistics/keeper.rs | 8 +- .../src/statistics/services.rs | 7 +- .../tests/server/contract.rs | 8 +- src/app.rs | 5 +- src/bootstrap/jobs/udp_tracker.rs | 20 +- src/container.rs | 46 +++-- 36 files changed, 712 insertions(+), 631 deletions(-) create mode 100644 packages/udp-tracker-server/src/container.rs diff --git a/Cargo.lock b/Cargo.lock index 83afae727..71140b9f7 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -4450,6 +4450,7 @@ dependencies = [ "torrust-tracker-configuration", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-udp-tracker-server", "tower 0.5.2", "tower-http", "tracing", @@ -4525,6 +4526,7 @@ dependencies = [ "torrust-tracker-configuration", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-udp-tracker-server", ] [[package]] diff --git a/packages/axum-tracker-api-server/Cargo.toml b/packages/axum-tracker-api-server/Cargo.toml index 480ee2a54..e1deb9b8a 100644 --- a/packages/axum-tracker-api-server/Cargo.toml +++ b/packages/axum-tracker-api-server/Cargo.toml @@ -38,6 +38,7 @@ torrust-tracker-api-core = { version = "3.0.0-develop", path = "../tracker-api-c torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } tracing = "0" diff --git a/packages/axum-tracker-api-server/src/environment.rs b/packages/axum-tracker-api-server/src/environment.rs index f6d6fb4e4..7390bc659 100644 --- a/packages/axum-tracker-api-server/src/environment.rs +++ b/packages/axum-tracker-api-server/src/environment.rs @@ -12,6 +12,7 @@ use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; +use torrust_udp_tracker_server::container::UdpTrackerServerContainer; use crate::server::{ApiServer, Launcher, Running, Stopped}; @@ -175,11 +176,13 @@ impl EnvContainer { let http_tracker_core_container = 
HttpTrackerCoreContainer::initialize_from(&tracker_core_container, &http_tracker_config); let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from(&tracker_core_container, &udp_tracker_config); + let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); let tracker_http_api_core_container = TrackerHttpApiCoreContainer::initialize_from( &tracker_core_container, &http_tracker_core_container, &udp_tracker_core_container, + &udp_tracker_server_container, &http_api_config, ); diff --git a/packages/axum-tracker-api-server/src/v1/context/stats/handlers.rs b/packages/axum-tracker-api-server/src/v1/context/stats/handlers.rs index e0149cb23..5e23211a6 100644 --- a/packages/axum-tracker-api-server/src/v1/context/stats/handlers.rs +++ b/packages/axum-tracker-api-server/src/v1/context/stats/handlers.rs @@ -44,10 +44,18 @@ pub async fn get_stats_handler( Arc>, Arc, Arc, + Arc, )>, params: Query, ) -> Response { - let metrics = get_metrics(state.0.clone(), state.1.clone(), state.2.clone(), state.3.clone()).await; + let metrics = get_metrics( + state.0.clone(), + state.1.clone(), + state.2.clone(), + state.3.clone(), + state.4.clone(), + ) + .await; match params.0.format { Some(format) => match format { diff --git a/packages/axum-tracker-api-server/src/v1/context/stats/routes.rs b/packages/axum-tracker-api-server/src/v1/context/stats/routes.rs index e73de8625..6caaf13bf 100644 --- a/packages/axum-tracker-api-server/src/v1/context/stats/routes.rs +++ b/packages/axum-tracker-api-server/src/v1/context/stats/routes.rs @@ -19,7 +19,8 @@ pub fn add(prefix: &str, router: Router, http_api_container: &Arc>, - pub udp_stats_repository: Arc, + pub udp_core_stats_repository: Arc, + + // todo: replace with UdpTrackerServerContainer + pub udp_server_stats_repository: Arc, pub http_api_config: Arc, } @@ -39,11 +43,13 @@ impl TrackerHttpApiCoreContainer { let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(core_config)); let 
http_tracker_core_container = HttpTrackerCoreContainer::initialize_from(&tracker_core_container, http_tracker_config); let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from(&tracker_core_container, udp_tracker_config); + let udp_tracker_server_container = UdpTrackerServerContainer::initialize(core_config); Self::initialize_from( &tracker_core_container, &http_tracker_core_container, &udp_tracker_core_container, + &udp_tracker_server_container, http_api_config, ) } @@ -53,6 +59,7 @@ impl TrackerHttpApiCoreContainer { tracker_core_container: &Arc, http_tracker_core_container: &Arc, udp_tracker_core_container: &Arc, + udp_tracker_server_container: &Arc, http_api_config: &Arc, ) -> Arc { Arc::new(TrackerHttpApiCoreContainer { @@ -64,7 +71,9 @@ impl TrackerHttpApiCoreContainer { http_stats_repository: http_tracker_core_container.http_stats_repository.clone(), ban_service: udp_tracker_core_container.ban_service.clone(), - udp_stats_repository: udp_tracker_core_container.udp_stats_repository.clone(), + udp_core_stats_repository: udp_tracker_core_container.udp_core_stats_repository.clone(), + + udp_server_stats_repository: udp_tracker_server_container.udp_server_stats_repository.clone(), http_api_config: http_api_config.clone(), }) diff --git a/packages/tracker-api-core/src/statistics/services.rs b/packages/tracker-api-core/src/statistics/services.rs index 178c8ca0f..c4dfcf533 100644 --- a/packages/tracker-api-core/src/statistics/services.rs +++ b/packages/tracker-api-core/src/statistics/services.rs @@ -2,9 +2,10 @@ use std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::services::banning::BanService; -use bittorrent_udp_tracker_core::{self, statistics}; +use bittorrent_udp_tracker_core::{self, statistics as udp_core_statistics}; use tokio::sync::RwLock; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_udp_tracker_server::statistics as 
udp_server_statistics; use crate::statistics::metrics::Metrics; @@ -27,12 +28,14 @@ pub async fn get_metrics( in_memory_torrent_repository: Arc, ban_service: Arc>, http_stats_repository: Arc, - udp_stats_repository: Arc, + udp_core_stats_repository: Arc, + udp_server_stats_repository: Arc, ) -> TrackerMetrics { let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); let http_stats = http_stats_repository.get_stats().await; - let udp_stats = udp_stats_repository.get_stats().await; + let udp_core_stats = udp_core_stats_repository.get_stats().await; + let udp_server_stats = udp_server_stats_repository.get_stats().await; TrackerMetrics { torrents_metrics, @@ -46,26 +49,26 @@ pub async fn get_metrics( tcp6_announces_handled: http_stats.tcp6_announces_handled, tcp6_scrapes_handled: http_stats.tcp6_scrapes_handled, // UDP - udp_requests_aborted: udp_stats.udp_requests_aborted, - udp_requests_banned: udp_stats.udp_requests_banned, + udp_requests_aborted: udp_server_stats.udp_requests_aborted, + udp_requests_banned: udp_server_stats.udp_requests_banned, udp_banned_ips_total: udp_banned_ips_total as u64, - udp_avg_connect_processing_time_ns: udp_stats.udp_avg_connect_processing_time_ns, - udp_avg_announce_processing_time_ns: udp_stats.udp_avg_announce_processing_time_ns, - udp_avg_scrape_processing_time_ns: udp_stats.udp_avg_scrape_processing_time_ns, + udp_avg_connect_processing_time_ns: udp_server_stats.udp_avg_connect_processing_time_ns, + udp_avg_announce_processing_time_ns: udp_server_stats.udp_avg_announce_processing_time_ns, + udp_avg_scrape_processing_time_ns: udp_server_stats.udp_avg_scrape_processing_time_ns, // UDPv4 - udp4_requests: udp_stats.udp4_requests, - udp4_connections_handled: udp_stats.udp4_connections_handled, - udp4_announces_handled: udp_stats.udp4_announces_handled, - udp4_scrapes_handled: udp_stats.udp4_scrapes_handled, - udp4_responses: 
udp_stats.udp4_responses, - udp4_errors_handled: udp_stats.udp4_errors_handled, + udp4_requests: udp_server_stats.udp4_requests, + udp4_connections_handled: udp_core_stats.udp4_connections_handled, + udp4_announces_handled: udp_core_stats.udp4_announces_handled, + udp4_scrapes_handled: udp_core_stats.udp4_scrapes_handled, + udp4_responses: udp_server_stats.udp4_responses, + udp4_errors_handled: udp_server_stats.udp4_errors_handled, // UDPv6 - udp6_requests: udp_stats.udp6_requests, - udp6_connections_handled: udp_stats.udp6_connections_handled, - udp6_announces_handled: udp_stats.udp6_announces_handled, - udp6_scrapes_handled: udp_stats.udp6_scrapes_handled, - udp6_responses: udp_stats.udp6_responses, - udp6_errors_handled: udp_stats.udp6_errors_handled, + udp6_requests: udp_server_stats.udp6_requests, + udp6_connections_handled: udp_core_stats.udp6_connections_handled, + udp6_announces_handled: udp_core_stats.udp6_announces_handled, + udp6_scrapes_handled: udp_core_stats.udp6_scrapes_handled, + udp6_responses: udp_server_stats.udp6_responses, + udp6_errors_handled: udp_server_stats.udp6_errors_handled, }, } } @@ -97,21 +100,27 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - // HTTP stats + // HTTP core stats let (_http_stats_event_sender, http_stats_repository) = bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); let http_stats_repository = Arc::new(http_stats_repository); - // UDP stats + // UDP core stats let (_udp_stats_event_sender, udp_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); let udp_stats_repository = Arc::new(udp_stats_repository); + // UDP server stats + let (_udp_server_stats_event_sender, udp_server_stats_repository) = + 
torrust_udp_tracker_server::statistics::setup::factory(config.core.tracker_usage_statistics); + let udp_server_stats_repository = Arc::new(udp_server_stats_repository); + let tracker_metrics = get_metrics( in_memory_torrent_repository.clone(), ban_service.clone(), http_stats_repository.clone(), udp_stats_repository.clone(), + udp_server_stats_repository.clone(), ) .await; diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index 62378e0af..1467134c5 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -18,8 +18,8 @@ pub struct UdpTrackerCoreContainer { pub whitelist_authorization: Arc, pub udp_tracker_config: Arc, - pub udp_stats_event_sender: Arc>>, - pub udp_stats_repository: Arc, + pub udp_core_stats_event_sender: Arc>>, + pub udp_core_stats_repository: Arc, pub ban_service: Arc>, } @@ -35,10 +35,10 @@ impl UdpTrackerCoreContainer { tracker_core_container: &Arc, udp_tracker_config: &Arc, ) -> Arc { - let (udp_stats_event_sender, udp_stats_repository) = + let (udp_core_stats_event_sender, udp_core_stats_repository) = statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - let udp_stats_repository = Arc::new(udp_stats_repository); + let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let udp_core_stats_repository = Arc::new(udp_core_stats_repository); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); @@ -49,8 +49,8 @@ impl UdpTrackerCoreContainer { whitelist_authorization: tracker_core_container.whitelist_authorization.clone(), udp_tracker_config: udp_tracker_config.clone(), - udp_stats_event_sender: udp_stats_event_sender.clone(), - udp_stats_repository: udp_stats_repository.clone(), + udp_core_stats_event_sender: udp_core_stats_event_sender.clone(), + udp_core_stats_repository: 
udp_core_stats_repository.clone(), ban_service: ban_service.clone(), }) } diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index 9cb419bbc..3354595e5 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -55,10 +55,10 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { - let (udp_stats_event_sender, _udp_stats_repository) = statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + let (udp_core_stats_event_sender, _udp_core_stats_repository) = statistics::setup::factory(false); + let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); - let response = handle_connect(sample_ipv4_remote_addr(), &udp_stats_event_sender, sample_issue_time()).await; + let response = handle_connect(sample_ipv4_remote_addr(), &udp_core_stats_event_sender, sample_issue_time()).await; assert_eq!( response, @@ -68,10 +68,10 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id() { - let (udp_stats_event_sender, _udp_stats_repository) = statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + let (udp_core_stats_event_sender, _udp_core_stats_repository) = statistics::setup::factory(false); + let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); - let response = handle_connect(sample_ipv4_remote_addr(), &udp_stats_event_sender, sample_issue_time()).await; + let response = handle_connect(sample_ipv4_remote_addr(), &udp_core_stats_event_sender, sample_issue_time()).await; assert_eq!( response, @@ -81,10 +81,10 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { - let (udp_stats_event_sender, _udp_stats_repository) = statistics::setup::factory(false); - let udp_stats_event_sender = 
Arc::new(udp_stats_event_sender); + let (udp_core_stats_event_sender, _udp_core_stats_repository) = statistics::setup::factory(false); + let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); - let response = handle_connect(sample_ipv6_remote_addr(), &udp_stats_event_sender, sample_issue_time()).await; + let response = handle_connect(sample_ipv6_remote_addr(), &udp_core_stats_event_sender, sample_issue_time()).await; assert_eq!( response, diff --git a/packages/udp-tracker-core/src/statistics/event/handler.rs b/packages/udp-tracker-core/src/statistics/event/handler.rs index 91be32ad1..096059b91 100644 --- a/packages/udp-tracker-core/src/statistics/event/handler.rs +++ b/packages/udp-tracker-core/src/statistics/event/handler.rs @@ -1,20 +1,9 @@ -use crate::statistics::event::{Event, UdpResponseKind}; +use crate::statistics::event::Event; use crate::statistics::repository::Repository; pub async fn handle_event(event: Event, stats_repository: &Repository) { match event { - // UDP - Event::UdpRequestAborted => { - stats_repository.increase_udp_requests_aborted().await; - } - Event::UdpRequestBanned => { - stats_repository.increase_udp_requests_banned().await; - } - // UDP4 - Event::Udp4Request => { - stats_repository.increase_udp4_requests().await; - } Event::Udp4Connect => { stats_repository.increase_udp4_connections().await; } @@ -24,39 +13,8 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { Event::Udp4Scrape => { stats_repository.increase_udp4_scrapes().await; } - Event::Udp4Response { - kind, - req_processing_time, - } => { - stats_repository.increase_udp4_responses().await; - - match kind { - UdpResponseKind::Connect => { - stats_repository - .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) - .await; - } - UdpResponseKind::Announce => { - stats_repository - .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) - .await; - } - UdpResponseKind::Scrape => { - stats_repository - 
.recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) - .await; - } - UdpResponseKind::Error => {} - } - } - Event::Udp4Error => { - stats_repository.increase_udp4_errors().await; - } // UDP6 - Event::Udp6Request => { - stats_repository.increase_udp6_requests().await; - } Event::Udp6Connect => { stats_repository.increase_udp6_connections().await; } @@ -66,15 +24,6 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { Event::Udp6Scrape => { stats_repository.increase_udp6_scrapes().await; } - Event::Udp6Response { - kind: _, - req_processing_time: _, - } => { - stats_repository.increase_udp6_responses().await; - } - Event::Udp6Error => { - stats_repository.increase_udp6_errors().await; - } } tracing::debug!("stats: {:?}", stats_repository.get_stats().await); @@ -151,100 +100,4 @@ mod tests { assert_eq!(stats.udp6_scrapes_handled, 1); } - - #[tokio::test] - async fn should_increase_the_udp_abort_counter_when_it_receives_a_udp_abort_event() { - let stats_repository = Repository::new(); - - handle_event(Event::UdpRequestAborted, &stats_repository).await; - let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_aborted, 1); - } - #[tokio::test] - async fn should_increase_the_udp_ban_counter_when_it_receives_a_udp_banned_event() { - let stats_repository = Repository::new(); - - handle_event(Event::UdpRequestBanned, &stats_repository).await; - let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_banned, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_requests_counter_when_it_receives_a_udp4_request_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Udp4Request, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_requests, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_responses_counter_when_it_receives_a_udp4_response_event() { - let stats_repository = Repository::new(); - - 
handle_event( - Event::Udp4Response { - kind: crate::statistics::event::UdpResponseKind::Announce, - req_processing_time: std::time::Duration::from_secs(1), - }, - &stats_repository, - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_responses, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_errors_counter_when_it_receives_a_udp4_error_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Udp4Error, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_errors_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp6_requests_counter_when_it_receives_a_udp6_request_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Udp6Request, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_requests, 1); - } - - #[tokio::test] - async fn should_increase_the_udp6_response_counter_when_it_receives_a_udp6_response_event() { - let stats_repository = Repository::new(); - - handle_event( - Event::Udp6Response { - kind: crate::statistics::event::UdpResponseKind::Announce, - req_processing_time: std::time::Duration::from_secs(1), - }, - &stats_repository, - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_responses, 1); - } - #[tokio::test] - async fn should_increase_the_udp6_errors_counter_when_it_receives_a_udp6_error_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Udp6Error, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_errors_handled, 1); - } } diff --git a/packages/udp-tracker-core/src/statistics/event/mod.rs b/packages/udp-tracker-core/src/statistics/event/mod.rs index 6a5343933..bfc733657 100644 --- a/packages/udp-tracker-core/src/statistics/event/mod.rs +++ b/packages/udp-tracker-core/src/statistics/event/mod.rs @@ -1,5 +1,3 @@ -use 
std::time::Duration; - pub mod handler; pub mod listener; pub mod sender; @@ -16,32 +14,10 @@ pub mod sender; pub enum Event { // code-review: consider one single event for request type with data: Event::Announce { scheme: HTTPorUDP, ip_version: V4orV6 } // Attributes are enums too. - UdpRequestAborted, - UdpRequestBanned, - Udp4Request, Udp4Connect, Udp4Announce, Udp4Scrape, - Udp4Response { - kind: UdpResponseKind, - req_processing_time: Duration, - }, - Udp4Error, - Udp6Request, Udp6Connect, Udp6Announce, Udp6Scrape, - Udp6Response { - kind: UdpResponseKind, - req_processing_time: Duration, - }, - Udp6Error, -} - -#[derive(Debug, PartialEq, Eq)] -pub enum UdpResponseKind { - Connect, - Announce, - Scrape, - Error, } diff --git a/packages/udp-tracker-core/src/statistics/metrics.rs b/packages/udp-tracker-core/src/statistics/metrics.rs index 23357aab6..1b3805288 100644 --- a/packages/udp-tracker-core/src/statistics/metrics.rs +++ b/packages/udp-tracker-core/src/statistics/metrics.rs @@ -8,29 +8,6 @@ /// and also for each IP version used by the peers: IPv4 and IPv6. #[derive(Debug, PartialEq, Default)] pub struct Metrics { - // UDP - /// Total number of UDP (UDP tracker) requests aborted. - pub udp_requests_aborted: u64, - - /// Total number of UDP (UDP tracker) requests banned. - pub udp_requests_banned: u64, - - /// Total number of banned IPs. - pub udp_banned_ips_total: u64, - - /// Average rounded time spent processing UDP connect requests. - pub udp_avg_connect_processing_time_ns: u64, - - /// Average rounded time spent processing UDP announce requests. - pub udp_avg_announce_processing_time_ns: u64, - - /// Average rounded time spent processing UDP scrape requests. - pub udp_avg_scrape_processing_time_ns: u64, - - // UDPv4 - /// Total number of UDP (UDP tracker) requests from IPv4 peers. - pub udp4_requests: u64, - /// Total number of UDP (UDP tracker) connections from IPv4 peers. 
pub udp4_connections_handled: u64, @@ -40,16 +17,6 @@ pub struct Metrics { /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. pub udp4_scrapes_handled: u64, - /// Total number of UDP (UDP tracker) responses from IPv4 peers. - pub udp4_responses: u64, - - /// Total number of UDP (UDP tracker) `error` requests from IPv4 peers. - pub udp4_errors_handled: u64, - - // UDPv6 - /// Total number of UDP (UDP tracker) requests from IPv6 peers. - pub udp6_requests: u64, - /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. pub udp6_connections_handled: u64, @@ -58,10 +25,4 @@ pub struct Metrics { /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. pub udp6_scrapes_handled: u64, - - /// Total number of UDP (UDP tracker) responses from IPv6 peers. - pub udp6_responses: u64, - - /// Total number of UDP (UDP tracker) `error` requests from IPv6 peers. - pub udp6_errors_handled: u64, } diff --git a/packages/udp-tracker-core/src/statistics/repository.rs b/packages/udp-tracker-core/src/statistics/repository.rs index 22e793036..f7609e5c2 100644 --- a/packages/udp-tracker-core/src/statistics/repository.rs +++ b/packages/udp-tracker-core/src/statistics/repository.rs @@ -1,5 +1,4 @@ use std::sync::Arc; -use std::time::Duration; use tokio::sync::{RwLock, RwLockReadGuard}; @@ -29,24 +28,6 @@ impl Repository { self.stats.read().await } - pub async fn increase_udp_requests_aborted(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp_requests_aborted += 1; - drop(stats_lock); - } - - pub async fn increase_udp_requests_banned(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp_requests_banned += 1; - drop(stats_lock); - } - - pub async fn increase_udp4_requests(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_requests += 1; - drop(stats_lock); - } - pub async fn increase_udp4_connections(&self) { let mut stats_lock = self.stats.write().await; 
stats_lock.udp4_connections_handled += 1; @@ -65,82 +46,6 @@ impl Repository { drop(stats_lock); } - pub async fn increase_udp4_responses(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_responses += 1; - drop(stats_lock); - } - - pub async fn increase_udp4_errors(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_errors_handled += 1; - drop(stats_lock); - } - - #[allow(clippy::cast_precision_loss)] - #[allow(clippy::cast_possible_truncation)] - #[allow(clippy::cast_sign_loss)] - pub async fn recalculate_udp_avg_connect_processing_time_ns(&self, req_processing_time: Duration) { - let mut stats_lock = self.stats.write().await; - - let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_connections_handled = (stats_lock.udp4_connections_handled + stats_lock.udp6_connections_handled) as f64; - - let previous_avg = stats_lock.udp_avg_connect_processing_time_ns; - - // Moving average: https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_connections_handled; - - stats_lock.udp_avg_connect_processing_time_ns = new_avg.ceil() as u64; - - drop(stats_lock); - } - - #[allow(clippy::cast_precision_loss)] - #[allow(clippy::cast_possible_truncation)] - #[allow(clippy::cast_sign_loss)] - pub async fn recalculate_udp_avg_announce_processing_time_ns(&self, req_processing_time: Duration) { - let mut stats_lock = self.stats.write().await; - - let req_processing_time = req_processing_time.as_nanos() as f64; - - let udp_announces_handled = (stats_lock.udp4_announces_handled + stats_lock.udp6_announces_handled) as f64; - - let previous_avg = stats_lock.udp_avg_announce_processing_time_ns; - - // Moving average: https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_announces_handled; - - stats_lock.udp_avg_announce_processing_time_ns = new_avg.ceil() as u64; - - 
drop(stats_lock); - } - - #[allow(clippy::cast_precision_loss)] - #[allow(clippy::cast_possible_truncation)] - #[allow(clippy::cast_sign_loss)] - pub async fn recalculate_udp_avg_scrape_processing_time_ns(&self, req_processing_time: Duration) { - let mut stats_lock = self.stats.write().await; - - let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_scrapes_handled = (stats_lock.udp4_scrapes_handled + stats_lock.udp6_scrapes_handled) as f64; - - let previous_avg = stats_lock.udp_avg_scrape_processing_time_ns; - - // Moving average: https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_scrapes_handled; - - stats_lock.udp_avg_scrape_processing_time_ns = new_avg.ceil() as u64; - - drop(stats_lock); - } - - pub async fn increase_udp6_requests(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_requests += 1; - drop(stats_lock); - } - pub async fn increase_udp6_connections(&self) { let mut stats_lock = self.stats.write().await; stats_lock.udp6_connections_handled += 1; @@ -158,16 +63,4 @@ impl Repository { stats_lock.udp6_scrapes_handled += 1; drop(stats_lock); } - - pub async fn increase_udp6_responses(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_responses += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_errors(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_errors_handled += 1; - drop(stats_lock); - } } diff --git a/packages/udp-tracker-core/src/statistics/services.rs b/packages/udp-tracker-core/src/statistics/services.rs index 486aaac06..7ffa127e6 100644 --- a/packages/udp-tracker-core/src/statistics/services.rs +++ b/packages/udp-tracker-core/src/statistics/services.rs @@ -39,10 +39,8 @@ use std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use tokio::sync::RwLock; use 
torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use crate::services::banning::BanService; use crate::statistics::metrics::Metrics; use crate::statistics::repository::Repository; @@ -63,37 +61,22 @@ pub struct TrackerMetrics { /// It returns all the [`TrackerMetrics`] pub async fn get_metrics( in_memory_torrent_repository: Arc, - ban_service: Arc>, stats_repository: Arc, ) -> TrackerMetrics { let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); let stats = stats_repository.get_stats().await; - let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); TrackerMetrics { torrents_metrics, protocol_metrics: Metrics { - // UDP - udp_requests_aborted: stats.udp_requests_aborted, - udp_requests_banned: stats.udp_requests_banned, - udp_banned_ips_total: udp_banned_ips_total as u64, - udp_avg_connect_processing_time_ns: stats.udp_avg_connect_processing_time_ns, - udp_avg_announce_processing_time_ns: stats.udp_avg_announce_processing_time_ns, - udp_avg_scrape_processing_time_ns: stats.udp_avg_scrape_processing_time_ns, // UDPv4 - udp4_requests: stats.udp4_requests, udp4_connections_handled: stats.udp4_connections_handled, udp4_announces_handled: stats.udp4_announces_handled, udp4_scrapes_handled: stats.udp4_scrapes_handled, - udp4_responses: stats.udp4_responses, - udp4_errors_handled: stats.udp4_errors_handled, // UDPv6 - udp6_requests: stats.udp6_requests, udp6_connections_handled: stats.udp6_connections_handled, udp6_announces_handled: stats.udp6_announces_handled, udp6_scrapes_handled: stats.udp6_scrapes_handled, - udp6_responses: stats.udp6_responses, - udp6_errors_handled: stats.udp6_errors_handled, }, } } @@ -104,14 +87,12 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::{self}; - use tokio::sync::RwLock; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use 
torrust_tracker_test_helpers::configuration; - use crate::services::banning::BanService; + use crate::statistics; use crate::statistics::services::{get_metrics, TrackerMetrics}; - use crate::{statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() @@ -122,17 +103,12 @@ mod tests { let config = tracker_configuration(); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let (_udp_stats_event_sender, udp_stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - let udp_stats_repository = Arc::new(udp_stats_repository); + let (_udp_core_stats_event_sender, udp_core_stats_repository) = + crate::statistics::setup::factory(config.core.tracker_usage_statistics); + let udp_core_stats_repository = Arc::new(udp_core_stats_repository); - let tracker_metrics = get_metrics( - in_memory_torrent_repository.clone(), - ban_service.clone(), - udp_stats_repository.clone(), - ) - .await; + let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), udp_core_stats_repository.clone()).await; assert_eq!( tracker_metrics, diff --git a/packages/udp-tracker-server/src/container.rs b/packages/udp-tracker-server/src/container.rs new file mode 100644 index 000000000..36ad0e671 --- /dev/null +++ b/packages/udp-tracker-server/src/container.rs @@ -0,0 +1,25 @@ +use std::sync::Arc; + +use torrust_tracker_configuration::Core; + +use crate::statistics; + +pub struct UdpTrackerServerContainer { + pub udp_server_stats_event_sender: Arc>>, + pub udp_server_stats_repository: Arc, +} + +impl UdpTrackerServerContainer { + #[must_use] + pub fn initialize(core_config: &Arc) -> Arc { + let (udp_server_stats_event_sender, udp_server_stats_repository) = + statistics::setup::factory(core_config.tracker_usage_statistics); + let udp_server_stats_event_sender = 
Arc::new(udp_server_stats_event_sender); + let udp_server_stats_repository = Arc::new(udp_server_stats_repository); + + Arc::new(Self { + udp_server_stats_event_sender: udp_server_stats_event_sender.clone(), + udp_server_stats_repository: udp_server_stats_repository.clone(), + }) + } +} diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 0ab3bdea1..c6ec98290 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -8,6 +8,7 @@ use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration, DEFAULT_TIMEOUT}; use torrust_tracker_primitives::peer; +use crate::container::UdpTrackerServerContainer; use crate::server::spawner::Spawner; use crate::server::states::{Running, Stopped}; use crate::server::Server; @@ -71,6 +72,7 @@ impl Environment { .server .start( self.container.udp_tracker_core_container.clone(), + self.container.udp_tracker_server_container.clone(), self.registar.give_form(), cookie_lifetime, ) @@ -115,6 +117,7 @@ impl Environment { pub struct EnvContainer { pub tracker_core_container: Arc, pub udp_tracker_core_container: Arc, + pub udp_tracker_server_container: Arc, } impl EnvContainer { @@ -129,10 +132,12 @@ impl EnvContainer { let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from(&tracker_core_container, &udp_tracker_config); + let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); Self { tracker_core_container, udp_tracker_core_container, + udp_tracker_server_container, } } } diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 7e3b8e7dd..97ce6ba4a 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -10,13 
+10,15 @@ use aquatic_udp_protocol::{ use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::whitelist; -use bittorrent_udp_tracker_core::{services, statistics}; +use bittorrent_udp_tracker_core::{services, statistics as core_statistics}; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; use tracing::{instrument, Level}; use zerocopy::network_endian::I32; use crate::error::Error; +use crate::statistics as server_statistics; +use crate::statistics::event::UdpResponseKind; /// It handles the `Announce` request. /// @@ -24,14 +26,15 @@ use crate::error::Error; /// /// If a error happens in the `handle_announce` function, it will just return the `ServerError`. #[allow(clippy::too_many_arguments)] -#[instrument(fields(transaction_id, connection_id, info_hash), skip(announce_handler, whitelist_authorization, opt_udp_stats_event_sender), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id, connection_id, info_hash), skip(announce_handler, whitelist_authorization, opt_udp_core_stats_event_sender, opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_announce( remote_addr: SocketAddr, request: &AnnounceRequest, core_config: &Arc, announce_handler: &Arc, whitelist_authorization: &Arc, - opt_udp_stats_event_sender: &Arc>>, + opt_udp_core_stats_event_sender: &Arc>>, + opt_udp_server_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { tracing::Span::current() @@ -41,12 +44,31 @@ pub async fn handle_announce( tracing::trace!("handle announce"); + if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { + match remote_addr.ip() { + IpAddr::V4(_) => { + udp_server_stats_event_sender + .send_event(server_statistics::event::Event::Udp4Request { + kind: UdpResponseKind::Announce, + }) + .await; + } + IpAddr::V6(_) => { + udp_server_stats_event_sender + 
.send_event(server_statistics::event::Event::Udp6Request { + kind: UdpResponseKind::Announce, + }) + .await; + } + } + } + let announce_data = services::announce::handle_announce( remote_addr, request, announce_handler, whitelist_authorization, - opt_udp_stats_event_sender, + opt_udp_core_stats_event_sender, cookie_valid_range, ) .await @@ -205,7 +227,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; - use bittorrent_udp_tracker_core::statistics; + use bittorrent_udp_tracker_core::statistics as core_statistics; use mockall::predicate::eq; use torrust_tracker_configuration::Core; @@ -214,12 +236,15 @@ mod tests { use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_socket_address, - sample_issue_time, MockUdpStatsEventSender, TorrentPeerBuilder, + sample_issue_time, MockUdpCoreStatsEventSender, MockUdpServerStatsEventSender, TorrentPeerBuilder, }; + use crate::statistics as server_statistics; + use crate::statistics::event::UdpResponseKind; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = + initialize_core_tracker_services_for_public_tracker(); let client_ip = Ipv4Addr::new(126, 0, 0, 1); let client_port = 8080; @@ -242,7 +267,8 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, + &core_udp_tracker_services.udp_core_stats_event_sender, + 
&server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -263,7 +289,8 @@ mod tests { #[tokio::test] async fn the_announced_peer_should_not_be_included_in_the_response() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = + initialize_core_tracker_services_for_public_tracker(); let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); @@ -277,7 +304,8 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, + &core_udp_tracker_services.udp_core_stats_event_sender, + &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -304,7 +332,8 @@ mod tests { // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): // "Do note that most trackers will only honor the IP address field under limited circumstances." 
- let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = + initialize_core_tracker_services_for_public_tracker(); let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); @@ -330,7 +359,8 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, + &core_udp_tracker_services.udp_core_stats_event_sender, + &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -364,9 +394,12 @@ mod tests { announce_handler: Arc, whitelist_authorization: Arc, ) -> Response { - let (udp_stats_event_sender, _udp_stats_repository) = + let (udp_core_stats_event_sender, _udp_core_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + + let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); + let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() @@ -379,7 +412,8 @@ mod tests { &core_config, &announce_handler, &whitelist_authorization, - &udp_stats_event_sender, + &udp_core_stats_event_sender, + &udp_server_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -388,7 +422,8 @@ mod tests { #[tokio::test] async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { - let (core_tracker_services, _core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, 
_core_udp_tracker_services, _server_udp_tracker_services) = + initialize_core_tracker_services_for_public_tracker(); add_a_torrent_peer_using_ipv6(&core_tracker_services.in_memory_torrent_repository); @@ -410,16 +445,27 @@ mod tests { #[tokio::test] async fn should_send_the_upd4_announce_event() { - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock + let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); + udp_core_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Udp4Announce)) + .with(eq(core_statistics::event::Event::Udp4Announce)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + let udp_core_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); - let (core_tracker_services, _core_udp_tracker_services) = + let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); + udp_server_stats_event_sender_mock + .expect_send_event() + .with(eq(server_statistics::event::Event::Udp4Request { + kind: UdpResponseKind::Announce, + })) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_server_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); + + let (core_tracker_services, _core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); handle_announce( @@ -428,7 +474,8 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, - &udp_stats_event_sender, + &udp_core_stats_event_sender, + &udp_server_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -451,7 +498,7 @@ mod tests { #[tokio::test] async fn 
the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { - let (core_tracker_services, core_udp_tracker_services) = + let (core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); let client_ip = Ipv4Addr::new(127, 0, 0, 1); @@ -475,7 +522,8 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, + &core_udp_tracker_services.udp_core_stats_event_sender, + &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -512,7 +560,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; - use bittorrent_udp_tracker_core::statistics; + use bittorrent_udp_tracker_core::statistics as core_statistics; use mockall::predicate::eq; use torrust_tracker_configuration::Core; @@ -521,12 +569,15 @@ mod tests { use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, - sample_issue_time, MockUdpStatsEventSender, TorrentPeerBuilder, + sample_issue_time, MockUdpCoreStatsEventSender, MockUdpServerStatsEventSender, TorrentPeerBuilder, }; + use crate::statistics as server_statistics; + use crate::statistics::event::UdpResponseKind; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = + initialize_core_tracker_services_for_public_tracker(); let client_ip_v4 = 
Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); @@ -550,7 +601,8 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, + &core_udp_tracker_services.udp_core_stats_event_sender, + &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -571,7 +623,8 @@ mod tests { #[tokio::test] async fn the_announced_peer_should_not_be_included_in_the_response() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = + initialize_core_tracker_services_for_public_tracker(); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); @@ -588,7 +641,8 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, + &core_udp_tracker_services.udp_core_stats_event_sender, + &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -615,7 +669,8 @@ mod tests { // From the BEP 15 (https://www.bittorrent.org/beps/bep_0015.html): // "Do note that most trackers will only honor the IP address field under limited circumstances." 
- let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, core_udp_tracker_services, server_udp_tracker_service) = + initialize_core_tracker_services_for_public_tracker(); let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); @@ -641,7 +696,8 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_stats_event_sender, + &core_udp_tracker_services.udp_core_stats_event_sender, + &server_udp_tracker_service.udp_server_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -675,9 +731,12 @@ mod tests { announce_handler: Arc, whitelist_authorization: Arc, ) -> Response { - let (udp_stats_event_sender, _udp_stats_repository) = + let (udp_core_stats_event_sender, _udp_core_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + + let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); + let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); @@ -693,7 +752,8 @@ mod tests { &core_config, &announce_handler, &whitelist_authorization, - &udp_stats_event_sender, + &udp_core_stats_event_sender, + &udp_server_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -702,7 +762,8 @@ mod tests { #[tokio::test] async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { - let (core_tracker_services, _core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, _core_udp_tracker_services, 
_server_udp_tracker_services) = + initialize_core_tracker_services_for_public_tracker(); add_a_torrent_peer_using_ipv4(&core_tracker_services.in_memory_torrent_repository); @@ -724,16 +785,27 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_announce_event() { - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock + let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); + udp_core_stats_event_sender_mock + .expect_send_event() + .with(eq(core_statistics::event::Event::Udp6Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_core_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); + + let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); + udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Udp6Announce)) + .with(eq(server_statistics::event::Event::Udp6Request { + kind: UdpResponseKind::Announce, + })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + let udp_server_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); - let (core_tracker_services, _core_udp_tracker_services) = + let (core_tracker_services, _core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); let remote_addr = sample_ipv6_remote_addr(); @@ -748,7 +820,8 @@ mod tests { &core_tracker_services.core_config, &core_tracker_services.announce_handler, &core_tracker_services.whitelist_authorization, - &udp_stats_event_sender, + &udp_core_stats_event_sender, + &udp_server_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -768,14 +841,17 @@ mod tests { use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use 
bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; - use bittorrent_udp_tracker_core::{self, statistics}; + use bittorrent_udp_tracker_core::{self, statistics as core_statistics}; use mockall::predicate::eq; use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::handlers::handle_announce; use crate::handlers::tests::{ - sample_cookie_valid_range, sample_issue_time, MockUdpStatsEventSender, TrackerConfigurationBuilder, + sample_cookie_valid_range, sample_issue_time, MockUdpCoreStatsEventSender, MockUdpServerStatsEventSender, + TrackerConfigurationBuilder, }; + use crate::statistics as server_statistics; + use crate::statistics::event::UdpResponseKind; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { @@ -788,14 +864,25 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock + let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); + udp_core_stats_event_sender_mock + .expect_send_event() + .with(eq(core_statistics::event::Event::Udp6Announce)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_core_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); + + let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); + udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Udp6Announce)) + .with(eq(server_statistics::event::Event::Udp6Request { + kind: UdpResponseKind::Announce, + })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = - 
Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + let udp_server_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, @@ -832,7 +919,8 @@ mod tests { &core_config, &announce_handler, &whitelist_authorization, - &udp_stats_event_sender, + &udp_core_stats_event_sender, + &udp_server_stats_event_sender, sample_cookie_valid_range(), ) .await diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index d1c3a05d8..be6dc45d4 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -1,23 +1,46 @@ //! UDP tracker connect handler. -use std::net::SocketAddr; +use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, ConnectionId, Response}; -use bittorrent_udp_tracker_core::{services, statistics}; +use bittorrent_udp_tracker_core::{services, statistics as core_statistics}; use tracing::{instrument, Level}; +use crate::statistics as server_statistics; +use crate::statistics::event::UdpResponseKind; + /// It handles the `Connect` request. 
-#[instrument(fields(transaction_id), skip(opt_udp_stats_event_sender), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id), skip(opt_udp_core_stats_event_sender, opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_connect( remote_addr: SocketAddr, request: &ConnectRequest, - opt_udp_stats_event_sender: &Arc>>, + opt_udp_core_stats_event_sender: &Arc>>, + opt_udp_server_stats_event_sender: &Arc>>, cookie_issue_time: f64, ) -> Response { tracing::Span::current().record("transaction_id", request.transaction_id.0.to_string()); tracing::trace!("handle connect"); - let connection_id = services::connect::handle_connect(remote_addr, opt_udp_stats_event_sender, cookie_issue_time).await; + if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { + match remote_addr.ip() { + IpAddr::V4(_) => { + udp_server_stats_event_sender + .send_event(server_statistics::event::Event::Udp4Request { + kind: UdpResponseKind::Connect, + }) + .await; + } + IpAddr::V6(_) => { + udp_server_stats_event_sender + .send_event(server_statistics::event::Event::Udp6Request { + kind: UdpResponseKind::Connect, + }) + .await; + } + } + } + + let connection_id = services::connect::handle_connect(remote_addr, opt_udp_core_stats_event_sender, cookie_issue_time).await; build_response(*request, connection_id) } @@ -41,14 +64,16 @@ mod tests { use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; use bittorrent_udp_tracker_core::connection_cookie::make; - use bittorrent_udp_tracker_core::statistics; + use bittorrent_udp_tracker_core::statistics as core_statistics; use mockall::predicate::eq; use crate::handlers::handle_connect; use crate::handlers::tests::{ sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv4_socket_address, sample_ipv6_remote_addr, - sample_ipv6_remote_addr_fingerprint, sample_issue_time, MockUdpStatsEventSender, + sample_ipv6_remote_addr_fingerprint, 
sample_issue_time, MockUdpCoreStatsEventSender, MockUdpServerStatsEventSender, }; + use crate::statistics as server_statistics; + use crate::statistics::event::UdpResponseKind; fn sample_connect_request() -> ConnectRequest { ConnectRequest { @@ -58,8 +83,12 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { - let (udp_stats_event_sender, _udp_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + let (udp_core_stats_event_sender, _udp_core_stats_repository) = + bittorrent_udp_tracker_core::statistics::setup::factory(false); + let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + + let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); + let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); let request = ConnectRequest { transaction_id: TransactionId(0i32.into()), @@ -68,7 +97,8 @@ mod tests { let response = handle_connect( sample_ipv4_remote_addr(), &request, - &udp_stats_event_sender, + &udp_core_stats_event_sender, + &udp_server_stats_event_sender, sample_issue_time(), ) .await; @@ -84,8 +114,12 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id() { - let (udp_stats_event_sender, _udp_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + let (udp_core_stats_event_sender, _udp_core_stats_repository) = + bittorrent_udp_tracker_core::statistics::setup::factory(false); + let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + + let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); + let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); let request = ConnectRequest { 
transaction_id: TransactionId(0i32.into()), @@ -94,7 +128,8 @@ mod tests { let response = handle_connect( sample_ipv4_remote_addr(), &request, - &udp_stats_event_sender, + &udp_core_stats_event_sender, + &udp_server_stats_event_sender, sample_issue_time(), ) .await; @@ -110,8 +145,12 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { - let (udp_stats_event_sender, _udp_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + let (udp_core_stats_event_sender, _udp_core_stats_repository) = + bittorrent_udp_tracker_core::statistics::setup::factory(false); + let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + + let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); + let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); let request = ConnectRequest { transaction_id: TransactionId(0i32.into()), @@ -120,7 +159,8 @@ mod tests { let response = handle_connect( sample_ipv6_remote_addr(), &request, - &udp_stats_event_sender, + &udp_core_stats_event_sender, + &udp_server_stats_event_sender, sample_issue_time(), ) .await; @@ -136,21 +176,33 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock + let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); + udp_core_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Udp4Connect)) + .with(eq(core_statistics::event::Event::Udp4Connect)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + let udp_core_stats_event_sender: Arc>> = + 
Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); + + let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); + udp_server_stats_event_sender_mock + .expect_send_event() + .with(eq(server_statistics::event::Event::Udp4Request { + kind: UdpResponseKind::Connect, + })) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_server_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); let client_socket_address = sample_ipv4_socket_address(); handle_connect( client_socket_address, &sample_connect_request(), - &udp_stats_event_sender, + &udp_core_stats_event_sender, + &udp_server_stats_event_sender, sample_issue_time(), ) .await; @@ -158,19 +210,31 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock + let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); + udp_core_stats_event_sender_mock + .expect_send_event() + .with(eq(core_statistics::event::Event::Udp6Connect)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_core_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); + + let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); + udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Udp6Connect)) + .with(eq(server_statistics::event::Event::Udp6Request { + kind: UdpResponseKind::Connect, + })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + let udp_server_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); handle_connect( sample_ipv6_remote_addr(), &sample_connect_request(), - 
&udp_stats_event_sender, + &udp_core_stats_event_sender, + &udp_server_stats_event_sender, sample_issue_time(), ) .await; diff --git a/packages/udp-tracker-server/src/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs index 4f2457126..e4bd382da 100644 --- a/packages/udp-tracker-server/src/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -5,20 +5,21 @@ use std::sync::Arc; use aquatic_udp_protocol::{ErrorResponse, RequestParseError, Response, TransactionId}; use bittorrent_udp_tracker_core::connection_cookie::{check, gen_remote_fingerprint}; -use bittorrent_udp_tracker_core::{self, statistics, UDP_TRACKER_LOG_TARGET}; +use bittorrent_udp_tracker_core::{self, UDP_TRACKER_LOG_TARGET}; use tracing::{instrument, Level}; use uuid::Uuid; use zerocopy::network_endian::I32; use crate::error::Error; +use crate::statistics as server_statistics; #[allow(clippy::too_many_arguments)] -#[instrument(fields(transaction_id), skip(opt_udp_stats_event_sender), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id), skip(opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_error( remote_addr: SocketAddr, local_addr: SocketAddr, request_id: Uuid, - opt_udp_stats_event_sender: &Arc>>, + opt_udp_server_stats_event_sender: &Arc>>, cookie_valid_range: Range, e: &Error, transaction_id: Option, @@ -55,13 +56,17 @@ pub async fn handle_error( }; if e.1.is_some() { - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { + if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { match remote_addr { SocketAddr::V4(_) => { - udp_stats_event_sender.send_event(statistics::event::Event::Udp4Error).await; + udp_server_stats_event_sender + .send_event(server_statistics::event::Event::Udp4Error) + .await; } SocketAddr::V6(_) => { - udp_stats_event_sender.send_event(statistics::event::Event::Udp6Error).await; + udp_server_stats_event_sender + 
.send_event(server_statistics::event::Event::Udp6Error) + .await; } } } diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 5d7fdb3b3..fd0536b8b 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -22,6 +22,7 @@ use tracing::{instrument, Level}; use uuid::Uuid; use super::RawRequest; +use crate::container::UdpTrackerServerContainer; use crate::error::Error; use crate::CurrentClock; @@ -52,10 +53,11 @@ impl CookieTimeValues { /// - Delegating the request to the correct handler depending on the request type. /// /// It will return an `Error` response if the request is invalid. -#[instrument(fields(request_id), skip(udp_request, udp_tracker_container, cookie_time_values), ret(level = Level::TRACE))] +#[instrument(fields(request_id), skip(udp_request, udp_tracker_core_container, udp_tracker_server_container, cookie_time_values), ret(level = Level::TRACE))] pub(crate) async fn handle_packet( udp_request: RawRequest, - udp_tracker_container: Arc, + udp_tracker_core_container: Arc, + udp_tracker_server_container: Arc, local_addr: SocketAddr, cookie_time_values: CookieTimeValues, ) -> Response { @@ -71,7 +73,8 @@ pub(crate) async fn handle_packet( Ok(request) => match handle_request( request, udp_request.from, - udp_tracker_container.clone(), + udp_tracker_core_container.clone(), + udp_tracker_server_container.clone(), cookie_time_values.clone(), ) .await @@ -83,7 +86,7 @@ pub(crate) async fn handle_packet( } = error { // code-review: should we include `RequestParseError` and `BadRequest`? 
- let mut ban_service = udp_tracker_container.ban_service.write().await; + let mut ban_service = udp_tracker_core_container.ban_service.write().await; ban_service.increase_counter(&udp_request.from.ip()); } @@ -91,7 +94,7 @@ pub(crate) async fn handle_packet( udp_request.from, local_addr, request_id, - &udp_tracker_container.udp_stats_event_sender, + &udp_tracker_server_container.udp_server_stats_event_sender, cookie_time_values.valid_range.clone(), &error, Some(transaction_id), @@ -104,7 +107,7 @@ pub(crate) async fn handle_packet( udp_request.from, local_addr, request_id, - &udp_tracker_container.udp_stats_event_sender, + &udp_tracker_server_container.udp_server_stats_event_sender, cookie_time_values.valid_range.clone(), &e, None, @@ -124,11 +127,18 @@ pub(crate) async fn handle_packet( /// # Errors /// /// If a error happens in the `handle_request` function, it will just return the `ServerError`. -#[instrument(skip(request, remote_addr, udp_tracker_container, cookie_time_values))] +#[instrument(skip( + request, + remote_addr, + udp_tracker_core_container, + udp_tracker_server_container, + cookie_time_values +))] pub async fn handle_request( request: Request, remote_addr: SocketAddr, - udp_tracker_container: Arc, + udp_tracker_core_container: Arc, + udp_tracker_server_container: Arc, cookie_time_values: CookieTimeValues, ) -> Result { tracing::trace!("handle request"); @@ -137,7 +147,8 @@ pub async fn handle_request( Request::Connect(connect_request) => Ok(handle_connect( remote_addr, &connect_request, - &udp_tracker_container.udp_stats_event_sender, + &udp_tracker_core_container.udp_core_stats_event_sender, + &udp_tracker_server_container.udp_server_stats_event_sender, cookie_time_values.issue_time, ) .await), @@ -145,10 +156,11 @@ pub async fn handle_request( handle_announce( remote_addr, &announce_request, - &udp_tracker_container.core_config, - &udp_tracker_container.announce_handler, - &udp_tracker_container.whitelist_authorization, - 
&udp_tracker_container.udp_stats_event_sender, + &udp_tracker_core_container.core_config, + &udp_tracker_core_container.announce_handler, + &udp_tracker_core_container.whitelist_authorization, + &udp_tracker_core_container.udp_core_stats_event_sender, + &udp_tracker_server_container.udp_server_stats_event_sender, cookie_time_values.valid_range, ) .await @@ -157,8 +169,9 @@ pub async fn handle_request( handle_scrape( remote_addr, &scrape_request, - &udp_tracker_container.scrape_handler, - &udp_tracker_container.udp_stats_event_sender, + &udp_tracker_core_container.scrape_handler, + &udp_tracker_core_container.udp_core_stats_event_sender, + &udp_tracker_server_container.udp_server_stats_event_sender, cookie_time_values.valid_range, ) .await @@ -183,7 +196,7 @@ pub(crate) mod tests { use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::connection_cookie::gen_remote_fingerprint; - use bittorrent_udp_tracker_core::{self, statistics}; + use bittorrent_udp_tracker_core::{self, statistics as core_statistics}; use futures::future::BoxFuture; use mockall::mock; use tokio::sync::mpsc::error::SendError; @@ -192,7 +205,7 @@ pub(crate) mod tests { use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; - use crate::CurrentClock; + use crate::{statistics as server_statistics, CurrentClock}; pub(crate) struct CoreTrackerServices { pub core_config: Arc, @@ -204,7 +217,11 @@ pub(crate) mod tests { } pub(crate) struct CoreUdpTrackerServices { - pub udp_stats_event_sender: Arc>>, + pub udp_core_stats_event_sender: Arc>>, + } + + pub(crate) struct ServerUdpTrackerServices { + pub udp_server_stats_event_sender: Arc>>, } fn default_testing_tracker_configuration() -> Configuration { @@ -212,19 +229,23 @@ pub(crate) mod tests { } pub(crate) fn 
initialize_core_tracker_services_for_default_tracker_configuration( - ) -> (CoreTrackerServices, CoreUdpTrackerServices) { + ) -> (CoreTrackerServices, CoreUdpTrackerServices, ServerUdpTrackerServices) { initialize_core_tracker_services(&default_testing_tracker_configuration()) } - pub(crate) fn initialize_core_tracker_services_for_public_tracker() -> (CoreTrackerServices, CoreUdpTrackerServices) { + pub(crate) fn initialize_core_tracker_services_for_public_tracker( + ) -> (CoreTrackerServices, CoreUdpTrackerServices, ServerUdpTrackerServices) { initialize_core_tracker_services(&configuration::ephemeral_public()) } - pub(crate) fn initialize_core_tracker_services_for_listed_tracker() -> (CoreTrackerServices, CoreUdpTrackerServices) { + pub(crate) fn initialize_core_tracker_services_for_listed_tracker( + ) -> (CoreTrackerServices, CoreUdpTrackerServices, ServerUdpTrackerServices) { initialize_core_tracker_services(&configuration::ephemeral_listed()) } - fn initialize_core_tracker_services(config: &Configuration) -> (CoreTrackerServices, CoreUdpTrackerServices) { + fn initialize_core_tracker_services( + config: &Configuration, + ) -> (CoreTrackerServices, CoreUdpTrackerServices, ServerUdpTrackerServices) { let core_config = Arc::new(config.core.clone()); let database = initialize_database(&config.core); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); @@ -239,8 +260,12 @@ pub(crate) mod tests { )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - let (udp_stats_event_sender, _udp_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + let (udp_core_stats_event_sender, _udp_core_stats_repository) = + bittorrent_udp_tracker_core::statistics::setup::factory(false); + let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + + let (udp_server_stats_event_sender, 
_udp_server_stats_repository) = crate::statistics::setup::factory(false); + let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); ( CoreTrackerServices { @@ -251,7 +276,12 @@ pub(crate) mod tests { in_memory_whitelist, whitelist_authorization, }, - CoreUdpTrackerServices { udp_stats_event_sender }, + CoreUdpTrackerServices { + udp_core_stats_event_sender, + }, + ServerUdpTrackerServices { + udp_server_stats_event_sender, + }, ) } @@ -356,9 +386,16 @@ pub(crate) mod tests { } mock! { - pub(crate) UdpStatsEventSender {} - impl statistics::event::sender::Sender for UdpStatsEventSender { - fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; + pub(crate) UdpCoreStatsEventSender {} + impl core_statistics::event::sender::Sender for UdpCoreStatsEventSender { + fn send_event(&self, event: core_statistics::event::Event) -> BoxFuture<'static,Option > > > ; + } + } + + mock! { + pub(crate) UdpServerStatsEventSender {} + impl server_statistics::event::sender::Sender for UdpServerStatsEventSender { + fn send_event(&self, event: server_statistics::event::Event) -> BoxFuture<'static,Option > > > ; } } } diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index de98b5f6d..248f0ca12 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -1,5 +1,5 @@ //! UDP tracker scrape handler. 
-use std::net::SocketAddr; +use std::net::{IpAddr, SocketAddr}; use std::ops::Range; use std::sync::Arc; @@ -7,25 +7,27 @@ use aquatic_udp_protocol::{ NumberOfDownloads, NumberOfPeers, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_udp_tracker_core::statistics::{self}; -use bittorrent_udp_tracker_core::{self, services}; +use bittorrent_udp_tracker_core::{self, services, statistics as core_statistics}; use torrust_tracker_primitives::core::ScrapeData; use tracing::{instrument, Level}; use zerocopy::network_endian::I32; use crate::error::Error; +use crate::statistics as server_statistics; +use crate::statistics::event::UdpResponseKind; /// It handles the `Scrape` request. /// /// # Errors /// /// This function does not ever return an error. -#[instrument(fields(transaction_id, connection_id), skip(scrape_handler, opt_udp_stats_event_sender), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id, connection_id), skip(scrape_handler, opt_udp_core_stats_event_sender, opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_scrape( remote_addr: SocketAddr, request: &ScrapeRequest, scrape_handler: &Arc, - opt_udp_stats_event_sender: &Arc>>, + opt_udp_core_stats_event_sender: &Arc>>, + opt_udp_server_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { tracing::Span::current() @@ -34,11 +36,30 @@ pub async fn handle_scrape( tracing::trace!("handle scrape"); + if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { + match remote_addr.ip() { + IpAddr::V4(_) => { + udp_server_stats_event_sender + .send_event(server_statistics::event::Event::Udp4Request { + kind: UdpResponseKind::Scrape, + }) + .await; + } + IpAddr::V6(_) => { + udp_server_stats_event_sender + .send_event(server_statistics::event::Event::Udp6Request { + kind: UdpResponseKind::Scrape, + }) + .await; + } + } + } 
+ let scrape_data = services::scrape::handle_scrape( remote_addr, request, scrape_handler, - opt_udp_stats_event_sender, + opt_udp_core_stats_event_sender, cookie_valid_range, ) .await @@ -104,7 +125,8 @@ mod tests { #[tokio::test] async fn should_return_no_stats_when_the_tracker_does_not_have_any_torrent() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = + initialize_core_tracker_services_for_public_tracker(); let remote_addr = sample_ipv4_remote_addr(); @@ -121,7 +143,8 @@ mod tests { remote_addr, &request, &core_tracker_services.scrape_handler, - &core_udp_tracker_services.udp_stats_event_sender, + &core_udp_tracker_services.udp_core_stats_event_sender, + &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -168,8 +191,12 @@ mod tests { in_memory_torrent_repository: Arc, scrape_handler: Arc, ) -> Response { - let (udp_stats_event_sender, _udp_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); + let (udp_core_stats_event_sender, _udp_core_stats_repository) = + bittorrent_udp_tracker_core::statistics::setup::factory(false); + let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + + let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); + let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); @@ -182,7 +209,8 @@ mod tests { remote_addr, &request, &scrape_handler, - &udp_stats_event_sender, + &udp_core_stats_event_sender, + &udp_server_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -204,7 +232,8 @@ mod tests { #[tokio::test] async fn 
should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { - let (core_tracker_services, _core_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); + let (core_tracker_services, _core_udp_tracker_services, _server_udp_tracker_services) = + initialize_core_tracker_services_for_public_tracker(); let torrent_stats = match_scrape_response( add_a_sample_seeder_and_scrape( @@ -237,7 +266,8 @@ mod tests { #[tokio::test] async fn should_return_the_torrent_statistics_when_the_requested_torrent_is_whitelisted() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_listed_tracker(); + let (core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = + initialize_core_tracker_services_for_listed_tracker(); let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); @@ -258,7 +288,8 @@ mod tests { remote_addr, &request, &core_tracker_services.scrape_handler, - &core_udp_tracker_services.udp_stats_event_sender, + &core_udp_tracker_services.udp_core_stats_event_sender, + &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -277,7 +308,8 @@ mod tests { #[tokio::test] async fn should_return_zeroed_statistics_when_the_requested_torrent_is_not_whitelisted() { - let (core_tracker_services, core_udp_tracker_services) = initialize_core_tracker_services_for_listed_tracker(); + let (core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = + initialize_core_tracker_services_for_listed_tracker(); let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); @@ -296,7 +328,8 @@ mod tests { remote_addr, &request, &core_tracker_services.scrape_handler, - &core_udp_tracker_services.udp_stats_event_sender, + &core_udp_tracker_services.udp_core_stats_event_sender, + &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -325,37 
+358,50 @@ mod tests { use std::future; use std::sync::Arc; - use bittorrent_udp_tracker_core::statistics; + use bittorrent_udp_tracker_core::statistics as core_statistics; use mockall::predicate::eq; use super::sample_scrape_request; use crate::handlers::handle_scrape; use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, - sample_ipv4_remote_addr, MockUdpStatsEventSender, + sample_ipv4_remote_addr, MockUdpCoreStatsEventSender, MockUdpServerStatsEventSender, }; + use crate::statistics as server_statistics; #[tokio::test] async fn should_send_the_upd4_scrape_event() { - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock + let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); + udp_core_stats_event_sender_mock + .expect_send_event() + .with(eq(core_statistics::event::Event::Udp4Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_core_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); + + let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); + udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Udp4Scrape)) + .with(eq(server_statistics::event::Event::Udp4Request { + kind: server_statistics::event::UdpResponseKind::Scrape, + })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + let udp_server_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); let remote_addr = sample_ipv4_remote_addr(); - let (core_tracker_services, _core_udp_tracker_services) = + let (core_tracker_services, _core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); handle_scrape( remote_addr, 
&sample_scrape_request(&remote_addr), &core_tracker_services.scrape_handler, - &udp_stats_event_sender, + &udp_core_stats_event_sender, + &udp_server_stats_event_sender, sample_cookie_valid_range(), ) .await @@ -367,37 +413,50 @@ mod tests { use std::future; use std::sync::Arc; - use bittorrent_udp_tracker_core::statistics; + use bittorrent_udp_tracker_core::statistics as core_statistics; use mockall::predicate::eq; use super::sample_scrape_request; use crate::handlers::handle_scrape; use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, - sample_ipv6_remote_addr, MockUdpStatsEventSender, + sample_ipv6_remote_addr, MockUdpCoreStatsEventSender, MockUdpServerStatsEventSender, }; + use crate::statistics as server_statistics; #[tokio::test] async fn should_send_the_upd6_scrape_event() { - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); - udp_stats_event_sender_mock + let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); + udp_core_stats_event_sender_mock + .expect_send_event() + .with(eq(core_statistics::event::Event::Udp6Scrape)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + let udp_core_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); + + let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); + udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Udp6Scrape)) + .with(eq(server_statistics::event::Event::Udp6Request { + kind: server_statistics::event::UdpResponseKind::Scrape, + })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + let udp_server_stats_event_sender: Arc>> = + Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); let remote_addr = sample_ipv6_remote_addr(); - let (core_tracker_services, 
_core_udp_tracker_services) = + let (core_tracker_services, _core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); handle_scrape( remote_addr, &sample_scrape_request(&remote_addr), &core_tracker_services.scrape_handler, - &udp_stats_event_sender, + &udp_core_stats_event_sender, + &udp_server_stats_event_sender, sample_cookie_valid_range(), ) .await diff --git a/packages/udp-tracker-server/src/lib.rs b/packages/udp-tracker-server/src/lib.rs index e02011a8b..9e013bf81 100644 --- a/packages/udp-tracker-server/src/lib.rs +++ b/packages/udp-tracker-server/src/lib.rs @@ -634,6 +634,7 @@ //! documentation by [Arvid Norberg](https://github.com/arvidn) was very //! supportive in the development of this documentation. Some descriptions were //! taken from the [libtorrent](https://www.rasterbar.com/products/libtorrent/udp_tracker_protocol.html). +pub mod container; pub mod environment; pub mod error; pub mod handlers; diff --git a/packages/udp-tracker-server/src/server/launcher.rs b/packages/udp-tracker-server/src/server/launcher.rs index 12d9c740c..acd214ab0 100644 --- a/packages/udp-tracker-server/src/server/launcher.rs +++ b/packages/udp-tracker-server/src/server/launcher.rs @@ -4,7 +4,7 @@ use std::time::Duration; use bittorrent_tracker_client::udp::client::check; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; -use bittorrent_udp_tracker_core::{self, statistics, UDP_TRACKER_LOG_TARGET}; +use bittorrent_udp_tracker_core::{self, UDP_TRACKER_LOG_TARGET}; use derive_more::Constructor; use futures_util::StreamExt; use tokio::select; @@ -16,9 +16,11 @@ use torrust_server_lib::signals::{shutdown_signal_with_message, Halted, Started} use tracing::instrument; use super::request_buffer::ActiveRequests; +use crate::container::UdpTrackerServerContainer; use crate::server::bound_socket::BoundSocket; use crate::server::processor::Processor; use crate::server::receiver::Receiver; +use 
crate::statistics; const IP_BANS_RESET_INTERVAL_IN_SECS: u64 = 3600; @@ -34,9 +36,10 @@ impl Launcher { /// It panics if unable to bind to udp socket, and get the address from the udp socket. /// It panics if unable to send address of socket. /// It panics if the udp server is loaded when the tracker is private. - #[instrument(skip(udp_tracker_container, bind_to, tx_start, rx_halt))] + #[instrument(skip(udp_tracker_core_container, udp_tracker_server_container, bind_to, tx_start, rx_halt))] pub async fn run_with_graceful_shutdown( - udp_tracker_container: Arc, + udp_tracker_core_container: Arc, + udp_tracker_server_container: Arc, bind_to: SocketAddr, cookie_lifetime: Duration, tx_start: oneshot::Sender, @@ -44,7 +47,7 @@ impl Launcher { ) { tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting on: {bind_to}"); - if udp_tracker_container.core_config.private { + if udp_tracker_core_container.core_config.private { tracing::error!("udp services cannot be used for private trackers"); panic!("it should not use udp if using authentication"); } @@ -74,7 +77,13 @@ impl Launcher { let local_addr = local_udp_url.clone(); tokio::task::spawn(async move { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_with_graceful_shutdown::task (listening...)"); - let () = Self::run_udp_server_main(receiver, udp_tracker_container, cookie_lifetime).await; + let () = Self::run_udp_server_main( + receiver, + udp_tracker_core_container, + udp_tracker_server_container, + cookie_lifetime, + ) + .await; }) }; @@ -111,10 +120,11 @@ impl Launcher { ServiceHealthCheckJob::new(binding, info, job) } - #[instrument(skip(receiver, udp_tracker_container))] + #[instrument(skip(receiver, udp_tracker_core_container, udp_tracker_server_container))] async fn run_udp_server_main( mut receiver: Receiver, - udp_tracker_container: Arc, + udp_tracker_core_container: Arc, + udp_tracker_server_container: Arc, cookie_lifetime: Duration, ) { let active_requests = &mut ActiveRequests::default(); 
@@ -125,7 +135,7 @@ impl Launcher { let cookie_lifetime = cookie_lifetime.as_secs_f64(); - let ban_cleaner = udp_tracker_container.ban_service.clone(); + let ban_cleaner = udp_tracker_core_container.ban_service.clone(); tokio::spawn(async move { let mut cleaner_interval = interval(Duration::from_secs(IP_BANS_RESET_INTERVAL_IN_SECS)); @@ -157,22 +167,29 @@ impl Launcher { } }; - if let Some(udp_stats_event_sender) = udp_tracker_container.udp_stats_event_sender.as_deref() { + if let Some(udp_server_stats_event_sender) = udp_tracker_server_container.udp_server_stats_event_sender.as_deref() + { match req.from.ip() { IpAddr::V4(_) => { - udp_stats_event_sender.send_event(statistics::event::Event::Udp4Request).await; + udp_server_stats_event_sender + .send_event(statistics::event::Event::Udp4IncomingRequest) + .await; } IpAddr::V6(_) => { - udp_stats_event_sender.send_event(statistics::event::Event::Udp6Request).await; + udp_server_stats_event_sender + .send_event(statistics::event::Event::Udp6IncomingRequest) + .await; } } } - if udp_tracker_container.ban_service.read().await.is_banned(&req.from.ip()) { + if udp_tracker_core_container.ban_service.read().await.is_banned(&req.from.ip()) { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop continue: (banned ip)"); - if let Some(udp_stats_event_sender) = udp_tracker_container.udp_stats_event_sender.as_deref() { - udp_stats_event_sender + if let Some(udp_server_stats_event_sender) = + udp_tracker_server_container.udp_server_stats_event_sender.as_deref() + { + udp_server_stats_event_sender .send_event(statistics::event::Event::UdpRequestBanned) .await; } @@ -180,7 +197,12 @@ impl Launcher { continue; } - let processor = Processor::new(receiver.socket.clone(), udp_tracker_container.clone(), cookie_lifetime); + let processor = Processor::new( + receiver.socket.clone(), + udp_tracker_core_container.clone(), + udp_tracker_server_container.clone(), + cookie_lifetime, + ); /* We spawn the new 
task even if the active requests buffer is full. This could seem counterintuitive because we are accepting @@ -204,8 +226,10 @@ impl Launcher { if old_request_aborted { // Evicted task from active requests buffer was aborted. - if let Some(udp_stats_event_sender) = udp_tracker_container.udp_stats_event_sender.as_deref() { - udp_stats_event_sender + if let Some(udp_server_stats_event_sender) = + udp_tracker_server_container.udp_server_stats_event_sender.as_deref() + { + udp_server_stats_event_sender .send_event(statistics::event::Event::UdpRequestAborted) .await; } diff --git a/packages/udp-tracker-server/src/server/mod.rs b/packages/udp-tracker-server/src/server/mod.rs index 1ab79b6fe..f70e28b27 100644 --- a/packages/udp-tracker-server/src/server/mod.rs +++ b/packages/udp-tracker-server/src/server/mod.rs @@ -64,6 +64,7 @@ mod tests { use super::spawner::Spawner; use super::Server; + use crate::container::UdpTrackerServerContainer; fn initialize_global_services(configuration: &Configuration) { initialize_static(); @@ -97,10 +98,16 @@ mod tests { let stopped = Server::new(Spawner::new(bind_to)); - let udp_tracker_container = UdpTrackerCoreContainer::initialize(&core_config, &udp_tracker_config); + let udp_tracker_core_container = UdpTrackerCoreContainer::initialize(&core_config, &udp_tracker_config); + let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); let started = stopped - .start(udp_tracker_container, register.give_form(), config.cookie_lifetime) + .start( + udp_tracker_core_container, + udp_tracker_server_container, + register.give_form(), + config.cookie_lifetime, + ) .await .expect("it should start the server"); @@ -131,11 +138,13 @@ mod tests { let stopped = Server::new(Spawner::new(bind_to)); - let udp_tracker_container = UdpTrackerCoreContainer::initialize(&core_config, &udp_tracker_config); + let udp_tracker_core_container = UdpTrackerCoreContainer::initialize(&core_config, &udp_tracker_config); + let 
udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); let started = stopped .start( - udp_tracker_container, + udp_tracker_core_container, + udp_tracker_server_container, register.give_form(), udp_tracker_config.cookie_lifetime, ) diff --git a/packages/udp-tracker-server/src/server/processor.rs b/packages/udp-tracker-server/src/server/processor.rs index a933fdd17..44b543571 100644 --- a/packages/udp-tracker-server/src/server/processor.rs +++ b/packages/udp-tracker-server/src/server/processor.rs @@ -5,25 +5,33 @@ use std::time::Duration; use aquatic_udp_protocol::Response; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; -use bittorrent_udp_tracker_core::{self, statistics}; +use bittorrent_udp_tracker_core::{self}; use tokio::time::Instant; use tracing::{instrument, Level}; use super::bound_socket::BoundSocket; +use crate::container::UdpTrackerServerContainer; use crate::handlers::CookieTimeValues; -use crate::{handlers, RawRequest}; +use crate::{handlers, statistics, RawRequest}; pub struct Processor { socket: Arc, - udp_tracker_container: Arc, + udp_tracker_core_container: Arc, + udp_tracker_server_container: Arc, cookie_lifetime: f64, } impl Processor { - pub fn new(socket: Arc, udp_tracker_container: Arc, cookie_lifetime: f64) -> Self { + pub fn new( + socket: Arc, + udp_tracker_core_container: Arc, + udp_tracker_server_container: Arc, + cookie_lifetime: f64, + ) -> Self { Self { socket, - udp_tracker_container, + udp_tracker_core_container, + udp_tracker_server_container, cookie_lifetime, } } @@ -36,7 +44,8 @@ impl Processor { let response = handlers::handle_packet( request, - self.udp_tracker_container.clone(), + self.udp_tracker_core_container.clone(), + self.udp_tracker_server_container.clone(), self.socket.address(), CookieTimeValues::new(self.cookie_lifetime), ) @@ -81,10 +90,12 @@ impl Processor { tracing::debug!(%bytes_count, %sent_bytes, "sent {response_type}"); } - if let Some(udp_stats_event_sender) = 
self.udp_tracker_container.udp_stats_event_sender.as_deref() { + if let Some(udp_server_stats_event_sender) = + self.udp_tracker_server_container.udp_server_stats_event_sender.as_deref() + { match target.ip() { IpAddr::V4(_) => { - udp_stats_event_sender + udp_server_stats_event_sender .send_event(statistics::event::Event::Udp4Response { kind: udp_response_kind, req_processing_time, @@ -92,7 +103,7 @@ impl Processor { .await; } IpAddr::V6(_) => { - udp_stats_event_sender + udp_server_stats_event_sender .send_event(statistics::event::Event::Udp6Response { kind: udp_response_kind, req_processing_time, diff --git a/packages/udp-tracker-server/src/server/spawner.rs b/packages/udp-tracker-server/src/server/spawner.rs index 6c1f9a48e..46916f6ae 100644 --- a/packages/udp-tracker-server/src/server/spawner.rs +++ b/packages/udp-tracker-server/src/server/spawner.rs @@ -11,6 +11,7 @@ use tokio::task::JoinHandle; use torrust_server_lib::signals::{Halted, Started}; use super::launcher::Launcher; +use crate::container::UdpTrackerServerContainer; #[derive(Constructor, Copy, Clone, Debug, Display)] #[display("(with socket): {bind_to}")] @@ -27,7 +28,8 @@ impl Spawner { #[must_use] pub fn spawn_launcher( &self, - udp_tracker_container: Arc, + udp_tracker_core_container: Arc, + udp_tracker_server_container: Arc, cookie_lifetime: Duration, tx_start: oneshot::Sender, rx_halt: oneshot::Receiver, @@ -35,8 +37,15 @@ impl Spawner { let spawner = Self::new(self.bind_to); tokio::spawn(async move { - Launcher::run_with_graceful_shutdown(udp_tracker_container, spawner.bind_to, cookie_lifetime, tx_start, rx_halt) - .await; + Launcher::run_with_graceful_shutdown( + udp_tracker_core_container, + udp_tracker_server_container, + spawner.bind_to, + cookie_lifetime, + tx_start, + rx_halt, + ) + .await; spawner }) } diff --git a/packages/udp-tracker-server/src/server/states.rs b/packages/udp-tracker-server/src/server/states.rs index fc700ea40..4d1c97167 100644 --- 
a/packages/udp-tracker-server/src/server/states.rs +++ b/packages/udp-tracker-server/src/server/states.rs @@ -14,6 +14,7 @@ use tracing::{instrument, Level}; use super::spawner::Spawner; use super::{Server, UdpError}; +use crate::container::UdpTrackerServerContainer; use crate::server::launcher::Launcher; /// A UDP server instance controller with no UDP instance running. @@ -60,10 +61,11 @@ impl Server { /// # Panics /// /// It panics if unable to receive the bound socket address from service. - #[instrument(skip(self, udp_tracker_container, form), err, ret(Display, level = Level::INFO))] + #[instrument(skip(self, udp_tracker_core_container, udp_tracker_server_container, form), err, ret(Display, level = Level::INFO))] pub async fn start( self, - udp_tracker_container: Arc, + udp_tracker_core_container: Arc, + udp_tracker_server_container: Arc, form: ServiceRegistrationForm, cookie_lifetime: Duration, ) -> Result, std::io::Error> { @@ -73,10 +75,13 @@ impl Server { assert!(!tx_halt.is_closed(), "Halt channel for UDP tracker should be open"); // May need to wrap in a task to about a tokio bug. 
- let task = self - .state - .spawner - .spawn_launcher(udp_tracker_container, cookie_lifetime, tx_start, rx_halt); + let task = self.state.spawner.spawn_launcher( + udp_tracker_core_container, + udp_tracker_server_container, + cookie_lifetime, + tx_start, + rx_halt, + ); let local_addr = rx_start.await.expect("it should be able to start the service").address; diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index 731f678a1..b3b86e20a 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -12,21 +12,21 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { } // UDP4 - Event::Udp4Request { kind } => { + Event::Udp4IncomingRequest => { stats_repository.increase_udp4_requests().await; - match kind { - UdpResponseKind::Connect => { - stats_repository.increase_udp4_connections().await; - } - UdpResponseKind::Announce => { - stats_repository.increase_udp4_announces().await; - } - UdpResponseKind::Scrape => { - stats_repository.increase_udp4_scrapes().await; - } - UdpResponseKind::Error => {} - } } + Event::Udp4Request { kind } => match kind { + UdpResponseKind::Connect => { + stats_repository.increase_udp4_connections().await; + } + UdpResponseKind::Announce => { + stats_repository.increase_udp4_announces().await; + } + UdpResponseKind::Scrape => { + stats_repository.increase_udp4_scrapes().await; + } + UdpResponseKind::Error => {} + }, Event::Udp4Response { kind, req_processing_time, @@ -57,9 +57,21 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { } // UDP6 - Event::Udp6Request => { + Event::Udp6IncomingRequest => { stats_repository.increase_udp6_requests().await; } + Event::Udp6Request { kind } => match kind { + UdpResponseKind::Connect => { + stats_repository.increase_udp6_connections().await; + } + UdpResponseKind::Announce => { + 
stats_repository.increase_udp6_announces().await; + } + UdpResponseKind::Scrape => { + stats_repository.increase_udp6_scrapes().await; + } + UdpResponseKind::Error => {} + }, Event::Udp6Response { kind: _, req_processing_time: _, @@ -77,7 +89,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { #[cfg(test)] mod tests { use crate::statistics::event::handler::handle_event; - use crate::statistics::event::{Event, UdpResponseKind}; + use crate::statistics::event::Event; use crate::statistics::repository::Repository; #[tokio::test] @@ -101,13 +113,7 @@ mod tests { async fn should_increase_the_udp4_requests_counter_when_it_receives_a_udp4_request_event() { let stats_repository = Repository::new(); - handle_event( - Event::Udp4Request { - kind: UdpResponseKind::Connect, - }, - &stats_repository, - ) - .await; + handle_event(Event::Udp4IncomingRequest, &stats_repository).await; let stats = stats_repository.get_stats().await; @@ -147,7 +153,7 @@ mod tests { async fn should_increase_the_udp6_requests_counter_when_it_receives_a_udp6_request_event() { let stats_repository = Repository::new(); - handle_event(Event::Udp6Request, &stats_repository).await; + handle_event(Event::Udp6IncomingRequest, &stats_repository).await; let stats = stats_repository.get_stats().await; diff --git a/packages/udp-tracker-server/src/statistics/event/mod.rs b/packages/udp-tracker-server/src/statistics/event/mod.rs index 4f66862d6..6a48b9449 100644 --- a/packages/udp-tracker-server/src/statistics/event/mod.rs +++ b/packages/udp-tracker-server/src/statistics/event/mod.rs @@ -18,6 +18,9 @@ pub enum Event { // Attributes are enums too. 
UdpRequestAborted, UdpRequestBanned, + + // UDP4 + Udp4IncomingRequest, Udp4Request { kind: UdpResponseKind, }, @@ -26,7 +29,12 @@ pub enum Event { req_processing_time: Duration, }, Udp4Error, - Udp6Request, + + // UDP6 + Udp6IncomingRequest, + Udp6Request { + kind: UdpResponseKind, + }, Udp6Response { kind: UdpResponseKind, req_processing_time: Duration, diff --git a/packages/udp-tracker-server/src/statistics/keeper.rs b/packages/udp-tracker-server/src/statistics/keeper.rs index e805a7eea..ae80e7970 100644 --- a/packages/udp-tracker-server/src/statistics/keeper.rs +++ b/packages/udp-tracker-server/src/statistics/keeper.rs @@ -51,7 +51,7 @@ impl Keeper { #[cfg(test)] mod tests { - use crate::statistics::event::{Event, UdpResponseKind}; + use crate::statistics::event::Event; use crate::statistics::keeper::Keeper; use crate::statistics::metrics::Metrics; @@ -70,11 +70,7 @@ mod tests { let event_sender = stats_tracker.run_event_listener(); - let result = event_sender - .send_event(Event::Udp4Request { - kind: UdpResponseKind::Connect, - }) - .await; + let result = event_sender.send_event(Event::Udp4IncomingRequest).await; assert!(result.is_some()); } diff --git a/packages/udp-tracker-server/src/statistics/services.rs b/packages/udp-tracker-server/src/statistics/services.rs index d34bd3c8a..92ee14f50 100644 --- a/packages/udp-tracker-server/src/statistics/services.rs +++ b/packages/udp-tracker-server/src/statistics/services.rs @@ -125,13 +125,14 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let (_udp_stats_event_sender, udp_stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - let udp_stats_repository = Arc::new(udp_stats_repository); + let (_udp_server_stats_event_sender, udp_server_stats_repository) = + statistics::setup::factory(config.core.tracker_usage_statistics); + let 
udp_server_stats_repository = Arc::new(udp_server_stats_repository); let tracker_metrics = get_metrics( in_memory_torrent_repository.clone(), ban_service.clone(), - udp_stats_repository.clone(), + udp_server_stats_repository.clone(), ) .await; diff --git a/packages/udp-tracker-server/tests/server/contract.rs b/packages/udp-tracker-server/tests/server/contract.rs index d2da552a2..4cb23621d 100644 --- a/packages/udp-tracker-server/tests/server/contract.rs +++ b/packages/udp-tracker-server/tests/server/contract.rs @@ -267,8 +267,8 @@ mod receiving_an_announce_request { let udp_requests_banned_before = env .container - .udp_tracker_core_container - .udp_stats_repository + .udp_tracker_server_container + .udp_server_stats_repository .get_stats() .await .udp_requests_banned; @@ -283,8 +283,8 @@ mod receiving_an_announce_request { let udp_requests_banned_after = env .container - .udp_tracker_core_container - .udp_stats_repository + .udp_tracker_server_container + .udp_server_stats_repository .get_stats() .await .udp_requests_banned; diff --git a/src/app.rs b/src/app.rs index 27ffe7a4a..5458ea600 100644 --- a/src/app.rs +++ b/src/app.rs @@ -79,8 +79,11 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> } else { let udp_tracker_config = Arc::new(udp_tracker_config.clone()); let udp_tracker_container = Arc::new(app_container.udp_tracker_container(&udp_tracker_config)); + let udp_tracker_server_container = Arc::new(app_container.udp_tracker_server_container()); - jobs.push(udp_tracker::start_job(udp_tracker_container, registar.give_form()).await); + jobs.push( + udp_tracker::start_job(udp_tracker_container, udp_tracker_server_container, registar.give_form()).await, + ); } } } else { diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 0276de1d3..2723ad9ab 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -12,6 +12,7 @@ use 
bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use tokio::task::JoinHandle; use torrust_server_lib::registar::ServiceRegistrationForm; +use torrust_udp_tracker_server::container::UdpTrackerServerContainer; use torrust_udp_tracker_server::server::spawner::Spawner; use torrust_udp_tracker_server::server::Server; use tracing::instrument; @@ -27,13 +28,22 @@ use tracing::instrument; /// It will panic if the task did not finish successfully. #[must_use] #[allow(clippy::async_yields_async)] -#[instrument(skip(udp_tracker_container, form))] -pub async fn start_job(udp_tracker_container: Arc, form: ServiceRegistrationForm) -> JoinHandle<()> { - let bind_to = udp_tracker_container.udp_tracker_config.bind_address; - let cookie_lifetime = udp_tracker_container.udp_tracker_config.cookie_lifetime; +#[instrument(skip(udp_tracker_core_container, udp_tracker_server_container, form))] +pub async fn start_job( + udp_tracker_core_container: Arc, + udp_tracker_server_container: Arc, + form: ServiceRegistrationForm, +) -> JoinHandle<()> { + let bind_to = udp_tracker_core_container.udp_tracker_config.bind_address; + let cookie_lifetime = udp_tracker_core_container.udp_tracker_config.cookie_lifetime; let server = Server::new(Spawner::new(bind_to)) - .start(udp_tracker_container, form, cookie_lifetime) + .start( + udp_tracker_core_container, + udp_tracker_server_container, + form, + cookie_lifetime, + ) .await .expect("it should be able to start the udp tracker"); diff --git a/src/container.rs b/src/container.rs index 6f6d9013d..b10ac9ae0 100644 --- a/src/container.rs +++ b/src/container.rs @@ -19,6 +19,7 @@ use bittorrent_udp_tracker_core::{self, MAX_CONNECTION_ID_ERRORS_PER_IP}; use tokio::sync::RwLock; use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_tracker_configuration::{Configuration, Core, HttpApi, HttpTracker, UdpTracker}; +use 
torrust_udp_tracker_server::container::UdpTrackerServerContainer; use tracing::instrument; pub struct AppContainer { @@ -38,12 +39,16 @@ pub struct AppContainer { // UDP Tracker Core Services pub ban_service: Arc>, - pub udp_stats_event_sender: Arc>>, + pub udp_core_stats_event_sender: Arc>>, + pub udp_core_stats_repository: Arc, // HTTP Tracker Core Services pub http_stats_event_sender: Arc>>, pub http_stats_repository: Arc, - pub udp_stats_repository: Arc, + + // UDP Tracker Server Services + pub udp_server_stats_event_sender: Arc>>, + pub udp_server_stats_repository: Arc, } impl AppContainer { @@ -53,20 +58,26 @@ impl AppContainer { let tracker_core_container = TrackerCoreContainer::initialize(&core_config); - // HTTP stats + // HTTP core stats let (http_stats_event_sender, http_stats_repository) = bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); let http_stats_event_sender = Arc::new(http_stats_event_sender); let http_stats_repository = Arc::new(http_stats_repository); - // UDP stats - let (udp_stats_event_sender, udp_stats_repository) = + // UDP core stats + let (udp_core_stats_event_sender, udp_core_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); - let udp_stats_event_sender = Arc::new(udp_stats_event_sender); - let udp_stats_repository = Arc::new(udp_stats_repository); + let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let udp_core_stats_repository = Arc::new(udp_core_stats_repository); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + // UDP server stats + let (udp_server_stats_event_sender, udp_server_stats_repository) = + torrust_udp_tracker_server::statistics::setup::factory(configuration.core.tracker_usage_statistics); + let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); + let udp_server_stats_repository = 
Arc::new(udp_server_stats_repository); + AppContainer { core_config, database: tracker_core_container.database, @@ -82,9 +93,11 @@ impl AppContainer { torrents_manager: tracker_core_container.torrents_manager, ban_service, http_stats_event_sender, - udp_stats_event_sender, + udp_core_stats_event_sender, http_stats_repository, - udp_stats_repository, + udp_core_stats_repository, + udp_server_stats_event_sender, + udp_server_stats_repository, } } @@ -112,8 +125,8 @@ impl AppContainer { whitelist_authorization: self.whitelist_authorization.clone(), udp_tracker_config: udp_tracker_config.clone(), - udp_stats_event_sender: self.udp_stats_event_sender.clone(), - udp_stats_repository: self.udp_stats_repository.clone(), + udp_core_stats_event_sender: self.udp_core_stats_event_sender.clone(), + udp_core_stats_repository: self.udp_core_stats_repository.clone(), ban_service: self.ban_service.clone(), } } @@ -128,7 +141,16 @@ impl AppContainer { whitelist_manager: self.whitelist_manager.clone(), ban_service: self.ban_service.clone(), http_stats_repository: self.http_stats_repository.clone(), - udp_stats_repository: self.udp_stats_repository.clone(), + udp_core_stats_repository: self.udp_core_stats_repository.clone(), + udp_server_stats_repository: self.udp_server_stats_repository.clone(), + } + } + + #[must_use] + pub fn udp_tracker_server_container(&self) -> UdpTrackerServerContainer { + UdpTrackerServerContainer { + udp_server_stats_event_sender: self.udp_server_stats_event_sender.clone(), + udp_server_stats_repository: self.udp_server_stats_repository.clone(), } } } From 5362a6daf0032fee0703bf45b9294072694a85ee Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 27 Feb 2025 15:37:56 +0000 Subject: [PATCH 338/802] refactor: [#1309] remove unused code adn rename function And other changes to make the convertion more explicit and clear. 
--- .../src/v1/handlers/announce.rs | 27 +------------------ .../http-protocol/src/v1/requests/announce.rs | 8 +++--- 2 files changed, 5 insertions(+), 30 deletions(-) diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 7855c8172..0221f8dad 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -4,11 +4,10 @@ //! and resolve the client IP address. use std::sync::Arc; -use aquatic_udp_protocol::AnnounceEvent; use axum::extract::State; use axum::response::{IntoResponse, Response}; use bittorrent_http_tracker_core::services::announce::HttpAnnounceError; -use bittorrent_http_tracker_protocol::v1::requests::announce::{Announce, Compact, Event}; +use bittorrent_http_tracker_protocol::v1::requests::announce::{Announce, Compact}; use bittorrent_http_tracker_protocol::v1::responses::{self}; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; use bittorrent_tracker_core::announce_handler::AnnounceHandler; @@ -158,30 +157,6 @@ fn build_response(announce_request: &Announce, announce_data: AnnounceData) -> R } } -#[must_use] -pub fn map_to_aquatic_event(event: &Option) -> aquatic_udp_protocol::AnnounceEvent { - match event { - Some(event) => match &event { - Event::Started => aquatic_udp_protocol::AnnounceEvent::Started, - Event::Stopped => aquatic_udp_protocol::AnnounceEvent::Stopped, - Event::Completed => aquatic_udp_protocol::AnnounceEvent::Completed, - }, - None => aquatic_udp_protocol::AnnounceEvent::None, - } -} - -#[must_use] -pub fn map_to_torrust_event(event: &Option) -> AnnounceEvent { - match event { - Some(event) => match &event { - Event::Started => AnnounceEvent::Started, - Event::Stopped => AnnounceEvent::Stopped, - Event::Completed => AnnounceEvent::Completed, - }, - None => AnnounceEvent::None, - } -} - #[cfg(test)] mod tests { diff --git 
a/packages/http-protocol/src/v1/requests/announce.rs b/packages/http-protocol/src/v1/requests/announce.rs index 036aa3048..abb6d7e90 100644 --- a/packages/http-protocol/src/v1/requests/announce.rs +++ b/packages/http-protocol/src/v1/requests/announce.rs @@ -399,12 +399,12 @@ pub fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> peer: uploaded: announce_request.uploaded.unwrap_or(NumberOfBytes::new(0)), downloaded: announce_request.downloaded.unwrap_or(NumberOfBytes::new(0)), left: announce_request.left.unwrap_or(NumberOfBytes::new(0)), - event: map_to_torrust_event(&announce_request.event), + event: convert_to_aquatic_event(&announce_request.event), } } #[must_use] -pub fn map_to_torrust_event(event: &Option) -> AnnounceEvent { +pub fn convert_to_aquatic_event(event: &Option) -> aquatic_udp_protocol::request::AnnounceEvent { match event { Some(event) => match &event { Event::Started => AnnounceEvent::Started, @@ -444,7 +444,7 @@ mod tests { assert_eq!( announce_request, Announce { - info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), // DevSkim: ignore DS173237 peer_id: PeerId(*b"-qB00000000000000001"), port: 17548, downloaded: None, @@ -479,7 +479,7 @@ mod tests { assert_eq!( announce_request, Announce { - info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), + info_hash: "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(), // DevSkim: ignore DS173237 peer_id: PeerId(*b"-qB00000000000000001"), port: 17548, downloaded: Some(NumberOfBytes::new(1)), From 81675988cb52b7722c027df070b805fb9c65b316 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 27 Feb 2025 16:26:01 +0000 Subject: [PATCH 339/802] refactor: [#1309] align our HTTP Announce Events with the aquatic one - To simplify conversions. - To properly implement BEP 3. The new `empty` option is defined in the protocol and was missing in our implementation. 
--- .../http-protocol/src/v1/requests/announce.rs | 44 ++++++++++++------- 1 file changed, 27 insertions(+), 17 deletions(-) diff --git a/packages/http-protocol/src/v1/requests/announce.rs b/packages/http-protocol/src/v1/requests/announce.rs index abb6d7e90..a04738749 100644 --- a/packages/http-protocol/src/v1/requests/announce.rs +++ b/packages/http-protocol/src/v1/requests/announce.rs @@ -145,15 +145,21 @@ pub enum ParseAnnounceQueryError { /// /// Refer to [BEP 03. The `BitTorrent Protocol` Specification](https://www.bittorrent.org/beps/bep_0003.html) /// for more information. -#[derive(PartialEq, Debug)] +#[derive(PartialEq, Debug, Clone)] pub enum Event { /// Event sent when a download first begins. Started, + /// Event sent when the downloader cease downloading. Stopped, + /// Event sent when the download is complete. - /// No `completed` is sent if the file was complete when started + /// No `completed` is sent if the file was complete when started. Completed, + + /// It is the same as not being present. If not present, this is one of the + /// announcements done at regular intervals. 
+ Empty, } impl FromStr for Event { @@ -164,6 +170,7 @@ impl FromStr for Event { "started" => Ok(Self::Started), "stopped" => Ok(Self::Stopped), "completed" => Ok(Self::Completed), + "empty" => Ok(Self::Empty), _ => Err(ParseAnnounceQueryError::InvalidParam { param_name: EVENT.to_owned(), param_value: raw_param.to_owned(), @@ -179,17 +186,29 @@ impl fmt::Display for Event { Event::Started => write!(f, "started"), Event::Stopped => write!(f, "stopped"), Event::Completed => write!(f, "completed"), + Event::Empty => write!(f, "empty"), } } } impl From for Event { - fn from(value: aquatic_udp_protocol::request::AnnounceEvent) -> Self { - match value { + fn from(event: aquatic_udp_protocol::request::AnnounceEvent) -> Self { + match event { AnnounceEvent::Started => Self::Started, AnnounceEvent::Stopped => Self::Stopped, AnnounceEvent::Completed => Self::Completed, - AnnounceEvent::None => panic!("can't convert announce event from aquatic for None variant"), + AnnounceEvent::None => Self::Empty, + } + } +} + +impl From for aquatic_udp_protocol::request::AnnounceEvent { + fn from(event: Event) -> Self { + match event { + Event::Started => Self::Started, + Event::Stopped => Self::Stopped, + Event::Completed => Self::Completed, + Event::Empty => Self::None, } } } @@ -399,19 +418,10 @@ pub fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> peer: uploaded: announce_request.uploaded.unwrap_or(NumberOfBytes::new(0)), downloaded: announce_request.downloaded.unwrap_or(NumberOfBytes::new(0)), left: announce_request.left.unwrap_or(NumberOfBytes::new(0)), - event: convert_to_aquatic_event(&announce_request.event), - } -} - -#[must_use] -pub fn convert_to_aquatic_event(event: &Option) -> aquatic_udp_protocol::request::AnnounceEvent { - match event { - Some(event) => match &event { - Event::Started => AnnounceEvent::Started, - Event::Stopped => AnnounceEvent::Stopped, - Event::Completed => AnnounceEvent::Completed, + event: match &announce_request.event { + 
Some(event) => event.clone().into(), + None => AnnounceEvent::None, }, - None => AnnounceEvent::None, } } From d190feee254966565dcc60c155d7942844332209 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 27 Feb 2025 18:06:10 +0000 Subject: [PATCH 340/802] refactor: [#1317] rename api packages To avoid future conflicts with other API implementations like GraphQL API. --- .github/workflows/deployment.yaml | 6 +- Cargo.lock | 111 +++++++++--------- Cargo.toml | 6 +- .../axum-health-check-api-server/Cargo.toml | 2 +- .../tests/server/contract.rs | 4 +- .../Cargo.toml | 8 +- .../LICENSE | 0 .../README.md | 0 .../src/environment.rs | 4 +- .../src/lib.rs | 0 .../src/routes.rs | 2 +- .../src/server.rs | 4 +- .../src/v1/context/auth_key/forms.rs | 0 .../src/v1/context/auth_key/handlers.rs | 0 .../src/v1/context/auth_key/mod.rs | 0 .../src/v1/context/auth_key/resources.rs | 0 .../src/v1/context/auth_key/responses.rs | 0 .../src/v1/context/auth_key/routes.rs | 0 .../src/v1/context/health_check/handlers.rs | 0 .../src/v1/context/health_check/mod.rs | 0 .../src/v1/context/health_check/resources.rs | 0 .../src/v1/context/mod.rs | 0 .../src/v1/context/stats/handlers.rs | 2 +- .../src/v1/context/stats/mod.rs | 0 .../src/v1/context/stats/resources.rs | 6 +- .../src/v1/context/stats/responses.rs | 2 +- .../src/v1/context/stats/routes.rs | 2 +- .../src/v1/context/torrent/handlers.rs | 0 .../src/v1/context/torrent/mod.rs | 0 .../src/v1/context/torrent/resources/mod.rs | 0 .../src/v1/context/torrent/resources/peer.rs | 0 .../v1/context/torrent/resources/torrent.rs | 0 .../src/v1/context/torrent/responses.rs | 0 .../src/v1/context/torrent/routes.rs | 0 .../src/v1/context/whitelist/handlers.rs | 0 .../src/v1/context/whitelist/mod.rs | 0 .../src/v1/context/whitelist/responses.rs | 0 .../src/v1/context/whitelist/routes.rs | 0 .../src/v1/middlewares/auth.rs | 0 .../src/v1/middlewares/mod.rs | 0 .../src/v1/mod.rs | 0 .../src/v1/responses.rs | 0 .../src/v1/routes.rs | 2 +- 
.../tests/common/fixtures.rs | 0 .../tests/common/mod.rs | 0 .../tests/integration.rs | 0 .../tests/server/connection_info.rs | 2 +- .../tests/server/mod.rs | 0 .../tests/server/v1/asserts.rs | 6 +- .../server/v1/contract/authentication.rs | 6 +- .../server/v1/contract/context/auth_key.rs | 8 +- .../v1/contract/context/health_check.rs | 6 +- .../tests/server/v1/contract/context/mod.rs | 0 .../tests/server/v1/contract/context/stats.rs | 6 +- .../server/v1/contract/context/torrent.rs | 10 +- .../server/v1/contract/context/whitelist.rs | 4 +- .../tests/server/v1/contract/fixtures.rs | 0 .../tests/server/v1/contract/mod.rs | 0 .../tests/server/v1/mod.rs | 0 .../Cargo.toml | 2 +- .../README.md | 0 .../docs/licenses/LICENSE-MIT_0 | 0 .../src/common/http.rs | 0 .../src/common/mod.rs | 0 .../src/connection_info.rs | 0 .../src/lib.rs | 0 .../src/v1/client.rs | 0 .../src/v1/mod.rs | 0 .../Cargo.toml | 2 +- .../LICENSE | 0 .../README.md | 0 .../src/container.rs | 0 .../src/lib.rs | 0 .../src/statistics/metrics.rs | 0 .../src/statistics/mod.rs | 0 .../src/statistics/services.rs | 0 packages/tracker-core/Cargo.toml | 2 +- src/app.rs | 2 +- src/bootstrap/jobs/tracker_apis.rs | 10 +- src/container.rs | 2 +- src/lib.rs | 14 +-- 81 files changed, 122 insertions(+), 121 deletions(-) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/Cargo.toml (86%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/LICENSE (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/README.md (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/environment.rs (97%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/lib.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/routes.rs (98%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/server.rs (98%) rename packages/{axum-tracker-api-server => 
axum-rest-tracker-api-server}/src/v1/context/auth_key/forms.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/auth_key/handlers.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/auth_key/mod.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/auth_key/resources.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/auth_key/responses.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/auth_key/routes.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/health_check/handlers.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/health_check/mod.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/health_check/resources.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/mod.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/stats/handlers.rs (96%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/stats/mod.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/stats/resources.rs (97%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/stats/responses.rs (98%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/stats/routes.rs (92%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/torrent/handlers.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/torrent/mod.rs (100%) rename packages/{axum-tracker-api-server => 
axum-rest-tracker-api-server}/src/v1/context/torrent/resources/mod.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/torrent/resources/peer.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/torrent/resources/torrent.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/torrent/responses.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/torrent/routes.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/whitelist/handlers.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/whitelist/mod.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/whitelist/responses.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/context/whitelist/routes.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/middlewares/auth.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/middlewares/mod.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/mod.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/responses.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/src/v1/routes.rs (90%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/tests/common/fixtures.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/tests/common/mod.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/tests/integration.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/tests/server/connection_info.rs (75%) rename packages/{axum-tracker-api-server => 
axum-rest-tracker-api-server}/tests/server/mod.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/tests/server/v1/asserts.rs (95%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/tests/server/v1/contract/authentication.rs (94%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/tests/server/v1/contract/context/auth_key.rs (98%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/tests/server/v1/contract/context/health_check.rs (75%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/tests/server/v1/contract/context/mod.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/tests/server/v1/contract/context/stats.rs (93%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/tests/server/v1/contract/context/torrent.rs (96%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/tests/server/v1/contract/context/whitelist.rs (98%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/tests/server/v1/contract/fixtures.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/tests/server/v1/contract/mod.rs (100%) rename packages/{axum-tracker-api-server => axum-rest-tracker-api-server}/tests/server/v1/mod.rs (100%) rename packages/{tracker-api-client => rest-tracker-api-client}/Cargo.toml (93%) rename packages/{tracker-api-client => rest-tracker-api-client}/README.md (100%) rename packages/{tracker-api-client => rest-tracker-api-client}/docs/licenses/LICENSE-MIT_0 (100%) rename packages/{tracker-api-client => rest-tracker-api-client}/src/common/http.rs (100%) rename packages/{tracker-api-client => rest-tracker-api-client}/src/common/mod.rs (100%) rename packages/{tracker-api-client => rest-tracker-api-client}/src/connection_info.rs (100%) rename packages/{tracker-api-client => rest-tracker-api-client}/src/lib.rs (100%) rename 
packages/{tracker-api-client => rest-tracker-api-client}/src/v1/client.rs (100%) rename packages/{tracker-api-client => rest-tracker-api-client}/src/v1/mod.rs (100%) rename packages/{tracker-api-core => rest-tracker-api-core}/Cargo.toml (96%) rename packages/{tracker-api-core => rest-tracker-api-core}/LICENSE (100%) rename packages/{tracker-api-core => rest-tracker-api-core}/README.md (100%) rename packages/{tracker-api-core => rest-tracker-api-core}/src/container.rs (100%) rename packages/{tracker-api-core => rest-tracker-api-core}/src/lib.rs (100%) rename packages/{tracker-api-core => rest-tracker-api-core}/src/statistics/metrics.rs (100%) rename packages/{tracker-api-core => rest-tracker-api-core}/src/statistics/mod.rs (100%) rename packages/{tracker-api-core => rest-tracker-api-core}/src/statistics/services.rs (100%) diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index e492d7490..1422ec394 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -63,12 +63,12 @@ jobs: cargo publish -p bittorrent-udp-tracker-protocol cargo publish -p torrust-axum-health-check-api-server cargo publish -p torrust-axum-http-tracker-server + cargo publish -p torrust-axum-rest-tracker-api-server cargo publish -p torrust-axum-server - cargo publish -p torrust-axum-tracker-api-server + cargo publish -p torrust-rest-tracker-api-client + cargo publish -p torrust-rest-tracker-api-core cargo publish -p torrust-torrust-server-lib cargo publish -p torrust-tracker - cargo publish -p torrust-tracker-api-client - cargo publish -p torrust-tracker-api-core cargo publish -p torrust-tracker-client cargo publish -p torrust-tracker-clock cargo publish -p torrust-tracker-configuration diff --git a/Cargo.lock b/Cargo.lock index 71140b9f7..fbb55562a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -647,7 +647,7 @@ dependencies = [ "testcontainers", "thiserror 2.0.11", "tokio", - "torrust-tracker-api-client", + 
"torrust-rest-tracker-api-client", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-located-error", @@ -4350,8 +4350,8 @@ dependencies = [ "tokio", "torrust-axum-health-check-api-server", "torrust-axum-http-tracker-server", + "torrust-axum-rest-tracker-api-server", "torrust-axum-server", - "torrust-axum-tracker-api-server", "torrust-server-lib", "torrust-tracker-clock", "torrust-tracker-configuration", @@ -4400,27 +4400,7 @@ dependencies = [ ] [[package]] -name = "torrust-axum-server" -version = "3.0.0-develop" -dependencies = [ - "axum-server", - "camino", - "futures-util", - "http-body", - "hyper", - "hyper-util", - "pin-project-lite", - "thiserror 2.0.11", - "tokio", - "torrust-server-lib", - "torrust-tracker-configuration", - "torrust-tracker-located-error", - "tower 0.5.2", - "tracing", -] - -[[package]] -name = "torrust-axum-tracker-api-server" +name = "torrust-axum-rest-tracker-api-server" version = "3.0.0-develop" dependencies = [ "aquatic_udp_protocol", @@ -4443,21 +4423,67 @@ dependencies = [ "thiserror 2.0.11", "tokio", "torrust-axum-server", + "torrust-rest-tracker-api-client", + "torrust-rest-tracker-api-core", "torrust-server-lib", - "torrust-tracker-api-client", - "torrust-tracker-api-core", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-primitives", "torrust-tracker-test-helpers", "torrust-udp-tracker-server", - "tower 0.5.2", + "tower 0.4.13", "tower-http", "tracing", "url", "uuid", ] +[[package]] +name = "torrust-axum-server" +version = "3.0.0-develop" +dependencies = [ + "axum-server", + "camino", + "futures-util", + "http-body", + "hyper", + "hyper-util", + "pin-project-lite", + "thiserror 2.0.11", + "tokio", + "torrust-server-lib", + "torrust-tracker-configuration", + "torrust-tracker-located-error", + "tower 0.5.2", + "tracing", +] + +[[package]] +name = "torrust-rest-tracker-api-client" +version = "3.0.0-develop" +dependencies = [ + "hyper", + "reqwest", + "serde", + "thiserror 2.0.11", + 
"url", + "uuid", +] + +[[package]] +name = "torrust-rest-tracker-api-core" +version = "3.0.0-develop" +dependencies = [ + "bittorrent-http-tracker-core", + "bittorrent-tracker-core", + "bittorrent-udp-tracker-core", + "tokio", + "torrust-tracker-configuration", + "torrust-tracker-primitives", + "torrust-tracker-test-helpers", + "torrust-udp-tracker-server", +] + [[package]] name = "torrust-server-lib" version = "3.0.0-develop" @@ -4490,11 +4516,11 @@ dependencies = [ "tokio", "torrust-axum-health-check-api-server", "torrust-axum-http-tracker-server", + "torrust-axum-rest-tracker-api-server", "torrust-axum-server", - "torrust-axum-tracker-api-server", + "torrust-rest-tracker-api-client", + "torrust-rest-tracker-api-core", "torrust-server-lib", - "torrust-tracker-api-client", - "torrust-tracker-api-core", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-test-helpers", @@ -4503,32 +4529,6 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "torrust-tracker-api-client" -version = "3.0.0-develop" -dependencies = [ - "hyper", - "reqwest", - "serde", - "thiserror 2.0.11", - "url", - "uuid", -] - -[[package]] -name = "torrust-tracker-api-core" -version = "3.0.0-develop" -dependencies = [ - "bittorrent-http-tracker-core", - "bittorrent-tracker-core", - "bittorrent-udp-tracker-core", - "tokio", - "torrust-tracker-configuration", - "torrust-tracker-primitives", - "torrust-tracker-test-helpers", - "torrust-udp-tracker-server", -] - [[package]] name = "torrust-tracker-client" version = "3.0.0-develop" @@ -4685,6 +4685,7 @@ dependencies = [ "futures-util", "pin-project", "pin-project-lite", + "tokio", "tower-layer", "tower-service", "tracing", diff --git a/Cargo.toml b/Cargo.toml index 346817e27..bcac4bf66 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -49,10 +49,10 @@ serde_json = { version = "1", features = ["preserve_order"] } tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } 
torrust-axum-health-check-api-server = { version = "3.0.0-develop", path = "packages/axum-health-check-api-server" } torrust-axum-http-tracker-server = { version = "3.0.0-develop", path = "packages/axum-http-tracker-server" } +torrust-axum-rest-tracker-api-server = { version = "3.0.0-develop", path = "packages/axum-rest-tracker-api-server" } torrust-axum-server = { version = "3.0.0-develop", path = "packages/axum-server" } -torrust-axum-tracker-api-server = { version = "3.0.0-develop", path = "packages/axum-tracker-api-server" } +torrust-rest-tracker-api-core = { version = "3.0.0-develop", path = "packages/rest-tracker-api-core" } torrust-server-lib = { version = "3.0.0-develop", path = "packages/server-lib" } -torrust-tracker-api-core = { version = "3.0.0-develop", path = "packages/tracker-api-core" } torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/configuration" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "packages/udp-tracker-server" } @@ -62,7 +62,7 @@ tracing-subscriber = { version = "0", features = ["json"] } [dev-dependencies] local-ip-address = "0" mockall = "0" -torrust-tracker-api-client = { version = "3.0.0-develop", path = "packages/tracker-api-client" } +torrust-rest-tracker-api-client = { version = "3.0.0-develop", path = "packages/rest-tracker-api-client" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "packages/test-helpers" } [workspace] diff --git a/packages/axum-health-check-api-server/Cargo.toml b/packages/axum-health-check-api-server/Cargo.toml index 928393bee..e24e609bf 100644 --- a/packages/axum-health-check-api-server/Cargo.toml +++ b/packages/axum-health-check-api-server/Cargo.toml @@ -31,7 +31,7 @@ tracing = "0" reqwest = { version = "0", features = ["json"] } torrust-axum-health-check-api-server = { version = "3.0.0-develop", path = "../axum-health-check-api-server" } 
torrust-axum-http-tracker-server = { version = "3.0.0-develop", path = "../axum-http-tracker-server" } -torrust-axum-tracker-api-server = { version = "3.0.0-develop", path = "../axum-tracker-api-server" } +torrust-axum-rest-tracker-api-server = { version = "3.0.0-develop", path = "../axum-rest-tracker-api-server" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } diff --git a/packages/axum-health-check-api-server/tests/server/contract.rs b/packages/axum-health-check-api-server/tests/server/contract.rs index 96a03cca4..0e0d26b83 100644 --- a/packages/axum-health-check-api-server/tests/server/contract.rs +++ b/packages/axum-health-check-api-server/tests/server/contract.rs @@ -43,7 +43,7 @@ mod api { let configuration = Arc::new(configuration::ephemeral()); - let service = torrust_axum_tracker_api_server::environment::Started::new(&configuration).await; + let service = torrust_axum_rest_tracker_api_server::environment::Started::new(&configuration).await; let registar = service.registar.clone(); @@ -90,7 +90,7 @@ mod api { let configuration = Arc::new(configuration::ephemeral()); - let service = torrust_axum_tracker_api_server::environment::Started::new(&configuration).await; + let service = torrust_axum_rest_tracker_api_server::environment::Started::new(&configuration).await; let binding = service.bind_address(); diff --git a/packages/axum-tracker-api-server/Cargo.toml b/packages/axum-rest-tracker-api-server/Cargo.toml similarity index 86% rename from packages/axum-tracker-api-server/Cargo.toml rename to packages/axum-rest-tracker-api-server/Cargo.toml index e1deb9b8a..9c0d2bc2f 100644 --- a/packages/axum-tracker-api-server/Cargo.toml +++ b/packages/axum-rest-tracker-api-server/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true homepage.workspace = true keywords = 
["axum", "bittorrent", "http", "server", "torrust", "tracker"] license.workspace = true -name = "torrust-axum-tracker-api-server" +name = "torrust-axum-rest-tracker-api-server" publish.workspace = true readme = "README.md" repository.workspace = true @@ -32,9 +32,9 @@ serde_with = { version = "3", features = ["json"] } thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-axum-server = { version = "3.0.0-develop", path = "../axum-server" } +torrust-rest-tracker-api-client = { version = "3.0.0-develop", path = "../rest-tracker-api-client" } +torrust-rest-tracker-api-core = { version = "3.0.0-develop", path = "../rest-tracker-api-core" } torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } -torrust-tracker-api-client = { version = "3.0.0-develop", path = "../tracker-api-client" } -torrust-tracker-api-core = { version = "3.0.0-develop", path = "../tracker-api-core" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } @@ -46,7 +46,7 @@ tracing = "0" [dev-dependencies] local-ip-address = "0" mockall = "0" -torrust-tracker-api-client = { version = "3.0.0-develop", path = "../tracker-api-client" } +torrust-rest-tracker-api-client = { version = "3.0.0-develop", path = "../rest-tracker-api-client" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } url = { version = "2", features = ["serde"] } uuid = { version = "1", features = ["v4"] } diff --git a/packages/axum-tracker-api-server/LICENSE b/packages/axum-rest-tracker-api-server/LICENSE similarity index 100% rename from packages/axum-tracker-api-server/LICENSE rename to packages/axum-rest-tracker-api-server/LICENSE diff --git a/packages/axum-tracker-api-server/README.md 
b/packages/axum-rest-tracker-api-server/README.md similarity index 100% rename from packages/axum-tracker-api-server/README.md rename to packages/axum-rest-tracker-api-server/README.md diff --git a/packages/axum-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs similarity index 97% rename from packages/axum-tracker-api-server/src/environment.rs rename to packages/axum-rest-tracker-api-server/src/environment.rs index 7390bc659..2ee5cf744 100644 --- a/packages/axum-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -7,9 +7,9 @@ use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use futures::executor::block_on; use torrust_axum_server::tsl::make_rust_tls; +use torrust_rest_tracker_api_client::connection_info::{ConnectionInfo, Origin}; +use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_server_lib::registar::Registar; -use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; -use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; diff --git a/packages/axum-tracker-api-server/src/lib.rs b/packages/axum-rest-tracker-api-server/src/lib.rs similarity index 100% rename from packages/axum-tracker-api-server/src/lib.rs rename to packages/axum-rest-tracker-api-server/src/lib.rs diff --git a/packages/axum-tracker-api-server/src/routes.rs b/packages/axum-rest-tracker-api-server/src/routes.rs similarity index 98% rename from packages/axum-tracker-api-server/src/routes.rs rename to packages/axum-rest-tracker-api-server/src/routes.rs index 492e0dc37..c18451c89 100644 --- a/packages/axum-tracker-api-server/src/routes.rs +++ 
b/packages/axum-rest-tracker-api-server/src/routes.rs @@ -15,8 +15,8 @@ use axum::response::Response; use axum::routing::get; use axum::{middleware, BoxError, Router}; use hyper::{Request, StatusCode}; +use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_server_lib::logging::Latency; -use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_tracker_configuration::{AccessTokens, DEFAULT_TIMEOUT}; use tower::timeout::TimeoutLayer; use tower::ServiceBuilder; diff --git a/packages/axum-tracker-api-server/src/server.rs b/packages/axum-rest-tracker-api-server/src/server.rs similarity index 98% rename from packages/axum-tracker-api-server/src/server.rs rename to packages/axum-rest-tracker-api-server/src/server.rs index 65d8ca27a..fd8f92944 100644 --- a/packages/axum-tracker-api-server/src/server.rs +++ b/packages/axum-rest-tracker-api-server/src/server.rs @@ -35,10 +35,10 @@ use thiserror::Error; use tokio::sync::oneshot::{Receiver, Sender}; use torrust_axum_server::custom_axum_server::{self, TimeoutAcceptor}; use torrust_axum_server::signals::graceful_shutdown; +use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_server_lib::logging::STARTED_ON; use torrust_server_lib::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; use torrust_server_lib::signals::{Halted, Started}; -use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_tracker_configuration::AccessTokens; use tracing::{instrument, Level}; @@ -295,8 +295,8 @@ mod tests { use std::sync::Arc; use torrust_axum_server::tsl::make_rust_tls; + use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_server_lib::registar::Registar; - use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_test_helpers::configuration::ephemeral_public; diff --git 
a/packages/axum-tracker-api-server/src/v1/context/auth_key/forms.rs b/packages/axum-rest-tracker-api-server/src/v1/context/auth_key/forms.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/auth_key/forms.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/auth_key/forms.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/auth_key/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/auth_key/handlers.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/auth_key/handlers.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/auth_key/handlers.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/auth_key/mod.rs b/packages/axum-rest-tracker-api-server/src/v1/context/auth_key/mod.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/auth_key/mod.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/auth_key/mod.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/auth_key/resources.rs b/packages/axum-rest-tracker-api-server/src/v1/context/auth_key/resources.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/auth_key/resources.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/auth_key/resources.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/auth_key/responses.rs b/packages/axum-rest-tracker-api-server/src/v1/context/auth_key/responses.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/auth_key/responses.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/auth_key/responses.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/auth_key/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/auth_key/routes.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/auth_key/routes.rs rename to 
packages/axum-rest-tracker-api-server/src/v1/context/auth_key/routes.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/health_check/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/health_check/handlers.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/health_check/handlers.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/health_check/handlers.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/health_check/mod.rs b/packages/axum-rest-tracker-api-server/src/v1/context/health_check/mod.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/health_check/mod.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/health_check/mod.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/health_check/resources.rs b/packages/axum-rest-tracker-api-server/src/v1/context/health_check/resources.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/health_check/resources.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/health_check/resources.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/mod.rs b/packages/axum-rest-tracker-api-server/src/v1/context/mod.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/mod.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/mod.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/stats/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs similarity index 96% rename from packages/axum-tracker-api-server/src/v1/context/stats/handlers.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs index 5e23211a6..5273df332 100644 --- a/packages/axum-tracker-api-server/src/v1/context/stats/handlers.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs @@ -9,7 +9,7 @@ use 
bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepo use bittorrent_udp_tracker_core::services::banning::BanService; use serde::Deserialize; use tokio::sync::RwLock; -use torrust_tracker_api_core::statistics::services::get_metrics; +use torrust_rest_tracker_api_core::statistics::services::get_metrics; use super::responses::{metrics_response, stats_response}; diff --git a/packages/axum-tracker-api-server/src/v1/context/stats/mod.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/mod.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/stats/mod.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/stats/mod.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/stats/resources.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs similarity index 97% rename from packages/axum-tracker-api-server/src/v1/context/stats/resources.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs index f27050e22..9a82593c7 100644 --- a/packages/axum-tracker-api-server/src/v1/context/stats/resources.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs @@ -1,7 +1,7 @@ //! API resources for the [`stats`](crate::v1::context::stats) //! API context. use serde::{Deserialize, Serialize}; -use torrust_tracker_api_core::statistics::services::TrackerMetrics; +use torrust_rest_tracker_api_core::statistics::services::TrackerMetrics; /// It contains all the statistics generated by the tracker. 
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -117,8 +117,8 @@ impl From for Stats { #[cfg(test)] mod tests { - use torrust_tracker_api_core::statistics::metrics::Metrics; - use torrust_tracker_api_core::statistics::services::TrackerMetrics; + use torrust_rest_tracker_api_core::statistics::metrics::Metrics; + use torrust_rest_tracker_api_core::statistics::services::TrackerMetrics; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use super::Stats; diff --git a/packages/axum-tracker-api-server/src/v1/context/stats/responses.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/responses.rs similarity index 98% rename from packages/axum-tracker-api-server/src/v1/context/stats/responses.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/stats/responses.rs index f68c1e062..61455178c 100644 --- a/packages/axum-tracker-api-server/src/v1/context/stats/responses.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/responses.rs @@ -1,7 +1,7 @@ //! API responses for the [`stats`](crate::v1::context::stats) //! API context. 
use axum::response::{IntoResponse, Json, Response}; -use torrust_tracker_api_core::statistics::services::TrackerMetrics; +use torrust_rest_tracker_api_core::statistics::services::TrackerMetrics; use super::resources::Stats; diff --git a/packages/axum-tracker-api-server/src/v1/context/stats/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs similarity index 92% rename from packages/axum-tracker-api-server/src/v1/context/stats/routes.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs index 6caaf13bf..1334c0d70 100644 --- a/packages/axum-tracker-api-server/src/v1/context/stats/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs @@ -7,7 +7,7 @@ use std::sync::Arc; use axum::routing::get; use axum::Router; -use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; +use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use super::handlers::get_stats_handler; diff --git a/packages/axum-tracker-api-server/src/v1/context/torrent/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/torrent/handlers.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/torrent/handlers.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/torrent/handlers.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/torrent/mod.rs b/packages/axum-rest-tracker-api-server/src/v1/context/torrent/mod.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/torrent/mod.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/torrent/mod.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/torrent/resources/mod.rs b/packages/axum-rest-tracker-api-server/src/v1/context/torrent/resources/mod.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/torrent/resources/mod.rs rename to 
packages/axum-rest-tracker-api-server/src/v1/context/torrent/resources/mod.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/torrent/resources/peer.rs b/packages/axum-rest-tracker-api-server/src/v1/context/torrent/resources/peer.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/torrent/resources/peer.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/torrent/resources/peer.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/torrent/resources/torrent.rs b/packages/axum-rest-tracker-api-server/src/v1/context/torrent/resources/torrent.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/torrent/resources/torrent.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/torrent/resources/torrent.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/torrent/responses.rs b/packages/axum-rest-tracker-api-server/src/v1/context/torrent/responses.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/torrent/responses.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/torrent/responses.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/torrent/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/torrent/routes.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/torrent/routes.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/torrent/routes.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/whitelist/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/whitelist/handlers.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/whitelist/handlers.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/whitelist/handlers.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/whitelist/mod.rs b/packages/axum-rest-tracker-api-server/src/v1/context/whitelist/mod.rs 
similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/whitelist/mod.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/whitelist/mod.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/whitelist/responses.rs b/packages/axum-rest-tracker-api-server/src/v1/context/whitelist/responses.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/whitelist/responses.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/whitelist/responses.rs diff --git a/packages/axum-tracker-api-server/src/v1/context/whitelist/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/whitelist/routes.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/context/whitelist/routes.rs rename to packages/axum-rest-tracker-api-server/src/v1/context/whitelist/routes.rs diff --git a/packages/axum-tracker-api-server/src/v1/middlewares/auth.rs b/packages/axum-rest-tracker-api-server/src/v1/middlewares/auth.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/middlewares/auth.rs rename to packages/axum-rest-tracker-api-server/src/v1/middlewares/auth.rs diff --git a/packages/axum-tracker-api-server/src/v1/middlewares/mod.rs b/packages/axum-rest-tracker-api-server/src/v1/middlewares/mod.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/middlewares/mod.rs rename to packages/axum-rest-tracker-api-server/src/v1/middlewares/mod.rs diff --git a/packages/axum-tracker-api-server/src/v1/mod.rs b/packages/axum-rest-tracker-api-server/src/v1/mod.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/mod.rs rename to packages/axum-rest-tracker-api-server/src/v1/mod.rs diff --git a/packages/axum-tracker-api-server/src/v1/responses.rs b/packages/axum-rest-tracker-api-server/src/v1/responses.rs similarity index 100% rename from packages/axum-tracker-api-server/src/v1/responses.rs rename to 
packages/axum-rest-tracker-api-server/src/v1/responses.rs diff --git a/packages/axum-tracker-api-server/src/v1/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/routes.rs similarity index 90% rename from packages/axum-tracker-api-server/src/v1/routes.rs rename to packages/axum-rest-tracker-api-server/src/v1/routes.rs index 90596f0e7..b36a20eac 100644 --- a/packages/axum-tracker-api-server/src/v1/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/routes.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use axum::Router; -use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; +use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use super::context::{auth_key, stats, torrent, whitelist}; diff --git a/packages/axum-tracker-api-server/tests/common/fixtures.rs b/packages/axum-rest-tracker-api-server/tests/common/fixtures.rs similarity index 100% rename from packages/axum-tracker-api-server/tests/common/fixtures.rs rename to packages/axum-rest-tracker-api-server/tests/common/fixtures.rs diff --git a/packages/axum-tracker-api-server/tests/common/mod.rs b/packages/axum-rest-tracker-api-server/tests/common/mod.rs similarity index 100% rename from packages/axum-tracker-api-server/tests/common/mod.rs rename to packages/axum-rest-tracker-api-server/tests/common/mod.rs diff --git a/packages/axum-tracker-api-server/tests/integration.rs b/packages/axum-rest-tracker-api-server/tests/integration.rs similarity index 100% rename from packages/axum-tracker-api-server/tests/integration.rs rename to packages/axum-rest-tracker-api-server/tests/integration.rs diff --git a/packages/axum-tracker-api-server/tests/server/connection_info.rs b/packages/axum-rest-tracker-api-server/tests/server/connection_info.rs similarity index 75% rename from packages/axum-tracker-api-server/tests/server/connection_info.rs rename to packages/axum-rest-tracker-api-server/tests/server/connection_info.rs index e78f4cbb7..6459c9a2f 100644 --- 
a/packages/axum-tracker-api-server/tests/server/connection_info.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/connection_info.rs @@ -1,4 +1,4 @@ -use torrust_tracker_api_client::connection_info::{ConnectionInfo, Origin}; +use torrust_rest_tracker_api_client::connection_info::{ConnectionInfo, Origin}; pub fn connection_with_invalid_token(origin: Origin) -> ConnectionInfo { ConnectionInfo::authenticated(origin, "invalid token") diff --git a/packages/axum-tracker-api-server/tests/server/mod.rs b/packages/axum-rest-tracker-api-server/tests/server/mod.rs similarity index 100% rename from packages/axum-tracker-api-server/tests/server/mod.rs rename to packages/axum-rest-tracker-api-server/tests/server/mod.rs diff --git a/packages/axum-tracker-api-server/tests/server/v1/asserts.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/asserts.rs similarity index 95% rename from packages/axum-tracker-api-server/tests/server/v1/asserts.rs rename to packages/axum-rest-tracker-api-server/tests/server/v1/asserts.rs index c1a06594a..abd60cf94 100644 --- a/packages/axum-tracker-api-server/tests/server/v1/asserts.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/asserts.rs @@ -1,9 +1,9 @@ // code-review: should we use macros to return the exact line where the assert fails? 
use reqwest::Response; -use torrust_axum_tracker_api_server::v1::context::auth_key::resources::AuthKey; -use torrust_axum_tracker_api_server::v1::context::stats::resources::Stats; -use torrust_axum_tracker_api_server::v1::context::torrent::resources::torrent::{ListItem, Torrent}; +use torrust_axum_rest_tracker_api_server::v1::context::auth_key::resources::AuthKey; +use torrust_axum_rest_tracker_api_server::v1::context::stats::resources::Stats; +use torrust_axum_rest_tracker_api_server::v1::context::torrent::resources::torrent::{ListItem, Torrent}; // Resource responses diff --git a/packages/axum-tracker-api-server/tests/server/v1/contract/authentication.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/authentication.rs similarity index 94% rename from packages/axum-tracker-api-server/tests/server/v1/contract/authentication.rs rename to packages/axum-rest-tracker-api-server/tests/server/v1/contract/authentication.rs index 5acb25a3c..eac30d93a 100644 --- a/packages/axum-tracker-api-server/tests/server/v1/contract/authentication.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/authentication.rs @@ -1,6 +1,6 @@ -use torrust_axum_tracker_api_server::environment::Started; -use torrust_tracker_api_client::common::http::{Query, QueryParam}; -use torrust_tracker_api_client::v1::client::{headers_with_request_id, Client}; +use torrust_axum_rest_tracker_api_server::environment::Started; +use torrust_rest_tracker_api_client::common::http::{Query, QueryParam}; +use torrust_rest_tracker_api_client::v1::client::{headers_with_request_id, Client}; use torrust_tracker_test_helpers::logging::logs_contains_a_line_with; use torrust_tracker_test_helpers::{configuration, logging}; use uuid::Uuid; diff --git a/packages/axum-tracker-api-server/tests/server/v1/contract/context/auth_key.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/auth_key.rs similarity index 98% rename from 
packages/axum-tracker-api-server/tests/server/v1/contract/context/auth_key.rs rename to packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/auth_key.rs index 92e4b59fe..f6355fc6e 100644 --- a/packages/axum-tracker-api-server/tests/server/v1/contract/context/auth_key.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/auth_key.rs @@ -2,8 +2,8 @@ use std::time::Duration; use bittorrent_tracker_core::authentication::Key; use serde::Serialize; -use torrust_axum_tracker_api_server::environment::Started; -use torrust_tracker_api_client::v1::client::{headers_with_request_id, AddKeyForm, Client}; +use torrust_axum_rest_tracker_api_server::environment::Started; +use torrust_rest_tracker_api_client::v1::client::{headers_with_request_id, AddKeyForm, Client}; use torrust_tracker_test_helpers::logging::logs_contains_a_line_with; use torrust_tracker_test_helpers::{configuration, logging}; use uuid::Uuid; @@ -482,8 +482,8 @@ async fn should_not_allow_reloading_keys_for_unauthenticated_users() { mod deprecated_generate_key_endpoint { use bittorrent_tracker_core::authentication::Key; - use torrust_axum_tracker_api_server::environment::Started; - use torrust_tracker_api_client::v1::client::{headers_with_request_id, Client}; + use torrust_axum_rest_tracker_api_server::environment::Started; + use torrust_rest_tracker_api_client::v1::client::{headers_with_request_id, Client}; use torrust_tracker_test_helpers::logging::logs_contains_a_line_with; use torrust_tracker_test_helpers::{configuration, logging}; use uuid::Uuid; diff --git a/packages/axum-tracker-api-server/tests/server/v1/contract/context/health_check.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/health_check.rs similarity index 75% rename from packages/axum-tracker-api-server/tests/server/v1/contract/context/health_check.rs rename to packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/health_check.rs index d543422d3..3a08c6d51 
100644 --- a/packages/axum-tracker-api-server/tests/server/v1/contract/context/health_check.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/health_check.rs @@ -1,6 +1,6 @@ -use torrust_axum_tracker_api_server::environment::Started; -use torrust_axum_tracker_api_server::v1::context::health_check::resources::{Report, Status}; -use torrust_tracker_api_client::v1::client::get; +use torrust_axum_rest_tracker_api_server::environment::Started; +use torrust_axum_rest_tracker_api_server::v1::context::health_check::resources::{Report, Status}; +use torrust_rest_tracker_api_client::v1::client::get; use torrust_tracker_test_helpers::{configuration, logging}; use url::Url; diff --git a/packages/axum-tracker-api-server/tests/server/v1/contract/context/mod.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/mod.rs similarity index 100% rename from packages/axum-tracker-api-server/tests/server/v1/contract/context/mod.rs rename to packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/mod.rs diff --git a/packages/axum-tracker-api-server/tests/server/v1/contract/context/stats.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs similarity index 93% rename from packages/axum-tracker-api-server/tests/server/v1/contract/context/stats.rs rename to packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs index 179e5c555..1e66eb4cc 100644 --- a/packages/axum-tracker-api-server/tests/server/v1/contract/context/stats.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs @@ -1,9 +1,9 @@ use std::str::FromStr; use bittorrent_primitives::info_hash::InfoHash; -use torrust_axum_tracker_api_server::environment::Started; -use torrust_axum_tracker_api_server::v1::context::stats::resources::Stats; -use torrust_tracker_api_client::v1::client::{headers_with_request_id, Client}; +use torrust_axum_rest_tracker_api_server::environment::Started; 
+use torrust_axum_rest_tracker_api_server::v1::context::stats::resources::Stats; +use torrust_rest_tracker_api_client::v1::client::{headers_with_request_id, Client}; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::logging::logs_contains_a_line_with; use torrust_tracker_test_helpers::{configuration, logging}; diff --git a/packages/axum-tracker-api-server/tests/server/v1/contract/context/torrent.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs similarity index 96% rename from packages/axum-tracker-api-server/tests/server/v1/contract/context/torrent.rs rename to packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs index d77147f38..b479416e4 100644 --- a/packages/axum-tracker-api-server/tests/server/v1/contract/context/torrent.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs @@ -1,11 +1,11 @@ use std::str::FromStr; use bittorrent_primitives::info_hash::InfoHash; -use torrust_axum_tracker_api_server::environment::Started; -use torrust_axum_tracker_api_server::v1::context::torrent::resources::peer::Peer; -use torrust_axum_tracker_api_server::v1::context::torrent::resources::torrent::{self, Torrent}; -use torrust_tracker_api_client::common::http::{Query, QueryParam}; -use torrust_tracker_api_client::v1::client::{headers_with_request_id, Client}; +use torrust_axum_rest_tracker_api_server::environment::Started; +use torrust_axum_rest_tracker_api_server::v1::context::torrent::resources::peer::Peer; +use torrust_axum_rest_tracker_api_server::v1::context::torrent::resources::torrent::{self, Torrent}; +use torrust_rest_tracker_api_client::common::http::{Query, QueryParam}; +use torrust_rest_tracker_api_client::v1::client::{headers_with_request_id, Client}; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::logging::logs_contains_a_line_with; use 
torrust_tracker_test_helpers::{configuration, logging}; diff --git a/packages/axum-tracker-api-server/tests/server/v1/contract/context/whitelist.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/whitelist.rs similarity index 98% rename from packages/axum-tracker-api-server/tests/server/v1/contract/context/whitelist.rs rename to packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/whitelist.rs index e41b74f45..e8f98b8ab 100644 --- a/packages/axum-tracker-api-server/tests/server/v1/contract/context/whitelist.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/whitelist.rs @@ -1,8 +1,8 @@ use std::str::FromStr; use bittorrent_primitives::info_hash::InfoHash; -use torrust_axum_tracker_api_server::environment::Started; -use torrust_tracker_api_client::v1::client::{headers_with_request_id, Client}; +use torrust_axum_rest_tracker_api_server::environment::Started; +use torrust_rest_tracker_api_client::v1::client::{headers_with_request_id, Client}; use torrust_tracker_test_helpers::logging::logs_contains_a_line_with; use torrust_tracker_test_helpers::{configuration, logging}; use uuid::Uuid; diff --git a/packages/axum-tracker-api-server/tests/server/v1/contract/fixtures.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/fixtures.rs similarity index 100% rename from packages/axum-tracker-api-server/tests/server/v1/contract/fixtures.rs rename to packages/axum-rest-tracker-api-server/tests/server/v1/contract/fixtures.rs diff --git a/packages/axum-tracker-api-server/tests/server/v1/contract/mod.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/mod.rs similarity index 100% rename from packages/axum-tracker-api-server/tests/server/v1/contract/mod.rs rename to packages/axum-rest-tracker-api-server/tests/server/v1/contract/mod.rs diff --git a/packages/axum-tracker-api-server/tests/server/v1/mod.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/mod.rs similarity index 
100% rename from packages/axum-tracker-api-server/tests/server/v1/mod.rs rename to packages/axum-rest-tracker-api-server/tests/server/v1/mod.rs diff --git a/packages/tracker-api-client/Cargo.toml b/packages/rest-tracker-api-client/Cargo.toml similarity index 93% rename from packages/tracker-api-client/Cargo.toml rename to packages/rest-tracker-api-client/Cargo.toml index ee45e12f7..cba580e18 100644 --- a/packages/tracker-api-client/Cargo.toml +++ b/packages/rest-tracker-api-client/Cargo.toml @@ -2,7 +2,7 @@ description = "A library to interact with the Torrust Tracker REST API." keywords = ["bittorrent", "client", "tracker"] license = "LGPL-3.0" -name = "torrust-tracker-api-client" +name = "torrust-rest-tracker-api-client" readme = "README.md" authors.workspace = true diff --git a/packages/tracker-api-client/README.md b/packages/rest-tracker-api-client/README.md similarity index 100% rename from packages/tracker-api-client/README.md rename to packages/rest-tracker-api-client/README.md diff --git a/packages/tracker-api-client/docs/licenses/LICENSE-MIT_0 b/packages/rest-tracker-api-client/docs/licenses/LICENSE-MIT_0 similarity index 100% rename from packages/tracker-api-client/docs/licenses/LICENSE-MIT_0 rename to packages/rest-tracker-api-client/docs/licenses/LICENSE-MIT_0 diff --git a/packages/tracker-api-client/src/common/http.rs b/packages/rest-tracker-api-client/src/common/http.rs similarity index 100% rename from packages/tracker-api-client/src/common/http.rs rename to packages/rest-tracker-api-client/src/common/http.rs diff --git a/packages/tracker-api-client/src/common/mod.rs b/packages/rest-tracker-api-client/src/common/mod.rs similarity index 100% rename from packages/tracker-api-client/src/common/mod.rs rename to packages/rest-tracker-api-client/src/common/mod.rs diff --git a/packages/tracker-api-client/src/connection_info.rs b/packages/rest-tracker-api-client/src/connection_info.rs similarity index 100% rename from 
packages/tracker-api-client/src/connection_info.rs rename to packages/rest-tracker-api-client/src/connection_info.rs diff --git a/packages/tracker-api-client/src/lib.rs b/packages/rest-tracker-api-client/src/lib.rs similarity index 100% rename from packages/tracker-api-client/src/lib.rs rename to packages/rest-tracker-api-client/src/lib.rs diff --git a/packages/tracker-api-client/src/v1/client.rs b/packages/rest-tracker-api-client/src/v1/client.rs similarity index 100% rename from packages/tracker-api-client/src/v1/client.rs rename to packages/rest-tracker-api-client/src/v1/client.rs diff --git a/packages/tracker-api-client/src/v1/mod.rs b/packages/rest-tracker-api-client/src/v1/mod.rs similarity index 100% rename from packages/tracker-api-client/src/v1/mod.rs rename to packages/rest-tracker-api-client/src/v1/mod.rs diff --git a/packages/tracker-api-core/Cargo.toml b/packages/rest-tracker-api-core/Cargo.toml similarity index 96% rename from packages/tracker-api-core/Cargo.toml rename to packages/rest-tracker-api-core/Cargo.toml index 495729e69..d9ccb5d3f 100644 --- a/packages/tracker-api-core/Cargo.toml +++ b/packages/rest-tracker-api-core/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true homepage.workspace = true keywords = ["api", "bittorrent", "core", "library", "tracker"] license.workspace = true -name = "torrust-tracker-api-core" +name = "torrust-rest-tracker-api-core" publish.workspace = true readme = "README.md" repository.workspace = true diff --git a/packages/tracker-api-core/LICENSE b/packages/rest-tracker-api-core/LICENSE similarity index 100% rename from packages/tracker-api-core/LICENSE rename to packages/rest-tracker-api-core/LICENSE diff --git a/packages/tracker-api-core/README.md b/packages/rest-tracker-api-core/README.md similarity index 100% rename from packages/tracker-api-core/README.md rename to packages/rest-tracker-api-core/README.md diff --git a/packages/tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs 
similarity index 100% rename from packages/tracker-api-core/src/container.rs rename to packages/rest-tracker-api-core/src/container.rs diff --git a/packages/tracker-api-core/src/lib.rs b/packages/rest-tracker-api-core/src/lib.rs similarity index 100% rename from packages/tracker-api-core/src/lib.rs rename to packages/rest-tracker-api-core/src/lib.rs diff --git a/packages/tracker-api-core/src/statistics/metrics.rs b/packages/rest-tracker-api-core/src/statistics/metrics.rs similarity index 100% rename from packages/tracker-api-core/src/statistics/metrics.rs rename to packages/rest-tracker-api-core/src/statistics/metrics.rs diff --git a/packages/tracker-api-core/src/statistics/mod.rs b/packages/rest-tracker-api-core/src/statistics/mod.rs similarity index 100% rename from packages/tracker-api-core/src/statistics/mod.rs rename to packages/rest-tracker-api-core/src/statistics/mod.rs diff --git a/packages/tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs similarity index 100% rename from packages/tracker-api-core/src/statistics/services.rs rename to packages/rest-tracker-api-core/src/statistics/services.rs diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml index 731ee900d..ac1cee88d 100644 --- a/packages/tracker-core/Cargo.toml +++ b/packages/tracker-core/Cargo.toml @@ -38,6 +38,6 @@ tracing = "0" local-ip-address = "0" mockall = "0" testcontainers = "0" -torrust-tracker-api-client = { version = "3.0.0-develop", path = "../tracker-api-client" } +torrust-rest-tracker-api-client = { version = "3.0.0-develop", path = "../rest-tracker-api-client" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } url = "2.5.4" diff --git a/src/app.rs b/src/app.rs index 5458ea600..60e907a88 100644 --- a/src/app.rs +++ b/src/app.rs @@ -118,7 +118,7 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> if let Some(job) = tracker_apis::start_job( 
http_api_container, registar.give_form(), - torrust_axum_tracker_api_server::Version::V1, + torrust_axum_rest_tracker_api_server::Version::V1, ) .await { diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 93850d65e..d152e853f 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -25,11 +25,11 @@ use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use tokio::task::JoinHandle; +use torrust_axum_rest_tracker_api_server::server::{ApiServer, Launcher}; +use torrust_axum_rest_tracker_api_server::Version; use torrust_axum_server::tsl::make_rust_tls; -use torrust_axum_tracker_api_server::server::{ApiServer, Launcher}; -use torrust_axum_tracker_api_server::Version; +use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_server_lib::registar::ServiceRegistrationForm; -use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_tracker_configuration::AccessTokens; use tracing::instrument; @@ -96,9 +96,9 @@ async fn start_v1( mod tests { use std::sync::Arc; - use torrust_axum_tracker_api_server::Version; + use torrust_axum_rest_tracker_api_server::Version; + use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_server_lib::registar::Registar; - use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::bootstrap::app::initialize_global_services; diff --git a/src/container.rs b/src/container.rs index b10ac9ae0..4bdae1b29 100644 --- a/src/container.rs +++ b/src/container.rs @@ -17,7 +17,7 @@ use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self, MAX_CONNECTION_ID_ERRORS_PER_IP}; use tokio::sync::RwLock; -use torrust_tracker_api_core::container::TrackerHttpApiCoreContainer; +use 
torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_tracker_configuration::{Configuration, Core, HttpApi, HttpTracker, UdpTracker}; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; use tracing::instrument; diff --git a/src/lib.rs b/src/lib.rs index 5f05df8b2..e947d2ab5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -55,7 +55,7 @@ //! //! From the end-user perspective the Torrust Tracker exposes three different services. //! -//! - A REST [`API`](torrust_axum_tracker_api_server) +//! - A REST [`API`](torrust_axum_rest_tracker_api_server) //! - One or more [`UDP`](torrust_udp_tracker_server) trackers //! - One or more [`HTTP`](torrust_axum_http_tracker_server) trackers //! @@ -124,7 +124,7 @@ //! By default the tracker uses `SQLite` and the database file name `sqlite3.db`. //! //! You only need the `tls` directory in case you are setting up SSL for the HTTP tracker or the tracker API. -//! Visit [`HTTP`](torrust_axum_http_tracker_server) or [`API`](torrust_axum_tracker_api_server) if you want to know how you can use HTTPS. +//! Visit [`HTTP`](torrust_axum_http_tracker_server) or [`API`](torrust_axum_rest_tracker_api_server) if you want to know how you can use HTTPS. //! //! ## Install from sources //! @@ -280,7 +280,7 @@ //! } //! ``` //! -//! Refer to the [`API`](torrust_axum_tracker_api_server) documentation for more information about the [`API`](torrust_axum_tracker_api_server) endpoints. +//! Refer to the [`API`](torrust_axum_rest_tracker_api_server) documentation for more information about the [`API`](torrust_axum_rest_tracker_api_server) endpoints. //! //! ## HTTP tracker //! @@ -359,7 +359,7 @@ //! //! If the tracker is running in `private` or `private_listed` mode you will need to provide a valid authentication key. //! -//! Right now the only way to add new keys is via the REST [`API`](torrust_axum_tracker_api_server). The endpoint `POST /api/vi/key/:duration_in_seconds` +//! 
Right now the only way to add new keys is via the REST [`API`](torrust_axum_rest_tracker_api_server). The endpoint `POST /api/vi/key/:duration_in_seconds` //! will return an expiring key that will be valid for `duration_in_seconds` seconds. //! //! Using `curl` you can create a 2-minute valid auth key: @@ -379,7 +379,7 @@ //! ``` //! //! You can also use the Torrust Tracker together with the [Torrust Index](https://github.com/torrust/torrust-index). If that's the case, -//! the Index will create the keys by using the tracker [API](torrust_axum_tracker_api_server). +//! the Index will create the keys by using the tracker [API](torrust_axum_rest_tracker_api_server). //! //! ## UDP tracker //! @@ -406,7 +406,7 @@ //! Torrust Tracker has four main components: //! //! - The core tracker [`core`] -//! - The tracker REST [`API`](torrust_axum_tracker_api_server) +//! - The tracker REST [`API`](torrust_axum_rest_tracker_api_server) //! - The [`UDP`](torrust_udp_tracker_server) tracker //! - The [`HTTP`](torrust_axum_http_tracker_server) tracker //! @@ -434,7 +434,7 @@ //! - Torrents: to get peers for a torrent //! - Whitelist: to handle the torrent whitelist when the tracker runs on `listed` or `private_listed` mode //! -//! See [`API`](torrust_axum_tracker_api_server) for more details on the REST API. +//! See [`API`](torrust_axum_rest_tracker_api_server) for more details on the REST API. //! //! ## UDP tracker //! 
From 5da41038f5f01a193e8fef1387e0b4af4357f3b4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 27 Feb 2025 19:11:23 +0000 Subject: [PATCH 341/802] chore(deps): udpate dependencies ``` cargo update Updating crates.io index Locking 24 packages to latest compatible versions Updating async-compression v0.4.18 -> v0.4.19 Updating cc v1.2.14 -> v1.2.15 Updating chrono v0.4.39 -> v0.4.40 Updating clap v4.5.30 -> v4.5.31 Updating clap_builder v4.5.30 -> v4.5.31 Updating either v1.13.0 -> v1.14.0 Updating flate2 v1.0.35 -> v1.1.0 Updating inout v0.1.3 -> v0.1.4 Updating libc v0.2.169 -> v0.2.170 Updating litemap v0.7.4 -> v0.7.5 Updating log v0.4.25 -> v0.4.26 Updating miniz_oxide v0.8.4 -> v0.8.5 Updating pem v3.0.4 -> v3.0.5 Updating portable-atomic v1.10.0 -> v1.11.0 Updating rand_core v0.9.1 -> v0.9.2 Updating redox_syscall v0.5.8 -> v0.5.9 Updating ring v0.17.9 -> v0.17.11 Updating uuid v1.13.2 -> v1.15.1 Adding windows-link v0.1.0 Updating zerofrom v0.1.5 -> v0.1.6 Updating zerofrom-derive v0.1.5 -> v0.1.6 Updating zstd v0.13.2 -> v0.13.3 Updating zstd-safe v7.2.1 -> v7.2.3 Updating zstd-sys v2.0.13+zstd.1.5.6 -> v2.0.14+zstd.1.5.7 ``` --- Cargo.lock | 115 +++++++++--------- .../tests/common/fixtures.rs | 4 +- .../tests/common/fixtures.rs | 6 +- 3 files changed, 65 insertions(+), 60 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fbb55562a..3e1cea83d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -208,9 +208,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.18" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df895a515f70646414f4b45c0b79082783b80552b373a68283012928df56f522" +checksum = "06575e6a9673580f52661c92107baabffbf41e2141373441cbcdc47cb733003c" dependencies = [ "brotli", "flate2", @@ -925,9 +925,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.14" +version = "1.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0c3d1b2e905a3a7b00a6141adb0e4c0bb941d11caf55349d863942a1cc44e3c9" +checksum = "c736e259eea577f443d5c86c304f9f4ae0295c43f3ba05c21f1d66b5f06001af" dependencies = [ "jobserver", "libc", @@ -957,15 +957,15 @@ checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" -version = "0.4.39" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" +checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", "serde", - "windows-targets 0.52.6", + "windows-link", ] [[package]] @@ -1018,9 +1018,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.30" +version = "4.5.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92b7b18d71fad5313a1e320fa9897994228ce274b60faa4d694fe0ea89cd9e6d" +checksum = "027bb0d98429ae334a8698531da7077bdf906419543a35a55c2cb1b66437d767" dependencies = [ "clap_builder", "clap_derive", @@ -1028,9 +1028,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.30" +version = "4.5.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a35db2071778a7344791a4fb4f95308b5673d219dee3ae348b86642574ecc90c" +checksum = "5589e0cba072e0f3d23791efac0fd8627b49c829c196a492e88168e6a669d863" dependencies = [ "anstream", "anstyle", @@ -1388,9 +1388,9 @@ checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" [[package]] name = "either" -version = "1.13.0" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +checksum = "b7914353092ddf589ad78f25c5c1c21b7f80b0ff8621e7c814c3485b5306da9d" [[package]] name = "encoding_rs" @@ -1513,9 +1513,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.35" +version = "1.1.0" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" +checksum = "11faaf5a5236997af9848be0bef4db95824b1d534ebc64d0f0c6cf3e67bd38dc" dependencies = [ "crc32fast", "libz-sys", @@ -2245,9 +2245,9 @@ checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" [[package]] name = "inout" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0c10553d664a4d0bcff9f4215d0aac67a639cc68ef660840afe309b807bc9f5" +checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" dependencies = [ "generic-array", ] @@ -2344,9 +2344,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.169" +version = "0.2.170" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" +checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" [[package]] name = "libloading" @@ -2372,7 +2372,7 @@ checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ "bitflags 2.8.0", "libc", - "redox_syscall 0.5.8", + "redox_syscall 0.5.9", ] [[package]] @@ -2405,9 +2405,9 @@ checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" +checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" [[package]] name = "local-ip-address" @@ -2433,9 +2433,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.25" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" +checksum = 
"30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" dependencies = [ "value-bag", ] @@ -2475,9 +2475,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3b1c9bd4fe1f0f8b387f6eb9eb3b4a1aa26185e5750efb9140301703f62cd1b" +checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5" dependencies = [ "adler2", ] @@ -2816,7 +2816,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.8", + "redox_syscall 0.5.9", "smallvec", "windows-targets 0.52.6", ] @@ -2871,9 +2871,9 @@ dependencies = [ [[package]] name = "pem" -version = "3.0.4" +version = "3.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" +checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" dependencies = [ "base64 0.22.1", "serde", @@ -3017,9 +3017,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "280dc24453071f1b63954171985a0b0d30058d287960968b9b2aca264c8d4ee6" +checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" [[package]] name = "powerfmt" @@ -3211,7 +3211,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" dependencies = [ "rand_chacha 0.9.0", - "rand_core 0.9.1", + "rand_core 0.9.2", "zerocopy 0.8.20", ] @@ -3232,7 +3232,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", - "rand_core 0.9.1", + "rand_core 0.9.2", ] 
[[package]] @@ -3246,9 +3246,9 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a88e0da7a2c97baa202165137c158d0a2e824ac465d13d81046727b34cb247d3" +checksum = "7a509b1a2ffbe92afab0e55c8fd99dea1c280e8171bd2d88682bb20bc41cbc2c" dependencies = [ "getrandom 0.3.1", "zerocopy 0.8.20", @@ -3285,9 +3285,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" +checksum = "82b568323e98e49e2a0899dcee453dd679fae22d69adf9b11dd508d1549b7e2f" dependencies = [ "bitflags 2.8.0", ] @@ -3382,9 +3382,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.9" +version = "0.17.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e75ec5e92c4d8aede845126adc388046234541629e76029599ed35a003c7ed24" +checksum = "da5349ae27d3887ca812fb375b45a4fbb36d8d12d2df394968cd86e35683fe73" dependencies = [ "cc", "cfg-if", @@ -4379,7 +4379,7 @@ dependencies = [ "hyper", "local-ip-address", "percent-encoding", - "rand 0.8.5", + "rand 0.9.0", "reqwest", "serde", "serde_bencode", @@ -4431,7 +4431,7 @@ dependencies = [ "torrust-tracker-primitives", "torrust-tracker-test-helpers", "torrust-udp-tracker-server", - "tower 0.4.13", + "tower 0.5.2", "tower-http", "tracing", "url", @@ -4659,7 +4659,7 @@ dependencies = [ "futures-util", "local-ip-address", "mockall", - "rand 0.8.5", + "rand 0.9.0", "ringbuf", "thiserror 2.0.11", "tokio", @@ -4685,7 +4685,6 @@ dependencies = [ "futures-util", "pin-project", "pin-project-lite", - "tokio", "tower-layer", "tower-service", "tracing", @@ -4893,9 +4892,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.13.2" +version = "1.15.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c1f41ffb7cf259f1ecc2876861a17e7142e63ead296f671f81f6ae85903e0d6" +checksum = "e0f540e3240398cce6128b64ba83fdbdd86129c16a3aa1a3a252efd66eb3d587" dependencies = [ "getrandom 0.3.1", "rand 0.9.0", @@ -5080,6 +5079,12 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-link" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dccfd733ce2b1753b03b6d3c65edf020262ea35e20ccdf3e288043e6dd620e3" + [[package]] name = "windows-registry" version = "0.2.0" @@ -5381,18 +5386,18 @@ dependencies = [ [[package]] name = "zerofrom" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", @@ -5430,27 +5435,27 @@ dependencies = [ [[package]] name = "zstd" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "7.2.1" +version = "7.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" +checksum = "f3051792fbdc2e1e143244dc28c60f73d8470e93f3f9cbd0ead44da5ed802722" dependencies = [ "zstd-sys", ] [[package]] 
name = "zstd-sys" -version = "2.0.13+zstd.1.5.6" +version = "2.0.14+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" +checksum = "8fb060d4926e4ac3a3ad15d864e99ceb5f343c6b34f5bd6d81ae6ed417311be5" dependencies = [ "cc", "pkg-config", diff --git a/packages/axum-http-tracker-server/tests/common/fixtures.rs b/packages/axum-http-tracker-server/tests/common/fixtures.rs index 995079adf..2b4a42b58 100644 --- a/packages/axum-http-tracker-server/tests/common/fixtures.rs +++ b/packages/axum-http-tracker-server/tests/common/fixtures.rs @@ -15,8 +15,8 @@ pub fn invalid_info_hashes() -> Vec { /// Returns a random info hash. pub fn random_info_hash() -> InfoHash { - let mut rng = rand::thread_rng(); - let random_bytes: [u8; 20] = rng.gen(); + let mut rng = rand::rng(); + let random_bytes: [u8; 20] = rng.random(); InfoHash::from_bytes(&random_bytes) } diff --git a/packages/udp-tracker-server/tests/common/fixtures.rs b/packages/udp-tracker-server/tests/common/fixtures.rs index 477314398..f4066c67a 100644 --- a/packages/udp-tracker-server/tests/common/fixtures.rs +++ b/packages/udp-tracker-server/tests/common/fixtures.rs @@ -4,14 +4,14 @@ use rand::prelude::*; /// Returns a random info hash. pub fn random_info_hash() -> InfoHash { - let mut rng = rand::thread_rng(); - let random_bytes: [u8; 20] = rng.gen(); + let mut rng = rand::rng(); + let random_bytes: [u8; 20] = rng.random(); InfoHash::from_bytes(&random_bytes) } /// Returns a random transaction id. pub fn random_transaction_id() -> TransactionId { - let random_value = rand::thread_rng().gen(); + let random_value = rand::rng().random(); TransactionId::new(random_value) } From 89b0bfd4549d4f7eb453389ec47880ad9956d8d8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 28 Feb 2025 09:51:42 +0000 Subject: [PATCH 342/802] refactor: [#689] improve REST API client - Add a timeout to the requests. 
- Return an error in the construction if it can't build the HTTP client. - Extract constants. --- .../server/v1/contract/authentication.rs | 6 +++ .../server/v1/contract/context/auth_key.rs | 21 ++++++++++ .../tests/server/v1/contract/context/stats.rs | 3 ++ .../server/v1/contract/context/torrent.rs | 15 +++++++ .../server/v1/contract/context/whitelist.rs | 18 +++++++- .../rest-tracker-api-client/src/v1/client.rs | 42 ++++++++++++++----- 6 files changed, 93 insertions(+), 12 deletions(-) diff --git a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/authentication.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/authentication.rs index eac30d93a..3b6419187 100644 --- a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/authentication.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/authentication.rs @@ -16,6 +16,7 @@ async fn should_authenticate_requests_by_using_a_token_query_param() { let token = env.get_connection_info().api_token.unwrap(); let response = Client::new(env.get_connection_info()) + .unwrap() .get_request_with_query("stats", Query::params([QueryParam::new("token", &token)].to_vec()), None) .await; @@ -33,6 +34,7 @@ async fn should_not_authenticate_requests_when_the_token_is_missing() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .get_request_with_query("stats", Query::default(), Some(headers_with_request_id(request_id))) .await; @@ -55,6 +57,7 @@ async fn should_not_authenticate_requests_when_the_token_is_empty() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .get_request_with_query( "stats", Query::params([QueryParam::new("token", "")].to_vec()), @@ -81,6 +84,7 @@ async fn should_not_authenticate_requests_when_the_token_is_invalid() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .get_request_with_query( "stats", 
Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec()), @@ -108,6 +112,7 @@ async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_que // At the beginning of the query component let response = Client::new(env.get_connection_info()) + .unwrap() .get_request(&format!("torrents?token={token}&limit=1")) .await; @@ -115,6 +120,7 @@ async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_que // At the end of the query component let response = Client::new(env.get_connection_info()) + .unwrap() .get_request(&format!("torrents?limit=1&token={token}")) .await; diff --git a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/auth_key.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/auth_key.rs index f6355fc6e..3781f4f60 100644 --- a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/auth_key.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/auth_key.rs @@ -25,6 +25,7 @@ async fn should_allow_generating_a_new_random_auth_key() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .add_auth_key( AddKeyForm { opt_key: None, @@ -56,6 +57,7 @@ async fn should_allow_uploading_a_preexisting_auth_key() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .add_auth_key( AddKeyForm { opt_key: Some("Xc1L4PbQJSFGlrgSRZl8wxSFAuMa21z5".to_string()), @@ -87,6 +89,7 @@ async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() let request_id = Uuid::new_v4(); let response = Client::new(connection_with_invalid_token(env.get_connection_info().origin)) + .unwrap() .add_auth_key( AddKeyForm { opt_key: None, @@ -106,6 +109,7 @@ async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() let request_id = Uuid::new_v4(); let response = Client::new(connection_with_no_token(env.get_connection_info().origin)) + 
.unwrap() .add_auth_key( AddKeyForm { opt_key: None, @@ -136,6 +140,7 @@ async fn should_fail_when_the_auth_key_cannot_be_generated() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .add_auth_key( AddKeyForm { opt_key: None, @@ -173,6 +178,7 @@ async fn should_allow_deleting_an_auth_key() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .delete_auth_key(&auth_key.key.to_string(), Some(headers_with_request_id(request_id))) .await; @@ -207,6 +213,7 @@ async fn should_fail_generating_a_new_auth_key_when_the_provided_key_is_invalid( let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .post_form( "keys", &InvalidAddKeyForm { @@ -246,6 +253,7 @@ async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid( let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .post_form( "keys", &InvalidAddKeyForm { @@ -282,6 +290,7 @@ async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .delete_auth_key(invalid_auth_key, Some(headers_with_request_id(request_id))) .await; @@ -311,6 +320,7 @@ async fn should_fail_when_the_auth_key_cannot_be_deleted() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .delete_auth_key(&auth_key.key.to_string(), Some(headers_with_request_id(request_id))) .await; @@ -344,6 +354,7 @@ async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { let request_id = Uuid::new_v4(); let response = Client::new(connection_with_invalid_token(env.get_connection_info().origin)) + .unwrap() .delete_auth_key(&auth_key.key.to_string(), Some(headers_with_request_id(request_id))) .await; @@ -366,6 +377,7 @@ async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { 
let request_id = Uuid::new_v4(); let response = Client::new(connection_with_no_token(env.get_connection_info().origin)) + .unwrap() .delete_auth_key(&auth_key.key.to_string(), Some(headers_with_request_id(request_id))) .await; @@ -396,6 +408,7 @@ async fn should_allow_reloading_keys() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .reload_keys(Some(headers_with_request_id(request_id))) .await; @@ -423,6 +436,7 @@ async fn should_fail_when_keys_cannot_be_reloaded() { force_database_error(&env.container.tracker_core_container.database); let response = Client::new(env.get_connection_info()) + .unwrap() .reload_keys(Some(headers_with_request_id(request_id))) .await; @@ -453,6 +467,7 @@ async fn should_not_allow_reloading_keys_for_unauthenticated_users() { let request_id = Uuid::new_v4(); let response = Client::new(connection_with_invalid_token(env.get_connection_info().origin)) + .unwrap() .reload_keys(Some(headers_with_request_id(request_id))) .await; @@ -466,6 +481,7 @@ async fn should_not_allow_reloading_keys_for_unauthenticated_users() { let request_id = Uuid::new_v4(); let response = Client::new(connection_with_no_token(env.get_connection_info().origin)) + .unwrap() .reload_keys(Some(headers_with_request_id(request_id))) .await; @@ -504,6 +520,7 @@ mod deprecated_generate_key_endpoint { let seconds_valid = 60; let response = Client::new(env.get_connection_info()) + .unwrap() .generate_auth_key(seconds_valid, None) .await; @@ -530,12 +547,14 @@ mod deprecated_generate_key_endpoint { let seconds_valid = 60; let response = Client::new(connection_with_invalid_token(env.get_connection_info().origin)) + .unwrap() .generate_auth_key(seconds_valid, Some(headers_with_request_id(request_id))) .await; assert_token_not_valid(response).await; let response = Client::new(connection_with_no_token(env.get_connection_info().origin)) + .unwrap() .generate_auth_key(seconds_valid, None) .await; @@ -563,6 +582,7 @@ mod 
deprecated_generate_key_endpoint { for invalid_key_duration in invalid_key_durations { let response = Client::new(env.get_connection_info()) + .unwrap() .post_empty(&format!("key/{invalid_key_duration}"), None) .await; @@ -583,6 +603,7 @@ mod deprecated_generate_key_endpoint { let request_id = Uuid::new_v4(); let seconds_valid = 60; let response = Client::new(env.get_connection_info()) + .unwrap() .generate_auth_key(seconds_valid, Some(headers_with_request_id(request_id))) .await; diff --git a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs index 1e66eb4cc..51a4804e7 100644 --- a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs @@ -26,6 +26,7 @@ async fn should_allow_getting_tracker_statistics() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .get_tracker_statistics(Some(headers_with_request_id(request_id))) .await; @@ -80,6 +81,7 @@ async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() let request_id = Uuid::new_v4(); let response = Client::new(connection_with_invalid_token(env.get_connection_info().origin)) + .unwrap() .get_tracker_statistics(Some(headers_with_request_id(request_id))) .await; @@ -93,6 +95,7 @@ async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() let request_id = Uuid::new_v4(); let response = Client::new(connection_with_no_token(env.get_connection_info().origin)) + .unwrap() .get_tracker_statistics(Some(headers_with_request_id(request_id))) .await; diff --git a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs index b479416e4..42421db99 100644 --- 
a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs @@ -31,6 +31,7 @@ async fn should_allow_getting_all_torrents() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .get_torrents(Query::empty(), Some(headers_with_request_id(request_id))) .await; @@ -64,6 +65,7 @@ async fn should_allow_limiting_the_torrents_in_the_result() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .get_torrents( Query::params([QueryParam::new("limit", "1")].to_vec()), Some(headers_with_request_id(request_id)), @@ -100,6 +102,7 @@ async fn should_allow_the_torrents_result_pagination() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .get_torrents( Query::params([QueryParam::new("offset", "1")].to_vec()), Some(headers_with_request_id(request_id)), @@ -135,6 +138,7 @@ async fn should_allow_getting_a_list_of_torrents_providing_infohashes() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .get_torrents( Query::params( [ @@ -181,6 +185,7 @@ async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_ let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .get_torrents( Query::params([QueryParam::new("offset", invalid_offset)].to_vec()), Some(headers_with_request_id(request_id)), @@ -209,6 +214,7 @@ async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_p let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .get_torrents( Query::params([QueryParam::new("limit", invalid_limit)].to_vec()), Some(headers_with_request_id(request_id)), @@ -237,6 +243,7 @@ async fn should_fail_getting_torrents_when_the_info_hash_parameter_is_invalid() let request_id = 
Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .get_torrents( Query::params([QueryParam::new("info_hash", invalid_info_hash)].to_vec()), Some(headers_with_request_id(request_id)), @@ -262,6 +269,7 @@ async fn should_not_allow_getting_torrents_for_unauthenticated_users() { let request_id = Uuid::new_v4(); let response = Client::new(connection_with_invalid_token(env.get_connection_info().origin)) + .unwrap() .get_torrents(Query::empty(), Some(headers_with_request_id(request_id))) .await; @@ -275,6 +283,7 @@ async fn should_not_allow_getting_torrents_for_unauthenticated_users() { let request_id = Uuid::new_v4(); let response = Client::new(connection_with_no_token(env.get_connection_info().origin)) + .unwrap() .get_torrents(Query::default(), Some(headers_with_request_id(request_id))) .await; @@ -303,6 +312,7 @@ async fn should_allow_getting_a_torrent_info() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .get_torrent(&info_hash.to_string(), Some(headers_with_request_id(request_id))) .await; @@ -331,6 +341,7 @@ async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exis let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 let response = Client::new(env.get_connection_info()) + .unwrap() .get_torrent(&info_hash.to_string(), Some(headers_with_request_id(request_id))) .await; @@ -349,6 +360,7 @@ async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invali let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .get_torrent(invalid_infohash, Some(headers_with_request_id(request_id))) .await; @@ -359,6 +371,7 @@ async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invali let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .get_torrent(invalid_infohash, 
Some(headers_with_request_id(request_id))) .await; @@ -381,6 +394,7 @@ async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { let request_id = Uuid::new_v4(); let response = Client::new(connection_with_invalid_token(env.get_connection_info().origin)) + .unwrap() .get_torrent(&info_hash.to_string(), Some(headers_with_request_id(request_id))) .await; @@ -394,6 +408,7 @@ async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { let request_id = Uuid::new_v4(); let response = Client::new(connection_with_no_token(env.get_connection_info().origin)) + .unwrap() .get_torrent(&info_hash.to_string(), Some(headers_with_request_id(request_id))) .await; diff --git a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/whitelist.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/whitelist.rs index e8f98b8ab..61fc233d0 100644 --- a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/whitelist.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/whitelist.rs @@ -25,6 +25,7 @@ async fn should_allow_whitelisting_a_torrent() { let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let response = Client::new(env.get_connection_info()) + .unwrap() .whitelist_a_torrent(&info_hash, Some(headers_with_request_id(request_id))) .await; @@ -48,7 +49,7 @@ async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 - let api_client = Client::new(env.get_connection_info()); + let api_client = Client::new(env.get_connection_info()).unwrap(); let request_id = Uuid::new_v4(); @@ -78,6 +79,7 @@ async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { let request_id = Uuid::new_v4(); let response = Client::new(connection_with_invalid_token(env.get_connection_info().origin)) + .unwrap() 
.whitelist_a_torrent(&info_hash, Some(headers_with_request_id(request_id))) .await; @@ -91,6 +93,7 @@ async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { let request_id = Uuid::new_v4(); let response = Client::new(connection_with_no_token(env.get_connection_info().origin)) + .unwrap() .whitelist_a_torrent(&info_hash, Some(headers_with_request_id(request_id))) .await; @@ -117,6 +120,7 @@ async fn should_fail_when_the_torrent_cannot_be_whitelisted() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .whitelist_a_torrent(&info_hash, Some(headers_with_request_id(request_id))) .await; @@ -140,6 +144,7 @@ async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invali for invalid_infohash in &invalid_infohashes_returning_bad_request() { let response = Client::new(env.get_connection_info()) + .unwrap() .whitelist_a_torrent(invalid_infohash, Some(headers_with_request_id(request_id))) .await; @@ -150,6 +155,7 @@ async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invali for invalid_infohash in &invalid_infohashes_returning_not_found() { let response = Client::new(env.get_connection_info()) + .unwrap() .whitelist_a_torrent(invalid_infohash, Some(headers_with_request_id(request_id))) .await; @@ -178,6 +184,7 @@ async fn should_allow_removing_a_torrent_from_the_whitelist() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .remove_torrent_from_whitelist(&hash, Some(headers_with_request_id(request_id))) .await; @@ -204,6 +211,7 @@ async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whi let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .remove_torrent_from_whitelist(&non_whitelisted_torrent_hash, Some(headers_with_request_id(request_id))) .await; @@ -222,6 +230,7 @@ async fn 
should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_inf let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .remove_torrent_from_whitelist(invalid_infohash, Some(headers_with_request_id(request_id))) .await; @@ -232,6 +241,7 @@ async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_inf let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .remove_torrent_from_whitelist(invalid_infohash, Some(headers_with_request_id(request_id))) .await; @@ -261,6 +271,7 @@ async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .remove_torrent_from_whitelist(&hash, Some(headers_with_request_id(request_id))) .await; @@ -293,6 +304,7 @@ async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthentica let request_id = Uuid::new_v4(); let response = Client::new(connection_with_invalid_token(env.get_connection_info().origin)) + .unwrap() .remove_torrent_from_whitelist(&hash, Some(headers_with_request_id(request_id))) .await; @@ -313,6 +325,7 @@ async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthentica let request_id = Uuid::new_v4(); let response = Client::new(connection_with_no_token(env.get_connection_info().origin)) + .unwrap() .remove_torrent_from_whitelist(&hash, Some(headers_with_request_id(request_id))) .await; @@ -334,6 +347,7 @@ async fn should_allow_reload_the_whitelist_from_the_database() { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); + env.container .tracker_core_container .whitelist_manager @@ -344,6 +358,7 @@ async fn should_allow_reload_the_whitelist_from_the_database() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() 
.reload_whitelist(Some(headers_with_request_id(request_id))) .await; @@ -382,6 +397,7 @@ async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { let request_id = Uuid::new_v4(); let response = Client::new(env.get_connection_info()) + .unwrap() .reload_whitelist(Some(headers_with_request_id(request_id))) .await; diff --git a/packages/rest-tracker-api-client/src/v1/client.rs b/packages/rest-tracker-api-client/src/v1/client.rs index 54daa3289..65e3fceb8 100644 --- a/packages/rest-tracker-api-client/src/v1/client.rs +++ b/packages/rest-tracker-api-client/src/v1/client.rs @@ -1,5 +1,7 @@ +use std::time::Duration; + use hyper::HeaderMap; -use reqwest::Response; +use reqwest::{Error, Response}; use serde::Serialize; use url::Url; use uuid::Uuid; @@ -7,19 +9,31 @@ use uuid::Uuid; use crate::common::http::{Query, QueryParam, ReqwestQuery}; use crate::connection_info::ConnectionInfo; +const TOKEN_PARAM_NAME: &str = "token"; +const API_PATH: &str = "api/v1/"; +const DEFAULT_REQUEST_TIMEOUT_IN_SECS: u64 = 5; + /// API Client pub struct Client { connection_info: ConnectionInfo, base_path: String, + client: reqwest::Client, } impl Client { - #[must_use] - pub fn new(connection_info: ConnectionInfo) -> Self { - Self { + /// # Errors + /// + /// Will return an error if the HTTP client can't be created. 
+ pub fn new(connection_info: ConnectionInfo) -> Result { + let client = reqwest::Client::builder() + .timeout(Duration::from_secs(DEFAULT_REQUEST_TIMEOUT_IN_SECS)) + .build()?; + + Ok(Self { connection_info, - base_path: "api/v1/".to_string(), - } + base_path: API_PATH.to_string(), + client, + }) } pub async fn generate_auth_key(&self, seconds_valid: i32, headers: Option) -> Response { @@ -66,7 +80,7 @@ impl Client { let mut query: Query = params; if let Some(token) = &self.connection_info.api_token { - query.add_param(QueryParam::new("token", token)); + query.add_param(QueryParam::new(TOKEN_PARAM_NAME, token)); } self.get_request_with_query(path, query, headers).await @@ -76,7 +90,8 @@ impl Client { /// /// Will panic if the request can't be sent pub async fn post_empty(&self, path: &str, headers: Option) -> Response { - let builder = reqwest::Client::new() + let builder = self + .client .post(self.base_url(path).clone()) .query(&ReqwestQuery::from(self.query_with_token())); @@ -92,7 +107,8 @@ impl Client { /// /// Will panic if the request can't be sent pub async fn post_form(&self, path: &str, form: &T, headers: Option) -> Response { - let builder = reqwest::Client::new() + let builder = self + .client .post(self.base_url(path).clone()) .query(&ReqwestQuery::from(self.query_with_token())) .json(&form); @@ -109,7 +125,8 @@ impl Client { /// /// Will panic if the request can't be sent async fn delete(&self, path: &str, headers: Option) -> Response { - let builder = reqwest::Client::new() + let builder = self + .client .delete(self.base_url(path).clone()) .query(&ReqwestQuery::from(self.query_with_token())); @@ -145,7 +162,10 @@ impl Client { /// /// Will panic if the request can't be sent pub async fn get(path: Url, query: Option, headers: Option) -> Response { - let builder = reqwest::Client::builder().build().unwrap(); + let builder = reqwest::Client::builder() + .timeout(Duration::from_secs(DEFAULT_REQUEST_TIMEOUT_IN_SECS)) + .build() + .unwrap(); let builder 
= match query { Some(params) => builder.get(path).query(&ReqwestQuery::from(params)), From 9cda8ec21206572454b8615d8ad052636be678b8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 28 Feb 2025 11:17:39 +0000 Subject: [PATCH 343/802] refactor: [#1326] extract bittorrent_http_tracker_core::services::announce::AnnounceService --- .../axum-http-tracker-server/src/server.rs | 10 + .../src/v1/handlers/announce.rs | 169 +++--------- .../axum-http-tracker-server/src/v1/routes.rs | 16 +- packages/http-tracker-core/src/container.rs | 11 + .../src/services/announce.rs | 246 ++++++++++-------- src/container.rs | 30 ++- 6 files changed, 220 insertions(+), 262 deletions(-) diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 4cf5afc13..6cdf28446 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -239,6 +239,7 @@ mod tests { use std::sync::Arc; use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; + use bittorrent_http_tracker_core::services::announce::AnnounceService; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; use bittorrent_tracker_core::authentication::service; @@ -293,6 +294,14 @@ mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + let announce_service = Arc::new(AnnounceService::new( + core_config.clone(), + announce_handler.clone(), + authentication_service.clone(), + whitelist_authorization.clone(), + http_stats_event_sender.clone(), + )); + HttpTrackerCoreContainer { core_config, announce_handler, @@ -303,6 +312,7 @@ mod tests { http_tracker_config, http_stats_event_sender, http_stats_repository, + announce_service, } } diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs 
b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 0221f8dad..6c2e4b713 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -6,16 +6,12 @@ use std::sync::Arc; use axum::extract::State; use axum::response::{IntoResponse, Response}; -use bittorrent_http_tracker_core::services::announce::HttpAnnounceError; +use bittorrent_http_tracker_core::services::announce::{AnnounceService, HttpAnnounceError}; use bittorrent_http_tracker_protocol::v1::requests::announce::{Announce, Compact}; use bittorrent_http_tracker_protocol::v1::responses::{self}; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; -use bittorrent_tracker_core::announce_handler::AnnounceHandler; -use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::authentication::Key; -use bittorrent_tracker_core::whitelist; use hyper::StatusCode; -use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; use crate::v1::extractors::announce_request::ExtractRequest; @@ -25,91 +21,41 @@ use crate::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; /// It handles the `announce` request when the HTTP tracker does not require /// authentication (no PATH `key` parameter required). 
#[allow(clippy::unused_async)] -#[allow(clippy::type_complexity)] pub async fn handle_without_key( - State(state): State<( - Arc, - Arc, - Arc, - Arc, - Arc>>, - )>, + State(state): State>, ExtractRequest(announce_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ) -> Response { tracing::debug!("http announce request: {:#?}", announce_request); - handle( - &state.0, - &state.1, - &state.2, - &state.3, - &state.4, - &announce_request, - &client_ip_sources, - None, - ) - .await + handle(&state, &announce_request, &client_ip_sources, None).await } /// It handles the `announce` request when the HTTP tracker requires /// authentication (PATH `key` parameter required). #[allow(clippy::unused_async)] -#[allow(clippy::type_complexity)] pub async fn handle_with_key( - State(state): State<( - Arc, - Arc, - Arc, - Arc, - Arc>>, - )>, + State(state): State>, ExtractRequest(announce_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ExtractKey(key): ExtractKey, ) -> Response { tracing::debug!("http announce request: {:#?}", announce_request); - handle( - &state.0, - &state.1, - &state.2, - &state.3, - &state.4, - &announce_request, - &client_ip_sources, - Some(key), - ) - .await + handle(&state, &announce_request, &client_ip_sources, Some(key)).await } /// It handles the `announce` request. /// /// Internal implementation that handles both the `authenticated` and /// `unauthenticated` modes. 
-#[allow(clippy::too_many_arguments)] async fn handle( - config: &Arc, - announce_handler: &Arc, - authentication_service: &Arc, - whitelist_authorization: &Arc, - opt_http_stats_event_sender: &Arc>>, + announce_service: &Arc, announce_request: &Announce, client_ip_sources: &ClientIpSources, maybe_key: Option, ) -> Response { - let announce_data = match handle_announce( - config, - announce_handler, - authentication_service, - whitelist_authorization, - opt_http_stats_event_sender, - announce_request, - client_ip_sources, - maybe_key, - ) - .await - { + let announce_data = match handle_announce(announce_service, announce_request, client_ip_sources, maybe_key).await { Ok(announce_data) => announce_data, Err(error) => { let error_response = responses::error::Error { @@ -121,28 +67,15 @@ async fn handle( build_response(announce_request, announce_data) } -#[allow(clippy::too_many_arguments)] async fn handle_announce( - core_config: &Arc, - announce_handler: &Arc, - authentication_service: &Arc, - whitelist_authorization: &Arc, - opt_http_stats_event_sender: &Arc>>, + announce_service: &Arc, announce_request: &Announce, client_ip_sources: &ClientIpSources, maybe_key: Option, ) -> Result { - bittorrent_http_tracker_core::services::announce::handle_announce( - &core_config.clone(), - &announce_handler.clone(), - &authentication_service.clone(), - &whitelist_authorization.clone(), - &opt_http_stats_event_sender.clone(), - announce_request, - client_ip_sources, - maybe_key, - ) - .await + announce_service + .handle_announce(announce_request, client_ip_sources, maybe_key) + .await } fn build_response(announce_request: &Announce, announce_data: AnnounceData) -> Response { @@ -163,6 +96,7 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::PeerId; + use bittorrent_http_tracker_core::services::announce::AnnounceService; use bittorrent_http_tracker_protocol::v1::requests::announce::Announce; use bittorrent_http_tracker_protocol::v1::responses; use 
bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; @@ -174,39 +108,32 @@ mod tests { use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; - use torrust_tracker_configuration::{Configuration, Core}; + use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; use crate::tests::helpers::sample_info_hash; - struct CoreTrackerServices { - pub core_config: Arc, - pub announce_handler: Arc, - pub whitelist_authorization: Arc, - pub authentication_service: Arc, - } - struct CoreHttpTrackerServices { - pub http_stats_event_sender: Arc>>, + pub announce_service: Arc, } - fn initialize_private_tracker() -> (CoreTrackerServices, CoreHttpTrackerServices) { + fn initialize_private_tracker() -> CoreHttpTrackerServices { initialize_core_tracker_services(&configuration::ephemeral_private()) } - fn initialize_listed_tracker() -> (CoreTrackerServices, CoreHttpTrackerServices) { + fn initialize_listed_tracker() -> CoreHttpTrackerServices { initialize_core_tracker_services(&configuration::ephemeral_listed()) } - fn initialize_tracker_on_reverse_proxy() -> (CoreTrackerServices, CoreHttpTrackerServices) { + fn initialize_tracker_on_reverse_proxy() -> CoreHttpTrackerServices { initialize_core_tracker_services(&configuration::ephemeral_with_reverse_proxy()) } - fn initialize_tracker_not_on_reverse_proxy() -> (CoreTrackerServices, CoreHttpTrackerServices) { + fn initialize_tracker_not_on_reverse_proxy() -> CoreHttpTrackerServices { initialize_core_tracker_services(&configuration::ephemeral_without_reverse_proxy()) } - fn initialize_core_tracker_services(config: &Configuration) -> (CoreTrackerServices, CoreHttpTrackerServices) { + fn initialize_core_tracker_services(config: &Configuration) -> CoreHttpTrackerServices { 
let core_config = Arc::new(config.core.clone()); let database = initialize_database(&config.core); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); @@ -228,15 +155,15 @@ mod tests { let http_stats_event_sender = Arc::new(http_stats_event_sender); let _http_stats_repository = Arc::new(http_stats_repository); - ( - CoreTrackerServices { - core_config, - announce_handler, - whitelist_authorization, - authentication_service, - }, - CoreHttpTrackerServices { http_stats_event_sender }, - ) + let announce_service = Arc::new(AnnounceService::new( + core_config.clone(), + announce_handler.clone(), + authentication_service.clone(), + whitelist_authorization.clone(), + http_stats_event_sender.clone(), + )); + + CoreHttpTrackerServices { announce_service } } fn sample_announce_request() -> Announce { @@ -280,16 +207,12 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_authentication_key_is_missing() { - let (core_tracker_services, http_core_tracker_services) = initialize_private_tracker(); + let http_core_tracker_services = initialize_private_tracker(); let maybe_key = None; let response = handle_announce( - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.authentication_service, - &core_tracker_services.whitelist_authorization, - &http_core_tracker_services.http_stats_event_sender, + &http_core_tracker_services.announce_service, &sample_announce_request(), &sample_client_ip_sources(), maybe_key, @@ -309,18 +232,14 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_authentication_key_is_invalid() { - let (core_tracker_services, http_core_tracker_services) = initialize_private_tracker(); + let http_core_tracker_services = initialize_private_tracker(); let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); let maybe_key = Some(unregistered_key); let response = handle_announce( - &core_tracker_services.core_config, - 
&core_tracker_services.announce_handler, - &core_tracker_services.authentication_service, - &core_tracker_services.whitelist_authorization, - &http_core_tracker_services.http_stats_event_sender, + &http_core_tracker_services.announce_service, &sample_announce_request(), &sample_client_ip_sources(), maybe_key, @@ -349,16 +268,12 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_announced_torrent_is_not_whitelisted() { - let (core_tracker_services, http_core_tracker_services) = initialize_listed_tracker(); + let http_core_tracker_services = initialize_listed_tracker(); let announce_request = sample_announce_request(); let response = handle_announce( - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.authentication_service, - &core_tracker_services.whitelist_authorization, - &http_core_tracker_services.http_stats_event_sender, + &http_core_tracker_services.announce_service, &announce_request, &sample_client_ip_sources(), None, @@ -391,7 +306,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_right_most_x_forwarded_for_header_ip_is_not_available() { - let (core_tracker_services, http_core_tracker_services) = initialize_tracker_on_reverse_proxy(); + let http_core_tracker_services = initialize_tracker_on_reverse_proxy(); let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, @@ -399,11 +314,7 @@ mod tests { }; let response = handle_announce( - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.authentication_service, - &core_tracker_services.whitelist_authorization, - &http_core_tracker_services.http_stats_event_sender, + &http_core_tracker_services.announce_service, &sample_announce_request(), &client_ip_sources, None, @@ -433,7 +344,7 @@ mod tests { #[tokio::test] async fn it_should_fail_when_the_client_ip_from_the_connection_info_is_not_available() { - let (core_tracker_services, http_core_tracker_services) = 
initialize_tracker_not_on_reverse_proxy(); + let http_core_tracker_services = initialize_tracker_not_on_reverse_proxy(); let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, @@ -441,11 +352,7 @@ mod tests { }; let response = handle_announce( - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.authentication_service, - &core_tracker_services.whitelist_authorization, - &http_core_tracker_services.http_stats_event_sender, + &http_core_tracker_services.announce_service, &sample_announce_request(), &client_ip_sources, None, diff --git a/packages/axum-http-tracker-server/src/v1/routes.rs b/packages/axum-http-tracker-server/src/v1/routes.rs index 7a96f6014..6c4005ff5 100644 --- a/packages/axum-http-tracker-server/src/v1/routes.rs +++ b/packages/axum-http-tracker-server/src/v1/routes.rs @@ -38,23 +38,11 @@ pub fn router(http_tracker_container: Arc, server_sock // Announce request .route( "/announce", - get(announce::handle_without_key).with_state(( - http_tracker_container.core_config.clone(), - http_tracker_container.announce_handler.clone(), - http_tracker_container.authentication_service.clone(), - http_tracker_container.whitelist_authorization.clone(), - http_tracker_container.http_stats_event_sender.clone(), - )), + get(announce::handle_without_key).with_state(http_tracker_container.announce_service.clone()), ) .route( "/announce/{key}", - get(announce::handle_with_key).with_state(( - http_tracker_container.core_config.clone(), - http_tracker_container.announce_handler.clone(), - http_tracker_container.authentication_service.clone(), - http_tracker_container.whitelist_authorization.clone(), - http_tracker_container.http_stats_event_sender.clone(), - )), + get(announce::handle_with_key).with_state(http_tracker_container.announce_service.clone()), ) // Scrape request .route( diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 
0fc313a38..27a24b813 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -7,6 +7,7 @@ use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use bittorrent_tracker_core::whitelist; use torrust_tracker_configuration::{Core, HttpTracker}; +use crate::services::announce::AnnounceService; use crate::statistics; pub struct HttpTrackerCoreContainer { @@ -20,6 +21,7 @@ pub struct HttpTrackerCoreContainer { pub http_tracker_config: Arc, pub http_stats_event_sender: Arc>>, pub http_stats_repository: Arc, + pub announce_service: Arc, } impl HttpTrackerCoreContainer { @@ -39,6 +41,14 @@ impl HttpTrackerCoreContainer { let http_stats_event_sender = Arc::new(http_stats_event_sender); let http_stats_repository = Arc::new(http_stats_repository); + let announce_service = Arc::new(AnnounceService::new( + tracker_core_container.core_config.clone(), + tracker_core_container.announce_handler.clone(), + tracker_core_container.authentication_service.clone(), + tracker_core_container.whitelist_authorization.clone(), + http_stats_event_sender.clone(), + )); + Arc::new(Self { core_config: tracker_core_container.core_config.clone(), announce_handler: tracker_core_container.announce_handler.clone(), @@ -49,6 +59,7 @@ impl HttpTrackerCoreContainer { http_tracker_config: http_tracker_config.clone(), http_stats_event_sender: http_stats_event_sender.clone(), http_stats_repository: http_stats_repository.clone(), + announce_service: announce_service.clone(), }) } } diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 2f530c654..5890d35c1 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -83,81 +83,105 @@ impl From for HttpAnnounceError { /// > **NOTICE**: as the HTTP tracker does not requires a connection request /// > like the UDP tracker, the number of TCP connections is incremented for 
/// > each `announce` request. -/// -/// # Errors -/// -/// This function will return an error if: -/// -/// - The tracker is running in `listed` mode and the torrent is not whitelisted. -/// - There is an error when resolving the client IP address. -#[allow(clippy::too_many_arguments)] -pub async fn handle_announce( - core_config: &Arc, - announce_handler: &Arc, - authentication_service: &Arc, - whitelist_authorization: &Arc, - opt_http_stats_event_sender: &Arc>>, - announce_request: &Announce, - client_ip_sources: &ClientIpSources, - maybe_key: Option, -) -> Result { - // Authentication - if core_config.private { - match maybe_key { - Some(key) => match authentication_service.authenticate(&key).await { - Ok(()) => (), - Err(error) => return Err(error.into()), - }, - None => { - return Err(authentication::key::Error::MissingAuthKey { - location: Location::caller(), +pub struct AnnounceService { + core_config: Arc, + announce_handler: Arc, + authentication_service: Arc, + whitelist_authorization: Arc, + opt_http_stats_event_sender: Arc>>, +} + +impl AnnounceService { + #[must_use] + pub fn new( + core_config: Arc, + announce_handler: Arc, + authentication_service: Arc, + whitelist_authorization: Arc, + opt_http_stats_event_sender: Arc>>, + ) -> Self { + Self { + core_config, + announce_handler, + authentication_service, + whitelist_authorization, + opt_http_stats_event_sender, + } + } + + /// Handles an announce request. + /// + /// # Errors + /// + /// This function will return an error if: + /// + /// - The tracker is running in `listed` mode and the torrent is not whitelisted. + /// - There is an error when resolving the client IP address. 
+ pub async fn handle_announce( + &self, + announce_request: &Announce, + client_ip_sources: &ClientIpSources, + maybe_key: Option, + ) -> Result { + // Authentication + if self.core_config.private { + match maybe_key { + Some(key) => match self.authentication_service.authenticate(&key).await { + Ok(()) => (), + Err(error) => return Err(error.into()), + }, + None => { + return Err(authentication::key::Error::MissingAuthKey { + location: Location::caller(), + } + .into()) } - .into()) } } - } - // Authorization - match whitelist_authorization.authorize(&announce_request.info_hash).await { - Ok(()) => (), - Err(error) => return Err(error.into()), - } + // Authorization + match self.whitelist_authorization.authorize(&announce_request.info_hash).await { + Ok(()) => (), + Err(error) => return Err(error.into()), + } - let peer_ip = match peer_ip_resolver::invoke(core_config.net.on_reverse_proxy, client_ip_sources) { - Ok(peer_ip) => peer_ip, - Err(error) => return Err(error.into()), - }; + let peer_ip = match peer_ip_resolver::invoke(self.core_config.net.on_reverse_proxy, client_ip_sources) { + Ok(peer_ip) => peer_ip, + Err(error) => return Err(error.into()), + }; - let mut peer = peer_from_request(announce_request, &peer_ip); + let mut peer = peer_from_request(announce_request, &peer_ip); - let peers_wanted = match announce_request.numwant { - Some(numwant) => PeersWanted::only(numwant), - None => PeersWanted::AsManyAsPossible, - }; + let peers_wanted = match announce_request.numwant { + Some(numwant) => PeersWanted::only(numwant), + None => PeersWanted::AsManyAsPossible, + }; - let original_peer_ip = peer.peer_addr.ip(); + let original_peer_ip = peer.peer_addr.ip(); - // The tracker could change the original peer ip - let announce_data = announce_handler - .announce(&announce_request.info_hash, &mut peer, &original_peer_ip, &peers_wanted) - .await?; + // The tracker could change the original peer ip + let announce_data = self + .announce_handler + 
.announce(&announce_request.info_hash, &mut peer, &original_peer_ip, &peers_wanted) + .await?; - if let Some(http_stats_event_sender) = opt_http_stats_event_sender.as_deref() { - match original_peer_ip { - IpAddr::V4(_) => { - http_stats_event_sender - .send_event(statistics::event::Event::Tcp4Announce) - .await; - } - IpAddr::V6(_) => { - http_stats_event_sender - .send_event(statistics::event::Event::Tcp6Announce) - .await; + if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { + match original_peer_ip { + IpAddr::V4(_) => { + http_stats_event_sender + .send_event(statistics::event::Event::Tcp4Announce) + .await; + } + IpAddr::V6(_) => { + http_stats_event_sender + .send_event(statistics::event::Event::Tcp6Announce) + .await; + } } } - } - Ok(announce_data) + Ok(announce_data) + } } #[cfg(test)] @@ -302,11 +326,11 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; - use crate::services::announce::handle_announce; use crate::services::announce::tests::{ initialize_core_tracker_services, initialize_core_tracker_services_with_config, sample_announce_request_for_peer, sample_peer, MockHttpStatsEventSender, }; + use crate::services::announce::AnnounceService; use crate::statistics; #[tokio::test] @@ -317,18 +341,18 @@ mod tests { let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); - let announce_data = handle_announce( - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.authentication_service, - &core_tracker_services.whitelist_authorization, - &core_http_tracker_services.http_stats_event_sender, - &announce_request, - &client_ip_sources, - None, - ) - .await - .unwrap(); + let announce_service = AnnounceService::new( + core_tracker_services.core_config.clone(), + core_tracker_services.announce_handler.clone(), + core_tracker_services.authentication_service.clone(), + 
core_tracker_services.whitelist_authorization.clone(), + core_http_tracker_services.http_stats_event_sender.clone(), + ); + + let announce_data = announce_service + .handle_announce(&announce_request, &client_ip_sources, None) + .await + .unwrap(); let expected_announce_data = AnnounceData { peers: vec![], @@ -361,18 +385,18 @@ mod tests { let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); - let _announce_data = handle_announce( - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.authentication_service, - &core_tracker_services.whitelist_authorization, - &core_http_tracker_services.http_stats_event_sender, - &announce_request, - &client_ip_sources, - None, - ) - .await - .unwrap(); + let announce_service = AnnounceService::new( + core_tracker_services.core_config.clone(), + core_tracker_services.announce_handler.clone(), + core_tracker_services.authentication_service.clone(), + core_tracker_services.whitelist_authorization.clone(), + core_http_tracker_services.http_stats_event_sender.clone(), + ); + + let _announce_data = announce_service + .handle_announce(&announce_request, &client_ip_sources, None) + .await + .unwrap(); } fn tracker_with_an_ipv6_external_ip() -> Configuration { @@ -413,18 +437,18 @@ mod tests { let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); - let _announce_data = handle_announce( - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.authentication_service, - &core_tracker_services.whitelist_authorization, - &core_http_tracker_services.http_stats_event_sender, - &announce_request, - &client_ip_sources, - None, - ) - .await - .unwrap(); + let announce_service = AnnounceService::new( + core_tracker_services.core_config.clone(), + core_tracker_services.announce_handler.clone(), + core_tracker_services.authentication_service.clone(), + 
core_tracker_services.whitelist_authorization.clone(), + core_http_tracker_services.http_stats_event_sender.clone(), + ); + + let _announce_data = announce_service + .handle_announce(&announce_request, &client_ip_sources, None) + .await + .unwrap(); } #[tokio::test] @@ -446,18 +470,18 @@ mod tests { let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); - let _announce_data = handle_announce( - &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.authentication_service, - &core_tracker_services.whitelist_authorization, - &core_http_tracker_services.http_stats_event_sender, - &announce_request, - &client_ip_sources, - None, - ) - .await - .unwrap(); + let announce_service = AnnounceService::new( + core_tracker_services.core_config.clone(), + core_tracker_services.announce_handler.clone(), + core_tracker_services.authentication_service.clone(), + core_tracker_services.whitelist_authorization.clone(), + core_http_tracker_services.http_stats_event_sender.clone(), + ); + + let _announce_data = announce_service + .handle_announce(&announce_request, &client_ip_sources, None) + .await + .unwrap(); } } } diff --git a/src/container.rs b/src/container.rs index 4bdae1b29..fba03bf2c 100644 --- a/src/container.rs +++ b/src/container.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; +use bittorrent_http_tracker_core::services::announce::AnnounceService; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::authentication::handler::KeysHandler; use bittorrent_tracker_core::authentication::service::AuthenticationService; @@ -45,6 +46,7 @@ pub struct AppContainer { // HTTP Tracker Core Services pub http_stats_event_sender: Arc>>, pub http_stats_repository: Arc, + pub http_announce_service: Arc, // UDP Tracker Server Services pub udp_server_stats_event_sender: Arc>>, @@ -58,13 +60,20 @@ impl AppContainer { 
let tracker_core_container = TrackerCoreContainer::initialize(&core_config); - // HTTP core stats + // HTTP Tracker Core Services let (http_stats_event_sender, http_stats_repository) = bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); let http_stats_event_sender = Arc::new(http_stats_event_sender); let http_stats_repository = Arc::new(http_stats_repository); - - // UDP core stats + let http_announce_service = Arc::new(AnnounceService::new( + tracker_core_container.core_config.clone(), + tracker_core_container.announce_handler.clone(), + tracker_core_container.authentication_service.clone(), + tracker_core_container.whitelist_authorization.clone(), + http_stats_event_sender.clone(), + )); + + // UDP Tracker Core Services let (udp_core_stats_event_sender, udp_core_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); @@ -72,13 +81,14 @@ impl AppContainer { let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - // UDP server stats + // UDP Tracker Server Services let (udp_server_stats_event_sender, udp_server_stats_repository) = torrust_udp_tracker_server::statistics::setup::factory(configuration.core.tracker_usage_statistics); let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); let udp_server_stats_repository = Arc::new(udp_server_stats_repository); AppContainer { + // Tracker Core Services core_config, database: tracker_core_container.database, announce_handler: tracker_core_container.announce_handler, @@ -91,11 +101,18 @@ impl AppContainer { in_memory_torrent_repository: tracker_core_container.in_memory_torrent_repository, db_torrent_repository: tracker_core_container.db_torrent_repository, torrents_manager: tracker_core_container.torrents_manager, + + // UDP Tracker Core Services ban_service, - 
http_stats_event_sender, udp_core_stats_event_sender, - http_stats_repository, udp_core_stats_repository, + + // HTTP Tracker Core Services + http_stats_event_sender, + http_stats_repository, + http_announce_service, + + // UDP Tracker Server Services udp_server_stats_event_sender, udp_server_stats_repository, } @@ -113,6 +130,7 @@ impl AppContainer { http_tracker_config: http_tracker_config.clone(), http_stats_event_sender: self.http_stats_event_sender.clone(), http_stats_repository: self.http_stats_repository.clone(), + announce_service: self.http_announce_service.clone(), } } From 8ec45ad63b400c423cdec4eee0582c94ce941441 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 28 Feb 2025 12:00:59 +0000 Subject: [PATCH 344/802] refactor: [#1326] extract bittorrent_http_tracker_core::services::scrape::ScrapeService --- .../axum-http-tracker-server/src/server.rs | 10 +- .../src/v1/handlers/scrape.rs | 202 ++++++------------ .../axum-http-tracker-server/src/v1/routes.rs | 14 +- packages/http-tracker-core/src/container.rs | 10 + .../http-tracker-core/src/services/scrape.rs | 160 ++++++++------ src/container.rs | 10 + 6 files changed, 191 insertions(+), 215 deletions(-) diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 6cdf28446..15eef3c38 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -238,8 +238,8 @@ pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { mod tests { use std::sync::Arc; - use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use bittorrent_http_tracker_core::services::announce::AnnounceService; + use bittorrent_http_tracker_core::{container::HttpTrackerCoreContainer, services::scrape::ScrapeService}; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; use 
bittorrent_tracker_core::authentication::service; @@ -302,6 +302,13 @@ mod tests { http_stats_event_sender.clone(), )); + let scrape_service = Arc::new(ScrapeService::new( + core_config.clone(), + scrape_handler.clone(), + authentication_service.clone(), + http_stats_event_sender.clone(), + )); + HttpTrackerCoreContainer { core_config, announce_handler, @@ -313,6 +320,7 @@ mod tests { http_stats_event_sender, http_stats_repository, announce_service, + scrape_service, } } diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index 00046a618..ae3a35bd3 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -6,15 +6,12 @@ use std::sync::Arc; use axum::extract::State; use axum::response::{IntoResponse, Response}; -use bittorrent_http_tracker_core::services::scrape::HttpScrapeError; +use bittorrent_http_tracker_core::services::scrape::ScrapeService; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; -use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::authentication::Key; -use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use hyper::StatusCode; -use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; use crate::v1::extractors::authentication_key::Extract as ExtractKey; @@ -24,29 +21,14 @@ use crate::v1::extractors::scrape_request::ExtractRequest; /// It handles the `scrape` request when the HTTP tracker is configured /// to run in `public` mode. 
#[allow(clippy::unused_async)] -#[allow(clippy::type_complexity)] pub async fn handle_without_key( - State(state): State<( - Arc, - Arc, - Arc, - Arc>>, - )>, + State(state): State>, ExtractRequest(scrape_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ) -> Response { tracing::debug!("http scrape request: {:#?}", &scrape_request); - handle( - &state.0, - &state.1, - &state.2, - &state.3, - &scrape_request, - &client_ip_sources, - None, - ) - .await + handle(&state, &scrape_request, &client_ip_sources, None).await } /// It handles the `scrape` request when the HTTP tracker is configured @@ -54,52 +36,26 @@ pub async fn handle_without_key( /// /// In this case, the authentication `key` parameter is required. #[allow(clippy::unused_async)] -#[allow(clippy::type_complexity)] pub async fn handle_with_key( - State(state): State<( - Arc, - Arc, - Arc, - Arc>>, - )>, + State(state): State>, ExtractRequest(scrape_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ExtractKey(key): ExtractKey, ) -> Response { tracing::debug!("http scrape request: {:#?}", &scrape_request); - handle( - &state.0, - &state.1, - &state.2, - &state.3, - &scrape_request, - &client_ip_sources, - Some(key), - ) - .await + handle(&state, &scrape_request, &client_ip_sources, Some(key)).await } -#[allow(clippy::too_many_arguments)] async fn handle( - core_config: &Arc, - scrape_handler: &Arc, - authentication_service: &Arc, - http_stats_event_sender: &Arc>>, + scrape_service: &Arc, scrape_request: &Scrape, client_ip_sources: &ClientIpSources, maybe_key: Option, ) -> Response { - let scrape_data = match handle_scrape( - core_config, - scrape_handler, - authentication_service, - http_stats_event_sender, - scrape_request, - client_ip_sources, - maybe_key, - ) - .await + let scrape_data = match scrape_service + .handle_scrape(scrape_request, client_ip_sources, maybe_key) + .await { Ok(scrape_data) => scrape_data, 
Err(error) => { @@ -113,28 +69,6 @@ async fn handle( build_response(scrape_data) } -#[allow(clippy::too_many_arguments)] -async fn handle_scrape( - core_config: &Arc, - scrape_handler: &Arc, - authentication_service: &Arc, - opt_http_stats_event_sender: &Arc>>, - scrape_request: &Scrape, - client_ip_sources: &ClientIpSources, - maybe_key: Option, -) -> Result { - bittorrent_http_tracker_core::services::scrape::handle_scrape( - core_config, - scrape_handler, - authentication_service, - opt_http_stats_event_sender, - scrape_request, - client_ip_sources, - maybe_key, - ) - .await -} - fn build_response(scrape_data: ScrapeData) -> Response { let response = responses::scrape::Bencoded::from(scrape_data); @@ -233,11 +167,11 @@ mod tests { mod with_tracker_in_private_mode { use std::str::FromStr; + use bittorrent_http_tracker_core::services::scrape::ScrapeService; use bittorrent_tracker_core::authentication; use torrust_tracker_primitives::core::ScrapeData; use super::{initialize_private_tracker, sample_client_ip_sources, sample_scrape_request}; - use crate::v1::handlers::scrape::handle_scrape; #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_missing() { @@ -246,17 +180,17 @@ mod tests { let scrape_request = sample_scrape_request(); let maybe_key = None; - let scrape_data = handle_scrape( - &core_tracker_services.core_config, - &core_tracker_services.scrape_handler, - &core_tracker_services.authentication_service, - &core_http_tracker_services.http_stats_event_sender, - &scrape_request, - &sample_client_ip_sources(), - maybe_key, - ) - .await - .unwrap(); + let scrape_service = ScrapeService::new( + core_tracker_services.core_config.clone(), + core_tracker_services.scrape_handler.clone(), + core_tracker_services.authentication_service.clone(), + core_http_tracker_services.http_stats_event_sender.clone(), + ); + + let scrape_data = scrape_service + .handle_scrape(&scrape_request, &sample_client_ip_sources(), maybe_key) + .await 
+ .unwrap(); let expected_scrape_data = ScrapeData::zeroed(&scrape_request.info_hashes); @@ -271,17 +205,17 @@ mod tests { let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); let maybe_key = Some(unregistered_key); - let scrape_data = handle_scrape( - &core_tracker_services.core_config, - &core_tracker_services.scrape_handler, - &core_tracker_services.authentication_service, - &core_http_tracker_services.http_stats_event_sender, - &scrape_request, - &sample_client_ip_sources(), - maybe_key, - ) - .await - .unwrap(); + let scrape_service = ScrapeService::new( + core_tracker_services.core_config.clone(), + core_tracker_services.scrape_handler.clone(), + core_tracker_services.authentication_service.clone(), + core_http_tracker_services.http_stats_event_sender.clone(), + ); + + let scrape_data = scrape_service + .handle_scrape(&scrape_request, &sample_client_ip_sources(), maybe_key) + .await + .unwrap(); let expected_scrape_data = ScrapeData::zeroed(&scrape_request.info_hashes); @@ -291,10 +225,10 @@ mod tests { mod with_tracker_in_listed_mode { + use bittorrent_http_tracker_core::services::scrape::ScrapeService; use torrust_tracker_primitives::core::ScrapeData; use super::{initialize_listed_tracker, sample_client_ip_sources, sample_scrape_request}; - use crate::v1::handlers::scrape::handle_scrape; #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_torrent_is_not_whitelisted() { @@ -302,17 +236,17 @@ mod tests { let scrape_request = sample_scrape_request(); - let scrape_data = handle_scrape( - &core_tracker_services.core_config, - &core_tracker_services.scrape_handler, - &core_tracker_services.authentication_service, - &core_http_tracker_services.http_stats_event_sender, - &scrape_request, - &sample_client_ip_sources(), - None, - ) - .await - .unwrap(); + let scrape_service = ScrapeService::new( + core_tracker_services.core_config.clone(), + core_tracker_services.scrape_handler.clone(), + 
core_tracker_services.authentication_service.clone(), + core_http_tracker_services.http_stats_event_sender.clone(), + ); + + let scrape_data = scrape_service + .handle_scrape(&scrape_request, &sample_client_ip_sources(), None) + .await + .unwrap(); let expected_scrape_data = ScrapeData::zeroed(&scrape_request.info_hashes); @@ -322,11 +256,11 @@ mod tests { mod with_tracker_on_reverse_proxy { + use bittorrent_http_tracker_core::services::scrape::ScrapeService; use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; use super::{initialize_tracker_on_reverse_proxy, sample_scrape_request}; - use crate::v1::handlers::scrape::handle_scrape; use crate::v1::handlers::scrape::tests::assert_error_response; #[tokio::test] @@ -338,17 +272,17 @@ mod tests { connection_info_ip: None, }; - let response = handle_scrape( - &core_tracker_services.core_config, - &core_tracker_services.scrape_handler, - &core_tracker_services.authentication_service, - &core_http_tracker_services.http_stats_event_sender, - &sample_scrape_request(), - &client_ip_sources, - None, - ) - .await - .unwrap_err(); + let scrape_service = ScrapeService::new( + core_tracker_services.core_config.clone(), + core_tracker_services.scrape_handler.clone(), + core_tracker_services.authentication_service.clone(), + core_http_tracker_services.http_stats_event_sender.clone(), + ); + + let response = scrape_service + .handle_scrape(&sample_scrape_request(), &client_ip_sources, None) + .await + .unwrap_err(); let error_response = responses::error::Error { failure_reason: response.to_string(), @@ -363,11 +297,11 @@ mod tests { mod with_tracker_not_on_reverse_proxy { + use bittorrent_http_tracker_core::services::scrape::ScrapeService; use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; use super::{initialize_tracker_not_on_reverse_proxy, 
sample_scrape_request}; - use crate::v1::handlers::scrape::handle_scrape; use crate::v1::handlers::scrape::tests::assert_error_response; #[tokio::test] @@ -379,17 +313,17 @@ mod tests { connection_info_ip: None, }; - let response = handle_scrape( - &core_tracker_services.core_config, - &core_tracker_services.scrape_handler, - &core_tracker_services.authentication_service, - &core_http_tracker_services.http_stats_event_sender, - &sample_scrape_request(), - &client_ip_sources, - None, - ) - .await - .unwrap_err(); + let scrape_service = ScrapeService::new( + core_tracker_services.core_config.clone(), + core_tracker_services.scrape_handler.clone(), + core_tracker_services.authentication_service.clone(), + core_http_tracker_services.http_stats_event_sender.clone(), + ); + + let response = scrape_service + .handle_scrape(&sample_scrape_request(), &client_ip_sources, None) + .await + .unwrap_err(); let error_response = responses::error::Error { failure_reason: response.to_string(), diff --git a/packages/axum-http-tracker-server/src/v1/routes.rs b/packages/axum-http-tracker-server/src/v1/routes.rs index 6c4005ff5..5f666e9d4 100644 --- a/packages/axum-http-tracker-server/src/v1/routes.rs +++ b/packages/axum-http-tracker-server/src/v1/routes.rs @@ -47,21 +47,11 @@ pub fn router(http_tracker_container: Arc, server_sock // Scrape request .route( "/scrape", - get(scrape::handle_without_key).with_state(( - http_tracker_container.core_config.clone(), - http_tracker_container.scrape_handler.clone(), - http_tracker_container.authentication_service.clone(), - http_tracker_container.http_stats_event_sender.clone(), - )), + get(scrape::handle_without_key).with_state(http_tracker_container.scrape_service.clone()), ) .route( "/scrape/{key}", - get(scrape::handle_with_key).with_state(( - http_tracker_container.core_config.clone(), - http_tracker_container.scrape_handler.clone(), - http_tracker_container.authentication_service.clone(), - 
http_tracker_container.http_stats_event_sender.clone(), - )), + get(scrape::handle_with_key).with_state(http_tracker_container.scrape_service.clone()), ) // Add extension to get the client IP from the connection info .layer(SecureClientIpSource::ConnectInfo.into_extension()) diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 27a24b813..448dce246 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -8,6 +8,7 @@ use bittorrent_tracker_core::whitelist; use torrust_tracker_configuration::{Core, HttpTracker}; use crate::services::announce::AnnounceService; +use crate::services::scrape::ScrapeService; use crate::statistics; pub struct HttpTrackerCoreContainer { @@ -22,6 +23,7 @@ pub struct HttpTrackerCoreContainer { pub http_stats_event_sender: Arc>>, pub http_stats_repository: Arc, pub announce_service: Arc, + pub scrape_service: Arc, } impl HttpTrackerCoreContainer { @@ -49,6 +51,13 @@ impl HttpTrackerCoreContainer { http_stats_event_sender.clone(), )); + let scrape_service = Arc::new(ScrapeService::new( + tracker_core_container.core_config.clone(), + tracker_core_container.scrape_handler.clone(), + tracker_core_container.authentication_service.clone(), + http_stats_event_sender.clone(), + )); + Arc::new(Self { core_config: tracker_core_container.core_config.clone(), announce_handler: tracker_core_container.announce_handler.clone(), @@ -60,6 +69,7 @@ impl HttpTrackerCoreContainer { http_stats_event_sender: http_stats_event_sender.clone(), http_stats_repository: http_stats_repository.clone(), announce_service: announce_service.clone(), + scrape_service: scrape_service.clone(), }) } } diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 394f285ee..48cee7c8c 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -71,7 +71,6 @@ 
impl From for HttpScrapeError { } } } - /// The HTTP tracker `scrape` service. /// /// The service sends an statistics event that increments: @@ -88,46 +87,71 @@ impl From for HttpScrapeError { /// This function will return an error if: /// /// - There is an error when resolving the client IP address. -#[allow(clippy::too_many_arguments)] -pub async fn handle_scrape( - core_config: &Arc, - scrape_handler: &Arc, - authentication_service: &Arc, - opt_http_stats_event_sender: &Arc>>, - scrape_request: &Scrape, - client_ip_sources: &ClientIpSources, - maybe_key: Option, -) -> Result { - // Authentication - let return_fake_scrape_data = if core_config.private { - match maybe_key { - Some(key) => match authentication_service.authenticate(&key).await { - Ok(()) => false, - Err(_error) => true, - }, - None => true, +pub struct ScrapeService { + core_config: Arc, + scrape_handler: Arc, + authentication_service: Arc, + opt_http_stats_event_sender: Arc>>, +} + +impl ScrapeService { + #[must_use] + pub fn new( + core_config: Arc, + scrape_handler: Arc, + authentication_service: Arc, + opt_http_stats_event_sender: Arc>>, + ) -> Self { + Self { + core_config, + scrape_handler, + authentication_service, + opt_http_stats_event_sender, } - } else { - false - }; + } - // Authorization for scrape requests is handled at the `bittorrent_tracker_core` - // level for each torrent. + /// # Errors + /// + /// This function will return an error if: + /// + /// - There is an error when resolving the client IP address. 
+ pub async fn handle_scrape( + &self, + scrape_request: &Scrape, + client_ip_sources: &ClientIpSources, + maybe_key: Option, + ) -> Result { + // Authentication + let return_fake_scrape_data = if self.core_config.private { + match maybe_key { + Some(key) => match self.authentication_service.authenticate(&key).await { + Ok(()) => false, + Err(_error) => true, + }, + None => true, + } + } else { + false + }; - let peer_ip = match peer_ip_resolver::invoke(core_config.net.on_reverse_proxy, client_ip_sources) { - Ok(peer_ip) => peer_ip, - Err(error) => return Err(error.into()), - }; + // Authorization for scrape requests is handled at the `bittorrent_tracker_core` + // level for each torrent. - if return_fake_scrape_data { - return Ok(fake(opt_http_stats_event_sender, &scrape_request.info_hashes, &peer_ip).await); - } + let peer_ip = match peer_ip_resolver::invoke(self.core_config.net.on_reverse_proxy, client_ip_sources) { + Ok(peer_ip) => peer_ip, + Err(error) => return Err(error.into()), + }; + + if return_fake_scrape_data { + return Ok(fake(&self.opt_http_stats_event_sender, &scrape_request.info_hashes, &peer_ip).await); + } - let scrape_data = scrape_handler.scrape(&scrape_request.info_hashes).await?; + let scrape_data = self.scrape_handler.scrape(&scrape_request.info_hashes).await?; - send_scrape_event(&peer_ip, opt_http_stats_event_sender).await; + send_scrape_event(&peer_ip, &self.opt_http_stats_event_sender).await; - Ok(scrape_data) + Ok(scrape_data) + } } /// The HTTP tracker fake `scrape` service. It returns zeroed stats. 
@@ -261,10 +285,10 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; - use crate::services::scrape::handle_scrape; use crate::services::scrape::tests::{ initialize_services_with_configuration, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; + use crate::services::scrape::ScrapeService; use crate::statistics; use crate::tests::sample_info_hash; @@ -299,17 +323,17 @@ mod tests { connection_info_ip: Some(original_peer_ip), }; - let scrape_data = handle_scrape( - &core_config, - &container.scrape_handler, - &container.authentication_service, - &http_stats_event_sender, - &scrape_request, - &client_ip_sources, - None, - ) - .await - .unwrap(); + let scrape_service = Arc::new(ScrapeService::new( + core_config.clone(), + container.scrape_handler.clone(), + container.authentication_service.clone(), + http_stats_event_sender.clone(), + )); + + let scrape_data = scrape_service + .handle_scrape(&scrape_request, &client_ip_sources, None) + .await + .unwrap(); let mut expected_scrape_data = ScrapeData::empty(); expected_scrape_data.add_file( @@ -350,17 +374,17 @@ mod tests { connection_info_ip: Some(peer_ip), }; - handle_scrape( - &Arc::new(config.core), - &container.scrape_handler, - &container.authentication_service, - &http_stats_event_sender, - &scrape_request, - &client_ip_sources, - None, - ) - .await - .unwrap(); + let scrape_service = Arc::new(ScrapeService::new( + Arc::new(config.core), + container.scrape_handler.clone(), + container.authentication_service.clone(), + http_stats_event_sender.clone(), + )); + + scrape_service + .handle_scrape(&scrape_request, &client_ip_sources, None) + .await + .unwrap(); } #[tokio::test] @@ -389,17 +413,17 @@ mod tests { connection_info_ip: Some(peer_ip), }; - handle_scrape( - &Arc::new(config.core), - &container.scrape_handler, - &container.authentication_service, - &http_stats_event_sender, - &scrape_request, - &client_ip_sources, - None, - ) - 
.await - .unwrap(); + let scrape_service = Arc::new(ScrapeService::new( + Arc::new(config.core), + container.scrape_handler.clone(), + container.authentication_service.clone(), + http_stats_event_sender.clone(), + )); + + scrape_service + .handle_scrape(&scrape_request, &client_ip_sources, None) + .await + .unwrap(); } } diff --git a/src/container.rs b/src/container.rs index fba03bf2c..c9e58bdc4 100644 --- a/src/container.rs +++ b/src/container.rs @@ -2,6 +2,7 @@ use std::sync::Arc; use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use bittorrent_http_tracker_core::services::announce::AnnounceService; +use bittorrent_http_tracker_core::services::scrape::ScrapeService; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::authentication::handler::KeysHandler; use bittorrent_tracker_core::authentication::service::AuthenticationService; @@ -47,6 +48,7 @@ pub struct AppContainer { pub http_stats_event_sender: Arc>>, pub http_stats_repository: Arc, pub http_announce_service: Arc, + pub http_scrape_service: Arc, // UDP Tracker Server Services pub udp_server_stats_event_sender: Arc>>, @@ -72,6 +74,12 @@ impl AppContainer { tracker_core_container.whitelist_authorization.clone(), http_stats_event_sender.clone(), )); + let http_scrape_service = Arc::new(ScrapeService::new( + tracker_core_container.core_config.clone(), + tracker_core_container.scrape_handler.clone(), + tracker_core_container.authentication_service.clone(), + http_stats_event_sender.clone(), + )); // UDP Tracker Core Services let (udp_core_stats_event_sender, udp_core_stats_repository) = @@ -111,6 +119,7 @@ impl AppContainer { http_stats_event_sender, http_stats_repository, http_announce_service, + http_scrape_service, // UDP Tracker Server Services udp_server_stats_event_sender, @@ -131,6 +140,7 @@ impl AppContainer { http_stats_event_sender: self.http_stats_event_sender.clone(), http_stats_repository: self.http_stats_repository.clone(), 
announce_service: self.http_announce_service.clone(), + scrape_service: self.http_scrape_service.clone(), } } From 47e159e7835e375411fbfdde7cf72b790e597270 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 28 Feb 2025 12:29:32 +0000 Subject: [PATCH 345/802] docs: remove deprecate comments --- packages/udp-tracker-core/src/services/announce.rs | 2 -- packages/udp-tracker-core/src/services/connect.rs | 2 -- packages/udp-tracker-core/src/services/scrape.rs | 2 -- 3 files changed, 6 deletions(-) diff --git a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index b40162283..ffd382a20 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -76,8 +76,6 @@ pub async fn handle_announce( opt_udp_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { - // todo: return a UDP response like the HTTP tracker instead of raw AnnounceData. - // Authentication check( &request.connection_id, diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index 3354595e5..92c51799b 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -17,8 +17,6 @@ pub async fn handle_connect( opt_udp_stats_event_sender: &Arc>>, cookie_issue_time: f64, ) -> ConnectionId { - // todo: return a UDP response like the HTTP tracker instead of raw ConnectionId. 
- let connection_id = make(gen_remote_fingerprint(&remote_addr), cookie_issue_time).expect("it should be a normal value"); if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { diff --git a/packages/udp-tracker-core/src/services/scrape.rs b/packages/udp-tracker-core/src/services/scrape.rs index bec55afe3..04fcb2fe6 100644 --- a/packages/udp-tracker-core/src/services/scrape.rs +++ b/packages/udp-tracker-core/src/services/scrape.rs @@ -68,8 +68,6 @@ pub async fn handle_scrape( opt_udp_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { - // todo: return a UDP response like the HTTP tracker instead of raw ScrapeData. - check( &request.connection_id, gen_remote_fingerprint(&remote_addr), From ddfbcd286152220b055151e73a3ea1d462d6005b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 28 Feb 2025 12:33:10 +0000 Subject: [PATCH 346/802] fix: [#1326] formatting --- packages/axum-http-tracker-server/src/server.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 15eef3c38..ea8003a4f 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -238,8 +238,9 @@ pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { mod tests { use std::sync::Arc; + use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use bittorrent_http_tracker_core::services::announce::AnnounceService; - use bittorrent_http_tracker_core::{container::HttpTrackerCoreContainer, services::scrape::ScrapeService}; + use bittorrent_http_tracker_core::services::scrape::ScrapeService; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; use bittorrent_tracker_core::authentication::service; From e1d9aa4655356db5e15297ab92de30235fea2043 Mon Sep 17 00:00:00 2001 From: 
Jose Celano Date: Fri, 28 Feb 2025 15:48:07 +0000 Subject: [PATCH 347/802] refactor: [#1326] extract bittorrent_udp_tracker_core::services::connect::ConnectService --- .gitignore | 3 +- cSpell.json | 1 + packages/udp-tracker-core/src/container.rs | 5 +- .../udp-tracker-core/src/services/connect.rs | 93 +++++++++++++------ packages/udp-tracker-core/src/services/mod.rs | 4 +- .../src/handlers/connect.rs | 29 ++++-- .../udp-tracker-server/src/handlers/mod.rs | 2 +- src/container.rs | 10 +- 8 files changed, 101 insertions(+), 46 deletions(-) diff --git a/.gitignore b/.gitignore index d9087bcff..8bfa717b7 100644 --- a/.gitignore +++ b/.gitignore @@ -15,4 +15,5 @@ callgrind.out codecov.json lcov.info -perf.data* \ No newline at end of file +perf.data* +rustc-ice-*.txt diff --git a/cSpell.json b/cSpell.json index e067df932..dcdcc9cf6 100644 --- a/cSpell.json +++ b/cSpell.json @@ -136,6 +136,7 @@ "routable", "rstest", "rusqlite", + "rustc", "RUSTDOCFLAGS", "RUSTFLAGS", "rustfmt", diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index 1467134c5..f64149209 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -8,6 +8,7 @@ use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, UdpTracker}; use crate::services::banning::BanService; +use crate::services::connect::ConnectService; use crate::{statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; pub struct UdpTrackerCoreContainer { @@ -21,6 +22,7 @@ pub struct UdpTrackerCoreContainer { pub udp_core_stats_event_sender: Arc>>, pub udp_core_stats_repository: Arc, pub ban_service: Arc>, + pub connect_service: Arc, } impl UdpTrackerCoreContainer { @@ -39,8 +41,8 @@ impl UdpTrackerCoreContainer { statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); let udp_core_stats_repository = Arc::new(udp_core_stats_repository); - 
let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender.clone())); Arc::new(UdpTrackerCoreContainer { core_config: tracker_core_container.core_config.clone(), @@ -52,6 +54,7 @@ impl UdpTrackerCoreContainer { udp_core_stats_event_sender: udp_core_stats_event_sender.clone(), udp_core_stats_repository: udp_core_stats_repository.clone(), ban_service: ban_service.clone(), + connect_service: connect_service.clone(), }) } } diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index 92c51799b..14a3068e4 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -9,28 +9,43 @@ use aquatic_udp_protocol::ConnectionId; use crate::connection_cookie::{gen_remote_fingerprint, make}; use crate::statistics; -/// # Panics +/// The `ConnectService` is responsible for handling the `connect` requests. /// -/// IT will panic if there was an error making the connection cookie. -pub async fn handle_connect( - remote_addr: SocketAddr, - opt_udp_stats_event_sender: &Arc>>, - cookie_issue_time: f64, -) -> ConnectionId { - let connection_id = make(gen_remote_fingerprint(&remote_addr), cookie_issue_time).expect("it should be a normal value"); - - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { - match remote_addr { - SocketAddr::V4(_) => { - udp_stats_event_sender.send_event(statistics::event::Event::Udp4Connect).await; - } - SocketAddr::V6(_) => { - udp_stats_event_sender.send_event(statistics::event::Event::Udp6Connect).await; - } +/// It is responsible for generating the connection cookie and sending the +/// appropriate statistics events. 
+pub struct ConnectService { + pub opt_udp_core_stats_event_sender: Arc>>, +} + +impl ConnectService { + #[must_use] + pub fn new(opt_udp_core_stats_event_sender: Arc>>) -> Self { + Self { + opt_udp_core_stats_event_sender, } } - connection_id + /// Handles a `connect` request. + /// + /// # Panics + /// + /// It will panic if there was an error making the connection cookie. + pub async fn handle_connect(&self, remote_addr: SocketAddr, cookie_issue_time: f64) -> ConnectionId { + let connection_id = make(gen_remote_fingerprint(&remote_addr), cookie_issue_time).expect("it should be a normal value"); + + if let Some(udp_stats_event_sender) = self.opt_udp_core_stats_event_sender.as_deref() { + match remote_addr { + SocketAddr::V4(_) => { + udp_stats_event_sender.send_event(statistics::event::Event::Udp4Connect).await; + } + SocketAddr::V6(_) => { + udp_stats_event_sender.send_event(statistics::event::Event::Udp6Connect).await; + } + } + } + + connection_id + } } #[cfg(test)] @@ -44,10 +59,10 @@ mod tests { use mockall::predicate::eq; use crate::connection_cookie::make; - use crate::services::connect::handle_connect; + use crate::services::connect::ConnectService; use crate::services::tests::{ sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv4_socket_address, sample_ipv6_remote_addr, - sample_ipv6_remote_addr_fingerprint, sample_issue_time, MockUdpStatsEventSender, + sample_ipv6_remote_addr_fingerprint, sample_issue_time, MockUdpCoreStatsEventSender, }; use crate::statistics; @@ -56,7 +71,11 @@ mod tests { let (udp_core_stats_event_sender, _udp_core_stats_repository) = statistics::setup::factory(false); let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); - let response = handle_connect(sample_ipv4_remote_addr(), &udp_core_stats_event_sender, sample_issue_time()).await; + let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); + + let response = connect_service + 
.handle_connect(sample_ipv4_remote_addr(), sample_issue_time()) + .await; assert_eq!( response, @@ -69,7 +88,11 @@ mod tests { let (udp_core_stats_event_sender, _udp_core_stats_repository) = statistics::setup::factory(false); let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); - let response = handle_connect(sample_ipv4_remote_addr(), &udp_core_stats_event_sender, sample_issue_time()).await; + let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); + + let response = connect_service + .handle_connect(sample_ipv4_remote_addr(), sample_issue_time()) + .await; assert_eq!( response, @@ -82,7 +105,11 @@ mod tests { let (udp_core_stats_event_sender, _udp_core_stats_repository) = statistics::setup::factory(false); let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); - let response = handle_connect(sample_ipv6_remote_addr(), &udp_core_stats_event_sender, sample_issue_time()).await; + let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); + + let response = connect_service + .handle_connect(sample_ipv6_remote_addr(), sample_issue_time()) + .await; assert_eq!( response, @@ -92,32 +119,40 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + let mut udp_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::Udp4Connect)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = + let opt_udp_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_stats_event_sender_mock))); let client_socket_address = sample_ipv4_socket_address(); - handle_connect(client_socket_address, &udp_stats_event_sender, sample_issue_time()).await; + let connect_service = 
Arc::new(ConnectService::new(opt_udp_stats_event_sender)); + + connect_service + .handle_connect(client_socket_address, sample_issue_time()) + .await; } #[tokio::test] async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { - let mut udp_stats_event_sender_mock = MockUdpStatsEventSender::new(); + let mut udp_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::Udp6Connect)) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_stats_event_sender: Arc>> = + let opt_udp_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_stats_event_sender_mock))); - handle_connect(sample_ipv6_remote_addr(), &udp_stats_event_sender, sample_issue_time()).await; + let connect_service = Arc::new(ConnectService::new(opt_udp_stats_event_sender)); + + connect_service + .handle_connect(sample_ipv6_remote_addr(), sample_issue_time()) + .await; } } } diff --git a/packages/udp-tracker-core/src/services/mod.rs b/packages/udp-tracker-core/src/services/mod.rs index 0fcb612e4..6aa254f41 100644 --- a/packages/udp-tracker-core/src/services/mod.rs +++ b/packages/udp-tracker-core/src/services/mod.rs @@ -44,8 +44,8 @@ pub(crate) mod tests { } mock! 
{ - pub(crate) UdpStatsEventSender {} - impl statistics::event::sender::Sender for UdpStatsEventSender { + pub(crate) UdpCoreStatsEventSender {} + impl statistics::event::sender::Sender for UdpCoreStatsEventSender { fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; } } diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index be6dc45d4..93d3bb6f1 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -3,18 +3,18 @@ use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, ConnectionId, Response}; -use bittorrent_udp_tracker_core::{services, statistics as core_statistics}; +use bittorrent_udp_tracker_core::services::connect::ConnectService; use tracing::{instrument, Level}; use crate::statistics as server_statistics; use crate::statistics::event::UdpResponseKind; /// It handles the `Connect` request. 
-#[instrument(fields(transaction_id), skip(opt_udp_core_stats_event_sender, opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id), skip(connect_service, opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_connect( remote_addr: SocketAddr, request: &ConnectRequest, - opt_udp_core_stats_event_sender: &Arc>>, + connect_service: &Arc, opt_udp_server_stats_event_sender: &Arc>>, cookie_issue_time: f64, ) -> Response { @@ -40,7 +40,7 @@ pub async fn handle_connect( } } - let connection_id = services::connect::handle_connect(remote_addr, opt_udp_core_stats_event_sender, cookie_issue_time).await; + let connection_id = connect_service.handle_connect(remote_addr, cookie_issue_time).await; build_response(*request, connection_id) } @@ -64,6 +64,7 @@ mod tests { use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; use bittorrent_udp_tracker_core::connection_cookie::make; + use bittorrent_udp_tracker_core::services::connect::ConnectService; use bittorrent_udp_tracker_core::statistics as core_statistics; use mockall::predicate::eq; @@ -94,10 +95,12 @@ mod tests { transaction_id: TransactionId(0i32.into()), }; + let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); + let response = handle_connect( sample_ipv4_remote_addr(), &request, - &udp_core_stats_event_sender, + &connect_service, &udp_server_stats_event_sender, sample_issue_time(), ) @@ -125,10 +128,12 @@ mod tests { transaction_id: TransactionId(0i32.into()), }; + let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); + let response = handle_connect( sample_ipv4_remote_addr(), &request, - &udp_core_stats_event_sender, + &connect_service, &udp_server_stats_event_sender, sample_issue_time(), ) @@ -156,10 +161,12 @@ mod tests { transaction_id: TransactionId(0i32.into()), }; + let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); + let 
response = handle_connect( sample_ipv6_remote_addr(), &request, - &udp_core_stats_event_sender, + &connect_service, &udp_server_stats_event_sender, sample_issue_time(), ) @@ -198,10 +205,12 @@ mod tests { let client_socket_address = sample_ipv4_socket_address(); + let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); + handle_connect( client_socket_address, &sample_connect_request(), - &udp_core_stats_event_sender, + &connect_service, &udp_server_stats_event_sender, sample_issue_time(), ) @@ -230,10 +239,12 @@ mod tests { let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); + let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); + handle_connect( sample_ipv6_remote_addr(), &sample_connect_request(), - &udp_core_stats_event_sender, + &connect_service, &udp_server_stats_event_sender, sample_issue_time(), ) diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index fd0536b8b..eedf45e7d 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -147,7 +147,7 @@ pub async fn handle_request( Request::Connect(connect_request) => Ok(handle_connect( remote_addr, &connect_request, - &udp_tracker_core_container.udp_core_stats_event_sender, + &udp_tracker_core_container.connect_service, &udp_tracker_server_container.udp_server_stats_event_sender, cookie_time_values.issue_time, ) diff --git a/src/container.rs b/src/container.rs index c9e58bdc4..a53217c7c 100644 --- a/src/container.rs +++ b/src/container.rs @@ -17,6 +17,7 @@ use bittorrent_tracker_core::whitelist::manager::WhitelistManager; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::services::banning::BanService; +use 
bittorrent_udp_tracker_core::services::connect::ConnectService; use bittorrent_udp_tracker_core::{self, MAX_CONNECTION_ID_ERRORS_PER_IP}; use tokio::sync::RwLock; use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; @@ -40,9 +41,10 @@ pub struct AppContainer { pub torrents_manager: Arc, // UDP Tracker Core Services - pub ban_service: Arc>, pub udp_core_stats_event_sender: Arc>>, pub udp_core_stats_repository: Arc, + pub ban_service: Arc>, + pub connect_service: Arc, // HTTP Tracker Core Services pub http_stats_event_sender: Arc>>, @@ -86,8 +88,8 @@ impl AppContainer { bittorrent_udp_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); let udp_core_stats_repository = Arc::new(udp_core_stats_repository); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender.clone())); // UDP Tracker Server Services let (udp_server_stats_event_sender, udp_server_stats_repository) = @@ -111,9 +113,10 @@ impl AppContainer { torrents_manager: tracker_core_container.torrents_manager, // UDP Tracker Core Services - ban_service, udp_core_stats_event_sender, udp_core_stats_repository, + ban_service, + connect_service, // HTTP Tracker Core Services http_stats_event_sender, @@ -156,6 +159,7 @@ impl AppContainer { udp_core_stats_event_sender: self.udp_core_stats_event_sender.clone(), udp_core_stats_repository: self.udp_core_stats_repository.clone(), ban_service: self.ban_service.clone(), + connect_service: self.connect_service.clone(), } } From 67d62efb12bc3154a1748916988e61cbb3c64dd5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 28 Feb 2025 17:02:02 +0000 Subject: [PATCH 348/802] refactor: [#1326] extract bittorrent_udp_tracker_core::services::announce::AnnounceService --- packages/udp-tracker-core/src/container.rs | 8 + 
.../udp-tracker-core/src/services/announce.rs | 158 ++++++++---------- .../src/handlers/announce.rs | 138 +++++---------- .../udp-tracker-server/src/handlers/mod.rs | 13 +- src/container.rs | 13 +- 5 files changed, 145 insertions(+), 185 deletions(-) diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index f64149209..e09505d64 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -7,6 +7,7 @@ use bittorrent_tracker_core::whitelist; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, UdpTracker}; +use crate::services::announce::AnnounceService; use crate::services::banning::BanService; use crate::services::connect::ConnectService; use crate::{statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; @@ -23,6 +24,7 @@ pub struct UdpTrackerCoreContainer { pub udp_core_stats_repository: Arc, pub ban_service: Arc>, pub connect_service: Arc, + pub announce_service: Arc, } impl UdpTrackerCoreContainer { @@ -43,6 +45,11 @@ impl UdpTrackerCoreContainer { let udp_core_stats_repository = Arc::new(udp_core_stats_repository); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender.clone())); + let announce_service = Arc::new(AnnounceService::new( + tracker_core_container.announce_handler.clone(), + tracker_core_container.whitelist_authorization.clone(), + udp_core_stats_event_sender.clone(), + )); Arc::new(UdpTrackerCoreContainer { core_config: tracker_core_container.core_config.clone(), @@ -55,6 +62,7 @@ impl UdpTrackerCoreContainer { udp_core_stats_repository: udp_core_stats_repository.clone(), ban_service: ban_service.clone(), connect_service: connect_service.clone(), + announce_service: announce_service.clone(), }) } } diff --git a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index 
ffd382a20..051944d7e 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -12,13 +12,11 @@ use std::ops::Range; use std::sync::Arc; use aquatic_udp_protocol::AnnounceRequest; -use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; use bittorrent_tracker_core::error::{AnnounceError, WhitelistError}; use bittorrent_tracker_core::whitelist; use bittorrent_udp_tracker_protocol::peer_builder; use torrust_tracker_primitives::core::AnnounceData; -use torrust_tracker_primitives::peer; use crate::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieError}; use crate::statistics; @@ -59,95 +57,81 @@ impl From for UdpAnnounceError { } } -/// It handles the `Announce` request. -/// -/// # Errors -/// -/// It will return an error if: -/// -/// - The tracker is running in listed mode and the torrent is not in the -/// whitelist. -#[allow(clippy::too_many_arguments)] -pub async fn handle_announce( - remote_addr: SocketAddr, - request: &AnnounceRequest, - announce_handler: &Arc, - whitelist_authorization: &Arc, - opt_udp_stats_event_sender: &Arc>>, - cookie_valid_range: Range, -) -> Result { - // Authentication - check( - &request.connection_id, - gen_remote_fingerprint(&remote_addr), - cookie_valid_range, - )?; - - let info_hash = request.info_hash.into(); - let remote_client_ip = remote_addr.ip(); - - // Authorization - whitelist_authorization.authorize(&info_hash).await?; - - let mut peer = peer_builder::from_request(request, &remote_client_ip); - let peers_wanted: PeersWanted = i32::from(request.peers_wanted.0).into(); - - let original_peer_ip = peer.peer_addr.ip(); - - // The tracker could change the original peer ip - let announce_data = announce_handler - .announce(&info_hash, &mut peer, &original_peer_ip, &peers_wanted) - .await?; - - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { - match 
original_peer_ip { - IpAddr::V4(_) => { - udp_stats_event_sender - .send_event(statistics::event::Event::Udp4Announce) - .await; - } - IpAddr::V6(_) => { - udp_stats_event_sender - .send_event(statistics::event::Event::Udp6Announce) - .await; - } +/// The `AnnounceService` is responsible for handling the `announce` requests. +pub struct AnnounceService { + pub announce_handler: Arc, + pub whitelist_authorization: Arc, + pub opt_udp_core_stats_event_sender: Arc>>, +} + +impl AnnounceService { + #[must_use] + pub fn new( + announce_handler: Arc, + whitelist_authorization: Arc, + opt_udp_core_stats_event_sender: Arc>>, + ) -> Self { + Self { + announce_handler, + whitelist_authorization, + opt_udp_core_stats_event_sender, } } - Ok(announce_data) -} - -/// # Errors -/// -/// It will return an error if the announce request fails. -pub async fn invoke( - announce_handler: Arc, - opt_udp_stats_event_sender: Arc>>, - info_hash: InfoHash, - peer: &mut peer::Peer, - peers_wanted: &PeersWanted, -) -> Result { - let original_peer_ip = peer.peer_addr.ip(); - - // The tracker could change the original peer ip - let announce_data = announce_handler - .announce(&info_hash, peer, &original_peer_ip, peers_wanted) - .await?; - - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { - match original_peer_ip { - IpAddr::V4(_) => { - udp_stats_event_sender - .send_event(statistics::event::Event::Udp4Announce) - .await; - } - IpAddr::V6(_) => { - udp_stats_event_sender - .send_event(statistics::event::Event::Udp6Announce) - .await; + /// It handles the `Announce` request. + /// + /// # Errors + /// + /// It will return an error if: + /// + /// - The tracker is running in listed mode and the torrent is not in the + /// whitelist. 
+ #[allow(clippy::too_many_arguments)] + pub async fn handle_announce( + &self, + remote_addr: SocketAddr, + request: &AnnounceRequest, + cookie_valid_range: Range, + ) -> Result { + // Authentication + check( + &request.connection_id, + gen_remote_fingerprint(&remote_addr), + cookie_valid_range, + )?; + + let info_hash = request.info_hash.into(); + let remote_client_ip = remote_addr.ip(); + + // Authorization + self.whitelist_authorization.authorize(&info_hash).await?; + + let mut peer = peer_builder::from_request(request, &remote_client_ip); + let peers_wanted: PeersWanted = i32::from(request.peers_wanted.0).into(); + + let original_peer_ip = peer.peer_addr.ip(); + + // The tracker could change the original peer ip + let announce_data = self + .announce_handler + .announce(&info_hash, &mut peer, &original_peer_ip, &peers_wanted) + .await?; + + if let Some(udp_stats_event_sender) = self.opt_udp_core_stats_event_sender.as_deref() { + match original_peer_ip { + IpAddr::V4(_) => { + udp_stats_event_sender + .send_event(statistics::event::Event::Udp4Announce) + .await; + } + IpAddr::V6(_) => { + udp_stats_event_sender + .send_event(statistics::event::Event::Udp6Announce) + .await; + } } } - } - Ok(announce_data) + Ok(announce_data) + } } diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 97ce6ba4a..9269dadfe 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -8,9 +8,7 @@ use aquatic_udp_protocol::{ Port, Response, ResponsePeer, TransactionId, }; use bittorrent_primitives::info_hash::InfoHash; -use bittorrent_tracker_core::announce_handler::AnnounceHandler; -use bittorrent_tracker_core::whitelist; -use bittorrent_udp_tracker_core::{services, statistics as core_statistics}; +use bittorrent_udp_tracker_core::services::announce::AnnounceService; use torrust_tracker_configuration::Core; use 
torrust_tracker_primitives::core::AnnounceData; use tracing::{instrument, Level}; @@ -25,15 +23,12 @@ use crate::statistics::event::UdpResponseKind; /// # Errors /// /// If a error happens in the `handle_announce` function, it will just return the `ServerError`. -#[allow(clippy::too_many_arguments)] -#[instrument(fields(transaction_id, connection_id, info_hash), skip(announce_handler, whitelist_authorization, opt_udp_core_stats_event_sender, opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id, connection_id, info_hash), skip(announce_service, opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_announce( + announce_service: &Arc, remote_addr: SocketAddr, request: &AnnounceRequest, core_config: &Arc, - announce_handler: &Arc, - whitelist_authorization: &Arc, - opt_udp_core_stats_event_sender: &Arc>>, opt_udp_server_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { @@ -63,16 +58,10 @@ pub async fn handle_announce( } } - let announce_data = services::announce::handle_announce( - remote_addr, - request, - announce_handler, - whitelist_authorization, - opt_udp_core_stats_event_sender, - cookie_valid_range, - ) - .await - .map_err(|e| (e.into(), request.transaction_id))?; + let announce_data = announce_service + .handle_announce(remote_addr, request, cookie_valid_range) + .await + .map_err(|e| (e.into(), request.transaction_id))?; Ok(build_response(remote_addr, request, core_config, &announce_data)) } @@ -223,20 +212,17 @@ mod tests { AnnounceInterval, AnnounceResponse, AnnounceResponseFixedData, InfoHash as AquaticInfoHash, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfPeers, PeerId as AquaticPeerId, Response, ResponsePeer, }; - use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::whitelist; use 
bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; - use bittorrent_udp_tracker_core::statistics as core_statistics; use mockall::predicate::eq; - use torrust_tracker_configuration::Core; use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::handlers::handle_announce; use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_socket_address, - sample_issue_time, MockUdpCoreStatsEventSender, MockUdpServerStatsEventSender, TorrentPeerBuilder, + sample_issue_time, CoreTrackerServices, CoreUdpTrackerServices, MockUdpServerStatsEventSender, + TorrentPeerBuilder, }; use crate::statistics as server_statistics; use crate::statistics::event::UdpResponseKind; @@ -262,12 +248,10 @@ mod tests { .into(); handle_announce( + &core_udp_tracker_services.announce_service, remote_addr, &request, &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_core_stats_event_sender, &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), ) @@ -299,12 +283,10 @@ mod tests { .into(); let response = handle_announce( + &core_udp_tracker_services.announce_service, remote_addr, &request, &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_core_stats_event_sender, &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), ) @@ -354,12 +336,10 @@ mod tests { .into(); handle_announce( + &core_udp_tracker_services.announce_service, remote_addr, &request, &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - 
&core_udp_tracker_services.udp_core_stats_event_sender, &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), ) @@ -390,13 +370,12 @@ mod tests { } async fn announce_a_new_peer_using_ipv4( - core_config: Arc, - announce_handler: Arc, - whitelist_authorization: Arc, + core_tracker_services: Arc, + core_udp_tracker_services: Arc, ) -> Response { let (udp_core_stats_event_sender, _udp_core_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let _udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); @@ -407,12 +386,10 @@ mod tests { .into(); handle_announce( + &core_udp_tracker_services.announce_service, remote_addr, &request, - &core_config, - &announce_handler, - &whitelist_authorization, - &udp_core_stats_event_sender, + &core_tracker_services.core_config, &udp_server_stats_event_sender, sample_cookie_valid_range(), ) @@ -422,17 +399,13 @@ mod tests { #[tokio::test] async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { - let (core_tracker_services, _core_udp_tracker_services, _server_udp_tracker_services) = + let (core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); add_a_torrent_peer_using_ipv6(&core_tracker_services.in_memory_torrent_repository); - let response = announce_a_new_peer_using_ipv4( - core_tracker_services.core_config.clone(), - core_tracker_services.announce_handler.clone(), - core_tracker_services.whitelist_authorization, - ) - .await; + let response = + announce_a_new_peer_using_ipv4(Arc::new(core_tracker_services), Arc::new(core_udp_tracker_services)).await; // 
The response should not contain the peer using IPV6 let peers: Option>> = match response { @@ -445,15 +418,6 @@ mod tests { #[tokio::test] async fn should_send_the_upd4_announce_event() { - let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); - udp_core_stats_event_sender_mock - .expect_send_event() - .with(eq(core_statistics::event::Event::Udp4Announce)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_core_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); - let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() @@ -465,16 +429,14 @@ mod tests { let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); - let (core_tracker_services, _core_udp_tracker_services, _server_udp_tracker_services) = + let (core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); handle_announce( + &core_udp_tracker_services.announce_service, sample_ipv4_socket_address(), &AnnounceRequestBuilder::default().into(), &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &udp_core_stats_event_sender, &udp_server_stats_event_sender, sample_cookie_valid_range(), ) @@ -517,12 +479,10 @@ mod tests { .into(); handle_announce( + &core_udp_tracker_services.announce_service, remote_addr, &request, &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_core_stats_event_sender, &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), ) @@ -560,7 +520,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use 
bittorrent_tracker_core::whitelist; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; - use bittorrent_udp_tracker_core::statistics as core_statistics; + use bittorrent_udp_tracker_core::services::announce::AnnounceService; use mockall::predicate::eq; use torrust_tracker_configuration::Core; @@ -569,7 +529,7 @@ mod tests { use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, - sample_issue_time, MockUdpCoreStatsEventSender, MockUdpServerStatsEventSender, TorrentPeerBuilder, + sample_issue_time, MockUdpServerStatsEventSender, TorrentPeerBuilder, }; use crate::statistics as server_statistics; use crate::statistics::event::UdpResponseKind; @@ -596,12 +556,10 @@ mod tests { .into(); handle_announce( + &core_udp_tracker_services.announce_service, remote_addr, &request, &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_core_stats_event_sender, &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), ) @@ -636,12 +594,10 @@ mod tests { .into(); let response = handle_announce( + &core_udp_tracker_services.announce_service, remote_addr, &request, &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_core_stats_event_sender, &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), ) @@ -691,12 +647,10 @@ mod tests { .into(); handle_announce( + &core_udp_tracker_services.announce_service, remote_addr, &request, &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &core_udp_tracker_services.udp_core_stats_event_sender, 
&server_udp_tracker_service.udp_server_stats_event_sender, sample_cookie_valid_range(), ) @@ -746,13 +700,17 @@ mod tests { .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) .into(); + let announce_service = Arc::new(AnnounceService::new( + announce_handler.clone(), + whitelist_authorization.clone(), + udp_core_stats_event_sender.clone(), + )); + handle_announce( + &announce_service, remote_addr, &request, &core_config, - &announce_handler, - &whitelist_authorization, - &udp_core_stats_event_sender, &udp_server_stats_event_sender, sample_cookie_valid_range(), ) @@ -785,15 +743,6 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_announce_event() { - let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); - udp_core_stats_event_sender_mock - .expect_send_event() - .with(eq(core_statistics::event::Event::Udp6Announce)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_core_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); - let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() @@ -805,7 +754,7 @@ mod tests { let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); - let (core_tracker_services, _core_udp_tracker_services, _server_udp_tracker_services) = + let (core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); let remote_addr = sample_ipv6_remote_addr(); @@ -815,12 +764,10 @@ mod tests { .into(); handle_announce( + &core_udp_tracker_services.announce_service, remote_addr, &announce_request, &core_tracker_services.core_config, - &core_tracker_services.announce_handler, - &core_tracker_services.whitelist_authorization, - &udp_core_stats_event_sender, &udp_server_stats_event_sender, 
sample_cookie_valid_range(), ) @@ -841,6 +788,7 @@ mod tests { use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; + use bittorrent_udp_tracker_core::services::announce::AnnounceService; use bittorrent_udp_tracker_core::{self, statistics as core_statistics}; use mockall::predicate::eq; @@ -913,13 +861,17 @@ mod tests { let core_config = Arc::new(config.core.clone()); + let announce_service = Arc::new(AnnounceService::new( + announce_handler.clone(), + whitelist_authorization.clone(), + udp_core_stats_event_sender.clone(), + )); + handle_announce( + &announce_service, remote_addr, &request, &core_config, - &announce_handler, - &whitelist_authorization, - &udp_core_stats_event_sender, &udp_server_stats_event_sender, sample_cookie_valid_range(), ) diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index eedf45e7d..333bf91fe 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -154,12 +154,10 @@ pub async fn handle_request( .await), Request::Announce(announce_request) => { handle_announce( + &udp_tracker_core_container.announce_service, remote_addr, &announce_request, &udp_tracker_core_container.core_config, - &udp_tracker_core_container.announce_handler, - &udp_tracker_core_container.whitelist_authorization, - &udp_tracker_core_container.udp_core_stats_event_sender, &udp_tracker_server_container.udp_server_stats_event_sender, cookie_time_values.valid_range, ) @@ -196,6 +194,7 @@ pub(crate) mod tests { use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::connection_cookie::gen_remote_fingerprint; + use 
bittorrent_udp_tracker_core::services::announce::AnnounceService; use bittorrent_udp_tracker_core::{self, statistics as core_statistics}; use futures::future::BoxFuture; use mockall::mock; @@ -218,6 +217,7 @@ pub(crate) mod tests { pub(crate) struct CoreUdpTrackerServices { pub udp_core_stats_event_sender: Arc>>, + pub announce_service: Arc, } pub(crate) struct ServerUdpTrackerServices { @@ -267,6 +267,12 @@ pub(crate) mod tests { let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); + let announce_service = Arc::new(AnnounceService::new( + announce_handler.clone(), + whitelist_authorization.clone(), + udp_core_stats_event_sender.clone(), + )); + ( CoreTrackerServices { core_config, @@ -278,6 +284,7 @@ pub(crate) mod tests { }, CoreUdpTrackerServices { udp_core_stats_event_sender, + announce_service, }, ServerUdpTrackerServices { udp_server_stats_event_sender, diff --git a/src/container.rs b/src/container.rs index a53217c7c..3ef9b6f5b 100644 --- a/src/container.rs +++ b/src/container.rs @@ -17,7 +17,6 @@ use bittorrent_tracker_core::whitelist::manager::WhitelistManager; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::services::banning::BanService; -use bittorrent_udp_tracker_core::services::connect::ConnectService; use bittorrent_udp_tracker_core::{self, MAX_CONNECTION_ID_ERRORS_PER_IP}; use tokio::sync::RwLock; use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; @@ -45,6 +44,7 @@ pub struct AppContainer { pub udp_core_stats_repository: Arc, pub ban_service: Arc>, pub connect_service: Arc, + pub announce_service: Arc, // HTTP Tracker Core Services pub http_stats_event_sender: Arc>>, @@ -89,7 +89,14 @@ impl AppContainer { let udp_core_stats_event_sender = 
Arc::new(udp_core_stats_event_sender); let udp_core_stats_repository = Arc::new(udp_core_stats_repository); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender.clone())); + let connect_service = Arc::new(bittorrent_udp_tracker_core::services::connect::ConnectService::new( + udp_core_stats_event_sender.clone(), + )); + let announce_service = Arc::new(bittorrent_udp_tracker_core::services::announce::AnnounceService::new( + tracker_core_container.announce_handler.clone(), + tracker_core_container.whitelist_authorization.clone(), + udp_core_stats_event_sender.clone(), + )); // UDP Tracker Server Services let (udp_server_stats_event_sender, udp_server_stats_repository) = @@ -117,6 +124,7 @@ impl AppContainer { udp_core_stats_repository, ban_service, connect_service, + announce_service, // HTTP Tracker Core Services http_stats_event_sender, @@ -160,6 +168,7 @@ impl AppContainer { udp_core_stats_repository: self.udp_core_stats_repository.clone(), ban_service: self.ban_service.clone(), connect_service: self.connect_service.clone(), + announce_service: self.announce_service.clone(), } } From 6a8386660f82278ed9945634a80d3c44508f3c4a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 28 Feb 2025 17:24:01 +0000 Subject: [PATCH 349/802] refactor: [1326] extract bittorrent_udp_tracker_core::services::scrape::ScrapeService --- packages/udp-tracker-core/src/container.rs | 7 ++ .../udp-tracker-core/src/services/scrape.rs | 77 +++++++++------ .../udp-tracker-server/src/handlers/mod.rs | 15 +-- .../udp-tracker-server/src/handlers/scrape.rs | 96 ++++++------------- src/container.rs | 7 ++ 5 files changed, 101 insertions(+), 101 deletions(-) diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index e09505d64..c4cce3dc1 100644 --- a/packages/udp-tracker-core/src/container.rs +++ 
b/packages/udp-tracker-core/src/container.rs @@ -10,6 +10,7 @@ use torrust_tracker_configuration::{Core, UdpTracker}; use crate::services::announce::AnnounceService; use crate::services::banning::BanService; use crate::services::connect::ConnectService; +use crate::services::scrape::ScrapeService; use crate::{statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; pub struct UdpTrackerCoreContainer { @@ -25,6 +26,7 @@ pub struct UdpTrackerCoreContainer { pub ban_service: Arc>, pub connect_service: Arc, pub announce_service: Arc, + pub scrape_service: Arc, } impl UdpTrackerCoreContainer { @@ -50,6 +52,10 @@ impl UdpTrackerCoreContainer { tracker_core_container.whitelist_authorization.clone(), udp_core_stats_event_sender.clone(), )); + let scrape_service = Arc::new(ScrapeService::new( + tracker_core_container.scrape_handler.clone(), + udp_core_stats_event_sender.clone(), + )); Arc::new(UdpTrackerCoreContainer { core_config: tracker_core_container.core_config.clone(), @@ -63,6 +69,7 @@ impl UdpTrackerCoreContainer { ban_service: ban_service.clone(), connect_service: connect_service.clone(), announce_service: announce_service.clone(), + scrape_service: scrape_service.clone(), }) } } diff --git a/packages/udp-tracker-core/src/services/scrape.rs b/packages/udp-tracker-core/src/services/scrape.rs index 04fcb2fe6..fddc2ec2d 100644 --- a/packages/udp-tracker-core/src/services/scrape.rs +++ b/packages/udp-tracker-core/src/services/scrape.rs @@ -56,39 +56,58 @@ impl From for UdpScrapeError { } } -/// It handles the `Scrape` request. -/// -/// # Errors -/// -/// It will return an error if the tracker core scrape handler returns an error. 
-pub async fn handle_scrape( - remote_addr: SocketAddr, - request: &ScrapeRequest, - scrape_handler: &Arc, - opt_udp_stats_event_sender: &Arc>>, - cookie_valid_range: Range, -) -> Result { - check( - &request.connection_id, - gen_remote_fingerprint(&remote_addr), - cookie_valid_range, - )?; +/// The `ScrapeService` is responsible for handling the `scrape` requests. +pub struct ScrapeService { + scrape_handler: Arc, + opt_udp_stats_event_sender: Arc>>, +} - // Convert from aquatic infohashes - let info_hashes: Vec = request.info_hashes.iter().map(|&x| x.into()).collect(); +impl ScrapeService { + /// Creates a new `ScrapeService`. + #[must_use] + pub fn new( + scrape_handler: Arc, + opt_udp_stats_event_sender: Arc>>, + ) -> Self { + Self { + scrape_handler, + opt_udp_stats_event_sender, + } + } - let scrape_data = scrape_handler.scrape(&info_hashes).await?; + /// It handles the `Scrape` request. + /// + /// # Errors + /// + /// It will return an error if the tracker core scrape handler returns an error. 
+ pub async fn handle_scrape( + &self, + remote_addr: SocketAddr, + request: &ScrapeRequest, + cookie_valid_range: Range, + ) -> Result { + check( + &request.connection_id, + gen_remote_fingerprint(&remote_addr), + cookie_valid_range, + )?; - if let Some(udp_stats_event_sender) = opt_udp_stats_event_sender.as_deref() { - match remote_addr { - SocketAddr::V4(_) => { - udp_stats_event_sender.send_event(statistics::event::Event::Udp4Scrape).await; - } - SocketAddr::V6(_) => { - udp_stats_event_sender.send_event(statistics::event::Event::Udp6Scrape).await; + // Convert from aquatic infohashes + let info_hashes: Vec = request.info_hashes.iter().map(|&x| x.into()).collect(); + + let scrape_data = self.scrape_handler.scrape(&info_hashes).await?; + + if let Some(udp_stats_event_sender) = self.opt_udp_stats_event_sender.as_deref() { + match remote_addr { + SocketAddr::V4(_) => { + udp_stats_event_sender.send_event(statistics::event::Event::Udp4Scrape).await; + } + SocketAddr::V6(_) => { + udp_stats_event_sender.send_event(statistics::event::Event::Udp6Scrape).await; + } } } - } - Ok(scrape_data) + Ok(scrape_data) + } } diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 333bf91fe..165b307e0 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -165,10 +165,9 @@ pub async fn handle_request( } Request::Scrape(scrape_request) => { handle_scrape( + &udp_tracker_core_container.scrape_service, remote_addr, &scrape_request, - &udp_tracker_core_container.scrape_handler, - &udp_tracker_core_container.udp_core_stats_event_sender, &udp_tracker_server_container.udp_server_stats_event_sender, cookie_time_values.valid_range, ) @@ -195,6 +194,7 @@ pub(crate) mod tests { use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::connection_cookie::gen_remote_fingerprint; use 
bittorrent_udp_tracker_core::services::announce::AnnounceService; + use bittorrent_udp_tracker_core::services::scrape::ScrapeService; use bittorrent_udp_tracker_core::{self, statistics as core_statistics}; use futures::future::BoxFuture; use mockall::mock; @@ -209,15 +209,14 @@ pub(crate) mod tests { pub(crate) struct CoreTrackerServices { pub core_config: Arc, pub announce_handler: Arc, - pub scrape_handler: Arc, pub in_memory_torrent_repository: Arc, pub in_memory_whitelist: Arc, pub whitelist_authorization: Arc, } pub(crate) struct CoreUdpTrackerServices { - pub udp_core_stats_event_sender: Arc>>, pub announce_service: Arc, + pub scrape_service: Arc, } pub(crate) struct ServerUdpTrackerServices { @@ -273,18 +272,22 @@ pub(crate) mod tests { udp_core_stats_event_sender.clone(), )); + let scrape_service = Arc::new(ScrapeService::new( + scrape_handler.clone(), + udp_core_stats_event_sender.clone(), + )); + ( CoreTrackerServices { core_config, announce_handler, - scrape_handler, in_memory_torrent_repository, in_memory_whitelist, whitelist_authorization, }, CoreUdpTrackerServices { - udp_core_stats_event_sender, announce_service, + scrape_service, }, ServerUdpTrackerServices { udp_server_stats_event_sender, diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index 248f0ca12..3e6da4778 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -6,8 +6,8 @@ use std::sync::Arc; use aquatic_udp_protocol::{ NumberOfDownloads, NumberOfPeers, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; -use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_udp_tracker_core::{self, services, statistics as core_statistics}; +use bittorrent_udp_tracker_core::services::scrape::ScrapeService; +use bittorrent_udp_tracker_core::{self}; use torrust_tracker_primitives::core::ScrapeData; use 
tracing::{instrument, Level}; use zerocopy::network_endian::I32; @@ -21,12 +21,11 @@ use crate::statistics::event::UdpResponseKind; /// # Errors /// /// This function does not ever return an error. -#[instrument(fields(transaction_id, connection_id), skip(scrape_handler, opt_udp_core_stats_event_sender, opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] +#[instrument(fields(transaction_id, connection_id), skip(scrape_service, opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_scrape( + scrape_service: &Arc, remote_addr: SocketAddr, request: &ScrapeRequest, - scrape_handler: &Arc, - opt_udp_core_stats_event_sender: &Arc>>, opt_udp_server_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { @@ -55,15 +54,10 @@ pub async fn handle_scrape( } } - let scrape_data = services::scrape::handle_scrape( - remote_addr, - request, - scrape_handler, - opt_udp_core_stats_event_sender, - cookie_valid_range, - ) - .await - .map_err(|e| (e.into(), request.transaction_id))?; + let scrape_data = scrape_service + .handle_scrape(remote_addr, request, cookie_valid_range) + .await + .map_err(|e| (e.into(), request.transaction_id))?; Ok(build_response(request, &scrape_data)) } @@ -105,14 +99,13 @@ mod tests { InfoHash, NumberOfDownloads, NumberOfPeers, PeerId, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; - use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use crate::handlers::handle_scrape; use crate::handlers::tests::{ initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, - sample_issue_time, TorrentPeerBuilder, + sample_issue_time, CoreTrackerServices, CoreUdpTrackerServices, TorrentPeerBuilder, }; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { @@ 
-125,7 +118,7 @@ mod tests { #[tokio::test] async fn should_return_no_stats_when_the_tracker_does_not_have_any_torrent() { - let (core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = + let (_core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); let remote_addr = sample_ipv4_remote_addr(); @@ -140,10 +133,9 @@ mod tests { }; let response = handle_scrape( + &core_udp_tracker_services.scrape_service, remote_addr, &request, - &core_tracker_services.scrape_handler, - &core_udp_tracker_services.udp_core_stats_event_sender, &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), ) @@ -188,28 +180,28 @@ mod tests { } async fn add_a_sample_seeder_and_scrape( - in_memory_torrent_repository: Arc, - scrape_handler: Arc, + core_tracker_services: Arc, + core_udp_tracker_services: Arc, ) -> Response { - let (udp_core_stats_event_sender, _udp_core_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); - let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); - add_a_seeder(in_memory_torrent_repository.clone(), &remote_addr, &info_hash).await; + add_a_seeder( + core_tracker_services.in_memory_torrent_repository.clone(), + &remote_addr, + &info_hash, + ) + .await; let request = build_scrape_request(&remote_addr, &info_hash); handle_scrape( + &core_udp_tracker_services.scrape_service, remote_addr, &request, - &scrape_handler, - &udp_core_stats_event_sender, &udp_server_stats_event_sender, sample_cookie_valid_range(), ) @@ -232,15 +224,11 @@ mod tests { #[tokio::test] async fn 
should_return_torrent_statistics_when_the_tracker_has_the_requested_torrent() { - let (core_tracker_services, _core_udp_tracker_services, _server_udp_tracker_services) = + let (core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); let torrent_stats = match_scrape_response( - add_a_sample_seeder_and_scrape( - core_tracker_services.in_memory_torrent_repository.clone(), - core_tracker_services.scrape_handler.clone(), - ) - .await, + add_a_sample_seeder_and_scrape(core_tracker_services.into(), core_udp_tracker_services.into()).await, ); let expected_torrent_stats = vec![TorrentScrapeStatistics { @@ -285,10 +273,9 @@ mod tests { let torrent_stats = match_scrape_response( handle_scrape( + &core_udp_tracker_services.scrape_service, remote_addr, &request, - &core_tracker_services.scrape_handler, - &core_udp_tracker_services.udp_core_stats_event_sender, &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), ) @@ -325,10 +312,9 @@ mod tests { let torrent_stats = match_scrape_response( handle_scrape( + &core_udp_tracker_services.scrape_service, remote_addr, &request, - &core_tracker_services.scrape_handler, - &core_udp_tracker_services.udp_core_stats_event_sender, &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), ) @@ -358,28 +344,18 @@ mod tests { use std::future; use std::sync::Arc; - use bittorrent_udp_tracker_core::statistics as core_statistics; use mockall::predicate::eq; use super::sample_scrape_request; use crate::handlers::handle_scrape; use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, - sample_ipv4_remote_addr, MockUdpCoreStatsEventSender, MockUdpServerStatsEventSender, + sample_ipv4_remote_addr, MockUdpServerStatsEventSender, }; use crate::statistics as server_statistics; #[tokio::test] async fn should_send_the_upd4_scrape_event() { - let 
mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); - udp_core_stats_event_sender_mock - .expect_send_event() - .with(eq(core_statistics::event::Event::Udp4Scrape)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_core_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); - let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() @@ -393,14 +369,13 @@ mod tests { let remote_addr = sample_ipv4_remote_addr(); - let (core_tracker_services, _core_udp_tracker_services, _server_udp_tracker_services) = + let (_core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); handle_scrape( + &core_udp_tracker_services.scrape_service, remote_addr, &sample_scrape_request(&remote_addr), - &core_tracker_services.scrape_handler, - &udp_core_stats_event_sender, &udp_server_stats_event_sender, sample_cookie_valid_range(), ) @@ -413,28 +388,18 @@ mod tests { use std::future; use std::sync::Arc; - use bittorrent_udp_tracker_core::statistics as core_statistics; use mockall::predicate::eq; use super::sample_scrape_request; use crate::handlers::handle_scrape; use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, - sample_ipv6_remote_addr, MockUdpCoreStatsEventSender, MockUdpServerStatsEventSender, + sample_ipv6_remote_addr, MockUdpServerStatsEventSender, }; use crate::statistics as server_statistics; #[tokio::test] async fn should_send_the_upd6_scrape_event() { - let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); - udp_core_stats_event_sender_mock - .expect_send_event() - .with(eq(core_statistics::event::Event::Udp6Scrape)) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); - let udp_core_stats_event_sender: Arc>> = 
- Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); - let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() @@ -448,14 +413,13 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); - let (core_tracker_services, _core_udp_tracker_services, _server_udp_tracker_services) = + let (_core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); handle_scrape( + &core_udp_tracker_services.scrape_service, remote_addr, &sample_scrape_request(&remote_addr), - &core_tracker_services.scrape_handler, - &udp_core_stats_event_sender, &udp_server_stats_event_sender, sample_cookie_valid_range(), ) diff --git a/src/container.rs b/src/container.rs index 3ef9b6f5b..46cf0c987 100644 --- a/src/container.rs +++ b/src/container.rs @@ -45,6 +45,7 @@ pub struct AppContainer { pub ban_service: Arc>, pub connect_service: Arc, pub announce_service: Arc, + pub scrape_service: Arc, // HTTP Tracker Core Services pub http_stats_event_sender: Arc>>, @@ -97,6 +98,10 @@ impl AppContainer { tracker_core_container.whitelist_authorization.clone(), udp_core_stats_event_sender.clone(), )); + let scrape_service = Arc::new(bittorrent_udp_tracker_core::services::scrape::ScrapeService::new( + tracker_core_container.scrape_handler.clone(), + udp_core_stats_event_sender.clone(), + )); // UDP Tracker Server Services let (udp_server_stats_event_sender, udp_server_stats_repository) = @@ -125,6 +130,7 @@ impl AppContainer { ban_service, connect_service, announce_service, + scrape_service, // HTTP Tracker Core Services http_stats_event_sender, @@ -169,6 +175,7 @@ impl AppContainer { ban_service: self.ban_service.clone(), connect_service: self.connect_service.clone(), announce_service: self.announce_service.clone(), + scrape_service: self.scrape_service.clone(), } } From ddb33812d12695ed458a3d4d8fa436898f4ff32e Mon Sep 17 
00:00:00 2001 From: Jose Celano Date: Fri, 28 Feb 2025 17:44:23 +0000 Subject: [PATCH 350/802] refactor: rename AppContainer fields --- src/container.rs | 46 +++++++++++++++++++++++++++++----------------- 1 file changed, 29 insertions(+), 17 deletions(-) diff --git a/src/container.rs b/src/container.rs index 46cf0c987..07c30d604 100644 --- a/src/container.rs +++ b/src/container.rs @@ -24,6 +24,18 @@ use torrust_tracker_configuration::{Configuration, Core, HttpApi, HttpTracker, U use torrust_udp_tracker_server::container::UdpTrackerServerContainer; use tracing::instrument; +/* todo: remove duplicate code. + + Use containers from packages as AppContainer fields: + + - bittorrent_tracker_core::container::TrackerCoreContainer + - bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer + - bittorrent_http_tracker_core::container::HttpTrackerCoreContainer + - torrust_udp_tracker_server::container::UdpTrackerServerContainer + + Container initialization is duplicated. +*/ + pub struct AppContainer { // Tracker Core Services pub core_config: Arc, @@ -42,10 +54,10 @@ pub struct AppContainer { // UDP Tracker Core Services pub udp_core_stats_event_sender: Arc>>, pub udp_core_stats_repository: Arc, - pub ban_service: Arc>, - pub connect_service: Arc, - pub announce_service: Arc, - pub scrape_service: Arc, + pub udp_ban_service: Arc>, + pub udp_connect_service: Arc, + pub udp_announce_service: Arc, + pub udp_scrape_service: Arc, // HTTP Tracker Core Services pub http_stats_event_sender: Arc>>, @@ -89,16 +101,16 @@ impl AppContainer { bittorrent_udp_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); let udp_core_stats_repository = Arc::new(udp_core_stats_repository); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let connect_service = Arc::new(bittorrent_udp_tracker_core::services::connect::ConnectService::new( + let 
udp_ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let udp_connect_service = Arc::new(bittorrent_udp_tracker_core::services::connect::ConnectService::new( udp_core_stats_event_sender.clone(), )); - let announce_service = Arc::new(bittorrent_udp_tracker_core::services::announce::AnnounceService::new( + let udp_announce_service = Arc::new(bittorrent_udp_tracker_core::services::announce::AnnounceService::new( tracker_core_container.announce_handler.clone(), tracker_core_container.whitelist_authorization.clone(), udp_core_stats_event_sender.clone(), )); - let scrape_service = Arc::new(bittorrent_udp_tracker_core::services::scrape::ScrapeService::new( + let udp_scrape_service = Arc::new(bittorrent_udp_tracker_core::services::scrape::ScrapeService::new( tracker_core_container.scrape_handler.clone(), udp_core_stats_event_sender.clone(), )); @@ -127,10 +139,10 @@ impl AppContainer { // UDP Tracker Core Services udp_core_stats_event_sender, udp_core_stats_repository, - ban_service, - connect_service, - announce_service, - scrape_service, + udp_ban_service, + udp_connect_service, + udp_announce_service, + udp_scrape_service, // HTTP Tracker Core Services http_stats_event_sender, @@ -172,10 +184,10 @@ impl AppContainer { udp_tracker_config: udp_tracker_config.clone(), udp_core_stats_event_sender: self.udp_core_stats_event_sender.clone(), udp_core_stats_repository: self.udp_core_stats_repository.clone(), - ban_service: self.ban_service.clone(), - connect_service: self.connect_service.clone(), - announce_service: self.announce_service.clone(), - scrape_service: self.scrape_service.clone(), + ban_service: self.udp_ban_service.clone(), + connect_service: self.udp_connect_service.clone(), + announce_service: self.udp_announce_service.clone(), + scrape_service: self.udp_scrape_service.clone(), } } @@ -187,7 +199,7 @@ impl AppContainer { in_memory_torrent_repository: self.in_memory_torrent_repository.clone(), keys_handler: 
self.keys_handler.clone(), whitelist_manager: self.whitelist_manager.clone(), - ban_service: self.ban_service.clone(), + ban_service: self.udp_ban_service.clone(), http_stats_repository: self.http_stats_repository.clone(), udp_core_stats_repository: self.udp_core_stats_repository.clone(), udp_server_stats_repository: self.udp_server_stats_repository.clone(), From a8224e836e601623ff23cf572e5daebd74d8baa7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Mar 2025 09:16:51 +0000 Subject: [PATCH 351/802] docs: [#1290] add documentation for packages --- README.md | 12 +- cSpell.json | 1 + docs/index.md | 11 ++ .../packages-dependencies-http-tracker.png | Bin 0 -> 33225 bytes .../packages-dependencies-udp-tracker.png | Bin 0 -> 33537 bytes .../torrust-tracker-layers-with-packages.png | Bin 0 -> 68191 bytes docs/media/torrust-tracker-components.png | Bin 84935 -> 0 bytes docs/packages.md | 115 ++++++++++++++++++ src/lib.rs | 13 +- 9 files changed, 139 insertions(+), 13 deletions(-) create mode 100644 docs/index.md create mode 100644 docs/media/packages/packages-dependencies-http-tracker.png create mode 100644 docs/media/packages/packages-dependencies-udp-tracker.png create mode 100644 docs/media/packages/torrust-tracker-layers-with-packages.png delete mode 100644 docs/media/torrust-tracker-components.png create mode 100644 docs/packages.md diff --git a/README.md b/README.md index 6d611d9a5..671a484e6 100644 --- a/README.md +++ b/README.md @@ -40,12 +40,12 @@ Protocols: Integrations: -- [ ] Monitoring (Prometheus). +- [x] Monitoring (Prometheus). Utils: -- [ ] Tracker client. -- [ ] Tracker checker. +- [ ] Tracker client. WIP. +- [ ] Tracker checker. WIP. Others: @@ -65,6 +65,10 @@ Others: - [BEP 27]: Private Torrents. - [BEP 48]: Tracker Protocol Extension: Scrape. 
+## Architecture + +![Torrust Tracker Layers with main packages](./docs/media/packages/torrust-tracker-layers-with-packages.png) + ## Getting Started ### Container Version @@ -167,6 +171,8 @@ Some specific sections: - [Tracker (HTTP/TLS)][HTTP] - [Tracker (UDP)][UDP] +There is also extra documentation in the [docs](./docs) folder. + ## Benchmarking - [Benchmarking](./docs/benchmarking.md) diff --git a/cSpell.json b/cSpell.json index dcdcc9cf6..3121d6175 100644 --- a/cSpell.json +++ b/cSpell.json @@ -15,6 +15,7 @@ "bdecode", "bencode", "bencoded", + "bencoding", "beps", "binascii", "binstall", diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 000000000..873f3758b --- /dev/null +++ b/docs/index.md @@ -0,0 +1,11 @@ +# Torrust Tracker Documentation + +For more detailed instructions, please view our [crate documentation][docs]. + +- [Benchmarking](benchmarking.md) +- [Containers](containers.md) +- [Packages](packages.md) +- [Profiling](profiling.md) +- [Releases process](release_process.md) + +[docs]: https://docs.rs/torrust-tracker/latest/torrust_tracker/ diff --git a/docs/media/packages/packages-dependencies-http-tracker.png b/docs/media/packages/packages-dependencies-http-tracker.png new file mode 100644 index 0000000000000000000000000000000000000000..45dbd9839d533ae1cd548343f4190f56eac9abca GIT binary patch literal 33225 zcmdSCc|28Z+c&;wP*hZgLPV3yB}3*k&|oN;Gl$G%9c%cF4G;P^QYf zGe*cf&uqVA?M-#v&vifdbv@7f`MsZa{Xy2=YpwG<&SUr<-{Uw}_mvc6w^K4wA_%hm zyxf^92(noYK{oB!LJnv0HgI^uFEYC;vNA|oE%N~UvC;7SX(h7l2w9}5*l`5ekDNbq z>Z)_(aJ#(q0lbUUut4qmAdB-7L55CGgTy_z-}oX;^9;L}lCT9YC{g83adD{xFYK++ zzhUFsGrH_dU;Nai>G$e+9Si!Hh<|j%%3r74V(+mW1kyi6k}#LN(n1{DOk`W=06 zd>R?j_X&^JSbkGf6qQduk1~WOH(Ju~o)o zh?!|TlU9^MLZex>W~xF$aDFJ+*AYi4;ArvvkeA3It-CewjCrBYdpsM$*3O#K zUg5=ob_*eDYHHROk0`3+lasmb!%~FrJ9HxFMYzrVk-OY^vANkdSQ2r;*~y1p&k07< ztgNieySU5TrOA?=ot<5*_*ATwfy3c)bHmEs!ZZvIQBY9$`1l-a>BaAW#iZl13`h-c 
z+UqQ@8tG&@6m{Ebp-RpG|+4_oLY1#jKW~4}AOvtWy_rT1Rf@#qAW^bODN33kh}Qbe+EaU563u_8PojE#s7tR?s`=1qUewak(s*`w8_u13^V@Ex zDTx5WTP&Yesw(qIozf;ulxB`Jb4G^iY7>?ZL^GvJFBHbg(`wKkWl=WoLQr~;b|fuY z>e|YI$2!-4M4V4;x1wHhk+=7v(%O0eGp#Q~?zthRgDq^!26!;1vWMZ&{Je-qP&Tq- z$Bx;#Io)FSg~{G3!o|j{fb-Y-e*ECLi=*^2+Wu%yX7Yxwz>Cx0o z2WRI!>4&TGSr&Gjb)Sl%*6KYV!`5uUIi32Vr>Ez(cIvWkl!A%+)5AEM)%dB-eYTle z&ua;Oyx5$pO#}D*6A0XjISJmi4dFB2%{fw9F#J>z9~O@*)9$7C6!^J%AYEX2K(P=L zI5|iEK)E)s_WJg=3;l}z>MN32RoYmILB9CU%o|}WESjdl75!yTab%?hiC$7tvR2n8d@Re`sad8^ZFns3dLZrMaqp_p%>hCwuk^JFvC%sPA6WacAJOis zJ-}?DLp|2*{I2f?Q|ju++0P%72x1-zE6VT)WHk1B0sGxz57{PX-?uT#slAlRf!L5? zEBnii0~$ik`gc=6!QVM$ZJ)7D9{&(U^+2eMxj(qcZk2pg_xqW5ps3%8e63@m>uTj(pZ#zq>M`M->S4c;2a+#YL}Un`<-g*eBz~xF=wHEGmHSc1^6iz{ zTO>O{?==@VBa!%J1nVq4TDXf|FJKz?p>rWisyzciY*<0Nr8F_uxbCKQdv#Bky(JhK zXt70XH|zR7s{i?Y2fk3vZEV_Fw4lB_gEH2_Bh8|FN?&aYz198}DNT3&CDmfG6CM&9 zda3&){0fx`{er`)K^rjF@qLr6`@SqxnmOjNN((NIY1QrZXzp6t2F`tMy30Q`h3k<# zkM6vhWs`-dxcCF%9H!8=`%Y@x^3I)as@N;!ynLMLqykxV)Kim)+W5_%4&zue57<6k zCRiFxoi>{KQBk(oPa8OMovG4Qba$h`*L&<(Ly`Os`dRG{tZ$xME}Se-ZMo*oAMj9u zg_Ze{+tV66o}zpdIZo|r?zb~oCP1pR1XI$6dFT=Avrpv15B&s@9H+G7edO;#_nOSu z4!Ym_f*@u|UgfHVpRLuU*oWf0oN`;LM&rXIobBzezM)U&clLNaFDJC*yee|Q_kPyu zo_@8D*0=P0y1Tme>zZ6k$r%0e<+A=vX+t0W6vu;l9My)QfzAS%?JH;L0~}Y6d$TV& zaZaz$QWj{;o6>WlW}P81h;eUu%h%RwtEX~ckac4I$Cq>B(I2BP9tb6)SDIA}8cz>qCvTQO-qLS~) z_T#Pvw!0VJ1#J)7cYApM`>$E;1Jr)KeyomSkKbJSrf_(}MSrnyUA@Qyp{#|g1q*bB z-gt7`(<@KjEPHOih~jTdTsq8T`GJiBQQu}gG_)|)?|-hR%#UG(S`tGLSCKd`q`kra zTMau!wu3UBoc5wo$HLqvy3fv1{FtAAGd{<$+Uv)_H_3Nk1(VlJzTgle zh0#$j!jzQmcli_}^(hb{8}q6R9sQomr*o8dTh74~y!CtbYaGL$5|NZ-v7MfrVP$1C zG4HM7Wo|6p?|sEkf5nLQJBVGl)!zLe%ctdN7_wJp(OXHWG zeks$w(b(g)oo^sk^H~ovWG;`KGC1e#i(JNzl~*pbe>{@?t{{0T>z!D}yz}WGckw(} zm>rkD9oc1)&FYNl*B3PLGdk2e^^YZdQAmDCq5rj z(G?XH3wh*}t-et;^UgwVOEb)txb|vuA|>64c7|M?lpk3d?uP3^}z3xC}9omxy$3jMuRVZl2jJ#=HI5}X7g4YcZ?+$ea*u7=MoAA4g|QTUnLk6OXr zuwuDE``EIBy*(|J-<qA4UqC!JUG6Ve4bN2laEWe1 z$-9rd{M9-OhA~{YOO8ew^sO#ik~GKX@K*GBl{>1=Xh%B_xF$Dp+Seam`J`G*a3AL+ 
zv~t%9$Kbx&`o`BFgLQ0+qWIvNWBhBf-hC%ZGf{M=Y6s>|keTP>H4zF6@X zwJTneU{)}m@a0ufuGr#a^%pfUW3E2FX}>^4*FCbGa&td^+%Pv!&RE)g|54HK@$ym; ztX;bVQt}{q>iY!p@C+vSg;W}y7XJ!1JTP3-6JYl)8d1LiWl{q2E)+aeRV zwYaSkGgMVa=9@2;vtAI|t;%NNWN+VKmYwz1P++vSYAf}Q9o?67ypMO^0K~Gu#GNm) z6VZDnwbaXpd6K^w38ywOF==gWb>hvzVuND}mjO(8?3Hp9jkBWnK zMROf~T;*lmXqeNwuAC4@GIMP?rU?4I}-_ITe5xOvOrq0aRA^En3JO8Sy-jTOxSNS1Aj zJT|>@$xxYFtW-=)Y$3+sPv8Nf3DkKSmoOo_gM)+NRXEofPOtIrAA`>ePxsYC+~+kZ z$jcjWEPw-USs_K3m@fzxCOi-SvHw!h0Z61if?-Izwp$51Y=vBm738(Q=v?fhd_ZJ< zfiUxj(eEKGQ~nIgP!|k8^eACU(ym|%$fZ+|smUoCP$8a2Z$NH0a2|f}Z$fV&fhH31 z=q%%oz>gAI$oV{{9i5z%!yqGE{(|03?BvPw+K?Ge$)dLz!(&a7E<&0}_*=I+2svc< zAHT-uEad>nB{sYzKMy_5Yr{kJKns#ChHoVf_{fnX0*$Sj+S<1!zn{--mDk<^Ul0}+ z7I0X6jIR2_j&0GQ5-yV=~Y(p6=^AXH~_F&KFffJ<9rnIXXqIGlPxEbr;Ey z^4IzKn%LFa8a-45cwr;MD*O>d>4ioFOvp2@>&h7rg!WHCm<0U%XKq13#4ic!pu_no zh;)d;ykv;yLhQdt?_a0o~h)lK+0#NRXgqL_ThDF)lx+F%NLRw-#|@w{YR zJ757yWSl{?0|QAP3z2dsJG-f2JQfo92QOff+`jGY?O?9e!S(g^5fMyzJbIx|)<}T= zC5P}=mk<1D_;JlQPF(q=ltYdM{|hbr=S$+bcba;2oO*TEdNnkk((`hMB9JJa@9FvH zczJncwiM|AyI{1Kj9W}@Z`WRR+tXQIUS3&w0eo2|(M}u+Kz8jQ$xA)m-Q6=o%@b2k z1qBJCV+3A@ty6K$Lo=>LA;iPq-@l=^r-Mer;=OsCw2sRF-fiy?H8O`zw7= z(ftz5dB_w;4!@AFFlMnnX3%V>jJk$~=?TV&h=?McPccG?cZqg4L~}bY*dhD%YmSzd zmU~61B2$dx&xm%epr8OE<$od)5tK&1p{zqyXn}FuhWJsmgj;C-M$fJ9O-miJ-vocA z7eBG)FZ7DvTb->Ly)Bn#(1~pxdda)dOkvHxgp1{CwwW3koi(IldAerv{JQpBdwfQZ zT$Nt?(%*GLf71AjaA0tf8!u>|T^P*QE!l=rRj8ZW6JI~jwjg)(9eMSEwefl07L8IU zXy+AgvC19m3=K*oA6)l;&*}%JbCVYfG&eACar4*a9Mq_^H5l1(GD>DjB>i0jUC?sIB(WB8S7n_%?nX2l}&zqN> zO^4OCjeIkHucqu&_m+vNHzDrFG9M@;?53!eq+gU==r-K@&XHfYF5}8l)t(%Ng%$t1 zAtzjym%p&gDl>dB5K~`i4Y(=n3c1*Stgs+D;8Bt zAQ0dlZI5JBG>+Lmu>b<9&6OFeWLEq@zx()tJat=J$>}e+h=puQsZDz2C-xoV83AmwuOTNFDJTjqczJG})Mqzz13PV@r9RV{Lrcl}MXQp$ufx)c(Wf0!AW zZ)dhs-@ECUSBR7qJ{L%*ij%b7M5;y%NE6yv9c5*6C$p9_MjXY`uDogD(;bW&iF*V5Mo+kREXi@vLlZ2Cen!*>38qrk;s@#5m*ZLO_badqcD#i%H~VZOOccf{ZvPq^}^ z?-nGkdBhRIrP679+~p_tRRb@_yX%@RyKO4JpVoasec^{{nVY?&U zcE?#Ez2wHq9eUsH29|IyVRZ?UVZmur5KoUNMvF@yLrU(v$;;cymXew(pWhvanO+_D 
zk-Dafj};>C-dE2B-aQ)wz07=nH(!_J}( z{}K8J;(!8X6r4$~OuH5}9Fkrk+9t%^a4}c<2ZlyQha9#l*%8gi+}s?3Vz~6M>a}av z;&{FAjc8bjE+;u}#Pg$tg+(0iZ4HK{=&Gu!B&yp_>%or#Ht*}}8){DX&e`*$yPLf{ z`(RGjbMz_5B@07Cc*0q0KMw{5hICz=j)sPyW$7*~ zGsNn|sJTx3RvZyIORWqjj$NA!#4H=eeo+Vfc<*>eq04SgoMBh8S~{VdkI)T}F_ys% zPG4?rl8EO8uwA1^s05!~*`+0|@pryj2Pxu30FjMwT_tg8N!~-+;b4wumK^GK1sVMT zWGO|-2C}^5B}?22k4161r}s?{^oY)Sw-cmeuHQ-BhgpEm}b z$x!-vR~RcdEwC`$kP#5m0xFmUhu{DImvxXLYhu8fNcR_+e2RvNdJc-aPIEYHm6j(; zAu2Kwuqj~FKP?PN5Lv(0h9m{w^?MQJ1Y#ww5SsGnJ%_sy7YT(6>Ktq9^mi|NYMQQ7 zlwVw|0C|etSYMy9lasImT`b&_Buqvv><}2w;$l9U4Q%uj1;vxsF6DqM##IaPJ5Zh- z8WyI^Kwd53pQ<6;Gjy+5s)R23xG6Z=K(Ps_J%>b!jDp{z#MaQmnIxh&7y4({@NicWx}s{O*}tc)BfD@ z$E)_Xw#evc@WFcAfHIPdPp?@~&+M#_S4LrBWIm1FsHw%N#{BkCI0gsJ24xWAdueM< zkEq4M4hH6ymZ_~HX{o6pI=iqImX@;ZHn%nN?LS8{YfAiGtOAyhgb5hDCcP`p+Li~;LnZ44!pAmw@G?A1Ubrx&N9J1@{@6IMPG!!DtZ_cK*RqOvPX9Ft^nCZW9HjvDs zu^#+OKJ8A9GGd~J=w+^lmH*@x{%0`ZFYl_S3WUwUD1IzZyGR%c~(HB zY3b=l7Wc?F1Mig)_*DxD(a_XHm+Ko!a1}K+Hqw-W*_>hR(d@x67nOj5XO$t&O2^W& zf7%v+Go_=pfD%Bhfpi?4oIH+B6eS~INP(EgklgS#zk;TL8PzL9`=%{>OWIY44xc@6X(%#I~lr z2pbIgs@&hb>^Xp8IDnjSfV&_x#2bTV!vKl6iuBxkVCjpBpF{fMH4$dlRoeWkB^zW*sWXfc7Ll{M(o1YmyKK@v_f#bwL+* z-X=OTk^sP&JtrZjU(=`p2cVaTSxmqzHe*(rfq}s-MBvVAQy~p8@0$wcoCBrOzdv(l8eC+Jg>kbQ&5O=e0|_pAM8aLGIhws4~bIw1EHQ zk&}fUlX#PumuZKdjEsyxHx=k+25RS{qgI2Qbp@_R9N11D5|@27H1I@#_f@WvZK|K2 zA5AGFamox=l5i`nxRsBJ9$7a`e*mZG@4sC>&9}zDaAbQtg)pfkHglFA`}NUe>ZgFa^T_i`r1i%Y{*QIaeKSJH;Aq7#tBtbRY<(|rOWg_ zueoM81h-)SJndJ+W1@+S(?s6cIbeDD33`>|34=Qa9C!{soXNSukVRvhm`Rs#<>t9E zeyG(cV3B9`6XYlQ_0B|Zy&BtbKXv)c#UyjKC-e0TUtg7M6U^y;y(1}EFCH4wqz9mD zE%MGI+x)YFgHbKMMcdT&^S-gDOl|OP=uA10)6Mg8<<7k?FlUp6(H?}~De}%2MQcCG z2C+;!8JcExuZ+CBq)e6D8m1O+MNqu^Y%|=Zu4@0Msc)n-Tq)_SoW)Dt+tX+=5tczi zB%cw)?`1i-Y6H|8Ay;3`f^#@d`uE_MSST(dX5cu{&=nPf9tmh}>z_)|JYavtQn)dm z3^BfK#}=*uPTd2=o}7KrYI#X6xNAokwWyQZt#UO}uSQtqwoXhD3D4d+dPJRh!H!L@ zNzNkVMWdNZm~fP8vV*&OWIko(^Gv$PiAxu(nGg=|dVY2FU0RJlFg-$5V|~Us{Kk>x zsv_dzDJx@&SUmJ!7-8|;Mz4E%3=^IsB@zyzQP2qh3T`TAp%U9}HHT}B!aLb*NcI-v 
zS=8RylWtFc>OpD@FsV}aJ0(O!IJjA5T3TC&#&-J?tY|Tpr#k)3XVQ$5 z{cgP|Eogu9y`;IrR!1(j)USm${idO+m}`g_!-vgph*M$6XmMlpkIBv z=hMmVLHy)I0Tgs?(Kf{u#@yBs-Q{iuR?PAQ@M8LKa>`G4nLUjTXOXSBVzjNzFJ^VQ ze?h*Y=GNjs0-M(xS^Nz=G$e|R%B&W{g}5zj%q%R?MZr9u2kf)%%r>g&f^?U|8%4y# zT01(@Zu-4kY9I3edTJ_DGVnSAy`=vgWFHhGiVh>Fjz2B5&Ju`Ne zaDrRx$u>41s4)v+n2#4KG3L`&#wK^Y+f(Ym&u!7ab*pGmUO<^;I%Q^gdAZdZ2d$mA z?!EPair`kZtK(NNwziohFZh`WdX~Ut)#}usTy1j+!Knah*0}jHzQxX(vakv$oSU_X zz4w?X<1Du0wh-3Z@Aa@q#@O1}r$}f=isLlrmyFzZ-`Wet^tpf@7mBFy@$g?kOSVwJ z1rlFsbxI0zZ=xuBd9^62EgiEmbhL?aWO8y+iqIoflCGKaE*@Imsoxm_yxTR-UE{`G67ndHE48Bc4b(NhBaR`s zpV2?yn8O&QaHKK5YI31JMi**xY2;m}fZ@Ur7W$Kt!|M!+MG{0Q+*+Ofm~b4Piecc> z`L1}D(Z4*SK4W;I)KqgN!xj}27iVJb zLSJdA>eCuy{nB(`aK(H^SPyO!6|fMBSXx^0o}e0W`1!y(b(xn02yW|A1i%q)40?Z)0~1 zJ7@p6d!PaM`&Eb1SVdJzJ`Qi)Qe!M$%C0G@d;i4M6#j+`6=*r=zUWyMM3$IIgds9} z2zNQf%qwGFKX73T9Wyh!GaYl+U!-+FYCBW@2KYmMds_{-uG|=tQ{jfm+(t#RTn~aV zKI%8rwqKJhuIy#qn;<4sCRVe|>Ft#L62pmv9kd&NF%kemovv~;DAQVk!B;R?T zu_!j;^8%mjYCx~`uFE%q<-bMun;=R8?N&}inrWMo$o49x6-SPOftnJh5o)#|iLdnZj}8H0d9qPu6yGm^UG_CxKyg<~I$3>IIlyBO@$@7F<_ z?q6RNndNyRr+iocSX*c3r>DiXO`ndbNr{SzN=gRsQQ)^sxZh|QD$TH^41A!V>2G6W z)3u2lnLLzZ6RE~fe3>i)ZI3HtiqTd}uwirqDwueX@C+|-fmKJ55mPefs4*rTJ3tzRTv{s!Rxatyz9$&nvI$>f-5qsR?;q89S%+j6K{nd$O zFOF5H8kM^bsX3o2m^z!4Q4*!K7qkIaB9i>Tc^`{5zr%}6y3!WhAU^3FVbzOG_)dF? 
z-$Q|kIL<1Uvg)g$5BV$%E=@2eHp1dQ5Rgd`!hJu}%F}y|1~K-3ue!@A+KaX}j50wV zyd16sY!6gWYC9Fib+e#F+0I3+%lLk8fp_XtB}4R6i>q5-eLCwe;jyfKgX_NjSl^;< z&FD6>-p>8h1$yGvzRl%dU`%4yoIf$8zsbkS^fkN?&dG|tn%oGTB#b%g;(IIUhwMeY zMO+lf;x{kOR@G%E<5_2qbADoT z_W1%Q!ucRRf546Tr61V`b~~M(OQj>T4lHe75$Oim0in0gwuRh*;N;y4N#Isq*O-Gf%uy$$*+I(G{@)F~CeGa)$U1C(rO&EIZV%aRtN_HEM}eg8(aEVx2I zsqj$undES7zbD<0+>`R7*RNk+9s8`B-VF7HfwBV3kaAVDJ4&oWbI;wAVq$jv zrU=W2(!)6bah)6-UIK%KqFnz6ka0A4KfIET_AcaW(uF^V>)xKHJMez>h*%AI*KJ~I z%eMD9&v8Pa9Eyy;wzL2rI#4!5Be9p0|FnN9uv)A9*S)Y<0qDMAsl}Rnx)S#A+^_-Q zdlV-fP9p+pN;V5Ct74B8%^T+W+SjfXdaSrYNy;FZT%e%^KC`yg)z&_l{-P7o4QK@$ zUG+i4BZ&#~!Kvv1<$k?L(3YquiCc?T4c9<7^g(=X)l}C|GuJmcnb-mu9f#}U^Z=T# zK-LAF-%Ak}oSg#bHr#@3D9>u?g)-?*QfOVzOh`&T8#1`B*|%Hp2E^6?i2z8gzD0fY z<4L9YBTY5LI4zUz=Bfa#S08Yb(1;crD0u<;mu8%9-h7eU%WOeb#yST~BV=r}J(|Ec zBvHAjGchs2zo5Ei%QDf>tMsU*Mqb%m#gv{&i_HrXKWDR${D+r4mRdUr6M-0a=*`){ zg8{q-WwO=pN2H$Ql!)4>ZkINqFHEMZDLvV8OW`(QrE_&9eRVNCp>;Ga!E6PpU{mPV zp+w6(RkRpaCBV_5n}<@<(*yB|nouMD66kSarcW=8N6vsNSe{)HryU5zLELf`?wYnX z&#mvm)*bHsVx0?Jcc}aFGtbIk1zp4uY0iAjtHZ$H>Ut~~ZL=+Tk+h<{^ER=Z=#|mp zboStVAXurTmThczaVaT{6|j?stg!=rmFF!oKR{LF&@Xc1Z12Gw_3Kck2HxN(nsNaF z@Z#n!E$-te!?H$OT+V}ARs^zMw8#u!C)|J+INJ*XwR3SfnptLma#8C9NQjn>j>q(m zNAKzV&ZEo|iS8I49R&nb#-3Tm4)rI}mu0pP$yIJBDAGRxt|D|!nF01&?FK|SxJ#Y; zSSIX^S^|0*^nzkVU7pv%#bx1?Q`}Qe#Z6L=C>;;3V%0S@pS~p|tyR5H^H021$kiQ@ z#VN!ngzT~}ak1S?jAaJ~y!P{~H2Xh9p%KgB>?0WK| z;&M&J_f3x{kGYUxGY)_cMCtAf6Ftt~`4}@!9rdUAzU;q=mP~MFe!>t=l&=Y&+-$Il z4<{}qrKo)tB>0o{@qD^C{*KzjvTq(3smEvoBKqWK@y0@#A!v2>=jCR`yR~@FZY0*h zP%cYJSs>~tz5pWci707-pf6~bBI~P(m<>%%-rS76{OiO;1FA$}v|jD`6?Ak^9wh>vZHIG=GY;x0paUn~8$AP_!6buI zkL9E$E2^k%oIK6wjM zXnDBQoRj#HM7@Qx6*&|cu zwOc|T#?Od$du(CbsLjI}>X6OofA<-E&%GzbM_si9_5?nC8{~P?Xwn(Lgp3HW2?()s8mL=kKsGNXTPCN=D=~%x|<_7RHj$#gUhltv|=8O88 zxP=X%<1nHAymr|};3%dxeVWYN7xSBcIhJdj=h9%68yFa{Ler&Lxogi>-^yGuZBbfqm^wXUZFR;iF{6Ds=s*q=Yo1-f zyMU66u|6F4P|J?i#geD2sQuVkADJ7{U3K-%9!!7{7qt6m5f&F0r#?x=6!IFk_s3v7 zP(!8kq&U`$DJ&HEbZ<%AZ`AIo=s?m}i8bD!xPLz|9XFPfhP8mEKLcY5ohR6r 
znxL^T_RBriDf2Rlp2>-+W!23)W|kMGfUmgT>dRHWX|ji5GxxpD%2}i35o4-t%$WCs z8kQ@a{(+C3EqX*2jIzq^uBcZC_hElv2>qC1aGgiXF`uzb*2N3)%|jBOyWU-0HrElL zo^$^(N&}XTtL$D2+q=2J}gI3)t&<6Z$(bTr02w z*NQXREpUrYY*xZrFNs@-m%|ng0HM}>z;@QObyBXo_qCRO*br!$Qi#FGvIg&h z$d{O9>CbmKXT9MgjGY{Tsur;a;PHMP*03=wB*?>KV+>!RlM;*TY*^JB=I!;N#E8Ch z)ekYoo@_tsxYBAV6rnL^qGz6x5vZZRBKr()K_4JpoPNS1&@}SV>CR(Exj##NI4w}I z?sUPj7;Lj<#Ucbj5G&O5O};#@WD0K#HmP`2m-cP85}tPWyg5y5yvhSUp9t{D)TNh_ z5)lb!KPhMv;y#>gV*%S&Ky5)$B>4_!u6H+~Vb+W%NAui?yOVx1L*Km~*(T^8I0M== zqoD7xcs^IGG%B?SHn8kDZJw$7##srTRy(&AflzRwj?};OiuV)T z{yH~TSb>Ry(K++`{N~!o5LWTooXrZ-vow>rN-2|~?@@mrwdTef&JHuvU6HG=QDsrv zyGQw{!jz(a$LJ03Hs$5xqiz;Vbd>=V*Sii*tOQ@*E->8Ep<$&T{u};4CQ_-R!x`6czssQD~B_Wuem&$ecQ^E!ErA1#QPbg<2%O2|3n!-S+9MYBJs7!q@z~Pe#-{#$L1> z@jWny8P-47x!wzqiyL0v`N3&%vAKrH*+TY#xNBfePEP4-2Ct5aFkO$l!a{NLB6LR` z38{t~`sF#;*kt79Isq9UQ;bq`Ifem&fmGTN7OqKu=#RWwR`Tw9J4ckM#?Bv{*75O8TMA7(X=3BWIJl?~ia1 z1k<1RxJg3l(gzIj9csz&8m4{K#~~Sqv>EZz3*hNuJM=_I@+tmgju!y?H)!PD2%k}d zwBH{slF7e^udamxi#i}-?JfLsL@&-?L0=sRO1oS`X>x6wnF>bW{ezP^p032e2oAqb z_Lcc!JU`XT3xFw%p~>Ep28gxl5S?`4z92~92j$niKBusUA^F|CM<%9w!*ru#V$_tX z&j64xyg}6!O?=XpZ^UjO?uQULb`um_nt` zTbA}iL_3ve10Q?Hko~twQP;;wo_CKj1a&*|fRj`CklGNz6|&XBVi_~suBd1@bc%|vCuyN_m+pdw;-1>nNpYjY8&p7mw-RPK%kZOcoNYaBF z0UgrF=e9zW9P22mG8f6>ku;li+6)Cnh9XA?5oTkJCac_w;mL0b(**k)A0yN>u*lMv z7CLjPdQw}k?saIO*x~a|YNcN4R@SF^*3}7C*eDrD_z@UAETD>q#ypPveOis|sgdRM z8Qgz>Xz%d_<{BtYoDcr^9cB9q+DDZ=%B2>9rA&n)6leD;LziV1ce1AN1w$%z`nM>& z%{r0e`FbH4h20+^6{A~TxIjJLF-CnNZH%ci(J8 z9!P9@NRF8C!CqxHhT8Lb#>_5EdV$}ia5IksY=M) z6h~A#FPhDFP7UuwLKO|EFNebl-**~Dj{}@ zpx=qR2;V_>%1Lj5%k$xKul390lK=2eMN1t128v{H=yr|$8~u6qYtva+sMs8Q7Ejb` z=me=#d0XDJG&ja2LZ?K6s!;VGc%>)t>W>3tf-lkG!-lViX_St!aQn3)Jv>!y)6iu) z=B@Wz}%wM5V5o1z8n4 z5yE-_W^OrvJbrOyCK0N&a6q2sSg_BHe*Y#|d=$cpiTO#8OCRN|^Jm=Q1NoLlC=?#J zs|*4+94uu)vlZf?ZBpA>jLr7|K=m&`%@~S*+FNXF*9l@IlvxHu$1s-d(ooeVykS;Op2oh(2W<^ z=9@ThddsT;$S$g~psedg16M$Ms**#r5t-m5wAcZ=%sorH31zu5WxFu2eV~kmtS%Fa 
zRrbEh1?$!e-GYqcOA<;9E??r~rQj_us<2r`VpB;FDm9`rf6MZ_adf4#livrikc!RYoG?T=^^9K)qGXoa;2HPaz9BcA+V!XoUC97 z_45iAGP#E};Kz|5l_;K8+1N)GRO+Jt!5yG0jvP#`AGmt(@}%0%+sC(nsvtq&tU7QQ zRySW7P>q|UbFlpgde;mUcfxd(cKq^SsB$4e>zai;n+$^xidxl-|I zT<0obj?L?vje1v(w<3qlFiC&574<0%Bx*XxZDc* zhx|k>$S(S0R~HtVI}8I6B{Y!ya;)oSUI&YD@Y1@O!&c_~`s?at4+^_=-ONe8dELyl zVPQ(qg&}$>Sesqw+H8S6|QdifLm zJsY|LEM(7U=?2adUm`*4M!8>*w1fa289;T>-lwfZ`(XoC9*eqI8F=2mi5J9BvOe6) z;A1mo=XHto=L+|yNSy^YYYPK;D$Cm2<}UpWJYzinL<|HfP%JFq9P0^oMMlsar;HWxi95a=@RU(u7DE%;M20xEirEZJ{eS{zEp za<{^kNC`P-*ysb;mOa`?!A>qnk4m-q`tg%eQr1EdnG|Go$D$O{)6f zc2>atBAP!i^iqc^@$Dmy5|2CzzuANvY0wGf`VtJAOm%FDvR5xg!`nkiq7I^qEsLq! zlwq%O@AXaa|D8)6bFT!b=^jC-Fv39_hynE#U+yYkE{2cpIzsYasC9w=f|u2HDL2PVP(u?t^i^!XQU@dXK70H_)$Qh9Ca7gftqGTDg5W6qi&^`$qns4 z;#h8;MqZ_~TY(fIKzPvr6o#NReAKf6^zE;p3ykV)PB!R#B|sJuoJfRn6KS}ozz33= z^ia%W2Co^RB!y!XlM$iEe{$~sjo<(I(JKW*a}j3Pd{X`=T?R?M`^G;@%jWgjDi=)t zpd4z~GFwoG2VPMvsyuXij^~=&MxAg?-vsJgU)6?=02Pq4i5q;XTi+`OY z`R~R7d(I#ZK%>%oNzNG%Dfpd0iL)qfJ4reR1`b^;nF**<;P3&_|D`*hmkRzEN6!~a zo$)yA3&oqco1#%^!*d4FVpoQTrozK|Cf+{DEfi}n~GH(+?9IM z#rp!dT^V3^E|`_s$}`O$!yJ4aXn>iGvc|1AerbiO9JDru9Fs5lzPbe>e2Bd}p#b`m ztuiMPQvkX^1=d2SU)HT;dYoy8 zk4C7LdjhI!Ytkl(su6F3C%}a-c6$K%qXkL5@4WmgO(kig@yj5KG`iO^u%sHOnejtFBTb6e?a}uSKNo z!NDP(9{|0(_?fSHd*qC&jv*J(6QXeep?LxU_894l0Iz&^qC-6cnSjy*;!uzK^LQE% z*ZdJYlJ)=^r0-oaq_RexJmMU2ei|Nw0^M2y8{?n7_3fqVj78UugAyE+JOEeADKkvsEw}v~3T9c)6!ve2I#c}6v z=@9sVxDc4qYmx&JWZuCQ!~xny4BCK6*WEV}-xC-7I-rQTvCehOPdW!z5c8HlSsE%k zlFRyqF^SnRQGJPtH|eQ>G*!<|3hpP_!gWYdHYwdDt*R0|148}}Gi&6Yg*Z#t>ns2X zLL^Ip-nh6BESeefnc8~(AlcOR&37j0o7WH)axbM2IvG%$g+{ZrRr*`T_m_H%svNv) z=Fxf2$0=njpg($zsWNDSV4^|44#1E{aLOFrPaE5!j3l9z>oxl(sV(Xg{+p8)PENUn zWxv!B8cbE|D(TK=|4I$61N#d~%jnN}^C-MA0KDrj2Z7GFpgm2_6e7{zRF+>3=U?V* zo(Wq+5OoL8$Sm%vtdzK!erPy`wDO=FQRPvcgp>;G<07mrcpE%8A{g(lZ~UcEWI;p- zQf-3xfzl%gjYe)OkEs!w*^nF+tl*zM26yw#CJ}|*q^C|YYC+d1q^NOa3{bFtOr)*$ zXTfO*=ny;DTGzqGzwv)W$MmmG@i%XE!O~*8&$}RcUAaGHiC~+_pMR!^ppM|HpoTQN 
zMf#7wrBT4GuX!TWjXs2TPbfb+120}(TPBUzFF#h6=KhQT;AIuJZb<+{0U`IIi2HwL zihr3wf{uv`)8Yozc}X>Q?-=D%JP4ut{9y zwgG#XIz*~7s1lPN2eX8XAd4HS5rR(S>p|}yAh1-`%u3CIuV>DTm-c_Bz;H~AV%0~rXc<(goLOb^kKlGq`lw11tX!8?od{_b7j zMKHg6SG*oShiNZF{(ph98O6mq9o>j6$v(N#q56W7oJ30LuhHt?)>~8q0Z*@mOp>jG zk^ZvCKZjNr*jjk`_kNqCVk4uYp}B8RK80lF(9e27Gw(Nk7koCnaVIWBmS~3-#1?>A zT4^(q`vwh#B#iE;<VvxVVT%x@aku;I;0=HL2A;YeIaS z@)Ft}v=K=~z}eCPOJ^3<-P^~YDw$Ak8f^q0^7fwO&lZ4JMC0W>%$j=( zcO=`IG4P9JcNX7Tnpf6BZ`RN;oRmI);E>Dn1A*LA&Q-!rjUw5>uiMr>Z5>Xs&TVyg ztLZvn>3ZBoufrPInsC~ zrSKgk(W@g$7ttX?d)s+*l87`9QIU|uLqvt3gZ-JMxg&E+fq5sDqIJ@n$L3h`?%BB$ zQ&Od~umdR2Wp?R9#D+uAn6kk=e$!Oe1VNHpF9Sa``EnP zhB{Y#{&>&@QJ46C1pM$}&3C)xQL-Uc(nc2EnIrkV=+n#>H;U@%r4W7=n1hGw7%U}_FtFb{|O^yvlJscXZdqSU&<&aSQic_gSB zCh11gFmtgBLotkm>FE==1&i>4%&WLgSe9Zp*-?)Zot6DvU0tKmk_!<|W9{s^3UZyp zkwp>rrQFpCi$w=a5<1@bF|)GvXEwhgCMffT(0VOiaPs8ICq>xKF?fw|Ok`v=B`w&s z3vS>Yb?>Awkd^ShcB$4>c-OE8w44st08;}S0o=XaR|@>TE8ZVt!B{7`QNXpGx9l(& zjAQfYtrK3bCB??sSww6q^7h z-=}Rx?fCqk?prPqXe=nOryKt!o{*40{oaOkwzd!Fl<~(T>yEM=t(~1Q+8N8^HHwOg z&Q zsw&v9g6o9-q%D{?lY$Z|Gc@ZM9}7f)|#f(0p%wLltpzB6Nbq^&AQQsK?*Z-p(V$5&})K z9V3nrHGPp4eD>u$um8X5t}`mCY)fCE6$KSg#BPxSvq&-^C=^1YBt=AWkYGYZLZbpz z6iEe9XhcO&X$c~TfMg3Aut+{cMMTL#3Q&RsB`7(~cgya#-h1aH|V7qfEVe8akg=sNEV`>2gnf7h~Xz33u3VwqS%~ z)REwSR&pd8l@ND!8;zN#b%hhkaCYsQqu&9#5<&DFR*ZN?Illo3(?@~}4dAL5CX*UW zH3+%Xhf5PBM*42KJKLq+#LO+uUPh|a?IU3M2^8nKVeO#UfHwNs^8VX59cH4spOy`x zK5%h-7Dta~gSG;u$g{``m1uqUxp2)pYh`COl$a(zQ>=w|G3SzR;pMOyHoiTlSYA-l zW1d7Byd6w~(a+6!rPa&}oCoD0xe{e9I*a`G({Fsm8EW%K*Sz!-ZQZa*3j3Ipvv#S{ z&fS&Yp7S$Xb2HI^lYhh6$4Of8YT z1uka210oGk$_xbZZz(PDo9c#QNW&R>B)LAL_%3tt^t2pnBXg_4V4!_9EjRe|$w<|njOQc_((v{X?3a^77 z57z0F-lb)7l;DWTzhV0Es!R=;*51!n1S7j;9i@}=!nH3*lFI+Z&ir=}3iA7X!$^(< zQMf5LHy0wF9xuNA&jEnG1!w+D*Bs&{=`Th$9^3j;a4AWG5|1Uzs||ce{AKk#Vh{P6 z_t(~>orBnZ;3bR$l(TGhk}1$|hVW!NLHnXJ3+G*on)w?4W#zaNS+94hVwT#KK^E^F z_#ie*o7p04j->V3@e1#mFXPLtKN@=vn|N=ZU5oqmeD(<@_#@W^Ty`9oPnBd^6s8su zI)*Ye!{4<6!63%)dH8r2vkBBV|0kSf!2*l!Imr{Mt5*28pjjnAj&4 z34)*d`&DfHQ2(K!7sv{gys6e! 
z;;cv0BfJ{OPWH^GvYSFpc=MB%{;sHOZ_Sxb`%Achv8^H_ZEDyffjT#D(#%#YynJ)s zGW31*Jtl}mtUeA7{K#@&cH8U23Dgu8hxEz*oAV(8wDqf`@$PqT2yxK z1d_>DDzwDz5~Gf{{jG7)crP57%nA{;dNw;!)B`05A?HXI8Mu~ruQHzNfxk&TpsBjn zc#+%ExYE8mfSR;y(J+M^+Xaoy6_ag6^a65kCOfbYg|GF;xQv$59ful5sTu_>QPMQ0 zycEWA-PcwXyHlnv$&;!)I$tg;I%O!!gw1-Hy88*k>^dygrWHbj{{$#(vc>;aS-qPDYZJlre}8g1r_|GX%x zpupQrGuhwdcQ?;$V_XijrX3g*E?rM;VoXmAqbP*Dy!`6d*PM0@qozOd zEhTvW?#fz9&m;*qtd-mp9IeM99V++ zinJh1V`qin?75^I8Q$F|NZv+zy8>okUCA}5?d$4e0-&ax>GYo%yphk5>4?^Y_X%h= zJwNN=#)jGe)B@qSbmSuQvQQq2jO2E5S2hA9kH8-H@u4_-Ne8GNc6~i(kpWB9fjvm^ zrD1~|AV8EN0(LtR{In89%MKwm5+AQmsuapm*noj+W?3$DdJeDB77lI`WCWb)_=#*o zKC-InGjW}s(*}(z+K+XTJ2!AFKlQqCQU{N?j}70ZV^oU2z|;k<&GCb@ zCwY0jrR%qayf}uk3nM~AVO2VvYIu3pm6Xu&1zFK*&pxC_{ERl)=w6V12)ZJE-j2dk z5X8pMPG}llPFH=qnzP(t9ig{zkNf9c2bS`#}2h# z73zSdweauD1KECD=ZlxVyPi?7{P0zOJ!y*J+|OF)8P}g(?78o@XLKpyvuyA`kuZ>Y z>nG4T8$75U%;Uo~^uYAdqDc;5f)C^w{*)t~Da{M+44&RJKvX;z*9O+RCRC8hO{6Kp zFkBO=TgCP5CpzRWLf`)aI+&ADPt_c>f&{*E543?_P8x-HVv`+}0x}>1h5JY!I@bOV zb|II~n0YdXN`)DQm@2WP&R$+Q#7q#k<<}8u`kax$L`ahD)3vB-Ic?N=T>6|NUpwnm>P%GGL>Q zknT5H-J|&kFU4v=MtqimD*TP@l(w4eF3zHoF_1i=PsVHt12BbD?a1bt3c}oS7<+%b z>VNQ1Q?dusbbxRq+3&OveFf6AEebk5bN&TVK(6R2Zn0 zo>c0wWe)Lh2U3{|mtY!8olYQumC9s81+VNF86REcOX8uj@Dr=7J%?+^9PX*U6Do|c zG_sBnS?Gb{T@S)s8Cl$!-#fW;{1B8A{{b#Sb-QO5S@-9wJXXH2Hho;jKz7`ZGBYGp z_%ave2w8`YY#mCnp5A0EWu#O#Vczk!9y=;PKlmPE!s#YVw?hEa(!OCUWS;p7kcy*2 zW(L*rJ7jLBb$BH^Iyw&B*2jj6ddo-9`z7rOwTRhT1Dl<&h&_Dt=m#Jwv<527>OC_K z3z(W1S-$7{l<8A~4B@@kyFxiR{uuu#@Z)^uYB7y}j&$g_wT_F>JB~rRU1uLSSbtGZ z5u>asjZwF;%3SopY%WlmPY*bqg-_6eY9{nHIYS7s^z`&Lr!`#A$KUN3u1(==3;~ef zRUoGJ+#3DHMT$>`RVs3QR;s~rt0*fYa+&Fy#5*xPg9Jw8iI5`8@h6GsJ=0zYpv{t~ zsF+DL_Q&3d4{tGo z6BAJwD6r(nhj(&YruBr>10FX@i1hzvFLLL&S)Oj*%sE8|laoCCDe=m*+t1wU-c28T ze767d=f;Xy1*mHN5@hWBXmwEN%Q!WntSm!nu4;q9`cP%jhO!KMxfL!O9*iX@i3jzc zx*z4J5jM}QV$*}Ds-i;n^AXoi6{l2t+G^O!jF*wQ!;iI%!y{G9tj|v0|EPQ5VqVmX z323!lLP{888;M5^wAD_Y3I=veHtR-b3cpr^9`6--ib`LzTe@j%bzg@_wElYLA(_18 
zj8VIg-wibm3N=$KluxA^kKWfRYM7X8-h##kKP4MmZ!h2b*~X5k`6+jKSwDgvnV<3Kc6tuNQn4 zQE|8#)+740$*s5Q*~=4{@$!-Zt3_Qr#_dE@0@$HJVvq{45QyHcht!IynbhhBAmN4{ zr?=ZaOiXOd^vB*9h7rvNmbA|y24+4&DRDQ6PTMzCxD0X0%a1pjC>qlnubLPc;g;gL z*ql|N50m12oN#|{odLCa*QzZIwbyz#e;I!wRVHeYlxq=ZmEmBge5vVlW{o>fx?D4T zH$li}K<~_xkI=267s|h|ixR1yr!3KsQ&7;a%sr@eYT9w6di|k<#E}KRB3DzSDm#eJ zHR;p#_==22mS>gsTFh?L|N6iJ!YYFJy$vm}WVMThQ|jaHN^rRC<@epwHw$Ck>{^#x z%V>&doFh@u6C@k3`cqqvkx^L3k#wzlO;_^{#8=^K6*9bh)1=B=3(w>nN=+L*__)NL zp(IjdB4NS-*REF=grwFCi8L?|ne<(?`n=L0>TWNbMi|(7~RcoV7z8 z7a`7zA@tBoSm1e3h{Mi{%Q6luQ7gmlRxxj=6wW9e81qrb4p7BcOB*8el*l zi&!bW_mWzJs$G;R!Tu>+gpVaor1?KMbxG!Y_8YnVRdO(UyO(njg)-1qw1h$0JGr3! z7Gmc=nS)eL>MMQ_LsL#qX0fwxTJpm7czNQ&`ll!qb8IV}M!V*~GLiTlaUjk^zU*3L ziH(dyGLl^lg5`DkH7x78e~2+eQVRJcebUD*5?X2}*}20NDzhLT^IMjPz=KDyf49jT zqL6~N{SqzYw8*lgcVcFG5}`d=8JSBm2pq}3;h-oV^1E>OeuD$zYw=VZtG+GO%>N_o zpcQpVMm!5O*AO>7`*9OtlZnJ+YvgF4Ks)h7xTv-b=>SOoBs(OFd=a4N8zV#pP7B(L zmJ}3`K=@Kj>A!t6J|2hKw|Lz@_BQ_dFRdDUWGwm5?XzW?H;~jMLe=owbKa+a`>coG z^;v)6-GBB-chwub?`|R7OdM~FHm89Md~Y8?uy10P)A%8{5aCOtWGJ}Uq;;so5WmF2 zV1h_}F9{l`aB+9~FyuVy$zo#cBx*pmi9y(JzG3=re9xaF{QvmY!p&Gp9l;J3o^S_4YiB2hd+p_w&P=P!PJL<0c8cHBJ$$#l zoQn{69e)p~FeIT@ar%v1d0Oa`zm1QN7tGEuyu4DgYY|?`Enph3A=bQ4GD5?C zbYc!6073F8?8yzYbG|)DlLETEjNCR=!~IpX=c1+89Oe*vfQ-a9H8FRPx=EZwLO4hP zy}U_OePu2-NVnPiz2mtuB{A{sNkgNWH-1zU8~|$Nh^YbQqbji~V;eN#Q9Ns`#>nV+ zCLG&GlXB-^s}?gw9t}}jw%AP91l1WNGM&qt6&hV{osjI`pOBcys$Bf>>Y*rRa#}N- zN*h@`U`HHmV^l?x^%ttOl(5n~Dd9k&C3xFs`qK<4td)lyM=p@1L_kcQjZ=i{)U&*s z$8(%I64FL$Px;A3OO;V8PGp)zcELh*zt1WjqM|hEC;cYuJ6OG6f$A2j zFW;IQ;Rv;ISo(O}gU#M(+1KozbIA6<^+QexavF*76t<9xbSn^!gWlePPC7g2p?-9x zsZ=g^V@~t5v4hs*Pa3CMs8|O}V?D7|jCjB@1PGX!*~LhStF}6z(zlqcyd^Z95n*VRWYYC4qe>%l zII!0wx5MEs6b}zvmcoI=#JvvjR=3Au?>=<T+j& zvC*3=<38G1cvZ954$MgVSC`zMHg1uq@5#^2Z_Nx9J33u!UcyZbtl*3m0{t+_XZ;!s zN|V@YxB_ENGET*5F3PHq)5+1dGpH%Qt=~IZeNa;Rc5R==!EUQ=L0^_h-ExntAxDQv zFD{nF2{YrcxHSRL#3rR4%(pDWdLCo2$ zz2|2`g2y!37y=x#MEe(N%WA^s4SEXm?Y}aWp 
zlg`8pE$c{(KpN;a$km?hl;GZ6N-i(g>2O;rg`c#k{&d>9;qE16Z2-;?VYZoBo)LKHhAGv6 z3D5qjSXx=zR@alkVs^=MiGf{c7fmJn!6~wxV{v4ti@o#@O>&BYG*?1Z3!j4HyG-++ f=J8+5$%L4t*_e*@{)d2}1fi|2yC+4>=E8pf;Zl4@ literal 0 HcmV?d00001 diff --git a/docs/media/packages/packages-dependencies-udp-tracker.png b/docs/media/packages/packages-dependencies-udp-tracker.png new file mode 100644 index 0000000000000000000000000000000000000000..57987a31eabf33ac27ed1d51b1b659c56af28018 GIT binary patch literal 33537 zcmce;c|28X`#-)^QfbhDPGl%eGBnCenp23(vxqX53?X(QAyGnQEJ+zM+vZIfQkjQr z+mIoSDWSdD#_x46bcW|V&-3~IzMt36ALq39TI*i-aNY0geZ8;i-rhJ84!fHF&P~_`&XM;-{9%^o;5w2z~HGb|X!q7uG#HcSzdw-6gBTEamGRWgz zRj`U_vw4U!OSmV;hj&X7JlUsco z-duE{Hg?q1)u})qFv85w#QKf+zd|-zGr&KR%Zjzp=fV9WZ0P^a=kgfge}!ECZs6wpa$2p36y1@MSe6G@!`EPi5u#2`PG_Z?bxQ-StN{V}(0)!d74uZ*n7< z{Pg-7)!$uhoSix30&EyC$xpSTSTZA7EimKF!?q#&s>;fpf#nt=@>1*=;T4RkkgYT4 zbZX+|tK^)4Ym7oL?83BXT-JUkqdROwxA-Y58hej8N^-8{kjQ`CbLK%=SB+VH_pW78 zeCFjUv96@yL@@>|BEvm{crv=iQBtVI@#amj&8t7IljibgXR>}EN(`5*?XMRr4-E~C zkB?`0N2k*($2XJemb({y`)0DjP?Ym7OR8e@lO)d;2LpTgw$Ab1w9t}PU48vw+EIq7 z6L!9nB_8sxS7FJeYdaHTR@U2y=4EAl=v~YCpvWM3%)(rftL&@5)GIAk3Tpv}wUt${ ztJ~V;?k;K>SJMV5j{7$2=<52N+}_?`^T8-D$O%@p@W|k3XYm03Ct!8Ya2eCL(b3V_ z*#aLQl?1h*m6gzil9CgR{XU)vcYJ+)W7M0HBWm}#jP58+)S4Y~ z3QF6&xA<^oqlHq_by3dJx99U-3CK9|-qPR{a~prxGD=KpcFz1(QBe`sXF}=T6)5Pg zy4keCR`rQu<_1keVNbE7-y|tKq>7z^0)sDpr1#EAtWq3KtFq~cmwz9fz^m~}jFluK zEG(R6T@%C3SNe&6ti6=%!A?qHh`PGU9mA@ci%PtG**3nA3*38`N3+cI&=yM#>c`B_ zlZVMhik`PhIQep440U!IO&|U6ZIh>NnRjAN!U66IfqcG$6x|hu`!);QJup(Io}_0v zadA^mpxX*h%kRU(Aqv;`cQo5P^6l!5y|7t?a(!e2iR%A4z&^2x&B3vv3zjadfmzKy zHz()%t!)@)Nv+R0@`7<~(f4+-YkgkRYicKWn9rsTe3Dy{ za^H6DD5vBnmDIh>qv7E{49kDGOl180ev|Bxc2nr2AlayQWJ$h7`n!!APJNJJy%rH# zwTp5>#A~qJ#r_f5R{4qX^Rh!+{#}QH?MFJ9uDMUMy7?+LKRfj}(sH{0+1z9ahlA1i zyo{e~Hb&&L!)88AW=r$MGvcDbq@cmJ1#4V+)`}>_`3f`@No@fu|{WxxUp!}WBV7$+1 z@-6m*;T9eiTjH0O_XJ7>OmF{aY8xt$m_hm0ukIJDOMNQ!`7Y@ovK`K>vqB21+gCp? 
z?HcLa;qf$PuLf(kZM@v04WEqSmD2`0b2q;--Vr6&7);Xop{d!FvXh(alD>OD&r&qf z;!AID??BRk&f#*{MpOr#APtki7QUb5B6`#?9UYzGE5-DBx>cy>6YGYb!Iix|Jtun* za;-`EnYuB#I`z&J*}`iOojKVcU&Reg^+*9D!O0(y~~Nk={wYjeD<4YJJ4 zN}8zj9vId;v1vuX@mb|-7}lhctFPr6B;dopj73wKCB(pg|0%=JCnd&bijBnCnDkdy zrfOHpXlxnpZYNpy)lPJT`kpiy6F7HZPBzt#eQC(_c0b}Kx#06XY4vM&(G#+^+{z50 zDmK@idQ7@BThh>}Bwtr3kb2*p{iJq|i}ue>yNrb0>(W2lh_)DeV@X@})Buxsx&4QxZRabMy2%#~HDM70^Z;midm%J%NGai>iE%*!6< zm+O;`+C^KMh=dNgd_LBx`xT3tv9E5^qUw1)ozCH4XJ`NN1Hxs1V)$Qnc6Pbd^ynvHcI$JZ+#;Lix(eXa#f?AOnIxs8@}0-Ox8^#i-skkc?#VG$ zmhZTU-{qOi?prjnY2+=MrmK6E^pwT5&#GO$Oy&N8=Y=U34C6ez#wse#w;ZV4CzC(@ z#M7CzR8mZ={8dW->)Z*C1gE!bw8^tU z?BAcX^SFqrYKUvi-Wi`h_|;(C3S97@jOEHOC*|9@F9%8pEw|m@q(#l#=lPUQzH4emu>XJ@6U0+=Wd znX{_V#uizTAVg>zBJ9jWgk9z?zgoLxPzIGmrGO^ON~pbU67V(*$%6 zO1pJUmLz6A;_fGV3-Sw;aqZR3t++qh`nHLE@8>acYl*>Ly%Npcxg~d+)S?+MrCmdU z*lA%Q`X<{KPbxRDo$}`>wtE$9#Uj0I|FzJj9DrsG6_byT-V_-GG$gD_ZM8g*GAQJm zkucq!iW|BJaB}v_qMec*hM1Ope+zLRG zBz=-A>?F7lQhM$toiS0*tuAe&ufrY-mf_Iz0y6noxemi^Teog4;TU^gN_L&4_tLXj zU!{oBronozqXSo&SK7L|>HgQ^Wc znqNWpj1QcVVqOdVke6ayr4q);O_Pf-G=|l``#0j=hUc>Kl6&LzfCQAgCzic0l zjbKcFCbJStzXOeC)(wwaTU&F4+tjQg5{YrzN@8vsFf-5S*j;YP7-8rf&~@YM_VzoS zqE?M?B(x95TQOi3auAjX?+>l$RP||Hr(9*GqWE#udDgwR$&B8qf*4_qHxY|^OlDFY z{@sBzr5jsEpXl;GLY^R!W`@?eO}wvQnuDo46j|l=!<~E3rQ5Qs-CjS`e>zpHe$U;P z%Fu%4t;bGFu;liu@m9Bf)|}e4YgbuWS*XQyugn~MaL{6`h#UU6xD*r>9i*>|%-%hS z8mMv^69hvS4i09*`uBw^s3kf1C`aN$Z%;Ia$zyI5v20Wg1J!-hRZ3z~&jXNd+7R?p< z2!C%(8Q?`%;A&8)LTodv*~R0|&{~)$YU}|#t0V@y2#br(XT&Zlz$HRI>QPV~YD*j* zko}fUY?4>x<&23~iX}4)32yni2YU7T360J9OeQ1dEa&XI+~)g=YWs>S)>6{auOgOU zg;t#0+_4dIDrg`dOZwHC0|q80vg-OIB_;X7mSTl#?Zm@-rq}NmMB~oB8|FxL%D?-0 z85Z>t`t0S!)GP8Z13jvVcPQ^=KI>9Bd36lq7XO%`%wO92Z;0oHGlA*e5hUAxKm(g3sZ!T3(cz1Ry2b7DclZI#vcpwC~F6s!zL@Dzxm-} zYlR)+aptG+W_qo-yaO{>rHsvz<(<@_qbEOmzNj7fLW|UY}e3HLJtb+rxL#m z-0Wji@|<6UGL0xYOe-||Ut<158f*}yd6^)+88)|U$sf{z^U)yGH4TNl5APX0_{$MJM@S1PPy?Tr&YN>%XdXiQ{l$$Kt(NDR$SeXeQv8&zh)sX 
zzVV+Z#BLCZN;H~0`1IW5jB9`PU=wMELW+raC+*$7a>iBv1LI*dH+BJ*43i=0vlK{Bvmfk}=!iNnFMRn6M6Q=cy%91@IZ7_z?=FS`c6 z$n?~b5yzC5uu}gPQ~nG;Q6mw`ii8lf;qVP~sO@g7?`ZSF<9d%2a z@s0TQ%fKERhC?46B^9I?3E}wIpaHYTBaSY%wt=8-NGxthr8Y-N*Uak~I)T~m(UQ@+mnlMU1FSMnA!gh+>3#EsncV(DB?KE#&zwCUN1y3}V4(@)q@-j9|APqR{^eDa`(<5O=RcdX5xwEXaKT{T zhjp7tH|*>VbFLK@5_+4Il*Un2Rh1@)KD>RK+M|B%qmkUEac5W8f~>NvTfxe#T$Kj< zHm~)fS6<94qEe}o7JE7PNCTo;A~7Yg+Z1wt?CTOCo6)__9MF50vId7aW^0B#jnA5? zJUe%)_=f80>6KcJmc(0*vu=z;94ma(B&DQLqktobr0$9&uIa65&fm&D3om1P|3-Bv z4b7R%>CA!6W8Y%aCoXoERefh=i|BAUyxL5feb*8!Xn^YE@9z&jD>-S^(JF|xjV6?v zF7Ml%XYkvb!`{lAf~2E)+pbrb?Wg1v6jX|Q(z=ZJQqG~B+F|9jzD$?w)v}xo9Sl1R zOPyJ_7>7#AMu{Pu_}1C1jT#Lx;`-T{%g**f%Pw1AQ?L0zQU{*b-~)|%9WeociP0(8{|(n_j>N==-P3c&N0TElC*{Q zZF0e7e_@TM@%vIX>n3vdj*g}#lWUzVC7!CvlghmEhEyI}W;bvWdW^M?qD_38##YF$ zS+d96f;CPXY_n@blw!~qV?RRcu)%GH^^=OZy_20?kR^$KhHPwfEx_Umtwer0wW2ds z)6?1V0OiEB0N8^jxs`3FQ}*?qU+(W9AXyvZ&uErl?KV=vPHEfqHoSw6729Gi9&V-j zWO8yKsdtP%UND{??#~I%K8Z9tDbY@^R2z397hwJS5dh9A-1Yf%$*-&nh_V-p>ttPI z!7Pu}4|;E;DMy~`%p@q0)6N %L$BHdvu)tvxrr4xsjo7)fv~ykt>=m{K3+nOPuqe=~$VWzBJpW z;$gSBDJ$Ys>#(cCuIQs}J-yamUw`lGyF!@)4EV~w6f;XETDQAS_Qi%;&rM8B{P`J-|6&d!wKafJTf)pDVc zq^(O3-3}U4-w*r+0aJnDfZq6WBPs`c=0TsI`y45%XAJ=7ZA)1cCUc#(pGN+?>l0T_}`4ou#qe)!NMZs1)w6g=>b#P1Bxw_?`$XKW@j|kK8Th z_#6TTvIUGsBD6xe%JbDdTw3pEtM7@%69!F}!2Rs6T)}%ISJ%6=$Y;H&fMq_KjT>K+ z@mHVq{t{JjIW0d+ZR8II{>7RGi*Q9ca_%dWjM)bl`_4r|u4}2r5_rCdjgJ?-zUlU@ zH-Dnt14o=)TwEL-MWv(?ourX^5E=gY?KHj0ON(r{VURs$>^BN+FL(Yv}X^=o2N z=V7Wa*#qGUv6UZ-YZp;+Fx&-eoONd2t?0b>Lnaor9Lz-^2KtMLX+6LJ zAqEZe|DxG>p0&^>pb4T;^1yw+A^wYEj`?9jucEGx(RKd(hL3No7?MAhmzR%>T*H?F zp;5~X@dkM44c+A}bZe0#5LCs(AskV9gocRtv>IBXMMhA_KR?8Ia;$LzA0M=Z_;EbU z4&E0Ovly_VCBXXv15bFdFmS{|2&(e~?-vPI=pDNW{i(iwe(GS>ViVrPtJH}J(a0)A z1qEfrWrSe!<^d0l)(zJgl6TM2NmoeOgqdw^XA5t05RPqcT6hz_sZq=j4|WH0{x5w3 zx;1-X-TV?QlJ|t?&@HsN_)R-tpnW$W&Po^qQyW~0yIC~)r&Yc1rR`t8T(sJt_}u(& z{L2mG*K({xD}e&Zj}5JD*IAb)@9^^TSTJPSZ@?4hrw)HLV8#6W{CdkmgM&UNt{CoE zoOIY1gJ%sdXkB5S=Kj-n7mKy`70tmD|J(*1q5O|7Wh=x6qRoL@^!(17Ub$?y;`}}x 
zqqmfJ%buCvrZ6=iKAc7D84LLQoJa(oh-n?2ztgL&4Uv2^Mg(S9Ij55|bJIWk8<_`8 z5Qd!wM!Wcy`3()5zF#sXLbh&U5&??QjGu?S^4I-cZ8JFynVnbj-)~WfkO;+7mv80fFgW?V`YT#N7G>@NJTJ`IcVp5J<4rRQT+(6FB&F+%nXbXNA8 zKpq%rxWcLT?7Jb{TdARsnMIx5_0eps3hxBZSEO92!eC$CteyTWpGE&uxl2PUYiFa*cP>w+UgNW0sv+gq?4lk>9%FL;7yyG>}G2s?_H0vT$*6iOe0z zwQkvt*&&92t3U+zfczWY+!tqt`}+FS=D4`IWu&EPPWf69tFc+d=`;@y=H17@zP?Hg z2@)o$PEus>!Z2$5bE4#sQ2-zk_&H<^SR0s6)5jX2PRI<^92oYg~u2L4i@wFWc`ty>B= zNre?|x*@(5%^r;{xeOWQY0D5VP)wt^6bwLR!dVw42Ud6*G+3N0#Qp59`soav=UIs+ z0g&kfuUR` ziW&a?#l@<2UBWH665ufp9ZFJe4h%Gi5oI_0YS9A}YY??ns+N5Hx@g#V$Bm^Urv{eb z_QB)90?0kz3YZHIxHAIa5&_>N&|?O^1Cdo3fBszC_t)#F&xl5^09wkke!&W4SHOt9 z|MF6NwqPSK4h3;>jDVD=4U3G(0}bfjg6ZI0Ilf?QmOsIxCwsxoj~4cSdJRaFg1u= z(uoFJhR-^T`SD|<|h2-CdgH;!B@ZNyNsN8qN3b30nD2{go9B!l5(@1!G1bu0Nx>Z;h7kf^^B7U8#zoDiSGv z+*8?i4@@s(;J7*~*0j#`hx^?4+^qg1+c>p|N;{d;QtQ{&-%ck!b2DqrcAJcKn}mdj zwtbs@;BB2$RN2;3(qRX7TXpY;34OFfe<=H_HJY?SvGN#kEL+{9B&FNb# zirLDFOqkXuv8Ez%2?=Iico&62NlQB@T>e_qm!iE`)FqM5wHCF~(yTp__wfkB%~q=; z5+`3qFoQ&FyU!@2n?|Emzm43Mo%uGOH7yYu@M=8P#b~ji!iSj3rVnBFafnM(#322J zRf}9R+|+Aeyi&d2+eV|gx$W7xo&5n`0tEyrlY8@_y=~7bE?35DyeBeY7mLI;Kb;8w zy3VKmP0&!{=Bpnv*Xoek*{vI19UTddbc<*h9uK!*D71oz_O39WVuf*deFEH=73|Wl z8SsQX+3u=R-nZV!^P;kl1`%V5F6?7FY;NxSDRK=SAv3;pIiU-h8ZsVdp2 zdqn;toNJ+EtiOF21kJTxD5^8NOcY^~Ut!Bh#p$ zV(mQ?)x(sI_m3YRy&XUM92EJh_M)+dg#f?D^)4&lb=>Rmeqv8fUS3{7L0f|v)GsKU z%Wd{AgNtuWi@1=}Qg)my-COg-(!{U=a?_De#3n$4 zM(%Ti?CH3{-qn?hC~(g~Oh%^auGS`|Dz}^)pG9M!?ueEp`k_|WG+Hp+vV&%ESN_Po zBbRqYn_p%uWN>QkEp!kMKbv(mFx>20m2zX)oH0dMYL635XE()SB;oS4&xs1}8_!uZ zJnfhqIYQo(UJit@f6rh_jPTD@WjV>T!qnM@_PB89%;BU5>iJP?5-3dH?VEO7-_?5U#NBe^Q7EqfncCHL7ShI$ zh;C5VhnfqayJaA3J&@T2(&8kZLk=wGYYdT|n;x@x4R;{{Y$SHS$WWJh(%ZL?OXoC{ zdg<;unLu6-N9SNecKZG-WU(UG7l)t_5 z00cuP>BuiO26?%;Q{O>13Lkcf?ceH|Gkbp|gftChJh$ei+LN448FqN;P5ihcq+jDW z)^naFcUVH}G6z!QT*eyoXU#HqOLV_=+L7DOXxFjBc2+OC#_YgTW8u$Z#H1W~!vmgO zZnGDJEBDRP%Dt=_-uz^=u#|Z@)p5mn9sVbr)#ZfBQ)Yj7$qc$QnJ!kq%mxdSrboNG zU)prl)zv}W%WPd>U?6;VU`lzmPBj$W2lXHiQVp5iUEtsHu@_jFG+XRA1vw<=_OY8z 
zeGAcND|ps!Z8-Jr!{PaIo8d=;PF}7v1NyNMOA481Mng#Fq&(w3_m1&b+B_3(5NoPC zq8Ru>wO59|(f?g}Rh;c}QCFy@DOc=RoxJb#bbsxBcGFx(<@BFA1Kr)mGw*fszQmQO zx{o-98Iulubdk1h*EE+Rm7t+a_p6iQ3{NS3(yJ)TCqV`oh9e z0y56`w4IXJTTB{8=E%Rjbt2@d_Axi9%?1=a$snp}-M`LkSt{3eZQteuMYl^k_(|t2 zEG~By`!kzsNrmn%vu|R))B8?$m3eLt=~*Nnmi}b#Mh>HEMb*=9K6HwGp0s_gVVsqf zwL9$YHyPc$sWi7H|6T>illaD-@io0W()G?M$#aB#*zVdj!yQ}J-p(M#rD>>aS-l)m zE1ngrk|sbvkGM79|Bm-NSv_?G%bG3J{xPVlme2O!;GUNK(yyt`#z|Ydo3%qVxBPA# zLKSK&kj8w-Y8^U>Hb<}9UvgdR`(UvbAbn>8|?IhhfMW3SD0JxP#5KdjDRV5GVWNr5j~ov zOqi>a&HARubuIDJ_K8LsBU++dZC|)cuW*kvzDi|%lR6eF_cFt^b$n2O-)-fM4)^n= z-nRFkz?67vlV^=ci!jEyf~dC!(^S61&TEG81FM+a zi)v2~^z>Ygk3QGpm_l2JF`+=t=y;Hf9eMguzT^2$8&S@+UY>!Gj)yDmb|r|fan5{4 z3VO%-*oG&a7?(Npkbh3u;k|Onr?q!Y);=G3Ag|9XtX3|vjnZt}*w$R%xlV7^)OF_L z3b^t`XLnyo*`4Jk-B|VJ)Wqdmg!6i5)MEFK+)9NE+Sr#F6Jei^=PW9C zRIMjvEYj+_(54V&J-02iJEWtfN7)o#v(3HE&XOH{@uWCWGl;dh|L4#8Aa7wd{i@4l zJ~d_+MXfeHxaiAaOf=;#-a+}|b=3FLTbW`@M*|&~+_Y~FVv{7VOvh>eZ!Oc?TaE7y zO0e%L&=(nQh*vkXgAY}bu=$V>AA24n~YwbdeiK)qLQP?nEhgs zl9EnEUzio|kJ+flFLCs~SIr}N0t3_8nb4jKt#SI2iOti81f~wnHCQUX?_`o!d|z2r zMVzF#3V5=zy(*X9?00hHxd}0WTT&s{Ch2_TfI+*abNv01r5B#B(rV)2x;jenvgY!8 zf0_NPzK+f&`pfO0r(4&Q5-j9Zf%#_O121gVmtKCLS(E*kZJ}TlBIAb z=T_|X>7*|bpO=K#?pqaRbs>%JGCMKx#EGrY)m$kLsA!k=<#+re-4r?-rYhP*QzKIB z)}CxLKJeyusX@axv<%{%5TXZ59wAZPy)*GEOQCI;1%p!fF)M2^rtLr*pJk-xf6F@i zV@akS&}O+6koS>_i0K@^8z_kJUr!!N{2;cuW@spp{Sr=cc{;=$N8H+(n-FL2AD^0W zouknj$*hd{S&gvp@R*1dSfU*~_m%VhIxB1IOQFKl?@Jka+5(j z7NVG_-CRpj+Y=rsbPl^ev81~~xlu}v-G`OxHf!w*J0mw-s($Ju#2J>ENqpzEL|x#7 z_*<@f6HC-GRY|2K+8yuP*FmZ(ER<})R+WMZC7gt?^p>LK+D{$VvR!&mZ~I(r5V zwm0BFS>${-zMW`pl-ujYMFfY$KN|6Q<7W$N^sFPR&6i*@K;%Q>Ny zC_^j#lF3Vbo{?v%Uu9(lxh<&5oAYv;@Zw*d6!`v%T!kO;#a%bs>7n-7A+>`o_ERK! 
zD2bj+n##539$<*#b;{S<%kFmbX?m}Lur%W&u(Qo)&lTV=F`LL1s*i3RO3b@8_pqHt z9qcKfCR)#o_`9`?_j-YfmoACP@_S8ZdcVB>b@2^$aLy) zvpWj0V-pj*JC=NeP#A^XzMImCJRT6jn#_x_xY7XP0>ttL><}^|wPG6-b3ja9D0Gh) zWfjMORlGcPYzBH7TL(3Nrv);J%2i96tQR8k(?t$j@rRL8hGdI7!{Q4MqsF3n8Qbk3 za3|=ZT%||Usbi}O26!OY&qXQF6-WK=Zt8TyoDOyUV@k08RT~b|J%42$CMwfMq zE)t{w+MJqc(j)f3oAzes3(VIFs~=_Th&3&<%0d zA(0|q#oK2iUIoiguSc9a2tGLfYj;{^T4Uc0EsElKUpL=|s2tNO`IjLhd3YkqgXa;} z3?z(?x~cJ(K#{!eQyrFfE>A5b1+p^p=>@a2yZX7ml8I(%kZXPw@eZ;<_;@jbi1~z3 zf4IUf>Lx-yo^Lcu>md^1fD@iQ!v6LN>TpI;egx9VaekLpK*oeXW4noR4XP~G4W;{` zl<>5yGqai02pemi)>4e{5*7l=GB4~;1Rdiv2&iyL?X1svNS8LCODGas`^;z~RA%20 z4+o7MWi_`6)xU_V$7?NNG=q^HS&E9w2 zw6B8DmMyv->eRw;jdjQn#2zXxrlOomI-Y-xs(_$XS<&zhIq=%eON3ykS3e`=X}74v z3~Lr|F^fUPYbqM27RAA~8{#N0&eV0fCRTQ2^vQ))4zO7f` zh0fn$WLx4oh7zGx7PluNZh_(yY&A!&5+$@$>j403Ms^MDl1M6d*W zDvJakkeM3I$w6Tiwr4^}PH=7W-8@IX^Dv!O?< zQQd-sgoN)IGa|)Og@)z{l~!PK;DGlOTMP^iCY-;(7}Rf{tN9_wNl0Q*VOhZoX?xdNF>QG*_#wqqLA_U$At<;& z5H)rW6RUY43^B3o$k4E>K)3*XiMsk9|2CQ27W1}D~jG}%W8**g}wX(LXb#Gpj~xz-iriM zrr)o`M&tbj_-~E}^H+$4kuv>m_P4L+Q}ZZ8jigyK;YBM&sxB|625>OIi7gk|C2oHJ z__Rh)jr^h9I0a4uo|W0^7f?2xt^+_+Oi1C%%3aC+4*MHXR-(f7Cf4f zOg@~k(K`o<7nJrv^qzO?&=&ZrAL`Q}e&3c`V|K&V_TU!3yYD-ovw2^BcXupH$VeBx zD58+J!-~aMfxf2m>xg?6xz>xtK^#us>80kH5(T6^9OYXKu94(pw-Nuby-=`U34r%T5kB+gNfA3cp4tC&ib`dgcrGgnk>}Ah)|^!&ubx zdl(n_;@H$J+Hd#t1tmY1Y-@NMLD}5)&0+nK4>C=0Fwtx5(*rLoOhesiTqT$GLkN$#_v zMW88<`6W+Fp$#rYwj4)33=6GQrUZF|;fR7zjtEV>Gk$W}TY(9w}+2X-a(*f4O z`XOnl%l195a3VC~ylvy=9!ZHv>XhIIwrlIm>xjRj2|346$!Fd7Vc^t3qzrNBIEuAA$7A%sic?K&Jv)U(0 zJ>yHguUw$Txa4Js4oz>Nd+zA(Dx;fo9E`8cpZ;*|QI#~2{1^>r@@Ji*d0Us7k@H@v z0AZ%5^EgyWU4pYWprL|#=#yOzWuoVlQ-AK7o$6#>4Z$TCI+*fm6CyW7}Rlut-w-z$iUbqG5thbZF4`-L=DpI6gm zBGUbYH(wpQ@|Zl_4#MxQ^472A>eq??0q=6KlII z_IgzJ-2it9H7n9hFDBMum<6AZGwHHt(iXr=pb4Ezq23>xf@)is)t3<^uR5I%l&a3D zR6H*ys{s_-qk@*0J%vl^_ZnVIPi##jQb+X>%^0|~7af-O*S~XVu@+zsyxZ%RPj2Kx zVic&qMywpeqO2fB7YU7tV4Si%@bpG*>-kIZf-$)r^*cb8)E;?m zCz9F0jdGM~FA^0VDDV|#OA~|~-f2YRR>Hx1E8^acPDWUip&?6Bd%KPEq8kV(C6gfz 
z541@M5}-6E=OA2LiU>Tx*ytVPe9U8Ria>>@QD(+XJ51$HCp;f(eV={ANP?kSV9ehjA%-NFi735E1AQ{Gj+-2w^ONMIjrcjY1 zYPLVm%)%M3;&;y1v)o?{1uCfQFj1)JQV<+FfhStgiH9HmL@2Azf+P&Is}hQMpg8ka z>N{QeJw-b<7O!Xd#n8az4#2R0@t}eUl#}@%kizB`sBM7LCkU?QNn>LVN+V;hga6(0 z$-(j0Zy$q1>LNol5LExRWG*saqS2C1zG}Hk`u5EMya#OdVGZ~5G#FRyR62B3^m;e5 zQhMbPUD=%53H;613)4{)PMLg+0TpG&Gr>3`uF2yuL z7Bk__R@Pew$9v%D8t}FGJU1@dz>dWe_={ZjUz6{@viA6;PZ0vT1^XelPhgRQGkI`; z5Q@VR!JmCH)(+iht=hc@Mp$?mCO`)&rR%pRC~xv?7s8<4f`6MiBjIk!54M1Kknx>7 zRDVqpjD{|bBBOV^%&bj5-8?_&iy{H=Z#?>kt*u#4JkJjj>^+m30p-^CiEmYkzxN zZ5ZQO7$1gx&^`?J5{$=magH!CpuYG7 zVIQ8tUXdDUER9eG_kbl!<5QC8xAi3#d^EVV@JKqLM-UT)0M!_xSXpMXwk6d@|GqW6 zD}bEP6}$=z--5$Q1OX_i2P;~vu9$#?*Gn!2gK zRbB*$c@x4LS~Ma2Vvs%pXW(4sW@lWj_BuoDA7FT> z9)w&_U#+&k>d{Qgs(>RSY{rmJWIb_VY*wt=Y`C3NMLp4jlAd!T+;ecg?(ua;$BB;j zUWt(}pan>$-i)my4_zcTjdg9j^ifCOhBjE`HalkT-|gxeIRa+{u3vvNFswkBVX;1{ z=JNtsm?%`1-nj92#P{5VO_hok>6`Ca%xsbp7az5};W;BCed5BzaFaJG^RRQ}gLD^l z)4QYnzePDAxq0GrCZqA2p1tZC&hc8Qg3X95K!FlG(tVsTGgj`+BKPAVKVF@+SjaR8 zMN}Rvn})zg)rf4khUvlVAxN#t8I`>m92{%O*9lUT9W#&jBkolUsE`H&zDwY9G1{X9 zu1aa6bn52QV9|?JNl`n2-hd@yXa0nuK|Z5Gz{u4C9cx~_#hG$-+u0Hd-|5#}-289z z8~F6n-|WRv4-A4W?$kvj5J1fMr!t;}@Zi4z=wF`dj^~P0Xh!9yNMA&co+w!Rt}a6Q z#SML;3Sh{B3%Q)82}QZ~!ab)lV)9Q^(t^Y?Bq^&9lp#BBC@(+Lzg}&R&pT`Og^DE) zgAPF+btsdduE0ym7XszDuy1y}Phk(p0V3jc$p{PiO;uS=zv>xEe6brw_p9WI4jUu> zSM4NT>op2*U#TD%``e#j^To2*;JVdbZ3cue_&Q$4wRxAT{HuTiO^px3 zF>{)&EjfNn-r=0W_?TPxxI;=DeFux(&bB*hz!f33fQqPR#`w;874c$2V0m|UcX84y z4nd(PwH`^8s@n}T`UEh$=;&yCOLTNRuk`OWK0v1Qb^PYW?v5Y(^k9L|-nMjAK+MU6 z{2iKBX%i$jH@8Tq0EF&NPQ$n7^{kzN-gQDakcq1Y`@Hzk!BtP+qjgeeTsAL*Nm^_@ zc)G~272WachZ5^mu!^0qO;&V-n1o`ZDq|(}cG?S6;_puC1Ne3vVL(FBP>A2lwpoVNBbEJ)aUVi5h(%E(dbWUoVdw0Uq2p@Hc2FgEy}PTG-oK<|CgQs z$8NA3+*e}P7a$)9I*7P9jADea1q_vyl8S*t;pbp5$Z^U_A*b={BZmEQJzt|=>{j;2 zlAXcJq22;I<6`IqR5rAv@caMFE#M>$D1=|xCGfP!;my;yt59wKN6n{8{Q%X--5^}% zOG`qJ+fHtSU2c_g8)P`6KEM4qY`DweWrUlwwDk9}GY#+NeEaM3@@0W%ir1_ zOIkDCQgtB^J#7=kT9L_~y0 zzJd!mL-fRoj@jAS>FK|TWUf#Ky00Rv!TMmCEZ~fhY^a5fCgeNc 
ze4pM7m8)o}s&_J#TWKaQFGP20!xx3pFPR%Lv*m!bi3nb>+PXF)=Kc zj0e@_F5DguxY~Dvzj;gn0rC7(@SUpdq5+R~x)+WAVgq;Vp0#v?3!`u^psmd@q} zp29=8pTKI(Ef;ZWYjEylVEY3&VDb0{NcYY6Vt4zn$e9mICvV(A(S~18zzC_Z9pMOJ z<5~0z1laEQ3}$P{2^p}XP$}Tvx{0N(z&4#$MUjyg*pTvh=?~^1L6j8*@{J+Cfso@o zgxeSo(BS{A3n^zQ+Hj~VCp-iV6*(3xKqF=-al}!lCc*))?8IM*?t?PkhM_#Duf=O@ zF+y>3OWUec!KgzxpEJtWOUhwQ1q%mZ9jE8AY53f3i2r$Mqj_5!zYl5-MRKxRROg z2^iCGzxIpOVjsx!E*y&HIftD09LGmj2|g6u`d`2MqcycLq*DW_!h?5AfHI!CpLf%V z3pEk2$bUk&g)2Xu@_lGJ@XY)7$6$v|`Agl-z)v*RhMwEWI;81r?m**qUSf{Uo?|b; z0+SEpy>MFC?tl6+nIo9Oh|25@%wduZjtCH$(e@~8#4TYFL%>>K_5S0Rs`!svunyha z1;fGvW~>jsi-Y46cya)fb!x~UUPa0SKy7-hN9k(Kd$K0MvaP^t?0dI}#y) z8w5@N|8a{)9f0#LFC*XwX_hQvgRF_MHdg4APE=N8I25}0hWVO%WT1b!0QM{%u3#r2 zj2{*Fwd1=+vR+6HFO;~WF2a2P`f(Jx0{mjZ9~fk51;H{eWx*o&8V9Ix+Pv{V>=a_5 z1QPE64#FvCWkfFgit$oJlJXamI2bVHz0^&MkWmL)iLu&Zek(2X6mMbwA6(_%J`%Ma z1&B(#f84sIg9p?B*4qD3s|mHZtu9Um4nGJOAbau$FvN9VDuCbPI0oOt&l1A-dKWgm zIfZQjb5R}Lua98~&*AU`aqRy`ii0UJBPyn|7&R&}gN6R_5$hgA+VLN>9J>y>@UO5p zen)}{7`dPwgk88sqEV5n*~r^=b)~r`tUR1mtubq>NPU1?IHvR*xID^VuP2m3d=824 zq@0aE0sEaC9YNWdM~0V2o}S}xwuN9AxYRGac&Xkh277xxeEsG4{|B3Pfg=3lowCPp zD71$l8OMe#{b<3HK}W+W17L`{u5<|0DbwV<<5& zaACK@3^>6qM~cw=p;PR1!fu~Nv};fTxz-Q?1lIgEqapFfCnkxr3u!WJ17IBL!~7hb zbC!R{_!ydyT?H!zhm>pP4HE8nX@h|Kb)X+bTaoT^5KaZ*$9A#P7F{$op#_0^-uatD z7Or`3H2=1hFeBg%FC#k-cwpfRN(@?wK`Kpcwu0>sH?3!FZ4!ZgO=P|r1>2BD3tl*` zgk_F^LU3KVuUUJ)OdN>4{NFNicBB)dOh)zd88BM0!$teouphun;QPV%?9D_gIAvq@ zThez7qpDnShM!ASPwx$fS1)upp<)Q~-$>YIFafFr9fY_U;>Z&cNf6fUUbS#A3gbx; zA>yD@!&SXuext`}J0hi95UJn`yE}OhbUlmXe{cYUr&VFrY=0#P89NT200BsS5h%>% zulfKCzx^+4B8O~oVfUX}>c5UJF!|NIfR{71#m~V{oMN&R_>Cc;3EuMV8MN<9C7>^W zZ%xJr;FhLm^G-Wa7ww9N)qy*#N9V8OWY}y6S?wf>6|fC;ZI6C!wgqnd&lh}FqZv@f zC0TH|^6ylinbcl6jL-dF2|=^={}NM}XUMPuK-Dv{^I1UyyV5DB)sN(QY#*Gk{a2>o z8!UbrCMU7;3xDC&AXf_`Fp?7&7Q-(7Ui7u4o-@ea3qr#GVsK!G@Q{b~59;vR7XlOP z&3}QCg}_AZit_*{HVe@d3LL>t0DQvl-Tn)mn5PYYiZgMx^P6Vq^73Eq5YP6O~f4Bs0uAjgt*Z|+NXPt2tW8T 
zWPc1Qp=D+nIh*Ar=HO_be=}F}-iz1Fe8KPu=ZeSkqibHb4<8ZQNQd8HI+yj4wC2@Os0e{Gduf#?ja4MhWFhw=n^t0? zW2|jiJv?5bJ+ywEC0R`#MMT%T2W>SdW`{R3QPso>*_2eR4VCb1s{ z@H2!NxM#x-JwZGgBP_?Q1*6%ntW$9$OBGJgfx4^ZQ%_PBCdxU5m<*fCvwFT zm%ZZ)%}zNveY+il#borbU{hyZW^cTL&kufcm`aURbUpXpv!->Y2lOBn?*@GVZ|}VN zMDx3@PEP9@&3qcGvs|pw7MSwsIu15(>X5KS98<4LEZliG`&N6W`5cx;Bklr`YUcB8 zH%mZa_I2Zh5(Sq0$JJZtv1~6joKn_dsITv|ZJflrKVkuf>8yxy=4kVNHCKcyEfg{z zlXLKLch5RAgOh`78M^7Ucv@M~WOJTp{tm0G5qi;hw2^Zf=UJ=L*p;ODIZSb3o>A_x zM(R!PwNKChGj~}Ul7*-_&Qxwz))fK&2MY3Ct1nuXGt-ZwRY#A?>po~z7F|;8nx!`0 zQ`D?iG-WDlR9olu(Op7J@fR7*hK7dM-L<$4v2f{L`~LBE*{;QN817f)><61uvWAs< z?hf0$Y5jg^yT|3{W;2}+=KGoE(2{p)5Vdi0L}Wc z>M4K6NXqvngwtfwVp!64`Jepbi#s{9f6jXnwO>B6ZrDprQ}HvxfYIk0yB8-vrO&87wfSmj zQrafd(loNW#egtEK#h}d+nlF}f$*yPzWSX(km}Jj`f7I#JsNW=3k)0S$JkX>Rqf%E zO_!v^y6eRe?jgQxO^}1N}?vSs2@!|@`Y=^(&P7BSYkp~u52bdbV+enJbAKhv?xy3}b zYjvbaI%{pOZfpO}eoJN~U1nWb<8o6Bo#}^r?m)%mzeK$FWyeE!v83XT9g0S#QR> zUEe%E%xr`RbZm^-LB#%%bZi07<~I(aqoYhxy@SHecDfRx1>8iuG?WmZtVD7 zB%-=LWp}*TWgHY!*Xc9zDKz%vl3O=zSkt52WJTY#ZHT<~B>wBcEAlNN0uwtUR#$5t z+)+~;-$_p8`6HSk+vaJ%Mlp>yiM85-GpZV2z81A7Hye0|YY}8R(XL10eXdW``>lAd z##vljmx9Cx{0S4_0kB^9AJz(rT13=Za69K0Ka`}b?vtFGZ=|n1d$v1?szf_LF&e39 zz^_cdThqDceb4j2YcX>~4wqBh+itUKYCr>mH8?&wTFpezS*1327xCBeD-RA2FWkOa zNvXE0J?;IgH1i8z)gx!E(##MrfOwJkKOATkH_J|t4M_}5$0M)dte%{IGi0Pgf1}ht zUe2^(ylcDq%z~+5H;Y+;Xnop5TDlj5D01kY-)u<&2L=y%#&=~nRL4}b-dH4a9WG3A z&n{5tO<|uPK()hdpDNYzWz3D;n@cGJ$!+?SxgTFuLMXN7*Tzj)dJiolOXpu6$Cb<) zKFxLbhKjw+dugw+8lw?1;tl7H+&_m`$q2G|L^1H4Sk5~5lZGgl72O` z?nm<1q#*8=sBX@3dk>GK+88&D!5kZh+$vo7DtNq|N9;HVC4{d(Hs;Yr(YIn=3+_!~ ze@^^c8VDz%?SuRA?kW=JAj;3J!8sP!RnLf48{H7cbvNX742=8qX4iKCaC4hcq-Q4SDNB7%`lvroZ*lmZd*s%< zy(${(#!t6mKTg^0&eiaSC@$0)m7=aak7rMwT1tN8nOu@&`ZF-HmbTMOci{8Nk-1>@ zpBL0JeFPTm&DGd%5FD!tp%7y~*#T*sXhX0YJq)He&Zu+AOIRHq3yIHRD>W9!Iy~N= zS@(FHbbTGq&+!#^|6x1|WC>%PJe_9-k~U-aTd?TQk$0AhOwRL#(RbrTFBJukVUIz3 zB>~fW+S$oTtFPU$F+XC;8DX)erV2hT*3)Rl(5J6l_C^Jf4HmF%v~P-yk}aTZl9N4( zT1Qz~23Ip*c1uNni%G9Fk0eG(tMB9SJ5)5{D|+JGoo#&9|Aw?A&Zh((5Law8Ln)%) 
ztk*7lr6?|Pi3c({o|i*~E)}8Z5$kDna}~l*Ta8L)a}*a%rEV_ok7sTgX%0o*jfC?z ze$jF*cTKI5e-$q@T%zRKcXPPfj^=-jww^ z1xh*(u$X#-q_^aksFftDHokU>gKHr#V1Yswgf#Nw*U{^tv&*Sy|1HCEACQeSe&uGa)qvMZsD7J+`yHF(K@EO4Y_ z&kMVbk$pYN2I4+##V>kFZ(VE(eOhhaH=wfBDB%vqsk*bZW8rGew^ih+a;BGPMn2mW z_2C6Q)b8Xu2f9;LMa-Ht4Tp@gW&P%ab|E1E6{++MN61EP5%ROb?DPHD&9|=felB|O z!6wD(tf!}Vxe<;sHasMRW5N)nu!xJ8{Y2j>Kyj8%wt1=D0B1?bjjZPEKF!(wqy^`# z91Ev6Z`XUQ6yA+nYsd=|&8TTIl#h&BD90jZ4}w`{H}oC+eFLH_hP_-y+KVrwi!BK7 zbqe3y(y!BFkz(t!_qN)2YR~w1--7bvWNCeP>h#it??NqnAsR8x0fbkwebL>+w%F(5 z>^!%8g_l$KoTq5F*6klnTz!tcDn)OKDF`WeFI|pSZ(tRINqtxip`5>AQ^p_g(V%u6 z@>1vaC)Bv?t8!fJ-jQ@vwKKx-@bxJtv&EMAp?~1k(HHU)$%SWCoULvQm>#*+|BKLR zkeg8#F$0qkIgP?T=iH_cb$QWHJg6wX$!I@)$C8e;me# z(XHv3kM22SDiaBrV=k-lkrmY|7KpHBD3Ze)k3)irjVh~2t`5KFQUaaGw6}>vMFz>A zRc{@L;jQ-I5bvVAL;j$y=G9%xtXJZ92xI6hgi|pLhccmAGBA&j5YYL2W8-t=8(KxN zkth5Cw1>zJu6NLTdIM`ZWMlv;VHDCsrRMWRr(@ACSlv61^%YO1%MR@1?vk^IG9U?) zCvmTja(tLlI}$_WSWx72L)@H)zs>%bcMElqv5O#gbYIC_$G1XGZM`HTG0uI2|cUNh$1t+EqQvQ?q z3v*n3-nO0D*L7!=f5cCR7yKeZJDT>JVBq&yBU@TYTCgBh8D=U$-Qqn9UdRIGpD$)e zpg8G%LjuXNz@t;qI{)Isn)y*1YKd#&`85okg2dtbZ+F3mq3%HFB&+%4Dnz@Vri;^a zgMaLgo%Y8`lQmzziV^u2hyD56f>|LIcZ2>o$lxLLpjhun)=X{Z3+@M%(as_*}>c{ zh!~f6MWg-{BAT>ygofFD@Bl(}RLQu_UEgzj{mdl@<(SoKeG|;}!O0e3^%78~_q za3HmH8Z@Or6u_nuB?M-BEG9I0`gUA!t&mQ-SSV(Rwk-4`+D<1zW1(1o}b6Of$Iuh+l{}yD2{$E z$ToK=mFalwhpoF=sL1LMnSauv1=$bZ#kT7bT2=r3G%A`{UHtt^=JYSK1{aj}MUg1v z69TdfNGkv;k=e@IO$TNn{R?1NNZn-4FF9DQ(O8ZsLUmP?noiizZx8qE9Z7%zKB!>8xl?W%`b>c}n^OlK@YS@0kqfyvq>qc60)}{WAi$sz?Qi1XKNgZoBsMwO+HlUN0`|09proG;=p7XEAy6bLK zD_h(3W7kQ@^vE+@dgJiDAGV=8vpNmA*r|(C|_Aqk+4m*Hmop>13MBtBx}@ zyk^^0`E)oG%*q~3P!Jl5%ZS(!W&0R;QnnxIFjXv+f-FkiUR=!R? 
zv-KmrP>P~nSvzWFec@UTIrnc&Zhn6bvRFWMz%C*mQN!-;-_|4`#ia ziDbo)hodbbA6+<67GT{of4$7|hCJJoa*k`u$c~iHTdu$|?ewI-MAF<=^y4T>q^vY> z5>6NiHBX(Zz~sSa!&e?ASF=ObxKEbPk27-BO>w!c^U8;MnkVXi@Bf~Udj9I)z(7*TX7KoiMHl8^pVhwY?6O^ZczpQKh-?VH0+149@&Pk!}8*^0;U0hTC3gIohXbx)Lvot=h7KncNnnD<{@BnNAT`&;>H zmZ4J<;v2h>@mZ;zECV`@6f)Bm<*!7t0UxDL5(~rwPd$ESwE^gjpv2#%9=j@Tv9~=& zU0fVHcX|7<75h#c5z^mQq?8)`M)(Zhi}kg8*-r2F#srz!{r+Tn>^-*YsUxK>(m#)% zI(?h-y_U?`t*eVWy`8;M!*K4N%c*km_59SHb9kdLC!}A$pvoO`-xY|-Kl0p&>RCTa zj{`9YmGV#IliVvH6zG;wQhbRAXNZtSLD*vTk#U^^B z{@U-quNs_}v}8FWKuWDd63-+w2vvf;BTPT`BPRpH$;U_16J(}aa2^aS++8=(4(uMD zwh+`teYcW$rKrHQ?gXjP{9f=(=W_0TE?+SGD|0=+q1-Bfd$?qQ%WPp>8}^+}4Za5$ z!u7y8nTz6)D=&AnZ-I*Zpdi(AAFEBxGO+Nk-}AuVKTi1*!P}Jdu|6KcnR)&HXpOXs z4wtm-Zfx8>q@EpB7{nWD-Iw-tI4Nix*U>THG0ro#Uw$tzbeJo^y9JS-=j>4Y@`ZZs zf4s;4zCoo&QjoOpo1%agxqQL;yqam$%%1RbkI4Czno(tNP_RcS;X2>&vv7Px*%gB0 zL54l|hsz{KCuKKH&n!ZyK&^#Qtv!-7hvE`22~LkZ*KOz;d)rp|+036ie>X5LZ}LD6 zYltNY6`#@eOOM_xs^E^n$!-$bH0AY8#*VsX8djLBTOz>KnXT>k{CekWKI#%6R!eJZ zYg1DpKMLa2_#H$gjsM5v#<73tsdM^qxR2zO%tFKz`R59xd^Q)Q3c|*xgV2)Ybzt(w zkS+bAyyt6XE9cxg@WLqhdAKl66DZ_9Tg*L;^j+|hzYMNmvnhF_Veza_xhhNbh)*kQx4BlPR=A2{ZIwdO}S5r7NqY~J|pNX=UvP} z<%v=xGdt#yd2OeyHLr=DD);d~p9IJ+u&w2O2%MHsdQRM(=^%-U})z(r}1iIplz!Dum3jM1^{XZB}RQ;U35XL9IJ>eOwMSUvF zCr< zVZ+~YbWU^Iip2HK+P~!j4$pz zMNC+lNSt=1*;==^b3-Jix|vfUxBGsd&5-!0asI-xvQW>@zr8FBiNeteD*!Jy@ra2J z#?~M*!UayMNFH2yeKY_bYo&;bbHhE|J{mT2JW^Q*z4;M8hkNj@7)@tyf#vLV9 zCG~!z_^3ltLUmONBJAACQ&F@+Mfpj1xS)7>@^sLoAy*M)RMdfcrwB zc9{*O2ReqWHW=o=*L|cnF#l=RouwDn83wFL+S63b#?F*|yC?R%3)AQ$;-Qiy6=nDf z*nS>Wv-U3O@(V*;Ft!iDvKy-18D54*%g%k`vqkN2X6wC!kA_~nOy$B&-5D)kvXoOhgOdhc6KX?A(r|Zlkwn-@ur-_)< zV*4lp{}S&(r6^s{(09q4HFj!a)SSDA@7_28Xe|R@rT0o}OHx!5tA(43-kP zG~+`CCc+l-ST=!|X??AGOkso_($0{>m?>)QC}SiUM%}KtYS3gc2k7ysgTVkRJSt>i z$s>h@U1G+;@nSP($0RuAm@hlIG3^4(wZ}>u{xNRK29tj^6mOZ}!$v)kKw!fg6?v z^WgZ`AZv=gwvG-Ab!_eRT#X7mTbfAn5EkA7ljP5onJO{i1tvXvRT z&yPczddiF?WOY}cf$6*)g-V}E{_JOsnO!gK$QPLG!!(_DLBKfSj$A2}b^`goSddz1 
zzsy@>66avVGb-vyab6W`DeKv>OUs=fpW_IsrZ;vQWeZSmgmLDhhe9k3LNv5m+!Ifw z>=ZQeIY3d_I~(|;XUEBPSQ-pwI%`B;R6W5$8-~7`lvivNbLE+qb2}`X2$vTj?O1rH sokdcRsf=xZ+AO^NKS2qOq4`aSN(7m;>uq~U7)ENZ#(|xQ+s)4X4+0SzfdBvi literal 0 HcmV?d00001 diff --git a/docs/media/packages/torrust-tracker-layers-with-packages.png b/docs/media/packages/torrust-tracker-layers-with-packages.png new file mode 100644 index 0000000000000000000000000000000000000000..9da465806bc6eb8c151ec5a96b8d6fbc2a27f6de GIT binary patch literal 68191 zcma&N1z40_*FQRdih`sF(kUpZNJ@ud(2aB>-Cc@+2#AP;Gz!w)F@U5rN(?phP(#-+ zx%a+zthM%9Yp?a&Vakef1b9?<5D0|e`7>En2n6pp1ac+v zIyQJxvhVsGd|bD=OutmztQVjBfQs0o#^hrKqfH`N=+R(nfRxj85jz-FZ>BokA!~`MPp|#hQ zzKJg zn)z-BfsZk%UM$Q{h$IfL9FAmI0%!z*D8|(?LXKH*2q9nI3u7M4nti3lh6Ivd+4__G zw@G)zLu^PhnzZ4iiwPm7$-PG%*Fm@Te}91ofdXwbm``ZoX>jbe2<^vR(U{onS zge=ySFgfS%ljbXacj_UMD#A<6VHlcX%K8%g->;I?|L2`UDj1jkb^w7`$^5fO2xKti z@AN@cI>AHSzb#^?JGmhc`ILXE0$%`P{g2=Ovnk~NXb#ORQ*gNcxwD6dpu1E5FN^Zp zUN2u?ntIvgm7Sf3n&+d2Qs*pn4Grtn@6=)>Oibg=+279yNeAmlE`>zINm-| z4_IN;?T<9)O6preqrHlskcU6vNfOuPVr8XuN%)bSoxR;^Q9f=fpg2-dUtce@ST}A< zM^8`t5CUQ4m}&?u@{cnwALl&vFV${K8EO1QFia@Nw*fj;?fJ!L~$#~_c*XEZA!KE3=KmCht-#u9~Q+|?>S;Ynrn8kaNV|K zt)A=O!jlv58>JAi}7qsE5AB z0!d##1TlXO7|xY9QjfDZR&*eK>_ii?Yrre1bF zxNzhUN$UH9zmz?;z|+ckZc(noNxp+Z`z##w9Ce-E^qb?ZYdA=u|K%KgMZ|-psj5Zl zC4DLDjXTKmFXB`>8j><|`CU}LA}V{|qm}94Ij7c%urzX9MwKckVSm5Ea=v|3kNa<5 z`lMtA1@X9I0WpEAh*-A6tS6?i80{ql$$HR&JsGnAe8#zb$=eVJ`xS3?C_%+qEi@KvwP9Gli&%3bbbIGY4*)xaf z%r9E1wd)2W%p58A-4MAp@_(oa9!(MGhMlSgBYQ5s_L&4}3(1jkwHi1**$ID1KiTaT z!onBEI8on#TUyhleL&a~I(g>z0<>XYr`^4p5ki6Ehv$l5yta3kwT?aBy1N8q*mx z;oI&mSXoJI%hGXT))?he;$M@bvqM8_-|i~e@@sd`r%@w0hR!*VM(d5O(5wA0%w6raG{9b_##cFe! 
z5ELSqw)!=@yNQ*qk>14lTLUwi`rI}8sXATAJu+u7&iSk9@ZARInDh@JloB|N^b<(1XQ*yXyN{n=a({XlrT@Jt z5un^0O**24bCN5rqq0uKgDC?DU5MPGepApT(p!CF>ThhLUianxi9{lI^*xNEr^D5A zeW&o`n7;@Ua;TFRZ;yP*?%p-DIkK3%N~3Op&*P9&_DMvI_KJ^2Sa(niz5D&*CRKle zV2O?7ridQ$Jhk|*6Ad*{Xor%9+Lj25bEQ?U?o%_%xv5@Q_37gz38okaJUqGY4{FHs zG-D{I>OQMCmQ`%x=#-DocT-OJ5-1h3{7!byrFPTcPUy36qb}6dRYtj4@{jW#vTqwa zu{IG%YUhntg0_TGgg;64P)e3veRQ{}y@pxA-O{pI>eLS%t{wNed-zPSKp@!(;m+CA zUiU26^}D`M(?m)Gq&XMKMr%ZY+Yco>eu-#QUU|GEp_c0_L-M$2ZW(%b*S}_p17BYj z$`2IO(BWiF5|3y^QWHsv2K3R%&W_VEjc%*%IQf>Q{L9`CvCAbK*(^XUb$ZJ`^5etX z${g?~FeY14L*f}8y(;IlAyxU!GRwfBAd5hKQ~m{vWFxzh0ebeGf*^f*mqWF$(a>%u zO94yMI@oAD{CUpwpf6;V#*vzoaLf18lL9`?N^QwGGwTMzDigBieGaNviDbpQ zj|fotQ^_IK&DRQXW$-nMl9d_hIzp%mPwDUtJvy(4ojTp~<=6ixoYLF(&p@lwaA$dttC;iEAp3G|63RalmD<-D>g$^PqgIiM>CDu__WN{=3 zk95?}@oPx3@emn@Dcl3wO2(6((K{H5 zRAIQ|y2Y8NhdTv7gBJRbN=>*KCGdTLT656d~%aXn(FIqc77c&@{3X#3w?+0{p31?8uMtn+R{!obzYK|4B zM7pbc>~;@<9be0e&LY*h;Y0S=R_@_VY}W@6Hjt=^d_{B+?EFY@R9R3CSIPj}d# z?~Mp<(fgCKi0}>FV#bUIer`uz$K<%#=_q!U`0LP0_Y>`SK5-R6(&7(WFA?>SSIHUZ zAR5)!B^V{$@JF>w1QAV#N`(M?AiW}p_MNLaowp#zZSJS5GQYpQ3)AOr7-S^5RkGLc zeqjOVvFn9*ID&D2^CAt)IaO^5@!n!zHazrLZ)K1a!vgNBdeUh9Z9>x;k?C>U!{g&u zwdamK@YApTVKog61~*MwP%r@jG>rQsA>g(&jc0d&(HG$s zgtHF%V+gc=a9>1(JxX3&e94I4Y<_L2R%!i6-b^qpHFYh|gOORC*zozhikN7Y4%Pxq$N(SIny+Yb&7z&7?uj|2@RL{ z>XCTpbM5xWfH{?eo}Qk@DmO@&Z`IIvUmd>kTpQbaM%b(u;3OfZ+)o64PSd(s0T!yzvlo3>G;TY_dok`tAi1Q(yIuD-_WLUXu%W#FMn+a)uC zzs039vM!AZBmTDQ0V|!TDA(rfRx9Bey%pcRrR5*x-7Dcl^~dhy3$9XmUNw{~&jy}V zxP8)Qzu6$Psn_aUds7Ds&q4B8nvRb~|EG19FUI0&O-t_-;wfI&7%IqbM~g!OIbm~@ z$G6|?oLvpF&tmoy0MmZ-(79L2*!%nTi>1*h29$Q@5@P&R^~f(ujgy&u9cEBF-&2YW zocl0exL5G|NN3)QQ(V&G{UYCycF99Or0m=gnL8t`R`iUrS3RvM?;k0Mf(iES)iLx3 zR0<=jQ+;78Hha7s1=c4XJAE&9!=(waS)=*yA2!IOiGN(Qz*@30wzG!)dip#b`8oCa zk1VrBTO%IjqvsUT?78L_@s|Dfp4M-EW_*haNk{~yeoK{Ts4&HU8GWyX8A$kw$*Bhi znd+BiHmV+n5b@@9X+{a-Z=&YL9fD-MFN^Ly8ww`jQl@2*R6XYyw^_Flxr>3Twh!r&4thDOeZSUb|`jWlN zRA2s^@S#Rxk2?RgGmfBLY)}4pZ!5;!Fm z@z*J;P8%So4aexyZxm 
zn#x>9&-|-L8lHv09a6tDrFoVciKk^}4=(L_nT#7;bNzENW)@N`8lJ{I_I_3DkieEb ztpU;Ab24qRPuV?{u&xqH1?Sdw|;BUV(uhKpA{=O^FC|+J( z9v<-lGn$;O2%gfLYMawI&8vf9*NjAQmW0P__5=Q&#P5G{{`gcT<%})zxAh#n zoCWnWA3RJm>;eG<|K4x*x^796we75baM>=7oa?${VKF%m6yo40e_ZXG%8N9{y}+0)@t(?v_^G!6Ma@52j5l?Fvqq5(|8123W{*Pvdl}1$Lb+~&+8bDvsR3mv=XiFpPtgZY}^~3^dfDC7l<~vWy=UD`&`{8d$q27RPqe5X%^6T zPfc4__%lP{r(us$?RUF;wiQUl={j<{zy*}?I<}-%M!oKMe2>4IDV|uf&Z>ecdIomT zv7mWe=aV<7SBpdMk+Et2(t!X%_=1fpaL3m(fa3HTt{b2&{r7R?nNL4 z1bfQBfq?`chl^|p7KIq59`3LDB?VOVrPd4c;DBW3ogN;82K(O}Rbl2k;~cHTm|BQq zCJ3S7eAtR05?#5;#&9ue@J9A;#gadDJ@Bh*>+A7{J}D_Fp=T&keXc1s4-E|gQ!zK} z@9W!}cHZxI*;>fhgfKAm_H;HeSA0(^#kCcNEb0qAA+Uf5;$5!2y+C1e;r8X6C#Zl6RKRg|z%eY~ z3^qAAd2(_B{)dK!UOvRswsjU26}32|rlqx7YUXQbuVG$q^(H(l^}`P+iK0DM7Bcym zz`XQc*yKA+DGAY&V^d~LI=NmFkuehLishThEMAu}*a>``O2bCqt}@P3A2+w#0sR+C zYioI3o!u{K&rJ*QCAvFl9)h=$CNQ4`C!J44$w0U6OLFUTbH-2uOUTL5_W8NEJ0;{A z>feyD`4$04^`z^Johg_;~*gn@p4S+(+F48%v+ z7#dx$(uFSrtXB-6xEqvTf%MFZUbbP_mS(pyt%o-@H2lKEf}6dLx6e1Fzz7VUg26r1sesgq&DNDv_1BwsqCxA+@@W~w zdNEayz{64G!Pyy96bM*yU}z|=n68oG?tkOamwiG8Q!ncy07nkI#oX@)GBXcg!r_uW z2447JV!|<(J*UTvSl`7|u+7fRNu}ca+fxPGpD_*jOnPUs{@V3;x9!quqdJzMB*J&|3=9xs^RvA1_p~5d7l;a^9%@}ufK^|l$>on zyc>|I`Ns8(ipSN>f15z89H0ggqN12}G~duJ)HJ2Q5)%K<0>Kx%f0j7y4Q*^{iU*Sm zyu}biG58j<3Ln#)MQu;GmXa<;{B&;IkpooYGNuJ zvFYgOz*G+pCjV*$A$u(?EieTL#M{xwha-rhgy-)Yb-cXzeP&;xJitHMQLAtr1<*|mm-h6w#DvqwO? 
z{C|@u@bz9vb44Kx3Am4$j1X4-i1j)Xq*E>xN9aoNhbx%gffPlF;yPZ(CQM*?XbJKI zK?mM`0RcRJxqW`_4>gF=$D952mzWSpwsgM+WVYmzO@IgWaPdnXG^>Lh@fah1#*3o3 z*xY~+f#1o$5j6UudEKSJBJwxf^DXh$Z7{W9E-RseAy^Mm{}n5v%2!=3kac7=&{zC($B2jzm>+BdKWklV2*6kU0vHlA?$3w3?AQ?H?cH5ocap$`JxpA99i*hd zp|^Cyo#T=K4DbG|O=0`|-Csh~IHF#q2F3K3tcT2ADbUO>4TsRs|K&7zF;L)NxH-fE#cnljO^nn>%vBqq#7Q!u^L*sQ` z`G~*bn^J&LI)9l8l);!(vs9OZDFvqQ*&2O){YZVl-j$yl8->7HHU6E}O7|~Lu!oJn zeww|y{_@gLNNQoI2@z77F2uY$XztUKyjWt0451j5{NNOw|WL>t9~gpF-`#iO$NoC+*cjVFs%b$ zM=N8`)_5YgK$;P&-H13LyPtbtkX$ZiOlF(^G znyPWz&CN~r3%;a2^6=oUr9CHb@}qEhkTw9K7jJrGq8fBDVv-Ksa6j4Ioj(AnPUXYX zQ$19+tPpm#W%>ByMsJ`bCw(Msi~V^-T;$HxV{a4^{CI%#8Nt3 zRu+2*Ok(z^0UQK?HGTEJOy!zKaCp0kcp9o+@#C>;7DZx&&+tbA$s`J>ycttuj2#3@W)0EBf-)-JJ=ao*p z0tZr=%2iSf93f0~Gf90drclF4qV3GU89b01h*a2jaAqaQhS5uOq`ad|Kmd5@Uf+4>?Xz3!PD| zc7#H=7$feV93*I3=@L#+>s^{~V2T~Dv`7Z2Tsu7YYjNe+r`Q9|Plcx>0!|L?((Ct2 z`~lgiOcLJ+9SJEfUQ@pR$kZeM2HWPcz8HiL9l_n zp-eR}jX#(L4jddDE_Qh?{=lsFx~MAuG!IJCXov^BY+@{jh#dougP~m#=Rw^7AoB74 zD%g#a)ed4sn$1{_6VeN4u;y(tbwgJ|tYg4@k{{{bs|fq*$@uvA4fjbujYRLj#x@U* zeg1Tk5Z;#CvK$07@>YG)6+f#TW{znsAuE9ItGt5{dD)vHEBSKNZ(S-KnqMQu)+LvT z(>)m3+)*LNOG`_KhgCkQI0|BtP15YU2G{sNZduYH?aDL!je+*|CtFd1U%X1O2j7Ce zynTG|x76tC86X!9O9k!QF;)0In=!gp|}Y-C&-VY;?6v<-fN`C zS+vw^Nl8gygSh)La5geD^t^!N3}xkBp!c#b`_tvK$_oOgk2y`kWcG)+w{A08_H|(m z?Epr6YRF&kj>y8c%qq6jaKm)>3ic5?b?Fbyhz-2?L1%QG_GEEx{KmqaX4!Yu9XAu# zfIpmJIzTQYV?y}jnH+h-53ngLQodV7W??V?IMEL5XKYTdLpeLx+n=BA4RCuAE|z26 zQD2D(?Ql2Dc;rBXlaM&4N_&c4P6IXsr;tWSl2O_K=!C1Ku8orq zkt6m*q{CA%xD0Zvr?1Sm@|Bw}(hkK7`83Ks@J5uc`HI`XaIE>3TY%@K3o`}-4oIMN z2UVaN;wf$V)fAb)uzD!eE@fHLma-Yk_w=Y#-zT4gP+qOGle%nO22NkMZ~LbF@?Zgx z%&xuo2u=4~ z-pwwId(#{}VJk2&FrX6$pdAAi<)z^+zW?il0bEk1b)cSSYedDx#l^(hFGA7`4GlXv zeyjZBo=^AnMFwKdS3&k%L%{CTjN6uR>T<`66}gv>w*}&?64d?0@L+7Vzn51n6v{6S zy!=w#Le1*xYH)n`AdsZ+UU&rtqCOZ1O2LeB$Rn1&))5NC6O_`Iz}$!tJ2s==JKY3k zX}ydF`p>~Xkh#jB7(kb*CpX5fVnG6nfKJ}GrR*X8dQ<4;Ute~KTzVmXXU7K^uP7cIG#=MBH8nw@x7B)n_Ac*% zcxY;F?eq2AH`cN1 
zPCZ%04x)zw3s*j7(<-H9+i43xc3KC$(A>5w3OJH-yEG4q0GJ2bQM%ADheX<(3ghXQ zwGhi*fARhe;hgDCQxAgIbwoPKUOWwOwgT2QuyAA3B6jc;1pM+YMXLE1-hhmwD~K17 z++Um!%N(2NvBT-J^N%jO%OjrQW3s31X6WG=;#sO~B}ybY5Z4NXh>^|YFX{mm`MaUM zKB>>)SY=9Z8;xY2bozdbZOw#om6+>aO|k6#C0K-EYjffL0!S zli-;S$168{hzrP0B3J@4Apt3U7CUqoG<1)^CJ;%Mk%1^%T1s$=GrAffP^MVPfgk6v zzY%<#Fe&x)^Aih|qgPBQ2`c|4;ynz+1IQI9b8P3-Rfk)x@v>J#8l-K~quJ(Y@;d)= z!9ZMyVGO0mQOvmF|6RX&Ddp)+H!;zNxFxOHL$4ufCFQ0byB<2s!p(P>A3UMb5K=Yt zr>tjKmr5pAJ2aj>Ksrkjvqj+#senvLYIa&8!dW90zoZb%5Rp$qYbNMG7} zuIu^M@T8A|&t+V)%fogITnm%TEA zA)$bK#lGE0s z6@{Lb-scMv=L=02CrzE)PuK!3PG$lw&QMJkXIpOc048^ShPr&Pyu92^bhp66zarDpt*t6CI$qDW4L-E%#;aGZ z{=I9BoXGdQ_b1X!Pj@TQ1n^2q=V&pi-}u`E+eBC0qi<7+KThb-S=}0ZaK)vj(QfEie!rHzt0j@;3^}ut z#fTdzo_kzVyg;olV~(EgF^htF;b|vn{tLr+j;5VDrn~L}Pzprk=)oL^L|P3btGaQI zj$NgtnYu4TwY+jsiT~ryg_Vm0&nu7@H z1?ox%7S#kwx&aoJW<|6>o;0(b?ZF6*^??7@4eJ!OJvG4xH7+eX&_=uG9{5Ss6>eu^IVTFp{Vtd0#drI%J53M0@16RK zr)}xA0?p(pg>mdxA1l*-}9ZF(z3dcCx5gS zza*Atm8JB0TpuEMmGqfUDD1G%A?&4Ud|Gxh}Q%O3VS&tL5Xb+B+Z&PtC2XQ{qQ)qq3qW z_tmdoDMGv|`j+?4#nVVwnndPM!82`H?9?L#J02?4>a8j4oT(!o@N$dJCS}1geUZr$ zqZ7Kwg0&JY^2)BnA(7Y-`n99d!R?M2!Kq;8vR8%n4h|q-VVUV0AeeF!S7A|Pz=`zH zewWC!AI;a&Ai5k!>;Xa7E*E0vSUe~B;AiMv!&?uP(M^YqJP+^qTFM$A3~voMBf|Bk zzqdHB_k8uY&M4{)V``V|ADs&g!_k<|MIGbby*2aN*Zv3hx~nCc18E6Y-FV^LAY~^P zy*x|bMCNaZiqM}Yfz~~;TE4GLfjwSAn_mDe1PCwFr*2ARtcuODk#?Q}8&e2(kyxwb zq-rfLst;{+ZVt&v0eGg9J+1wW=|aP!+ftlndwJSpR{lRno+2F6X~6`3dcg%P~c0jOYjMUVZ5r)6#U3y0tngqVl=! 
zpzZViq_@X_%V0a@lJF7qCq^11&LjiA#adM}GrP-$(sA6$5}^`|9-DF9&2V-}DYkXP z-<raFPB%M`lfNqb1NTrtj_t zEtq$vjV&C{V{{v+RXoij?ek0yTd|AM(XVb){y|}fEPe;$NDf6<+7brE%n?vgk^*NDXq0nRLoouKDHCT+z0CyZbkiu`Ks_gRgL) zn0P?UC9ag1L{%YmJKy&)<4%Oz&A0-fGfL~ivdKw9AD;?}7O7KaQhqe6$Gy5H@TywP z?KzdZe|8u_`!YXLy|IT&{Kx3a=Zc*BZq{r4J0O+1n=UFLvE20bl-V2IZW{DR_Iq^E z%cQ;^z8~82CKpv&>LTNGgHG>DSV3N!x6*ldF(^#r*L4Wz3v-s5sBfyEI%+p5XMCf} z@~m~Ljc)B^1 z(lDVNu{|RzGQR25n!?r5$bW%`O`cGt>fqiYbeR8|nWFWlY)p;4xT>q4m6yh>%V4KJ zd;u70Z%dCnJ$Fm<{DRgDmbp8Jp5ui!P-JQ;$Hjfa5^2Hv-rnAM$ktH$HNq!#2Z~=Z ziw=r7x@s1Mtr-0nr%`Q6mSU8pBedT{9`9>Qb#7Y4XJW5%zc@Zltr@zZ^xfS-@QKvMSX}b8+b`P}|L5WHyBy^Pxjg5pv0b9MAq~}z14~;p1{w{Ua zOg-5tcGA^}Ywy#Te#!a)#zd>$NKJ`P{32Ey#lb@p{LWhB*5;B+IY^$ia04%WY`SBC zGF+>=JeH`(g5S`iPCXVjo=}kG+eX?cadEY>K2LK~tCw=H^tK&T4RIVJp@Vn8W+;Y- zo8d)QX}Yt$4RR{^mOaL8*Mt)$pwo(B#v?AwOM$PSv-w%`ifF)ApJyv=JwcwV>1Z%D zj`dd|JF;*|%JgHdI1y*t_m-kW_hRvGG~!_dL{mH(M<8Gk#q}J1{XMd8KNi<%x4KVe z_;5d0%VXXnIb%kLsD5T?}U{pE0n zg$5V*47mBc?cFtPbQ$$z*Be4p@5)toUNN@lPgV~fk5=ct*8f$K6wI8wd4J;z-R(Z> zj)QN$Z3gg2$#B(;it@6o$-06jqZNZ!Tb$$9aaXS*e~=&AK|SKk5(`m%mt`zVhpJYnTD&=-qLtZ= zFDS=Ga%6y-8|>&Bl4hn(v?D{_q90zp>%`TN~yXIHv`{AcmCs8^tJBbQv~!0V^910 zD3&E0b?UfGRI6LV2g43Cmd4y8e#An0Xnk;k`tob0xiK96{xkm|+U1emYE)R%MnjxJ zhExB9=7KDvsfIimbJJ1(=ggO$VB^ZgZY(+)zmLt2j5SM-WSwZ`KHxfQ*=@BEQ_#qM zSzeMGkS^fQaH{Ol-{HV@IKS&XGrmN6%jnGhn=z6$YM>uSRY(8U%XTsLfUB_}a`0Jx z=rWBnW?yYev6#TpAI#0>AOEZTjeNOCqUW|Ca*&o5+0W#}x2q;g>qcMxqF6Y8COV}w zVO2t5Bh!cn&Da=%g{#vyxdPC|bHe9k-Nv7Yb?E&2wX*m5p{G45DheXuTA%LJg*A;+ z*33BSy8Vg9atp{4P1WIzQMk2QY(-77(vpG`O^!#qY7rNZdBQCz&YbWAt|4<_!8w6w=`x<*yFd=m5)hsWY(Z|w$&5s#ksX>YpMlt8?R>liH`ck zPYotF#BnNqnI{n4F5d043ht|%v_Cd0?vYm>_ZDmztK{foew8Wk+*in*?y&D`G~H)A&(;xN!^F&> z2`^1n)h7bIW+iG!mG5;6<9B2F`ilZV&QTYB5(k65J3HNRYuOea}p z`S}i5o(8LLxIokN>h0DF)aU85?l-^dbU#YHY#}zGowUgn68>_rwXe~b;bOfdtjM<> z8znAZ<@E7TtawkqH>F%^gKAkedM&ZoJ<`QHg;t}3#wt0~QG8>u|9Hk%EUa>*i@x+@ zG;VAc=|pfR|ElelhRlSUUMuoV>wp5`vn^U!NN5GzoT+f`igwJz2>`FUvv`7Q7CN2o 
zqjTIUfx(n-+w2KlX(@kOSCYC9)wf%r~=;M^{BO-!dj`BOu5Ih;Q{zTWF=&5a|KMG}uHJ*PzBzgO-hPGxL}uCt9g|Cus~)IL@94-UTX zAHKlzD=ebbTOp^sZB+x5Q9V1Bus+ zxNlQk^>i#+lyKBLJB>lKYjpCL?Uwgz=W;{lz&@CJ9{l%`=~bwQoX5x8Z89q3LM#@) zy!nEkzQ!}3U96;YF8DI=FvhKaeWbs!eh9WHIQ|prTwUQ`|6;vTzzdrTDYBVS6!ww! zcWZA&N|*2S&&wQoLi&T6SLbK80+Z;&xH|J8_<4xn)g9{u9#z{&q=27y8;#>flSfVo zjSe(lH~ii8`GR=;RK$4PhsX`_DbG1^=zLYryc$XrgT%%RgV?Hh>{{ zEwY>0RM#oS2JtLq=P9X0DPVe%HP_0Dy0qT;0KN-19tHm|p>%cu$R z=@Y0#-fv&>sMQO>Azk`)%N&Vf$b?mu6U}7t@)=f{B2$Qd(bdUN4^f1^?C`3Z2Iz?~ zsiwN*qf#958Hqm)4goFj-(7&Ltkv*LpLwgc(I3TR*g-!Wk!L2j=1{}89P}J7*!VU) zkSDD6tH^dd5d@BQxnU5U6IElea^9G~MUzQePoK{;*0mg>;wStJ8*6JXs^f0F;Dakr zy{WT0a!nd9Ie4abK1^*tu$rl|7`KPd_o79x&h7xaGSR%j6Oe2_coWr6WyWqWCLWwLJKW7plwmtXv(OaW}lkA2%ykX~FO0Gl6q8xGNe^cLsqS6X(Wg z9{K8Pn>FE4mbYjRNpf}hiW8x%=9*Sp8}}SHbbo>C3$O0ZOgdB>o{)drA*>VoI0;qcKseMMX!xrOAABTpM z|M`CLF!!|;lL=LGx61>uT4(R5O*i_IG2sJe&PW$dvv{X5A(*zk>ilu`)I?kKrVI#! zjVwU!3X2so)0!G-Cg)fQWsZEiGc@erM{V-HJYJIz`Z>i=FWzXp&Z^0PYI>T#b`(Y% zd34fhW*#$-oI*#!$8D?hxO{a*4^2d4RkSMX19ppZFuvfkG#4qGx@Hcd=Jh&v(t-;rN04{U6`db-#soGq`icovL$m4f)>zpDl zRbiqsS?7bl_1*_=RMJ|!Ar8Qvy-$Gk|eK3Z|lwvmdRKOieNCC6a>|4dG8Re!x7Qk1$ zc>O#?DFlY2#Vs^>K>lakR?|R0qHVAaRmD}o}fQ$J3O^j|E1AbDuOUbWKd-tkJ?630np5{S{G zWk`rr<{Iu1t&Z}Wk3Wlasokbp=0th6^>@_nGMJt5hoS4@a~P?X{n|gINO;C?H+A>uLFP$?D_II1qfy8s@u3C2+QoZ)EQm$u8RjCWA1}Aoc9M!+vXY{ zHlD&mi3E6b$9+d4OJz$v74l3Pt4Fto^XpqT;QhNWH)apIZz-gGKB6WHYpPMZ)xyC{ zZm@4ugKgH8j~&N;{*EMqn41vZc|r4V>PzZ-JcrV}di~xRa}Dl0^ohB;qEp&&IIXSo zC!5dAy$)%^!h=fOMBFepFpPvEGP1{x!Hc}Hb96~|7XZ8kw%1s2H_RX_?~U; zR>|OAB7;nuUxRlu>OEU5va)4bUh6j2eqe$as@|3lW5yVR0r23t2kRFd^2U)A^A;opZk2MVR5{Y{GZDWQ= zDQdudFO;KdL%+K)<(`XhaP**WDkh;#=(XZ~G>D4eGu>{NgoL`u4u7oX27Bg1qs#MR^!)OQXFY zDWZ2R-@LQx>=Q-30sDhcmG?oM!^?e!X%hnjg7Ff`bdc8>B?m24Ekv;{)3GiCt0x~{ z1UqIqKL-iXJ?alz{?;`o@a3f*pStIbD*}G+NpTxWK6hu#7O;;Nf-5EO53-0KtDUda z(=7YtOUdnEg)tHiyZW9TyIo>Dd$DA`_E~_N2s?EAJqOFu^Z@w;gL8MBOs;Dgvfq^N z?DQyGoKzYLum03D)Bl2YXxpVORKR+*!&K%RRdFRsZz`ycUwVL0y64rdd{u(;@Q4@AcvhGq@8+ 
z1u25STw}vb&&AkI1nJTm;>zQbBHL%C~|$BzwDD6;-eqanIu_%9`(ofaa%-aoUu-op^*(w0&DDh(Xys!nywF zNZ+rz&(wQbs@4Re$6U3&4q<{XTpPwl?6~#ceEs4}W7epUbCcoy!{Na9)N?J>EyeYp9jl*1@Hx2JkcUm_z^Xk+Fox|E#Lo6Q4qZ-9!*Ls#G(x9IwH7C@W z@>V43fj;QD!=E z0Wb}wsF!<0mOAc!e$Yq8PZJ1xl?J)V!+q_4olcO(9LJt~HeeA;()DocMsjXRo1`%E z+cv%<=!v<|77~xG|N3nH&o)vqz~-UDirR@|Ff$2KomnC!}S3#9BKL}GJp zhE0%0*-Z_G-4*@?P?)bsTp*4&r_0w{$IJmX#TuZgjjVj2otf>SlYT$olN#694IYnX zIIW(~Fu6U%!qt9`5_=JlK1Z$Lt@NtarJl<`UhPP}lX+{u2+jTC9-kNhueZF!vny&|3U7XQBZ))cSCJ0dKVE6P`GM`0N)3&xsEiKDWgk7?5{FT zANqO-#Bv@W-2hS!z-?xa?l4|WC_C^z+uk00JRpVC#)TYy`@i^l>$oVp?S1$LBm_YW zP+m?Bo|<>HR(m)&7;Bw~X3sbxMFmIdHwXs@ z6CcO`b`&t$rl#Iy)gXN5n8a2Pk`s1uYqPysBpD~L#>*JRXVX5u%`=nATd)W`pOcPa z{k?{hRT6Zvq^D_(te8AX2;Aig=a+t5tvozDuuMMPbwEA?S54<6NtKj zS{HFObr{7c5fP(Xr7E$WyW1s2pApO>zCbo3Dx=*0ki~%ioo`SJl*@}sm#U0Pg;$ZA z7l&&11DKl3Mi>#r>(SvW4@vk0VObUz7k|Uru6^&~0M4RDspZ4X?ajPtL!t*=e!JKOh-J-A zPj_z3lp7Ewy6VT=-L&%EPkB9$1f~AM%$*Q1q5wpM2?zCJ8ja(3Ii`$CP2T(EP!T`% z+}kZ*519k*;$1z)s(k?sd0A?KaNhEu+Ja%RQV?Yfx4s5Gq5isryOsw2fTA3QKf||r zO6|yD7#!db=R3Cdw?DmKZ{iRg#<%YRc^94GP?{XolUaBVWA%aF+e>o1oIsXaf#HTD z{Fx9AE`gT`5}q@hMw|jK;ZmOahd(iUlnh_&WFSYrfxb!ic;~k;q4fIP#SCGxPAS9d zdCrA{wv7i2R4u^;?|7BywK(CrQT^JR?WIPRkqXb*3kf1s6?YF}DNGz|P&#Xwh0nQ% zMC3azW6Lk}fr?oAa4GAO1WVztU0ER0ak$L2P!fhErf1Z;QwN!f$W^n}mANpVO>&-o z994TH5-nOwUGq7eMXjH5-*mN`O1 z9gj0QRpGg{wzl>~lY>4{&V(jJ3S&pXWPZ`9im1+^56*Dkt&92aed`fWb}Sn;7M-UN zra!@`0yIBQ>h4zktX^2lKz}O_WUA}^X*EL><3?b9HvL)%rxHVCU9)}sAabrKEvVWA zmDR%PjmDFAqDFwu$wlSYIu@AIN*Wh}vwS(p{xR2YNFM0Qujv_3j_U)LIiri08B}~8 z&~&xVPu5eC0*CbhjZosDE)I%qQkv9t0ex$*;t&k-$m~1Yh%Sdu zk6QUx&tk`EOFto(kN;d6%EgKtiz)u0%z9NuvZV`h>8N*JGkg})Q> z@zxB{n&6#;4^Oj}z|h?Igw!@q+cneB;hviJ-zq3AFTYgvz~|X+gP1Rs! 
zE0=@MidZ$#lf+6Izn|}w?=t3Sob1HDD(kavUN!sg5tCqX67}D9cXwfEz)1awB@00X z3XR_{wSv?TnoJ;xjO*(hm-~o1gD&pB7%@F+Q@?o0UMQBSg6-$Emn1%TGbxZMyfZEv zeo*w4CT1GJsxK_6G`vteLd*dS^am>&8yi1=s%<83-UFbDMvwXKAFARJAn_w{50VT| zH1q?h(iv(LkpYtX#jpn6FnQ5WqiunXZP6fs%&4ECkwy@aVWq_k6IC|{NJ9dC3%!(q zQT|)XQx!yay!~J5{U6Cm#DRQNqxQ7o+H~Z}=Z5h>j{|C&?70Iwe-veZv;l-RXmK3C zfb@C2^hYHnRSjh_V77`=!i%VKM-RX<0E^mZ|9^A;L5tdV1qlJ66MECAb^tyEuY@XO z=KuZf=e#F0hpQq29Aj(CTEATyhD#PEUm2xf#Rjh!NK9e_!l5`g4?O@CiXxN0-XdLbM)Tk@|7y0suRC< zcRV7^!ImE{W393-EiMM;eb*TNAW9r2u|I?*G6CfW@Hc^^2dk`x25GNbUmO9X9V}DL zLA*rNfw(aE0v1dKK|#PIu?#*Jg^|$1;vu5I9=R|uIzEp2P52da**{hLDG!mIoEimt z3|6Q$cdjd0CD7W*><6*VAA)PMOPL@)6>eFkN?P8AbpSa&0POAq5*tf+ISMyXDSVzz z1`I9)Fl(p&h(Ex}By*Et9tt5eT>Dt+#5pe)$?&@oaT6NgMTh*>viJyr_Aic)h`j2X z%$Ac+w%@!<>~|1aq2zYBSys?jG+GjrHvZMxpiuJxD!2rY^C2!}k@UK^x9d5Vi);t{ zXFTC0*1tAFl4rnQLJUDn<8y8U7|!y}e3)UKMAAR3*}u|B=mNI32F>2u*OA>6!t3+;0kA9oa^rFGpt;Oc;T~zRzTf!mJHKVkwwd~KX zP9RuCz&#*w4i?_p-X3y`or_{z%$I7fzWxhXjrx>~xGB;2cBo=3?8O;~E$q)TpZe%y zdPwx6%c2YehS(;*>-xeSQKhb*OG`^rQx?bONjf8kBY4PDPIG1b0L5ug13d4qor*Xe zBTh$}kmkllxv=KU^5SBOEXEDu)c}!QSO%&>IKvm@dlPv4>3!K9e#G5J8!a;K#5TXL zK8c8wF;9YPmWK(e{-D3gcwm^D!kNa0asq=1oXfinKD{g@k|eGKD^KleDk&*hJGAHZ zD^8EsmEs^Z$@4>9ZzLoo!L5XOo?$r-_sdD+aCxd4rfpulWz4%BFT(+%s&o5ad%AM? 
z_!YdiEP{>)5M1_%Acd!hX+0d9La zUEj?t^ISx2E@Jnkus(4NH*6*k4s|>Ove0Z5n0sO^B7C`|=dF0BM=h&$zHS%cR7$$O zL+z`8i3(>>W7`jlboedPw{f4FhfCY^Ic&a*5Z$62cK$eW>wVVCj_CzZ#o~xR3#fw< z?TdW|lXi{t{)KZS8Q|Gw?}#QOBpCPnAp1V<6MnR`07&qcII2#TRaDr0K#x@tgymxk zeDnw)_`N~cZlv(giqPYi(uV7t4S?WjU(TA{t#R|j8 z2S$VHW%kJ89;wb4L4y{X#!*_Uu8u;IzPGZn@Wup`Zo-@ep{I3BArl(;V=$+VI93@` zUbGZs+|TW%b_{#O_(u z%U%r8*x20_I!TUsr<}a9vjc@l5%sEmSS%?wD+b|FU1eBwEuRkpbGqEX?<7%`a{OKD z(}|Hlmo2;uPDhkHFlF~~B_alDoa@PgU4|NYK@3#cHkPmfCJ>*?h)5lgMih+iVZeYhnd5ew5BQwU-L z0OH_(3rUEOLKl;MGPJE)%L+qiV|eqM!FiA?&PkpR2@oZhBAvk?*_w}00rOqL1Vga6 zlZT*JWW3osQYGlDcLU~JWE?>or~^w_lpJ|V*RmBmC~rdcudwVqpec8gB>p~l=uXd@$lNjkb5o6dT-#w<{832o5u(x=^PW0FmY#ek18wpT+X5@y;hJ zwe4;%p|E0nLwRml;8Tr-u%ME``G&6M{u#5@e_$LO5sSq?GI z>)f>wVmv@rG9QE|A2n(Qtt5h^fKW*Ri^ME}ea&Yg>AraUK<*WqFLdHSb`Q4C_;}#q z9=ve9ub1Y;jvshXJJ#}OW>N?r0Tb2BA2DpMo70`BCvt5c+kZs<=plCC)n7L#WG2%?lWUP z_+#wRAYQ@y1CX;*8hn{dYALdI49EY%-xZ(lI&_==1=lpjEV;_5Eu<08f3VB)wa!-w zc8u8wb(#O8H9!ZQr*)@tZJyjJ2?(23XJrqX$lWJ1>E~=s?$9`fi{AeA(=tzco2bc4 z3MK&k6=;q}$%si8P}jul2Ft#l zeDVFq)#x6Yyxye@Y*uA%zbz-`C>v&CYBcX;Mk@jnzn()kao3&hVVeAevIvIrc?`Ty z{mttS{QZUuj9Vwup4~Q1V>mb$>`#(-e`J6#k47IVC3nFR*}V7Fg%!a)f8Q3>XgwD_ zLJM!@XAf$U`CGV6$RpA5%mumoHWc7$E zij<~plJIGf9PAT3#=_co_HO)=8<$g*bGHcM({f&Ny;6CSNVlm%=?j+uZ5<=UtVLIl z)1mVTFf|)^HBZ#1k{qT|?f}us9Z_2`5aw@xh4lanx^w)9JMnlp)sU>bZxD8qpLZx0de1(zG4qb^n0O{MJ~1PEG7I(ERPhP@qJ_r@=qR zo-_3wG#;*Y6VG(&?Z6#U*81Z(5u4liZ+FF+1Lry>^%>Ho1(`!lZ~Ijn?ODGoae7kI z+>np-`9WzGlHgWOME98!61<OX&XXUXN8l?=PWHbI3 zaE~qPEVa1njc!k5JoV3=KKc|ut1{Zw{RaYTC$}=G=^0_-)sFFP3Qnv?U-5f144Sn}w=oc0U5 zYYd9#`2A3ms+)tt#{SZ*NI zWL+H@{nLU#^!d{km@3BNJb1 zyY1fho$i5ISFy!S0@EIV$yYlUqyICCAmR%4V~ zgDo6jy;!tWs$a;&lPT73+o;*fp^h^Z-NNVhbn@K8osnFMkY;~NKBG%oIM2e+{Mq%* zvcjPNk6egor1IXsou>(`jrdMH3AgT_GvX%R9$af$?mJ$8%*B%bg@D)4C#$1!FA>$? 
zI1-)6a)nYmKWCDcqnWx0nDJLT~ z+fX9DxOaKfP2X*-80_DqXS1H#*u&WcQ;TrEnJ3y*nGMVC8H2^F?YKON4;2@>ZFz~+ zvX`fEB{y4aBlRo!K3vG~cX zwU!Z5#;uu%V|m*r>W6#VtauF5ob{!PcR5vD%%gNM>R~SeT9`e}W4f2$az4CRM<1)R zs~6dYZ-H?+y?dPZpqt}tZ05Fkd^*c$1jcpp?k8J&!^oj>Bdst-+f2sQ0ORIT1G_Y1 zbqA_ePtEAH_IMr4Ic{NQWfya!rBlyy7T(k}xlA^d7)9&Q;)Zf$j%jW`o1|~kFLH3p z8MXK2_qJlY-CjECc>VJoQBQC0q95GIKf&pww02{ejFP;eF^=bxN$->mM;AZ6%~qlQ zVWWn}AJ1N<;M%vMK>1mG=z!wM!YAv$hudV@v8ys5n_1AG_0y&E=j9V2ZjtFk{x+ ze5w9;2d82Qonp0xWB_(jBQEDzc_8y;+lfxv1fR%In+oC4RSp=nrSS}Ue}TyGIYnfIpw?!=5EG>W?q2vIi^p7qYj*VX}{E{)1YyB`Z%sy@BqHSwpPnJ z&G%Pnr9-Zz+IG-8x4H7|Gn83@e)LZw`%`*n0b78?#pf)|dG>IH(q%mfi>nmGQUR*yu934(s-} zYHe>u?sqm<)3w$65l*j?v>`>uQ8CgFKIcSv#`{&x?}ymZJFn+|Q8RB{ETMYr>SQAwss3PCz}{ajfX4Qio>$Jj zV@308BD$;1^>W(66S}SsGajgOGM?+dz(ZbW*SO4~tMtNnrp4(Tw(Hi;pazraQi|7P z=u*dLZ>ytwO9f>e<67OA$#>SjD)-j+yu9H_*btp{!@X5Q!t0%_+)bO83Q@Ofiej9$ z!7!8edz8S7HFPR+!5e1P%XKt|xIea^u?UUuytFRYlI#4slXBgkibuX$x>x_}c&kC@ z2Fo?mdeQYYZ;Q-%mP#-2c6TlvrM5#hzI!X)yB*B3g(ddunFkxITUJwS11~#Q7R0Y0 z>UGnuy_YOC*qvO=MP^l*IJe6NM87cx_;*s4l#_N`te;+-?Y^t8c}1v_Ov=)XpOsbCwJt(~=YyMKXU4pID?%75}aASq1zCCS%eP$Mhx*`%glkJznqG1VWRC7%4EFOKvS_tudOkiK_@y4hqT8|4lCqhu?hxQN zo?|ssF>Fe0rw>ZY7Az*=o&^g&JSsH z&8~vt=n^d%qP>(Ua|8cU*2OKHNzc>MlP+{Z?)9gZr0Wa3--6{1?ln6X!{H1!l{k;>4PuRN+GrGaD(0pZX=|=tc(0dNS}l;ywvv+>l!p~Lty|``Ib2-z zhUZxW-JTh}_a&;kdCg{yR`ueVJgKS;rM$x$weKfAsls@}*&baxskX)aWuve7&3!+i zzEJhqaw@X?oYDExuVt*G9|vsiivx zL03{aBAiQFn48*OeR9WmFK-^fVi*@2=_=J7N0^lI1ifySwP$l}3H)fG9#@(O4(-a& zb1$-}k#45HYB$uzMN%0NbByTft=pf^oc$K#qSI{ZYON)Bgyzdz;{b^_Mm&{?JHI%} z)T!zjHFuSYE>Lq;WL`7t5DIx1DM##phwzh0-6<7XgD7b0Ql~|(b1xhZw{3XE8E9;K z=HB!D#i?HD6A`>lXM#p-*&eOYGJW=ZgvQ2LDmxX;o}ITDvJCr;Oz~TlX?Y1N16S~^ zN8IvF(f3r1f+$FoF@6HH~g}hwxNw~V>4Gm7Iw9Q6* z4=0Z8x3)9zE;|EX`P?(n7Cv+uo$hJDSG2im^#Y~H)o$2I6{rrY=Fb-1z1o9&+}1+VK;bMId} z#9>o2G*M`LH2=eL82h#Gd%8PMpL?el4y3CW1R-UG^9R3L36xe=hK^M8IT&!SjQ)HO zbU{*3yYlF@+)2f~-21aPr@|JdZ=|mPKWh0~$?YDaMSTet6Q=l;1@M5#r~nAAAVfxi z8(iJY5HIIf*GavBWsA1Ra%#r{az{`K@$$xssz#g5xRe>Y58vNvs%}v8v0Y5g8uChZ 
z%3QV27Vv?tjS}&P(oE0c@i0~gL-L zGP4$uhp3y$Pp%ljKO?Rd6pfi+Xn77Eqk2rO2DQWSWl_eh2L2b8!3U1zMcuJKJxwgv z+pE42q8bKB2UI$KXD)y>W;BV6nhA=z=amI6=8ea76|NU^HqjsCEe^%79J;;ja;y2s zJx@<=?{67GJoc`~h(fxp1O`;}cjnAKJ0^dE_I1&kjKQ+T_iypPIJ$nl=W5n5VYfTi z=;o${PpheJ&YqB!yRed|NOr?+3^lPx$EZ0%rZ#ek*V_xgF zXHGBDavba`CL9>`Sdw>ngW`D(x=sOHIYBhBncWD{_S>Wd-L$~Bl)LIZHh>1{ty$Q6x>?u+ogqw9W0&~mcZ#ndRGEFBQ2&c&X%qO|mWo(l$-@-*V!$jWlf zUbW5eNb|x6YpvI(e+e|B`x(qAHzlU-KJmdWHal1Lv+7rc4_I>v_;WAUJgr3ao40dM z!(CUd4f;aqDD?&1z^X%(A#?JOp0y(nZNyKizMkVAy?^n%#bfW z_M_+iqy1+&k1)Er9!h-U98NyTqF@-MnP1k@Gf77t89pyMqnHuk&DD`mA9ASkl)SCP_W963gGblyWdJlnbTTbeYrgU$JkxHGhduc$-nCzdnsc+~%mezG zq#{q7JCpqo`~y;g{hqsU?4Rwz2Nxl`sNBaM-VA?ic-@$_ZN%5)ET}M5q>`x#{j#Z< zuD`f!!?~`@?rKoN;~=t&D;e*(cI%CCebCdgnhP(7_4l1Ebo}S}_KF*;ZqxZ9?Ryut zPj9bv6tHZ)wTU$+(`n9XQ3y9QvC;L^29;Qp>BBu}SEn5%MH{F-D|^-a*K}VsV22T& zrpugXZo9uxE9p2#VX?XRkpJ{F>(9rtnGG7I8^I3jFAOowpBJ-icy+dgm2cQf#ipOo z=h(Zqz-@7YxxgSRxt*x$I}}xH0})j-xnZ@+k&!ETs~Ow)OWUS=$Gf+ktaW>4%xd?C zJ4tQjuFFPQKBQtU*k36*9zQu0GRACD?EsM=V4Tag{VkO*a)os{Eqm&>e3~UkmT9`K zk!s=r1{S|uc<)T>2w$+1TFF+^u-!Uv7DciXm&BS%o{ra9E)=&;N0d{PusnLRw$sJ8 zl)X&JAyz>VS5&Z{T-t?Mv?6-P+OVX0d0XR~@%SD21zNfaiIUXBzHW`1SWdr#GAsqT zeNsXSR+A?p4keRa&bq;OQY~remSd&4V_>^}-`o|g6J~9awF9LT+1INdp4FZS0Q`4Z zp1-1CS?n-5NlzG!sYacn%^7oYHcltb-bvFl0k!YL)~`O^YMAT1yV$NT-E8(r6n{PL>hUNx-Pw8I=a1D z`ng5&VXiAuGxGjS0%Z!U9^rUTEbe47Rys+a?gJhRzbzC%Hh(Nfm+@9p^aq;%>xE40Cu#ovF zF~lG`H<*+xvOa00McT>5CO)0P`??`y72;%ZjI}zsLWE=?voD@ofSoN^aKDGH;TtEQ=Zh}kB zhZ;g`wzzsXxF{|n)|w}JU?kd!wobGy*ld53NFSpJIS0O|5Ur#jSQ98pZEkAUzzW#^ zs1k~=+>Jy(I-p7k_sDaYCXCJ9%j>IijdX5)@t_(1IRjSv*AWpJP7U}aBfjEobH3wH z&j_p8=kAjAqq4r5cTxT1n?CvNktH{b>yt%|kbSqp2qkFMVJMN@y&KapZA%ViBB;y5 zB?d!?M=L>z0t6>#=YrR3P@8?%^*w~4qlGRZR*_whqo#KGb%~(DYtV>}l##F{^m_LWn@13e>wyngFQI=NDd(c{jKB&-?NFEw?!J{mo?@XcV zj-)gE|A}DPlIF$xC2PT8y)-MBnHN;T%&?e-N1JR4rNee{cthozXL;jlN{LA>#Gdr{ z#(y&78WTm-H%K1^7y_t{J5k8cG-^DPx;a5;j@<3&;W6k!fy}i4?L3SP=o+7Cd|W)C z!IYPmi)ef&@4P&iE|qvcR^=Qy($Me6g1%Fgydqs1+np>~1B_B!rLxCCQex;m3;;PigT^?`|`gx8qR)sJi 
z;dD?MwC3SwTjB z(9?WjWPZ20;pyW%p2sR?Y4VBEl|@WIWnj3-gCMH`l;luBh%4mlSqJ73(89jofhpxu z`chQt$I{X>w&qlJ3=}NqIBUnU+n>1EDFnnpZr8#cCCnU}mrP4ZH*xr!Nn9C)HN0kj zKmKP@evrk%C&f-Q{+;!E@CVIQN4(;dEus7T1c~55pW#g)BEic2{mIJAT#Lj(!_e`*6x%0_LMW8+{5S6^w{Wmn3Qx^hn(`|a;Bl_USD^GFfLuS(sj8PD@95c zh={4@#m=wZg<1&u7=FshM_L$R81F$5vAn##S2vid@W*KduRWa2xPqWsLoxNPmd~!=@1S?mKfx7^h@s)5>~In+E#;{#;#mAZ^F4pSJSd>}+C> zL`d9$AOy-(FeY1j`oyYGK!=h%@OWe9zm53h9X?DmbLS&*sHdFUYje>%#@N zlHh`l!Y8nba6W}{iK|exPwI64lC-j~FSYY}Z@SZuA3qY&wTm_gHx`Xw_;t2Klw1&M zvQVVmjgQqH+55)>TX6p~u?K~9$v2tLH#W)J1LOQioSmK3_tpfY@?ig&oe9q!Mx33@ zw07QJw>$B6Uc*)5z*FRc9^VNCxO+}cU21ADJ<-ZK{``1^5*7_qzOt%GKryF(|AxMn zNLS@&W|_yU%qDBKAvF7Z;R19oEw(l`tVC#h3}r&qyp2iW5*jnS>9qdMeDNGrypl1u zQl5p}l}L~zj19C0?t{fmX>kH-Y2wTW$^2kWN8|V~G@9+{p|Y=k{4(P|0VUzXR5_`# z_YYj$VT`)?Fl$ltP4O4M0J^?{kwF@tf|@Ry$bBRi(A z-M!yh{`IG`207#MiHBw|m`q$Netw=?+?lBJRp+rDZ)M@uuF|Sz^l7tK4$K2LK;RpE zZ~clvDU1y@sE{%2m+$(?H;b8o8o(G13UB}k3Q5$B%)3BD{)J9PYyECP%6Mha>w(Mi z4(_B^X!`tEB)x?SDNY6Ja?y{`Gz|~OlMnb5`fp}fh`st99smWUhsNh-K~PBhC~cxN zwM#cN7n4W;MH^-mrD0@8@Z}D#UeBeX^zUESXSu-X@y(ODsESAC@v%)@Ytf zLwdh+a4xgG6t z^PmWMlzRp)$$9455aY)ya|k;{vOQ|PIQnp6Dkp3> zi@_8}q572>CB^(M{$CRVe5J3vvXb!&bJ>zxdJ~Zc`}AmZ-85g#&g74Sq|UzY{FSH1 z$4w;r$x84KXt)($&aZq!l=^iGHO$~#4R9`qNv5V6;{ueNbhN3sE$s4U; zt2`SB0Up#Qog$wL2Qmpw(2I*e;J5}9fz`XVa$qH>-R|A9fh!&lmPG|hBlBdnC;rAT zQ=;T3Z0ltT+wG#=h=GO@H}JY^-X-IWRM2l9O)b*eyxQhW)4mu36W|Si#{h?5B`OQR zdBGpfw>vs|5*BpZ^a@If@hOG#yz8iTHlBy9*9zX}baSqrLgpyG1 zNAE&SIH3btzz@G5^&zG&BJnw5VT&>weP-^KqwPiEa;jD1E(osX|2Rd|gaW~1p4JEd z`gbt&A94jEkT|kPfYknT&~MTbVP)r|2c3qS1x9e`za|r?5r`@@B7OagI}k|0`UdC+ z61qaulL~)Ao+N_h#uF@eArq+$?M-SZUX?wgZTju77hs}v8KD3fSYB`;E=RLT-_p?d z{viteW)k@vfw#d8P1?=~Q|Usbmr3p)?caQ#Ti@5W;61`p1&7lkVq_0t*_t=7R$Axi zC>iTKxwCK-D9s8!K6s&oj!D9AKMQ~vh;XU;3LJoF0lpWC5Z*aGq7cbxs6{hNg|aVY^hz0-Up4g6tG?qEw|a2U z|9JqwY&_FOLb23jFjp&}GE({aX)bE!7nofY!_35)Eve2QU37bpPH2m-8YPy8GRjcP z)-f1z|Ki~mIy1xOFDJDk(=MqxX0e6egJ>ZTD$m%DneD3CTW;dO0*X^)A6#(L_&Zpz 
z3s_|(*hP8tQTx7tV}TfSd!*mS1$52tsV2;hX;5SVfNZnrOAH{?;QZ7LjL9h(D>L&? z_an|wL-$NKap1L+ioK*miE#+>9@f+OPGAc%9Jp;=50AGH5wS{A69mCZtS~%FxZw+; z`pDbo{XdhM*R-0`fz?0m)chu}i~Y|e`Fb z(bTFRK2|TE9ABu{rkZt8^m;3n9<-JY43W#rP%=QD;YL=k$Gv+$rH;d_{XI3O&!*pe z>S^vCBWVYmiUbc%8$I!*N*;To$&-bvusxq`WB8KCAdxC63*w?z2!m*>2xyTvfL$vJ z#K10VFJdxAw6KuEXH7H7l7hogC_TyUZYE2b% z#3)y&f1{L1?)9?(I%2R~Q@|@#2!rHx<8f+rn^y`8^49;}JNP$*dCtx~ChLbkzpwEo zqS2l^Pu+~PYyWGSvYR{DHb1!>z zu;f+An;coLZE(Zb{Ik?q$LvpL{Lh>^DZw|4&CkQWVBl(kf~fk*sut=F_^JM2EiNk? zT;&>oU;!M(SsR?QbHw&tlfF47T$71NBs^8|&z_fg1qgN@1iOb}YGA2nFxi&_KBnEl~NbrMX|DBQf!gAo6&sW;33(8PuVpwia?6J zFQ14S&lF|L2m?g0(tKo79RjHnTpI7yJ>RCrB zq09}O1dvI2_LEonKC~vMp2C6UPi^vXn<*6%KUI5mH{7s}6_mr(@SEdCHCW zjvvi8eA@L?m*iCMK!e#sCsv?j5)$iA-|w(tkif*Q?|SB8YJi2{ap_#dftpQ7nv%ONFp^`QtIi~PwO3D1C-XqJe5KVm z1e0r!#qoP&v|5u+&;%-c7llv^1)70UaPPJN!|=!mgpUns4}bx6!Fm+9(gF-X^-~>V zU4WfwvTCH+09XkKpsShrjs(85NF~w7;4~Px4wF<>$0#QQ2YknCWCy~J5Pg(fRylum zl;wQ7p6Z8d@o#BieSy$B|uxtZJr;Z}s}!7DWI;}byJ1;iL}F(>eSq2Bg8KmZc> z6B}RKR7m0!n~2C~$H!kOw@brH>yhU}{t|Ds zvU*`3i|f;{N{<+6QT+{ifQkBl8QR?7Sbj&S@&%Z)zd>V0Hih3qkwOTBq~b{u!HHCTX}~)Grcjo9A@!_HBFjTPS_JSJ z0VTi#A*c}lGbHUq*@AR5qyCWV29G$zONCGoBLsX5rLKe80OdWSu!;be@-{f&aa&5E zCW*|u-rk1UlPzRL!{ugq;9XW%3l#HOIgVe}DKg!R#z$U=HX7!M$da{dynop&>tmrD zQQ@TTE(W%>kqK|-sR-2lm;0|4u%wS^kqlo`K5 z*aRe6qr+DGFW?3~S>-Q0G>*d6-5)NBCp}nSdO2=sq~9W(4c0g(Nst&%!u9A6h9Q-T z_!4f8a0Lj|3=ABnwJIiCP6pWcy~`AdD-1iQU3S({b*huj>~v~pBmJUQ#gZ+r{A5~A z&*s9n7!ri^4b$8snD;*VAd?eJCoMcPjqK{Wye}I>w}j)fJo0Wku-3S`0>IyemxIC! 
z2I8L#Ivo|?P~4PrxWR|9)6Xl}+=%E>?pGhI-EYB|&}LE69elj1l+5Wu)@^tFz7nQ9 zLFTZx{*UGwN`h{##MGYCei4%w7ZpCp6*YY{>~U`VL}~hKn+mnu^8qQ2S36<0w8#qy zF^ZOUp5$_UQeu!fF-b|uBj1ck<<~sjeZ6VEn15rU_w(@*0Sap^B>VQ*%XBp0L*h6@ z$bTG!+q5{qrBV;E;^gMbIIVfsUYlqyKu>$7Dr*-?&J)Ay6I@w0o=P?PdWjvA&S_11 z_}sYe7|T*8=|!!a&p_MMd=TGuQcFa?Xql4B9#a#-l9Er--tWk)A{23cFo#PeaGFbH z#a{f*279LXoyq5k5LM`XmfEz4y4^4_(SD)VEi-Ur=Kj$lJ=!%dSV!Fg@yd{00FHo$ zNl1IAJtQuHrFV)A*Q4L?rrgiwK_NyUTGo8M z^*G0(b?Fz_bFRhW54hUW;JjnWGTXSTHAlB_*zI5F2SA6-U%wv5e}+QJkR#xD)HQn# zxLoc8%i=AxVhHZzi1I4$Pi|@Y24cO00Dm^t-|<~fhKTiE|uny~x1eISZky6Nx>z=kQSYl@9X8f)t!I&LZwQ`E zzFTG8)aK2x8uqwFSKIPT?1P9lx4!rNlM8PZ-(7EVq&S%|Cu}kj(P~B+7^Iml6Lq+^ zQQJ<0U4W}7u1!tr#Es0K*9z5C3gfdq6vg_4+(s!%;ty~~D<^{tJ$1V8 z7JlXINxK`q)Iu&2J&&oM=P%#ZlEfPi7kQYzyIws`oFmoRV3yjVKi&}OBxLcxw!mR- z{;hHK37psooz8!GQdRrEt#3gk4Z|Wr{*+Cfr6?n@=I2)kKO2fP8Y(Z0CA!7FxzR4& z-0gd-|G+1WgkI%jwLuHG>KA8LQl@J+TPCilNAK}8y&G`5LCRPsPgxVf$z>Cgd!v;z zoG<45&J7VuV&tspUr7x34b^HYlwbY4z(FYOW4@}u@dG?j{=WvNjW%`EuWg+TzeqHl zUT-g5o#gs_i8fR#>F2e~028`*^PKrZ8`s}YDk6wE>ei~7!D%iKPyoycSXwGB0+L06 za*tIW{6mCwZ#7(jKKxqdgZr0TnB-rO&_zXBznPpYKhK**g3Lj4qIChVcgbUa6IGlU znl@s&ppi%W^PW=AD2|kt^bV%=;p3k}92sUln$iX8cP?wNNXK_jVKIO-oDqvVz0<;? z)^}GK=?nO)O&W!bGk&MeK6y9AGt~KR8f0vEFC^wHxlDT$ivD^j%=5zZ5WPfJTt9+& zzWnHB{mAUjy;;sT?o4W~Y!D;4oJ5fiG(Y?w>bpYNi&~y+hXyOWnLEBq9KTfT*h+}< zl-R@XKc?zoZ$Z_gV!tG?8|0Znmxg>2)g~*7h3g9nr)Fm zMcVDwgI98Ur6(^5;Gv>yB}Rs9Ou{BS^N=qk65X)&rS!TjhFEDu-_;h*%eA|DPPy-> zqKAE4hIWyCn|pe2n?`I(kVk!8K-|W!Qjsm^0G4sN?99nx)67XA`yTb$2eeO)lO!qj zaup>R#xU9!Z zHiM2Zp)}g#%nhH&X}dbq0Hcw#@P?(}i$tT{GK;I74T`ChCiMaLWpB0jqYi*r`&$L& za#5}jyT`d5V#EV>6C)5}X29M2QNG2+XuXnDbPH^b;B%)>F4b)J4?x{nU(Vn>G&+gu zzX0ZGc8z*bgGU~iL9h=7ylwILUv2CP{0zZ0{yR`)7kWv(1sS?~BktW2WuQLb52kED zS*qs-3p*K!m!mx_=%}ziyqV24cj^8yJxt#1><9~%`=C&09jX3xPMWi9yw9edE-4AdZpUmKJmRb? 
z2)tGnj-qjcrE_DV+>N)RpKFDi2r$&EUo0ILtHR$kQDQ42 zcH!bVi2#GcsLap~xWKtEH+*~P>Lf3w7x{@(CHM1Tc=LwUEojtMZ1e`F_0 zKN3a~sp39aTij0_ne}7&j%ff~8|r>1H%}Lt=H5E*fVV8q-T8F|KjvQ_!*d}ATpZ+8 z8*Ob8-iE49XQr-YD6YI8y6|AvRzI&F%lWYRM*q!L=DX&7jJ)zf)VBvC!ws#qTBpnI zRfj94Mo)g3>8>Y29z#X}85KGnXdGAh$3lb`BCe(bcNGVP_G>eNv1f^ozy!T@N;+UskY#mPP{ess^ zNc+7RzPf?xM#-ejd0o54Fh=_iQ>Fh9-X5@&H#W`!(Bfkn^)5C4w|s82+a|7?x4pYR zy78vOCrcn+PR0&jX3%KXcZnKR-=zJLNaQ2?^FO%|T46v{h#hEv03Tc(A+oN$Ct;5w zEh)4Av88`tDrExn*#LY>h zFvx#$l)tFp(|0hps|q1ECXuueVKx%9G5H>{`pxHN?;4hK~rMDJt|J}ABw7lf{ z62TOIzw_(o&o_jO0ze7EAfRj{N`OUXW@c7aP~POc7P31Cxe6qVd4)24`cPMU4{^Qe z_j*8e=w*nELJ30&yjIvPunKmQ`aLFoe($=8a9FgepOdl|w*cbiSKcJb-HtWVVQfM5 zuPkcl5ppQA{tFJn23-)s5KJ{%EQM@CZrsQ3u;|-gP8Pp}N7J>mh(9K=e^n$-C6 z(4@@&c9ufuMr4rpBUV0?22!M;OP7j!0gx9sFtsV)vKvy=Nrf#i5MgX+l_=Mm-0?m1 z_83E~o=eV$JopX%s#dg)()T@%^TXpjJ%_*F0jflkI5ghjlOz|ggCe$?pIKBt+4YFH z^FAc>xzu6hcajiH%;voXg2>uZ9#F%FK}3NksuSlEeY)~HQgbKc-ow0MQ($93@tJejZw3|%wYHz_Kf5p`mpWL76iyEP;0@`EMO2-fLaoXVa+Kw6Eb=qr|!SACUKuD z4RRK?W&8iNHgHNLS%I<=vK&8OS6AVFls-D z&qHPN322xT-=XC$1b6%>Iw%5fP)-Kng-EG??nMwURA^1eGyqwfKMPxf|9%=CIxBRl zt!(NE5hSzdwg3115m+Q}>>-#azv&lH`$2QMKVIW60R$%B|GbX5&%d#?>}&t(iT{+m zYM!ld@L5`Wi;xp)v2*h{^s3rWDqRni^%o^a)vizo5or8qD+W*=)C`~5I(ylOTodaM z0i+(NG2qX7$-y-sXK5Fe2>^Q#YMf#K4MU8`KMob*|6~DqwQ+uMx{b!|o*s3#2Z{Y0 z`_XG%eY|>1(i;ejOOaR13DqqAKgCICb0O9&^01$*O+py`rK+}zZ9u)r(&pv~`ulTt zL}6>Zh}d$_t;0Q6S)-VdT(AUkcB7zJqJ$f zCf;YC#h^@lfLN73L_jARjSML1!}6vkumNn+a^n25XuCnEwsDs!Y6}K~~XgHyGK3KWd<4 zUr};nW5VM@JPNm2cvTEM;=zLl31dn82mcj>r2+TrbsA2E1VXQzQ{sl#e~Sa2KtV`f zKrFV+Yh6!q6m2{b`0u( z3*5&U)*+5T55W`(C}rG5x0Htt{eYu;hz;Se0$ummg}gd*RYC^#*6sJ!>;+>tP=y=2 z0_-BY%u7ue;?DOlDFGuu|FVbo-VWs-+(Ul^Ui}BwM)L6>RNLD`cUq$s0w7;9 zwrzJsa?fcqV+>~%vw47fj}NLd5MS@DR_j^q3DLvyqeu3TlU{)2c8c$RSN?+QACdkH=WK7VTG9ffdL z1PW!M{{Jy4mb%lUa_9@`Ow42YLhSs6f? 
zfO-Ix!sxiLB8Zr9yfQE(30?|Ccu({b6UzUv1Vx=`LhAm$q`-wg*5YrC9{4!>(Ulx~ zNnBhpxvTIs67mEVBF>D=Pr7Ij68!&1-J8c#xpwixcN9rzPzs?^RE7qzm05B$hziM= zA{ny%!C7HrTY%B?s-kAb|Ar`nHm4eO+hv+aw6 zjuyVhl>9)$5XnqBG|cF3ag=_!`_l{L7xGg_IO%S3aM)?%nf+3lFl+q&Htnw&x$8EF zr9qiAf3#q6boBFp3)Jr-!Z4|Mz=G9wyVoVzY^iL02jRxT6JN<|^ppBH-=&ZAvsv8g zuOGG>DGN&*EYQu$C~(Mb=HR*H-sx73qyclcxsn_^F^Vq$K_bhiW6`Sw4_$>|;cIHx zHejmzPHYoin)l>US4X$v`}_NItMYHU4~ia5dlpg0X@5)GsKKG@*qmcTV|G@6)nzCf zeqMIvU_`|l$JJ|?_FZmKF)K?PyXe{*w`MC}pL9c(1K3!c^i8#nl4HRLR`5J>a+^)c z6x2Ryqs^)g`|!5<_YWZpe1aXpmowXmus6tt-lkH#K<9M1ez-XW%|ambKT}ugzF^qy zt93c%(}|Vl8#o>B^Z0XT?;$ka*Di6eovgmp4<~F~bnGNn$b5Wqv~>8IwBve=lm)8> z00pFm2QZft?^j4EjdEhP)E4q%CnB#mG$72d*a*p4D7^-FyPgX~^p`7@T`F#M#VvGk zJ)FO|+H_{(`o3*8_YVD;b#VHRpw#pc!bY;h^GDj7ZaxjoH|&$pl&I{Ay!S`4_Hd(4 z{%TCAQvqIziXcRU%zK%U*c;@Y{-~ng-C^8+GTHm>(dg1XT*2%D#Z|h$5q=9BJ>{p> zpjcqwZlySXxoa?1t2ocKGteO;vYd@|q@dg|IcFsAx$hQLWk);ZNT@^wsU98#aobfw zS2gXEwZ9Z9nzccDhlgH;DV?$uwG+wzs`nL2;%>cq^l9Gf$>xras~kW;^802WPO_Vg z^e!iMt6^BvX9oQVjoyCIpV1(<14wTSQdNiD9$lf6u!x zM)QQM&h?EmvX2w;K5T#F7|3%9s5>3Dz0i(gpWY70ur`|hNDz~<>{Nqg3RzABjSFG( zgFfgd#s!l`k%jD5GI(N+KEO)& z*dGj1WKy+My7^9-Y5p+f)3u0gX195nctv%IDK6JaBkZl>*Y&UIY-{%3mGjid%`<-% z9$nkFOCv5}bHiQe%$h?m|Da56GqGn94Pgu0_UMP=GffC4sLfN^@3sM48aU0Ck6Ipf zSSIK<(Rz3g0^|UD_r))g*_7)B3|0DfcjHeB(WZ9!wY{*GJCq%&L1W`x9h{%xBTK#% z`0NNi}4!LXYn7e5qF%1(^96|}X3+a7@(SCmz8 zZL{Qstb({dwWTFes*>+>hn)vqn00n@>`A!BP7tT2vdTl|bjT)BOHpq9B}miTvbsm5 z$Vk|@_o_a=wh}g#y`7=)O>sURJ$o|C?;Sf}4r`~3VY)U#npBD7`L(C!`aOF|0X4AA z>zqPmd+%?g8@!ySZXGFXW-8C}n@}$Ao!?lM11+MSRa$!(D9OI}7U%6U?i{%QF6exv zjWoLvA{`QeuYI}7l zXCmCV2E|ZDMuiFYx>|pK93IAFazSuu!M{Og>-AJe<1S^Hub|D;OKx?Olh90T8yYIq zQh72(>1V$hs+wp4KuPLmxGY|HRUs+;{F zxJZl82dGBl@|jkh5%GLy;ZcKQ#-XVaj3$A*hq!43b0Q zN@!){%%(Kq$&&}~V5BwFbZ8t2WnXAQDX~ELgqg=CVS5>^wJ3`S6=^{co-3^RMC9<$ z<-o~XN}-rsi33=(Ng-Hu0ONuBe0T9}wGO%tC@{}tQIZ*$ieLEjITY)D0Z}8QzKs|u zf5LD1Eu8q`^<61x$^|A5UYuR{h=4dwW0=Y1dSsLz6pw^DptlnLlF5L77{1*3=ItKxzQ>sZ{z^}T!EmDPXoo}*{AYMs(;DJyDY$pcW)xatKFg{~7hApwL}m4C 
zApMY4sZ?$UMG@p0FT}%_JTd(a5U>#;`mRW&@SGy!v0V)mCu9 z^EYoZ(>O{-A68h5E#zO(m*%~X4-)gWr?(|myo&1l&My^FG2@wxq9ma$+c}AA!L$a% z898xw5EfGdV5FR(71z&O7&rJ*3;;{}IN*5bT*pac^sVmac3h{X9cEr=d1Hm_^>){CqG3TJrFw=-ngm!&u_0)v}gSy!n&-Ad7%? zfq(3Q6V~5uDa)$%UfDsFgy(N#RN#@Jpom)y!ex*CC}EAHx+5;yuvDx*v{#E5nCg{& zl7&c5LiYmlGw}Q+z8tLD+%K-}#V-j;4=|%i@m>7fxSj#qGztw}J%ej(q+N}4V!zmB z2VVqb%+2k^>D^+7x}q;rb@ zn6@)eU6xW1#^HfhNy?x$_caA{3-9;p-W1E{DDs?f41NBw(WV@y>kQZ&`R@nQ;zpP>WWn$0$(>iS(HB^OMFa1&(Qt!#W98 z=t#$pd8Yd;f&m!`PLLCY?-OssPo0$`oJdmMLrsS`mA#D#<5&7noD1y9c-rQMI{ztt z?(zn|jnF4$p-s?xy4_#Uo3IcIGe90&eV2mU;uUkSgr6SttZ82hPTl8O{k`0)2k+{?R540j$06l3U`raqv1k>X{nYq*#PEC_fJ;l3`u)$Q2})1kFI z>&yEQh#e%gwGu+R=hudQ-eTu30gn)*1UwcigU9k`JR)SYgxyx;f5tDIN``vY>FFNE z{DeSF5WFUN(175S9>tu&?4^>C8G8)hQR0wOP(b@gfN|Biz)rATxKUXztg#_)rB%nS z0EJu0z9FxjDvP1_wn{j<<`=`ix5|T;*LpU*V&h=tL4-ZBSNQ(qTef^(WbvV(#p9zO z{a+UE;@hu8f=%W{#+(47d+YDJVbwsZ9VQZnvjN}-T%81xmmT<2JfN-WS7!X8@=atm zxvo7ehk8Ebl+nQACfe5}UmU*s7}4F8}$|1~@PP+W;$BfNL6dX;AmG@-^v z8hiDY5M=Nc7f)GHN-zF2n3Lh=`pBw+_4k!Jts0J;n>;2_Z$V@f5V|_o0Wdp^1s0dBYAm z^fM@rUDa^wHZ#cyRCbSqN6I!glqEnEUVYdz6gq}4K$Bp_h4EVGuilgb8ubf_Nq$ySce0|Ha=wNmrM&Cl^b%&?=k_Zg?aOb?Qm|b>JwXNIv6i{ zp>`z*!tFvB~LaI%pGRts#qKt4NiBm(#f@3+d1Vs2}R`V46> zDPClbK+pwLR76F$r$U>(E22feDCM_=!D=dRT5}a<5^S1{v?>pp&Tw<`E9N#N>?A%J z=Z}27<9kevKbt5PA5TQ1KMdKblu3#v9z$MVf)}s@D*(bbu|aqRRH!vOxCQ(YU&Ih`ME`j zj&>!oMQE`pt(wj*4H#18p;+j`>Y=GlzgR~TzkmEP67$Unp*5$Nz60#nLtzcrr{^Kh z)v8_+QHLZU#=iXr_y{i#fL6~1g9}@4 z{#pwR0R0fUr@ly!gK;jcw#o$mnpC$29v0(20p4lZLjBsk`*D|@&#*nn_~AdqtkB5x z(;%+fj;2ACfmeFrs_+n@zz5~DTS~s~tm;Bfy zf~0$tYrc^a{U|WeC5dbJ>UAIZVA#V*=2wRx95alBefeuHu!&H5ZrQ}9&XI=0jnFQ# zW!iJ(P*xhmZW1Sw>VUI(tGmy6JFH?+@G1h(rRAd-*P2vG4%SE#5Qd^Z!3BhA48Tac zcQ=K(p&ls2R!2-IN%h{S>sJisP2GwUrpV|$b^o~3$HcfSK z%!UdG_tv2)R=?u%K}giCZI=u>`b-mwrpvd7$uwUWvyKqwG30(gpMhICvCvn-lie8} zaUH}N1A#m0x!&!l8Z#^H;7yMFGAq=_lU3Wxo%xolsLRn-F8(@STj{9njU|Z^ZCy^i z&}_M;(V~#DEAEjKenn%p7hApOUPu^w4TxMZf8bg~#)x3waC`$U7>@egR_f5BLfQ+p zyV#dMkb`K9Mq% 
zKVr3+kac1OCF+lOq5wv-6a^`i=T@OwL4LIFsGri*A4Zh!uB8k3nnD#J2FGhq4SmJQnnG zRPbK7P%|XiavUaV3sHW_bm5+K(K+)`Wd|4g!6L`kFl)2<1?iU?OVYcJWsS~kAZ_Dr zE1PAw-t!BQ%qFXuDT7(g%wpALFMV76ft33KwHh)LO z-S&=|dSJ|F=3Etg#G0?BXPY$0)Qh><`VHvK$q$u`e)Dv{q?jyzdFGBmWuA>~2Pd_=K~bcavC8A1-gUSlcXa_eQVRB{J6C11$nR^v5`rw$&pW5r$46! zl#LZM4+vPlD+EGqBE*Y6yS^_Vclh#`iF`BNz2EX3fVY9s+?-%&xU@qXiZ3Q}hWaDW z0B`2`0lmBGRl^6kq4;LOHa(T8|Kyxh|4wSU)M@_@vHWoELZY1%uRdWd4(Mp{1x}x* z_`7dYl@{aO#_nnQl<&J*-|GyDk0`o1T{(&lyi3~h{<2ix%WUVD)a|nei z*#f(YLrw4~O25VK&ea1Yo9jo#0;hKyoj2LA#kno5raptn3pii7s)X9!hgNbA5B`WT zywJ|is}G(Dxct-ST!FyTVlFh6)Yu@ONzr7Q;$Jb4ITmQ^Y5@(M7>@?GQ2Tv|5$F_u>wP@dfd4J<(16aC4O2Yv7Dyz(!*p(-Qsd zhk;YJCl2|GOUfko}s&?&?FA1<>6 zM90OA#fZ)EKRQ(Tfm*`PEmVNO7BF837Xu`urUUxiv@r+V;A%7XmoD*+2dB~XZ&ZcO ztl#FqaqGIhal?RdJ~v-m(A1|ltcKPxViTDmd(W-oeEqgyHPBmxYKNWs_DcNblk(Dx zrjpq_Pz@{%Wj%;X8vI~my~>bD7?ZKvFAz*EqXkD4iC+yA2`OXy%&B>Z%SiJbSW$VG zwuU#UT?3*9{8{AS5{nm?jRxZWsOJ>}hFZV%O%QOW7 z_*Ryk&Woc3L%@X5{eTH`FLjTi!+k}nMQJ7oUc4&no!u->73I!;3!K}w!flt_Z!h-v zg>M{ikAgpJ%2@0Z$uz%-%!kb^V$&7_w*}GznKvB;kMww3c#6x+cOc@_-RSSU8Zm8Y znK$?JgFviAwbc!(8=4CK%z4&$7rLyk=VCiQ6EP(rwfBs=ObJwa`zirE>A{Gi z4XNaUicyP_XR|34f4X{SEcBE}cd0NpXGdx#^!e{AZy+o!Sgc5~P4zzQ@-)tS>BL4p zNt^Lz4RKfR0{u(eY9$f|-Q6f}s`|@qvWx!$UebjI4&zy3d(-_Emr9P8k z3q7)bhx57q#q!LILw}FF+rguKDZmZ(rT8q4MXU0pXOI^L?18|6!X|(rls()@;@q`k zvDUAj@8~UFwUTxLlCnTNK&~vl#*HqK(Rd1aM!Z;#ou>Dz?R(3W9r^JDZ(mqMCcz2E zqRUuHjhfDguyeS1IY}2!)=J@Yn;@dw8ldyAgaE8blGZ=K+j3lr^k*^eLBP%p)oZZ5 zZziuIu|GD4`aqNeQsMw*ofuU`Z^ik0$kOiJQ{@En9m$Lk<(H4pFa!|`HRzL%!I{cc zg-1i_s|qK!8j7M{TziUWoi#?VCitJ%|ATwS_P%Kwg6KpD;vBfn2O?n{qC1^u9-MF< z)8@RD7%3Yv%hkKimpLC1cjEk$49LfE7f$rU4g# zR~T*eJ7~Bty-ajGrvIe%5Yq@-Js!3UL{vKQH4WE1@Ob9Smy5G_`3b!O^-^yeO>JZC7Wfc&WfSXT3Na3NWdWiNmV?rQe&AXxY!mt3&{HQytN=EZ zLGJ-AaW%2grjqWY4a~oK#^$Y9D6WNT*z;W|AP#w@#h6EMf(Ujm_X$Vkq<;e#6e)0aT1Th z3I{Um<>h=@(Vd*MHxv(C?IDoB_#Z~o*}E$e1{tyJiAv>Fq1RYJDl*M-Wk^yP?uPUp zntVPKYp|4>)Fg<$YaZCSY1QG$Jw2>wi*vb!)v2vBS#e9x+n*#&opCNo%WGfpU#|D(W|)))Ro9JuyIcamtym 
zpkV0s-$dYpD8GO`Ji4OI=-^x4JVfEWA_1kr&E>L2+AX6!#`17AQt?pCKRapZnBzaZ zPAkkDg67Rdt03Zn#DmuxJe^Y6d{k8qZfvzMiqn9d&N)_;y&}eCnG|ljC4n`ROD4m` z=x`_{XiTAN4iD4y;SG7Dmg!a&Qj-QU0F=3B)KdBxNlE(I@*2dKXmbN$at2XM_=~pm z-Mtm8IT@0otYWuSku3moVe&Vr^*8^Ej1B%G?2%>7+|JlY|K|0k6&MU-9h3y4jgN4W z6@h>D3VS~U#PA<#w)JAnw9|iwm_WWQzg3b@y$V}*>c81}8g^Fm4p8Hs_^f=gIK*a5 zNghWeJ7vZ5NW+5v&$+(;MR@*(k7p9p$3@X&lNxBXAoA$pRVc4gT;f-&IitA1owsgz zfELe0(IJgH-v@$MYXhkMy6-dqrgeG@^dWABm%F+EzW5ajIm?pI(IXk*I+~AHvbaq> zo>EUu7jxb;Yz&^dt8TyJSurKU>z&TL*EPt zoj;vmF7{Ogdu>JmcP6Xz`%@4_gte z24Muuee65n@wJ!-T$)QSGC_9)bHifq^4+)Vb*-St*gRY{cG{ZU)|T!?ZX0qw!Z#(p zWAmhddskV<6UR^Hh*D?~4bYX)9`Q`0CLiVx{3kJLs#kJsdQ@u0*B0V%Q?DIlt;$Aq z?JvE~0jk_=E8L^bAS7IWb(ZQy?VX2bkvlgCtOrOHs4l*a-_86wX2j1emXN3=+?NNx z>`FCm*F4Ps8{?)?4hs@NNp+u5QX@498Zv6$=ixka^p3Q6idHfq)w|Z`fC1V3CHWQ6 zs}!1Rjql?QncZh@f9l|ctOMRL4Sf||+rNtYOW25wcHZDDnu~EjgCU{J4-Ho=5n^WX ze6#D$#5L3zvQWLb;f+?-VVlTqNWZE{Q^Gd;Y9?e9b&jTeofFRDg5z4GrB|ScgR6Y^ z-Kpk3YPEDTOG!h*^q54jbTcAw<#pwdlJEhoz#{4tK6Dkb;Bi1xF1jDQ_H?&~WBE=M zKy+g2+r$(V0?hKfF1IPg8iuxKzVfS&GSolYPV@*KYYH;>8bR1B&ssM&J}wnq#qf&9 zz!vqM@tLiSUgTDL*!C4&c(l_T^zzaX3D*>=iAw7r`gu`CyD&EOVoK@&w-;Bbtt%As zig(YA$tliiyq(T@v9b01Sd<@5R<~Q?{!nz0wboJ!@+KRe7o!Z_g)l@^e`wc2=BM;5 zi%}JQ%hEh)&+oqh|%97eQ5cxE^jyim;*ty5c|tRAGUTvkURv!d~+=XKW= zU6oZqpNU%cW@^~C@>|NbfYM2wJmlX4eL%{a!lV405IGDgVtrUlup23Cz?=hx4ryC> z!G#_;QSNSCfLgZdARN@X2rEO5sqo zukr0dpPh)6c>Ysq*K&R$BsVwrZd?y!JbQK;T<|4vM)xDzvJu)mn{0hMZJirgocEaJ z&PKNZ&ohP}wR>aeaM0RJw#_H`*bZk33o?(o@AVQ~oYeHf(~SqtKc68R+cAz91p<_!g} z!sM#pY$uSsKDm^db`3Et+N^!bOw@%nA{Pu|3CFZ&ZtT+eB#V5SjqXi;m}FdifZ30s zq?(BCv2CkAb;kkf9UYnd<_Wgu(OEdpDxf95Wzf-HIX4kXFhG=0Dmi;yjrmUquFh@_ zD1t^9KC`z3IG+RX!#gQ#hv$PyY_pMuwjz-JLD?EYVM2n4EvsQry+sF{IyC+@eYM(2 z<@Lg;3a`n9Cha*KSJ#~QsRamN-n5OyJxWsMEVuc2i2T>;KB@vN>cURVHEgmR9Mc&xtF-{0Q8 zJs=L2@$xwRl@4j80~#CJsz6s^#?TLdW|x}A!hip`LZjosec98{`Hq1+2okY7njNDtzUn*{VWqATFVe++4TC!h`}lG#-629BIKUkt!X=7pt)sO6vB zzCEgaL&*~KwMlpkfPlDPCq60SGD3e;x>e%v%_MZu*Fh9X z4hekpUkN!|4|LUOEj-i*qGiO*v(o>l!!yGeLJ-hMeD6{6ZaG7OyT3`m7$$c=Jj>CP 
z095)@0sxVSXF5aU18p3D9O^&^i0%o__s)^4g0&uzBoYbVQ0FfN`W_ESLF1iI2m&*I z{vPAHx&xTzg9X<@hh#E-t$#>3CQv!UIPeMC=v{sKFO>t_Qs3e@b(Uhvy>A6hu*~jcG7e|umWD!uWwSTaXtc=Y2VSwxdosT zBvg^YsRH^-aL5-%!#gWkq*uf7$!WuCXoD!T08Vt%VRrM0|q;Jnj;N-(tL zlg+NN0skW`C1M<`!<;4K?y>_$l}t8-+F8QjeB2heXABsgWTwM3?Hy-az3nv!`qWfH1@CHc6&o=NQYxKQ-Sd|{tn(Kqu2_q~Zo2&VL+ z;HCsD6~YEQ7dDaYa3epG)^>2B=v-=X!2Rq|NyXf>M8|>K^+&b`#l5+7`HS+%_i`|K zT%h`Eb5m|Dc=QF(0SZM(Ef=-}NAW`i-K$bXVZt?9rVa~d*^-Y_+kL)BixOY)t<2^g z#gk!o#PfN$va%0ri8%0!EGOVm{tU=Ig-4-sa5u~b79-hJG4{_EedanBSNKK0w=P?0 z9!PP#MfGBy+1-^y9a&qleH6_zfL6i0iz`={|Cg8t5_L*%HOio!#8{SBhfiP9#7vH& zKydv|4&$R!P)oS@Fz`}f)@`)^>=J6BJ7e*jI3f98-1JZ3RJdj5XSQY@-G(7in0A=y zo34AE%3r1d5i3J8n?cR($B zYo}7lw0jhEQ;&Na+dKd}Uh3 z{-v#}+T(l#+7*FLFCGZx(VX*e%CeKp7O<9ohnmP5T6Kf4gwp7K<}$K2Yeji@t-U9MLY9fBXK!R7$*Y{9_2=}0?wcv;Re%ZR5v z!D>QpF_TZB_MQ{tySC{8zd9=dGcy9*59p06w=O`FzOpN4->tG~gEl{5@+Pc>|HAc*=@R$%7Kqr;1(G}1eVCmuq`(xJz`(AO3Gu#}-M~{SY9Fzv-8^gmy z=oVxt6?71K(~p*gfy|Hv8WSU#A;{|B=*Y zNP_rF@zt{v=%}AsL-N!HM4P?}6{FrLjDlUEa3{rzbneHp=IPZI@b~jkn;KSNraZ8i zrnQO^S7`SBz4Wn|(Qi}CN=pAj^rY04JH#se1MPu#ZtL+{yp;`9yL`2k^>D?}sog;mxB7VW2f;$~w-yM`d2 zoytUy$&zI~JUl+m4LmJII0|&LQ^-&7L4YZPdSW2()|WyLQn?OG%B=@Z#2}y!>0`OA zQgGJ<9|!*Ii|F9VRSrrRQ6u0}f2YKo2f}6R)tM!3M*1D9Ljsmtuh1eQIO&JoM`$Z@ z6%fTl4TwtPYY28#P$#C&&ylmvk+bsr`s?eESPi3b{1UE7P|ecr#P#H`?*o#7pELNV z5RvUWe`Jb9hHP||8j}A}WN`s1k|xlu;5_Ds8l=C(=XjgzshCOhc_^1dYQAf%5c?KHpcd&^2zt0<@8nK63xE`H@FF3@op zI7pRnG&1v=3_~_e09-9%NdSWEr&R9z2V7kw+62!7fsN zOz2&DozN`J&hK}mg1y1H7Zs}W-j5rDw#uQttVa+REEHqnJ<~;Anj=S4SU5z(U-<20 zr}7$Yjwf6s<=+_s4x|5Am2yca%H+W_!dTr-M&)vM;A=>dd;n>GzZ|Q%x^6-G0C(0F z(!e0(eeu{svog`I$yXzj8Fh4y>l zv6_TxkX)TNY}=p0Gv-N+)X_SPl|)`63erLaMxLzRN*E10yyZM?ois%k$7(pTF>H5U_9!G!`H@ zL?g!#3Jy3!Yhw|yivLXB;O{X}D;#8=Q&XdwXBc`#>q-}ht7k$G&(@J76SCXWUTe{I>n{=F%YIM&eK7<`F(Z3Z9j{Q z*Z32lk=V5HxL}J5CK5t-hm7#j?EE}`>K4us;CsvicGJkSdo5!qIQP*mDz z17{E*GU>=ML`wJjv9SI?+Q8F@Qotf}b&~*7L0x0~ANzrGfWlJ2hc2LcbR9q7+Y|!% zqGclJIYY8tD_YA5;~Jzejf?n!^)H^;8}=PB$ryH3f)PbWo_gX!9M>0)w(*sR=9lMi 
z*JP2g6(`qe9FSPV2x)Q6v>t6%+$+Lrpq&C0sb~NYX+0`%B~tb^RMqq+P<98NIUi$` z+w7BPrCWku+RVP4m6#fi$C%-`^n0bl_$y`ULf+|XNXYTV3&Cj_lkw_k}1HeAiW>@70|@N2i(Gc3Kx|`)g$$wJoJL< zig4A+;wQ7CT_;hl5^nV$HfcGtv%IB1(#A-@QhK*}JsbQbQiN~)BrbmHHva#?W*w&G(6qnrZzKp3S%bptM-nixIg z9@W;`+U;KS*@t)yvLd^p7fZ^2W!|C4gCQ(NgMaj|v4Wh(!2ByNSsZx=S_WXltLs+j z8a{b^d##mZP-i=_=k|VB=F53lJTFYz7Ai9W?e9m+fFz&)1Yt)n`_cOlV(xTadMH!*7cVm5)dM>ldlhBo=+y!lpS$~HU&vL#{aVun5RD*jE+2XK@5ropIc1L9 zUkWFv0kb~ke|TNEQlSX>srS>QGw(r0Lrw1tX8-aul9{IZ%k3E)C>1QUf9jKPKR9Q| zUBEJ7XHi1988vZ@dJ4UwCDUs@x|-|+r7$>q#Z(W@@Wl*V4d^C$Ptz*jC&wv1V2viF z1%k)qBt;U~fkfE9<;@vsh~%JbE!{kA)DqN=R!R8(vyb1e_frR zkb1M{0Wu%=<6doE3)7JA$n_T$7oTJ&l$(g206itOxI_1<(aADp7QKSiaee|cXf*#EaK1HeK znsAUIyu2?9QsAJNryIhe8wdEk?PuKpvZ<<@XQb(MCikKq#t(6Lgz3 zzy(+fKF=znO{~v}+RQ%`eY)W*-!VcVs2OqO%JlXfMF3HX@FJh9lz@kYSPG}Ahhurlj0&`zvX<3neHo>Xs)t+@fT!rA~m(I z%*=muzFGL#X}-(+8^POs$G&C=3rEL6#F=(_|wRViUewA8waryXU`A4ecrm@*B3%=mTbocd@ zZguXO=O7J zdC(posML{Aaco$zUS8o?>gpYT!g)BAHJl$c?$;idmC<^4&iwfS zx?dT_Nr(x7Tmm@)E*jhPxN}M@j7+Bi583_Ey3J3q-I%52-NvYIV_u?W*V^V~7SG!I z%eA^~=NaWqG1L#+z7n_Th6!iVkZq?eeQRrro}^{-Jg>?y+-9v^&^+dCc!_e9eoeZd zXYztsS^6o2I6i=(s`f<`Cn?aWns>~DGpM<+?bLL~W-eP+rBL$;lqiCOIT7e)9k^qWDATe2SqX7zk5OHgMLAzFJAX1vJdt>$SHI-Xkt6lb+BFMVs-o%Y<=;poe z$iw%rrG$kbI7?M5fIm+Omk)y=vq+r z%{JiI+{ErS;N>wey?Z0E=Ul?Irg=8qq`JUh+vo!~wyk=0YaN<}{TQ|DY}E>)C|Xn` zc6a6Pe)vMo})zi`8R^!`g$z{ir5#bjx2mf32lR6!ras{qd^s2cHtmRw?dHMle z()QpJ{&Q^AMcdD)U~1ZsXw}u3@E7qp_<1fGPAKeOS|35aHWm&@oms_Xy zT!wsGSFEjrC1jy~0Xa$AGXwSm!j>yQHZOHhf?*3u&HUOUONXkvZTa^rT%qVF=T z4c%RGOD7Kxl{NdkmMNg0DRcjFeV|-y*xyCNaihm=Ynm7IAxMa5vE%W$%{4O6oITV= z_F0%cJSk)}U+=R}A1-OKI9pHtC;-F*?Lx(xg=TYXV37%s=T&|V&Eqr}FHuOK_{{bu zpn}*jdXBaSc50-=S~D$7sG{b`*p@dhDMIcG9`jYfOSHSR@$&Sw+og8B zgZ3KM%>&YttCf}-4B&I5U`HRHwr_4%XUOtQJ@H~x&rNGF(xtz~diDg$tpO={`-hw?e&R>1bT>thu!^l;n*!;Wl zSR@oURE)_{(;NISuN^3!gYQ+*0f4J%7wBGxuSoG%TkD7A?uHm*KNMm#?8Fv!;-4!$ zQR|%1C`^`=wj2zWS|54VsDArRxvB|R`_-;DI7ysWP-I8gxSjd-2^u`irBw4R)x})M 
zI_4;V;fD0*CY4uxM<$`flK5twihfvH0(Rp#7)45B%b&1t39sQu9}mP2t>7?LV!gpB zQ3HyVrI{+9k5LFNxk>7wE`wy~)hDc>;IrtNxbf@tx5ey)&`-s?X0v_fvb#+XL{^f_ zIS6+@GqYwNk*)tNG1q%>hE86bNP*vjSL>lK ziN1oR%Oabvdnsgka-ew%mN5vzJRS_9yGNV~HC%EOc^Y_0EiYIQF)YF9_GwprrxFuE z0A7@sDB_xx4Xs%!6}S^d_Do_0KhTl)iZF;4hXm4K4uCX43j$Wo0PrjXzum+Ko=Rn) zONU-&RbygQO>YbEKKAp{r)x7#zb`A}H++Y-MMAXq26P?=De$rR6YF${L1ZpneM zn^9$kX8F(tAze(d+55iyU)SbA ziU!iRX!u!(qgfXWV7XvDF&cag7oaRPxYqMP=AMs>ITU!H>qc`|sI0FO6Ga0=T&49_ z`uY`x$oAZaSwo|VCz;#v0aJ=~HbeVq zLeXYi6c4Y(d6?NM+tC;{L8+rmgX>?QfPZN`L6KvqykA1Y&a4XVf^sVa8Dgv_`7ZBTN#cXm z$R#J7<1Vqvyy5MGT4c$ct{n^h^Y#2XkCJMgRDeI2#xdpd=sP5Vec;xsL&2#lrGVV3 zVdwr+Iy!}02$tT5pPWieJTmrMSstv~NI$H$_dMEM%+Qyfnto4ymEYsVRtSsw?-K4a02tPj zCI%w7K>1Y|#dP4&_uZ3g>n|4?gMaK1#E}}-Qg^+Oaa*42eEpn0J7LZ-V09t=)^>8m zZLz8`X!wmoqpxOZ!)2RqbTcLA;W_t8p-G=v%u{$5Y`?XttDz)i{L?DK>x=;>gaN0I zF+-3OW{w-QZgk7E3%t<&Z3`(c`NcI~)iN|0n6qB}giHQ)b5us#Hx8QxN?Isd8EX-Mro`e-0}I*((l!bPSpOe?>nca=WxWPIpfUM*$=S_s(ZZt< z!JAOnJOgm_s1%H+LKzTD;cHaOY4D6I$N80lSLee+8AdJ-BTU9RmHzdl#Tq9}eg`Ua zBszf2P8Y-d5tDn=KUd0QbJ=H_j0H^X`Yd8V)B2AAT=t6$Wq8a{{5esIi-AUQ50CZy z=g5UcF{5EXQ?Aao9X>+i9Wdj7Cf7@>Uo zF4m%TYcNXJ8uVq(UHDuR-?)}?dT!?i#mixVV7OlfmcF;{~6tHiF0@tOPk?K$~L-7A!wLtW)UAiU%d8 zxrQLyU49rP00sX5`*@^+IVZr5BNanTX|4}my=Uw4)`XtdC1wYB@0)SK50ox%&7AW% zY`_Z6pgjkhEEV6r{_wV8B1CO)TLqw-#eW8aIE0h&8~kcz@Zk^Z(BkAjfv+Cg1v9i3 zDQ^^SN@D(?0{V+h5RZjJwq3p?DrveriFjM#caE-r7Rc&Os_H@vvzHNht!Pv0et!fL& z0-Q(3D31Yl)wob{&+C9aZxRH2u!Hl%fpPw|>e}38C)pqpfka#87(L2aPMCaVdp|kk zt_0)~$C?^z1bTwe1_~^B|5zWxX6LN9a>eJNWc`x4wGNZ}* zQnrhxdN)l4#0)Q6g(iiZ4?w)!pp5&$X<~fapD0$rc02$;s-`W)!J5Vzn-52-MN|aD z-H}JRGQlJ<>bl?2mcx>36B?L@f3LI3Y_u^uQ=A5sK+piMA3jA%HP?TMcH1QooAiLl z@79T0sYir_w~W3J&301T#>%4)c^S#&$S-vvpDiDtp*6aG*dRA@Y>0wN3K+K}CR zC`$_7L4fT^)Kt5S)n{ z4>v)(@!eek?8O;aaHSVh>Q5Aqz|9)I!;hYeFz=5Oy~!n^KXw&gplrzh8a`$W84Fk` zcyutK#NN+REjH2vy6je564fE^d?|1-N+ujciO&|eRsi(GSAZ{QMo+&h>*%Oy11BSO zIz;#htQ?NY6wv@-4G6drtss9SC--L*3@+)9|A>xjoT^1mkHTrMu`|29#wKQ8&M3_k 
zZ+-V97_=AN9A|3ZEg(ZqtYCz2*FQT|pFBMc2+^V|Uo^ml4KN$xqg=jiXN^fr1VXxd za(T~X+AF{pg~?uQpY)31-*Pk$vMzjG$=);%3QMnvh{^1;tB1bqUi;M5UhyNGLTr&S z!UDrK49pd>C}V{U#Rxi9!b*E)lWoAD*gn)G2W3aS6FRR`Cs1v}N~{R%nPc;Fxf(Do z0}*2VJhG+3-YC5u-)8b;6Rho+@KL#7ga{6@s%EwQUTwL#9{Qod4);0bEvR5=CsVh& z%Ex1-iKoskFx7`168=qiZTFZv*%-DO77nrTy7vIKZUL=6D{)f@nqvR>U$%`0F&uc914vF&)djOOD-EEpAAVHxx&q97F>x%5Y#u|)WFqf)d@gAOkP(&|GDh!FIMo_imKR)68pd^l441L>z@ z+I15FPLiLYDe6Flmg7%g5d<|5K*vFh98=5=n-!T+N}3_CyQbKWysxxXzZ_XI-cjQj zzF@#B*Aj6IOX@?Y(e#cBgxjPf=m_%DW474gEFU~5X*)s{nBjfA1Uia&%Y$cCwV2`C zWgZZ>CZxggbVG^o8=R&}u!(bw5qkgI(f`EPSR@6s5`W{sJTNCmo-xb`RMY+)*h4H1 zARveLXapxlp+STAe_kyw8Rs&SNwDkIM*jmIm5q7AISdGCw6<6Lh$}H`4-qGd{u4W4 zF@mjcBp65trFOv_h<1(;kZ~h2@fIwPH-rIZTtxZ*9>m+0;le&z5#@VwfhWKy*VD7% zx3IMjbBc4p+Tzogqsf>}c(j9zNn6oimyKIWd~Q%61o?a%`6;0|2u$em8|L~F;U^hy ztls{M@xgjPbSy^2?Ue{Hs9||nC7>9{72Mz;^UbOwe}}@1%)?qGw7Lu*iVQFfXtIt6Ew812 zg*3i>(9KmhNyo_}^CD;+x8+qRXDGg3jDOny9-`+_v9GBq_X&`>$aWnRT>Ab=N9VA5 zk@WY)750h~!#JysIoFT#Lk?UXR>pXXWA3N1Zz2X}djiZd8Vi=DaXb6;Wkq~ITmv+` zl>rG8bO!SI3tZ4#bN649r4<&nS5`ee2U0qz)6sHQGB1Lk?5`?Vavm5!pmXN2Fc%{N zTnhD`Y!C)TLT3(cN^c)BBL}r~wmAqd!Kr+g<&Apoo3d{P+3$E{h`D z*eaN#g#~UikpAD%SjCv(%P=R9ip~YjwwSS2Y>m?6m6}S?O`2R>ID2!q)9`UV<&E3i zyt}<1i#6l*vOP0FAXN}zvhfB9JVL604UPl$ONGYl{6q`e)iF9cIvJOWm8BypLh2Tz zPDdyyjW%AuztTie}GRty5l*L_V0okLOxx(3*$5alJd6SZ^z6E=|(R5K}jFF+0`kda@k z-0MGi7YLT4ikAg{83NLGv!Y!rST!7*JDC=C2xTG^fvsuPVdqvG5teqaT@KM99KVYI z5`WK3*qVhk{QU_W0G}k)UM2qc!~2#uo(BN>DY4xSio$A^-Lsn>Pp|&vZV}D? zWjW`gi!b^V07VTu`Cqb~8kg8RvVM6d{hJgqOo@_cB}-2(X07)F^FcEMs17&95091~dVTNso zhJhOZLYZzE=O;daPt^(zAA9-|GBC}Tq1$k)+m~jtt*vxKVq!qtZ;t>1!&wtjwo&8v z69l3l-SNkqzt2CY(chLaQd$CS<@_F)CYF@sJ`^ybz!z;?{@~IZoP=PJ3GC~a*^{`X zrD!t-ABE0{vR$I1Dp6Fsw+KZ#!FZ@a`~wy*4oudmh%)#ML!+u6#LAV`V@@4hF>PJYu9H=-h|HFuFzAd6q2{hRs8d)7iwcvUlkCa9^Mm8mzJPg zdWdjY+u~$4YH$DuPec$+&op5f;jl!=PQyvKqfsutGQTsiZgVOi&v&6xAd^sJIhDv? 
zt{1l0!CA(Lf5Y@FQ=j-Do||5_Ch`GVr^KyzhKQvH0ME}w#(2*9WRxU<4lOv_@DA86 z3~}N=@`BJK^Y3G%R#}>z_S*mrI+-MOx=OZe1olmv8=7lk`@f7V%GmfB?+xhSx{T_} zU1yLt>3Q~&4!X!5&?LqTg;C9;=s@Q{wroqx?jTr2c;t~F@2<@a+x|1+k*kq~*Sm6c z=Rp=bhQE&nYedl&h(hT+A;AC8mFRs!QleGn#Z77s@0|d&D5c;`RUdxm!y_v>x4_98 z%LoE~>gmg76nba~M9R6qFJS#3bClLaOAgXV8zfOy-4EhMiwylo!a6$75}zpHDlIvl zWnNDV0U124NaBJbOuZ!N#FtcO1ufIuPHo%?5pQCmu*OrDn=_uRcGcBj2p|O^+pXaW z!o4$^i@VzZtOW=f2zb9%{gg>6L2~xuLjg&^F9oXe@@m9+Tn^t_6oiEh=YFgy4AM2 zF*A8fA!dFHYO|A*A+=fZE2`qT2ACr#%5#+N#ctn<-%`Yf$@E=PO{YLnU>$59-BV_* z;Vnq86c}B64da@#_qtKwHkiVn3tlF_MPhQT+;u4D1Oe{H6$4E(&HkBJt?dfvv@0oW zw3(^t2)b8(a*QnPeyzgR%ValNNjJ&QxW8F4)_q@V+nM2yeO{eLO;i4OOv9o?gJ-!U zN`=EEXNXjHxz~KzFTa|Ag(PuJnK98g)V&>+xT5WRrf!OGdA?t#FT|RDs zCbyc?QnP^`e09GYV*Lk`Oc9+MoJlMI^B5WZX@xwb3m=`kBJ_0Zjc$?)`SxZ0i$bvR zC_?|@|FMNJ;2e*A^^71JkrXwuA(94i%|9w@Y{TN5iORYEQ{0sYMRla{2PnvjsNfYv zF-ic}i&R_=(SBb>}=yO4p|^`z(YxKMKi#k5Ueg{T9wRjC{F+9D)H=> zHANbd;F`b905Rawfc1HjbR1p~_Q3Svyub|^Hgd#wqgU!m|DFybZ)v~Ldk&)K2sdXp z%-kxgAt4qMiPk-HgynUJr!oy^=et2Caxd|5UR~XNOS#2s&zPJ!hCPc-V#vE*yJO$- zcaQso>So#r&taz1(B>w?7Jy~N`!2eM>=P{;qj;QOYrb6YD6Q*?D0BBvk$FYZ;n5ES z7v^}{rzTa!E{lF@mxli-oILWaJ`16a1tJ(RnUpw79o~vv6a2h;VJ8Bs(X1UXDKJSo!H@(gU1(ITTAMr?n-!)m!7sW?yo?{Tj9$WZHaniibqo(k^M4P)qhO98`YEK^29IK7cBU`MFdg*}) z!eU&Ns76m4FKp!CLksbYoQAm3q9EI#VHtMtKp^=VCs}Oja+75ggtnzdL>0;BedW;+ zqJ%x>mX70={;4>>5GNF;&U&S<8R+A6wi-5zBJ;KSlUz?OuQ05!?S6eg`n^`7%lS0| zj{!GrOOv#k(mP<=&l`!dU)=U%gPV%>HJcG=C?393l#)bE8trn2b>JTr2m{~Yb<y-Q?q7dDN;JTu3NLdqN9(Yr5CB-ckwAh4!cjF#9B4qqQ0VmM5*`4sirZ4{E?x(By zD4r7OOsngPJMFODF>k<<>v9@=K4z68Q?wxPbT%q96r*i3?6#0-^jQRlX@ z3pj;Mq%1QV)zCKc9T1H2P~~+%y5lqlnJJcreR&K(6r2a=X`nUT@%#iEDCH`H4XYTj z{}QPm%?LwrU;qFgIjTn1U)7i^sUOUk@RjR8RC7AX6yh8JN7jzyM{2b-mK08jLAcQu zguuw5Dse>N?su4tcR<(&;PpKQl>@K;r%*8l9Jz#CM+Bme(IRtDnF9+u65$C%o|4cX zV&)F#p-_`9Wf9A3C810h*}X=x)@(HyVT`aTWjiZhxtmZXhUdBRa^^ZxHqoD%VGS@9 z_-b4m8uiRq{zah^hZk(M*{jBE|Nd>+-bX&-ubk#x;R||;P$pupzSLG>sJJ(L7j#G z3Dw>iZSRL_ehu}Y?N~%6A~nz`9O+~rHzuH(7r+I^kKjXwWtRwZ$jD}H{HUO-l0YfI 
zqMAn^@8gGBIA^h*btl4>6d7cZSP67XxWMY7Lng6*_`?R;@B;bRPc zfG&Xomfs%`LVfFdHo&+A*kFk$HvLPm&4eSgmtpww7%YG%!g6Kc$Y^HqL}IxCC)oNg zSeW3$%D($+{^RJAgmd~wdIc@j#PLf*z}ngj3eXtJDVj2SWI>7FQF+Gk6mfBYITil- zpIs%LR`^120=4uVq6(eqh}jewX>n*_O3mq#hs+`wG9C4WxDV#$LwKxQQZnJ)%gUmQ zL4C5fmAVYOCYJ{$8mVicgGj2!zw&`^qBe*;G{Zmar#sC}e^yn1Iea_D`jWVWymWKu zXm4)@-N$ik*!r93oZ|$aM~#o+YeTNPO?(}$_~;OrIDQT$!G9o=Y3YAm_u>1 zt1AqR9BwTie=a}jlCih+zW=M-C2Q!H!c#LL)Qkacx;rJwqOR0&Q@Xu>QAYZJ35`)l z%=}YyQtF<6fjeb^lzP;$(Mn4yD`ub%?$2L307b{_t-mOet7>w#7=I&uFv%OYieCgo zA?L#9L+{Ho-?p!Ds>H6~aJMbiEd0K;GO?dy(2RRd*KiYKdGbQeoB>fsShNA(p!K*0 zE15O&IPa-2zwuOcenMySLX))BS&x4(ntkTLt|61i)9A3dm|CK$uN&y_lB`}2i`JQT zr)7IwrRd;dODhLLfls5p0oLL|rn5b5JzW4fFhlG#u}NGB#IiL(ZU zuMEzw;Q$6Rlv*KsDp-FKubCb`!KMmqNI`84Lghs4#HjT)hy0P+@BzV&Wd)RA7l#yJ z4?xR1H-b{vnB0IA6a z#1grjKm74zHR0Y7??PdIZuTY3H`G62*g)npv8*qK)s1MY%%CHMF(~CIIp{MgCf@gM z_|yoTz86u{_O%_fV8eACmf(ezI3vq&o!;UFAjRsZE8plcS1a9>(;+He!BW0+M2^GA4p5#$ocrvgqX79-sI={g?FYk$u*V-KC!8(f&T~b-F zF_XFebA{?>;4D6D_!+S`2{OiKcFwvwO>7iA>Vq6~UIbms?D>Uu$q0cRo32c#T?6F^ zokrtTO2a0c6~=-S26q^Z| zNQx9M275x1bXQOlDWtuM zysXTO6BwNTupoZtHkp57w+r~W4@=>U!na+qB&*>btVeIyaHjD%NssllJjQC^rH!AE zMmq!Fvw^d@br1DVX?Q(?c@gKOI3&^$S-+E5^gP(AjvcyEeNy)cB=r_Ie#!4ro=A@h z5laR~Zz^kPnQPV99lR~FY}V7UEqkI$$|I__iH27UF5IVlAt({~&NmoQuH2($@cil1 zFVT0QjZ)JZ4%bnhAhJ=DXT?U+);irPyGCVRsFuZ$%e*1|*7pq6yP?O#SSP#sWj!!C zXVIM8T(_p9x@)Oxna_x`2d_<8^?5?zxvGMmylSODP(1LUMV81Bix%j$u%(<$01UHF zN_!glSo_9b^kM9>TCD&cEfmnK*l>4~hIs}lQ0S$XqMByLfA?oTtY2D-G|Dhj=#%o& pZ*C-i6RZF4PaESIZpbKP>I++s22WO$-X!ff%v(G+$KGqpe*qHo?Na~% literal 0 HcmV?d00001 diff --git a/docs/media/torrust-tracker-components.png b/docs/media/torrust-tracker-components.png deleted file mode 100644 index 19fe3c0b897a2413a4a6fbf807a0dbdac19b6268..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 84935 zcmdSBcR1E@-#@IaB8o_oPDx}{MluU!k5saiy~*Ahib6(YWMn5rRyGxp?3EqaN;cVk zue1BPf6w!Kp5s2A;~D?l=W$(EUBY>OKi|*i{eG?Y_ldHi6#2fR`$$Mg$YrD@RY*v7 
zd?q2;`G9mgzOv-_`3U~lYa^|0M?!L-lK9_kVeAL!Nk~qR$Vgtk;T%2L=W?meWnFCM zZ(Gs_W|bN;20`k0?*mddBwsj;s3l}uicjT`&wdg79uhz~ViI$WFwx1Mn-MDe?wFs} zHMYKvx`nl&jh@E8^D$}71y4OhH=};U{Vf?2X)O_(?!bTlu2rOEW}e2UzJH*xzLNbv`9ei8`B7fq#xaVcysrz{x09Urefo58e#Ko>?97=r zg}=9x%p4X!()Zf9F}C@*$;#ZwLmi7swQ zhlv)$fsgk$H#e<+lmx$RZ%&j-F6sT^M=RFoGykNw(5||@y}h!svZ=|Y?e(?U+1U`6 z4DI6YImSw&annD4&aRk~>gIm@c&S?jU#?O#*0_7^+Chb^>}#7OiQa<=I%0D7wNV%xl$Kqq2oux*_~}zJ5(L```53z`FRQkL0;3Ar0#AFmWtYN z?rMKJs^iCh_=oKzSspK_*S^pF11oh}FTG%DKFn#Y90ej@^S!GsgQP%}lW@cu5&$)AVvBbHA zoJWrzpJ8V=HZZX0E6U5ru)^E$7}YJ0zO?RiiSj(NoNZuW5H0EvMX6_QUTo2oJw84@ zFfdS8SNHz?S=XBJ)y3aBIy$(7V+W)x1Q)YR18`?a*SO?G6)<>bs`7n#Vqx)ycTI*d2nXDhjx zuIf!`XlTfi?6J0Ff80(_PftgsCUms#B6z$52AfBbl_ zq55_pQ|sQSlTy#9pGXQhO-=R{=ik47Uzyb)T>aX$yAv&^#J|q`tVorQJnuYnn~{-G zT-wufQ`c+b3h71nJ5IEG z`TY5Fd;86|+Z$s=G_wt4;zT{{9USWG>rX30g+@el4hTIB2xxC^{yD$WnyN%gx^tm$ zKMlwG_wSq9+i}{pu)8l`-uApaYkse$ygZr5`#Tkn%RJUHH1cHS>+cf$Gk^~>z=;lsr1sGBa|+Qh=+ zyFx-j78Vv(=Eri}Dsjs=wF=I1a=yb4y?OHsC1vm4y?Pa<-jwmmvWtqI{^iS;n%A#CkBf_oh!F5xU#7oc)s!mix$a!)dl2WSwzigvbmy*J zyJ~7`>X@xR;Gdn{-1a{b-@o6l)a|crVQ+7*ma*~J^;E@?u`%{DXUbd`&v0-|7EhP4 zv$5R~yEf;}q-rego1LAVm359FsiRYddh8wa_AN)e^6R7|YD&tpW^J#(e*MaC*tM zU?W&qa1adoiX5=dnY|5+jrZ)?LnVIo*)f^Sdv0!S)z#H#1THN}GIj00ii(QxYaOO~ zyMFvA!>Q3bY_$6$FCX8RkqG^|)Smv+=S{m06zWc&9gQi-o`Y)mli&O@`>U$$?~@o{#KVO2N#$A^fX4C8!DCS z!Oa|RPV7ovSy^eWMm_lc{X5zdnnaj1bI9}O`dqcdUf0|l{Ql}Ax;Jm$;DCxaP97mt zR#*E(ZZR-0C@|K=iN!VWM2fgMqPt$Xa>dnk_1?A}5r<2pz4cenr$mFa3|aXvn`2Xc`}S@6@0pPL%BrfzI!gp^0YO1C6BBA0 znlks55K0wo?U>L|Nz#et#Ohz|>88hVqq5FlVQOBOXqA(b!wG+OT%&5EL>>R zDRt#j9i1O*ykONUO?vJ6^+f&7Hq_qnho~@5M3UdX584<@l6ijAKYs6T6@i%U%W6Ar z1&Hh#;L&$%2tI%DvYcFPPft&4>wKy16_ma9b{xAaQc_}`>jl}_Ax|VHem$V&)@fon znLKhxoZqT=kgq^;=KJbrFxms}CuV%M@^xFM- z+^O~Z_u~RKC+3ayd$c_S(DJOUt+7S5wfiWv3j51E+?L{u8=~Xcm$0i**jrE(3knLN zqmQxd5PEM#7Nd2Ul#Pu|9alN8`Cy_075B^N!w;4{s9WVK7y_e+N zg4y0XyDF)39wjWi4~%@)Tk7VNB@rJVk1mYbE+{BCU7B?NkU2|PS(&|`D;3rA&5gD6 
z*RQ?3y+gypTN4#b3%!G|ty}h>_1HT)@}E6>;_Zv{5P|Lw#G35pY$-w?cXNz_e7uJ#JYNBjoffARw9Qw77&sWMX%Bw>@nkE=-yYJKU%yRl7S4sEo-;a-a)82mJ3Ds#9g(%4jDMv0?MXPP1Hkfy0;Dbzc=b3lDzhghuI$ZNy zK|w)GGsbl2FzHUTuDrZFtUfn8yZ$qLdy8oo9UUD#zXiI0w5+T~j*)_a0dq0sty{Nn zXmxdU#lK>SO0D_?|MiiF4|folxwxj#ioLg5h;_UsH z^Ny2hMdAZFcWrD09Dd!<(TPRF@~QY~Bf!GKGASn`wz1HPg0XY^Cm@l?{){@#$WEh% z=*XZTn~`r}O-)UgAMPf{x}k$jOiX0zmg^MS;vd)}qk%MoM7yHuf%)p|IeB>(M(baq z6s0^t8=E|JiiAY>l>gPO(>UK8x@9G3MK}*Jf(~< z3rP!cb#+C*XW+Mpz>Q8w(8FE^!?7vfyFsxmb*?<& z(zmQqzR<9+#l^+b@)2lq$B!KOY9$c&30q2AOADnD5C{atWqIb#(zLv|Z(g2*fk9GY zVj?OGDwg&E6LWKRb!wlAt}OjM41u}@Hg|x^Q785E^+iNPfQ(ElI+~mBIyfZLi`Cw! z0Zs$^yK?#RWeJIjFJDeEF+J$&+(|NE{zx;2$f3bZ6Pt;DlAM<~LQeei1dsQB^@X*q zwX4-tRV_mMNj`>uZXv$rV!Cj)QJuvkJV*vaI(vEo>3C8K&9{+E4|!Ra+it8njAk^D zG<{A^N#Q^BI0a<~txD9C_|42LEc{M~Nd`7H;);)CsOO{<{@O-zo=(IySAL~Bka6pY z*>;j?@;kgAY02#pw-G;^!Q-<>iGN-)BK>dk1x{A~hK7dG`4#;tf77X_?((?l?xI`R z&GniA0|U23GEAt)>`kV5y(UMiON^S5r^+?cE7mUv8d?(Es|@9%nWunrwM)JYdB?7~!fNi@#u zPdDGvb;W3b**>TK$cTs|baVh-fBMUHS*bmrU7(1&1-1~^h^-Vq9N(UPYkp-N=f-1G z!{du2(>1giPjK zi;4tUS*1BPz%jt`fg;cI@~%sAdyLl6-M=(2IEWS$grt)=k#%2aj1;V%{9U6&>V(`hf%`n0p&)o-MC@#_L8`?qobpx zC1-IR-pIZC`6m*$mCKwD> zKGB=le*Xj$llYY@hQnWjm`}(GP3lo!)4DQ^yV`vC47#1F0-EfkU zR9Hl0XmIexjT;=CoQJ8Y&!0d4JyVzQ;KA9o>2h;3v!`Kn$BvbtYCwTNxkiUT@4S_% z^JGLuQE_Qz=Jo5>XZiUZZET!zHINs;lKCw<&w8vbP^{oYmrux%@4^k!(@S{w?%i zKX%a;@K{z>);kDamXG2!MZHI5#QpFNdd46WE9O=9X*(LWfb-0NRbSEKWY_Z)Z!DjW z4?P_ngNSQr_0U0a+y~F~*=nMAf#ND}a)41zUzdz6U_;*p#1J3x*|TTpm7T;-2@-)$ z3u(fuXrKFpf#7ez&u8=(rn+*B?MCaMMqRsk)ADSOb@>JjEo}#y0rVcnKivn#&CJb# zNRMFAOqvtGbl&q!_m}tM_3`6CxUS~}zE?l<_s9rtK^{>3blKW779%4gS7&E5(53Na ziEq{xB=>fEq&*kp(k{B8rp6knqp8`W7QK*VU}(6p@wXq>4NRvXw$tC&*O!9hC$={h zr?sQQ$<{W34Lt7W&z}GhePtd(0r7hY=n2Ft_VDnybm^l|E!n<(Z!$Cg&duqiQi=ml zUez}dhm!XVSCRsPNKJi1TYKaF^!7^Lr7tFLv%Dl#xS06(Wy#6Z8;fv?+*cRUz+Q3S z_0Q!>GYi@bkc8K6EOzLck|~gyw!aB0FE1}G_0ZEBT>RY;S%*`#u`(97;_itZrhkr? 
zFbs5J-CxEa<|#}iK_%|GG+AifFRGEAQof6Xfo5=!+Li1D|#-u?80tze;`1@XsHmq3n%0wkcmGcHRUH-bNO9XKFIOMJ0KJ3CqPA_fNsAp^7;0W(9Qo2lh42c+POxOnbdb6;N!{}ygW zAcGL@%)`27;0B!e;-aD!vfOtuB5ta#yN=+}P@YkP^)q!!p)qlCa2Pela=JV{=}o%x zL4^+me)>CfyI8>&T-vL{HB?y^04xw$mN(a?*OvaIV^7|>bIx^P9QAT+bo7vTQk=sn zr6dr0=%eE;$@|w%rzb%rOD!k>P*VfaoeWA%Jxv%!-I4$Hv%)7mJ^faOR?W^{@*;=d zGz0_iPQ3{*^$)qZsDO_ba(0s3OQN}uCV`Um<;w`zJjxuMsQWTRqU$Ou5ENQlTb=%N zUm-pCM3RPv#>~ttI5=3l*wK)M!*TkD2n2)Gg$Wd3uzpAk;AY-IfMM@z#q7tMQ0_T7 zIs1Bh%T`8Tg2SV*q8dK-_9mk{?_EQ4_%I3vU>KkE585r{XuGSc zFeRm`t<8RB;M1`Kry+YlP6Eaz>SpIg+}ykUK@;%V_V3&-_$R)kWF2%J^-aKOsuxNb zJ}Z3`Cliw%usZG^gi@ex?lWgxX9g}yOZ!5F1k)WE9yUpO2`)M?(1ZiF%7Hf#m$tP% zPktArdSr64S1qQ$#Kmr~axYH)63HD^RT@H))ASF$$cx9GJ$iISOH0ehXcV0BD7zhg zAHIplxWT|kxOMv%J(D*mG1%zkcyDNnN7*Sm0j`fRFras>F8wjTb7vMp3)(DR*q%x+ z`2#3hI%yLfet!?(b4-~51#5`D$iv6yfMbfa;pXCc=I^gn>}ZKjrlxjeo+51SPm$eN z1EhXS3kzm$CceA>Hb5xbb;$EVj*}+F#-|TGvFa}?#YdQ#nITH|6BAiHnOcQ&AWhzs zs}*?=tNHla>+1Y#DF3~h!2iL+4=@Nq$gS6Bb0errzhC_TNVrlt_o#|cKZ028#nQxwgOPpc~1 zpkIQG6N&Xiod3dk;_g{nvUKDoA(^qlp(CCA{4(L=fz?AWhsGg9%ey){ zeu$4J1XkD9)fpTb8X9sD{sFGdT#JJu5LoRtGjLyR6`$K=1vOqT^)xf{%*y(4bm$i^ z&Obp>7@Kv5zDC2&EGQUNJ;bT@{?eQSz3}kVRKw6Lmu^{IlJ}!Wv(6sC;y=TE(Sm=<_WCsDRN*J3f?Tw&?;n#^lE=|Q~3Ba(H}UhrlKM) zwc4e7PsD9$#<>JLQEs%{=I8dn>b2Qw#^f7wJV*G<+TLmAM}^F0CZ>Xu3{Q_#m*XJH6>OK!rp!=!=LIjD0U zYy8jE(5sb|Kdwi{#cg%ydhOl0on<6YY;$1N86wMMkxZ=Hh1 zUFAB!_J<}TXb)AKTBaA1HalEV=r8c#B0!KhK5nYmgTph5@Q|>AJ!106xE|s@k%b-#h5RgYDwJX=!Xi^x?Ijo~GZ7BtuTO zto@y)5(o3Idd9E@PcTcrx~QOlm5uF@w>QzDK{w=miC&B)LmdZ}`SNWYn6;){A(l;9 zTiXLZ7}f%uq6fORrKKf4DJlX4r7Gbp^cQc+nt=V&eZ^8^jy#6jNqDdM?N4lPYhz9& z9HpZ>C@%W#{7Dd_njEpxuaNL>vRr{t$na7HP@HJWJ)}C7d`z#lrUnjea(w(1b5o1| z%mNUKsUUg1U07U<6Lt}I+a)for^iGv_!i2h;BGD+|GuQ8BsVv-iansd*1-H*s-TU9BrNy&Hj(f{F3KAqQUyje(o>lpNV)gHIIUg>(S4qYeflVRcKf3fK@{iBp9n5%!CcE5fD zO&XoS)Rb+5&>d=Y>f+ygoFTwwc8IwBX2k2%^YGa4J$RD2pHJjx<(DtbVsKG`ap2ga zEi&`>m9Q}@B`y3N1HFSBhVKXR$Vc1u_TGv9S5hJj02ig8qw}MuN0a#wXg0)qS{fSI z@PO0>g@w&7opt_v8n?ByqG1JMdjo=%8r4UEsO_iaYT~QL5#l8}1Q6iyrT~?$uycS+ 
zfuV`phEXgwR_VvZ{Rc=c$>V-I*xNH#XrTp*e|3wlJOz*V^7$+jAnzckLULYHM3Lbo zE87TV4|>C10_u|G%xmmZg7?I-=_u-q4BPv%vQ4=4=<=v#pm6veK%wN5d5a1KTXNS$ z|Da>z3<9XjS+#v@i`=_`9cW}>ag2t>$kenDCk#%f)A#rZNR;TH0>>I~`&k`*QEI<` zmlgG6IbX4&2TA3cnwkKVStFqXPd!O-BLWUf^IW>lD-b0o_A$_wo1QH|!}e2j1l4K9 zpvPQHkaPiQ9)@03pwCqEGg4BvY671@;h7#E?@X4X1zCYwixVMuxPUDn%H!@6!uQvv z3NVwI?HrNiKrJTvcJrg;1RQ-sEHW%5SPf`RsAu|IBBhM3^PyM#XtGkzLwAIhgD!@) z4+X!q66fC8)fF!0Q?77cUfyG911~RFkMAl)4b0we0h$i}<@O{#Eo}m-z^~uGD=I2X zpGHM#K)`wStO4D?hl~y#gcfcNv{?@IZ11V*PoaBBnj|@`p4`#@*-+gwpv_~#cyeS^ ztl{T?1DQZGyMIW9v;XR)AM@&zJ0Ko5s$Yq`zp!Oqz{erj2niK~y0SAf4+HYz zZ@bDBtBdA1f$;U< zeiX?Z;IZ?k5wID!RKu_MP=AT*6dDltkqqp2^xpLJRw%mm_H9i~0(PTh#quQgSj_I; zSxEgpFnQtV5t->9=XrSSY;BKfHE@pv9VzlJ&B};L;fdMSw<#`r%m(R?@o^J<{UP`s zetnHfu6zber41ojWe*;F2I+)$92FVqg7}25FDaY}Bc;cGRZ;ye*uEfh*-Nm$^>%l4 zjY&eM)!sIc6Icmo0F6IR-%rqwgnu3GPNQJ){*nm|%#V;EUj@@PM#Ewg_*ST=yXk%3 zaz;^R!?b`)jts?3i}N+rr7M1K10ZmDz#u|(z~=5PcCv=C3RZ(&WKiL9nw&fUdJUhm zX|ShqT^*GyG(Pa!7;&dY1FPqfWBRtL2!sk(lYI;py;>&Cpsu6+` zKo$9Ywt+?)x&Rau(9N32?~M7^9={w(q`_61S@r{sD}o+U?f!w+(;LfmE2t38ai$Twjjzwpt9Gm^8jI`a767S zxxJ=RVIo-9G{EP?=0i$A-J$K#k1s2Wt@?xnkHH~iGjw#0&l>}->DR6#9}AJZB`V^& z;9EMu78VjRw>&$9gUj=r13*W9GoYEC`Op(mLP+XnpeLi0>#imIyWRwhC+Gx0&!0a( zZ(IQcqNFqpBvp5$F!UV{dK$eZhtl6HnjXVPcR-PnlI(we-j%iO15~q|SFc{79fa(^ z3eO4Kg;*X26tmDI&~shphM%Z;>E5^z8Whxo?z-B4Kk#)H_u~Z`1vT^KBVOyXQkO5^ z2kcwh6Wf{3e)8nW)2CBE1qehHwB1i2k88-Hk)Q%ZACChh>41nc7U#w6HGM7j3XqXs z=@fXI*p$aH9|}gU)2E&Ak!sj)={Z<6{q6TsBE8eU^G()PaOacI^|ia`R=xp@Vi19a zgfvmSR_mUA{P+=$E^UrRZaQx-N7r z`_ss}$^ky#AGZzB1tue(p)f^>d5IDXpy$d{{2Cl=iI?z6N=nl8TzS+t0-oyY=xP5> zMN)ESj=8roYcI@EsV=%8OVDsM_yeS*xBFdCaV@5L3&DA*SmbZ3@&RqjPb%Jq1g;b- z-2DB!5tn7_1SR}dy^V4FP+fif+QLM|RBRfyDg4r3Fkg?HOZapY*8pJ^q{b1}R~=t3 z6vL)RTM%%A-S-95BF=Jjf7m$rNg8gQI*ZRYuU$KbP6k=Vv^C|9)q+$08YE5&wzq<`H-&jn(Wp`vccPS~~ z2UyM!Stwxi%eXi(tj2Cyt?eYzf0G*m@PUE+3}8V+XMu^kbNhXG!+)0GqM#|!96J^r z7M7ftcwJf9X87yD(I>4b3Vu(Y{(|$2!g2G)4YUJBG0(TqnBgw!#EYl)pPMxy9%)9i 
zCam8J^lm^2D4Os)ZfR<+0VseMZohvU8O2GSvNvzcY;4}ArxTmnghnZjAXFbQw9`QKDv2a=FqP>-s z%pxM%Cn%wHHhUc_-p)I`xUgXH*6)-zhjtN=C7+<+DuOcreD-dzrq5b-^VTfN!#_q@ zX~&9Qu+q}s)!owr8op=Ou0;?<@D0S;@W#o>$xsWl3T>1983`qt|E!7_%7pm%JJ!}> zhTlRVnjATD1Ue0o%0RpUg`8y2t}N4rV&8o2R|<#2s{*YlfQ}S}mw;0rutcpZSMa$# zX9stIzwh09XSxFFunRy9x~!Yq8on3c6CnCWxmSNf0~a&1cg0T*wf6`&SmSs?#U7ep zA*zQErXX$o`SYji0@;=gz#f(gTNMyLFFV@|{tGZLq8}Q0W`}XJH>jb( z3fp@!I`L}2ua;y#_|76S0Q+=DkK*_!PY#9@7Op@>145Y?9MrS2`T-OHXb#wfXdIY_ zpNRD)TtTD(QhK(CJ_>>i6yden1vWn+8Hf!U?D3_S;*m6+rldEBiJt3_%=2+Dk>@fFzKrA3ER;UvBrBfTajUM$0QJ>S}7o03%WSw%@-bC1s5B3=sU;$|+79d%DG0|FkzGjP=#unOGaB<1p8l zg1>flI>XS!g5LJx`BJ8R_wHTP5lfqs&r-M@Zjb-^rBh%-K+*zyN=8OTJYM=Z(@23OKDTs3y1A3SQzB1x(rK3 zKj_skFCe2L`yBh?1>D7ycke_DzfmK*wWKHjzX#b?WM%;Ak)Oh?8t*O4Nli`7&%fXn z_o=F(Xp)LeLR(uzP|y`M4}uIfWAO9mJTOS_-i_q9jOt8bM`^eJ`4smL3K1+N@4Zw< zdn3NUMn@4tAOhO@*KgmhV~^!wAtApd*~=wF0~2B%(wi2IB~!BFU5_Whb@@%Y3*7x82L!eUy)cgx7CN>p#*M zGe7^!34+P}KV}qF(IfGy*WOREY?9QghS`UK>lG&dU#1WKUkoh#|Mw&QwVOlWN2VFB z#GD6-`{m6G{+`P-@x{e%JgU1$M1!Ggg0Ap6agw~+24rkGb%x}l+aRLU7n~SK2B^+( zaarL@_jpkw%!&WDcIiK3Ega`|5`Se7-DeZxpCpk4|J}qt|C@c`Ugq^1H>l$p$L3eC z?d!E^N$zD*el~H=M`mhvWgY*^=X8ZcRQmSq6XfhjcvdO?ot=%Jz+vG1^6lHxEpEtd zpzt_)Rh5-kx16|w|8-`=7HD#}ZXe?O{k|w%o(cKuF0cjf-9x+IO|&@G)Zm|k0-cw_ z(JR)kK7eWk-QwiQM2G}&LQWZU=KE*v#`~>v5Ai$g(#U|x3g44ToY)+RQI`!g0oY2+ z-pF??LOjFg!me?*K^ehnf3KAX*$5C3d!ELCWCz+T?3GX!WkhgIDS#M9S^|D4~oO+dY7EM=<<8>@MjZyKj=d9P#X{#f=mlW zid@y09h^oaMi5Gb+YCVgK&G&`I8MwfmQqDmx7?s6gzb8Yq?A;-^K8P04@&@gsQQ3p zY|u`oo>tuQYaEk8!pFx4I>(pFN^i;#gyp?9-D{%JIpO8bEH2(uRz5+zQ+aRR$Z{wt zD>tD#qtk6xP>3~!0?xLCV%gmX^XK*&8W=Pk>Hhv55zZF0x_>Ek8k(!5U=gD>0`LkU z#}Nnw^gRsaFnc43%w4Z(d5;_Uj+o z?9LwDh{p;Eq4OCz6qdp@9J+*I(_dtxd=70U5yPO)^_uV?B2>ki3?< zdd-OK1kc(4tFW@2)Z(>c248&Y`2i`#t9P1;_Nyy3lhW`Yc#1-@fyTrIIqPf?> zClhp*I3PkwK(c|ja_FsV_JHR%rZ7_w0k(|tT`{O-G25?wPii%cY4iF6>;apbn zuf(MF4(w={z%A7{@`yh1U$`LVG^KU>Hr{a$koQ!7Irl&2`iN-j4G)*NF3KV#{vXNC z$$i4W?k7&1U}H-I$wm{#e-`KFvh(x9-K7v{ME?+vZ~F9!01#R`BI>qe0W}d9f)EdQ 
zla0N-R<^;DloXbFF_WfO(5}XCTi`^&a|5=*N)kPc;yMJ&R&Q54Ca30;7Wm8=fn(3Y!rDQ} zabQ@KVmTrM8prnR-mQA$2KF(dkmFmpIP;_RNZ!Pif7TW8Ze}FLa&bwgyoKGDgNP$Y z!4pSoidPl+LC~YRdV8anf^k6gTm<3$$4l&4Ex?KWH*e%<8sOlDqbiD=G$;&|I^1f& zJm40jGe2^z16N`$p%BKWH1n^{tf3%+gz8&0nE4#4KA=S~Vd19d;Md{K!66}dA48@c z@9}rKf6PW8B_#zQso!&rv%X)={RB66W9a{B#z>qL2yAw75wU?Spz+-l#}Q>>0+GFz zc(vIWADSGA1j@Pb=96q&4EF-B$jF$2tK*k?6NPo(N6ln{ZL-8f>-6jy1h+jsJpp8y zydj1+TN5b;h_A_G6f4a3K|eU8jR@~S=lkh#!G#7TMXVPf$zqk)kT2yR5hPk%a|$2CfBdg8vQsB0dQB-wgp z&+Dv>K0vj?_>SO@#MiG)3=9^Ib6*2e#+F0XB91?>pY^5$<@NU`gE$2_L%Yb{6mVEP z{wxQF|Kny%XYxK?nwvW+;%bi-hNXyO*7XI0NZ?-=F62i=9mU2#%*`D!1n(g579>?* zMxl6P4rC0Ab3H}AroJAbDX}#{HD_l5p9<7RBq(!pbE(tqFdIhjE|tB1zz7xwl5~FW zbg*RQcZYTp4A4+*Z6j`y)+g?A48{Q^es$v$2VK;4G!&#fy(zI!cp=;sERBAA#aM;! z@m1<+2E}{!q>25=5BV$3t&u?qKsmTz(RpcW5>qT|VJEx(U>iJD2lKkeMdtDUJN}2lyum?#AT3oj{v`CiiBa($7t?KzikiLw<_>MlqPa$|w zh(Wzr;Zz_+?Te2&N2qLWz6MBuK>?VT|AzFEl9^F^QbwaS=lO>Hf)EMUsy z;L8)67;T}Tpa_;uN=qx$`?4=ml;&RGYpgoT5Q{hFB#DxYh3Ju!f`UaE8P6!sBVqzM z9Q~lb$iYuN=tLSmCQtw>%xOl-f6K2)ndMBh%^A=CzV)`p9O;<2I7W}R$Y@zwdZ1k3 z7(J0>!mTrM90DkYO|jSfMg(;4A~@jaUP+jo^8vNb(MSBcwE6=OG-O+?J=$o?-azE=E{$hGLajnY~M#XCLTPO|++nhn@!o`>$txyzbl~tTjST;ir*! 
z?p0l6a!o#_%jY%98PafNo*NoBZ!Y49bn5Sky0w3j33M1;nmM__{;Cg@V9RMBv{LYE z+M>edGM6LVgD*hFWPjvC9-T$17S}MJe7(BW3 zf1QeoJ&DNdM4XD?5L%ygZ$U6+0&-nC`jvGn$1x7jdA(9lKWq73fQ+%=&~B88uh@}@ zjF1u_uzqyeW+1+H_VYepHlt_C`u$2z8?K)4wx9l?J*kZ7A36qLBiyzLv0Mq4^M z!W%+(&z+ke)>=pq~&dgciL zSDD?|f#R&zY8(X2m8A@ijTILZ2)oQBK?p&!`ttDzZ{~}xD@BEcGHG_6o}zs-KQJDv z^H7h=<%*=F^aMG2C(+HaFyDAWoFRi#zqEZ<&F?}+KWO{;`N&3PXJkz6;VL`$dkKOS z(nk`c$VWz>HSFi3SRDS*Ht6K2t z)omyy$btdvBO4`t^L$RuWze1Ej#oe=ERH+aYR*PYwKc=C5RGfB3-VUl zcE^5yLRwl^=iC;BBsPci|6z>FG_C_b;PjlT%QA?1#h zo*WoJV3MAe)(K%xVCgfgtSAM8fY_L7%Un=rvVh#!jkF%zGN>uQ_x6~Qcg;C0crN$- zdmG|<9~XwP1P2mHLSlMmoC*kcyg=a6yxEF2^NQ0|sfWi!-6O)nzWg}k)0MVEl#e5H zQ|WewA<`MXk_nZSlCCno@SUU>A(eqLrKG0D3Ece*;XVdJvoSgo(bKSF59idpnn}%b z(vDsAZ2_>~r4y_i93{vEK*)ZnBZ1xQ`sp{sM^GeaWx&cOy|FAUa@Rm^?01DlMydnv zqi)cn^{+`pWjr@(%&l7%efG`+XztKe zP028Ts^;3aM4X>G;SI*o{K^h{K`*fogHu!t3w@<-`4F_AOrZ_vs;jf9w?Xoqc*NIT zNlHMp<~JZHRuLm6$PSQ^kzqbI7jcZ;9gF-`$T#Wu6DbgG^Tc)vR82uzE2J^(A3c|5 z?#A7+c!5m*-*sgY(Kj#puZViAh92Mq3?1H8i``+m+f`R$c^zcz-^5u(J(_Y$Z2|0R zn6UD@F`fr)8)y?(HF}_weCkx{n>R-(DKWaCqN+Oj=MMw0gZ1t#oKpy+ON|Re?gWv0 zB8=GZL5QH$<}@HQX3=&2@Zm6QBmlYQP~dIHEWpgpw>zS&Fuo0ku@g-SYfDw*a2H}f?^y3hJ2i8>8F;r@7@{L zM-jzLWK|GTg8aaVnKzUlAXg}{2&4UPS!DW>ybx;iLHtu6qPKvK7@a@{2#?6vQ?w?+ zlTS8K;V}JUBfsDCd907R0&5>c^>J_0$bzDx^CIVlF!R1PRXokilb)6)Q?|3u@gdk7 zgn2LHNZcZP>Wu$`JH%aN$xvHHK2F@+pR-19o~DM;?oNUw@8 zG<$E;2*m=OE%v{@CFYOeRPqA{te&0O_R&oP<_=z=WPoJ}PFdUT(U_oZwN#kID+|DU zYgA<<2^vJruTxN?+Y&n3m!d%L+R8>^vI6V2Ez|;F{S&vrJ({o8)v0V1dd)sWs1EX?ldF;=tn ziiY~C*JaSW#PoD?ef{g;J_`~j*clla%We?8=CRB-Z&pXbbsv+_>D|8F1pI|L5%@gs zd4N+SUGKY#ZUL_9wZG2D(0PA*8z$7Cv!C{Aoaus6g(NeyC)wi~nA(nO#B68VSZ4bz zdFDevFNir|(kv5<7vugI;0&*`Lv25&UljocOHWTUa9LSjCnP+G8B2^~9H@QpY>C*ui!y2|QHz$rkSXjWHMKbya za2tgO598kaLiU+W8ZIq0ZEXf>YUUSkO6=I#)A3Z;!v`U=tp3h%>(h6h(ZBH?_fb>(V{K_W7!v>XXoVQy?gz-z;naZ)^-kF{%B*I z(md;#GyJM22*@K8fB5i1n}sk8eGZBaK9c1xw}*dQI&URp8EqbfgA_spSCU!i>68Z# z;!V!m4u4hp`%70)y)hp&4GJbk4M04z5w*(BMx*~$TdO~IkaQ=09U2|vAQV-|ct9Rl 
zUT8bZsCi&PrXK=5J@5U!_iVYgz4BSaY{QSUcnS_&aS%O>%mV6nTY)(9q>F=sVkBLFdn;k?0D_}NepI^Jp0GB6-F3Jx*CqPp3- zf)q=zG@gh;9EEjZ6&2mQUcL&!X2p~G=1@yl3yga^*zbnI*vER1)ue;0VMcuw>*6VXNu+K*xd|&!QOJd8na{^M(9a4|Eu) zwCEKUs5!_NU~S2bpsw@-Izi~5J#?t$--8`s*P{PJ>L#KeSo=|but6`Wj{BBtDNOsO zr7S##W&IA*Q|QweelOJGA|xT{2o(_054IQsuSpPeg6AuMN3pS6fu%weui3VS#5PTX zIl}mQlT-+9+0@PQFS4E+l0D%#SEc52$N$vhb9F&v`i(R-Wsk&{EJHkMjvH}=J++u{ zoSqJxU;EL&(f6Y8bn=Y*`p}O?^{L|Dv35Ft?Zn28mBX`j|J&yj6`z&orr(w^*Nk-T|annou_=?&d2h11Z8A$~FHGsJ66{F|u2=LoN8bI<+ zHtfuV*xsW6RBnxY!g%}+fGJw6uE%0nTS!pQdw`Zhr(}2T+$mixP4IsA?himuyIG#- zUS~ilvph_~e2RL3Nqfxd;I0F1y~9sTtMgt%Quh@162({QJ%fSaZ+B$H zN>zZeAWEDKdad%9MjZKoh2I^G7)n2NY6Jv5ib#os?8j%|-XkJJ%>2QBkIZQ5?RAF- z2x!@4U_y|5wC}Lz$_Oo?5}Ryci)v2szF+g>$B!Y6Lg2$(eqm{8DdwxOvOuLTVq>}L zXK-z}y>em_lf8x4dC07Wssa!ivTAyDKll0K%uG->UmsbE+9og-!W6^@72tV3&poM* zoj1VaeEb#ZAVNFY*hGkVc~#H{aYyQ%Qd6<5P`TN^ZF%O1s^!wlr*hCwqH5C&}!}kpBHK;RP;okE21y$A6#HTEvwtx8W0WN{y4wal$ zX{~Zkk@R{kjrWLE{P^)hHktVJ0kG0{xctP#p-<_G2r|L z7X*Ucz>HZILM4*U*cTw!9+20H1H!^j!N#i#H z((!ySoa!N{Gq}eos_8j&<5~%ldp%Sm0{8X4I)DiuG*RfW&}mjtLOoX(OmTDZ7$|gY zbVpWi<+K|RFF>|Y$En_nNF9B4e$fx(o`v;sq81-X4F55!=R zdiy#a{(=pMg)w!a!ecS6kY3Bw5s8T;CnLjMhi3x#+lJW3+wX?c56 zM@wQA`7UGBbREhD9)G8D{W{)sU~B(?>po~Q;^mzt_+QP)dgk2H8QHIe35Lx8pA^7sa zxRMTLg2t)?Y7QH$}*NX2VUET6fzW9&r`_Jx)s_E@!i#Tgotki~E2(DbilW!g+AgTlLDryN*nV^6G zEA>-czUfgg{7hw5qRis|+Y~OWczl7w?-z<|*#60j+*$?7#T!TtAwQF6*6unx_$Y-q zMYHdb-rR7FAUi~#>#WqkI(5rMk&3_Y^gZ!-0oR4sDW?Pk)Zii`V;os?RgaSE;m z9A%?^iWX&l6rR@5=xB;ED~6TkM(gR*t|hBXtefK8cwr=cbOI-~7!QL`W<@DYpU>Y~ z>amWZQJ-D{d4)`foGpBk*QU7oxuqplR%&ARPr0_rz{$dLwfHmq8B8RY#KHi%8Ly0u zdUcH4AJ3L^9YD71RYNzvLcQ+`9y*uN$GYJKosVN^9q3Gjl^PA?n;G$$Y2kuLa>Yml z@i?qE%B*lgu$>8ErY<;~l16*o$6dM`#)3tbIi{65BL#xEjYG~+yGOFO0TVwyL zO{O)WOB{O>^T)|mI9}+D3h~OI9A;Yq&<1X@Qo~R~sqNrJ-Ur>qJQfcciV?Ugo_51b zgSarttiQg$OV`${Dt+39HKgHCd!=22y?l7z6~rm>Xc-0VUyH{dIeYuw(jHM*WvG&$ zK3&eybwvs$*Q+TOvC=<)tY^YxTSMA6`G>d`QsX?aJTNMs{6`2B%3E^lJj&kdjW|3j z!r_s4GxoZ}ZtSiWFV`xWqlIN&UxkQr!wXK$?6@)FgLTRdyZG#w^%175?N<7wYZrUm 
zcjP!ZK944z^9M->gPHQi(sFXxO)cSGBAhxU*gnz^vqhQlz=XE88!=VeFT9o@oyAic zx;i^eVqu)1icE?7`rl%z-3I;xmT!{Ji|Pz%2Joi`B4)~6G*zrroS?&-hQThf@HS)S zWZC4X3)ZoeL|L$HDvj8lctx!-YWr(T`;{?J5RL;R_bd!j7zxK=kiX$cqe2ufia?t% z@c|+W8+>bWbtdEURyZ0n=ofg7=+eSM8}AF&YlE5Pn`f>iAiL$epdUmz2#+3q{#(4o zk{q#OqFipN%b-VNDwvvq0b4nyT^QE^3~&=^LI=Sy2peqPU+r^~+xE|a1)#)Y3nAsW z^sAjc{ZrJ}b38mbh{*t;nQI`Vtd1jzh0`iru0cQI>#(F0r^XCDR2C}nw_1fNtrfxT zW3p*Ckn<2})nQKyUlprXmDPNWm|NTINj}UB2hv~204YZL8m%tq=*=@>3l$3wcI_wb zAaw~atlChvL>CP_8WXeu8Cz2gJY(d~dfH>5! zdCa{A(2Io70#V%M#1{7vz*-(r0s(dB&_U)ZES#`gdRsSinL-BPaTXkgOU6L7CY%^RtZ405HdK9;j3q+9N>uJ0~vmrkxI> zuc10(py&%Ivtbj?fyV$!weaW}q;N&cn>|Q&ab49^Q!_Z`4|j>x*7TS^#DgmRdkeK+ zK-2g*(K@?ddZShRgvJ?pAiT~f_8rz>9P(J3$qArVa;@`3@By*H z?2E=FlK1fNCLnmAVsv`mfJn~_w5UT=R6Tj-;det~_1TjHOG7#Br8|5{DJa5Ye?Xwa z(G0sPzd6Qu$^`=^va;Xsc!H#paia7$Z~`1OWYHG04Qhz-+LRPoP5?`@JmQEk-Ikf@ zEvkbD2_i(C8X4IIngDhZ^Dyo%&;MZVO~A2U+qQ4jTBTXDXwoQ+GDT^)Dn$c{L`7*b zG_0bOOluWQBt=#<5Q>BnS(?;JB{GI$70sqZq0r#_otL%l=YF5}`L_4{zVCgv@3r0A zy~5?X{{QoTp2xW#`>`MU`9Ts9sl)Q4IjowAfXp-nQ82tBNMH~SA*=uV$yO5u7wa_A zLIZo1NRVCs`E(kD9j(~OJRZpKS9cexHJEM*Q55XYEaP-o{| zVIFE6^$oTV#G<+S&%F!wxw-D-p_*MV8U4D@6u&CJ`LCB`1dIv1e3>0vvY#-qF6F(5 zL<-|iWDeiN9PKMHiLual9K~#u$Y#E@3NlwPIMXN5;->wR@w6f$ehjj<>|h0}cS zT;c2Zy5<{yQI@3rJi;I#@5ATEQRY_~U~kUzLU0-(nJw=6FE9UeFASL;VGJ}frkG9+ zQL*5tPRB{Q4KwZX>xY)hcI~=w-aOXK9!Y50J!7SZ30h{&9&cAoU7=UBF{{7f?~i*Z zD5y-T5{{~m9X77h48D?3RJk#uxShXR14$ntB?&JY?hA!D`WTwBj(WANx}t6WWuRU8 z#w_Cz{4itk2HeaH{FOff<$w?!m1uUjic||rVcgU16clWSB?uaPYMb7*UFp25K*+Z0 znwqr2Z6g80Yj=o`Q*R04(fsV}8kR*~#ZhccP!+j>IP(<)D@M;9x}|mP4Q`xq^77&h z(?>_Yo2n~URU^oY4&BBYhtES(8~g2Pv~d(w9G=@{q!L}^C%*V@_WPJl&qjOgx;f?X zw0#R^HQ%UwHgrtbmlT7D`kv+u=jK%h#rfuS_v@b$ZWmclNlgHO9rC`fjg36C0{SY` z(#pB9Jj1_Q^v^g6PlTfUu>t3sPEZ%p!M}nB0m9*W`HEc}cAvJP#JPLkJY@zj5uZ^i ztF|wl7jAWE()V{L1v~rQt4T%)!xHE#ae*_bu^Kc<ST= zwp^akiOm^BXKaxuynX$;4pJS^{28Uc#IsZ0^)Ih|r?Vuqz8VZ?X(C!oacMHHZ2P6Q zb~3yeSpho1@Sm?T;dRknmQ9TL6%p-NL!(z_-vNR{c0YOYg)ju* 
z(x3rv>sH(RNI%CrV^5vApdped?J<{@$2P9$o*HkL^%%-vJXSR{PQWx^2{QU^%BTPX zoCqR$ZdW%yXTK<_N%`}c@e6B5%M8aAr|Rw3eD078288Wg>ul_ z{mU!t5-qMRUV6^Y?3(7%b2rx+hh=H>8VXaPhugln_oEH)X1S9I6QdwXP(aW6;SUzdJWdTmS94;wh$x zIUs0gX@wikKB--&n}OLAAbTkxU+re;?~aBqxYvq==z>b}7e#sbwa)oMt@YE&+`(78 zgRhW!kQn5wamY$|q7rOb=^bBmGD0tN$Hy~&ZDW!W^;zG?w}x;3usN$J{~Kuxpy23; z`Rk{4f0&jQ2}Qvk`vY$ZaQCpTu}9`#ixnm5`}zZqIVWA~$Sueb)CQR_%UZ0qcc6Qa%uGfimYp!4uf=BM({WQKc(*PZ!Lax?g2-g(4G zq=#3oUM=k6*8J+wgw&QDT~8QCnE>vd+a|kxRjzP)%IK9R(9g%he~^1VsE+^m8iQ5f__w44q8SJ24-#(t#{Err(Ygbgi$1X=NIR9<| ziQjzLP}e61F8=U!+!6Z9eG9ftM;7gKgyn}@LFN>%5Tj&H8DdX|?x+cmRP34qmiCx7 zS?*Bx?L&^6I_M>DAM(Qxbr+#0kZL26=Sh}Px*gzcfPEAX$78_FZ2Rqa=)DJuVG$Gd zGrIj#YW0uBUu$^6JkawU6jOBKLO~_jApJ6-@GQknm!R9N0HzvG6a&5Hua^dcNh4Mc zb7ioKUk?P7OtGf&n~H-;NRa|d*%DF}{#qO7$bhhV5W#}6I)A{fHGhWcz~&BDBz^DL(V?I6KN zP`o9&KbbafJ}qs$Y9*T!)v~n~p_IU3`w?%<^-+juvYo7{xm4%7JNs(?etw6qGkArg zCMp@OYw$~3uNF9V`PBtq+h1XhDI$KA`!hS+(BXNK_nGL8&M~}Qc=l5ReM+--Phwrs zsX@=$W&K1;@EIiJFg-Y&(p(s>2fZ`;r7MDi;y^i2Q%ruwI_5H9m!Y$-`GadE)+%QX zJ~{^XJ$5Bd5)kDJ`%x9_hr&K1D5#sF<^sDWnm)f&|8?HMNwvayZSmZyi=UHpt{W-M zCK?EvnqIxsFrT;Ge^m~c-w!GEVwC`DOiym*ow+Imi}cPD$tjdjXXw8n?a=pj^>=<< z=5o8;nGsT{Ksj zGGoVHqzsO2q(MYH~cEY|Y?gxwDx5Q|clr`NvkbfRwOp zxUu^SZCgveT6EQsPRMXeJJg*6p)p0!R;Au70VlqFf`a*v46$WwUJr#C*WfsTmcyjdV%!72`xw?;USo<(Im8_ zzl9gaMSqXU8gDWQ%9q@Yx=1zYC@a@2%7hDcq7fNN9|j_^>^<>-q#Qj(#PiogBhIFP zwGXgO+8SFkIAm0}Q~~_mGj}--$+AQ<0Y8Aa=un?A=aOfI9!6WfVD9O)BZdrt?P>={ zN@9{1&|$m!rea5QuFY{1C%9ZxwUF&0Cl_L*EOz5i z^O&|#=&iLnD<%c&7h&w=kAwz3 zBm2}8jOI;Yr11`bf?jt+G3L;r`D@p{VJZ$CHq7vp&bo~oSH!N+CZ9l%p)?!rkLme? 
zeH{&vu%H4|q4QH@wVpb)GPZO$*`XSoF^i5q@sjIKaa8<{9a}<;Dg|q{>cQsJaRPy1 z4^mT4PI1b*eLIB0marbKt9$%xvx)3#JG*OiJM)M|^hM4CXJ=e&!}g0T0aaX9xu{Q* zu%c($)@>o*5M1Luzqc zv|qR}du&Vl{dCRkaXDVjQU>eIcy{*S(2$j*d37$B;dIvEzKc!-c&F!5UXKQAFmaaC8J)TVBqmsUL$O zq~#0=8JrmXEtdE)qaXoN&0B(gKL_;_xnqY8yqnH@;N{~<3Iu3+$^o<#C+3ZVnlV1a z&Z>SmnXOGFz>eJy_a5d??dj%xiN1(p#1=$B zbooI%%+}LeZ!wD+5kZ%$0;-2mMOz1@UGe_?MoOTsUQJ?UH#Y8c{b485_>A@P;!3Z+ zeMjob;}*W-Iy=ofIN=uRz3PUHqmX^T9}27pX<7iPy+v^!wr}r5Ev+Y(myALsc8SeI zB%tVNFeaUi+l+whW};T;++e4K^P#7o-(m}kVn}0L1;*HVk$#{_c!Ab}%es8&lAMI? zKaP(FsAv}?E!{DKwr01)_*m{N?qOJKbnMnmv>gHd zA_6uvn6S z@ol)D!J)&!r+`QSz7zKD_w{|^qE-9lOW>q>ZuRb;e^TU`0zBo7ZQ}}`KE=WRKJXKd z&5{x~N}F>I_U<8Ba7sr*RrNB%3Qzq97?o)fzHyw7_QT4!tUIMY%bbyM4r}AGBfO-L z4Pl+eT|icN2M{tdw3mc_I3fL_hk`;en`n?WkfLvo9;?y!=)!a!s9%NXlX8k=KSD$y zR%M&J_RGuPXbSFjLGUv0)5s+*+J5}}xp3+Syyf7?_sUV6?J2>LPSuBKK_LhxJYKtTo6gY3os_7^Bh9*5vuIzoB)+e4SMuI7l{cPf9lo0KNXHEDz zJh_0v@uMxv;@W#;_fl23z2QMtmT*MlE%^e~_TMm3GizI^^3Mx>M3H!UfLG_vN)!z2 zSFH-sjhcO+r*^d1NrN6hDzHx;YahY7o2;#Ub61_7g7&z@Q6|k7>)8DB_Q1kmtx{N0 zVtIWhPF1JmW7M|I`i~Dj&*Y+3-&Z0`F{Ihfr7KsC4PrEJlC?@vJjGTESwGha7ZID* zuOcrZ2TRL5>nSI}_H|);4lpin*>eO(K_Ie~wit3pgA82Nl!7&YESAm_dCFi)<$!c}Zyw7J?kd?hS-r$UmS!~j z2xF}CCvV6aP~9JGEv0r(IWmBgTq9|qaD|2B64le}cj$gN3I;-5DHWwHD`DhbWs3wG z9=IpEyB@+>USFRSwVJhle1a7gA@^294VIZUf-U~cDMFaYbf~GxUdBWQu)qv3JTYk< z;UmxU4Tu~pJ#6p(0|%N|`Us3r#&=YjO@iz=ephH1)pOufKZ$^#AlCc$Ih9O@(RhYN zCe~y#(yZLjn>|}r ziF-jlqMc~*9V=@lbtd8+CINifF64Q!`Q2sC^c2g(58DK;8enO=Y17I`BPCcj_7WyK z@{M_FU2u;lda<}jg3*KQK+cGZ0t-EBf0;LxtR$Bv+IpQ(`g4LD|Ib`tLY5g65lMOK z6#PchhUqtS4#RqxOD4Iu&1TTAvDPtCqKisiL4k_jS&;`SYGwVQ3E59n((ku-5HpaP zKk_U2L4!V&m2F@TiX?7%c4BvYh`5(Hmux&;#>JqEdYM0eCzmID5jeQd2&;_w3}sVX!?bxz-g{c%+<$x{Eq^=5)Lblm2nb?R)N>r%ZC0(SJ#Ak?zP`wt z^PncGkDbx7L=2_!%LNg}n<#d(QBm6h2_8w@NEwl$QC;Mvi(n?oe|AZT8ooojLf`B> zQ@}PqM^CR(z5P!=asJIW*QK4w9iEvQS=Zy_Aj&HQHt-)_=Qf69YG)8MBdyyJw!Ykf zRRjAEQORz}9>W)P>2aBBR;~JlLo*?gINOE(4$dSkXE7F!!6b7V`|yB)1EWo%C>}e! zzTdUF0c0bvGLew4xaCQ)XU>#Dk3~e$KRFn6s@z$!aWY?+={^$lxw+}-sN9wW1&}P? 
zd-5cSk)?D7ELeVhM@+sOLj^J9g64{!QpJOBL$%gZKrf`0)lhJVhVn{>jytQCwH!s+ zndvD6B$P!&UZ{dpZdZLt#3T;KL70lrY2%hHElAEm$~ya6R2k(o@2zhAYTaY3Qc7@I zBzh8*1tR2fsZpNK{*Cxy$q{=TSBT&i#%}c9OP4SEjns8%`~`{)hGGCjFcGEf(TT_4 z22SyViBw7@WTMgoG$X=0a{BZwc3eOTUJJ{XFQ0!14E7zWh4H>%d9#^|bZf3S4-Te% z{Mpl|e=w2Q$nk&x$~izdMD(eIv=YY@(Pg@?tKw2=-c| ziHCatTp~(+Ws^uFo^*Z7U;@(&%}%qu5BvH~r`L?d;VB2j5u~}v0yiJQDw&HyCgU%W z1u@VuT&>UED3sQCJk=+FK0w@u4;-L8drt08xi1{m^|je*%3UIk{SP`T64*uq}Dv4>2;*M8=%CksanE5O@&R zj(O*IF!^wilfOCMRyA@B1x$!o3UeIVwJrnZa|`@m%+i!HJ9d@GhdENH*-;4^NqMhG zL;z*wm_N_!0=@*FZ*!8&*6ZQQg8iRWqh>8bc#Jox0wHZG=o zSj(Dvq=8hA#{77){9=!uJ&!K*E*#~!UT3ZO?vH$^2F`5Jb4pqi9FPz8R|KUoK`;+1 zI&W*sdLpVuR5PNh%b zF}%IFSbu|v9j|F-OxMHrU-rX0>LHP~E=Yp0ej*3qg9N<(^#F8-~>UXBr2!+6MyoM zE-)E6{^trS9*ShNdHh;1s_xXOB&uHkh}+*kRmF`7S}?05qBO&FulgnJM6#M_uR)M{_+p{BIRhu6Sc7N2cy{|eHG|ddrv?{yx`>p$0cq?eJ zM>Q(7w@`IIY#AmqB9P4VKIQ_4yolQL$shWU#QLfAN|m#DBmto?$4YfN!@sT-JHJu3uBfZ3vGRy znMJ754v6&PjP|RLjdqDazqWp3ov?AmIpM07YE$&bf)4y+e{OuP6k7AEda)UK$d6N$ zW+p@Qup}0Q^Z&A!M470}npL93XJjjEHG*^QB+H_u()EVp`eaF9g#pas-qmYsGKN1=>X>XxqOt(V0{Q3jj$f&CgyHX(>3 z4d^I`S(vJ_VD5|=9*f#${YoXZGC?p0plp(J_iT-JGDb7xI;dEp0)qr`D`t^6<>V#F>^sh}tA3ow$C zuqfQ1ysPF`iiO(R9`}tsduE~X5=Eg?V?UIa%lGduYRXBrkMHC2$>Hp>xv(a{Hhnb$ z-h;}PvWkk)3k?=6B4%x5o)X#Y3NKdBD~+YvLV`OM(K!M_&!L1mbnh|_uH<4qM_b)= z=ERyO9JnlMEEb8Kx^^vyLZYydW+*O+jvEJTxW9baus~KOhnU4PEL3KK`}_| z&VZPRD2jEbC$5%&62E`@#*T!Y$5Vhe-5W3kc1zt*?Vyta*Y4cCTWq>z5i~5r5|63) z$fE<#DQega*A<7tP*0|IM1RpIwoD-yw@PtiN>Kwl!DkM3|JGyRz?;l3rM*ZC?oY4e zRqTu0btS-%{t!XHa4e4~XQB}Z+U15z6?`sbK(pp5GPydoDXLgYvcGf%N6hg)?j!Fr z0KKIlYCjxK<<0`OCFEE3AeY2Taj3)fd z7T^M;;gXoEIJW`3g$V>%c~8katBI6$c|u8GX8pR~gMv6RlXm`PvYaxw9Vo;e{xiLC z95iyi)diC>-VIY16mZ@N?QzX_Ymcvn|dMW3*v zL|RqFvW#}U=Sw=YE+90qngR(LTHKL&vkb@fR0fn%(FxR#p27pd0T1fU*z0(g0h1^4 z-czTpc>L<+%g|H5vnK;?B2u~$Jm;Q2%oU{xktFq6Om4JBkJh(qLQW*QY-r#8GWG4E z>q*=U$8zlDgzG*M`g4qAmA0?3e_B-3QHd1JK0i_^(Pq%vVYGb0hEf2 z8O*}n9A{!@cfmNSw*&!{Yx;4!$6Xfs`A@Ja$^LI5jjWx=28$TIZwQW!I>MsXV$hK?YQ3AN4xqyZ9Lz~qG?Sl`SZ_L 
z2=DU}@LIdTlZQa|?W>!0y-fLSwV@`yzri{VruxXC-st$Fjl3UCQ^|+V#8-T4;EuC{2pUO z0BUgPjLD zbHom0@B4rexkstu613fxTTLS$+aEQFu@J}6(dbfI>JDQ;3)Kl8E0(wQ7m0-~8$y;w zd&=Q4!z6@TZCaTNB)nuY!}qU|Z@o1}y!F-?>DF88 zux?3|iO>yrWDodX^E{Z5t5zN1o|4~Fbfy6zON@A1+CNc#pSJ4k|Nmu^bK6kC~$7bbSdC{{rFf=&;=6} zRzpy!=fAy!u6H@JYy96Yn#JV%*Ee@>^X6}cw!XQp0L?@CNhiR^3Z?(?H;EPtx2R83 z6&EPpgWl^!|NcFYdz$V)zx?mN0WZ)JBk~fN!M*;zxFZ~F!8svHsYE{tVpJ?omiB2Q zQ>*&-{dc9?qW;U58vGIv<1$u&9$mIZBI`S+l>qMn-gVbKe7oPwZB(bLGC$gkgN7X- z=;S0_22EWi%OdtG-Od|s4bc+w{NuwTXRqkNKz;xIy)LRRiOZx^n_Z+MEEe@Qu6NJR zs60|Q^%Nf5fU0SQ?PPAx-8)&i`^wzLR2!Y}4qX-Ayn1ysNxealntKnC7rWi>K{j*hbZ2kaO5{Ett~9%Du%Lzp(7`S-DqmyrgfZ z@kOw>zGIWfs2XR>g}tTPSg+^4SttcQd^lZdI;3AMxo+AVZ=d6HBaHt^c&17do*BIU ztTfuNocc*qAGuVUWa;epU_Q}~{fDO#YkJ<^PjVvW4B4N%O&Wau5C6yF+w{{tg$RGW zayrwcbI;JJsWvWnXedFmftw$1UP0vuc?z^iq9SzzTNGMr?ssurdL1T-;us6jzz||j zu}2TTE1g(|c-?Zy!>2P2)B`Z#c>A6;r~JEgTt40rWQr`CS;y-8sLiOboqF+=1Z7Ns z0h_+-QtHHfh3q0BFC6f|Mrkh_!{_37&XP%_YBwOIlZ?#IatDkCS*uU`9V!W)>EzU1 z#*b+f4TiB6Jz*qh$U_Pfgt=B~YB-cg^F^VJg^%R4-aq7q(!$8$*AM#8;ReCFlU=Dr zyNv3f!#AQy?*+ppyY}Zc9)VSMrfodHV1zF1zdP;hDglpl&Vpeyu3%>pPbC48FK-En z9Kinzy@##-ix~W|3Hz^KpZVe)8zBIDC6Yn~UF&u065*e0Pa_%x4^aTkfb-`ziyY0M zqi7eJ>wM8TijK?|FJE%<re${p1NeqGeGg zJR15k4I+$fAJ{|~H_=|u1$DGDC$?Bn)Thph+mvnGvw!E+>E{XhOh+Xr%njt+Iwp%` zf84F+N@5v`7=KG0MY{Ah6~#xqGo>^V-hqk^Y&<31;<&alw;vpyt=wHzVX!gqSpfZ0 z>*Ra&LNL@q)94CW8dx(pp3C-WJ_;SB|seRVlAWGyU@5d%3q~5*z znkPrqtZ2eA?vUXkR4$2GTGd2-&gBMM(%hJ(y%>mOz|Hcc@$^4Xk0HGwXYM0GYZXRC z7*xmG3CC%`tdF;MLf{3mxX(+_azTXZTB&Mk()k(Pe?qtK6w*m8)?N`KO3ou>QyhIG zq{JkMR6U$;DYLQ%va-n&Slsg%?S=aK$&|aeA^e6r_fZ+1Nc

!6@s*1qeD2(L%u>-m4}}2kuR|S5QGq9Y{PiT3_zRYJj-pTZi>x`4M$*~$ z5GM45YE&~H5Bm5#rxKHOyZnn8Kow&Th5nS0%CEp~6ab<*Yt5XOp?i62y_$un%kbNzDsvy2?~ntMBItL2$AmOUsk~+khVgTh;v-&yM1q_K`RMD14QOzyV$eu zh6@o%#ye9yC(3JTN>M?OVSrsy3;+;Ha`ol$YxONGf;ctu zoJ95newNzTpvvFy4H8y-A(P<5Ss(&#FXMM+Olktu^Tke}V?kLsm) zd>ffnllSL>QhYL*iw!F!wzA#2i9QJUj9Wa~kY_`Bqzf!7BY@4EfBpf&q2sAufJ&Kq zZ~1Y)G(RB3lO{PiYSsy2d>p0OmiO^d)2E-2J`g@9N8~=zclsB6dM<`h);eK@^z{ND z<^JEjNU3-8_>hyKid}E-Jjwa-r6XQ+twU$$)bVZ4l+JG#6JACA_pBjPU>~3OdMg4? zux&Yat2)@F_SakUmE)Miq8R6D)~3I>O)4)(-THFDe7bx91R;_u@%lF3rrd%$)I1R! z%wX9>vtdNkj(y}KePbEvy!&4^w~phw!q>k|uPzCq>^B579%3A2GpBV@t!tfK5@r8i zr8@48p+@aRnPcY?j{g*vLf{@`5zxEqVIB#~$2IXzXIU;T%q8QYSL5ug{_G^_ft-^j z**EK9AGQxV3OX5@NW{QSRnog_bdv!A|3LjUkN-)=`yNz=&@13sgeVOqEii5%oCh>a z&5gRla<58Tetco#tXo$Ew16+yw9B8D>i=9n0!awVtY!w zk8=mJOj{`mMq6g$P!uVf@*{KkxIPD4_FkrL9+iY=MTU+A4mg~6N=-A;L6_a$)XZ21 z88iLN26g>QCrd)Rq}s@<^mX{j!}v3<0L0v($LxV2P)eAIbhUN6M_DiYBP1bGlZ4IB zi=n9zYC)};h{i#(7xc>S-o9P<`^1^fX@uSJLV1b;A&%AsU=S7e0fPov8X1MSdIG2} zB_&}umowne!-re9Z$G8&aL(}ymOmZ4baABXHYel`yC!Vig%SrgY|5`|f4 zX$qAiHm$t0Y9`(fa)`31=25q|w^~*-Q~F$Iji^fPB(u|%D9s6*j|lHpB?-{#MDx)B z>GT`%1vO^-4Gjo%7M)Gae0&ib9ui#cYoYANtSL399_Pq@EZT5JzdMC}K_kJx;#vZ2 zC7?5-%pqzEmOcpQ2OQ)j8tDeWTHA1v8Vxg2Jt65|KMBem7hoRr!TZvb7Ct0@itt#W zkDVIURD_J8mxH3OynG{=x1$mkD5??nniB;+T8r3_HK7bE!ZITa8R8@mLr!N zW-;Gknj;RspwUR>)@jV!K{^d9nOD)%B zwQ&a-|9GGQ+wHBEA74R83_Pw!Q>o-$95#8%6rpJ`UCsj+M}Sk4X8PMYs5?1*Ij10w zIo?}d7!!;a&^0AgFRs+4qxzf*2xvgY1-WN5(1$9*@&uc$)W0+Dq75;1y-sHX+4tyk z@3!sRoFJX>Be%=UsECH7;|u`KrWi1Ia0)CQvt$zz5H+X1N7*H*NOCe^0|^EY+1%Wm z)>xN^KIQ04mT-0Dg8Yik>IYd1BjGj7fL zu1ORzm@>Hd%^4)pmC{nH2xHDju-d$uA2Ybb9lj8>S3f@QtNw$9wtLqu?p;4v-D)_% zRw)(9lH=~3<};!r=c09=Tpn+E?H6SIi|Y`AEWgC4ji`YoJlVt3uUL#L-u?h@OfAW3dE zqq3n{5B8ayGAgrsC}Ey5#i{A;ZqUnQ>J0IlAMy={<;S&CRq#3}?=uOP+cFm6oXu1s zPdu}~5s*NZLGEf%7!kbs(S@Zk#ays31tzB9Wk(a4NR)6wYgQRf_&q`U3CD8qSTV{3 z-onEB1qSw`5K0CQTO{;+JP-sHCWF=B;F*I24I*VR@!{F?=eU?*+JHC_Q_uo}9c=y5 z3nmq&BdDI(l~g0f(a|GeM-KG~0u0Gm&a?AORkZsEk@Pt?h$ll6_)7|_F`nmr5NaTb 
zq<%0_Svif2f9idkUKI|HL_;=*S7o|)x$z$UqvidT?5kT!V{0THS{pfqinmoxm@#@H zf-uTq6$Wb}CJ=Kt_VwCLGI(-&AuhZ2wQ%_s>`^KsE|_U^P%Y^nYR#A$iREa7O4-?Q ztgYtp;VZbBTy&ce`m)OXSd^F9@2#)ZY}3}!vApS=m{TTqEq79mk5}kfMW0FYu4pt+ zT*gA_IjzvX4ZY(oZhz3mzwO_fuC;o46v9#-V&cZ4JXCGqC0J3;l1g^2B0#_>P;FQ2 z+n40}U1g=#^z z_jp=?W6iN)jK|hNX>a|&i&VXtF7&M=vKgaZl6oO}27O8XM+*>faFE*q8G|mWV$sX^ z4`g<-CYN%$2O&%DEXb5BnEK80D;s0c`_Fq=egtN( zefy%v0D8veo@wuqH@6kK+zapKKNqHh0u)799LgTvxg*Sxg03?$VvMjRMVo9~cl4f+ z1M&zcBNYr2v3o`GC#HSeAWjzdpOi5BTI{Fjr7vS(YmXg!%&k&F-!RZp!DHL6M#{tm zne@Xn1lz5*Ie?pyb9#Ju+Wd>c>Zoor6<4{lJj?VJjF&j+HuvP9P2hwkIotTTZf-xK z(+OM0s0dO*rtdz9xOC&j)N0GV9^3Zl4`hO@jx5&;n<^5ScdpCHCQ=6a$Mz8~lZ*)} zxM;q_b@_*;(%f9ZwouC>Xo&7M%G9LUwA`#4_|pt?J?T)iLm`s9W((e~%?*-0@Y9tU zqwRr(&JtNle)n5o`#xwK670$B*{LvWC^R`RH1Ek&1&>Ld(wp=$aMd;s86X^sVXZXI z{~|Bz}=DPQDHH4CrvUgEA?ch(Y;`>vN}+h3!)Qar59AckVzlein|10Ne`nI zhXv<%LOI$l4j{#GW*a9yAR?fQIOdBH4;Td_ylGciSxS#dmoELmND}Yh0jQ)WX#W-% z$YE(Fh_ks7TmplGmt)++#(47NNxH-cHZ+O>fn1v6dd?_9*dVf$b#tK+DVXCErGSlo zYfCR(>LB`MH}kHHrRNJ^Bkal8uTXH}a90pItAlO)K24a$Nw#_6&_Yz4;W)-xB&6Yx zhqbY;?nxZfbNAx{$cQ^_zk56{DO4@aGG`Sfs9<#l4!*iS|tStNxC z?1ii_+M`XmdCH?mjTO|^FW|cCR|fe>QO4iEaBFMnHihG-aqOVed+E|bQX{AvJTD__ z^1Lbhv=1ert+}z77eg>xC#xy^!ANezLCkMMAXfOO| zv_7Y2O#FM3kV4_DJfAoe-h?`&=S$vngQjPkzn6y*tnC~j2ccpKfHUfX+tHi_{sfgK zVhYB7I*kZJkRow3SdO}wa|Q74!qV}mpC2VBVY-z0nl^Ss3TyD}puL9<(Gt;^wDMX4 zr=>LnI-|IxsI5vrB&0Y9DejaV0ECR& zBG+#Ovq#KaHmOQENWkyVb@(iTg!P9}U4!Qf=CsHIKL(`t7R&KLH?M($tN{wLChlHqbI~`oszL!zzvQ6PBBs+iu3nk=w}IWZR&Jem-*`(&XkH|HRmP12~%NzRs-+ z+YW)&22Mex>_mpf_9BjYAl*dmPj-rKk)tcn@rLRWA`6mQcI1{=JA39O^>L~$UPsEO zZHZR6p&HO=*V3~~I~K>a+=(VGvw!l~HusuE4uUJCy^&^dC9u~x#DZoOiX)ethk4*e zVJRx_u>VGFnQvAv&fWg=xP$8NisRZXZs&C&BxFYE2r8zC1-<1Y7N1gp5>#^ClNDnk znDn@;Ff*6v3Yixrj>sAimw>!9@P2a-JwTe}7`FO$<9=D0bwvbwq~Y&5a1?+FQv)!7 zaYZ7rKCfUI=(Hc|eIzaD;1bE1(BYCo-Y3}5ES#YIM`B_FxP;yv?15iYRgwA8S!zYR zz2JmeynE-)!8+UR5DccIq*R^T#hDf4d<~F z%7og9a72v8`8ImPRz%Ihx)m28k9sV4$XQD``<}dkjLukCJwd0ZHvNM7=b=ZfkK-H-`sykr%%a 
zMSN&86_XdEg#N=iVSr$WNSGyub@<>E1!7bdG(%Tl#z2@W_l8 zvY$V{Nlbx?Tla+d)UdK``p{PJO#~K0^)KM{&sB@7+1rXB{@WYtGwYw1v=T8MW4R-a zP~fKKEvOX21>TtD&R7i>tfCkZ9uNa4ct)`%1wVV_5brHzZ0C{uw&n?FcUJITM*f+S(0$<&%M?o{~^5$bUM2%8!Ti2wVN8T(4bD*HPX6^*wimYTNPlfmu8w znN>Ua{zQJ886E!qlUyG!KWMiZS|=y36;IO(GTw@?s2tcjv@_2tO7B3$^g{T5{-{5` zb&}qN^zZilTq9HN)oY&^?ju}G8stuh;@?Bd9Qu1y?4v_MJf?qZ(Q>kg zwM;1{&^9ruyv5bY*?$`SGYsGtaZ7EIjg~X>nAH>4ySNVU*qr8BJd_EwZvFag)g9&4 z7S+f&-aRmD`N<8dBLx!>7`A?AUR2(0rAS zZzP+T)VGIhb>6oER&Nj$chaBxWZXo8AK$+s?XM!B<`ufPd@W_a58Qs655b<-yv$jg zc@I{Em~E&josO9qs0~^=r^#lE2%`CF?b(hvF77?;tW`|aU z|J~0|72XSAi^B&@(|T%~Y~5yCgv}h8H`r&uz<~&Rv`HmIG5*-0Lm4FCo^ntnvK>42 zkYFUBELD*-++`DHA}j%!L2`BD_%H#dwg}S(<0uzlkTz~P`fqX&Q+VoI$@zPwr7c&or!jM)<^C)w`N2d*V?&AR4zvT4LUiJT zD{Y!q*PtFHfA%w#gYXuBEzUtaZXwv9%Ie9#N7**g?Kylm#P#>bOy#Acoj7?K` z>gcUIc4%s8q1Q!Y`otxTsgR#U^BJdy?c9k1@;l7XK0{DMEF>#fNI%)4MX>@~o&{5qX9(9r9X(F@u%>@a>QqAG zvEZ&Tz=J;rJ>#B@@E;_K$j;agM~z>coN!Gq=Ywck`#m_A2U7&&N={%fcg!}sgHWY3 zZF|ZQeaPkVR#9-lAf&|g%X2!t<_$yGPKLz1nZEVaUY)I*XwHSFMBdf@7LKPCwgIGCserD)33ZYgmMlGKXPwcQyNU+c{zn#S<7_H{uzOnAD0@ zG@(isJ_lU5yI?6t-N3v};0#JA!Qug#1qF#S_q3<>B(H8`#G_^HK#+$oL_qra<3~T_ zEcnb4m?(r9`9=Rff+wVn3ffVNY4+g3-1MBvQ^I*)6gnXP|Bb`hRGIGx?L+lP z_#5h4+u7-`ei5)_&Y2IQjLOQ9$kO!7GNs>p<+WgnBb+?snt1kSRg2vM!^K+5uDL&= zOK32O7@cshnNNZYL!{2?29g>-`oV*sd{B+ZkKaJxa;3_dKI4zl{0-5_kP$VSD2zot zk=Yf%fC>w0@)a<<=)UWc30H$;#@LRI4EkZ6>*w9{y*6ts&Wa4!vGnVl*}UHCIKF`w z%-W&}|8K~o@zsHb9P@fy?rhGTh-78K`uNe-+S_>*JPB)S{rYoqL+KpX(&DJ1(t=O= zrIWXXN@xCpN)OdFxi&WrlL~WQSPkg{ThrFyk~XiTA^hJ9E!YHIhi>Ex zoz&MYgNd{LSeOXCm##4x4bx52?ZCTx_MHbdap|lSuQl%+ej zOA;2K2a~>)!X}JapSnouAdy?nM?|n#gF_Ms1!&pcKq@SUP6!3(w?Tci-e9V99R9~?OSmMXy3jB{t2EE6uK|zuIbh5 z3WJJpld6L&+967WCWaA_SRlYpkh0PRL{{6tD&ItRKX``(l>873!t#nwOk|gi(tJxX zgcU1Vk62wjB%&mVYdvC05^F~Xhu!SIlx*Pi!)QdB{}svCTNHwr;2CSqmnHS%p)ZNmoOa(CH&*`bPiiLrI$N_ zsR{uB!I?>6ffxWkhla8i^I36d!22RKEiEUybC9(GcE0^(rB4S3F%X=?!-!-jFaEa7 zKvva4ibbu}z551E+`EFzc+=x^nNFpyP6Pp9Ii1ZAO#>(YGQ-u>4<>2>E-)iUu7^}4 
zezP|~_@`xLKuWsM1w=0wDto^1W=UW0MS!6SeFNMQZ_Bm7-Lsbvg|pMs>0z`Z#mVrkYxPuy_Tk?hfRH<5I zp|}yX`&EscZg?p?e z1TI{(XuSHMfdfw+KYq?~+t;q!935$QvJ365mSC1RM{oz5d0fYIJhqil5+Z=$xv}?F z=heph3I(=!efq;YtK+t2rTA0x}P8#WmJ zK2ZtX0F?{0uE}mGE0mS=><>X{d+pl}P{AFU?cT=KEOZ>}jsw5ul$w6){vXj$#!*kY zDSI7ECZ%ECWMpJK7U}4S)|kJF{cIdZKrv%05Wu|c2|!l~I4JU0vo5P2EsJXBk+na! zn97z*uwwfI`U5AfzerxUA=PFRIbECAH@k-2yLY=yxwm}GIFX!7w~&Z#GCIS!O4ySE z;=s-=cWp>s{`DHnFx10WCy2~b&%3bib9U5QVn=;u^RGYa6JsN|M_B8qe$f=8 zD)X=R5T7FwN^v1QG*(6ap5JDT9@(q7*t2Wew7Wldw*0uP>VH0g^cmWq+?xR|Uv}F_ z#43RdNs$Dtl0E|HM>6U0vBWF)0;?K%3JJQ$Htx5W%yia0-c2pQz%;aNOP{tmKbLjm zGyhJPr62guOGtek?DO37an*NaS3jLf1$;{D{Lb~*i7(~j@?Q>uV}|3;9E zdsWxK{+!zSA*b)+b~0VMkLxL;vnF8hrmYt|LthOW>fOI=ht0$1&sRcQj6w7g@p(n- zZ9`_=wc%!pCnNc|HGgLv9&;S>-gL(;mK=Wz z(cJY@M1TTw;cp7&TLe>2o^d7N{J3vA;|9&3GpwJTbjpnyGe#W8EEtRA*?-R~F(q({ zq7f_i-{zGGD*fFYz63#4zsF%p>%E<~kXPZ5fm=~b8tKFS`)&9A{edFGt^H-8jEv6f zXQAVG{PKgSPzX*!nufRYc)1$Q%PGIki9!H)cb2zTE_(P1cEX00bDCNwI-}p#HdoO6F+CnS42wR_%!j84|sm$pZ5}T$5N}GCI!4G6Kr2s=)%qP6(wC2 z0qdVk-FZSHKJ>1*@ZD-`dxka^g@?q(|&t z_P}fe5ukf;&n3uq;LQB)sOGdul1R^J9>k1UdiD=BWD0hW&^x)v;W~X;#1wQv93+M^ z{`0pC>36~80{NyOU!d=K^Qzpiea^?!pNsAu5CI7e`l`JBsaaWzWR*^(U=LG}gn|a? 
zeOSrNYktAOU)fXiou;5XiZULdw3ilW77le`k+3m8NIcKKIrm4a616zE<0)#|WN0Tj z43Bd_iT&Xn2T5cp^(*a7O8TWs-8)B(`P`Y*4bXy$Ebejl8=4^r3&W z03a0}xt_2R(Xgve>L89GVHYN!o{Z3wO04iF1bEHa5hOO}b?XA)flS!+bxYGSM)#C5 zw~;q5uXxI__DXx-d3SrrYHQzpkT^FOq&RBiNc>}LcHMUWUS-Ado(`@R;>+H>#erEv zogmv<2?(-OAV6GQ&x3PY?_(r9pJYF_MLT#J3}C*K?zKh?PK0uTge>{mwd2pOc$1}o zJmB3%eCh3Bx#Gsot!%<*hw_}1JO)v%8bBQ!M6&{y4V?+Y?oQ^cFpr`auzn7j-bnJ8=wW?!x>lc;b-6` zLuQZifztL@V1$`GmDdk`ig9rYqizW(IGMMrRWNQeru2Dv-t5*plkT{JR#y;v2n4rG zNbBy&`$T@vH0u&^XybhNw~M!ALVFw*t<8))_oy1-gk;LSd*!5cEO`pB)o&LDO?MvFKRY9X$!H!rFn!f2Z?&w+ zuUr4GO7jP&^%54Ipy^2)*hzJ7wEXeIWbK~&_F?T(ab503*Vigu6W6b}C#QEPTE4IC z`=^(AXLoJAe!Ke2dZ%YUKf+j5+}dpqK_z@Wbs*P$*zn2#=H*{}8*sSqa&@v&{p%*@bDP{CfmVMEf!B4c_BM`S&EB*F$4K$`<+Ccfkbh`-Cj z^txj5=o;AG-z#E%pwI;Zf+?(vqfzAkeDcJsbWXthd)vc$_3R1Pxx_BQezc^Rqjfhg4sikoLHj2}_XKuJfC~=0Htg6EO zRe9n1!cjCU>lVdpompzLEVLF>HYx&WpaFfGV|{!$&T0Rksx~Dd2!2?+HYKdnU-^N- zLI?--me1mt)!Vmc=oYr~S6IG$d8X5DFmbqHz>&RPUbu`HW{3x6*!*ndbB64({kYq~ z;U6A$lsaR)Q(rY4ZR_NuyOV7-7jj}{lj3!F?-kWfBlTKz8I7oR&)E#o7gbyz@M2*l z;^zEg!Rod>-E+MbFaGjI+~SXJ?bZxyFd{bJE`4gXZ+w-W=>9}tXX%?1m`$3 zdVep6s}yihXj`15+2);j1{hQJn0uPkIB;Dk7u{9P3cab}_G{m{%P*=yMUm0Nmc}r8DXv``tHVE2!tEnnK>NS|5 zrl#e>7SnVKEsM`81E_ssyik)9dY5y78=(WEAHa{5Wd@t2xVa{Gm|f)W=Z6Hh2trlb z(#N4}PTC_j^Aq?|x(;CQuExh3`*Kdt71$G~q=Ge@lsh6O)YjG}A9%~<(E0f4#fy-d zg9bvb7IyY5oN_$5E?fsPnt88FUFsI1(ZDvYe)*vE!yu43bLP;2*~pl;PZ?W)nIDV7 zgan+(1)&_ZWFdB1us!4x(WRe^dUBgUrW;6%>9N4*zc6T8(P}UKp}qc5 zXeBJ8Xsa~uF#AM=iAu1BJv#kJdYEQ&xW(s(_3WC{QDT6E=+NrCiR|wJ^u?BqcZYos z2CmYNxB)u~kjG=EIU}mb-QS~ZpmD#GPfA1Ka#GQj>PHq*_*P3kx zIq2P}`;)U;+Dx4=!7$XQcK7Nd;X8Y=qi_WWSbl=y=QmhqMAKGTIk%&4pvZ zDUB$Hq`_HbRe<4Zwn6HD!YC~0fnU;Qj6afGU0f{dMlZgF{KLF3X=z3nyJzO_WnO2Q z#XLx`+E0+1QRHZ%#Iyi@tF=aGkOMWdZuRP!8AUeeec!W1?#$eiwmfRslqtcci@g_8 z-EsT&aW2avo+AAREDdT@LbOx@(l3?i2~+dpVhf7CC&BJm7?2i@NJ~#QaESOxKG-ZK z`>wRH#AD+{Y4z3r;;W^VScg*kk3>L5IM#nIpq) zMJvw)2`t1B{?jMtUB?`o0)}ecS;gAvt?eH6(-R-UsbKN>*iL~~s)#!(PdSy4e~gI6 
zO^+Bp=99vh6>j(By%jVrtiJ0dMvx~;ni7>}fAN>5J~z@yV}!1=vL*!n7?_}2G2;?w#GKefK4&MN7ZhOGUc z{^0iZs7@(ve;YJd#>04|{c{^@b)#;eak@+pbz3^rvK%==91P~UgBZ)BzL}^kPCUdP zu8y>(0!7USp3wI3pK$4fLP;(C@Hr^E?JzpE)!#YF&(j+paOl7L)yST6Wy+7~syw88{`ondd)N!n}uYKQ!-}c*n zn*i?M0?v(%k?xaw|Gp78AMLbtlm7-}4_ob@9osu#dcyJV3uG;u)(8;*V~Tn*E^7HB zk>7s6C+PQDpPrMJEGEC0)Xlii9};B>*eERjle4V6j%p9LBP>H-ort6`}8@*3nM2bJ$hS#A#=RCdNFYS zo5avidOV+Tswu_MF|6b7-$po_;QRPUxiAqf>E6t58+Xf+kS(dIE+E?IkdEh?-J4kh>?4O zmH~H^y~E9sHcBk$s?ycd6B;x~s33ps3<-(+Cdl0qo1ha(kYPyk5H{_sLib+1gz@Ac z#pGOtWo2a*6`OblvggPvgPoEzb#*NTISFQ&)0q*A75B7D!_U58@+FL!z<>q2fzVC`^O7bOaL|e=@LidVBsC<{WOjI7eEGe5_ZASCLw@9(Jj@;jIUF3ig0~N&LP!7*nc_pC3OK~t)AyD4 z?Adz$n)?IBChM(vlr0}{7s)2#a$&IDo;_gv1C})cyHJ-A+!Ph+DaA2i>uglk*uj|i zKD=^c*IvEcmo0l!Q{#{jaQ^&2H0KE&6?LqrxY2odrcD8<&F@v)6yVafaM)3%9m8C$ zjgw_|e|tXK$dkQ`a>;WUgX3@HQj$k-(^Yr=iUr<&a z^P^tp%_?#TSk44fqc}kpg}(oz&^%Lt*w%y;dc=sU>=ugR zzoxYO{HgMJV9tt>G@fJe(9<77fqU6WEFF+Ma|Rb&agR1$4H=(Ej-!2%eyu|h^C)Vu zu^>x6)Zq+KhCok$x1K$crKQiGo1uRM_u;-gU>AeF5S37}K;Q%`KR{oFFmO>cbA4Gul6O4%EK z4^pUm2rvbY-NZRZ^fd$sg@P@S->}Lp6ru*~BgF@SIixlcAQWn=Fo^7jjhM2T&?iaV zV0~`@HNN49Fb#op#ouw=uaU=f!9}itu2Eg_XCAHbC2pQTT;<1rVeUVs;V04h$(KKPj}LS&F-0ZK-60}u(q+3BbS4-rj;5i|)~{B-+#V6a1M z8=N4j*>Q2T|JJfE@E1FG?OKK00ALoLa9H%kNo}qddKRDDrTy#kv9@x^&Ol~^iubm= z9Rm6Q+ImLoH~HITw^EB9AL-`ALvk>hDfb9G(~`4mf-VIOWVi{iK6#9RaqFjtZ!L(1 znSZkbK|%N$!YDe5nZlM_{aa!_m=u5JKr& zJ+$x8t08o&V8v^@j9EltMuhF;sxXBnq}9Ywyikq5(}_A=*wE!#KgyTDO0d)({8AVI z)$z`q^O)sJ@_ja0(m-7LePvsJVVk0v8v&X>+dZV+44Cn-V%#~l7b=cMXioXYjd4?>$gD{o{zW+ z!bdV#pu0>f5a$I)`kzz%Umb2&7vtq~D9jfr8X`2L{;%4Py?AiH%a3m#klwq?{&JhI zs55lvv>#Xe(7VRR(Ns1YG>5Q7w@)AZ^8^J0IoXpu9j=b<1wlG=SRu}f(d45}Im~+L zI&2V`3;8(sHVg8 z%~TLJvLOjT==~gvW+j+DHts>S8{{?cb92H3>X)d+SAhE?>;A&zDNT_ahWjZ^8ekDiGTn9C~CJoAHUAek5=}@WQl~B zI24Ye2^JJXha zI|gP`pt^AD)-5_@WMyQCHvvJ<99+<$>T4HHA>P@*|Em3kmbBDWuU`^Os8fx>15;Z6 zjn)fO%1))$HZ(y0N`|07ghKHbSHK{jsn=~cT-EfL)r?u@;*{aDuT}YzQs8GC)|_Np z0)q_J|7XbI+v(I22+JUPTqAGBt4Txg7_wL$DU!}^las#iuT)tE^FR)edZ3OF9h7eIEU54 
zb8pd2=fZOc2{}%PHUO&zJ=L99ePPmiT@y-|pl?~D8mg)y871i!=@ZvhOFhSQV@ndn z3<9ZPD%n4u?|0$R;BOLp21!6Kcq?yEk0uA+x8Quw-@SCP`cnr3+h~(myDXS<{)Da@fGUd+@&p=s=3lqH2uYM7$Mm)U`h$Kw zq*9SJ17T$QB}KD`A5#527TYhNSL7;R9V`zGC~#Z;G$}T98*@kLHzA=reDvt%S9jFD zo^pFcPR6V9qBn;VwOrO)xT}5w7X9gG(1{jRW@#)9{4AkTCg^)&NX8=c>Xz10N}>i+ zE*sN`vmtm*xwVO$r36Tibn3wL3O+}F!ug)_oG9dC06*jJsoede!wL37D2x-#-*#0| zNrR6;t^`(dt~W}w)eHtgCyl6<;NNB&ks+$cLJWZi$A*2s=ifSX#E9q#yZC&$c6&$e z5Qbd-`GG$V~sM68^Dn_j#YV(9#Sx`42WN9030E#ZJQL z<=Fyiz<*u(4+=7XMh*a$TXWeALVJQZ_YYoXK55Z^0Xu);Gs$20tUllm)Dp0#;J;9{ zJAYuGz`xjvd58Z3*#`c_X=}0N3`QNC~25Z)J58!}FDnlV}1& zg1t~MB(b^PID-M-&n{{9uGu(~C6otKCBcn~8I(_-!0NS@Qee7yKU%|8Z>P5TPu|bPgObjjrTW z^ipMJJ-UCNWk+8!$99!UjKbLm!`E*Yd_i z{rK^NAf{WlZd78VeU-DG6T@LnNJC6fH=r%i=lb`Vj*dLvjgK-~i(Oq$(&JlK2fcH1 z+i&HL^1<6A!VF*Hc_Dxs)MkgZB?dxJ=b-J9C(d?uyP$X-Ia2N`D}DWPYAWcB{(u3U zKTYbRtBcgc#GR_iD65PcV_*f~0ehUvYlVluin8)qu7m_hVZ4 zfXF={xukE~IBU(C6f&f%SHEMzv;5Z`vBnS6QJO$tcj3bR0|#EoJDhp=`0)&Y?Q3P? z^HfQ*vgS{o9B!!;E}uff9wS-kM&~2aQ^cgI6B&w%I$&n&*thldG!uOsGgB7c)=sHmf_q}9bxl?ZH+p(??; z*0)cSOM+1m=rwx%!;%t-Z=?q`bd<=?B_yb-t6xY+2r-kF^fB(iG}Coh@B;O~;biZq z-B3;7@?0t<`gJ`aK_3{yV_@8b+8fEHKS_$zE+kf}mS0==>(k*ZQrPb^I!v(l@@0%7- z5>Z_F#R2{MQvhsyB~>+tfuW8wW~|Q6@^9Ph;GpzklzD1WnfugPZ6(8Hw+V_^iNS{5 zrA1A@QnE@dCXhQ6FR8W5T`+pdCo&u9&uZqk9H&o*0D9`D*w%WRe_(+NiUc)wx$QwWWEgw8`$<@j%? 
zF8I>jMKAfe1dW_D9V@kpuj^LhL)aPR#kP3oSA*}j6N!ZrH+!E+YqNQa(QKHRm6eqU zD1uJF7Xd;Ep50zNw!@!GK_PZ8mZlo8A~5oTb}&h(n13P3$Yd| zA%_Q(k(AR>R9#reQwKFJHcspHW(+QQ%K$l1Wx_kh+ZFn!6B3|vZM?QSz-^&X^yqJS zVcJOetXd&Y-KL`G!OubJBF(oy`p1Doa>*#y9ow^SpKG#H`ZTe0^;koZg-c3VO_G9Y zW3M5%>jwvx4e2PV|8RIoTxRdZ#sry(&w70HBhyCAXyU}+$d}cXE%;hD;~veKd66A% z%8Y#huJ-k^;mhv_n!r?%?pi&kSUg@sRb@ZJMAt&1sV@2)Q;U>+#Ye?O_uADO%GYj+ zi0C<7ep8AEcU4{<==qDcQ=zc(=FwpPTA;_5a+wWi@#nW)G&zTD!a8r+msC`?Hx2+Ot(oH9h1F?->{ zhqSa?%JYyP0rwi?*QnW`DeAe~tU#ibO4!OfAzZDIm$tkE4Oi%6p>|(B&M)rxaiQfN zK5?OXdr{-OkhMEvQ&Yc;eKxsY1~6*i_g=k3Ss{kMl>-8>IOq>n+2syxg!Hl{)KhKH zn1WL+zfJ~NA73wOvxCG#km(s;rPko!QQi+BN3FcYsXKSd?JdfWv5*|!Dch~nDcXa4 zclgVm?a=s1$hKCnc)ttT;HXV5!OMjhh`Bhpk?{nj&WUv|mH{pGpTSqE%M+z;wEBQ7@P3qRO*HRd z!7wnw6Y0lEX&NdX4aaz{^zbO;aq?gHm03Vjyw2*2TPSK~{Tzg0I79>Sk001V?Z8zp zuJucBGfx<1V$y%W0FLp{_RocFXBW6Mt{^_W+p%NaHiqkG$sU66G&qj0fkwsYCRCg| zbLPa8pO57>qjq9Hb!u+R)}`VriX<%+G(@DR4NuIb!t2SrO55NEJJY6J~(yq z$NJ!@)25+JN+swd_MU+&U_mm<3gVO>Kh`@sI%2|3pE)CpTk%D8a6t&KZ*@>XwqD=9 zPo6#7VJ4F^>7}xdp{krwbb0eIlAc2(9<0k7?~%J7=WYF%>yoT0C!4z@4y3^|9=Ycr zaa1Ybg^R=7+eJk~qHRqMPu31?oV~Vjv}MDt{7REKw~C6Exx2sgG^HQQW7#sY%RNj# z%Bhixb(}dfJ~45ZnT&y=d!erAxS8VMfAg2`ngP0PJ0mH;KzqL0JV!^Ks=J297@C<$ z3#eIi8rV~^1R(R}?32ym&SrX2q6%RT&e(qn#>1D#-Ir?5v}*e>IGOxJPfriuP0s`x zsik>&7S3e>hH(yO@c-%48kP2yiX;cc25%+jZabXc>{N1?ecOz4?j}7s7XemrU#E@~lsc%x!BwZH{x)2qGiMZC*^cR9ue{=`qBAl^b_&{RCL;=1 zV41%209tOzPV^$AZxc)3u?icp+f42VACo-w{N>AzlO~-xaUzdI9yzx1Hi4!~cb~;Lk(8sB5jL6(qdw~^w!=aPZik%JFB~RG{p)(c;(ZLv# z9aGOdJaOU#nZwyLXZq>uQ>zmj^_0vWsC+IX1COHOD-k~F(Zh%NF=+i7oS27M=rw3x zsf*pqC2>FN(;^QY>Y<^rYSAJF)2syXhms?;w4DX#d+wAe3cj7@YtOG$PKPi@|;S>FqmbOyj4ZfNM7?1W9dlAJtFj7#2e zES=szJaV!=4^8H{t0 zd+OP<%QtNrjhQ`s;D9m?vAkcFBVpE)+x4U`G95akUA_#g$plS}C2`}Ts19J1w0+)c zzSuQMWUg>xbWdS*Ue?u3(si)03FAOF3>38+J^DK23*n=B5130^GTxAzrc?n5tJ<|w z$*JSVbA%kC)BLx!pLH2_Wx2s*_IZ;RXV~O!#<(C2?v)Ga=k#sAMw=X-H+%Mv%X2{UYrVkF zBXrlUS9cu@Pzs}y1`JMmN{7Iin>S~%sT;aon?850ZPHC0QGkYJ7X17P>S8U_FPyCV z-?cn-eG$qJgu5S`K%79Iy!-oOGI{v1#`tI4)mW2 
zs($y$fi$CNvcbtpp~ipvd47|1NZ*wWRMYy`ZR>}F?L}YoJ}pjdIj_21Sc3d#^<&nC zdYM@bH!;C+AkvqU=T;vSX|LT*>YZD(3=9m+ z%rM*8sKYE;xUh87OCVi;$VDzLCwLncZZK$p2b$=Q+N1R0cX2>4@9~za(e7QT?o9MG zR4p;?A`E@?i)gky=_x zN>8TyBnFy`nPAfOeV@|8w|XMc?vRaY7ZMW(kD1sBDNc6E6=6Zh*om$mQ6z z{Oi`RgJDFWFDglpuuN<=u&<#eCXesmSK2sAGnMgPgkdAun$+m?Up$1JG=2$9zqb&xjk-n}cIKGm;sHa|$bbRa6KtGmC@uARGC zTa=?B*ehi56B3+e&I~padyrrY6cCa;TfL#i#;JuS1Lw`FKuK|KnP~p(*|SOIZp550 z*tD-y0K{;k>_Q?H_yD@XH?Swr4v`m(JUzcFSN?Ws_7;h9B@0uo9XK$<$LDgwF>TuO z{a9=Q<$I)}m6gYA^_sI}g-!k|z7wYL9*N(IN_7fK)Ji25?k1UuiFObD7fM}(HN4w- zu{VQUE?*udY@2bCZ)5LNT09*7f?5u&hE}A$Nr5rcn4tydV-+o+@nIXKcXbFv?R}D^ zW&4J1qlZM_fnDiQHe)~W;iozqDNAW>Em#5YtA!AzNr*+Vwu@6rPt3O>!%PGZCw~;j zQkUS^UA}U~F52i+^Ugf#Xxcq|^axS=+FcL2`c8S2W*hDQ z<4dU7pe~{qJ0z$y=c`?JFEbKC_^sxFWo|?{yXU`kc5yKxi!?b*2Qh*M%G6)T9Wh^7 z)GC_`z&^-bwkmWqB-|Cm+-QJC*bO*=8L#i&y$ds&J_Tf`^68=DNl8f{^c#1zvmDP! z*|x16M&V&uSv1^c%JSvo{_AV8{%{Df{!choE0!-Ob|5B^SWq~Fp29r%w_ed(wv1!; z>U$}b0nD`mkO6>jHHyyVY{_wd<+1C@v*<^AKUxD_$?6*&5ThkH<4pDI-~a8KH=C0C zX-LM?iM?QBSx6!#gy4kVAWafU+H2pjdiUmyqVv{o0H$=LzkFQs$Cmd9fBe&E{P-K3 zQC!EtF1<_}aSikr0^8C6IQRPcVZKXx_$Id1hJ~+QW;`1thO!Kx2*f=Q!{LF1m@2yh z%vG-g1JFDA-|zE>d1RTJ^uAJz5GzH2QmiE(yoMy%;Ds{kJ(Q*{m6fvF)>kt8dZcBcm=NvJ4v-KVUW23mK+Z_de`-CKEynve}#lKnwpmS zacnz+0%Ag#3SLXUH&e4qfdq?hCPqb_mp^&I-?9iG2hy$hsczK;Jvrqtf-#6iTEgp$Chr_G%)0W-{Zvz>uH}Bq=A^xEC zE4}5i(GPqBC zE|qobzN3Ju8CdVVo{S&tX~Kj8-aX)6xz7<2;MBy#Rm1~H#S^8on3-6N3%qqJ9cjm@ zQ+IFQev?D=)AXHX4x+2^Ou5}a{E0>v0N+>a(GioChpM(o#TK|EQ;!kwBr~xA@)+YD zw#i9}S`};dmE!w(>sI6q+&(cO7Hb6~2db(hii*{s2tK)lsEoxVJ{v&0%{&|H5xwm2 z5Oe1Ad|_Qd4hw*qTK`QDQ{i6>BF7 zsFJu&4Mx3f;enCD>@4|=#Fxb@T>89ve#@dtFG7lWa!gjNV z{x~|~q@-p)T${W5z_0=me?j|#gt4rzbS-POm2PgL4`lT$rao5Azo!NTA3$?|fJnw%9oM zMj5@bVPTz{JLByAO1BOM9l$`eH@jn`r6o0Hu(akX@uh@zTCOfG4o*&>6bcXqo}!jV zD>R4<^+!stvG)3-6NeZZyQY+}N(-HrE@f;Hd3A%Pc+;t!gZuY~m9^T}_ZTDy#MvT( zCYj&4jszr1@obW(B$exrnROOvnppe^e)zlX;Iy1eypN`i&KiD>rZd`P0IAI?E-qWS zvY@+ycpFBGEU3JnWN*g+>OM3`;l;eqh4qrH7& 
zfywoN1fk!TzGlLH$ZZW{`*rJdxR_S&okx#q+=T7EjPmq#p@jeyY3xOztI@*#J-k(V z_Jn-u#AbaDhzazUK20QQA8x-`2#AFc22>}pM-Q#(bhs>7@R7;c^X8$G<+3w3plb3_ zp?9&UVj{hylXUms!V<0L@g`Zdhs>X_C%ZOsuegCw0(xf5=uX+Zxx2S{Ohrh^) z*;Go9#B2mAD_@czLy`+8QF zbjJMT6EqXZp&Eh#Zi znh;o3N%>|3Q+g~C$gdzD|MSms;yGkf{igAeUtWAbbj6T;N;Hf2#|x4D4lE0htVg1v zd4o!RBP39eb&>>8&Nh3qM_;~vMd6w~*zKKC^I*{ za~)X$bK<_FMJ1QL$MAOqoLx{CK0PH#q!<8n7!Tf4k8mPCJ)B?DT1%`|n)^9$v%;q~ z?ZpuUBj4&b*@=XLP-d};C?KkR@Qra|VA4R<_^N?J~enM5{Wf{YfGF!Q)le#sO* zM?;j<5QKcZaL~}zYUJ~5z!7>Ixr*;twbL9!P_=$vY1@Z~;|}WB0KfBZ3+BvOv2ta& zS!WN=wu_imrn~w2_zddTucKJ(oKl8`w4XWib!%5&D@`%LIqBZs$jE~P4JctW9d-TQ zP++JN`#;6hHOOIcE32ke8md-)BK5qO&c^s zURDX)edl7K@vMRU;;yg}1{KCB#Ze*0i)~d@iSxkJ2QNz91Ba zB}ojgrfoLhcaSLT%NK9tLo~KwL*YzVh_TSimM_nX?iTQl+6|!t$V?&nC--8WF|u^1 z^np^?jqqCXVzwF(UR*}=qT%!b-i$hS3U|!5r0kW4 z(YF5%b&0G7eY~wiiFgDMQ`kp`6iu8NIA?_Jg z;DB?=Tsvs`eV0=dgkh+vwj(_TrTCgP@t#XEU=HNct3=^24!bU?HEPkoVYI?#*=8ma zfp_(wW-mCtMjdCha7hm+RjXGUk&nd#>q$m}y~7CR)h;s@)IcbsLKzG)>mli76iwND z+Yobd*NC7}G`++E95OP7WN-;t!@+~97zbOaElF5bQkBDFHWBVzn?7%zdhgy-t}X5_ z3h>`@aB(W06HF$Niq&8^6HpS7s{>^uNF*^&A1{?Vl{&x!=A3^>mu8v1=*RIW^QKOP z;1nO&^CXP@>KHL!A_Lh?cip_P9oUZFHmO){4gk+j&Gifco+UoP(M zbBWX>kgtx>^1%3EO7mOHx=HjIgbqo2?!BYCcbCR>6&k`56B{6aGs+!HmPK?AZLDfX zhrssXR*b0BUal4nKxeKmNvc-2um-iXh;?1EglT%!ck?!o+p~iaZ|wm=$ihNUV5WWs ze~-HN78X##?m|@LRK*6454$2WsuU6unJ8|ZZN-IY*g8cfu!IZ&V(NsX!%9E~I83lp z0e%gX9`dhSV2RMTlI=g`meDgj6dGaY&m%76Mn#VdkbVIIZuVd<5vXyGF8@wj6MrJ} z*gy58eb54p`e<-w+#3*V9~2M)$l*t`S<0LUV9C&{;u z<&acJE1L9`s9g%Se(CF~g$EpjW~tFj&UL<-fK)6i7~T4 z@?DTcS-O|bi|OV$fEZr;OK~~ZJw82r@`N;}0j4?<)k=Gd0N8T&bP-2@6x=;LoDfVy z=Os{cPP3-F8nQ5E05=>24CEpX?53s~ndWyywdkM+1O!m6z>?GT4g7GeDZtrYzZS&4%|7P>B7w@1dOey& z-JMeO1}$VHzoVRpxh0XYu#k4oQCh<}c#-LAK*%9X{U#-=B~FkGZf_6=vwu6seX9OG zNT4HQ?pAw}aGgBq`Xr4<#T;VT)8kV`(l;)f{8&-pK|_wfKf&&ho_7jTQdQM&aW}VS zOBCO~zZwYxc7+28GkhjmOn8AJ^K{A8ohCzu5Z@3}2s%-oORI;_74>2pyl2jEmyR9D 
zQRJd+l{fa+G)%u;Z_sK&6)+oS8WboRpN@f5V0f!HhnVsL9qjET!gC1h(lxL1>qH6IMAQ}|4phETLa3ZqULD;Z@aBhS<|6IFeFw%aGFv89$3+{zoss!D{7n9uU#WB zOQxst&XEz*Y;4X&tN_g>m&T4T704jTStNQNa*gzn`covq*{4sd>18RO)nYMaN`zhf z12>5k-zquR(LvTG9agFWKUlL^2w*&e3etji3+_@WeW#$} zikN_aYsJKPEtT4;)ABiILsThb6mcwKo|=JA-7$_PiK`xq1UmU3rzCTsVken@;=vnE z0O--`%BV@!)}B+;KQ%WmphpVU%ggm^`^lgXXWiYEK#% zNeq$l2{Y>~>9tTlPSFCB55WAHK>EkaqOSOI)4sDtp?QlatUJgs>*QoN%b5+KNz(vuNYjr(QPLyIxVN-8SBW-<#fb>vza@a&|5EY4iK zsPb|sfeT&dUsP$vL;vO2(4}@jGVu5z^UA1o6o;Z~D-_--|HGkyl`^vBa7>)t&c zL^mM?tiTd%5D5gaBmJK#z%uv_8sRz`4it$LlJ(;X!+SpQ2{6f}m3g_F8)Kq2H)WPj z4i1Q9GijRn2)4y(AP%G92C7@Ynt|N*7-;oy$3GX`m?cUs$j>Jk?&?upPA3AH z!tcu;I*=3r(q!^debvJ zAYxly-fO;d%^UV|{yTXJqJpsi+1~hJE0|a-TN&_;gC+>Q(M|~xL-f*KO-U3G^Ad`@ z=Tb=^n~c@N@Ni6~OqftdO30?*9M0P&CtJ%YASWj*XTX6KB^25XK=f}O2}t7=0Lsv{ z(EnikhSpzKL2sTM=tm+8_JujX&_!CG1l(XpUve5qNi6rJltm4FZoTARW9@FL2Nt3^ ziHrH+^vGUGD-3b=Tmh1#bR$)N{1_pj zSPj}wy&)NfFq?&GNMivsxC27wnq3|JkYk)2IY(}+f9w02n){0yc|O8bAPb!ruDUJC zEG8?JGgC zoWx!suD7gcbM^4ZUS0Q3pgn}fw6x*TwnD4~`Gl{>DZ+z?gK!D)=JFv@24FOuF73RF zy!gbfl3`^HSa;lKZ8p=}pDYdJ2=Q9ux3Do)P81 zIco2==5MHn=E*BGZ6Gn6$ncmSV3drGW)l1wC7BoPq&9;J&# z^EPfAwf`RmGtY=59>qGf;(iHN0N}`_Sg^YC@6g({EEp4OwbNd zzF)!eRBUYN*RQNSQouNzVax!O57yDr+A>77szawBAj@;7P8EFYOI_c{#H3+}69x{j z3XG2+UZ4|pwmqNZbRK|DAgHq|<}<2BWo*~w%a$=yxxE%_{VY;PT0bVojNEUjO<&2b z=L33(G`HuwnLGo?E-(K@$iXkF1m7U!oQ2pAf>5Ld0Pjo3k5>T^NjErmN$T9OBcjmaCObQXRc9c1O%tg~Hab4AG$5M_*!$VDj3jm< z50L8HUL@{5S6#~eZ8BMu2i1pkReUHr~K!$p5DLjLgL`&*1~ua zs-4(olf!~(0R-S>QKQ<0`T^MrWdXZ*CJZ8}WWGXPlz-&u{!)Ghc=t`M=S*p_oZR~% zN)kyxpN>@nwy3*0IXK*}z2dWUX*}f*4(|E$@5wI-qC_qtEy^Luq(v7em+gQlq}MPa z2IdmxaWrT$OHcHN3g%I05lnM%bcEe{9&HWc1wcf#49j*R-TalGftd?wekzC`3qSY~ z*e;%$0gO!9(RIrCN0*swU`PmUzHIJFTcy6HX3ou@@t*FS#pgfJGz`H7r9 zy{puGC8u}wmo6DKgcdHiu_WbQj9K3RT~XttX28%?szb1QA+Tm)eauc=ya)vM4aRh@ zHxU2-d4JoD)E?jUV$;{q_>(7JQpUh3%ZtURFDrEF1ll5TjbE7Kv2~d`MafwMXUyze zVDQ6b<}!0Rcaxu#(P)JJlTuGO4w|G)tPOZYR@PvRF{wP(S4vc4#g_z@VD?cABZ;k~ 
zXC@;v)7NLb&TebaEc}DtG${?GJMP|CY|7!y?=Thw;d*<^Y7dcUJ}WD0GV7S*0#r$r z4YNRt9CN$#)q}I02F{=)!&h=J6)j+1(`)?g%NIH|YRCg|lgHE4VXJu!j4Rx!cEj0_ z4<^7fKy#rX-b7dv_$augMY=h2wnm48wm(F_b3L7|>B}DI_3nL4II&-!ERd6Jr}dC@ zt=3c7Tn?iOVVQuPe}qnvziLh3`j}}UlGzOW6BNhkzM&bz<5_A)Uvp7_r(z+ogfXT7 z4+Qgf!C!|3Od|}bB4TeQTcf!8qVTzN>4aGO_V!k6DE7vhY!hQ+Y-w^*5{)CV#6-Yk z$fXlkG4)!UIkV%L>(^r}wW&MPB1$*gq zH!kzoG@6#Hu?RGDY6mjabj#2_B&MM=E9(bC_2DMrr^#w5qKq6yrX=f4{M_P|X@Ttrq5jONQ^P3f6I5|N%1cEGfHlNtjbHnL(ebu2 zHP$lrZU26d*zN4oz(8hLe)?LrB?niycK;p-@o@gx$Y|7xwTz;f!gG~E8oGxIi*Ief z4xC_Sb7&aASw6Xgt7 zsw{I0j06S(!UPK3VHOeqEa~+zkJ`pOkiIi#I{IoiUZ;9l?>!Qg5P@r?q%?1su|-Jn zl8duakjknfV8QefO#a%iR@0e43Cx#}3UL&&1)3#)gHCZX>`wxwSK*~39}eUP2A3e+K(JsLEmzsNi= zK&BN9is+Cl zLCM>>=mj*oFZeJ7P3cXwr+F_^cBH<(&{_h{f}#Sl$Y+`A9if2GVaf28JK?a@7~w6^ z(AZ@bqWPEBBnaIB>b}+(AhJA!Wogr8d8**Y|6Ok}NDGG}WHHeG_+}_c#f|hQEd2==IZxEjrxF~h{r z(39R^0ajO+h&1;cr{W_pLa-4KIizU)`PA5$zM(lWKE7{xSYNr#_pFAh?o2cSUmY=Y z=-$kX)T>vcGjC~xjD(Pe(<5^^oa@YtWQ`H4bxlnV`uV=5;na-sJB~u2I%uU5r^2UU zSTm=e?L=_gUGS;aTaA8X6KZUP1JE$sxZ~*y7sCH`*2qiyg;vIl%s6HYz)W-a``542 zcre<~ZWMJpv8%{+b!8L!rBMC~Ij1^-G@n5W)$H3h*i1cwgq)e0N?N)G2EBXrnmcRO zRn!+bWb`k6F@2{&V(_0tv{QL^c{W+T+3BzW1_mF2UD?lIyz>FKh@M`py;l{C|6w{A z#lH(lNlYxrD=qZ`mZB97wn-1~oytDK=9iPRwYElD^9PEv%u|>(@BmgDVnws6r#5j^ zIPV&gs)aQsmN@ngZF-8(4`3_`5N|81Z}Wh#$O|WFrt!~!r-8ln2P2_o=J~5rwC#(e zh1*Di(@JycNEOO<`}XB$_m$boNEl-Tz)2ta_UXeWvY9q*C_`#ao^0G3 z+svClNpc@Vp}+LtZMpW-r@#E*pUs27yaDbb9pw^hnJ}CpMvz(mX=xsrZlKDNQ{QKx z0dXaqysu;mh@&N#ML>bzWcEZc>LLT+7JRI$li4&vB&t+c64!=TRUnUR(CAj(_Yv@ZrNa4FA^8y&Fb!HlP0f*=3<$Uru&B@0b%iaNwPSf?@GIu zHz+PMBGOuy_yU9Y`CDi}4oC~WpPZ3+@kLqLtZCCelPZ@x=J6`2-{z=CE?|1?dJGp^ zl-Ea}ebugZB2DRzriF>YFqU(2awsZFRNj{WgBWdG zU3D7*s^%~{ZY*SSEmsO3$as{OpO1^{s-{--yyKfTMssqAWo2bY{qm5053&aIOTI!$ z0^En5v(+{E9i95fi9~JP?pQUG=Z~c#gjcWSH31Ir)#hSQAyZS+^W>Nt(zgQugZ_g& z5?z9*UHz&iSwba2dvB3gpiom-4~&e6P`Ib!lX~Tf#Rkh7t=-ndzQQqO6-Z#tojuFc zfgNVuJfNgi0QBqWy;`BdJvCmQs4Fr*Z?5)R25%0!0;z@SYXn)-pGf7t8`bjLv`#`d 
z+<=FvxYu4buZZ-#urU5z?5J_KR5Eok&U#dH?(y|l(jj)=tpP$Znys;L!oBk-r*XfM=}Bf{PoI} zZyX7>?leb7?AJo-;w+)~>hT~yoHfDiz26xdCKZpjx9Vz3;0ECc^eIDus813XM-yr} z`Q)$Pzd2LSA3weV8q&}mTE-_}9XM%n*Lz5bu1T+m%f3x(KymR(jy~DD4HYZy_}|;N zd2O8SkSG0S^fFZ4DPTd2Dgj@mQaHJdS6+_);UajX_XC2Y?Gj=R&??$De)?x@{ynKp z2k@PXIE8D#0H#k2CFY`3O|nhU3=9@o!)|-FM*)i8ehrYPw3a<1_&+Ww*6%c;aP2)$#Z7ovmjH#(8g66DBBL zHh?QI&B^I0IhePP&s7}|9u99w;J5^uT`6A+7g#K15VEnyj|D06{r|+3zJ2$Oocrp8 zL1`I%AUzpHKfh}Was?2}^~#?;!ary3SRx$h6z}eju@L1BnV87y!WApA3)HCS_sRu7inbmeJgVuKlFF6f%MUsI(#@Bt4a!m+&O^;7dYU5$~g=SPR?`njDc7%%`wz44jgVuh?MD zcA^SM>2+a)nkdJ^-l{De^Fg2ayzI>}2`jTXc0{j;D+`jgtPHK6M2H^=;O7k(E%Nv~ z-DV`oBt9Or3~&Z&WeN)pmuC3z!ZDWzsTleDU&*N3@VslMbs8N<8}7& zl?;{sj_ik4;}#tvBt~>oS>~<1+(looFmVPhx!*P^kywGX^o1&`x0iHk2C20$nuw9O z_=H~aO96t(O0n5A))d5qxzRw_n_!sGi=sOVLM;aLyU7Ly zV1kSAjH4O`Zfs(~*bOPv$x-SR2EdN=c3ZcO)Ma;6RH1sfm^+}7Nh{@Z}S%b#UpXa1|YBiW@zv8#4^pR`!MWPs- z5^??8o3d-JOy3a_0*DzN7Pc2H;?=A3gcx=(YczvXPssZs&7(J|wji;Lk|fk;kW20n zNVeKe_iQWk-*^_v9u-fXsQ4-?(#zSaS1$n>;m=d2g3NY_-Q3;F=|d#uqNW>ad8geK z%((Cymlp^mA_AUrJI1SOTIX-vTNli3CmV*<3x{(vqE>Uzn5A$qYSbu*5DZRy#(G!D z_c_b!TSMXqK$sAly_d5IyI|nJfnaU82hT#qwd`Rm7$_0k?y*hF>tQv~W@mDEG!;so z#3!64q#NH~h(rUOO@8zSjU{={AYTuVYDdXeP(-m4m|CgJ%7bBVlPJ$faA9# zB4W6aQC>`tsmZEx7;qUh5tNS$oC#Tn$`$uG49R~iO@KM?*1mq-Ea8$GMOB>4`3dnl_26Z zfq?L;Ps3N=G$Q@P2^T8kHzTzYC!t%hciyl5-xhU~l9I@uUkN>c!h?n@D4;GtnuYU%G7P_(ipGS= zlLs;X?B_5b2)-*_X-E4-ixv@{3s}Xt5FMPCzP>f(7hg&BCzJ$_@y&b}emp&;v4E@y z(hV>SKc*=3fUrMiU0?4rZk$vD(k}s$eWjIXJpUBNSVav+`e7*#3qFpb$H0F5MnKmk zp5`IkVM@4VRaO=3BC&OivgQkid;b>!+TK9()0KCZx~O!*mtEU_(DlQHW32`=x#xMiu9UQZ1JzNOiOojSt z;<{S)sC5YKqfeUe z^M?2DC*(LX(x-p3fpW9?0RUHaxtB`NySHzTj##qwKu!KcL0HESRwc7tD*&di%JLGGU)&%EniMkLno&dc#i`8 z9ENw%ty|_@kPJ{V?D^Y+S~RsKSVIC!oNNRhUpi$-8c7=F3jIXiF_TD`0Pg~DLhI3( z_@3i}j^bh_0st}J=E_a6d`ZGAp)sF_*PMg0tv=QxybbkWL+s}FF@00 z&K#lj1TBSzf4kRvXB@m|b&Lfj6mkOa#@ zo;wE_ptxeP!uoqw{!_UXLKq@U&gZ&yS0Vff(K~p~p9{n8e7}?2wp`F?x;5^RrvrT7 
zy}QIvm0SyL29y%k(9`wvtIpAsQueB#ltk!AB9gtG>%H-J)>VVh9OmdG^2gl&#(aY!KI-JA{hs*w2PS^fe=4!o-Qk7(|aGH(TRv>%Z34 zG!T(8ca)chkJ_eUkPho*{vXL)R5l18_2aT_p#Fi~?Eh!9Z^IG7dXftgRB}on1UmH4 zB=PL*b(93m}gn_UqjyxesT8 ze3Jm2+7lB)jvl>xVKS9S&>l7`tRXHTN1HyKFj%-~@9f@PNXl#*o?JYJN*$>x@s97g z`s^Dn$+&e;X)aI|QXgz8J~GB`siA6@w((?kZ{H?NU%qnX1-QB}i&;Zfl6aGJnfMw0 zL)DUkG@D721TV#=2ZzZVvqJGXuM6SDM?3iTQ&{QwbU{(G%UPIeb* zS~^cHY=%3;ZWPB-Am%s8ERas35=P{MEobHE2;%!2DxxMxZ_5?S%k#P)KYVz^c-egp z{mGN9ug1?L8=;1N;)K9KEm+r9K78=omM54M?jFcB+^Tj)GN~9>`P(aOb;wZsiMQwN zgE7a&jeq6O3UZOGPSRBhwXCsw55-G9m=bK6`&yFqKlwVzCoR2Z7Whd)E`#--bBK)h zZ)VOmS|xLli2Ll>+k@U=a$3LneEJwtbYd6FNSvLu{_E|+!r;c0o}@Y8D;!bsC#d6o zA76|nN2c$O%o@@k&^MkyClF*9b`Pdvp$7B;cS61mUMuSPWgI1W{e=~Szma$7DxEn)cu-fd{goLL*du7pe0M17H&*)0L=i;1;9LT$A3PYGcr^}*2l7ujS`MT`iHJgOR7?!xI!by! z3pj$wV1bHU_o%mTy-$#ZKcaq3m-5rJ*EN1ohYvp|J`#!@*Ip8xDK$s!+Xwpymv-19 z7B8k$cv4FAQgSoeC!G4=wdb*?9F{>igS?Fy#~PzE2B^geoMhXwnKqUQ+EbBK1y5xC z$Q(wYqD;2=`*$3!eGRpuPC=g-)XP|xl!<$!zx=~v9>ZM{{sNs*uKGD8__V#g!x>|q zdg##4wA8&%XVlFaz1`qjY(<{QOxPp`DXSZPD}7%SrGJ z`IN7FKa0(Td#BC@Ql6TUVn&^jVdS(hF?%$pZZ+9ARTR(uTB0wVS}vXq<2=IHu+vq7 zfO4$B1!$ActEM9JkF70TQGLx~?ob9ea_G={A_XiZTu@a0LB8)Qtk~qKrM|2Ia-u@Y9;R*vQ`_JFT?ttx%=(Z$dmPNog^~2YHbN9bd~*TFm0&sl`$gBK z?M3z7?mR3lAio!CdT8JF-`_)7`M}}BJrxykExEN#*xL=Y$2x2=Z)D^*cGi~E3&#OA zzy|UjIq!71=hbq7R}ddBG~*DHTyJ_^vaVwEw25sGJ1luC5w{oijJ0)AO3GnUF#i?z z+J{2Gj)|sd zN=3$pdN~wIp~j@v%Us}G%_kKfGQ^9Nl8{)H!AE7Gx3nZ;nA5ONrW{#^$L-a<`(->} zk8t8U6Z%;%wUZXzk{yN@YCd4k0lwA!>Q|GNI=;w z!v$WapP%Wmdm#K(L^6w?;l3WhgQgZLw2{GLg>l4T<=l8r@f3Zc5G3$0?JM9>;nS!{a7^ zrxesjx;i`410-|lL6JM5bbj|U({ERMl2y>@sHqhCV$n2$Jf<7j+1f&Qg!6on^3c(J z`vfw{sdZAf07ttq#Fp|ec?pFBMm;exF?Fmjbe>B9g}67$5LC*{kf^6vlOII^5)(9O z73KKmZ9OXH@k8((aN?-x*Hu?1gUtZme)b!M$)eoIjor!1s{z{U)+xv;u49k|mH=u| z8iMl8vI2GN_isRah|k1#dQ>)h_2RJX$o)~W;n9;PBy>xdV#`U!Aku$Na)&6emYGTs zivi-!09ythA3kyfy5c2hfw-XNn{6GXL@$EIsaJ%kw$bG?l=z!!3gJAsC9UCrLy_u} zmk{1DKqG2M8uRR_p(2NaMwW;s77!UNg6?C-wTw^@E$>X!;jCb`YepI 
z<#=txlF+yY8^#pY+NJSDowt}vphRL$@UCNyLA9}pwMT^>J2hqO?`}l8Yt31<;2sqp zZeoj3BE-wq*QXQR6XCpQ>ARLsL|vC;<}DkFNa*U^RSOr!QV^XPGmsrw$Iy=2+8eb^ zbm?$5yYlLg*oU^p1<@Kxw@*=5DewJcSp+)NwPDVAlE@Qs!AEr(^V|R0=HIj7Hcy@E zg*O1X#fhx&@Ob_CJI$5{4Hrt6!O9vV*1PtuBSo0y6o9ffU#GHNHfv0`h(dy-nFL(u zG!U*br-V`H!$;EyOX8pa<8)tC6bvp`QglM>l`8>&BNXR+3>EW|2;DjL)Xt3s}zeu`>H(RaMpd_oHYj1{po7@L&#mV`Ec+3K$K1 zGbT_iKnEMzl;BJmNl0+PZ$sL>G*nu&QGa9W)A7)Je~fLW-;Z%u z`rx1_7*NnzfOB5xX#2Q7fle2*yN}nm{#wUdqWKf20AZlCyqs#sjmX$p-5n9sst4DC&f$jRl73 zL4ZW}t(w}KmoL4Ub>QtLs_$tZH>sAxyYR+#;#@Y<-k4J zv56+Bw5&YzLv**M@-s?AHBT#2w#iCJ{gA1seq~7tVG>G3^N=Dci=r5l`+aiX|J?uH zuh)J3V~g+gz0UJG&*MCf&+$1v2Mt}Kqwm7IrGoP`jw++%?BVoj+A-|sbL{*!*9HHb z7g04csSLy(PSgy57FMeBniL8~X&~~olhfZ6ui~1b#-ugxv&jWXibqk98ar}7>F;+x|9+V~I`KJ}2eZ^cP($9H zg)`H*I&tEdkqnP0goB*5$Q5WZhymlR$nHzZ%37^631=Gc@Zm;V?X{FC+t^SaLH(0a z!nw3G)B@1V+JEat@YfW6IO=$lwxhBg^FYO-d?=fMW0je~A%_)ouew@c?AU_e^_XSpP$M-UQ+yl30IMc$s6Vw#CS*mAX zdC7XfXgy-BOpmrLTWS`yERoC_52&on1I57o`m-+Qeof7>cAddP#m^vP1r84M)ceo5 zQY_Lw^oh@XFqGo6s&=E?E8t6=NpzEZy7&tur$k-8{wWV+QYR#A9Fl}{BiFDdy_$S&8mFttO%3y9yL_u*=eh4`&9GpY#hLR6Z z`CCZb$1J0qfcn5rmVk~B#Q|)Pn7qy)jS|$<)b4s-q`<#aag0=Xo#(1zpNTeb^svxC z2fdxjl3-GbUqN|rzY~TfqX@#qMJ=`z4|bOJA95ik;|(_00UppGuoo43E8`H zjfITMFXn>)x~M`ehn3H8myaBfc_@^aMGKt4^;WibfQna24Q4 zhV8)3wyUMj-Uz}DCYEp*MM6ACNI4Y3uGCV(NB|Z*ZfH1JcnRUXj|?`PL!*xHs2Dv; zykp$<*@*XcYhpP$yZHW&1`ZtvIySbCRwmn@NJt=x!EYkj$bB;-BE~%3D}{WJx~zH> zNgMn@qKQ-^Yq{jE>>&T@A0AzlU#fU-0p2DN!>--C2X>mzQ9F#zgg>o<9zXt#M6N=J zT*wE7@Nog7y}Vjkpl|>B8vz>HKpf+pR4#tAE0>(V?#6%L0pDFUc^w`YY9ZKJ1-W4< z5pweL(ZFg=k(FW;4Sst>Z8i=8l$JisDg|_2+0P`iL=B}#Rz8^O3*SBh3%mwwdE?TY zPG>mABn@y-@VJm5I7W%~_|#ES%;>bL>xZsf@h1aB$u3L@Y5CoNG{s-=sDQ1qr7A(6q(pSjkXQ2G_=Yp;3jd zIQ9^L@9=3kDaM$|ItlfMdEk`vtT>r)@}wObG{$u}%ER?%!5oZ;n8KoW4Vrzp_#jop zKm0)d`6^a2)DiAy^oxE~GEKrnYXi$F!t(K3eO;3XgQ#X>kgP}V$CuJCUHY+onWVm) z=no?bxc^IR>_w9604t^mcV0Vlz%glsIbjNN0to=G!nMU_bGi@?^UMik4h|QNmSWbX zRRx7(n~;+Ral$JGwskFc7$psijw>VAa18Dv+7TdUEN){&TkbBYymjjo%y~LyUIn+~ 
z+yj+IR0YQu@x9;DM#rlom7mAJwxm|*!)N~6fstSiS7e(QvqqPCTX3t*ri~b7$7kY{y zuETfsBfLiS%yd;%zV26Yw3(%hV)7T+Bh37>E?*v?YE^jQa1Cb6FeNIYZ~FJ`_EM!Y z6HU`tkygTFz-}w6uXp}eIQ|GpWHvmE8^T%8=EEz_2+7$`KRtI`c7)XJ$HdoR@hJ}t zVp)hkE-FGE`020eAtu^P1<@mX0oW0ZfYnr0H@ItpRuZRf){Cd81a_6kMCb0x0_xxe z#*LO6^6mOZk)QylgDt^li*MiF`@YtTnEBYBpCql%APBXy`#_aJye(~kTM>tH)OomP zdsMmS_^EoQ5&EPWgae3D1MW|dYDUwA-uY=&10{Iyz&o+S-C__ofr-grzoFjOqg7U_ z1ycK)Uc5{}V&L0S!YOp%8nzoNMQR}sMSR1;g_uPULO5Nh<&S9xhhXww-g_cRLGoLD zZ#=^pr&|`oW{dCHy*r4vN*cAjdAg%yb_%!Gm6`|kJYgM^h0nHm;glKKH_2g9P(;kG zIeT_-W232-mIu)WU>S%eC};zq%KBNOrMB3GYnR`6SXT!%nK3TNcKnqXk|;Q^B>a|H zzO8C~vS~&2Y8MyiOz*(|!G3H{4$1i7i*;N{2WbKde;NR}p7X}&Uq$3e>Y;FObfOYQ znj$M}(N8~2U5$j7VzgDyH^XEI64Rw4I&>xJXz0Suf~|1p_uH&`z3^9A2Oal025CwezjC}(IX=m1MCe<;8eh^kaK^A{AS`wD^`;+s-3f!T zQ*QR-?j0al!TUlqE}1a~*BH~eKdrdY_cDXH0}278wcEe{KF`45GW#<#Qx~9s z(&zGu3NO9*LR&9z8J2PxC|zkOTD_EYBRZ|wU-WLL)2EAv2t9vtT#yH0Dd71!XKx-B zw#Raz0ttEo1A_Sw1PHitnt93iEeO zf&=0}vOI9&0XHsRyomgXF7d~eE3^AIzgx-T#SzMbcf-lYF)wtrD7gGI_-7H0>%U-0 zg@%Ro_4gymaV;kY2=qQc1opBlQCUhlZxIz{`VS1YySvyt4tm%71+k5(^QtH-iI>>9RMX26_|KoNdJBNttx$p*cmJ-`jR7}14={@_0vlw zU<)&heCI4#a$?096TnzD0^==mgH{&QRlbKelU>d}AW7X{8LEtP zPC@C;)O3(Q7AJgnUw1k=LjpvW;BYP!(rT^D9*m8DxXVuc4O&V<0(lr3Xldb*7$Eon zS5NaXdgHWB6*nyq`L}9c2$+TCB)W*Wxq5qh!MTYx51aIr0N4_bu!RNPI6rB-&bwPJ zJ_WaKVRkZ7Q)Bh!jvY^)Fc|TR2RJnaSQaH7x>uGD@)Xvtpbxr&P-beY7SYkvb;e=i;MW zctwFGuE0W&HNnL!Ee{k34X5E>4oBdh9Y@9c4kZXWKU3*`;HG*{=twy^(3{nBlu5IY!MI*ITI`aC%9wLLFg z+bk@SFL0b;>qydNXCJM(OwdmPNRLWdP)0pHn00SDI~U@RS66EpFOZ~?vDI5*jG@s^ zsjvDge8T462F5QNT&Xi@VjMY0EhSQsS&o3_2o_!w|1$3=*BqulF&KKrQ9Rdc07u8A198_G{&qhcgPL@$& zc_BA9#TZ`FT(~gi>Zy#3X+WzAFV*MUHu^P}eC#$p)!j9s##LzufyvzdaiU0=K)z&z zN}Br(r$;kd79hh0)qB6uFdx2p$7lnUCkJ-qIJs@=W`Xf#tdT_@Z|_;{;GiW}Gr@9a z#7^VFO=>ub&o?42Q|`$;(TgQplg*zPsxq!K`S#%%Gp0<5_^2ZwAJV3cAKGyt!In{S zz3QaR$T5znMdwXQN?l#5$WI6o{T##MKS~q~RK!m+&$KG>Q_|y8;%Cg%t@`3;$Q}C> z8Sx(;VJf1hm8AY3)uMZqB*iEHf8IiwC$D@&)XMA2UfXRTyH7G*$79aNj#L&2Dp_k+ 
zsa@5h=I75Lp`U0+pG_LvH}pH(ctXu+wWTZkV)CeS^C1KQg4aL_M#>Ln9(=D8C2MPG zZEMU+Xjcz1-?|l%u2z^xgYV7yS{!?YaLnGmbV^ccBLifT?TJ1`{WCj^ z5RDeqAtGZ+w~HsiOswxU!I~muh0$e~ipi~oS%U0vyS+~;9Kl($S~&99#xAm%)(GfZ6sA9ATJX3s(unlkPuKGRXkJo!$#4fTMu2_l%5JI1VtUPeURH z=wjJ~sEjk)&r{(Lh;(XT@4^KORzX)_S)_=Bq8e+u|FLkaxYxy#0K-2 z5N@EFLPO^A_w}9r*|sUQGOQmDaaX-`2lPo|8JW^2T|h$EPqI7!jJV@$3S4kjD;osn zDSb5S;$RVP4D$rpr{Frm+5E)`?GG?|ZUxhk1GCYs>X=oft<4>^O z7$Ptm*?nf}6EIF(oqq+Y9Tt%d#W>hSXXi||G&Tc6GF0FQCb|XeeA~7|$?UJnIF?zr z_At=9%&Xtf`vZW^oi?74>AlYA(HVf=WsN|Cf#nmd2~L<48wG62Udot$TkYgr~$yL33( zbVKzeyLdo;tG-#I)}(`Ar4ed%Ab0xir)Kfx-1Ufcglljpx*;l67X{WV*eyfpe1Jc> z1Lp6?GRrXw{>NGrxIJO1|KH5U4>-IYUPHHO+pG-9!^j)oIC7aqN|m)j_j2n8yM2zs zs|_vpj8$#EQSxVne7cd1_u8iV2AiSZ$#=ZBvp9P<_78{PKJWE-%pH4Usg8GWXs_Ki zD5V>%GFi*Xz}kClyi%>CReYR(;^Vvd_}h3se%F$ZS2zT}^DeIWU&gI{Q0BWup356c SBJPMMwE4nr**S|%`~M5@Hz=$C diff --git a/docs/packages.md b/docs/packages.md new file mode 100644 index 000000000..118046a87 --- /dev/null +++ b/docs/packages.md @@ -0,0 +1,115 @@ +# Torrust Tracker Package Architecture + +- [Package Conventions](#package-conventions) +- [Package Catalog](#package-catalog) +- [Architectural Philosophy](#architectural-philosophy) +- [Protocol Implementation Details](#protocol-implementation-details) +- [Architectural Philosophy](#architectural-philosophy) + +```output +packages/ +├── axum-health-check-api-server +├── axum-http-tracker-server +├── axum-rest-tracker-api-server +├── axum-server +├── clock +├── configuration +├── http-protocol +├── http-tracker-core +├── located-error +├── primitives +├── rest-tracker-api-client +├── rest-tracker-api-core +├── server-lib +├── test-helpers +├── torrent-repository +├── tracker-client +├── tracker-core +├── udp-protocol +├── udp-tracker-core +└── udp-tracker-server +``` + +```output +console/ +└── tracker-client # Client for interacting with trackers +``` + +```output +contrib/ +└── bencode # Community-contributed Bencode utilities +``` + +## Package Conventions + +| Prefix | 
Responsibility | Dependencies | +|-----------------|-----------------------------------------|---------------------------| +| `axum-*` | HTTP server components using Axum | Axum framework | +| `*-server` | Server implementations | Corresponding *-core | +| `*-core` | Domain logic & business rules | Protocol implementations | +| `*-protocol` | BitTorrent protocol implementations | BitTorrent protocol | +| `udp-*` | UDP Protocol-specific implementations | Tracker core | +| `http-*` | HTTP Protocol-specific implementations | Tracker core | + +Key Architectural Principles: + +1. **Separation of Concerns**: Servers contain only network I/O logic. +2. **Protocol Compliance**: `*-protocol` packages strictly implement BEP specifications. +3. **Extensibility**: Core logic is framework-agnostic for easy protocol additions. + +## Package Catalog + +| Package | Description | Key Responsibilities | +|---------|-------------|----------------------| +| **axum-*** | | | +| `axum-server` | Base Axum HTTP server infrastructure | HTTP server lifecycle management | +| `axum-http-tracker-server` | BitTorrent HTTP tracker (BEP 3/23) | Handle announce/scrape requests | +| `axum-rest-tracker-api-server` | Management REST API | Tracker configuration & monitoring | +| `axum-health-check-api-server` | Health monitoring endpoint | System health reporting | +| **Core Components** | | | +| `http-tracker-core` | HTTP-specific implementation | Request validation, Response formatting | +| `udp-tracker-core` | UDP-specific implementation | Connectionless request handling | +| `tracker-core` | Central tracker logic | Peer management | +| **Protocols** | | | +| `http-protocol` | HTTP tracker protocol (BEP 3/23) | Announce/scrape request parsing | +| `udp-protocol` | UDP tracker protocol (BEP 15) | UDP message framing/parsing | +| **Domain** | | | +| `torrent-repository` | Torrent metadata storage | InfoHash management, Peer coordination | +| `configuration` | Runtime configuration | Config file 
parsing, Environment variables | +| `primitives` | Domain-specific types | InfoHash, PeerId, Byte handling | +| **Utilities** | | | +| `clock` | Time abstraction | Mockable time source for testing | +| `located-error` | Diagnostic errors | Error tracing with source locations | +| `test-helpers` | Testing utilities | Mock servers, Test data generation | +| **Client Tools** | | | +| `tracker-client` | CLI client | Tracker interaction/testing | +| `rest-tracker-api-client` | API client library | REST API integration | + +## Protocol Implementation Details + +### HTTP Tracker (BEP 3/23) + +- `http-protocol` implements: + - URL parameter parsing + - Response bencoding + - Error code mapping + - Compact peer formatting + +### UDP Tracker (BEP 15) + +- `udp-protocol` handles: + - Connection ID management + - Message framing (32-bit big-endian) + - Transaction ID tracking + - Error response codes + +## Architectural Philosophy + +1. **Testability**: Core packages have minimal dependencies for easy unit testing +2. **Observability**: Health checks and metrics built into server packages +3. **Modularity**: Protocol implementations decoupled from transport layers +4. **Extensibility**: New protocols can be added without modifying core logic + +![Torrust Tracker Architecture Diagram](./media/packages/torrust-tracker-layers-with-packages.png) + +> Diagram shows clean separation between network I/O (servers), protocol handling, and core tracker logic diff --git a/src/lib.rs b/src/lib.rs index e947d2ab5..0aaf34fe4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -36,7 +36,7 @@ //! - [API](#api) //! - [HTTP Tracker](#http-tracker) //! - [UDP Tracker](#udp-tracker) -//! - [Components](#components) +//! - [Packages](#packages) //! - [Implemented BEPs](#implemented-beps) //! - [Contributing](#contributing) //! - [Documentation](#documentation) @@ -401,16 +401,9 @@ //! //! - [BEP 15. UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html) //! -//! 
# Components +//! # Packages //! -//! Torrust Tracker has four main components: -//! -//! - The core tracker [`core`] -//! - The tracker REST [`API`](torrust_axum_rest_tracker_api_server) -//! - The [`UDP`](torrust_udp_tracker_server) tracker -//! - The [`HTTP`](torrust_axum_http_tracker_server) tracker -//! -//! ![Torrust Tracker Components](https://raw.githubusercontent.com/torrust/torrust-tracker/main/docs/media/torrust-tracker-components.png) +//! ![Torrust Tracker Layers with Main Packages](https://raw.githubusercontent.com/torrust/torrust-tracker/main/docs/media/packages/torrust-tracker-layers-with-packages.png) //! //! ## Core tracker //! From b76eff6a5ec7b1d4a35007ee82826b73eaba9807 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Mar 2025 09:52:43 +0000 Subject: [PATCH 352/802] fix: clippy error ```output error: unnecessary `Debug` formatting in `format!` args --> src/console/ci/e2e/runner.rs:140:98 | 140 | let config = std::fs::read_to_string(path).with_context(|| format!("CSan't read config file {path:?}"))?; | ^^^^ | = help: use `Display` formatting and change this to `path.display()` = note: switching to `Display` formatting will change how the value is shown; escaped characters will no longer be escaped and surrounding quotes will be removed = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_debug_formatting = note: `-D clippy::unnecessary-debug-formatting` implied by `-D clippy::pedantic` = help: to override `-D clippy::pedantic` add `#[allow(clippy::unnecessary_debug_formatting)]` error: could not compile `torrust-tracker` (lib) due to 1 previous error warning: build failed, waiting for other jobs to finish... 
error: could not compile `torrust-tracker` (lib test) due to 1 previous error ``` --- src/console/ci/e2e/runner.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/console/ci/e2e/runner.rs b/src/console/ci/e2e/runner.rs index 118ecda42..624878c70 100644 --- a/src/console/ci/e2e/runner.rs +++ b/src/console/ci/e2e/runner.rs @@ -137,7 +137,7 @@ fn load_tracker_configuration(args: &Args) -> anyhow::Result { } fn load_config_from_file(path: &PathBuf) -> anyhow::Result { - let config = std::fs::read_to_string(path).with_context(|| format!("CSan't read config file {path:?}"))?; + let config = std::fs::read_to_string(path).with_context(|| format!("CSan't read config file {}", path.display()))?; Ok(config) } From 29344b113da9159f98c85193baec7d858b37b767 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Mar 2025 10:39:38 +0000 Subject: [PATCH 353/802] refactor: rearrange code --- .../src/services/announce.rs | 100 +++++++++--------- 1 file changed, 50 insertions(+), 50 deletions(-) diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 5890d35c1..c2eba5e1a 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -23,56 +23,6 @@ use torrust_tracker_primitives::core::AnnounceData; use crate::statistics; -/// Errors related to announce requests. 
-#[derive(thiserror::Error, Debug, Clone)] -pub enum HttpAnnounceError { - #[error("Error resolving peer IP: {source}")] - PeerIpResolutionError { source: PeerIpResolutionError }, - - #[error("Tracker core error: {source}")] - TrackerCoreError { source: TrackerCoreError }, -} - -impl From for HttpAnnounceError { - fn from(peer_ip_resolution_error: PeerIpResolutionError) -> Self { - Self::PeerIpResolutionError { - source: peer_ip_resolution_error, - } - } -} - -impl From for HttpAnnounceError { - fn from(tracker_core_error: TrackerCoreError) -> Self { - Self::TrackerCoreError { - source: tracker_core_error, - } - } -} - -impl From for HttpAnnounceError { - fn from(announce_error: AnnounceError) -> Self { - Self::TrackerCoreError { - source: announce_error.into(), - } - } -} - -impl From for HttpAnnounceError { - fn from(whitelist_error: WhitelistError) -> Self { - Self::TrackerCoreError { - source: whitelist_error.into(), - } - } -} - -impl From for HttpAnnounceError { - fn from(whitelist_error: authentication::key::Error) -> Self { - Self::TrackerCoreError { - source: whitelist_error.into(), - } - } -} - /// The HTTP tracker `announce` service. /// /// The service sends an statistics event that increments: @@ -184,6 +134,56 @@ impl AnnounceService { } } +/// Errors related to announce requests. 
+#[derive(thiserror::Error, Debug, Clone)] +pub enum HttpAnnounceError { + #[error("Error resolving peer IP: {source}")] + PeerIpResolutionError { source: PeerIpResolutionError }, + + #[error("Tracker core error: {source}")] + TrackerCoreError { source: TrackerCoreError }, +} + +impl From for HttpAnnounceError { + fn from(peer_ip_resolution_error: PeerIpResolutionError) -> Self { + Self::PeerIpResolutionError { + source: peer_ip_resolution_error, + } + } +} + +impl From for HttpAnnounceError { + fn from(tracker_core_error: TrackerCoreError) -> Self { + Self::TrackerCoreError { + source: tracker_core_error, + } + } +} + +impl From for HttpAnnounceError { + fn from(announce_error: AnnounceError) -> Self { + Self::TrackerCoreError { + source: announce_error.into(), + } + } +} + +impl From for HttpAnnounceError { + fn from(whitelist_error: WhitelistError) -> Self { + Self::TrackerCoreError { + source: whitelist_error.into(), + } + } +} + +impl From for HttpAnnounceError { + fn from(whitelist_error: authentication::key::Error) -> Self { + Self::TrackerCoreError { + source: whitelist_error.into(), + } + } +} + #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; From 5178abab373a15302482655c60b436f34e0d2c45 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Mar 2025 10:40:32 +0000 Subject: [PATCH 354/802] fix: clippy error ```output error: unnecessary `Debug` formatting in `format!` args --> console/tracker-client/src/console/clients/checker/app.rs:117:103 | 117 | let file_content = std::fs::read_to_string(path).with_context(|| format!("can't read config file {path:?}"))?; | ^^^^ | = help: use `Display` formatting and change this to `path.display()` = note: switching to `Display` formatting will change how the value is shown; escaped characters will no longer be escaped and surrounding quotes will be removed = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_debug_formatting = note: 
`-D clippy::unnecessary-debug-formatting` implied by `-D clippy::pedantic` = help: to override `-D clippy::pedantic` add `#[allow(clippy::unnecessary_debug_formatting)]` Checking torrust-axum-http-tracker-server v3.0.0-develop (/home/josecelano/Documents/git/committer/me/github/torrust/torrust-tracker/packages/axum-http-tracker-server) error: could not compile `torrust-tracker-client` (lib) due to 1 previous error warning: build failed, waiting for other jobs to finish... error: could not compile `torrust-tracker-client` (lib test) due to 1 previous error ``` --- console/tracker-client/src/console/clients/checker/app.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/console/tracker-client/src/console/clients/checker/app.rs b/console/tracker-client/src/console/clients/checker/app.rs index 395f65df9..88ce5a8ac 100644 --- a/console/tracker-client/src/console/clients/checker/app.rs +++ b/console/tracker-client/src/console/clients/checker/app.rs @@ -114,7 +114,7 @@ fn setup_config(args: Args) -> Result { } fn load_config_from_file(path: &PathBuf) -> Result { - let file_content = std::fs::read_to_string(path).with_context(|| format!("can't read config file {path:?}"))?; + let file_content = std::fs::read_to_string(path).with_context(|| format!("can't read config file {}", path.display()))?; parse_from_json(&file_content).context("invalid config format") } From ec0e437822dc9ad8e01f1dac55ba1eafdeedaa02 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Mar 2025 11:18:59 +0000 Subject: [PATCH 355/802] refactor: [#1338] clean bittorrent_http_tracker_core::services::announce::AnnounceService --- .../src/services/announce.rs | 82 +++++++++++-------- 1 file changed, 46 insertions(+), 36 deletions(-) diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index c2eba5e1a..959dcc615 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ 
b/packages/http-tracker-core/src/services/announce.rs @@ -13,6 +13,7 @@ use std::sync::Arc; use bittorrent_http_tracker_protocol::v1::requests::announce::{peer_from_request, Announce}; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources, PeerIpResolutionError}; +use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::authentication::{self, Key}; @@ -73,50 +74,61 @@ impl AnnounceService { client_ip_sources: &ClientIpSources, maybe_key: Option, ) -> Result { - // Authentication - if self.core_config.private { - match maybe_key { - Some(key) => match self.authentication_service.authenticate(&key).await { - Ok(()) => (), - Err(error) => return Err(error.into()), - }, - None => { - return Err(authentication::key::Error::MissingAuthKey { - location: Location::caller(), - } - .into()) - } - } - } + self.authenticate(maybe_key).await?; - // Authorization - match self.whitelist_authorization.authorize(&announce_request.info_hash).await { - Ok(()) => (), - Err(error) => return Err(error.into()), - } + self.authorize(announce_request.info_hash).await?; - let peer_ip = match peer_ip_resolver::invoke(self.core_config.net.on_reverse_proxy, client_ip_sources) { - Ok(peer_ip) => peer_ip, - Err(error) => return Err(error.into()), - }; + let remote_client_ip = self.resolve_remote_client_ip(client_ip_sources)?; - let mut peer = peer_from_request(announce_request, &peer_ip); + let mut peer = peer_from_request(announce_request, &remote_client_ip); - let peers_wanted = match announce_request.numwant { - Some(numwant) => PeersWanted::only(numwant), - None => PeersWanted::AsManyAsPossible, - }; - - let original_peer_ip = peer.peer_addr.ip(); + let peers_wanted = Self::peers_wanted(announce_request); - // The tracker could change the original peer ip let announce_data = self 
.announce_handler - .announce(&announce_request.info_hash, &mut peer, &original_peer_ip, &peers_wanted) + .announce(&announce_request.info_hash, &mut peer, &remote_client_ip, &peers_wanted) .await?; + self.send_stats_event(remote_client_ip).await; + + Ok(announce_data) + } + + async fn authenticate(&self, maybe_key: Option) -> Result<(), authentication::key::Error> { + if self.core_config.private { + let key = maybe_key.ok_or(authentication::key::Error::MissingAuthKey { + location: Location::caller(), + })?; + + self.authentication_service.authenticate(&key).await?; + } + + Ok(()) + } + + async fn authorize(&self, info_hash: InfoHash) -> Result<(), WhitelistError> { + self.whitelist_authorization.authorize(&info_hash).await + } + + /// Resolves the client's real IP address considering proxy headers + fn resolve_remote_client_ip(&self, client_ip_sources: &ClientIpSources) -> Result { + match peer_ip_resolver::invoke(self.core_config.net.on_reverse_proxy, client_ip_sources) { + Ok(peer_ip) => Ok(peer_ip), + Err(error) => Err(error), + } + } + + /// Determines how many peers the client wants in the response + fn peers_wanted(announce_request: &Announce) -> PeersWanted { + match announce_request.numwant { + Some(numwant) => PeersWanted::only(numwant), + None => PeersWanted::AsManyAsPossible, + } + } + + async fn send_stats_event(&self, peer_ip: IpAddr) { if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { - match original_peer_ip { + match peer_ip { IpAddr::V4(_) => { http_stats_event_sender .send_event(statistics::event::Event::Tcp4Announce) @@ -129,8 +141,6 @@ impl AnnounceService { } } } - - Ok(announce_data) } } From 1b01bf09c50797ac71c12738fda572bb48e6e27b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Mar 2025 11:22:23 +0000 Subject: [PATCH 356/802] refactor: [#1338] rearrange code --- .../http-tracker-core/src/services/scrape.rs | 101 +++++++++--------- 1 file changed, 52 insertions(+), 49 deletions(-) diff --git 
a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 48cee7c8c..fbfa0d024 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -22,55 +22,6 @@ use torrust_tracker_primitives::core::ScrapeData; use crate::statistics; -/// Errors related to announce requests. -#[derive(thiserror::Error, Debug, Clone)] -pub enum HttpScrapeError { - #[error("Error resolving peer IP: {source}")] - PeerIpResolutionError { source: PeerIpResolutionError }, - - #[error("Tracker core error: {source}")] - TrackerCoreError { source: TrackerCoreError }, -} - -impl From for HttpScrapeError { - fn from(peer_ip_resolution_error: PeerIpResolutionError) -> Self { - Self::PeerIpResolutionError { - source: peer_ip_resolution_error, - } - } -} - -impl From for HttpScrapeError { - fn from(tracker_core_error: TrackerCoreError) -> Self { - Self::TrackerCoreError { - source: tracker_core_error, - } - } -} - -impl From for HttpScrapeError { - fn from(announce_error: ScrapeError) -> Self { - Self::TrackerCoreError { - source: announce_error.into(), - } - } -} - -impl From for HttpScrapeError { - fn from(whitelist_error: WhitelistError) -> Self { - Self::TrackerCoreError { - source: whitelist_error.into(), - } - } -} - -impl From for HttpScrapeError { - fn from(whitelist_error: authentication::key::Error) -> Self { - Self::TrackerCoreError { - source: whitelist_error.into(), - } - } -} /// The HTTP tracker `scrape` service. /// /// The service sends an statistics event that increments: @@ -110,6 +61,8 @@ impl ScrapeService { } } + /// Handles a scrape request. + /// /// # Errors /// /// This function will return an error if: @@ -186,6 +139,56 @@ async fn send_scrape_event( } } +/// Errors related to announce requests. 
+#[derive(thiserror::Error, Debug, Clone)] +pub enum HttpScrapeError { + #[error("Error resolving peer IP: {source}")] + PeerIpResolutionError { source: PeerIpResolutionError }, + + #[error("Tracker core error: {source}")] + TrackerCoreError { source: TrackerCoreError }, +} + +impl From for HttpScrapeError { + fn from(peer_ip_resolution_error: PeerIpResolutionError) -> Self { + Self::PeerIpResolutionError { + source: peer_ip_resolution_error, + } + } +} + +impl From for HttpScrapeError { + fn from(tracker_core_error: TrackerCoreError) -> Self { + Self::TrackerCoreError { + source: tracker_core_error, + } + } +} + +impl From for HttpScrapeError { + fn from(announce_error: ScrapeError) -> Self { + Self::TrackerCoreError { + source: announce_error.into(), + } + } +} + +impl From for HttpScrapeError { + fn from(whitelist_error: WhitelistError) -> Self { + Self::TrackerCoreError { + source: whitelist_error.into(), + } + } +} + +impl From for HttpScrapeError { + fn from(whitelist_error: authentication::key::Error) -> Self { + Self::TrackerCoreError { + source: whitelist_error.into(), + } + } +} + #[cfg(test)] mod tests { From a42625934d2e0258a78a934458f71edebef3ffa0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Mar 2025 12:32:41 +0000 Subject: [PATCH 357/802] refactor: [#1338] bittorrent_http_tracker_core::services::scrape::ScrapeService --- .../http-tracker-core/src/services/scrape.rs | 174 +++++++++++------- 1 file changed, 109 insertions(+), 65 deletions(-) diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index fbfa0d024..dcb88508c 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -12,7 +12,6 @@ use std::sync::Arc; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources, PeerIpResolutionError}; -use 
bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::authentication::{self, Key}; use bittorrent_tracker_core::error::{ScrapeError, TrackerCoreError, WhitelistError}; @@ -63,6 +62,9 @@ impl ScrapeService { /// Handles a scrape request. /// + /// When the peer is not authenticated and the tracker is running in `private` + /// mode, the tracker returns empty stats for all the torrents. + /// /// # Errors /// /// This function will return an error if: @@ -74,67 +76,43 @@ impl ScrapeService { client_ip_sources: &ClientIpSources, maybe_key: Option, ) -> Result { - // Authentication - let return_fake_scrape_data = if self.core_config.private { - match maybe_key { - Some(key) => match self.authentication_service.authenticate(&key).await { - Ok(()) => false, - Err(_error) => true, - }, - None => true, - } + let scrape_data = if self.authentication_is_required() && !self.is_authenticated(maybe_key).await { + ScrapeData::zeroed(&scrape_request.info_hashes) } else { - false + self.scrape_handler.scrape(&scrape_request.info_hashes).await? }; - // Authorization for scrape requests is handled at the `bittorrent_tracker_core` - // level for each torrent. 
+ let remote_client_ip = self.resolve_remote_client_ip(client_ip_sources)?; - let peer_ip = match peer_ip_resolver::invoke(self.core_config.net.on_reverse_proxy, client_ip_sources) { - Ok(peer_ip) => peer_ip, - Err(error) => return Err(error.into()), - }; + self.send_stats_event(&remote_client_ip).await; - if return_fake_scrape_data { - return Ok(fake(&self.opt_http_stats_event_sender, &scrape_request.info_hashes, &peer_ip).await); - } + Ok(scrape_data) + } - let scrape_data = self.scrape_handler.scrape(&scrape_request.info_hashes).await?; + fn authentication_is_required(&self) -> bool { + self.core_config.private + } - send_scrape_event(&peer_ip, &self.opt_http_stats_event_sender).await; + async fn is_authenticated(&self, maybe_key: Option) -> bool { + if let Some(key) = maybe_key { + return self.authentication_service.authenticate(&key).await.is_ok(); + } - Ok(scrape_data) + false } -} -/// The HTTP tracker fake `scrape` service. It returns zeroed stats. -/// -/// When the peer is not authenticated and the tracker is running in `private` mode, -/// the tracker returns empty stats for all the torrents. -/// -/// > **NOTICE**: tracker statistics are not updated in this case. -pub async fn fake( - opt_http_stats_event_sender: &Arc>>, - info_hashes: &Vec, - original_peer_ip: &IpAddr, -) -> ScrapeData { - send_scrape_event(original_peer_ip, opt_http_stats_event_sender).await; - - ScrapeData::zeroed(info_hashes) -} + /// Resolves the client's real IP address considering proxy headers. 
+ fn resolve_remote_client_ip(&self, client_ip_sources: &ClientIpSources) -> Result { + peer_ip_resolver::invoke(self.core_config.net.on_reverse_proxy, client_ip_sources) + } -async fn send_scrape_event( - original_peer_ip: &IpAddr, - opt_http_stats_event_sender: &Arc>>, -) { - if let Some(http_stats_event_sender) = opt_http_stats_event_sender.as_deref() { - match original_peer_ip { - IpAddr::V4(_) => { - http_stats_event_sender.send_event(statistics::event::Event::Tcp4Scrape).await; - } - IpAddr::V6(_) => { - http_stats_event_sender.send_event(statistics::event::Event::Tcp6Scrape).await; - } + async fn send_stats_event(&self, original_peer_ip: &IpAddr) { + if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { + let event = match original_peer_ip { + IpAddr::V4(_) => statistics::event::Event::Tcp4Scrape, + IpAddr::V6(_) => statistics::event::Event::Tcp6Scrape, + }; + http_stats_event_sender.send_event(event).await; } } } @@ -211,7 +189,6 @@ mod tests { use tokio::sync::mpsc::error::SendError; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; - use torrust_tracker_test_helpers::configuration; use crate::statistics; use crate::tests::sample_info_hash; @@ -222,10 +199,6 @@ mod tests { authentication_service: Arc, } - fn initialize_services_for_public_tracker() -> Container { - initialize_services_with_configuration(&configuration::ephemeral_public()) - } - fn initialize_services_with_configuration(config: &Configuration) -> Container { let database = initialize_database(&config.core); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); @@ -436,28 +409,34 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::sync::Arc; + use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; + use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; use bittorrent_tracker_core::announce_handler::PeersWanted; use 
mockall::predicate::eq; use torrust_tracker_primitives::core::ScrapeData; + use torrust_tracker_test_helpers::configuration; - use crate::services::scrape::fake; use crate::services::scrape::tests::{ - initialize_services_for_public_tracker, sample_info_hashes, sample_peer, MockHttpStatsEventSender, + initialize_services_with_configuration, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; + use crate::services::scrape::ScrapeService; use crate::statistics; use crate::tests::sample_info_hash; #[tokio::test] - async fn it_should_always_return_the_zeroed_scrape_data_for_a_torrent() { + async fn it_should_return_the_zeroed_scrape_data_when_the_tracker_is_running_in_private_mode_and_the_peer_is_not_authenticated( + ) { + let config = configuration::ephemeral_private(); + + let container = initialize_services_with_configuration(&config); + let (http_stats_event_sender, _http_stats_repository) = statistics::setup::factory(false); let http_stats_event_sender = Arc::new(http_stats_event_sender); - let container = initialize_services_for_public_tracker(); - let info_hash = sample_info_hash(); let info_hashes = vec![info_hash]; - // Announce a new peer to force scrape data to contain not zeroed data + // Announce a new peer to force scrape data to contain non zeroed data let mut peer = sample_peer(); let original_peer_ip = peer.ip(); container @@ -466,7 +445,26 @@ mod tests { .await .unwrap(); - let scrape_data = fake(&http_stats_event_sender, &info_hashes, &original_peer_ip).await; + let scrape_request = Scrape { + info_hashes: sample_info_hashes(), + }; + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: Some(original_peer_ip), + }; + + let scrape_service = Arc::new(ScrapeService::new( + Arc::new(config.core), + container.scrape_handler.clone(), + container.authentication_service.clone(), + http_stats_event_sender.clone(), + )); + + let scrape_data = scrape_service + .handle_scrape(&scrape_request, 
&client_ip_sources, None) + .await + .unwrap(); let expected_scrape_data = ScrapeData::zeroed(&info_hashes); @@ -475,6 +473,10 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_4_scrape_event_when_the_peer_uses_ipv4() { + let config = configuration::ephemeral(); + + let container = initialize_services_with_configuration(&config); + let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() @@ -486,11 +488,34 @@ mod tests { let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); - fake(&http_stats_event_sender, &sample_info_hashes(), &peer_ip).await; + let scrape_request = Scrape { + info_hashes: sample_info_hashes(), + }; + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: Some(peer_ip), + }; + + let scrape_service = Arc::new(ScrapeService::new( + Arc::new(config.core), + container.scrape_handler.clone(), + container.authentication_service.clone(), + http_stats_event_sender.clone(), + )); + + scrape_service + .handle_scrape(&scrape_request, &client_ip_sources, None) + .await + .unwrap(); } #[tokio::test] async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { + let config = configuration::ephemeral(); + + let container = initialize_services_with_configuration(&config); + let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() @@ -502,7 +527,26 @@ mod tests { let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); - fake(&http_stats_event_sender, &sample_info_hashes(), &peer_ip).await; + let scrape_request = Scrape { + info_hashes: sample_info_hashes(), + }; + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: Some(peer_ip), + }; + + let scrape_service = Arc::new(ScrapeService::new( + Arc::new(config.core), + container.scrape_handler.clone(), + 
container.authentication_service.clone(), + http_stats_event_sender.clone(), + )); + + scrape_service + .handle_scrape(&scrape_request, &client_ip_sources, None) + .await + .unwrap(); } } } From 2a7e2cbf3b6a800650ebf6df791a914f7af985de Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Mar 2025 12:35:03 +0000 Subject: [PATCH 358/802] refactor: rearrange code --- .../udp-tracker-core/src/services/announce.rs | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index 051944d7e..a48242833 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -21,42 +21,6 @@ use torrust_tracker_primitives::core::AnnounceData; use crate::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieError}; use crate::statistics; -/// Errors related to announce requests. -#[derive(thiserror::Error, Debug, Clone)] -pub enum UdpAnnounceError { - /// Error returned when there was an error with the connection cookie. - #[error("Connection cookie error: {source}")] - ConnectionCookieError { source: ConnectionCookieError }, - - /// Error returned when there was an error with the tracker core announce handler. - #[error("Tracker core announce error: {source}")] - TrackerCoreAnnounceError { source: AnnounceError }, - - /// Error returned when there was an error with the tracker core whitelist. 
- #[error("Tracker core whitelist error: {source}")] - TrackerCoreWhitelistError { source: WhitelistError }, -} - -impl From for UdpAnnounceError { - fn from(connection_cookie_error: ConnectionCookieError) -> Self { - Self::ConnectionCookieError { - source: connection_cookie_error, - } - } -} - -impl From for UdpAnnounceError { - fn from(announce_error: AnnounceError) -> Self { - Self::TrackerCoreAnnounceError { source: announce_error } - } -} - -impl From for UdpAnnounceError { - fn from(whitelist_error: WhitelistError) -> Self { - Self::TrackerCoreWhitelistError { source: whitelist_error } - } -} - /// The `AnnounceService` is responsible for handling the `announce` requests. pub struct AnnounceService { pub announce_handler: Arc, @@ -135,3 +99,39 @@ impl AnnounceService { Ok(announce_data) } } + +/// Errors related to announce requests. +#[derive(thiserror::Error, Debug, Clone)] +pub enum UdpAnnounceError { + /// Error returned when there was an error with the connection cookie. + #[error("Connection cookie error: {source}")] + ConnectionCookieError { source: ConnectionCookieError }, + + /// Error returned when there was an error with the tracker core announce handler. + #[error("Tracker core announce error: {source}")] + TrackerCoreAnnounceError { source: AnnounceError }, + + /// Error returned when there was an error with the tracker core whitelist. 
+ #[error("Tracker core whitelist error: {source}")] + TrackerCoreWhitelistError { source: WhitelistError }, +} + +impl From for UdpAnnounceError { + fn from(connection_cookie_error: ConnectionCookieError) -> Self { + Self::ConnectionCookieError { + source: connection_cookie_error, + } + } +} + +impl From for UdpAnnounceError { + fn from(announce_error: AnnounceError) -> Self { + Self::TrackerCoreAnnounceError { source: announce_error } + } +} + +impl From for UdpAnnounceError { + fn from(whitelist_error: WhitelistError) -> Self { + Self::TrackerCoreWhitelistError { source: whitelist_error } + } +} From bb6af4dd559d7cb032e5d517f15974e9fa0100d2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Mar 2025 12:40:20 +0000 Subject: [PATCH 359/802] refactor: make service fields private --- packages/udp-tracker-core/src/services/announce.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index a48242833..0ec7b21af 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -23,9 +23,9 @@ use crate::statistics; /// The `AnnounceService` is responsible for handling the `announce` requests. 
pub struct AnnounceService { - pub announce_handler: Arc, - pub whitelist_authorization: Arc, - pub opt_udp_core_stats_event_sender: Arc>>, + announce_handler: Arc, + whitelist_authorization: Arc, + opt_udp_core_stats_event_sender: Arc>>, } impl AnnounceService { From 0fce925cadde7bd1343c11d2413f4a632feea567 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Mar 2025 12:40:51 +0000 Subject: [PATCH 360/802] chore: remove deprecated clippy attribute --- packages/udp-tracker-core/src/services/announce.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index 0ec7b21af..8af936a93 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -22,6 +22,10 @@ use crate::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieEr use crate::statistics; /// The `AnnounceService` is responsible for handling the `announce` requests. +/// +/// The service sends an statistics event that increments: +/// +/// - The number of UDP `announce` requests handled by the UDP tracker. pub struct AnnounceService { announce_handler: Arc, whitelist_authorization: Arc, @@ -50,7 +54,6 @@ impl AnnounceService { /// /// - The tracker is running in listed mode and the torrent is not in the /// whitelist. 
- #[allow(clippy::too_many_arguments)] pub async fn handle_announce( &self, remote_addr: SocketAddr, From 9326da3c2e0e03052f8a450d36a2596ca4f93cb5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Mar 2025 13:00:56 +0000 Subject: [PATCH 361/802] refactor: [#1338] clean bittorrent_udp_tracker_core::services::announce::AnnunceService --- .../udp-tracker-core/src/services/announce.rs | 62 +++++++++++-------- 1 file changed, 35 insertions(+), 27 deletions(-) diff --git a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index 8af936a93..698f5fba6 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -12,6 +12,7 @@ use std::ops::Range; use std::sync::Arc; use aquatic_udp_protocol::AnnounceRequest; +use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; use bittorrent_tracker_core::error::{AnnounceError, WhitelistError}; use bittorrent_tracker_core::whitelist; @@ -60,47 +61,54 @@ impl AnnounceService { request: &AnnounceRequest, cookie_valid_range: Range, ) -> Result { - // Authentication - check( - &request.connection_id, - gen_remote_fingerprint(&remote_addr), - cookie_valid_range, - )?; + Self::authenticate(remote_addr, request, cookie_valid_range)?; let info_hash = request.info_hash.into(); - let remote_client_ip = remote_addr.ip(); - // Authorization - self.whitelist_authorization.authorize(&info_hash).await?; + self.authorize(&info_hash).await?; + + let remote_client_ip = remote_addr.ip(); let mut peer = peer_builder::from_request(request, &remote_client_ip); - let peers_wanted: PeersWanted = i32::from(request.peers_wanted.0).into(); - let original_peer_ip = peer.peer_addr.ip(); + let peers_wanted: PeersWanted = i32::from(request.peers_wanted.0).into(); - // The tracker could change the original peer ip let announce_data = self .announce_handler - 
.announce(&info_hash, &mut peer, &original_peer_ip, &peers_wanted) + .announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted) .await?; - if let Some(udp_stats_event_sender) = self.opt_udp_core_stats_event_sender.as_deref() { - match original_peer_ip { - IpAddr::V4(_) => { - udp_stats_event_sender - .send_event(statistics::event::Event::Udp4Announce) - .await; - } - IpAddr::V6(_) => { - udp_stats_event_sender - .send_event(statistics::event::Event::Udp6Announce) - .await; - } - } - } + self.send_stats_event(remote_client_ip).await; Ok(announce_data) } + + fn authenticate( + remote_addr: SocketAddr, + request: &AnnounceRequest, + cookie_valid_range: Range, + ) -> Result { + check( + &request.connection_id, + gen_remote_fingerprint(&remote_addr), + cookie_valid_range, + ) + } + + async fn authorize(&self, info_hash: &InfoHash) -> Result<(), WhitelistError> { + self.whitelist_authorization.authorize(info_hash).await + } + + async fn send_stats_event(&self, peer_ip: IpAddr) { + if let Some(udp_stats_event_sender) = self.opt_udp_core_stats_event_sender.as_deref() { + let event = match peer_ip { + IpAddr::V4(_) => statistics::event::Event::Udp4Announce, + IpAddr::V6(_) => statistics::event::Event::Udp6Announce, + }; + + udp_stats_event_sender.send_event(event).await; + } + } } /// Errors related to announce requests. 
From 3c4dcdbc7857f48b512f16f70aa63073537ecaca Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Mar 2025 13:01:38 +0000 Subject: [PATCH 362/802] refactor: rearrange code --- .../udp-tracker-core/src/services/scrape.rs | 72 +++++++++---------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/packages/udp-tracker-core/src/services/scrape.rs b/packages/udp-tracker-core/src/services/scrape.rs index fddc2ec2d..78c09ed94 100644 --- a/packages/udp-tracker-core/src/services/scrape.rs +++ b/packages/udp-tracker-core/src/services/scrape.rs @@ -20,42 +20,6 @@ use torrust_tracker_primitives::core::ScrapeData; use crate::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieError}; use crate::statistics; -/// Errors related to scrape requests. -#[derive(thiserror::Error, Debug, Clone)] -pub enum UdpScrapeError { - /// Error returned when there was an error with the connection cookie. - #[error("Connection cookie error: {source}")] - ConnectionCookieError { source: ConnectionCookieError }, - - /// Error returned when there was an error with the tracker core scrape handler. - #[error("Tracker core scrape error: {source}")] - TrackerCoreScrapeError { source: ScrapeError }, - - /// Error returned when there was an error with the tracker core whitelist. - #[error("Tracker core whitelist error: {source}")] - TrackerCoreWhitelistError { source: WhitelistError }, -} - -impl From for UdpScrapeError { - fn from(connection_cookie_error: ConnectionCookieError) -> Self { - Self::ConnectionCookieError { - source: connection_cookie_error, - } - } -} - -impl From for UdpScrapeError { - fn from(scrape_error: ScrapeError) -> Self { - Self::TrackerCoreScrapeError { source: scrape_error } - } -} - -impl From for UdpScrapeError { - fn from(whitelist_error: WhitelistError) -> Self { - Self::TrackerCoreWhitelistError { source: whitelist_error } - } -} - /// The `ScrapeService` is responsible for handling the `scrape` requests. 
pub struct ScrapeService { scrape_handler: Arc, @@ -111,3 +75,39 @@ impl ScrapeService { Ok(scrape_data) } } + +/// Errors related to scrape requests. +#[derive(thiserror::Error, Debug, Clone)] +pub enum UdpScrapeError { + /// Error returned when there was an error with the connection cookie. + #[error("Connection cookie error: {source}")] + ConnectionCookieError { source: ConnectionCookieError }, + + /// Error returned when there was an error with the tracker core scrape handler. + #[error("Tracker core scrape error: {source}")] + TrackerCoreScrapeError { source: ScrapeError }, + + /// Error returned when there was an error with the tracker core whitelist. + #[error("Tracker core whitelist error: {source}")] + TrackerCoreWhitelistError { source: WhitelistError }, +} + +impl From for UdpScrapeError { + fn from(connection_cookie_error: ConnectionCookieError) -> Self { + Self::ConnectionCookieError { + source: connection_cookie_error, + } + } +} + +impl From for UdpScrapeError { + fn from(scrape_error: ScrapeError) -> Self { + Self::TrackerCoreScrapeError { source: scrape_error } + } +} + +impl From for UdpScrapeError { + fn from(whitelist_error: WhitelistError) -> Self { + Self::TrackerCoreWhitelistError { source: whitelist_error } + } +} From f9a7bfd59b9d264e7a49aefb580b1340143d1612 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Mar 2025 13:18:17 +0000 Subject: [PATCH 363/802] refactor: [#1338] clean bittorrent_udp_tracker_core::services::scrape::ScrapeService --- .../udp-tracker-core/src/services/scrape.rs | 50 ++++++++++++------- 1 file changed, 33 insertions(+), 17 deletions(-) diff --git a/packages/udp-tracker-core/src/services/scrape.rs b/packages/udp-tracker-core/src/services/scrape.rs index 78c09ed94..61301cd43 100644 --- a/packages/udp-tracker-core/src/services/scrape.rs +++ b/packages/udp-tracker-core/src/services/scrape.rs @@ -21,13 +21,16 @@ use crate::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieEr use crate::statistics; 
/// The `ScrapeService` is responsible for handling the `scrape` requests. +/// +/// The service sends an statistics event that increments: +/// +/// - The number of UDP `scrape` requests handled by the UDP tracker. pub struct ScrapeService { scrape_handler: Arc, opt_udp_stats_event_sender: Arc>>, } impl ScrapeService { - /// Creates a new `ScrapeService`. #[must_use] pub fn new( scrape_handler: Arc, @@ -46,33 +49,46 @@ impl ScrapeService { /// It will return an error if the tracker core scrape handler returns an error. pub async fn handle_scrape( &self, - remote_addr: SocketAddr, + remote_client_addr: SocketAddr, request: &ScrapeRequest, cookie_valid_range: Range, ) -> Result { + Self::authenticate(remote_client_addr, request, cookie_valid_range)?; + + let scrape_data = self + .scrape_handler + .scrape(&Self::convert_from_aquatic(&request.info_hashes)) + .await?; + + self.send_stats_event(remote_client_addr).await; + + Ok(scrape_data) + } + + fn authenticate( + remote_addr: SocketAddr, + request: &ScrapeRequest, + cookie_valid_range: Range, + ) -> Result { check( &request.connection_id, gen_remote_fingerprint(&remote_addr), cookie_valid_range, - )?; - - // Convert from aquatic infohashes - let info_hashes: Vec = request.info_hashes.iter().map(|&x| x.into()).collect(); + ) + } - let scrape_data = self.scrape_handler.scrape(&info_hashes).await?; + fn convert_from_aquatic(aquatic_infohashes: &[aquatic_udp_protocol::common::InfoHash]) -> Vec { + aquatic_infohashes.iter().map(|&x| x.into()).collect() + } + async fn send_stats_event(&self, remote_addr: SocketAddr) { if let Some(udp_stats_event_sender) = self.opt_udp_stats_event_sender.as_deref() { - match remote_addr { - SocketAddr::V4(_) => { - udp_stats_event_sender.send_event(statistics::event::Event::Udp4Scrape).await; - } - SocketAddr::V6(_) => { - udp_stats_event_sender.send_event(statistics::event::Event::Udp6Scrape).await; - } - } + let event = match remote_addr { + SocketAddr::V4(_) => 
statistics::event::Event::Udp4Scrape, + SocketAddr::V6(_) => statistics::event::Event::Udp6Scrape, + }; + udp_stats_event_sender.send_event(event).await; } - - Ok(scrape_data) } } From aaf74446c12e789a53d72627709c263407510750 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 3 Mar 2025 18:18:12 +0000 Subject: [PATCH 364/802] chore: [#1345] add git hook scripts --- contrib/dev-tools/git/hooks/pre-commit.sh | 9 +++++++++ contrib/dev-tools/git/hooks/pre-push.sh | 10 ++++++++++ 2 files changed, 19 insertions(+) create mode 100755 contrib/dev-tools/git/hooks/pre-commit.sh create mode 100755 contrib/dev-tools/git/hooks/pre-push.sh diff --git a/contrib/dev-tools/git/hooks/pre-commit.sh b/contrib/dev-tools/git/hooks/pre-commit.sh new file mode 100755 index 000000000..37b80bb8a --- /dev/null +++ b/contrib/dev-tools/git/hooks/pre-commit.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +cargo +nightly fmt --check && + cargo +nightly check --tests --benches --examples --workspace --all-targets --all-features && + cargo +nightly doc --no-deps --bins --examples --workspace --all-features && + cargo +nightly machete && + cargo +stable build && + CARGO_INCREMENTAL=0 cargo +stable clippy --no-deps --tests --benches --examples --workspace --all-targets --all-features -- -D clippy::correctness -D clippy::suspicious -D clippy::complexity -D clippy::perf -D clippy::style -D clippy::pedantic && + cargo +stable test --tests --benches --examples --workspace --all-targets --all-features diff --git a/contrib/dev-tools/git/hooks/pre-push.sh b/contrib/dev-tools/git/hooks/pre-push.sh new file mode 100755 index 000000000..c1a724156 --- /dev/null +++ b/contrib/dev-tools/git/hooks/pre-push.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +cargo +nightly fmt --check && + cargo +nightly check --tests --benches --examples --workspace --all-targets --all-features && + cargo +nightly doc --no-deps --bins --examples --workspace --all-features && + cargo +nightly machete && + cargo +stable build && + CARGO_INCREMENTAL=0 cargo 
+stable clippy --no-deps --tests --benches --examples --workspace --all-targets --all-features -- -D clippy::correctness -D clippy::suspicious -D clippy::complexity -D clippy::perf -D clippy::style -D clippy::pedantic && + cargo +stable test --tests --benches --examples --workspace --all-targets --all-features && + cargo +stable run --bin e2e_tests_runner -- --config-toml-path "./share/default/config/tracker.e2e.container.sqlite3.toml" From ea802bf47f91ee0dea10b8b9e5d554526eca8268 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 4 Mar 2025 07:25:19 +0000 Subject: [PATCH 365/802] chore(deps): udpate dependencies ``` cargo update Updating crates.io index Locking 32 packages to latest compatible versions Updating anyhow v1.0.96 -> v1.0.97 Updating async-compression v0.4.19 -> v0.4.20 Updating async-trait v0.1.86 -> v0.1.87 Updating bitflags v2.8.0 -> v2.9.0 Updating bytemuck v1.21.0 -> v1.22.0 Updating cc v1.2.15 -> v1.2.16 Updating httparse v1.10.0 -> v1.10.1 Updating itoa v1.0.14 -> v1.0.15 Updating pin-project v1.1.9 -> v1.1.10 Updating pin-project-internal v1.1.9 -> v1.1.10 Updating pkg-config v0.3.31 -> v0.3.32 Updating proc-macro2 v1.0.93 -> v1.0.94 Updating quote v1.0.38 -> v1.0.39 Updating rand_core v0.9.2 -> v0.9.3 Updating redox_syscall v0.5.9 -> v0.5.10 Updating rstest v0.24.0 -> v0.25.0 Updating rstest_macros v0.24.0 -> v0.25.0 Updating rustversion v1.0.19 -> v1.0.20 Updating ryu v1.0.19 -> v1.0.20 Updating semver v1.0.25 -> v1.0.26 Updating serde_bytes v0.11.15 -> v0.11.16 Updating serde_json v1.0.139 -> v1.0.140 Updating serde_path_to_error v0.1.16 -> v0.1.17 Updating serde_repr v0.1.19 -> v0.1.20 Updating syn v2.0.98 -> v2.0.99 Updating thiserror v2.0.11 -> v2.0.12 Updating thiserror-impl v2.0.11 -> v2.0.12 Updating tinyvec v1.8.1 -> v1.9.0 Updating tokio-rustls v0.26.1 -> v0.26.2 Updating unicode-ident v1.0.17 -> v1.0.18 Updating zerocopy v0.8.20 -> v0.8.21 Updating zerocopy-derive v0.8.20 -> v0.8.21 ``` --- Cargo.lock | 281 
++++++++++++++++++++++++++--------------------------- 1 file changed, 140 insertions(+), 141 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3e1cea83d..cb7ac4b80 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -131,9 +131,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.96" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b964d184e89d9b6b67dd2715bc8e74cf3107fb2b529990c90cf517326150bf4" +checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" [[package]] name = "aquatic_peer_id" @@ -208,9 +208,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.19" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06575e6a9673580f52661c92107baabffbf41e2141373441cbcdc47cb733003c" +checksum = "310c9bcae737a48ef5cdee3174184e6d548b292739ede61a1f955ef76a738861" dependencies = [ "brotli", "flate2", @@ -316,13 +316,13 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.86" +version = "0.1.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "644dd749086bf3771a2fbc5f256fdb982d53f011c7d5d560304eafeecebce79d" +checksum = "d556ec1359574147ec0c4fc5eb525f3f23263a592b1a9c07e0a75b427de55c97" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -444,7 +444,7 @@ checksum = "604fde5e028fea851ce1d8570bbdc034bec850d157f7569d10f347d06808c05c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -523,7 +523,7 @@ version = "0.71.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "cexpr", "clang-sys", "itertools 0.13.0", @@ -532,7 +532,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.98", + "syn 
2.0.99", ] [[package]] @@ -549,9 +549,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" +checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" [[package]] name = "bittorrent-http-tracker-core" @@ -563,7 +563,7 @@ dependencies = [ "bittorrent-tracker-core", "futures", "mockall", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "torrust-tracker-configuration", "torrust-tracker-primitives", @@ -583,7 +583,7 @@ dependencies = [ "percent-encoding", "serde", "serde_bencode", - "thiserror 2.0.11", + "thiserror 2.0.12", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-contrib-bencode", @@ -619,7 +619,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_repr", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "torrust-tracker-configuration", "torrust-tracker-located-error", @@ -645,7 +645,7 @@ dependencies = [ "serde", "serde_json", "testcontainers", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "torrust-rest-tracker-api-client", "torrust-tracker-clock", @@ -673,7 +673,7 @@ dependencies = [ "lazy_static", "mockall", "rand 0.9.0", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "torrust-tracker-configuration", "torrust-tracker-primitives", @@ -775,7 +775,7 @@ dependencies = [ "serde_json", "serde_repr", "serde_urlencoded", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "tokio-util", "tower-service", @@ -814,7 +814,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -883,9 +883,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.21.0" +version = "1.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" +checksum = "b6b1fc10dbac614ebc03540c9dbd60e83887fda27794998c6528f1782047d540" [[package]] name = "byteorder" @@ -925,9 +925,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.15" +version = "1.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c736e259eea577f443d5c86c304f9f4ae0295c43f3ba05c21f1d66b5f06001af" +checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" dependencies = [ "jobserver", "libc", @@ -1047,7 +1047,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -1278,7 +1278,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -1289,7 +1289,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -1333,7 +1333,7 @@ checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", "unicode-xid", ] @@ -1345,7 +1345,7 @@ checksum = "ccfae181bab5ab6c5478b2ccb69e4c68a02f8c3ec72f6616bfec9dbc599d2ee0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -1366,7 +1366,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -1603,7 +1603,7 @@ checksum = "e99b8b3c28ae0e84b604c75f721c21dc77afb3706076af5e8216d15fd1deaae3" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -1615,7 +1615,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -1627,7 +1627,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -1705,7 
+1705,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -1934,9 +1934,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" [[package]] name = "httpdate" @@ -2185,7 +2185,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -2304,9 +2304,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" @@ -2370,9 +2370,9 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "libc", - "redox_syscall 0.5.9", + "redox_syscall 0.5.10", ] [[package]] @@ -2516,7 +2516,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -2566,7 +2566,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", "termcolor", "thiserror 1.0.69", ] @@ -2580,7 +2580,7 @@ dependencies = [ "base64 0.21.7", "bigdecimal", "bindgen", - "bitflags 2.8.0", + "bitflags 2.9.0", "bitvec", "btoi", "byteorder", @@ -2748,7 +2748,7 @@ version = "0.10.71" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5e14130c6a98cd258fdcb0fb6d744152343ff729cbfcb28c656a9d12b999fbcd" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "cfg-if", "foreign-types", "libc", @@ -2765,7 +2765,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -2816,7 +2816,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.9", + "redox_syscall 0.5.10", "smallvec", "windows-targets 0.52.6", ] @@ -2843,7 +2843,7 @@ dependencies = [ "regex", "regex-syntax", "structmeta", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -2866,7 +2866,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -2925,22 +2925,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.9" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfe2e71e1471fe07709406bf725f710b02927c9c54b2b5b2ec0e8087d97c327d" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.9" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6e859e6e5bd50440ab63c47e3ebabc90f26251f7c73c3d3e837b74a1cc3fa67" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -2968,9 +2968,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "plotters" @@ -3090,14 +3090,14 @@ 
dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] name = "proc-macro2" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" +checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" dependencies = [ "unicode-ident", ] @@ -3110,7 +3110,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", "version_check", "yansi", ] @@ -3148,9 +3148,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.38" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" +checksum = "c1f1914ce909e1658d9907913b4b91947430c7d9be598b15a1912935b8c04801" dependencies = [ "proc-macro2", ] @@ -3211,8 +3211,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" dependencies = [ "rand_chacha 0.9.0", - "rand_core 0.9.2", - "zerocopy 0.8.20", + "rand_core 0.9.3", + "zerocopy 0.8.21", ] [[package]] @@ -3232,7 +3232,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", - "rand_core 0.9.2", + "rand_core 0.9.3", ] [[package]] @@ -3246,12 +3246,11 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a509b1a2ffbe92afab0e55c8fd99dea1c280e8171bd2d88682bb20bc41cbc2c" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ "getrandom 0.3.1", - "zerocopy 0.8.20", ] [[package]] @@ -3285,11 +3284,11 @@ dependencies = [ 
[[package]] name = "redox_syscall" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82b568323e98e49e2a0899dcee453dd679fae22d69adf9b11dd508d1549b7e2f" +checksum = "0b8c0c260b63a8219631167be35e6a988e9554dbd323f8bd08439c8ed1302bd1" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", ] [[package]] @@ -3435,9 +3434,9 @@ dependencies = [ [[package]] name = "rstest" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03e905296805ab93e13c1ec3a03f4b6c4f35e9498a3d5fa96dc626d22c03cd89" +checksum = "6fc39292f8613e913f7df8fa892b8944ceb47c247b78e1b1ae2f09e019be789d" dependencies = [ "futures-timer", "futures-util", @@ -3447,9 +3446,9 @@ dependencies = [ [[package]] name = "rstest_macros" -version = "0.24.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef0053bbffce09062bee4bcc499b0fbe7a57b879f1efe088d6d8d4c7adcdef9b" +checksum = "1f168d99749d307be9de54d23fd226628d99768225ef08f6ffb52e0182a27746" dependencies = [ "cfg-if", "glob", @@ -3459,7 +3458,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.98", + "syn 2.0.99", "unicode-ident", ] @@ -3469,7 +3468,7 @@ version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c6d5e5acb6f6129fe3f7ba0a7fc77bca1942cb568535e18e7bc40262baf3110" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -3520,7 +3519,7 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "errno", "libc", "linux-raw-sys", @@ -3581,15 +3580,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.19" +version = "1.0.20" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" +checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" [[package]] name = "ryu" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "same-file" @@ -3642,7 +3641,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "core-foundation 0.9.4", "core-foundation-sys", "libc", @@ -3655,7 +3654,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "core-foundation 0.10.0", "core-foundation-sys", "libc", @@ -3674,9 +3673,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.25" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "serde" @@ -3699,9 +3698,9 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.15" +version = "0.11.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a" +checksum = "364fec0df39c49a083c9a8a18a23a6bcfd9af130fe9fe321d18520a0d113e09e" dependencies = [ "serde", ] @@ -3714,7 +3713,7 @@ checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.98", + "syn 2.0.99", ] [[package]] @@ -3732,9 +3731,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.139" +version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44f86c3acccc9c65b153fe1b85a3be07fe5515274ec9f0653b4a0875731c72a6" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" dependencies = [ "indexmap 2.7.1", "itoa", @@ -3745,9 +3744,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a" dependencies = [ "itoa", "serde", @@ -3755,13 +3754,13 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" +checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -3812,7 +3811,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -3925,7 +3924,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -3936,7 +3935,7 @@ checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -3968,9 +3967,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.98" +version = "2.0.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" +checksum = "e02e925281e18ffd9d640e234264753c43edc62d64b2d4cf898f1bc5e75f3fc2" dependencies = [ 
"proc-macro2", "quote", @@ -3994,7 +3993,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -4003,7 +4002,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "core-foundation 0.9.4", "system-configuration-sys", ] @@ -4091,7 +4090,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "tokio-stream", "tokio-tar", @@ -4110,11 +4109,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" dependencies = [ - "thiserror-impl 2.0.11", + "thiserror-impl 2.0.12", ] [[package]] @@ -4125,18 +4124,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] name = "thiserror-impl" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -4202,9 +4201,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" +checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" dependencies = [ 
"tinyvec_macros", ] @@ -4240,7 +4239,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -4255,9 +4254,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" +checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ "rustls", "tokio", @@ -4420,7 +4419,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "torrust-axum-server", "torrust-rest-tracker-api-client", @@ -4449,7 +4448,7 @@ dependencies = [ "hyper", "hyper-util", "pin-project-lite", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "torrust-server-lib", "torrust-tracker-configuration", @@ -4465,7 +4464,7 @@ dependencies = [ "hyper", "reqwest", "serde", - "thiserror 2.0.11", + "thiserror 2.0.12", "url", "uuid", ] @@ -4546,7 +4545,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "torrust-tracker-configuration", "tracing", @@ -4574,7 +4573,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.11", + "thiserror 2.0.12", "toml", "torrust-tracker-located-error", "tracing", @@ -4588,14 +4587,14 @@ name = "torrust-tracker-contrib-bencode" version = "3.0.0-develop" dependencies = [ "criterion", - "thiserror 2.0.11", + "thiserror 2.0.12", ] [[package]] name = "torrust-tracker-located-error" version = "3.0.0-develop" dependencies = [ - "thiserror 2.0.11", + "thiserror 2.0.12", "tracing", ] @@ -4610,7 +4609,7 @@ dependencies = [ "serde", "tdyne-peer-id", "tdyne-peer-id-registry", - "thiserror 2.0.11", + "thiserror 2.0.12", "torrust-tracker-configuration", "zerocopy 0.7.35", ] @@ -4661,7 +4660,7 @@ dependencies = [ "mockall", 
"rand 0.9.0", "ringbuf", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "torrust-server-lib", "torrust-tracker-clock", @@ -4713,7 +4712,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "async-compression", - "bitflags 2.8.0", + "bitflags 2.9.0", "bytes", "futures-core", "http", @@ -4759,7 +4758,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -4844,9 +4843,9 @@ dependencies = [ [[package]] name = "unicode-ident" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00e2473a93778eb0bad35909dff6a10d28e63f792f16ed15e404fca9d5eeedbe" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" [[package]] name = "unicode-xid" @@ -4980,7 +4979,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", "wasm-bindgen-shared", ] @@ -5015,7 +5014,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5278,7 +5277,7 @@ version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", ] [[package]] @@ -5339,7 +5338,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", "synstructure", ] @@ -5355,11 +5354,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.20" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dde3bb8c68a8f3f1ed4ac9221aad6b10cece3e60a8e2ea54a6a2dec806d0084c" +checksum = "dcf01143b2dd5d134f11f545cf9f1431b13b749695cb33bcce051e7568f99478" dependencies = [ - "zerocopy-derive 0.8.20", + "zerocopy-derive 0.8.21", ] [[package]] @@ -5370,18 +5369,18 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] name = "zerocopy-derive" -version = "0.8.20" +version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eea57037071898bf96a6da35fd626f4f27e9cee3ead2a6c703cf09d472b2e700" +checksum = "712c8386f4f4299382c9abee219bee7084f78fb939d88b6840fcc1320d5f6da2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] @@ -5401,7 +5400,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", "synstructure", ] @@ -5430,7 +5429,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.99", ] [[package]] From bb96580a4bb175ecd0c31f8b9b5e5599477eca20 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 4 Mar 2025 07:49:30 +0000 Subject: [PATCH 366/802] chore(deps): bump hex-literal from 0.4.1 to 1.0.0 --- Cargo.lock | 4 ++-- console/tracker-client/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cb7ac4b80..4c7524f49 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1885,9 +1885,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hex-literal" -version = "0.4.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" +checksum = "bcaaec4551594c969335c98c903c1397853d4198408ea609190f420500f6be71" 
[[package]] name = "home" diff --git a/console/tracker-client/Cargo.toml b/console/tracker-client/Cargo.toml index 4db6702cb..d4ab7c9e3 100644 --- a/console/tracker-client/Cargo.toml +++ b/console/tracker-client/Cargo.toml @@ -21,7 +21,7 @@ bittorrent-primitives = "0.1.0" bittorrent-tracker-client = { version = "3.0.0-develop", path = "../../packages/tracker-client" } clap = { version = "4", features = ["derive", "env"] } futures = "0" -hex-literal = "0" +hex-literal = "1" hyper = "1" reqwest = { version = "0", features = ["json"] } serde = { version = "1", features = ["derive"] } From 09396b5c647ee87218617f813325b564a71586fa Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 4 Mar 2025 09:13:22 +0000 Subject: [PATCH 367/802] fix: issue generating coverage report There wass a missing feature for tokio crate in the `udp-tracker-core` package. ```output error[E0432]: unresolved import `tokio::time` --> packages/udp-tracker-core/src/services/banning.rs:22:12 | 22 | use tokio::time::Instant; | ^^^^ could not find `time` in `tokio` | note: found an item that was configured out --> /home/josecelano/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.43.0/src/lib.rs:556:13 | 556 | pub mod time; | ^^^^ note: the item is gated behind the `time` feature --> /home/josecelano/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.43.0/src/lib.rs:555:1 | 555 | / cfg_time! 
{ 556 | | pub mod time; 557 | | } | |_^ = note: this error originates in the macro `cfg_time` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0433]: failed to resolve: could not find `time` in `tokio` --> packages/udp-tracker-core/src/services/banning.rs:40:53 | 40 | last_connection_id_errors_reset: tokio::time::Instant::now(), | ^^^^ could not find `time` in `tokio` | note: found an item that was configured out --> /home/josecelano/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.43.0/src/lib.rs:556:13 | 556 | pub mod time; | ^^^^ note: the item is gated behind the `time` feature --> /home/josecelano/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.43.0/src/lib.rs:555:1 | 555 | / cfg_time! { 556 | | pub mod time; 557 | | } | |_^ = note: this error originates in the macro `cfg_time` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this struct | 18 + use std::time::Instant; | help: if you import `Instant`, refer to it directly | 40 - last_connection_id_errors_reset: tokio::time::Instant::now(), 40 + last_connection_id_errors_reset: Instant::now(), | Some errors have detailed explanations: E0432, E0433. For more information about an error, try `rustc --explain E0432`. 
error: could not compile `bittorrent-udp-tracker-core` (lib test) due to 2 previous errors error: process didn't exit successfully: `/home/josecelano/.rustup/toolchains/nightly-x86_64-unknown-linux-gnu/bin/cargo test --tests --manifest-path /home/josecelano/Documents/git/committer/me/github/torrust/torrust-tracker/Cargo.toml --target-dir /home/josecelano/Documents/git/committer/me/github/torrust/torrust-tracker/target/llvm-cov-target --package bittorrent-udp-tracker-core` (exit status: 101) ``` --- packages/udp-tracker-core/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/udp-tracker-core/Cargo.toml b/packages/udp-tracker-core/Cargo.toml index 5f7622032..fc8e2328c 100644 --- a/packages/udp-tracker-core/Cargo.toml +++ b/packages/udp-tracker-core/Cargo.toml @@ -25,7 +25,7 @@ futures = "0" lazy_static = "1" rand = "0" thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync", "time"] } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } tracing = "0" From 820329b924d92ba27c318271e6afc6bc4df98dc8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 4 Mar 2025 16:42:43 +0000 Subject: [PATCH 368/802] refactor: [#1264] return wether swarm stats have change or not after upserting a peer When you upsert a new peer (announce requests) the swarm stats migth change if the peer announced that has completed the download. If that case we return `true` given the caller the opportunity to know if stats have changed. In the current implementation we get stats before and after upserintg the peer, and we compare them to check if they have changed. However stats migth have change due to a parallel (race condition) announce from another peer. 
This is the only way to know exactly if this announce request has altered the stats (incremented the number of downloads). --- .../src/environment.rs | 2 +- .../src/environment.rs | 2 +- .../torrent-repository/src/entry/single.rs | 2 + .../src/repository/dash_map_mutex_std.rs | 8 ++- .../torrent-repository/src/repository/mod.rs | 4 +- .../src/repository/rw_lock_std.rs | 4 +- .../src/repository/rw_lock_std_mutex_std.rs | 4 +- .../src/repository/rw_lock_std_mutex_tokio.rs | 4 +- .../src/repository/rw_lock_tokio.rs | 4 +- .../src/repository/rw_lock_tokio_mutex_std.rs | 4 +- .../repository/rw_lock_tokio_mutex_tokio.rs | 4 +- .../src/repository/skip_map_mutex_std.rs | 12 ++-- .../torrent-repository/tests/common/repo.rs | 2 +- packages/tracker-core/src/announce_handler.rs | 2 +- packages/tracker-core/src/torrent/manager.rs | 4 +- .../src/torrent/repository/in_memory.rs | 57 ++++++++++--------- packages/tracker-core/src/torrent/services.rs | 18 +++--- .../udp-tracker-server/src/environment.rs | 2 +- .../src/handlers/announce.rs | 4 +- .../udp-tracker-server/src/handlers/scrape.rs | 2 +- 20 files changed, 77 insertions(+), 68 deletions(-) diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index 45cc276fd..97c91a8bf 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -22,7 +22,7 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let () = self + let _stats_updated = self .container .tracker_core_container .in_memory_torrent_repository diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index 2ee5cf744..9c15dd628 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -33,7 
+33,7 @@ where { /// Add a torrent to the tracker pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let () = self + let _stats_updated = self .container .tracker_core_container .in_memory_torrent_repository diff --git a/packages/torrent-repository/src/entry/single.rs b/packages/torrent-repository/src/entry/single.rs index 7f8cfc4e6..27bf299bf 100644 --- a/packages/torrent-repository/src/entry/single.rs +++ b/packages/torrent-repository/src/entry/single.rs @@ -66,6 +66,8 @@ impl Entry for EntrySingle { } } _ => { + // `Started` event (first announced event) or + // `None` event (announcements done at regular intervals). drop(self.swarm.upsert(Arc::new(*peer))); } } diff --git a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs index 54a83aeb4..731280486 100644 --- a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs @@ -23,13 +23,15 @@ where EntryMutexStd: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { if let Some(entry) = self.torrents.get(info_hash) { - entry.upsert_peer(peer); + entry.upsert_peer(peer) } else { let _unused = self.torrents.insert(*info_hash, Arc::default()); if let Some(entry) = self.torrents.get(info_hash) { - entry.upsert_peer(peer); + entry.upsert_peer(peer) + } else { + false } } } diff --git a/packages/torrent-repository/src/repository/mod.rs b/packages/torrent-repository/src/repository/mod.rs index 14f03ed9d..bfb7f20f4 100644 --- a/packages/torrent-repository/src/repository/mod.rs +++ b/packages/torrent-repository/src/repository/mod.rs @@ -24,7 +24,7 @@ pub trait Repository: Debug + Default + Sized + 'static { fn remove(&self, key: &InfoHash) -> Option; fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); 
fn remove_peerless_torrents(&self, policy: &TrackerPolicy); - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer); + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool; fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option; } @@ -37,6 +37,6 @@ pub trait RepositoryAsync: Debug + Default + Sized + 'static { fn remove(&self, key: &InfoHash) -> impl std::future::Future> + Send; fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future + Send; fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> impl std::future::Future + Send; - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> impl std::future::Future + Send; + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> impl std::future::Future + Send; fn get_swarm_metadata(&self, info_hash: &InfoHash) -> impl std::future::Future> + Send; } diff --git a/packages/torrent-repository/src/repository/rw_lock_std.rs b/packages/torrent-repository/src/repository/rw_lock_std.rs index 409a16498..2ff757654 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std.rs @@ -46,12 +46,12 @@ impl Repository for TorrentsRwLockStd where EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { let mut db = self.get_torrents_mut(); let entry = db.entry(*info_hash).or_insert(EntrySingle::default()); - entry.upsert_peer(peer); + entry.upsert_peer(peer) } fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs index 8814f09ed..1f1155df5 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs @@ -33,7 
+33,7 @@ where EntryMutexStd: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { let maybe_entry = self.get_torrents().get(info_hash).cloned(); let entry = if let Some(entry) = maybe_entry { @@ -44,7 +44,7 @@ where entry.clone() }; - entry.upsert_peer(peer); + entry.upsert_peer(peer) } fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs index 46f4a9567..f8fd2871d 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs @@ -37,7 +37,7 @@ where EntryMutexTokio: EntryAsync, EntrySingle: Entry, { - async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { let maybe_entry = self.get_torrents().get(info_hash).cloned(); let entry = if let Some(entry) = maybe_entry { @@ -48,7 +48,7 @@ where entry.clone() }; - entry.upsert_peer(peer).await; + entry.upsert_peer(peer).await } async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio.rs index ce6646e92..964149393 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio.rs @@ -47,12 +47,12 @@ impl RepositoryAsync for TorrentsRwLockTokio where EntrySingle: Entry, { - async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { let mut db = self.get_torrents_mut().await; let entry = db.entry(*info_hash).or_insert(EntrySingle::default()); - 
entry.upsert_peer(peer); + entry.upsert_peer(peer) } async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs index 7efb093e9..c4541dea2 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs @@ -35,7 +35,7 @@ where EntryMutexStd: EntrySync, EntrySingle: Entry, { - async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); let entry = if let Some(entry) = maybe_entry { @@ -46,7 +46,7 @@ where entry.clone() }; - entry.upsert_peer(peer); + entry.upsert_peer(peer) } async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs index e08a6af59..ff1e77cda 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -35,7 +35,7 @@ where EntryMutexTokio: EntryAsync, EntrySingle: Entry, { - async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); let entry = if let Some(entry) = maybe_entry { @@ -46,7 +46,7 @@ where entry.clone() }; - entry.upsert_peer(peer).await; + entry.upsert_peer(peer).await } async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs index 
47fe9620a..7a4e4afb9 100644 --- a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs @@ -23,9 +23,9 @@ where EntryMutexStd: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); - entry.value().upsert_peer(peer); + entry.value().upsert_peer(peer) } fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { @@ -114,9 +114,9 @@ where EntryRwLockParkingLot: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); - entry.value().upsert_peer(peer); + entry.value().upsert_peer(peer) } fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { @@ -205,9 +205,9 @@ where EntryMutexParkingLot: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); - entry.value().upsert_peer(peer); + entry.value().upsert_peer(peer) } fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs index c8412952c..809c59d2a 100644 --- a/packages/torrent-repository/tests/common/repo.rs +++ b/packages/torrent-repository/tests/common/repo.rs @@ -26,7 +26,7 @@ pub(crate) enum Repo { } impl Repo { - pub(crate) async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + pub(crate) async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { match self { Repo::RwLockStd(repo) => repo.upsert_peer(info_hash, 
peer), Repo::RwLockStdMutexStd(repo) => repo.upsert_peer(info_hash, peer), diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index cb48a321a..db5a0ca5d 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -187,7 +187,7 @@ impl AnnounceHandler { fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { let swarm_metadata_before = self.in_memory_torrent_repository.get_swarm_metadata(info_hash); - self.in_memory_torrent_repository.upsert_peer(info_hash, peer); + let _stats_updated = self.in_memory_torrent_repository.upsert_peer(info_hash, peer); let swarm_metadata_after = self.in_memory_torrent_repository.get_swarm_metadata(info_hash); diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index 51df97fb5..20074a5c9 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -195,7 +195,7 @@ mod tests { // Add a peer to the torrent let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let () = services.in_memory_torrent_repository.upsert_peer(&infohash, &peer); + let _stats_updated = services.in_memory_torrent_repository.upsert_peer(&infohash, &peer); // Simulate the time has passed 1 second more than the max peer timeout. clock::Stopped::local_add(&Duration::from_secs( @@ -212,7 +212,7 @@ mod tests { // Add a peer to the torrent let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let () = in_memory_torrent_repository.upsert_peer(infohash, &peer); + let _stats_updated = in_memory_torrent_repository.upsert_peer(infohash, &peer); // Remove the peer. The torrent is now peerless. 
in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 584feabc9..55f9c17b1 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -40,8 +40,13 @@ impl InMemoryTorrentRepository { /// /// * `info_hash` - The unique identifier of the torrent. /// * `peer` - The peer to insert or update in the torrent entry. - pub fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - self.torrents.upsert_peer(info_hash, peer); + /// + /// # Returns + /// + /// `true` if the peer stats were updated. + #[must_use] + pub fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + self.torrents.upsert_peer(info_hash, peer) } /// Removes a torrent entry from the repository. @@ -263,7 +268,7 @@ mod tests { let info_hash = sample_info_hash(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); assert!(in_memory_torrent_repository.get(&info_hash).is_some()); } @@ -274,8 +279,8 @@ mod tests { let info_hash = sample_info_hash(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); assert!(in_memory_torrent_repository.get(&info_hash).is_some()); } @@ -301,7 +306,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); let peers = 
in_memory_torrent_repository.get_torrent_peers(&info_hash); @@ -334,7 +339,7 @@ mod tests { event: AnnounceEvent::Completed, }; - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); } let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); @@ -373,7 +378,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); @@ -388,7 +393,7 @@ mod tests { let excluded_peer = sample_peer(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &excluded_peer); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &excluded_peer); // Add 74 peers for idx in 2..=75 { @@ -402,7 +407,7 @@ mod tests { event: AnnounceEvent::Completed, }; - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); } let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); @@ -430,7 +435,7 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let info_hash = sample_info_hash(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); let _unused = in_memory_torrent_repository.remove(&info_hash); @@ -445,7 +450,7 @@ mod tests { let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); // Cut off time is 1 second 
after the peer was updated in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); @@ -461,7 +466,7 @@ mod tests { // Insert a sample peer for the torrent to force adding the torrent entry let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let () = in_memory_torrent_repository.upsert_peer(info_hash, &peer); + let _stats_updated = in_memory_torrent_repository.upsert_peer(info_hash, &peer); // Remove the peer in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); @@ -525,7 +530,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); let torrent_entry = in_memory_torrent_repository.get(&info_hash).unwrap(); @@ -558,7 +563,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); let torrent_entries = in_memory_torrent_repository.get_paginated(None); @@ -600,12 +605,12 @@ mod tests { // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two); // Get only the first page where page size is 1 let torrent_entries = @@ -636,12 +641,12 @@ mod tests { // Insert one torrent entry let 
info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two); // Get only the first page where page size is 1 let torrent_entries = @@ -672,12 +677,12 @@ mod tests { // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two); // Get only the first page where page size is 1 let torrent_entries = @@ -722,7 +727,7 @@ mod tests { async fn it_should_return_the_torrent_metrics_when_there_is_a_leecher() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let () = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &leecher()); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &leecher()); let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); @@ -741,7 +746,7 @@ mod tests { async fn it_should_return_the_torrent_metrics_when_there_is_a_seeder() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let () 
= in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &seeder()); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &seeder()); let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); @@ -760,7 +765,7 @@ mod tests { async fn it_should_return_the_torrent_metrics_when_there_is_a_completed_peer() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let () = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &complete_peer()); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &complete_peer()); let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); @@ -781,7 +786,7 @@ mod tests { let start_time = std::time::Instant::now(); for i in 0..1_000_000 { - let () = in_memory_torrent_repository.upsert_peer(&gen_seeded_infohash(&i), &leecher()); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&gen_seeded_infohash(&i), &leecher()); } let result_a = start_time.elapsed(); @@ -817,7 +822,7 @@ mod tests { let infohash = sample_info_hash(); - let () = in_memory_torrent_repository.upsert_peer(&infohash, &leecher()); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&infohash, &leecher()); let swarm_metadata = in_memory_torrent_repository.get_swarm_metadata(&infohash); diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index 98d25ba47..2d072cf05 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -231,7 +231,7 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); let torrent_info = 
get_torrent_info(&in_memory_torrent_repository, &info_hash).unwrap(); @@ -275,7 +275,7 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); @@ -300,8 +300,8 @@ mod tests { let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); - let () = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer()); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer()); let offset = 0; let limit = 1; @@ -321,8 +321,8 @@ mod tests { let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); - let () = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer()); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer()); let offset = 1; let limit = 4000; @@ -347,11 +347,11 @@ mod tests { let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash1 = InfoHash::from_str(&hash1).unwrap(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash1, 
&sample_peer()); let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer()); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer()); let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); @@ -399,7 +399,7 @@ mod tests { let info_hash = sample_info_hash(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + let _ = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); let torrent_info = get_torrents(&in_memory_torrent_repository, &[info_hash]); diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index c6ec98290..850e81d18 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -31,7 +31,7 @@ where /// Add a torrent to the tracker #[allow(dead_code)] pub fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let () = self + let _stats_updated = self .container .tracker_core_container .in_memory_torrent_repository diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 9269dadfe..071261164 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -366,7 +366,7 @@ mod tests { .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv6); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv6); } async fn announce_a_new_peer_using_ipv4( @@ -677,7 +677,7 @@ mod tests { .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); - let 
() = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv4); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv4); } async fn announce_a_new_peer_using_ipv6( diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index 3e6da4778..0adf10637 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -166,7 +166,7 @@ mod tests { .with_number_of_bytes_left(0) .into(); - let () = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer); + let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer); } fn build_scrape_request(remote_addr: &SocketAddr, info_hash: &InfoHash) -> ScrapeRequest { From c5db71a512df2f59e3b3ff7074acb86ad631144d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 4 Mar 2025 17:01:55 +0000 Subject: [PATCH 369/802] refactor: [#1264] remove unnecessary fn call Now the `upsert_peer` fn returns `true` if the stats have changed. On the other hand, the stats might have changed because there was a different parallel announce request (race conditions), so it might not be needed. This removes unnecessary database queries to persist the stats. --- packages/tracker-core/src/announce_handler.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index db5a0ca5d..7903ae3e2 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -185,17 +185,15 @@ impl AnnounceHandler { /// returns the updated swarm stats.
#[must_use] fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { - let swarm_metadata_before = self.in_memory_torrent_repository.get_swarm_metadata(info_hash); + let stats_updated = self.in_memory_torrent_repository.upsert_peer(info_hash, peer); - let _stats_updated = self.in_memory_torrent_repository.upsert_peer(info_hash, peer); + let swarm_metadata = self.in_memory_torrent_repository.get_swarm_metadata(info_hash); - let swarm_metadata_after = self.in_memory_torrent_repository.get_swarm_metadata(info_hash); - - if swarm_metadata_before != swarm_metadata_after { - self.persist_stats(info_hash, &swarm_metadata_after); + if stats_updated { + self.persist_stats(info_hash, &swarm_metadata); } - swarm_metadata_after + swarm_metadata } /// Persists torrent statistics to the database if persistence is enabled. From 2fb1c6f404499012119f01de0c31d6fd39aae78e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 4 Mar 2025 18:37:24 +0000 Subject: [PATCH 370/802] refactor: [#1264] add a DB method to get the number of completed downloads for one torrent This will be used to preset the counter from the database when a new torrent entry is added to the torrent repo after being removed for a while (peerless torrents are removed). The process is: - A new torrent is announced and added to the torrent repo. - The downloads counter is 0. - One of the peers finishes downloading and the counter increases to 1. - The torrent is removed from the repo because none of the peers announces for a while. - A new peer announces the torrent. We have to preset the counter to 1 even if that peer has not completed downloading yet.
--- .../tracker-core/src/databases/driver/mod.rs | 13 +++++++++++++ .../tracker-core/src/databases/driver/mysql.rs | 16 +++++++++++++++- .../src/databases/driver/sqlite.rs | 18 +++++++++++++++++- packages/tracker-core/src/databases/mod.rs | 13 +++++++++++-- .../src/torrent/repository/persisted.rs | 15 ++++++++++++++- 5 files changed, 70 insertions(+), 5 deletions(-) diff --git a/packages/tracker-core/src/databases/driver/mod.rs b/packages/tracker-core/src/databases/driver/mod.rs index 06e912f7c..bd15f8e27 100644 --- a/packages/tracker-core/src/databases/driver/mod.rs +++ b/packages/tracker-core/src/databases/driver/mod.rs @@ -98,6 +98,7 @@ pub(crate) mod tests { // Persistent torrents (stats) handling_torrent_persistence::it_should_save_and_load_persistent_torrents(driver); + handling_torrent_persistence::it_should_load_all_persistent_torrents(driver); // Authentication keys (for private trackers) @@ -159,6 +160,18 @@ pub(crate) mod tests { driver.save_persistent_torrent(&infohash, number_of_downloads).unwrap(); + let number_of_downloads = driver.load_persistent_torrent(&infohash).unwrap().unwrap(); + + assert_eq!(number_of_downloads, 1); + } + + pub fn it_should_load_all_persistent_torrents(driver: &Arc>) { + let infohash = sample_info_hash(); + + let number_of_downloads = 1; + + driver.save_persistent_torrent(&infohash, number_of_downloads).unwrap(); + let torrents = driver.load_persistent_torrents().unwrap(); assert_eq!(torrents.len(), 1); diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index 6f7deb2b9..c2cf24bb1 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -13,7 +13,7 @@ use r2d2::Pool; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MySqlConnectionManager; -use torrust_tracker_primitives::PersistentTorrents; +use 
torrust_tracker_primitives::{PersistentTorrent, PersistentTorrents}; use super::{Database, Driver, Error}; use crate::authentication::key::AUTH_KEY_LENGTH; @@ -129,6 +129,20 @@ impl Database for Mysql { Ok(torrents.iter().copied().collect()) } + /// Refer to [`databases::Database::load_persistent_torrent`](crate::core::databases::Database::load_persistent_torrent). + fn load_persistent_torrent(&self, info_hash: &InfoHash) -> Result, Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let query = conn.exec_first::( + "SELECT completed FROM torrents WHERE info_hash = :info_hash", + params! { "info_hash" => info_hash.to_hex_string() }, + ); + + let persistent_torrent = query?; + + Ok(persistent_torrent) + } + /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). fn load_keys(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; diff --git a/packages/tracker-core/src/databases/driver/sqlite.rs b/packages/tracker-core/src/databases/driver/sqlite.rs index bab2fb6a7..d9a2fc8d8 100644 --- a/packages/tracker-core/src/databases/driver/sqlite.rs +++ b/packages/tracker-core/src/databases/driver/sqlite.rs @@ -13,7 +13,7 @@ use r2d2::Pool; use r2d2_sqlite::rusqlite::params; use r2d2_sqlite::rusqlite::types::Null; use r2d2_sqlite::SqliteConnectionManager; -use torrust_tracker_primitives::{DurationSinceUnixEpoch, PersistentTorrents}; +use torrust_tracker_primitives::{DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use super::{Database, Driver, Error}; use crate::authentication::{self, Key}; @@ -125,6 +125,22 @@ impl Database for Sqlite { Ok(torrent_iter.filter_map(std::result::Result::ok).collect()) } + /// Refer to [`databases::Database::load_persistent_torrent`](crate::core::databases::Database::load_persistent_torrent). 
+ fn load_persistent_torrent(&self, info_hash: &InfoHash) -> Result, Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let mut stmt = conn.prepare("SELECT completed FROM torrents WHERE info_hash = ?")?; + + let mut rows = stmt.query([info_hash.to_hex_string()])?; + + let persistent_torrent = rows.next()?; + + Ok(persistent_torrent.map(|f| { + let completed: i64 = f.get(0).unwrap(); + u32::try_from(completed).unwrap() + })) + } + /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). fn load_keys(&self) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; diff --git a/packages/tracker-core/src/databases/mod.rs b/packages/tracker-core/src/databases/mod.rs index 33a7e3c69..1ffeb518f 100644 --- a/packages/tracker-core/src/databases/mod.rs +++ b/packages/tracker-core/src/databases/mod.rs @@ -52,7 +52,7 @@ pub mod setup; use bittorrent_primitives::info_hash::InfoHash; use mockall::automock; -use torrust_tracker_primitives::PersistentTorrents; +use torrust_tracker_primitives::{PersistentTorrent, PersistentTorrents}; use self::error::Error; use crate::authentication::{self, Key}; @@ -90,7 +90,7 @@ pub trait Database: Sync + Send { // Torrent Metrics - /// Loads torrent metrics data from the database. + /// Loads torrent metrics data from the database for all torrents. /// /// This function returns the persistent torrent metrics as a collection of /// tuples, where each tuple contains an [`InfoHash`] and the `downloaded` @@ -103,6 +103,15 @@ pub trait Database: Sync + Send { /// Returns an [`Error`] if the metrics cannot be loaded. fn load_persistent_torrents(&self) -> Result; + /// Loads torrent metrics data from the database for one torrent. + /// + /// # Context: Torrent Metrics + /// + /// # Errors + /// + /// Returns an [`Error`] if the metrics cannot be loaded. + fn load_persistent_torrent(&self, info_hash: &InfoHash) -> Result, Error>; + /// Saves torrent metrics data into the database. 
/// /// # Arguments diff --git a/packages/tracker-core/src/torrent/repository/persisted.rs b/packages/tracker-core/src/torrent/repository/persisted.rs index 694a2fe7c..89d931bb2 100644 --- a/packages/tracker-core/src/torrent/repository/persisted.rs +++ b/packages/tracker-core/src/torrent/repository/persisted.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; -use torrust_tracker_primitives::PersistentTorrents; +use torrust_tracker_primitives::{PersistentTorrent, PersistentTorrents}; use crate::databases::error::Error; use crate::databases::Database; @@ -59,6 +59,19 @@ impl DatabasePersistentTorrentRepository { self.database.load_persistent_torrents() } + /// Loads one persistent torrent metrics from the database. + /// + /// This function retrieves the torrent metrics (e.g., download counts) from the persistent store + /// and returns them as a [`PersistentTorrents`] map. + /// + /// # Errors + /// + /// Returns an [`Error`] if the underlying database query fails. + #[allow(dead_code)] + pub(crate) fn load(&self, info_hash: &InfoHash) -> Result, Error> { + self.database.load_persistent_torrent(info_hash) + } + /// Saves the persistent torrent metric into the database. 
/// /// This function stores or updates the download count for the torrent From 8a4dba3cbf7e221dd51fb4199e65b12d33143a1f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 4 Mar 2025 18:58:40 +0000 Subject: [PATCH 371/802] refactor: [#1264] rename variables --- .../src/environment.rs | 2 +- .../src/environment.rs | 2 +- .../torrent-repository/src/entry/single.rs | 6 +-- packages/tracker-core/src/announce_handler.rs | 4 +- packages/tracker-core/src/torrent/manager.rs | 4 +- .../src/torrent/repository/in_memory.rs | 50 ++++++++++--------- packages/tracker-core/src/torrent/services.rs | 16 +++--- .../udp-tracker-server/src/environment.rs | 2 +- .../src/handlers/announce.rs | 6 ++- .../udp-tracker-server/src/handlers/scrape.rs | 2 +- 10 files changed, 49 insertions(+), 45 deletions(-) diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index 97c91a8bf..b7cabad0e 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -22,7 +22,7 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let _stats_updated = self + let _number_of_downloads_increased = self .container .tracker_core_container .in_memory_torrent_repository diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index 9c15dd628..f130e24f0 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -33,7 +33,7 @@ where { /// Add a torrent to the tracker pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let _stats_updated = self + let _number_of_downloads_increased = self .container .tracker_core_container .in_memory_torrent_repository diff --git a/packages/torrent-repository/src/entry/single.rs 
b/packages/torrent-repository/src/entry/single.rs index 27bf299bf..0f922bd02 100644 --- a/packages/torrent-repository/src/entry/single.rs +++ b/packages/torrent-repository/src/entry/single.rs @@ -51,7 +51,7 @@ impl Entry for EntrySingle { } fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { - let mut downloaded_stats_updated: bool = false; + let mut number_of_downloads_increased: bool = false; match peer::ReadInfo::get_event(peer) { AnnounceEvent::Stopped => { @@ -62,7 +62,7 @@ impl Entry for EntrySingle { // Don't count if peer was not previously known and not already completed. if previous.is_some_and(|p| p.event != AnnounceEvent::Completed) { self.downloaded += 1; - downloaded_stats_updated = true; + number_of_downloads_increased = true; } } _ => { @@ -72,7 +72,7 @@ impl Entry for EntrySingle { } } - downloaded_stats_updated + number_of_downloads_increased } fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 7903ae3e2..28d3b252f 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -185,11 +185,11 @@ impl AnnounceHandler { /// returns the updated swarm stats. 
#[must_use] fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { - let stats_updated = self.in_memory_torrent_repository.upsert_peer(info_hash, peer); + let number_of_downloads_increased = self.in_memory_torrent_repository.upsert_peer(info_hash, peer); let swarm_metadata = self.in_memory_torrent_repository.get_swarm_metadata(info_hash); - if stats_updated { + if number_of_downloads_increased { self.persist_stats(info_hash, &swarm_metadata); } diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index 20074a5c9..e4691a86f 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -195,7 +195,7 @@ mod tests { // Add a peer to the torrent let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _stats_updated = services.in_memory_torrent_repository.upsert_peer(&infohash, &peer); + let _number_of_downloads_increased = services.in_memory_torrent_repository.upsert_peer(&infohash, &peer); // Simulate the time has passed 1 second more than the max peer timeout. clock::Stopped::local_add(&Duration::from_secs( @@ -212,7 +212,7 @@ mod tests { // Add a peer to the torrent let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _stats_updated = in_memory_torrent_repository.upsert_peer(infohash, &peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(infohash, &peer); // Remove the peer. The torrent is now peerless. 
in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 55f9c17b1..bec28bcc0 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -268,7 +268,7 @@ mod tests { let info_hash = sample_info_hash(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); assert!(in_memory_torrent_repository.get(&info_hash).is_some()); } @@ -279,8 +279,8 @@ mod tests { let info_hash = sample_info_hash(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); assert!(in_memory_torrent_repository.get(&info_hash).is_some()); } @@ -306,7 +306,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); @@ -339,7 +339,7 @@ mod tests { event: AnnounceEvent::Completed, }; - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); } let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); @@ -378,7 +378,7 @@ mod tests { let info_hash = 
sample_info_hash(); let peer = sample_peer(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); @@ -393,7 +393,7 @@ mod tests { let excluded_peer = sample_peer(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &excluded_peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &excluded_peer); // Add 74 peers for idx in 2..=75 { @@ -407,7 +407,7 @@ mod tests { event: AnnounceEvent::Completed, }; - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); } let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); @@ -435,7 +435,7 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let info_hash = sample_info_hash(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); let _unused = in_memory_torrent_repository.remove(&info_hash); @@ -450,7 +450,7 @@ mod tests { let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); // Cut off time is 1 second after the peer was updated in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); @@ -466,7 +466,7 @@ mod tests { // Insert a sample peer for the torrent to force adding the torrent entry let mut peer = sample_peer(); peer.updated 
= DurationSinceUnixEpoch::new(0, 0); - let _stats_updated = in_memory_torrent_repository.upsert_peer(info_hash, &peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(info_hash, &peer); // Remove the peer in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); @@ -530,7 +530,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); let torrent_entry = in_memory_torrent_repository.get(&info_hash).unwrap(); @@ -563,7 +563,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); let torrent_entries = in_memory_torrent_repository.get_paginated(None); @@ -605,12 +605,12 @@ mod tests { // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two); // Get only the first page where page size is 1 let torrent_entries = @@ -641,12 +641,12 @@ mod tests { // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _stats_updated = 
in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two); // Get only the first page where page size is 1 let torrent_entries = @@ -677,12 +677,12 @@ mod tests { // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two); // Get only the first page where page size is 1 let torrent_entries = @@ -727,7 +727,7 @@ mod tests { async fn it_should_return_the_torrent_metrics_when_there_is_a_leecher() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &leecher()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &leecher()); let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); @@ -746,7 +746,7 @@ mod tests { async fn it_should_return_the_torrent_metrics_when_there_is_a_seeder() { let in_memory_torrent_repository = 
Arc::new(InMemoryTorrentRepository::default()); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &seeder()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &seeder()); let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); @@ -765,7 +765,8 @@ mod tests { async fn it_should_return_the_torrent_metrics_when_there_is_a_completed_peer() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &complete_peer()); + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &complete_peer()); let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); @@ -786,7 +787,8 @@ mod tests { let start_time = std::time::Instant::now(); for i in 0..1_000_000 { - let _stats_updated = in_memory_torrent_repository.upsert_peer(&gen_seeded_infohash(&i), &leecher()); + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&gen_seeded_infohash(&i), &leecher()); } let result_a = start_time.elapsed(); @@ -822,7 +824,7 @@ mod tests { let infohash = sample_info_hash(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&infohash, &leecher()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&infohash, &leecher()); let swarm_metadata = in_memory_torrent_repository.get_swarm_metadata(&infohash); diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index 2d072cf05..1d06b2945 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -231,7 +231,7 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - let _stats_updated = 
in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); let torrent_info = get_torrent_info(&in_memory_torrent_repository, &info_hash).unwrap(); @@ -275,7 +275,7 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); @@ -300,8 +300,8 @@ mod tests { let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer()); let offset = 0; let limit = 1; @@ -321,8 +321,8 @@ mod tests { let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer()); let offset = 1; let limit = 4000; @@ -347,11 
+347,11 @@ mod tests { let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash1 = InfoHash::from_str(&hash1).unwrap(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer()); let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 850e81d18..a04773134 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -31,7 +31,7 @@ where /// Add a torrent to the tracker #[allow(dead_code)] pub fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let _stats_updated = self + let _number_of_downloads_increased = self .container .tracker_core_container .in_memory_torrent_repository diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 071261164..c30101678 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -366,7 +366,8 @@ mod tests { .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv6); + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv6); } async fn 
announce_a_new_peer_using_ipv4( @@ -677,7 +678,8 @@ mod tests { .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv4); + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv4); } async fn announce_a_new_peer_using_ipv6( diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index 0adf10637..fb17ecc97 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -166,7 +166,7 @@ mod tests { .with_number_of_bytes_left(0) .into(); - let _stats_updated = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer); } fn build_scrape_request(remote_addr: &SocketAddr, info_hash: &InfoHash) -> ScrapeRequest { From 6beec3a01cf3623cb89cdd9fadafd8ad6da88041 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 5 Mar 2025 07:52:03 +0000 Subject: [PATCH 372/802] refactor: [#1264] add a database method to increase the number of downloads for a torrent --- .../tracker-core/src/databases/driver/mod.rs | 15 +++++ .../src/databases/driver/mysql.rs | 38 ++++++++----- .../src/databases/driver/sqlite.rs | 57 ++++++++++++------- packages/tracker-core/src/databases/error.rs | 9 +++ packages/tracker-core/src/databases/mod.rs | 13 +++++ 5 files changed, 100 insertions(+), 32 deletions(-) diff --git a/packages/tracker-core/src/databases/driver/mod.rs b/packages/tracker-core/src/databases/driver/mod.rs index bd15f8e27..2cedab2d7 100644 --- a/packages/tracker-core/src/databases/driver/mod.rs +++ b/packages/tracker-core/src/databases/driver/mod.rs @@ -99,6 +99,7 @@ pub(crate) mod tests { 
handling_torrent_persistence::it_should_save_and_load_persistent_torrents(driver); handling_torrent_persistence::it_should_load_all_persistent_torrents(driver); + handling_torrent_persistence::it_should_increase_the_number_of_downloads_for_a_given_torrent(driver); // Authentication keys (for private trackers) @@ -177,6 +178,20 @@ pub(crate) mod tests { assert_eq!(torrents.len(), 1); assert_eq!(torrents.get(&infohash), Some(number_of_downloads).as_ref()); } + + pub fn it_should_increase_the_number_of_downloads_for_a_given_torrent(driver: &Arc>) { + let infohash = sample_info_hash(); + + let number_of_downloads = 1; + + driver.save_persistent_torrent(&infohash, number_of_downloads).unwrap(); + + driver.increase_number_of_downloads(&infohash).unwrap(); + + let number_of_downloads = driver.load_persistent_torrent(&infohash).unwrap().unwrap(); + + assert_eq!(number_of_downloads, 2); + } } mod handling_authentication_keys { diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index c2cf24bb1..d07f061c2 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -143,6 +143,31 @@ impl Database for Mysql { Ok(persistent_torrent) } + /// Refer to [`databases::Database::save_persistent_torrent`](crate::core::databases::Database::save_persistent_torrent). + fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + const COMMAND : &str = "INSERT INTO torrents (info_hash, completed) VALUES (:info_hash_str, :completed) ON DUPLICATE KEY UPDATE completed = VALUES(completed)"; + + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let info_hash_str = info_hash.to_string(); + + Ok(conn.exec_drop(COMMAND, params! { info_hash_str, completed })?) + } + + /// Refer to [`databases::Database::increase_number_of_downloads`](crate::core::databases::Database::increase_number_of_downloads). 
+ fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let info_hash_str = info_hash.to_string(); + + conn.exec_drop( + "UPDATE torrents SET completed = completed + 1 WHERE info_hash = :info_hash_str", + params! { info_hash_str }, + )?; + + Ok(()) + } + /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). fn load_keys(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -175,19 +200,6 @@ impl Database for Mysql { Ok(info_hashes) } - /// Refer to [`databases::Database::save_persistent_torrent`](crate::core::databases::Database::save_persistent_torrent). - fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { - const COMMAND : &str = "INSERT INTO torrents (info_hash, completed) VALUES (:info_hash_str, :completed) ON DUPLICATE KEY UPDATE completed = VALUES(completed)"; - - let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; - - let info_hash_str = info_hash.to_string(); - - tracing::debug!("{}", info_hash_str); - - Ok(conn.exec_drop(COMMAND, params! { info_hash_str, completed })?) - } - /// Refer to [`databases::Database::get_info_hash_from_whitelist`](crate::core::databases::Database::get_info_hash_from_whitelist). fn get_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; diff --git a/packages/tracker-core/src/databases/driver/sqlite.rs b/packages/tracker-core/src/databases/driver/sqlite.rs index d9a2fc8d8..ffcef3d96 100644 --- a/packages/tracker-core/src/databases/driver/sqlite.rs +++ b/packages/tracker-core/src/databases/driver/sqlite.rs @@ -141,6 +141,44 @@ impl Database for Sqlite { })) } + /// Refer to [`databases::Database::save_persistent_torrent`](crate::core::databases::Database::save_persistent_torrent). 
+ fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let insert = conn.execute( + "INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2", + [info_hash.to_string(), completed.to_string()], + )?; + + if insert == 0 { + Err(Error::InsertFailed { + location: Location::caller(), + driver: DRIVER, + }) + } else { + Ok(()) + } + } + + /// Refer to [`databases::Database::increase_number_of_downloads`](crate::core::databases::Database::increase_number_of_downloads). + fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let update = conn.execute( + "UPDATE torrents SET completed = completed + 1 WHERE info_hash = ?", + [info_hash.to_string()], + )?; + + if update == 0 { + Err(Error::UpdateFailed { + location: Location::caller(), + driver: DRIVER, + }) + } else { + Ok(()) + } + } + /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). fn load_keys(&self) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -185,25 +223,6 @@ impl Database for Sqlite { Ok(info_hashes) } - /// Refer to [`databases::Database::save_persistent_torrent`](crate::core::databases::Database::save_persistent_torrent). 
- fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { - let conn = self.pool.get().map_err(|e| (e, DRIVER))?; - - let insert = conn.execute( - "INSERT INTO torrents (info_hash, completed) VALUES (?1, ?2) ON CONFLICT(info_hash) DO UPDATE SET completed = ?2", - [info_hash.to_string(), completed.to_string()], - )?; - - if insert == 0 { - Err(Error::InsertFailed { - location: Location::caller(), - driver: DRIVER, - }) - } else { - Ok(()) - } - } - /// Refer to [`databases::Database::get_info_hash_from_whitelist`](crate::core::databases::Database::get_info_hash_from_whitelist). fn get_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; diff --git a/packages/tracker-core/src/databases/error.rs b/packages/tracker-core/src/databases/error.rs index fd9adfc22..2df2cb277 100644 --- a/packages/tracker-core/src/databases/error.rs +++ b/packages/tracker-core/src/databases/error.rs @@ -49,6 +49,15 @@ pub enum Error { driver: Driver, }, + /// Indicates a failure to update a record into the database. + /// + /// This error is raised when an insertion operation fails. + #[error("Unable to update record into {driver} database, {location}")] + UpdateFailed { + location: &'static Location<'static>, + driver: Driver, + }, + /// Indicates a failure to delete a record from the database. /// /// This error includes an error code that may be returned by the database diff --git a/packages/tracker-core/src/databases/mod.rs b/packages/tracker-core/src/databases/mod.rs index 1ffeb518f..fae2ce527 100644 --- a/packages/tracker-core/src/databases/mod.rs +++ b/packages/tracker-core/src/databases/mod.rs @@ -126,6 +126,19 @@ pub trait Database: Sync + Send { /// Returns an [`Error`] if the metrics cannot be saved. fn save_persistent_torrent(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error>; + /// Increases the number of downloads for a given torrent. 
+ /// + /// # Arguments + /// + /// * `info_hash` - A reference to the torrent's info hash. + /// + /// # Context: Torrent Metrics + /// + /// # Errors + /// + /// Returns an [`Error`] if the query failed. + fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error>; + // Whitelist /// Loads the whitelisted torrents from the database. From 8f67f129fbe4a4c917b986d0b0eabc947f334906 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 5 Mar 2025 08:41:12 +0000 Subject: [PATCH 373/802] fix: [#1264] partially. The correct number of downloads is persited However, we still have to load the value counter from the database the first time the otrrent is added to the repository. --- packages/tracker-core/src/announce_handler.rs | 45 ++++++------------- .../src/databases/driver/sqlite.rs | 11 +---- packages/tracker-core/src/databases/mod.rs | 3 ++ packages/tracker-core/src/error.rs | 4 ++ .../src/torrent/repository/persisted.rs | 34 +++++++++++++- 5 files changed, 56 insertions(+), 41 deletions(-) diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 28d3b252f..e125adb66 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -97,7 +97,6 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::{Core, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use super::torrent::repository::in_memory::InMemoryTorrentRepository; use super::torrent::repository::persisted::DatabasePersistentTorrentRepository; @@ -164,45 +163,29 @@ impl AnnounceHandler { ) -> Result { self.whitelist_authorization.authorize(info_hash).await?; - tracing::debug!("Before: {peer:?}"); peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip)); - tracing::debug!("After: {peer:?}"); - let 
stats = self.upsert_peer_and_get_stats(info_hash, peer); - - let peers = self - .in_memory_torrent_repository - .get_peers_for(info_hash, peer, peers_wanted.limit()); - - Ok(AnnounceData { - peers, - stats, - policy: self.config.announce_policy, - }) - } - - /// Updates the torrent data in memory, persists statistics if needed, and - /// returns the updated swarm stats. - #[must_use] - fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { let number_of_downloads_increased = self.in_memory_torrent_repository.upsert_peer(info_hash, peer); - let swarm_metadata = self.in_memory_torrent_repository.get_swarm_metadata(info_hash); - - if number_of_downloads_increased { - self.persist_stats(info_hash, &swarm_metadata); + if self.config.tracker_policy.persistent_torrent_completed_stat && number_of_downloads_increased { + self.db_torrent_repository.increase_number_of_downloads(info_hash)?; } - swarm_metadata + Ok(self.build_announce_data(info_hash, peer, peers_wanted)) } - /// Persists torrent statistics to the database if persistence is enabled. - fn persist_stats(&self, info_hash: &InfoHash, swarm_metadata: &SwarmMetadata) { - if self.config.tracker_policy.persistent_torrent_completed_stat { - let completed = swarm_metadata.downloaded; - let info_hash = *info_hash; + /// Builds the announce data for the peer making the request. 
+ fn build_announce_data(&self, info_hash: &InfoHash, peer: &peer::Peer, peers_wanted: &PeersWanted) -> AnnounceData { + let peers = self + .in_memory_torrent_repository + .get_peers_for(info_hash, peer, peers_wanted.limit()); - drop(self.db_torrent_repository.save(&info_hash, completed)); + let swarm_metadata = self.in_memory_torrent_repository.get_swarm_metadata(info_hash); + + AnnounceData { + peers, + stats: swarm_metadata, + policy: self.config.announce_policy, } } } diff --git a/packages/tracker-core/src/databases/driver/sqlite.rs b/packages/tracker-core/src/databases/driver/sqlite.rs index ffcef3d96..d36f24f8b 100644 --- a/packages/tracker-core/src/databases/driver/sqlite.rs +++ b/packages/tracker-core/src/databases/driver/sqlite.rs @@ -164,19 +164,12 @@ impl Database for Sqlite { fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; - let update = conn.execute( + let _ = conn.execute( "UPDATE torrents SET completed = completed + 1 WHERE info_hash = ?", [info_hash.to_string()], )?; - if update == 0 { - Err(Error::UpdateFailed { - location: Location::caller(), - driver: DRIVER, - }) - } else { - Ok(()) - } + Ok(()) } /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). diff --git a/packages/tracker-core/src/databases/mod.rs b/packages/tracker-core/src/databases/mod.rs index fae2ce527..2703ab8bf 100644 --- a/packages/tracker-core/src/databases/mod.rs +++ b/packages/tracker-core/src/databases/mod.rs @@ -128,6 +128,9 @@ pub trait Database: Sync + Send { /// Increases the number of downloads for a given torrent. /// + /// It does not create a new entry if the torrent is not found and it does + /// not return an error. + /// /// # Arguments /// /// * `info_hash` - A reference to the torrent's info hash. 
diff --git a/packages/tracker-core/src/error.rs b/packages/tracker-core/src/error.rs index 0b94483eb..4a35e9a0b 100644 --- a/packages/tracker-core/src/error.rs +++ b/packages/tracker-core/src/error.rs @@ -66,6 +66,10 @@ pub enum AnnounceError { /// Wraps errors related to torrent whitelisting. #[error("Whitelist error: {0}")] Whitelist(#[from] WhitelistError), + + /// Wraps errors related to database. + #[error("Database error: {0}")] + Database(#[from] databases::error::Error), } /// Errors related to scrape requests. diff --git a/packages/tracker-core/src/torrent/repository/persisted.rs b/packages/tracker-core/src/torrent/repository/persisted.rs index 89d931bb2..dec571baf 100644 --- a/packages/tracker-core/src/torrent/repository/persisted.rs +++ b/packages/tracker-core/src/torrent/repository/persisted.rs @@ -47,6 +47,26 @@ impl DatabasePersistentTorrentRepository { } } + /// Increases the number of downloads for a given torrent. + /// + /// If the torrent is not found, it creates a new entry. + /// + /// # Arguments + /// + /// * `info_hash` - The info hash of the torrent. + /// + /// # Errors + /// + /// Returns an [`Error`] if the database operation fails. + pub(crate) fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error> { + let torrent = self.load(info_hash)?; + + match torrent { + Some(_number_of_downloads) => self.database.increase_number_of_downloads(info_hash), + None => self.save(info_hash, 1), + } + } + /// Loads all persistent torrent metrics from the database. /// /// This function retrieves the torrent metrics (e.g., download counts) from the persistent store @@ -67,7 +87,6 @@ impl DatabasePersistentTorrentRepository { /// # Errors /// /// Returns an [`Error`] if the underlying database query fails. 
- #[allow(dead_code)] pub(crate) fn load(&self, info_hash: &InfoHash) -> Result, Error> { self.database.load_persistent_torrent(info_hash) } @@ -118,6 +137,19 @@ mod tests { assert_eq!(torrents.get(&infohash), Some(1).as_ref()); } + #[test] + fn it_increases_the_numbers_of_downloads_for_a_torrent_into_the_database() { + let repository = initialize_db_persistent_torrent_repository(); + + let infohash = sample_info_hash(); + + repository.increase_number_of_downloads(&infohash).unwrap(); + + let torrents = repository.load_all().unwrap(); + + assert_eq!(torrents.get(&infohash), Some(1).as_ref()); + } + #[test] fn it_loads_the_numbers_of_downloads_for_all_torrents_from_the_database() { let repository = initialize_db_persistent_torrent_repository(); From 06ef1d5fc6e3f0f06869d94e2b5e963bb6fa5fca Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 5 Mar 2025 13:10:28 +0000 Subject: [PATCH 374/802] fix: [#1264] number of downloads preset when torrent is persisted This fixed a bug: the number of downloads for a torrent is not loaded from the database when the torrent is added to the in-memory torrent repository. It should be done when stats are enabled with the configuration option: persistent_torrent_completed_stat = true The patch is applied only to the torrent repository implementation we are using in production (`CrossbeamSkipList`). The other implementations have this comment: ``` // todo: load persistent torrent data if provided ``` It was not implemented for the others because I'm considering taking the counter (for the number of downloads) out of the in-memory repository. And increase it by using events triggered from the core tracker. I will open a new issue for that. If that's implemented we will need to remove this patch for the repository, meaning reverting changes in this commit.
--- .../src/environment.rs | 2 +- .../src/environment.rs | 2 +- .../benches/helpers/asyn.rs | 12 ++-- .../benches/helpers/sync.rs | 12 ++-- .../src/repository/dash_map_mutex_std.rs | 6 +- .../torrent-repository/src/repository/mod.rs | 11 ++- .../src/repository/rw_lock_std.rs | 6 +- .../src/repository/rw_lock_std_mutex_std.rs | 6 +- .../src/repository/rw_lock_std_mutex_tokio.rs | 11 ++- .../src/repository/rw_lock_tokio.rs | 11 ++- .../src/repository/rw_lock_tokio_mutex_std.rs | 11 ++- .../repository/rw_lock_tokio_mutex_tokio.rs | 11 ++- .../src/repository/skip_map_mutex_std.rs | 49 +++++++++++-- .../torrent-repository/tests/common/repo.rs | 29 ++++---- .../tests/repository/mod.rs | 4 +- packages/tracker-core/src/announce_handler.rs | 10 ++- packages/tracker-core/src/torrent/manager.rs | 4 +- .../src/torrent/repository/in_memory.rs | 68 +++++++++++-------- packages/tracker-core/src/torrent/services.rs | 18 ++--- .../udp-tracker-server/src/environment.rs | 2 +- .../src/handlers/announce.rs | 4 +- .../udp-tracker-server/src/handlers/scrape.rs | 2 +- 22 files changed, 197 insertions(+), 94 deletions(-) diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index b7cabad0e..81f0a1ef3 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -26,7 +26,7 @@ impl Environment { .container .tracker_core_container .in_memory_torrent_repository - .upsert_peer(info_hash, peer); + .upsert_peer(info_hash, peer, None); } } diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index f130e24f0..c2d89e064 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -37,7 +37,7 @@ where .container .tracker_core_container .in_memory_torrent_repository - .upsert_peer(info_hash, peer); + 
.upsert_peer(info_hash, peer, None); } } diff --git a/packages/torrent-repository/benches/helpers/asyn.rs b/packages/torrent-repository/benches/helpers/asyn.rs index dec3984c6..fc6b3ffb0 100644 --- a/packages/torrent-repository/benches/helpers/asyn.rs +++ b/packages/torrent-repository/benches/helpers/asyn.rs @@ -18,7 +18,7 @@ where let info_hash = InfoHash::default(); - torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER).await; + torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER, None).await; torrent_repository.get_swarm_metadata(&info_hash).await; } @@ -37,7 +37,7 @@ where let handles = FuturesUnordered::new(); // Add the torrent/peer to the torrent repository - torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER).await; + torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER, None).await; torrent_repository.get_swarm_metadata(&info_hash).await; @@ -47,7 +47,7 @@ where let torrent_repository_clone = torrent_repository.clone(); let handle = runtime.spawn(async move { - torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER).await; + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER, None).await; torrent_repository_clone.get_swarm_metadata(&info_hash).await; @@ -87,7 +87,7 @@ where let torrent_repository_clone = torrent_repository.clone(); let handle = runtime.spawn(async move { - torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER).await; + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER, None).await; torrent_repository_clone.get_swarm_metadata(&info_hash).await; @@ -123,7 +123,7 @@ where // Add the torrents/peers to the torrent repository for info_hash in &info_hashes { - torrent_repository.upsert_peer(info_hash, &DEFAULT_PEER).await; + torrent_repository.upsert_peer(info_hash, &DEFAULT_PEER, None).await; torrent_repository.get_swarm_metadata(info_hash).await; } @@ -133,7 +133,7 @@ where let torrent_repository_clone = torrent_repository.clone(); let handle = runtime.spawn(async move { - 
torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER).await; + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER, None).await; torrent_repository_clone.get_swarm_metadata(&info_hash).await; if let Some(sleep_time) = sleep { diff --git a/packages/torrent-repository/benches/helpers/sync.rs b/packages/torrent-repository/benches/helpers/sync.rs index 048e709bc..e00401446 100644 --- a/packages/torrent-repository/benches/helpers/sync.rs +++ b/packages/torrent-repository/benches/helpers/sync.rs @@ -20,7 +20,7 @@ where let info_hash = InfoHash::default(); - torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER); + torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER, None); torrent_repository.get_swarm_metadata(&info_hash); } @@ -39,7 +39,7 @@ where let handles = FuturesUnordered::new(); // Add the torrent/peer to the torrent repository - torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER); + torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER, None); torrent_repository.get_swarm_metadata(&info_hash); @@ -49,7 +49,7 @@ where let torrent_repository_clone = torrent_repository.clone(); let handle = runtime.spawn(async move { - torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER); + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER, None); torrent_repository_clone.get_swarm_metadata(&info_hash); @@ -89,7 +89,7 @@ where let torrent_repository_clone = torrent_repository.clone(); let handle = runtime.spawn(async move { - torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER); + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER, None); torrent_repository_clone.get_swarm_metadata(&info_hash); @@ -125,7 +125,7 @@ where // Add the torrents/peers to the torrent repository for info_hash in &info_hashes { - torrent_repository.upsert_peer(info_hash, &DEFAULT_PEER); + torrent_repository.upsert_peer(info_hash, &DEFAULT_PEER, None); torrent_repository.get_swarm_metadata(info_hash); } @@ -135,7 +135,7 @@ 
where let torrent_repository_clone = torrent_repository.clone(); let handle = runtime.spawn(async move { - torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER); + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER, None); torrent_repository_clone.get_swarm_metadata(&info_hash); if let Some(sleep_time) = sleep { diff --git a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs index 731280486..9e2b5cc59 100644 --- a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs @@ -6,7 +6,7 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -23,7 +23,9 @@ where EntryMutexStd: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { + // todo: load persistent torrent data if provided + if let Some(entry) = self.torrents.get(info_hash) { entry.upsert_peer(peer) } else { diff --git a/packages/torrent-repository/src/repository/mod.rs b/packages/torrent-repository/src/repository/mod.rs index bfb7f20f4..16ebdf3c1 100644 --- a/packages/torrent-repository/src/repository/mod.rs +++ b/packages/torrent-repository/src/repository/mod.rs @@ -3,7 +3,7 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; 
use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; pub mod dash_map_mutex_std; pub mod rw_lock_std; @@ -24,7 +24,7 @@ pub trait Repository: Debug + Default + Sized + 'static { fn remove(&self, key: &InfoHash) -> Option; fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); fn remove_peerless_torrents(&self, policy: &TrackerPolicy); - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool; + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option) -> bool; fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option; } @@ -37,6 +37,11 @@ pub trait RepositoryAsync: Debug + Default + Sized + 'static { fn remove(&self, key: &InfoHash) -> impl std::future::Future> + Send; fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future + Send; fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> impl std::future::Future + Send; - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> impl std::future::Future + Send; + fn upsert_peer( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + opt_persistent_torrent: Option, + ) -> impl std::future::Future + Send; fn get_swarm_metadata(&self, info_hash: &InfoHash) -> impl std::future::Future> + Send; } diff --git a/packages/torrent-repository/src/repository/rw_lock_std.rs b/packages/torrent-repository/src/repository/rw_lock_std.rs index 2ff757654..7038b0b38 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std.rs @@ -3,7 +3,7 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use 
torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -46,7 +46,9 @@ impl Repository for TorrentsRwLockStd where EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { + // todo: load persistent torrent data if provided + let mut db = self.get_torrents_mut(); let entry = db.entry(*info_hash).or_insert(EntrySingle::default()); diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs index 1f1155df5..a9958bd7c 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs @@ -5,7 +5,7 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -33,7 +33,9 @@ where EntryMutexStd: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { + // todo: load persistent torrent data if provided + let maybe_entry = self.get_torrents().get(info_hash).cloned(); let entry = if let Some(entry) = maybe_entry { diff 
--git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs index f8fd2871d..deba42b67 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs @@ -9,7 +9,7 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -37,7 +37,14 @@ where EntryMutexTokio: EntryAsync, EntrySingle: Entry, { - async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + async fn upsert_peer( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + _opt_persistent_torrent: Option, + ) -> bool { + // todo: load persistent torrent data if provided + let maybe_entry = self.get_torrents().get(info_hash).cloned(); let entry = if let Some(entry) = maybe_entry { diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio.rs index 964149393..bbda42f17 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio.rs @@ -3,7 +3,7 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, 
PersistentTorrent, PersistentTorrents}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -47,7 +47,14 @@ impl RepositoryAsync for TorrentsRwLockTokio where EntrySingle: Entry, { - async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + async fn upsert_peer( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + _opt_persistent_torrent: Option, + ) -> bool { + // todo: load persistent torrent data if provided + let mut db = self.get_torrents_mut().await; let entry = db.entry(*info_hash).or_insert(EntrySingle::default()); diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs index c4541dea2..551c1c5ec 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs @@ -5,7 +5,7 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -35,7 +35,14 @@ where EntryMutexStd: EntrySync, EntrySingle: Entry, { - async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + async fn upsert_peer( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + _opt_persistent_torrent: Option, + ) -> bool { + // todo: load persistent torrent data if provided + let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); let entry = if let Some(entry) = maybe_entry { diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs 
b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs index ff1e77cda..3ac859ab0 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -5,7 +5,7 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -35,7 +35,14 @@ where EntryMutexTokio: EntryAsync, EntrySingle: Entry, { - async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + async fn upsert_peer( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + _opt_persistent_torrent: Option, + ) -> bool { + // todo: load persistent torrent data if provided + let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); let entry = if let Some(entry) = maybe_entry { diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs index 7a4e4afb9..2c4ff5ce7 100644 --- a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs @@ -6,7 +6,7 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use 
super::Repository; use crate::entry::peer_list::PeerList; @@ -23,9 +23,42 @@ where EntryMutexStd: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { - let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); - entry.value().upsert_peer(peer) + /// Upsert a peer into the swarm of a torrent. + /// + /// Optionally, it can also preset the number of downloads of the torrent + /// only if it's the first time the torrent is being inserted. + /// + /// # Arguments + /// + /// * `info_hash` - The info hash of the torrent. + /// * `peer` - The peer to upsert. + /// * `opt_persistent_torrent` - The optional persisted data about a torrent + /// (number of downloads for the torrent). + /// + /// # Returns + /// + /// Returns `true` if the number of downloads was increased because the peer + /// completed the download. + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option) -> bool { + if let Some(existing_entry) = self.torrents.get(info_hash) { + existing_entry.value().upsert_peer(peer) + } else { + let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { + EntryMutexStd::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: number_of_downloads, + } + .into(), + ) + } else { + EntryMutexStd::default() + }; + + let inserted_entry = self.torrents.get_or_insert(*info_hash, new_entry); + + inserted_entry.value().upsert_peer(peer) + } } fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { @@ -114,7 +147,9 @@ where EntryRwLockParkingLot: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { + // todo: load persistent torrent data if provided + let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); entry.value().upsert_peer(peer) } @@ -205,7 +240,9 @@ where 
EntryMutexParkingLot: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { + // todo: load persistent torrent data if provided + let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); entry.value().upsert_peer(peer) } diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs index 809c59d2a..65ce45f8e 100644 --- a/packages/torrent-repository/tests/common/repo.rs +++ b/packages/torrent-repository/tests/common/repo.rs @@ -3,7 +3,7 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use torrust_tracker_torrent_repository::repository::{Repository as _, RepositoryAsync as _}; use torrust_tracker_torrent_repository::{ EntrySingle, TorrentsDashMapMutexStd, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, @@ -26,18 +26,23 @@ pub(crate) enum Repo { } impl Repo { - pub(crate) async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + pub(crate) async fn upsert_peer( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + opt_persistent_torrent: Option, + ) -> bool { match self { - Repo::RwLockStd(repo) => repo.upsert_peer(info_hash, peer), - Repo::RwLockStdMutexStd(repo) => repo.upsert_peer(info_hash, peer), - Repo::RwLockStdMutexTokio(repo) => repo.upsert_peer(info_hash, peer).await, - Repo::RwLockTokio(repo) => repo.upsert_peer(info_hash, peer).await, - Repo::RwLockTokioMutexStd(repo) => repo.upsert_peer(info_hash, peer).await, - 
Repo::RwLockTokioMutexTokio(repo) => repo.upsert_peer(info_hash, peer).await, - Repo::SkipMapMutexStd(repo) => repo.upsert_peer(info_hash, peer), - Repo::SkipMapMutexParkingLot(repo) => repo.upsert_peer(info_hash, peer), - Repo::SkipMapRwLockParkingLot(repo) => repo.upsert_peer(info_hash, peer), - Repo::DashMapMutexStd(repo) => repo.upsert_peer(info_hash, peer), + Repo::RwLockStd(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), + Repo::RwLockStdMutexStd(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), + Repo::RwLockStdMutexTokio(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent).await, + Repo::RwLockTokio(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent).await, + Repo::RwLockTokioMutexStd(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent).await, + Repo::RwLockTokioMutexTokio(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent).await, + Repo::SkipMapMutexStd(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), + Repo::SkipMapMutexParkingLot(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), + Repo::SkipMapRwLockParkingLot(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), + Repo::DashMapMutexStd(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), } } diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index c5cf2059c..d38208e0d 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -562,14 +562,14 @@ async fn it_should_remove_inactive_peers( // Insert the infohash and peer into the repository // and verify there is an extra torrent entry. 
{ - repo.upsert_peer(&info_hash, &peer).await; + repo.upsert_peer(&info_hash, &peer, None).await; assert_eq!(repo.get_metrics().await.torrents, entries.len() as u64 + 1); } // Insert the infohash and peer into the repository // and verify the swarm metadata was updated. { - repo.upsert_peer(&info_hash, &peer).await; + repo.upsert_peer(&info_hash, &peer, None).await; let stats = repo.get_swarm_metadata(&info_hash).await; assert_eq!( stats, diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index e125adb66..b858cae6c 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -163,9 +163,17 @@ impl AnnounceHandler { ) -> Result { self.whitelist_authorization.authorize(info_hash).await?; + let opt_persistent_torrent = if self.config.tracker_policy.persistent_torrent_completed_stat { + self.db_torrent_repository.load(info_hash)? + } else { + None + }; + peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip)); - let number_of_downloads_increased = self.in_memory_torrent_repository.upsert_peer(info_hash, peer); + let number_of_downloads_increased = + self.in_memory_torrent_repository + .upsert_peer(info_hash, peer, opt_persistent_torrent); if self.config.tracker_policy.persistent_torrent_completed_stat && number_of_downloads_increased { self.db_torrent_repository.increase_number_of_downloads(info_hash)?; diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index e4691a86f..792bb024d 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -195,7 +195,7 @@ mod tests { // Add a peer to the torrent let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = services.in_memory_torrent_repository.upsert_peer(&infohash, &peer); + let _number_of_downloads_increased = 
services.in_memory_torrent_repository.upsert_peer(&infohash, &peer, None); // Simulate the time has passed 1 second more than the max peer timeout. clock::Stopped::local_add(&Duration::from_secs( @@ -212,7 +212,7 @@ mod tests { // Add a peer to the torrent let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(infohash, &peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(infohash, &peer, None); // Remove the peer. The torrent is now peerless. in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index bec28bcc0..c3852654c 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -7,7 +7,7 @@ use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use torrust_tracker_torrent_repository::entry::EntrySync; use torrust_tracker_torrent_repository::repository::Repository; use torrust_tracker_torrent_repository::EntryMutexStd; @@ -45,8 +45,13 @@ impl InMemoryTorrentRepository { /// /// `true` if the peer stats were updated. 
#[must_use] - pub fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { - self.torrents.upsert_peer(info_hash, peer) + pub fn upsert_peer( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + opt_persistent_torrent: Option, + ) -> bool { + self.torrents.upsert_peer(info_hash, peer, opt_persistent_torrent) } /// Removes a torrent entry from the repository. @@ -268,7 +273,7 @@ mod tests { let info_hash = sample_info_hash(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); assert!(in_memory_torrent_repository.get(&info_hash).is_some()); } @@ -279,8 +284,8 @@ mod tests { let info_hash = sample_info_hash(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); assert!(in_memory_torrent_repository.get(&info_hash).is_some()); } @@ -306,7 +311,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); @@ -339,7 +344,7 @@ mod tests { event: AnnounceEvent::Completed, }; - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); } let peers = 
in_memory_torrent_repository.get_torrent_peers(&info_hash); @@ -378,7 +383,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); @@ -393,7 +398,8 @@ mod tests { let excluded_peer = sample_peer(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &excluded_peer); + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&info_hash, &excluded_peer, None); // Add 74 peers for idx in 2..=75 { @@ -407,7 +413,7 @@ mod tests { event: AnnounceEvent::Completed, }; - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); } let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); @@ -435,7 +441,7 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let info_hash = sample_info_hash(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); let _unused = in_memory_torrent_repository.remove(&info_hash); @@ -450,7 +456,7 @@ mod tests { let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); // Cut off time is 1 second after the peer was updated 
in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); @@ -466,7 +472,7 @@ mod tests { // Insert a sample peer for the torrent to force adding the torrent entry let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(info_hash, &peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(info_hash, &peer, None); // Remove the peer in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); @@ -530,7 +536,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); let torrent_entry = in_memory_torrent_repository.get(&info_hash).unwrap(); @@ -563,7 +569,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); let torrent_entries = in_memory_torrent_repository.get_paginated(None); @@ -605,12 +611,14 @@ mod tests { // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one); + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two); + let 
_number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); // Get only the first page where page size is 1 let torrent_entries = @@ -641,12 +649,14 @@ mod tests { // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one); + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two); + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); // Get only the first page where page size is 1 let torrent_entries = @@ -677,12 +687,14 @@ mod tests { // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one); + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two); + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); // Get only the first page where page size is 1 let torrent_entries = @@ -727,7 +739,8 @@ mod tests { async fn it_should_return_the_torrent_metrics_when_there_is_a_leecher() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - 
let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &leecher()); + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &leecher(), None); let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); @@ -746,7 +759,8 @@ mod tests { async fn it_should_return_the_torrent_metrics_when_there_is_a_seeder() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &seeder()); + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &seeder(), None); let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); @@ -766,7 +780,7 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &complete_peer()); + in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &complete_peer(), None); let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); @@ -788,7 +802,7 @@ mod tests { let start_time = std::time::Instant::now(); for i in 0..1_000_000 { let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&gen_seeded_infohash(&i), &leecher()); + in_memory_torrent_repository.upsert_peer(&gen_seeded_infohash(&i), &leecher(), None); } let result_a = start_time.elapsed(); @@ -824,7 +838,7 @@ mod tests { let infohash = sample_info_hash(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&infohash, &leecher()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&infohash, &leecher(), None); let swarm_metadata = in_memory_torrent_repository.get_swarm_metadata(&infohash); diff --git a/packages/tracker-core/src/torrent/services.rs 
b/packages/tracker-core/src/torrent/services.rs index 1d06b2945..88af3b570 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -231,7 +231,7 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); let torrent_info = get_torrent_info(&in_memory_torrent_repository, &info_hash).unwrap(); @@ -275,7 +275,7 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); @@ -300,8 +300,8 @@ mod tests { let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer(), None); let offset = 0; let limit = 1; @@ -321,8 +321,8 @@ mod tests { let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = 
InfoHash::from_str(&hash2).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer(), None); let offset = 1; let limit = 4000; @@ -347,11 +347,11 @@ mod tests { let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash1 = InfoHash::from_str(&hash1).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer(), None); let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer()); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer(), None); let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); @@ -399,7 +399,7 @@ mod tests { let info_hash = sample_info_hash(); - let _ = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer()); + let _ = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); let torrent_info = get_torrents(&in_memory_torrent_repository, &[info_hash]); diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index a04773134..158e39a7e 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -35,7 +35,7 @@ where .container 
.tracker_core_container .in_memory_torrent_repository - .upsert_peer(info_hash, peer); + .upsert_peer(info_hash, peer, None); } } diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index c30101678..e56e1d831 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -367,7 +367,7 @@ mod tests { .into(); let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv6); + in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv6, None); } async fn announce_a_new_peer_using_ipv4( @@ -679,7 +679,7 @@ mod tests { .into(); let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv4); + in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv4, None); } async fn announce_a_new_peer_using_ipv6( diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index fb17ecc97..c385718a2 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -166,7 +166,7 @@ mod tests { .with_number_of_bytes_left(0) .into(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer, None); } fn build_scrape_request(remote_addr: &SocketAddr, info_hash: &InfoHash) -> ScrapeRequest { From 3c9e72f0410c66b85f955e1d9ee1bdc5fdac0adc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 5 Mar 2025 16:14:05 +0000 Subject: [PATCH 375/802] chore(deps): update dependencies ```output cargo update Updating crates.io index Locking 4 packages to latest compatible versions Updating bytes v1.10.0 -> v1.10.1 Updating time v0.3.37 -> v0.3.38 Updating 
time-core v0.1.2 -> v0.1.3 Updating time-macros v0.2.19 -> v0.2.20 ``` --- Cargo.lock | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4c7524f49..1a6a09244 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -895,9 +895,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "camino" @@ -4150,9 +4150,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.37" +version = "0.3.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" +checksum = "bb041120f25f8fbe8fd2dbe4671c7c2ed74d83be2e7a77529bf7e0790ae3f472" dependencies = [ "deranged", "itoa", @@ -4165,15 +4165,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" +checksum = "765c97a5b985b7c11d7bc27fa927dc4fe6af3a6dfb021d28deb60d3bf51e76ef" [[package]] name = "time-macros" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" +checksum = "e8093bc3e81c3bc5f7879de09619d06c9a5a5e45ca44dfeeb7225bae38005c5c" dependencies = [ "num-conv", "time-core", From 99a372435eb2fb4284cba41ce892dcb395252749 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 5 Mar 2025 17:51:52 +0000 Subject: [PATCH 376/802] docs: add link to packages docs in the README --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md 
index 671a484e6..b7431e859 100644 --- a/README.md +++ b/README.md @@ -69,6 +69,8 @@ Others: ![Torrust Tracker Layers with main packages](./docs/media/packages/torrust-tracker-layers-with-packages.png) +There is also extra [documentation about the packages](./docs/packages.md). + ## Getting Started ### Container Version From 2020162422f21143aabb0826a409b908cd02b82f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 7 Mar 2025 08:36:11 +0000 Subject: [PATCH 377/802] chore: [#1243] minor changes in comments and format --- packages/primitives/src/swarm_metadata.rs | 15 ++++++++++----- packages/primitives/src/torrent_metrics.rs | 10 +++++++--- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/packages/primitives/src/swarm_metadata.rs b/packages/primitives/src/swarm_metadata.rs index ca880b54d..68d354e21 100644 --- a/packages/primitives/src/swarm_metadata.rs +++ b/packages/primitives/src/swarm_metadata.rs @@ -6,11 +6,16 @@ use derive_more::Constructor; /// See [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) #[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] pub struct SwarmMetadata { - /// (i.e `completed`): The number of peers that have ever completed downloading - pub downloaded: u32, // - /// (i.e `seeders`): The number of active peers that have completed downloading (seeders) - pub complete: u32, //seeders - /// (i.e `leechers`): The number of active peers that have not completed downloading (leechers) + /// (i.e `completed`): The number of peers that have ever completed + /// downloading a given torrent. + pub downloaded: u32, + + /// (i.e `seeders`): The number of active peers that have completed + /// downloading (seeders) a given torrent. + pub complete: u32, + + /// (i.e `leechers`): The number of active peers that have not completed + /// downloading (leechers) a given torrent. 
pub incomplete: u32, } diff --git a/packages/primitives/src/torrent_metrics.rs b/packages/primitives/src/torrent_metrics.rs index 02de02954..51c96a3ee 100644 --- a/packages/primitives/src/torrent_metrics.rs +++ b/packages/primitives/src/torrent_metrics.rs @@ -5,12 +5,16 @@ use std::ops::AddAssign; /// Metrics are aggregate values for all torrents. #[derive(Copy, Clone, Debug, PartialEq, Default)] pub struct TorrentsMetrics { - /// Total number of seeders for all torrents - pub complete: u64, - /// Total number of peers that have ever completed downloading for all torrents. + /// Total number of peers that have ever completed downloading for all + /// torrents. pub downloaded: u64, + + /// Total number of seeders for all torrents. + pub complete: u64, + /// Total number of leechers for all torrents. pub incomplete: u64, + /// Total number of torrents. pub torrents: u64, } From 144a338b9cfc4b82a0cc19be54e906684d20e8df Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 7 Mar 2025 08:55:40 +0000 Subject: [PATCH 378/802] refactor: [#1243] move and rename struct and fields (AggregateSwarmMetadata) To avoid confusion with `SwarmMetadata` - `SwarmMetadata`: metrics for one torrent. - `AggregateSwarmMetadata`: metrics for all torrents. 
--- .../src/v1/context/stats/resources.rs | 20 ++--- .../src/v1/context/stats/responses.rs | 8 +- .../src/statistics/services.rs | 8 +- packages/primitives/src/lib.rs | 1 - packages/primitives/src/swarm_metadata.rs | 37 +++++++- packages/primitives/src/torrent_metrics.rs | 29 ------ .../src/statistics/services.rs | 8 +- .../src/repository/dash_map_mutex_std.rs | 15 ++-- .../torrent-repository/src/repository/mod.rs | 7 +- .../src/repository/rw_lock_std.rs | 15 ++-- .../src/repository/rw_lock_std_mutex_std.rs | 15 ++-- .../src/repository/rw_lock_std_mutex_tokio.rs | 15 ++-- .../src/repository/rw_lock_tokio.rs | 15 ++-- .../src/repository/rw_lock_tokio_mutex_std.rs | 15 ++-- .../repository/rw_lock_tokio_mutex_tokio.rs | 15 ++-- .../src/repository/skip_map_mutex_std.rs | 39 ++++---- .../torrent-repository/tests/common/repo.rs | 5 +- .../tests/repository/mod.rs | 20 ++--- .../src/torrent/repository/in_memory.rs | 89 +++++++++---------- .../src/statistics/services.rs | 8 +- .../src/statistics/services.rs | 8 +- 21 files changed, 191 insertions(+), 201 deletions(-) delete mode 100644 packages/primitives/src/torrent_metrics.rs diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs index 9a82593c7..9ed61cc6b 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs @@ -79,10 +79,10 @@ pub struct Stats { impl From for Stats { fn from(metrics: TrackerMetrics) -> Self { Self { - torrents: metrics.torrents_metrics.torrents, - seeders: metrics.torrents_metrics.complete, - completed: metrics.torrents_metrics.downloaded, - leechers: metrics.torrents_metrics.incomplete, + torrents: metrics.torrents_metrics.total_torrents, + seeders: metrics.torrents_metrics.total_complete, + completed: metrics.torrents_metrics.total_downloaded, + leechers: 
metrics.torrents_metrics.total_incomplete, // TCP tcp4_connections_handled: metrics.protocol_metrics.tcp4_connections_handled, tcp4_announces_handled: metrics.protocol_metrics.tcp4_announces_handled, @@ -119,7 +119,7 @@ impl From for Stats { mod tests { use torrust_rest_tracker_api_core::statistics::metrics::Metrics; use torrust_rest_tracker_api_core::statistics::services::TrackerMetrics; - use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; use super::Stats; @@ -127,11 +127,11 @@ mod tests { fn stats_resource_should_be_converted_from_tracker_metrics() { assert_eq!( Stats::from(TrackerMetrics { - torrents_metrics: TorrentsMetrics { - complete: 1, - downloaded: 2, - incomplete: 3, - torrents: 4 + torrents_metrics: AggregateSwarmMetadata { + total_complete: 1, + total_downloaded: 2, + total_incomplete: 3, + total_torrents: 4 }, protocol_metrics: Metrics { // TCP diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/responses.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/responses.rs index 61455178c..6d279726c 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/responses.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/responses.rs @@ -16,10 +16,10 @@ pub fn stats_response(tracker_metrics: TrackerMetrics) -> Response { pub fn metrics_response(tracker_metrics: &TrackerMetrics) -> Response { let mut lines = vec![]; - lines.push(format!("torrents {}", tracker_metrics.torrents_metrics.torrents)); - lines.push(format!("seeders {}", tracker_metrics.torrents_metrics.complete)); - lines.push(format!("completed {}", tracker_metrics.torrents_metrics.downloaded)); - lines.push(format!("leechers {}", tracker_metrics.torrents_metrics.incomplete)); + lines.push(format!("torrents {}", tracker_metrics.torrents_metrics.total_torrents)); + lines.push(format!("seeders {}", tracker_metrics.torrents_metrics.total_complete)); + 
lines.push(format!("completed {}", tracker_metrics.torrents_metrics.total_downloaded)); + lines.push(format!("leechers {}", tracker_metrics.torrents_metrics.total_incomplete)); // TCP diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index 57806677e..f7808440a 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -23,7 +23,7 @@ use std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; use crate::statistics::metrics::Metrics; use crate::statistics::repository::Repository; @@ -34,7 +34,7 @@ pub struct TrackerMetrics { /// Domain level metrics. /// /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: TorrentsMetrics, + pub torrents_metrics: AggregateSwarmMetadata, /// Application level metrics. Usage statistics/metrics. 
/// @@ -72,7 +72,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::{self}; use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; use torrust_tracker_test_helpers::configuration; use crate::statistics; @@ -96,7 +96,7 @@ mod tests { assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: TorrentsMetrics::default(), + torrents_metrics: AggregateSwarmMetadata::default(), protocol_metrics: statistics::metrics::Metrics::default(), } ); diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index ec9732778..b50516893 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -8,7 +8,6 @@ pub mod core; pub mod pagination; pub mod peer; pub mod swarm_metadata; -pub mod torrent_metrics; use std::collections::BTreeMap; use std::time::Duration; diff --git a/packages/primitives/src/swarm_metadata.rs b/packages/primitives/src/swarm_metadata.rs index 68d354e21..792eff632 100644 --- a/packages/primitives/src/swarm_metadata.rs +++ b/packages/primitives/src/swarm_metadata.rs @@ -1,20 +1,23 @@ +use std::ops::AddAssign; + use derive_more::Constructor; /// Swarm statistics for one torrent. +/// /// Swarm metadata dictionary in the scrape response. /// /// See [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) #[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] pub struct SwarmMetadata { - /// (i.e `completed`): The number of peers that have ever completed + /// (i.e `completed`): The number of peers that have ever completed /// downloading a given torrent. pub downloaded: u32, - /// (i.e `seeders`): The number of active peers that have completed + /// (i.e `seeders`): The number of active peers that have completed /// downloading (seeders) a given torrent. 
pub complete: u32, - /// (i.e `leechers`): The number of active peers that have not completed + /// (i.e `leechers`): The number of active peers that have not completed /// downloading (leechers) a given torrent. pub incomplete: u32, } @@ -25,3 +28,31 @@ impl SwarmMetadata { Self::default() } } + +/// Structure that holds aggregate swarm metadata. +/// +/// Metrics are aggregate values for all torrents. +#[derive(Copy, Clone, Debug, PartialEq, Default)] +pub struct AggregateSwarmMetadata { + /// Total number of peers that have ever completed downloading for all + /// torrents. + pub total_downloaded: u64, + + /// Total number of seeders for all torrents. + pub total_complete: u64, + + /// Total number of leechers for all torrents. + pub total_incomplete: u64, + + /// Total number of torrents. + pub total_torrents: u64, +} + +impl AddAssign for AggregateSwarmMetadata { + fn add_assign(&mut self, rhs: Self) { + self.total_complete += rhs.total_complete; + self.total_downloaded += rhs.total_downloaded; + self.total_incomplete += rhs.total_incomplete; + self.total_torrents += rhs.total_torrents; + } +} diff --git a/packages/primitives/src/torrent_metrics.rs b/packages/primitives/src/torrent_metrics.rs deleted file mode 100644 index 51c96a3ee..000000000 --- a/packages/primitives/src/torrent_metrics.rs +++ /dev/null @@ -1,29 +0,0 @@ -use std::ops::AddAssign; - -/// Structure that holds general `Tracker` torrents metrics. -/// -/// Metrics are aggregate values for all torrents. -#[derive(Copy, Clone, Debug, PartialEq, Default)] -pub struct TorrentsMetrics { - /// Total number of peers that have ever completed downloading for all - /// torrents. - pub downloaded: u64, - - /// Total number of seeders for all torrents. - pub complete: u64, - - /// Total number of leechers for all torrents. - pub incomplete: u64, - - /// Total number of torrents. 
- pub torrents: u64, -} - -impl AddAssign for TorrentsMetrics { - fn add_assign(&mut self, rhs: Self) { - self.complete += rhs.complete; - self.downloaded += rhs.downloaded; - self.incomplete += rhs.incomplete; - self.torrents += rhs.torrents; - } -} diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index c4dfcf533..ea4e159b6 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -4,7 +4,7 @@ use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepo use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self, statistics as udp_core_statistics}; use tokio::sync::RwLock; -use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; use torrust_udp_tracker_server::statistics as udp_server_statistics; use crate::statistics::metrics::Metrics; @@ -15,7 +15,7 @@ pub struct TrackerMetrics { /// Domain level metrics. /// /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: TorrentsMetrics, + pub torrents_metrics: AggregateSwarmMetadata, /// Application level metrics. Usage statistics/metrics. 
/// @@ -83,7 +83,7 @@ mod tests { use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; use tokio::sync::RwLock; use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; use torrust_tracker_test_helpers::configuration; use crate::statistics::metrics::Metrics; @@ -127,7 +127,7 @@ mod tests { assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: TorrentsMetrics::default(), + torrents_metrics: AggregateSwarmMetadata::default(), protocol_metrics: Metrics::default(), } ); diff --git a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs index 9e2b5cc59..d4a84caa0 100644 --- a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs @@ -4,8 +4,7 @@ use bittorrent_primitives::info_hash::InfoHash; use dashmap::DashMap; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use super::Repository; @@ -47,15 +46,15 @@ where maybe_entry.map(|entry| entry.clone()) } - fn get_metrics(&self) -> TorrentsMetrics { - let mut metrics = TorrentsMetrics::default(); + fn get_metrics(&self) -> AggregateSwarmMetadata { + let mut metrics = AggregateSwarmMetadata::default(); for entry in &self.torrents { let stats = entry.value().lock().expect("it should get a lock").get_swarm_metadata(); - metrics.complete += u64::from(stats.complete); - metrics.downloaded += u64::from(stats.downloaded); - metrics.incomplete += 
u64::from(stats.incomplete); - metrics.torrents += 1; + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; } metrics diff --git a/packages/torrent-repository/src/repository/mod.rs b/packages/torrent-repository/src/repository/mod.rs index 16ebdf3c1..9284ff6e6 100644 --- a/packages/torrent-repository/src/repository/mod.rs +++ b/packages/torrent-repository/src/repository/mod.rs @@ -1,8 +1,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; pub mod dash_map_mutex_std; @@ -18,7 +17,7 @@ use std::fmt::Debug; pub trait Repository: Debug + Default + Sized + 'static { fn get(&self, key: &InfoHash) -> Option; - fn get_metrics(&self) -> TorrentsMetrics; + fn get_metrics(&self) -> AggregateSwarmMetadata; fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, T)>; fn import_persistent(&self, persistent_torrents: &PersistentTorrents); fn remove(&self, key: &InfoHash) -> Option; @@ -31,7 +30,7 @@ pub trait Repository: Debug + Default + Sized + 'static { #[allow(clippy::module_name_repetitions)] pub trait RepositoryAsync: Debug + Default + Sized + 'static { fn get(&self, key: &InfoHash) -> impl std::future::Future> + Send; - fn get_metrics(&self) -> impl std::future::Future + Send; + fn get_metrics(&self) -> impl std::future::Future + Send; fn get_paginated(&self, pagination: Option<&Pagination>) -> impl std::future::Future> + Send; fn import_persistent(&self, persistent_torrents: 
&PersistentTorrents) -> impl std::future::Future + Send; fn remove(&self, key: &InfoHash) -> impl std::future::Future> + Send; diff --git a/packages/torrent-repository/src/repository/rw_lock_std.rs b/packages/torrent-repository/src/repository/rw_lock_std.rs index 7038b0b38..d190718af 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std.rs @@ -1,8 +1,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use super::Repository; @@ -65,15 +64,15 @@ where db.get(key).cloned() } - fn get_metrics(&self) -> TorrentsMetrics { - let mut metrics = TorrentsMetrics::default(); + fn get_metrics(&self) -> AggregateSwarmMetadata { + let mut metrics = AggregateSwarmMetadata::default(); for entry in self.get_torrents().values() { let stats = entry.get_swarm_metadata(); - metrics.complete += u64::from(stats.complete); - metrics.downloaded += u64::from(stats.downloaded); - metrics.incomplete += u64::from(stats.incomplete); - metrics.torrents += 1; + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; } metrics diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs index a9958bd7c..1764b94e8 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs @@ 
-3,8 +3,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use super::Repository; @@ -60,15 +59,15 @@ where db.get(key).cloned() } - fn get_metrics(&self) -> TorrentsMetrics { - let mut metrics = TorrentsMetrics::default(); + fn get_metrics(&self) -> AggregateSwarmMetadata { + let mut metrics = AggregateSwarmMetadata::default(); for entry in self.get_torrents().values() { let stats = entry.lock().expect("it should get a lock").get_swarm_metadata(); - metrics.complete += u64::from(stats.complete); - metrics.downloaded += u64::from(stats.downloaded); - metrics.incomplete += u64::from(stats.incomplete); - metrics.torrents += 1; + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; } metrics diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs index deba42b67..116c1ff87 100644 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs @@ -7,8 +7,7 @@ use futures::future::join_all; use futures::{Future, FutureExt}; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use 
torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use super::RepositoryAsync; @@ -86,17 +85,17 @@ where } } - async fn get_metrics(&self) -> TorrentsMetrics { - let mut metrics = TorrentsMetrics::default(); + async fn get_metrics(&self) -> AggregateSwarmMetadata { + let mut metrics = AggregateSwarmMetadata::default(); let entries: Vec<_> = self.get_torrents().values().cloned().collect(); for entry in entries { let stats = entry.lock().await.get_swarm_metadata(); - metrics.complete += u64::from(stats.complete); - metrics.downloaded += u64::from(stats.downloaded); - metrics.incomplete += u64::from(stats.incomplete); - metrics.torrents += 1; + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; } metrics diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio.rs index bbda42f17..53838023d 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio.rs @@ -1,8 +1,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use super::RepositoryAsync; @@ -85,15 +84,15 @@ where } } - async fn get_metrics(&self) -> TorrentsMetrics { - let mut metrics = TorrentsMetrics::default(); + async fn get_metrics(&self) -> 
AggregateSwarmMetadata { + let mut metrics = AggregateSwarmMetadata::default(); for entry in self.get_torrents().await.values() { let stats = entry.get_swarm_metadata(); - metrics.complete += u64::from(stats.complete); - metrics.downloaded += u64::from(stats.downloaded); - metrics.incomplete += u64::from(stats.incomplete); - metrics.torrents += 1; + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; } metrics diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs index 551c1c5ec..eb7e300fd 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs @@ -3,8 +3,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use super::RepositoryAsync; @@ -79,15 +78,15 @@ where } } - async fn get_metrics(&self) -> TorrentsMetrics { - let mut metrics = TorrentsMetrics::default(); + async fn get_metrics(&self) -> AggregateSwarmMetadata { + let mut metrics = AggregateSwarmMetadata::default(); for entry in self.get_torrents().await.values() { let stats = entry.get_swarm_metadata(); - metrics.complete += u64::from(stats.complete); - metrics.downloaded += u64::from(stats.downloaded); - metrics.incomplete += u64::from(stats.incomplete); - metrics.torrents += 1; + metrics.total_complete += 
u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; } metrics diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs index 3ac859ab0..c8ebaf4d6 100644 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -3,8 +3,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use super::RepositoryAsync; @@ -82,15 +81,15 @@ where } } - async fn get_metrics(&self) -> TorrentsMetrics { - let mut metrics = TorrentsMetrics::default(); + async fn get_metrics(&self) -> AggregateSwarmMetadata { + let mut metrics = AggregateSwarmMetadata::default(); for entry in self.get_torrents().await.values() { let stats = entry.get_swarm_metadata().await; - metrics.complete += u64::from(stats.complete); - metrics.downloaded += u64::from(stats.downloaded); - metrics.incomplete += u64::from(stats.incomplete); - metrics.torrents += 1; + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; } metrics diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs index 2c4ff5ce7..8a15a9442 100644 --- 
a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs @@ -4,8 +4,7 @@ use bittorrent_primitives::info_hash::InfoHash; use crossbeam_skiplist::SkipMap; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use super::Repository; @@ -70,15 +69,15 @@ where maybe_entry.map(|entry| entry.value().clone()) } - fn get_metrics(&self) -> TorrentsMetrics { - let mut metrics = TorrentsMetrics::default(); + fn get_metrics(&self) -> AggregateSwarmMetadata { + let mut metrics = AggregateSwarmMetadata::default(); for entry in &self.torrents { let stats = entry.value().lock().expect("it should get a lock").get_swarm_metadata(); - metrics.complete += u64::from(stats.complete); - metrics.downloaded += u64::from(stats.downloaded); - metrics.incomplete += u64::from(stats.incomplete); - metrics.torrents += 1; + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; } metrics @@ -163,15 +162,15 @@ where maybe_entry.map(|entry| entry.value().clone()) } - fn get_metrics(&self) -> TorrentsMetrics { - let mut metrics = TorrentsMetrics::default(); + fn get_metrics(&self) -> AggregateSwarmMetadata { + let mut metrics = AggregateSwarmMetadata::default(); for entry in &self.torrents { let stats = entry.value().read().get_swarm_metadata(); - metrics.complete += u64::from(stats.complete); - metrics.downloaded += u64::from(stats.downloaded); - metrics.incomplete += u64::from(stats.incomplete); - 
metrics.torrents += 1; + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; } metrics @@ -256,15 +255,15 @@ where maybe_entry.map(|entry| entry.value().clone()) } - fn get_metrics(&self) -> TorrentsMetrics { - let mut metrics = TorrentsMetrics::default(); + fn get_metrics(&self) -> AggregateSwarmMetadata { + let mut metrics = AggregateSwarmMetadata::default(); for entry in &self.torrents { let stats = entry.value().lock().get_swarm_metadata(); - metrics.complete += u64::from(stats.complete); - metrics.downloaded += u64::from(stats.downloaded); - metrics.incomplete += u64::from(stats.incomplete); - metrics.torrents += 1; + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; } metrics diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs index 65ce45f8e..224fc6aa3 100644 --- a/packages/torrent-repository/tests/common/repo.rs +++ b/packages/torrent-repository/tests/common/repo.rs @@ -1,8 +1,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use torrust_tracker_torrent_repository::repository::{Repository as _, RepositoryAsync as _}; use torrust_tracker_torrent_repository::{ @@ -76,7 +75,7 @@ impl Repo { } } - pub(crate) async fn get_metrics(&self) -> TorrentsMetrics { + pub(crate) async fn 
get_metrics(&self) -> AggregateSwarmMetadata { match self { Repo::RwLockStd(repo) => repo.get_metrics(), Repo::RwLockStdMutexStd(repo) => repo.get_metrics(), diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index d38208e0d..77977837f 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -402,19 +402,19 @@ async fn it_should_get_metrics( repo: Repo, #[case] entries: Entries, ) { - use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; make(&repo, &entries).await; - let mut metrics = TorrentsMetrics::default(); + let mut metrics = AggregateSwarmMetadata::default(); for (_, torrent) in entries { let stats = torrent.get_swarm_metadata(); - metrics.torrents += 1; - metrics.incomplete += u64::from(stats.incomplete); - metrics.complete += u64::from(stats.complete); - metrics.downloaded += u64::from(stats.downloaded); + metrics.total_torrents += 1; + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); } assert_eq!(repo.get_metrics().await, metrics); @@ -449,12 +449,12 @@ async fn it_should_import_persistent_torrents( ) { make(&repo, &entries).await; - let mut downloaded = repo.get_metrics().await.downloaded; + let mut downloaded = repo.get_metrics().await.total_downloaded; persistent_torrents.iter().for_each(|(_, d)| downloaded += u64::from(*d)); repo.import_persistent(&persistent_torrents).await; - assert_eq!(repo.get_metrics().await.downloaded, downloaded); + assert_eq!(repo.get_metrics().await.total_downloaded, downloaded); for (entry, _) in persistent_torrents { assert!(repo.get(&entry).await.is_some()); @@ -497,7 +497,7 @@ async fn it_should_remove_an_entry( assert_eq!(repo.remove(&info_hash).await, None); } - 
assert_eq!(repo.get_metrics().await.torrents, 0); + assert_eq!(repo.get_metrics().await.total_torrents, 0); } #[rstest] @@ -563,7 +563,7 @@ async fn it_should_remove_inactive_peers( // and verify there is an extra torrent entry. { repo.upsert_peer(&info_hash, &peer, None).await; - assert_eq!(repo.get_metrics().await.torrents, entries.len() as u64 + 1); + assert_eq!(repo.get_metrics().await.total_torrents, entries.len() as u64 + 1); } // Insert the infohash and peer into the repository diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index c3852654c..e09bede8e 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -5,8 +5,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use torrust_tracker_torrent_repository::entry::EntrySync; use torrust_tracker_torrent_repository::repository::Repository; @@ -208,7 +207,7 @@ impl InMemoryTorrentRepository { /// /// A [`TorrentsMetrics`] struct with the aggregated metrics. 
#[must_use] - pub fn get_torrents_metrics(&self) -> TorrentsMetrics { + pub fn get_torrents_metrics(&self) -> AggregateSwarmMetadata { self.torrents.get_metrics() } @@ -706,12 +705,12 @@ mod tests { } } - mod returning_torrent_metrics { + mod returning_aggregate_swarm_metadata { use std::sync::Arc; use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; - use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; use crate::test_helpers::tests::{complete_peer, leecher, sample_info_hash, seeder}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -719,84 +718,84 @@ mod tests { // todo: refactor to use test parametrization #[tokio::test] - async fn it_should_get_empty_torrent_metrics_when_there_are_no_torrents() { + async fn it_should_get_empty_aggregate_swarm_metadata_when_there_are_no_torrents() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); + let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); assert_eq!( - torrents_metrics, - TorrentsMetrics { - complete: 0, - downloaded: 0, - incomplete: 0, - torrents: 0 + aggregate_swarm_metadata, + AggregateSwarmMetadata { + total_complete: 0, + total_downloaded: 0, + total_incomplete: 0, + total_torrents: 0 } ); } #[tokio::test] - async fn it_should_return_the_torrent_metrics_when_there_is_a_leecher() { + async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_leecher() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &leecher(), None); - let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); + let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); assert_eq!( - 
torrent_metrics, - TorrentsMetrics { - complete: 0, - downloaded: 0, - incomplete: 1, - torrents: 1, + aggregate_swarm_metadata, + AggregateSwarmMetadata { + total_complete: 0, + total_downloaded: 0, + total_incomplete: 1, + total_torrents: 1, } ); } #[tokio::test] - async fn it_should_return_the_torrent_metrics_when_there_is_a_seeder() { + async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_seeder() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &seeder(), None); - let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); + let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); assert_eq!( - torrent_metrics, - TorrentsMetrics { - complete: 1, - downloaded: 0, - incomplete: 0, - torrents: 1, + aggregate_swarm_metadata, + AggregateSwarmMetadata { + total_complete: 1, + total_downloaded: 0, + total_incomplete: 0, + total_torrents: 1, } ); } #[tokio::test] - async fn it_should_return_the_torrent_metrics_when_there_is_a_completed_peer() { + async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_completed_peer() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &complete_peer(), None); - let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); + let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); assert_eq!( - torrent_metrics, - TorrentsMetrics { - complete: 1, - downloaded: 0, - incomplete: 0, - torrents: 1, + aggregate_swarm_metadata, + AggregateSwarmMetadata { + total_complete: 1, + total_downloaded: 0, + total_incomplete: 0, + total_torrents: 1, } ); } #[tokio::test] - async fn it_should_return_the_torrent_metrics_when_there_are_multiple_torrents() { + async fn 
it_should_return_the_aggregate_swarm_metadata_when_there_are_multiple_torrents() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let start_time = std::time::Instant::now(); @@ -807,16 +806,16 @@ mod tests { let result_a = start_time.elapsed(); let start_time = std::time::Instant::now(); - let torrent_metrics = in_memory_torrent_repository.get_torrents_metrics(); + let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); let result_b = start_time.elapsed(); assert_eq!( - (torrent_metrics), - (TorrentsMetrics { - complete: 0, - downloaded: 0, - incomplete: 1_000_000, - torrents: 1_000_000, + (aggregate_swarm_metadata), + (AggregateSwarmMetadata { + total_complete: 0, + total_downloaded: 0, + total_incomplete: 1_000_000, + total_torrents: 1_000_000, }), "{result_a:?} {result_b:?}" ); diff --git a/packages/udp-tracker-core/src/statistics/services.rs b/packages/udp-tracker-core/src/statistics/services.rs index 7ffa127e6..56814f5d5 100644 --- a/packages/udp-tracker-core/src/statistics/services.rs +++ b/packages/udp-tracker-core/src/statistics/services.rs @@ -39,7 +39,7 @@ use std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; use crate::statistics::metrics::Metrics; use crate::statistics::repository::Repository; @@ -50,7 +50,7 @@ pub struct TrackerMetrics { /// Domain level metrics. /// /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: TorrentsMetrics, + pub torrents_metrics: AggregateSwarmMetadata, /// Application level metrics. Usage statistics/metrics. 
/// @@ -88,7 +88,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::{self}; use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; use torrust_tracker_test_helpers::configuration; use crate::statistics; @@ -113,7 +113,7 @@ mod tests { assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: TorrentsMetrics::default(), + torrents_metrics: AggregateSwarmMetadata::default(), protocol_metrics: statistics::metrics::Metrics::default(), } ); diff --git a/packages/udp-tracker-server/src/statistics/services.rs b/packages/udp-tracker-server/src/statistics/services.rs index 92ee14f50..a16685077 100644 --- a/packages/udp-tracker-server/src/statistics/services.rs +++ b/packages/udp-tracker-server/src/statistics/services.rs @@ -41,7 +41,7 @@ use std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::services::banning::BanService; use tokio::sync::RwLock; -use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; use crate::statistics::metrics::Metrics; use crate::statistics::repository::Repository; @@ -52,7 +52,7 @@ pub struct TrackerMetrics { /// Domain level metrics. /// /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: TorrentsMetrics, + pub torrents_metrics: AggregateSwarmMetadata, /// Application level metrics. Usage statistics/metrics. 
/// @@ -108,7 +108,7 @@ mod tests { use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; use tokio::sync::RwLock; use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; use torrust_tracker_test_helpers::configuration; use crate::statistics; @@ -139,7 +139,7 @@ mod tests { assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: TorrentsMetrics::default(), + torrents_metrics: AggregateSwarmMetadata::default(), protocol_metrics: statistics::metrics::Metrics::default(), } ); From 89607cc8025fe048a3b9ee41c8b8082b1a8d8321 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 7 Mar 2025 11:48:50 +0000 Subject: [PATCH 379/802] refactor: [#1342] remove counter for HTTP connetions internally The number of HTTP tracker connections don't make sense. There are connection requests only in the UDP tracker. That code is removed but, in order to keep backward compatibility, the API still exposes that value which is the: number of announce requests + number of scrape requests --- .../tests/server/v1/contract.rs | 85 ------------------- .../src/v1/context/stats/resources.rs | 2 + .../src/v1/context/stats/responses.rs | 1 + .../src/services/announce.rs | 6 +- .../http-tracker-core/src/services/scrape.rs | 6 +- .../src/statistics/event/handler.rs | 48 ----------- .../src/statistics/metrics.rs | 8 -- .../src/statistics/repository.rs | 12 --- .../src/statistics/services.rs | 2 - .../src/statistics/metrics.rs | 2 + .../src/statistics/services.rs | 10 ++- 11 files changed, 15 insertions(+), 167 deletions(-) diff --git a/packages/axum-http-tracker-server/tests/server/v1/contract.rs b/packages/axum-http-tracker-server/tests/server/v1/contract.rs index 992793022..ad5b5a482 100644 --- a/packages/axum-http-tracker-server/tests/server/v1/contract.rs +++ b/packages/axum-http-tracker-server/tests/server/v1/contract.rs @@ -666,91 +666,6 @@ 
mod for_all_config_modes { compact_announce.is_ok() } - #[tokio::test] - async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - logging::setup(); - - let env = Started::new(&configuration::ephemeral_public().into()).await; - - Client::new(*env.bind_address()) - .announce(&QueryBuilder::default().query()) - .await; - - let stats = env - .container - .http_tracker_core_container - .http_stats_repository - .get_stats() - .await; - - assert_eq!(stats.tcp4_connections_handled, 1); - - drop(stats); - - env.stop().await; - } - - #[tokio::test] - async fn should_increase_the_number_of_tcp6_connections_handled_in_statistics() { - logging::setup(); - - if TcpListener::bind(SocketAddrV6::new(Ipv6Addr::LOCALHOST, 0, 0, 0)) - .await - .is_err() - { - return; // we cannot bind to a ipv6 socket, so we will skip this test - } - - let env = Started::new(&configuration::ephemeral_ipv6().into()).await; - - Client::bind(*env.bind_address(), IpAddr::from_str("::1").unwrap()) - .announce(&QueryBuilder::default().query()) - .await; - - let stats = env - .container - .http_tracker_core_container - .http_stats_repository - .get_stats() - .await; - - assert_eq!(stats.tcp6_connections_handled, 1); - - drop(stats); - - env.stop().await; - } - - #[tokio::test] - async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { - logging::setup(); - - // The tracker ignores the peer address in the request param. It uses the client remote ip address. 
- - let env = Started::new(&configuration::ephemeral_public().into()).await; - - Client::new(*env.bind_address()) - .announce( - &QueryBuilder::default() - .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) - .query(), - ) - .await; - - let stats = env - .container - .http_tracker_core_container - .http_stats_repository - .get_stats() - .await; - - assert_eq!(stats.tcp6_connections_handled, 0); - - drop(stats); - - env.stop().await; - } - #[tokio::test] async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { logging::setup(); diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs index 9ed61cc6b..d9480259e 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs @@ -77,6 +77,7 @@ pub struct Stats { } impl From for Stats { + #[allow(deprecated)] fn from(metrics: TrackerMetrics) -> Self { Self { torrents: metrics.torrents_metrics.total_torrents, @@ -124,6 +125,7 @@ mod tests { use super::Stats; #[test] + #[allow(deprecated)] fn stats_resource_should_be_converted_from_tracker_metrics() { assert_eq!( Stats::from(TrackerMetrics { diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/responses.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/responses.rs index 6d279726c..853fdd2e2 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/responses.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/responses.rs @@ -12,6 +12,7 @@ pub fn stats_response(tracker_metrics: TrackerMetrics) -> Response { } /// `200` response that contains the [`Stats`] resource in Prometheus Text Exposition Format . 
+#[allow(deprecated)] #[must_use] pub fn metrics_response(tracker_metrics: &TrackerMetrics) -> Response { let mut lines = vec![]; diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 959dcc615..896387b28 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -28,12 +28,8 @@ use crate::statistics; /// /// The service sends an statistics event that increments: /// -/// - The number of TCP connections handled by the HTTP tracker. /// - The number of TCP `announce` requests handled by the HTTP tracker. -/// -/// > **NOTICE**: as the HTTP tracker does not requires a connection request -/// > like the UDP tracker, the number of TCP connections is incremented for -/// > each `announce` request. +/// - The number of TCP `scrape` requests handled by the HTTP tracker. pub struct AnnounceService { core_config: Arc, announce_handler: Arc, diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index dcb88508c..53eed0361 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -25,13 +25,9 @@ use crate::statistics; /// /// The service sends an statistics event that increments: /// -/// - The number of TCP connections handled by the HTTP tracker. +/// - The number of TCP `announce` requests handled by the HTTP tracker. /// - The number of TCP `scrape` requests handled by the HTTP tracker. /// -/// > **NOTICE**: as the HTTP tracker does not requires a connection request -/// > like the UDP tracker, the number of TCP connections is incremented for -/// > each `scrape` request. 
-/// /// # Errors /// /// This function will return an error if: diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index af323d06b..b0a0c186f 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -6,21 +6,17 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { // TCP4 Event::Tcp4Announce => { stats_repository.increase_tcp4_announces().await; - stats_repository.increase_tcp4_connections().await; } Event::Tcp4Scrape => { stats_repository.increase_tcp4_scrapes().await; - stats_repository.increase_tcp4_connections().await; } // TCP6 Event::Tcp6Announce => { stats_repository.increase_tcp6_announces().await; - stats_repository.increase_tcp6_connections().await; } Event::Tcp6Scrape => { stats_repository.increase_tcp6_scrapes().await; - stats_repository.increase_tcp6_connections().await; } } @@ -44,17 +40,6 @@ mod tests { assert_eq!(stats.tcp4_announces_handled, 1); } - #[tokio::test] - async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_announce_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Tcp4Announce, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.tcp4_connections_handled, 1); - } - #[tokio::test] async fn should_increase_the_tcp4_scrapes_counter_when_it_receives_a_tcp4_scrape_event() { let stats_repository = Repository::new(); @@ -66,17 +51,6 @@ mod tests { assert_eq!(stats.tcp4_scrapes_handled, 1); } - #[tokio::test] - async fn should_increase_the_tcp4_connections_counter_when_it_receives_a_tcp4_scrape_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Tcp4Scrape, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.tcp4_connections_handled, 1); - } - #[tokio::test] async fn 
should_increase_the_tcp6_announces_counter_when_it_receives_a_tcp6_announce_event() { let stats_repository = Repository::new(); @@ -88,17 +62,6 @@ mod tests { assert_eq!(stats.tcp6_announces_handled, 1); } - #[tokio::test] - async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_announce_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Tcp6Announce, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.tcp6_connections_handled, 1); - } - #[tokio::test] async fn should_increase_the_tcp6_scrapes_counter_when_it_receives_a_tcp6_scrape_event() { let stats_repository = Repository::new(); @@ -109,15 +72,4 @@ mod tests { assert_eq!(stats.tcp6_scrapes_handled, 1); } - - #[tokio::test] - async fn should_increase_the_tcp6_connections_counter_when_it_receives_a_tcp6_scrape_event() { - let stats_repository = Repository::new(); - - handle_event(Event::Tcp6Scrape, &stats_repository).await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.tcp6_connections_handled, 1); - } } diff --git a/packages/http-tracker-core/src/statistics/metrics.rs b/packages/http-tracker-core/src/statistics/metrics.rs index ae4db9704..6c102770b 100644 --- a/packages/http-tracker-core/src/statistics/metrics.rs +++ b/packages/http-tracker-core/src/statistics/metrics.rs @@ -8,20 +8,12 @@ /// and also for each IP version used by the peers: IPv4 and IPv6. #[derive(Debug, PartialEq, Default)] pub struct Metrics { - /// Total number of TCP (HTTP tracker) connections from IPv4 peers. - /// Since the HTTP tracker spec does not require a handshake, this metric - /// increases for every HTTP request. - pub tcp4_connections_handled: u64, - /// Total number of TCP (HTTP tracker) `announce` requests from IPv4 peers. pub tcp4_announces_handled: u64, /// Total number of TCP (HTTP tracker) `scrape` requests from IPv4 peers. 
pub tcp4_scrapes_handled: u64, - /// Total number of TCP (HTTP tracker) connections from IPv6 peers. - pub tcp6_connections_handled: u64, - /// Total number of TCP (HTTP tracker) `announce` requests from IPv6 peers. pub tcp6_announces_handled: u64, diff --git a/packages/http-tracker-core/src/statistics/repository.rs b/packages/http-tracker-core/src/statistics/repository.rs index 41f048e29..5e15fc298 100644 --- a/packages/http-tracker-core/src/statistics/repository.rs +++ b/packages/http-tracker-core/src/statistics/repository.rs @@ -34,12 +34,6 @@ impl Repository { drop(stats_lock); } - pub async fn increase_tcp4_connections(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp4_connections_handled += 1; - drop(stats_lock); - } - pub async fn increase_tcp4_scrapes(&self) { let mut stats_lock = self.stats.write().await; stats_lock.tcp4_scrapes_handled += 1; @@ -52,12 +46,6 @@ impl Repository { drop(stats_lock); } - pub async fn increase_tcp6_connections(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp6_connections_handled += 1; - drop(stats_lock); - } - pub async fn increase_tcp6_scrapes(&self) { let mut stats_lock = self.stats.write().await; stats_lock.tcp6_scrapes_handled += 1; diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index f7808440a..dce7098b9 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -54,11 +54,9 @@ pub async fn get_metrics( torrents_metrics, protocol_metrics: Metrics { // TCPv4 - tcp4_connections_handled: stats.tcp4_connections_handled, tcp4_announces_handled: stats.tcp4_announces_handled, tcp4_scrapes_handled: stats.tcp4_scrapes_handled, // TCPv6 - tcp6_connections_handled: stats.tcp6_connections_handled, tcp6_announces_handled: stats.tcp6_announces_handled, tcp6_scrapes_handled: stats.tcp6_scrapes_handled, }, diff --git 
a/packages/rest-tracker-api-core/src/statistics/metrics.rs b/packages/rest-tracker-api-core/src/statistics/metrics.rs index 40262efd6..7e41cf713 100644 --- a/packages/rest-tracker-api-core/src/statistics/metrics.rs +++ b/packages/rest-tracker-api-core/src/statistics/metrics.rs @@ -11,6 +11,7 @@ pub struct Metrics { /// Total number of TCP (HTTP tracker) connections from IPv4 peers. /// Since the HTTP tracker spec does not require a handshake, this metric /// increases for every HTTP request. + #[deprecated(since = "3.1.0")] pub tcp4_connections_handled: u64, /// Total number of TCP (HTTP tracker) `announce` requests from IPv4 peers. @@ -20,6 +21,7 @@ pub struct Metrics { pub tcp4_scrapes_handled: u64, /// Total number of TCP (HTTP tracker) connections from IPv6 peers. + #[deprecated(since = "3.1.0")] pub tcp6_connections_handled: u64, /// Total number of TCP (HTTP tracker) `announce` requests from IPv6 peers. diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index ea4e159b6..5d7629443 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -24,6 +24,7 @@ pub struct TrackerMetrics { } /// It returns all the [`TrackerMetrics`] +#[allow(deprecated)] pub async fn get_metrics( in_memory_torrent_repository: Arc, ban_service: Arc>, @@ -37,15 +38,20 @@ pub async fn get_metrics( let udp_core_stats = udp_core_stats_repository.get_stats().await; let udp_server_stats = udp_server_stats_repository.get_stats().await; + // For backward compatibility we keep the `tcp4_connections_handled` and + // `tcp6_connections_handled` metrics. They don't make sense for the HTTP + // tracker, but we keep them for now. In new major versions we should remove + // them. 
+ TrackerMetrics { torrents_metrics, protocol_metrics: Metrics { // TCPv4 - tcp4_connections_handled: http_stats.tcp4_connections_handled, + tcp4_connections_handled: http_stats.tcp4_announces_handled + http_stats.tcp4_scrapes_handled, tcp4_announces_handled: http_stats.tcp4_announces_handled, tcp4_scrapes_handled: http_stats.tcp4_scrapes_handled, // TCPv6 - tcp6_connections_handled: http_stats.tcp6_connections_handled, + tcp6_connections_handled: http_stats.tcp6_announces_handled + http_stats.tcp6_scrapes_handled, tcp6_announces_handled: http_stats.tcp6_announces_handled, tcp6_scrapes_handled: http_stats.tcp6_scrapes_handled, // UDP From 084beb2ef6f0bdd29bbc74dbc2896a499171eb8f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Mar 2025 11:28:30 +0000 Subject: [PATCH 380/802] feat: [#727] allow to authenticate API via authentication header The API allos client authentication via a `token` parameter in the URL query: ```console curl http://0.0.0.0:1212/api/v1/stats?token=MyAccessToken | jq ``` Now it's also possible to do it via Authentication Header: ```console curl -H "Authorization: Bearer MyAccessToken" http://0.0.0.0:1212/api/v1/stats | jq ``` This is to avoid leaking the token in logs, proxies, etc. For now, it's only optional and recommendable. It could be mandatory in future major API versions. --- .../src/v1/middlewares/auth.rs | 85 ++++- .../server/v1/contract/authentication.rs | 325 +++++++++++++----- .../rest-tracker-api-client/src/v1/client.rs | 35 +- 3 files changed, 345 insertions(+), 100 deletions(-) diff --git a/packages/axum-rest-tracker-api-server/src/v1/middlewares/auth.rs b/packages/axum-rest-tracker-api-server/src/v1/middlewares/auth.rs index 2ec046bed..9b5ec2320 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/middlewares/auth.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/middlewares/auth.rs @@ -1,7 +1,20 @@ //! Authentication middleware for the API. //! -//! It uses a "token" GET param to authenticate the user. 
URLs must be of the -//! form: +//! It uses a "token" to authenticate the user. The token must be one of the +//! `access_tokens` in the tracker [HTTP API configuration](torrust_tracker_configuration::HttpApi). +//! +//! There are two ways to provide the token: +//! +//! 1. As a `Bearer` token in the `Authorization` header. +//! 2. As a `token` GET param in the URL. +//! +//! Using the `Authorization` header: +//! +//! ```console +//! curl -H "Authorization: Bearer MyAccessToken" http://:/api/v1/ +//! ``` +//! +//! Using the `token` GET param: //! //! `http://:/api/v1/?token=`. //! @@ -21,6 +34,12 @@ //! All the tokes have the same permissions, so it is not possible to have //! different permissions for different tokens. The label is only used to //! identify the token. +//! +//! NOTICE: The token is not encrypted, so it is recommended to use HTTPS to +//! protect the token from being intercepted. +//! +//! NOTICE: If both the `Authorization` header and the `token` GET param are +//! provided, the `Authorization` header will be used. use std::sync::Arc; use axum::extract::{self}; @@ -32,6 +51,8 @@ use torrust_tracker_configuration::AccessTokens; use crate::v1::responses::unhandled_rejection_response; +pub const AUTH_BEARER_TOKEN_HEADER_PREFIX: &str = "Bearer"; + /// Container for the `token` extracted from the query params. #[derive(Deserialize, Debug)] pub struct QueryParams { @@ -43,7 +64,8 @@ pub struct State { pub access_tokens: Arc, } -/// Middleware for authentication using a "token" GET param. +/// Middleware for authentication. +/// /// The token must be one of the tokens in the tracker [HTTP API configuration](torrust_tracker_configuration::HttpApi). 
pub async fn auth( extract::State(state): extract::State, @@ -51,8 +73,20 @@ pub async fn auth( request: Request, next: Next, ) -> Response { - let Some(token) = params.token else { - return AuthError::Unauthorized.into_response(); + let token_from_header = match extract_bearer_token_from_header(&request) { + Ok(token) => token, + Err(err) => return err.into_response(), + }; + + let token_from_get_param = params.token.clone(); + + let provided_tokens = (token_from_header, token_from_get_param); + + let token = match provided_tokens { + (Some(token_from_header), Some(_token_from_get_param)) => token_from_header, + (Some(token_from_header), None) => token_from_header, + (None, Some(token_from_get_param)) => token_from_get_param, + (None, None) => return AuthError::Unauthorized.into_response(), }; if !authenticate(&token, &state.access_tokens) { @@ -62,11 +96,42 @@ pub async fn auth( next.run(request).await } +fn extract_bearer_token_from_header(request: &Request) -> Result, AuthError> { + let headers = request.headers(); + + let header_value = headers + .get(axum::http::header::AUTHORIZATION) + .and_then(|header_value| header_value.to_str().ok()); + + match header_value { + None => Ok(None), + Some(header_value) => { + if header_value == AUTH_BEARER_TOKEN_HEADER_PREFIX { + // Empty token + return Ok(Some(String::new())); + } + + if !header_value.starts_with(&format!("{AUTH_BEARER_TOKEN_HEADER_PREFIX} ").to_string()) { + // Invalid token type. Missing "Bearer" prefix. + return Err(AuthError::UnknownTokenProvided); + } + + Ok(header_value + .strip_prefix(&format!("{AUTH_BEARER_TOKEN_HEADER_PREFIX} ").to_string()) + .map(std::string::ToString::to_string)) + } + } +} + enum AuthError { /// Missing token for authentication. Unauthorized, + /// Token was provided but it is not valid. TokenNotValid, + + /// Token was provided but it is not in a format that the server can't understands. 
+ UnknownTokenProvided, } impl IntoResponse for AuthError { @@ -74,6 +139,7 @@ impl IntoResponse for AuthError { match self { AuthError::Unauthorized => unauthorized_response(), AuthError::TokenNotValid => token_not_valid_response(), + AuthError::UnknownTokenProvided => unknown_auth_data_provided_response(), } } } @@ -93,3 +159,12 @@ pub fn unauthorized_response() -> Response { pub fn token_not_valid_response() -> Response { unhandled_rejection_response("token not valid".to_string()) } + +/// `500` error response when the provided token type is not valid. +/// +/// The client has provided authentication information that the server does not +/// understand. +#[must_use] +pub fn unknown_auth_data_provided_response() -> Response { + unhandled_rejection_response("unknown token provided".to_string()) +} diff --git a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/authentication.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/authentication.rs index 3b6419187..0822f9fec 100644 --- a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/authentication.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/authentication.rs @@ -1,130 +1,275 @@ -use torrust_axum_rest_tracker_api_server::environment::Started; -use torrust_rest_tracker_api_client::common::http::{Query, QueryParam}; -use torrust_rest_tracker_api_client::v1::client::{headers_with_request_id, Client}; -use torrust_tracker_test_helpers::logging::logs_contains_a_line_with; -use torrust_tracker_test_helpers::{configuration, logging}; -use uuid::Uuid; +mod given_that_the_token_is_only_provided_in_the_authentication_header { + use hyper::header; + use torrust_axum_rest_tracker_api_server::environment::Started; + use torrust_rest_tracker_api_client::common::http::Query; + use torrust_rest_tracker_api_client::v1::client::{ + headers_with_auth_token, headers_with_request_id, Client, AUTH_BEARER_TOKEN_HEADER_PREFIX, + }; + use 
torrust_tracker_test_helpers::logging::logs_contains_a_line_with; + use torrust_tracker_test_helpers::{configuration, logging}; + use uuid::Uuid; -use crate::server::v1::asserts::{assert_token_not_valid, assert_unauthorized}; + use crate::server::v1::asserts::assert_token_not_valid; -#[tokio::test] -async fn should_authenticate_requests_by_using_a_token_query_param() { - logging::setup(); + #[tokio::test] + async fn it_should_authenticate_requests_when_the_token_is_provided_in_the_authentication_header() { + logging::setup(); - let env = Started::new(&configuration::ephemeral().into()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let token = env.get_connection_info().api_token.unwrap(); + let token = env.get_connection_info().api_token.unwrap(); - let response = Client::new(env.get_connection_info()) - .unwrap() - .get_request_with_query("stats", Query::params([QueryParam::new("token", &token)].to_vec()), None) - .await; + let response = Client::new(env.get_connection_info()) + .unwrap() + .get_request_with_query("stats", Query::default(), Some(headers_with_auth_token(&token))) + .await; - assert_eq!(response.status(), 200); + assert_eq!(response.status(), 200); - env.stop().await; -} + env.stop().await; + } + + #[tokio::test] + async fn it_should_not_authenticate_requests_when_the_token_is_empty() { + logging::setup(); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let request_id = Uuid::new_v4(); + + let mut headers = headers_with_request_id(request_id); + + // Send the header with an empty token + headers.insert( + header::AUTHORIZATION, + format!("{AUTH_BEARER_TOKEN_HEADER_PREFIX} ") + .parse() + .expect("the auth token is not a valid header value"), + ); + + let response = Client::new(env.get_connection_info()) + .unwrap() + .get_request_with_query("stats", Query::default(), Some(headers)) + .await; + + assert_token_not_valid(response).await; + + assert!( + logs_contains_a_line_with(&["ERROR", "API", 
&format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... request_id={request_id}" + ); + + env.stop().await; + } -#[tokio::test] -async fn should_not_authenticate_requests_when_the_token_is_missing() { - logging::setup(); + #[tokio::test] + async fn it_should_not_authenticate_requests_when_the_token_is_invalid() { + logging::setup(); - let env = Started::new(&configuration::ephemeral().into()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let request_id = Uuid::new_v4(); + let request_id = Uuid::new_v4(); - let response = Client::new(env.get_connection_info()) - .unwrap() - .get_request_with_query("stats", Query::default(), Some(headers_with_request_id(request_id))) - .await; + let mut headers = headers_with_request_id(request_id); - assert_unauthorized(response).await; + // Send the header with an empty token + headers.insert( + header::AUTHORIZATION, + "Bearer INVALID TOKEN" + .parse() + .expect("the auth token is not a valid header value"), + ); - assert!( - logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), - "Expected logs to contain: ERROR ... API ... request_id={request_id}" - ); + let response = Client::new(env.get_connection_info()) + .unwrap() + .get_request_with_query("stats", Query::default(), Some(headers)) + .await; - env.stop().await; + assert_token_not_valid(response).await; + + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... 
request_id={request_id}" + ); + + env.stop().await; + } } +mod given_that_the_token_is_only_provided_in_the_query_param { + + use torrust_axum_rest_tracker_api_server::environment::Started; + use torrust_rest_tracker_api_client::common::http::{Query, QueryParam}; + use torrust_rest_tracker_api_client::v1::client::{headers_with_request_id, Client}; + use torrust_tracker_test_helpers::logging::logs_contains_a_line_with; + use torrust_tracker_test_helpers::{configuration, logging}; + use uuid::Uuid; + + use crate::server::v1::asserts::assert_token_not_valid; + + #[tokio::test] + async fn it_should_authenticate_requests_when_the_token_is_provided_as_a_query_param() { + logging::setup(); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let token = env.get_connection_info().api_token.unwrap(); + + let response = Client::new(env.get_connection_info()) + .unwrap() + .get_request_with_query("stats", Query::params([QueryParam::new("token", &token)].to_vec()), None) + .await; + + assert_eq!(response.status(), 200); -#[tokio::test] -async fn should_not_authenticate_requests_when_the_token_is_empty() { - logging::setup(); + env.stop().await; + } - let env = Started::new(&configuration::ephemeral().into()).await; + #[tokio::test] + async fn it_should_not_authenticate_requests_when_the_token_is_empty() { + logging::setup(); - let request_id = Uuid::new_v4(); + let env = Started::new(&configuration::ephemeral().into()).await; - let response = Client::new(env.get_connection_info()) - .unwrap() - .get_request_with_query( - "stats", - Query::params([QueryParam::new("token", "")].to_vec()), - Some(headers_with_request_id(request_id)), - ) - .await; + let request_id = Uuid::new_v4(); - assert_token_not_valid(response).await; + let response = Client::new(env.get_connection_info()) + .unwrap() + .get_request_with_query( + "stats", + Query::params([QueryParam::new("token", "")].to_vec()), + Some(headers_with_request_id(request_id)), + ) + .await; - assert!( - 
logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), - "Expected logs to contain: ERROR ... API ... request_id={request_id}" - ); + assert_token_not_valid(response).await; - env.stop().await; + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... request_id={request_id}" + ); + + env.stop().await; + } + + #[tokio::test] + async fn it_should_not_authenticate_requests_when_the_token_is_invalid() { + logging::setup(); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let request_id = Uuid::new_v4(); + + let response = Client::new(env.get_connection_info()) + .unwrap() + .get_request_with_query( + "stats", + Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec()), + Some(headers_with_request_id(request_id)), + ) + .await; + + assert_token_not_valid(response).await; + + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... 
request_id={request_id}" + ); + + env.stop().await; + } + + #[tokio::test] + async fn it_should_allow_the_token_query_param_to_be_at_any_position_in_the_url_query() { + logging::setup(); + + let env = Started::new(&configuration::ephemeral().into()).await; + + let token = env.get_connection_info().api_token.unwrap(); + + // At the beginning of the query component + let response = Client::new(env.get_connection_info()) + .unwrap() + .get_request(&format!("torrents?token={token}&limit=1")) + .await; + + assert_eq!(response.status(), 200); + + // At the end of the query component + let response = Client::new(env.get_connection_info()) + .unwrap() + .get_request(&format!("torrents?limit=1&token={token}")) + .await; + + assert_eq!(response.status(), 200); + + env.stop().await; + } } -#[tokio::test] -async fn should_not_authenticate_requests_when_the_token_is_invalid() { - logging::setup(); +mod given_that_not_token_is_provided { + + use torrust_axum_rest_tracker_api_server::environment::Started; + use torrust_rest_tracker_api_client::common::http::Query; + use torrust_rest_tracker_api_client::v1::client::{headers_with_request_id, Client}; + use torrust_tracker_test_helpers::logging::logs_contains_a_line_with; + use torrust_tracker_test_helpers::{configuration, logging}; + use uuid::Uuid; + + use crate::server::v1::asserts::assert_unauthorized; + + #[tokio::test] + async fn it_should_not_authenticate_requests_when_the_token_is_missing() { + logging::setup(); - let env = Started::new(&configuration::ephemeral().into()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let request_id = Uuid::new_v4(); + let request_id = Uuid::new_v4(); - let response = Client::new(env.get_connection_info()) - .unwrap() - .get_request_with_query( - "stats", - Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec()), - Some(headers_with_request_id(request_id)), - ) - .await; + let response = Client::new(env.get_connection_info()) + .unwrap() + 
.get_request_with_query("stats", Query::default(), Some(headers_with_request_id(request_id))) + .await; - assert_token_not_valid(response).await; + assert_unauthorized(response).await; - assert!( - logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), - "Expected logs to contain: ERROR ... API ... request_id={request_id}" - ); + assert!( + logs_contains_a_line_with(&["ERROR", "API", &format!("{request_id}")]), + "Expected logs to contain: ERROR ... API ... request_id={request_id}" + ); - env.stop().await; + env.stop().await; + } } -#[tokio::test] -async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_query() { - logging::setup(); +mod given_that_token_is_provided_via_get_param_and_authentication_header { + use torrust_axum_rest_tracker_api_server::environment::Started; + use torrust_rest_tracker_api_client::common::http::{Query, QueryParam}; + use torrust_rest_tracker_api_client::v1::client::{headers_with_auth_token, Client, TOKEN_PARAM_NAME}; + use torrust_tracker_test_helpers::{configuration, logging}; - let env = Started::new(&configuration::ephemeral().into()).await; + #[tokio::test] + async fn it_should_authenticate_requests_using_the_token_provided_in_the_authentication_header() { + logging::setup(); - let token = env.get_connection_info().api_token.unwrap(); + let env = Started::new(&configuration::ephemeral().into()).await; - // At the beginning of the query component - let response = Client::new(env.get_connection_info()) - .unwrap() - .get_request(&format!("torrents?token={token}&limit=1")) - .await; + let authorized_token = env.get_connection_info().api_token.unwrap(); - assert_eq!(response.status(), 200); + let non_authorized_token = "NonAuthorizedToken"; - // At the end of the query component - let response = Client::new(env.get_connection_info()) - .unwrap() - .get_request(&format!("torrents?limit=1&token={token}")) - .await; + let response = Client::new(env.get_connection_info()) + .unwrap() + 
.get_request_with_query( + "stats", + Query::params([QueryParam::new(TOKEN_PARAM_NAME, non_authorized_token)].to_vec()), + Some(headers_with_auth_token(&authorized_token)), + ) + .await; - assert_eq!(response.status(), 200); + // The token provided in the query param should be ignored and the token + // in the authentication header should be used. + assert_eq!(response.status(), 200); - env.stop().await; + env.stop().await; + } } diff --git a/packages/rest-tracker-api-client/src/v1/client.rs b/packages/rest-tracker-api-client/src/v1/client.rs index 65e3fceb8..d13a567bb 100644 --- a/packages/rest-tracker-api-client/src/v1/client.rs +++ b/packages/rest-tracker-api-client/src/v1/client.rs @@ -1,6 +1,6 @@ use std::time::Duration; -use hyper::HeaderMap; +use hyper::{header, HeaderMap}; use reqwest::{Error, Response}; use serde::Serialize; use url::Url; @@ -9,7 +9,9 @@ use uuid::Uuid; use crate::common::http::{Query, QueryParam, ReqwestQuery}; use crate::connection_info::ConnectionInfo; -const TOKEN_PARAM_NAME: &str = "token"; +pub const TOKEN_PARAM_NAME: &str = "token"; +pub const AUTH_BEARER_TOKEN_HEADER_PREFIX: &str = "Bearer"; + const API_PATH: &str = "api/v1/"; const DEFAULT_REQUEST_TIMEOUT_IN_SECS: u64 = 5; @@ -180,15 +182,38 @@ pub async fn get(path: Url, query: Option, headers: Option) -> builder.send().await.unwrap() } -/// Returns a `HeaderMap` with a request id header +/// Returns a `HeaderMap` with a request id header. /// /// # Panics /// -/// Will panic if the request ID can't be parsed into a string. +/// Will panic if the request ID can't be parsed into a `HeaderValue`. 
#[must_use] pub fn headers_with_request_id(request_id: Uuid) -> HeaderMap { let mut headers = HeaderMap::new(); - headers.insert("x-request-id", request_id.to_string().parse().unwrap()); + headers.insert( + "x-request-id", + request_id + .to_string() + .parse() + .expect("the request ID is not a valid header value"), + ); + headers +} + +/// Returns a `HeaderMap` with an authorization token. +/// +/// # Panics +/// +/// Will panic if the token can't be parsed into a `HeaderValue`. +#[must_use] +pub fn headers_with_auth_token(token: &str) -> HeaderMap { + let mut headers = HeaderMap::new(); + headers.insert( + header::AUTHORIZATION, + format!("{AUTH_BEARER_TOKEN_HEADER_PREFIX} {token}") + .parse() + .expect("the auth token is not a valid header value"), + ); headers } From 34f2f437db7cfbce2fed5a94f906c39a7794b0f9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 10 Mar 2025 15:58:26 +0000 Subject: [PATCH 381/802] refactor: [#727] use the Authentication header in the API client Instead of passing the `token` via GET param. The server supports both. Since we have not released any version crate for the client yet we can use the header by deafault which is more secure. 
--- .../server/v1/contract/authentication.rs | 39 ++++++--- .../rest-tracker-api-client/src/v1/client.rs | 81 ++++++++++++++----- 2 files changed, 89 insertions(+), 31 deletions(-) diff --git a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/authentication.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/authentication.rs index 0822f9fec..be291a50c 100644 --- a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/authentication.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/authentication.rs @@ -2,6 +2,7 @@ mod given_that_the_token_is_only_provided_in_the_authentication_header { use hyper::header; use torrust_axum_rest_tracker_api_server::environment::Started; use torrust_rest_tracker_api_client::common::http::Query; + use torrust_rest_tracker_api_client::connection_info::ConnectionInfo; use torrust_rest_tracker_api_client::v1::client::{ headers_with_auth_token, headers_with_request_id, Client, AUTH_BEARER_TOKEN_HEADER_PREFIX, }; @@ -80,7 +81,9 @@ mod given_that_the_token_is_only_provided_in_the_authentication_header { .expect("the auth token is not a valid header value"), ); - let response = Client::new(env.get_connection_info()) + let connection_info = ConnectionInfo::anonymous(env.get_connection_info().origin); + + let response = Client::new(connection_info) .unwrap() .get_request_with_query("stats", Query::default(), Some(headers)) .await; @@ -99,7 +102,8 @@ mod given_that_the_token_is_only_provided_in_the_query_param { use torrust_axum_rest_tracker_api_server::environment::Started; use torrust_rest_tracker_api_client::common::http::{Query, QueryParam}; - use torrust_rest_tracker_api_client::v1::client::{headers_with_request_id, Client}; + use torrust_rest_tracker_api_client::connection_info::ConnectionInfo; + use torrust_rest_tracker_api_client::v1::client::{headers_with_request_id, Client, TOKEN_PARAM_NAME}; use torrust_tracker_test_helpers::logging::logs_contains_a_line_with; use 
torrust_tracker_test_helpers::{configuration, logging}; use uuid::Uuid; @@ -114,9 +118,15 @@ mod given_that_the_token_is_only_provided_in_the_query_param { let token = env.get_connection_info().api_token.unwrap(); - let response = Client::new(env.get_connection_info()) + let connection_info = ConnectionInfo::anonymous(env.get_connection_info().origin); + + let response = Client::new(connection_info) .unwrap() - .get_request_with_query("stats", Query::params([QueryParam::new("token", &token)].to_vec()), None) + .get_request_with_query( + "stats", + Query::params([QueryParam::new(TOKEN_PARAM_NAME, &token)].to_vec()), + None, + ) .await; assert_eq!(response.status(), 200); @@ -132,11 +142,13 @@ mod given_that_the_token_is_only_provided_in_the_query_param { let request_id = Uuid::new_v4(); - let response = Client::new(env.get_connection_info()) + let connection_info = ConnectionInfo::anonymous(env.get_connection_info().origin); + + let response = Client::new(connection_info) .unwrap() .get_request_with_query( "stats", - Query::params([QueryParam::new("token", "")].to_vec()), + Query::params([QueryParam::new(TOKEN_PARAM_NAME, "")].to_vec()), Some(headers_with_request_id(request_id)), ) .await; @@ -159,11 +171,13 @@ mod given_that_the_token_is_only_provided_in_the_query_param { let request_id = Uuid::new_v4(); - let response = Client::new(env.get_connection_info()) + let connection_info = ConnectionInfo::anonymous(env.get_connection_info().origin); + + let response = Client::new(connection_info) .unwrap() .get_request_with_query( "stats", - Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec()), + Query::params([QueryParam::new(TOKEN_PARAM_NAME, "INVALID TOKEN")].to_vec()), Some(headers_with_request_id(request_id)), ) .await; @@ -186,8 +200,10 @@ mod given_that_the_token_is_only_provided_in_the_query_param { let token = env.get_connection_info().api_token.unwrap(); + let connection_info = ConnectionInfo::anonymous(env.get_connection_info().origin); + // At 
the beginning of the query component - let response = Client::new(env.get_connection_info()) + let response = Client::new(connection_info) .unwrap() .get_request(&format!("torrents?token={token}&limit=1")) .await; @@ -210,6 +226,7 @@ mod given_that_not_token_is_provided { use torrust_axum_rest_tracker_api_server::environment::Started; use torrust_rest_tracker_api_client::common::http::Query; + use torrust_rest_tracker_api_client::connection_info::ConnectionInfo; use torrust_rest_tracker_api_client::v1::client::{headers_with_request_id, Client}; use torrust_tracker_test_helpers::logging::logs_contains_a_line_with; use torrust_tracker_test_helpers::{configuration, logging}; @@ -225,7 +242,9 @@ mod given_that_not_token_is_provided { let request_id = Uuid::new_v4(); - let response = Client::new(env.get_connection_info()) + let connection_info = ConnectionInfo::anonymous(env.get_connection_info().origin); + + let response = Client::new(connection_info) .unwrap() .get_request_with_query("stats", Query::default(), Some(headers_with_request_id(request_id))) .await; diff --git a/packages/rest-tracker-api-client/src/v1/client.rs b/packages/rest-tracker-api-client/src/v1/client.rs index d13a567bb..da1b709da 100644 --- a/packages/rest-tracker-api-client/src/v1/client.rs +++ b/packages/rest-tracker-api-client/src/v1/client.rs @@ -92,16 +92,18 @@ impl Client { /// /// Will panic if the request can't be sent pub async fn post_empty(&self, path: &str, headers: Option) -> Response { - let builder = self - .client - .post(self.base_url(path).clone()) - .query(&ReqwestQuery::from(self.query_with_token())); + let builder = self.client.post(self.base_url(path).clone()); let builder = match headers { Some(headers) => builder.headers(headers), None => builder, }; + let builder = match &self.connection_info.api_token { + Some(token) => builder.header(header::AUTHORIZATION, format!("{AUTH_BEARER_TOKEN_HEADER_PREFIX} {token}")), + None => builder, + }; + builder.send().await.unwrap() } @@ 
-109,17 +111,18 @@ impl Client { /// /// Will panic if the request can't be sent pub async fn post_form(&self, path: &str, form: &T, headers: Option) -> Response { - let builder = self - .client - .post(self.base_url(path).clone()) - .query(&ReqwestQuery::from(self.query_with_token())) - .json(&form); + let builder = self.client.post(self.base_url(path).clone()).json(&form); let builder = match headers { Some(headers) => builder.headers(headers), None => builder, }; + let builder = match &self.connection_info.api_token { + Some(token) => builder.header(header::AUTHORIZATION, format!("{AUTH_BEARER_TOKEN_HEADER_PREFIX} {token}")), + None => builder, + }; + builder.send().await.unwrap() } @@ -127,34 +130,70 @@ impl Client { /// /// Will panic if the request can't be sent async fn delete(&self, path: &str, headers: Option) -> Response { - let builder = self - .client - .delete(self.base_url(path).clone()) - .query(&ReqwestQuery::from(self.query_with_token())); + let builder = self.client.delete(self.base_url(path).clone()); let builder = match headers { Some(headers) => builder.headers(headers), None => builder, }; + let builder = match &self.connection_info.api_token { + Some(token) => builder.header(header::AUTHORIZATION, format!("{AUTH_BEARER_TOKEN_HEADER_PREFIX} {token}")), + None => builder, + }; + builder.send().await.unwrap() } + /// # Panics + /// + /// Will panic if it can't convert the authentication token to a `HeaderValue`. 
pub async fn get_request_with_query(&self, path: &str, params: Query, headers: Option) -> Response { - get(self.base_url(path), Some(params), headers).await + match &self.connection_info.api_token { + Some(token) => { + let headers = if let Some(headers) = headers { + // Headers provided -> add auth token if not already present + + if headers.get(header::AUTHORIZATION).is_some() { + // Auth token already present -> use provided + headers + } else { + let mut headers = headers; + + headers.insert( + header::AUTHORIZATION, + format!("{AUTH_BEARER_TOKEN_HEADER_PREFIX} {token}") + .parse() + .expect("the auth token is not a valid header value"), + ); + + headers + } + } else { + // No headers provided -> create headers with auth token + + let mut headers = HeaderMap::new(); + + headers.insert( + header::AUTHORIZATION, + format!("{AUTH_BEARER_TOKEN_HEADER_PREFIX} {token}") + .parse() + .expect("the auth token is not a valid header value"), + ); + + headers + }; + + get(self.base_url(path), Some(params), Some(headers)).await + } + None => get(self.base_url(path), Some(params), headers).await, + } } pub async fn get_request(&self, path: &str) -> Response { get(self.base_url(path), None, None).await } - fn query_with_token(&self) -> Query { - match &self.connection_info.api_token { - Some(token) => Query::params([QueryParam::new("token", token)].to_vec()), - None => Query::default(), - } - } - fn base_url(&self, path: &str) -> Url { Url::parse(&format!("{}{}{path}", &self.connection_info.origin, &self.base_path)).unwrap() } From aedcd3e261d77c12f4d127844c84c0286da7222c Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Tue, 11 Mar 2025 03:19:43 +0900 Subject: [PATCH 382/802] docs: update README.md minor fix --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b7431e859..33fc4a028 100644 --- a/README.md +++ b/README.md @@ -104,7 +104,7 @@ podman run -it docker.io/torrust/tracker:develop # Checkout repository 
into a new folder: git clone https://github.com/torrust/torrust-tracker.git -# Change into directory and create a empty database file: +# Change into directory and create an empty database file: cd torrust-tracker mkdir -p ./storage/tracker/lib/database/ touch ./storage/tracker/lib/database/sqlite3.db From c15573200caad686cbca63522ff1a8752760d645 Mon Sep 17 00:00:00 2001 From: nuts_rice Date: Tue, 4 Feb 2025 12:39:36 -0500 Subject: [PATCH 383/802] chore: barebones benchmarks for UDP and HTTP core packages - http + udp tracker core bench mods - bench sh script --- Cargo.lock | 240 ++++++++++-------- contrib/dev-tools/benches/run-benches.sh | 9 + packages/http-tracker-core/Cargo.toml | 6 + .../http-tracker-core/benches/helpers/mod.rs | 2 + .../http-tracker-core/benches/helpers/sync.rs | 31 +++ .../http-tracker-core/benches/helpers/util.rs | 118 +++++++++ .../benches/http_tracker_core_benchmark.rs | 23 ++ packages/udp-tracker-core/Cargo.toml | 6 + .../udp-tracker-core/benches/helpers/mod.rs | 2 + .../udp-tracker-core/benches/helpers/sync.rs | 21 ++ .../udp-tracker-core/benches/helpers/utils.rs | 25 ++ .../benches/udp_tracker_core_benchmark.rs | 20 ++ .../src/statistics/event/handler.rs | 125 ++++++++- 13 files changed, 515 insertions(+), 113 deletions(-) create mode 100755 contrib/dev-tools/benches/run-benches.sh create mode 100644 packages/http-tracker-core/benches/helpers/mod.rs create mode 100644 packages/http-tracker-core/benches/helpers/sync.rs create mode 100644 packages/http-tracker-core/benches/helpers/util.rs create mode 100644 packages/http-tracker-core/benches/http_tracker_core_benchmark.rs create mode 100644 packages/udp-tracker-core/benches/helpers/mod.rs create mode 100644 packages/udp-tracker-core/benches/helpers/sync.rs create mode 100644 packages/udp-tracker-core/benches/helpers/utils.rs create mode 100644 packages/udp-tracker-core/benches/udp_tracker_core_benchmark.rs diff --git a/Cargo.lock b/Cargo.lock index 1a6a09244..4d5157055 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -264,7 +264,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix", + "rustix 0.38.44", "slab", "tracing", "windows-sys 0.59.0", @@ -322,7 +322,7 @@ checksum = "d556ec1359574147ec0c4fc5eb525f3f23263a592b1a9c07e0a75b427de55c97" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -444,7 +444,7 @@ checksum = "604fde5e028fea851ce1d8570bbdc034bec850d157f7569d10f347d06808c05c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -532,7 +532,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -561,6 +561,7 @@ dependencies = [ "bittorrent-http-tracker-protocol", "bittorrent-primitives", "bittorrent-tracker-core", + "criterion", "futures", "mockall", "thiserror 2.0.12", @@ -669,6 +670,7 @@ dependencies = [ "bloom", "blowfish", "cipher", + "criterion", "futures", "lazy_static", "mockall", @@ -814,7 +816,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -1018,9 +1020,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.31" +version = "4.5.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "027bb0d98429ae334a8698531da7077bdf906419543a35a55c2cb1b66437d767" +checksum = "6088f3ae8c3608d19260cd7445411865a485688711b78b5be70d78cd96136f83" dependencies = [ "clap_builder", "clap_derive", @@ -1028,9 +1030,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.31" +version = "4.5.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5589e0cba072e0f3d23791efac0fd8627b49c829c196a492e88168e6a669d863" +checksum = "22a7ef7f676155edfb82daa97f99441f3ebf4a58d5e32f295a56259f1b6facc8" dependencies = [ "anstream", "anstyle", @@ -1040,14 +1042,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.28" +version = "4.5.32" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4ced95c6f4a675af3da73304b9ac4ed991640c36374e4b46795c49e17cf1ed" +checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -1278,7 +1280,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -1289,7 +1291,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -1333,7 +1335,7 @@ checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", "unicode-xid", ] @@ -1345,7 +1347,7 @@ checksum = "ccfae181bab5ab6c5478b2ccb69e4c68a02f8c3ec72f6616bfec9dbc599d2ee0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -1366,7 +1368,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -1388,9 +1390,9 @@ checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" [[package]] name = "either" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7914353092ddf589ad78f25c5c1c21b7f80b0ff8621e7c814c3485b5306da9d" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] name = "encoding_rs" @@ -1603,7 +1605,7 @@ checksum = "e99b8b3c28ae0e84b604c75f721c21dc77afb3706076af5e8216d15fd1deaae3" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -1615,7 +1617,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -1627,7 +1629,7 @@ dependencies = [ "frunk_core", 
"frunk_proc_macro_helpers", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -1705,7 +1707,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -1813,7 +1815,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.7.1", + "indexmap 2.8.0", "slab", "tokio", "tokio-util", @@ -1877,6 +1879,12 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" +[[package]] +name = "hermit-abi" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbd780fe5cc30f81464441920d82ac8740e2e46b29a6fad543ddd075229ce37e" + [[package]] name = "hex" version = "0.4.3" @@ -2185,7 +2193,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -2228,9 +2236,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.7.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" +checksum = "3954d50fe15b02142bf25d3b8bdadb634ec3948f103d04ffe3031bc8fe9d7058" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -2269,11 +2277,11 @@ checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "is-terminal" -version = "0.4.15" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e19b23d53f35ce9f56aebc7d1bb4e6ac1e9c0db7ac85c8d1760c04379edced37" +checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ - "hermit-abi", + "hermit-abi 0.5.0", "libc", "windows-sys 0.59.0", ] @@ -2344,9 +2352,9 @@ checksum = 
"bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.170" +version = "0.2.171" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" +checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" [[package]] name = "libloading" @@ -2377,9 +2385,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.31.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad8935b44e7c13394a179a438e0cebba0fe08fe01b54f152e29a93b5cf993fd4" +checksum = "fbb8270bb4060bd76c6e96f20c52d80620f1d82a3470885694e41e0f81ef6fe7" dependencies = [ "cc", "pkg-config", @@ -2403,6 +2411,12 @@ version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" +[[package]] +name = "linux-raw-sys" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db9c683daf087dc577b7506e9695b3d556a9f3849903fa28186283afd6809e9" + [[package]] name = "litemap" version = "0.7.5" @@ -2516,7 +2530,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -2566,7 +2580,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", "termcolor", "thiserror 1.0.69", ] @@ -2732,15 +2746,15 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.20.3" +version = "1.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e" +checksum = "cde51589ab56b20a6f686b2c68f7a0bd6add753d697abf720d63f8db3ab7b1ad" [[package]] name = "oorandom" -version = "11.1.4" +version = "11.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" +checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "openssl" @@ -2765,7 +2779,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -2843,7 +2857,7 @@ dependencies = [ "regex", "regex-syntax", "structmeta", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -2866,7 +2880,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -2940,7 +2954,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -3008,9 +3022,9 @@ checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" dependencies = [ "cfg-if", "concurrent-queue", - "hermit-abi", + "hermit-abi 0.4.0", "pin-project-lite", - "rustix", + "rustix 0.38.44", "tracing", "windows-sys 0.59.0", ] @@ -3029,11 +3043,11 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.7.35", + "zerocopy 0.8.23", ] [[package]] @@ -3064,9 +3078,9 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" +checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" dependencies = [ "toml_edit", ] @@ -3090,7 +3104,7 @@ dependencies = [ "proc-macro-error-attr2", 
"proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -3110,7 +3124,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", "version_check", "yansi", ] @@ -3178,9 +3192,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.26.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee025287c0188d75ae2563bcb91c9b0d1843cfc56e4bd3ab867597971b5cc256" +checksum = "180da684f0a188977d3968f139eb44260192ef8d9a5b7b7cbd01d881e0353179" dependencies = [ "r2d2", "rusqlite", @@ -3212,7 +3226,7 @@ checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", - "zerocopy 0.8.21", + "zerocopy 0.8.23", ] [[package]] @@ -3381,9 +3395,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.11" +version = "0.17.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da5349ae27d3887ca812fb375b45a4fbb36d8d12d2df394968cd86e35683fe73" +checksum = "70ac5d832aa16abd7d1def883a8545280c20a60f523a370aa3a9617c2b8550ee" dependencies = [ "cc", "cfg-if", @@ -3458,15 +3472,15 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.99", + "syn 2.0.100", "unicode-ident", ] [[package]] name = "rusqlite" -version = "0.33.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c6d5e5acb6f6129fe3f7ba0a7fc77bca1942cb568535e18e7bc40262baf3110" +checksum = "37e34486da88d8e051c7c0e23c3f15fd806ea8546260aa2fec247e97242ec143" dependencies = [ "bitflags 2.9.0", "fallible-iterator", @@ -3522,7 +3536,20 @@ dependencies = [ "bitflags 2.9.0", "errno", "libc", - "linux-raw-sys", + "linux-raw-sys 0.4.15", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustix" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f7178faa4b75a30e269c71e61c353ce2748cf3d76f0c44c393f4e60abf49b825" +dependencies = [ + "bitflags 2.9.0", + "errno", + "libc", + "linux-raw-sys 0.9.2", "windows-sys 0.59.0", ] @@ -3679,9 +3706,9 @@ checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "serde" -version = "1.0.218" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" dependencies = [ "serde_derive", ] @@ -3698,22 +3725,22 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.16" +version = "0.11.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "364fec0df39c49a083c9a8a18a23a6bcfd9af130fe9fe321d18520a0d113e09e" +checksum = "8437fd221bde2d4ca316d61b90e337e9e702b3820b87d63caa9ba6c02bd06d96" dependencies = [ "serde", ] [[package]] name = "serde_derive" -version = "1.0.218" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -3723,7 +3750,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4" dependencies = [ "form_urlencoded", - "indexmap 2.7.1", + "indexmap 2.8.0", "itoa", "ryu", "serde", @@ -3735,7 +3762,7 @@ version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.8.0", "itoa", "memchr", "ryu", @@ -3760,7 +3787,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -3794,7 +3821,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.7.1", + "indexmap 2.8.0", "serde", "serde_derive", "serde_json", @@ -3811,7 +3838,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -3924,7 +3951,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -3935,7 +3962,7 @@ checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -3967,9 +3994,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.99" +version = "2.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e02e925281e18ffd9d640e234264753c43edc62d64b2d4cf898f1bc5e75f3fc2" +checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" dependencies = [ "proc-macro2", "quote", @@ -3993,7 +4020,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -4042,15 +4069,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.17.1" +version = "3.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e5a0acb1f3f55f65cc4a866c361b2fb2a0ff6366785ae6fbb5f85df07ba230" +checksum = "2c317e0a526ee6120d8dabad239c8dadca62b24b6f168914bbbc8e2fb1f0e567" dependencies = [ "cfg-if", "fastrand", "getrandom 0.3.1", "once_cell", - "rustix", + "rustix 1.0.2", "windows-sys 0.59.0", ] @@ -4124,7 +4151,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -4135,7 +4162,7 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = 
[ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -4150,9 +4177,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.38" +version = "0.3.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb041120f25f8fbe8fd2dbe4671c7c2ed74d83be2e7a77529bf7e0790ae3f472" +checksum = "dad298b01a40a23aac4580b67e3dbedb7cc8402f3592d7f49469de2ea4aecdd8" dependencies = [ "deranged", "itoa", @@ -4216,9 +4243,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.43.0" +version = "1.44.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" +checksum = "9975ea0f48b5aa3972bf2d888c238182458437cc2a19374b81b25cdf1023fb3a" dependencies = [ "backtrace", "bytes", @@ -4239,7 +4266,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -4328,7 +4355,7 @@ version = "0.22.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.8.0", "serde", "serde_spanned", "toml_datetime", @@ -4758,7 +4785,7 @@ checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -4979,7 +5006,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", "wasm-bindgen-shared", ] @@ -5014,7 +5041,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5303,13 +5330,12 @@ dependencies = [ [[package]] name = "xattr" -version = "1.4.0" +version = "1.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e105d177a3871454f754b33bb0ee637ecaaac997446375fd3e5d43a2ed00c909" +checksum = "0d65cbf2f12c15564212d48f4e3dfb87923d25d611f2aed18f4cb23f0413d89e" dependencies = [ "libc", - "linux-raw-sys", - "rustix", + "rustix 1.0.2", ] [[package]] @@ -5338,7 +5364,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", "synstructure", ] @@ -5354,11 +5380,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.21" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf01143b2dd5d134f11f545cf9f1431b13b749695cb33bcce051e7568f99478" +checksum = "fd97444d05a4328b90e75e503a34bad781f14e28a823ad3557f0750df1ebcbc6" dependencies = [ - "zerocopy-derive 0.8.21", + "zerocopy-derive 0.8.23", ] [[package]] @@ -5369,18 +5395,18 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] name = "zerocopy-derive" -version = "0.8.21" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712c8386f4f4299382c9abee219bee7084f78fb939d88b6840fcc1320d5f6da2" +checksum = "6352c01d0edd5db859a63e2605f4ea3183ddbd15e2c4a9e7d32184df75e4f154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] @@ -5400,7 +5426,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", "synstructure", ] @@ -5429,7 +5455,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.99", + "syn 2.0.100", ] [[package]] diff --git a/contrib/dev-tools/benches/run-benches.sh b/contrib/dev-tools/benches/run-benches.sh new file mode 100755 index 
000000000..0de356492 --- /dev/null +++ b/contrib/dev-tools/benches/run-benches.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +# This script is only intended to be used for local development or testing environments. + +cargo bench --package torrust-tracker-torrent-repository + +cargo bench --package bittorrent-http-tracker-core + +cargo bench --package bittorrent-udp-tracker-core diff --git a/packages/http-tracker-core/Cargo.toml b/packages/http-tracker-core/Cargo.toml index 1e0bcff28..aaf982b04 100644 --- a/packages/http-tracker-core/Cargo.toml +++ b/packages/http-tracker-core/Cargo.toml @@ -18,6 +18,7 @@ aquatic_udp_protocol = "0" bittorrent-http-tracker-protocol = { version = "3.0.0-develop", path = "../http-protocol" } bittorrent-primitives = "0.1.0" bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } +criterion = { version = "0.5.1", features = ["async_tokio"] } futures = "0" thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } @@ -28,3 +29,8 @@ tracing = "0" [dev-dependencies] mockall = "0" torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } + +[[bench]] +harness = false +name = "http_tracker_core_benchmark" + diff --git a/packages/http-tracker-core/benches/helpers/mod.rs b/packages/http-tracker-core/benches/helpers/mod.rs new file mode 100644 index 000000000..4a91f2224 --- /dev/null +++ b/packages/http-tracker-core/benches/helpers/mod.rs @@ -0,0 +1,2 @@ +pub mod sync; +pub mod util; diff --git a/packages/http-tracker-core/benches/helpers/sync.rs b/packages/http-tracker-core/benches/helpers/sync.rs new file mode 100644 index 000000000..c19943b1d --- /dev/null +++ b/packages/http-tracker-core/benches/helpers/sync.rs @@ -0,0 +1,31 @@ +use std::time::{Duration, Instant}; + +use bittorrent_http_tracker_core::services::announce::AnnounceService; + +use crate::helpers::util::{initialize_core_tracker_services, sample_announce_request_for_peer, sample_peer}; + 
+#[must_use] +pub async fn return_announce_data_once(samples: u64) -> Duration { + let (core_tracker_services, core_http_tracker_services) = initialize_core_tracker_services(); + + let peer = sample_peer(); + + let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); + + let announce_service = AnnounceService::new( + core_tracker_services.core_config.clone(), + core_tracker_services.announce_handler.clone(), + core_tracker_services.authentication_service.clone(), + core_tracker_services.whitelist_authorization.clone(), + core_http_tracker_services.http_stats_event_sender.clone(), + ); + + let start = Instant::now(); + for _ in 0..samples { + let _announce_data = announce_service + .handle_announce(&announce_request, &client_ip_sources, None) + .await + .unwrap(); + } + start.elapsed() +} diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs new file mode 100644 index 000000000..f15e9db8f --- /dev/null +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -0,0 +1,118 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::sync::Arc; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +use bittorrent_http_tracker_protocol::v1::requests::announce::Announce; +use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; +use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::announce_handler::AnnounceHandler; +use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; +use bittorrent_tracker_core::authentication::service::AuthenticationService; +use bittorrent_tracker_core::databases::setup::initialize_database; +use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; +use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use 
bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; +use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; +use torrust_tracker_configuration::{Configuration, Core}; +use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; +use torrust_tracker_test_helpers::configuration; + +pub struct CoreTrackerServices { + pub core_config: Arc, + pub announce_handler: Arc, + pub authentication_service: Arc, + pub whitelist_authorization: Arc, +} + +pub struct CoreHttpTrackerServices { + pub http_stats_event_sender: Arc>>, +} + +pub fn initialize_core_tracker_services() -> (CoreTrackerServices, CoreHttpTrackerServices) { + initialize_core_tracker_services_with_config(&configuration::ephemeral_public()) +} + +pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> (CoreTrackerServices, CoreHttpTrackerServices) { + let core_config = Arc::new(config.core.clone()); + let database = initialize_database(&config.core); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); + let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); + let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); + let authentication_service = Arc::new(AuthenticationService::new(&core_config, &in_memory_key_repository)); + + let announce_handler = Arc::new(AnnounceHandler::new( + &config.core, + &whitelist_authorization, + &in_memory_torrent_repository, + &db_torrent_repository, + )); + + // HTTP stats + let (http_stats_event_sender, http_stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + let http_stats_event_sender = Arc::new(http_stats_event_sender); + let _http_stats_repository = 
Arc::new(http_stats_repository); + + ( + CoreTrackerServices { + core_config, + announce_handler, + authentication_service, + whitelist_authorization, + }, + CoreHttpTrackerServices { http_stats_event_sender }, + ) +} + +pub fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), + event: AnnounceEvent::Started, + } +} + +pub fn sample_announce_request_for_peer(peer: Peer) -> (Announce, ClientIpSources) { + let announce_request = Announce { + info_hash: sample_info_hash(), + peer_id: peer.peer_id, + port: peer.peer_addr.port(), + uploaded: Some(peer.uploaded), + downloaded: Some(peer.downloaded), + left: Some(peer.left), + event: Some(peer.event.into()), + compact: None, + numwant: None, + }; + + let client_ip_sources = ClientIpSources { + right_most_x_forwarded_for: None, + connection_info_ip: Some(peer.peer_addr.ip()), + }; + + (announce_request, client_ip_sources) +} +#[must_use] +pub fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") +} + +use bittorrent_http_tracker_core::statistics; +use futures::future::BoxFuture; +use mockall::mock; +use tokio::sync::mpsc::error::SendError; + +mock! 
{ + HttpStatsEventSender {} + impl statistics::event::sender::Sender for HttpStatsEventSender { + fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; + } +} diff --git a/packages/http-tracker-core/benches/http_tracker_core_benchmark.rs b/packages/http-tracker-core/benches/http_tracker_core_benchmark.rs new file mode 100644 index 000000000..aa50ceeb9 --- /dev/null +++ b/packages/http-tracker-core/benches/http_tracker_core_benchmark.rs @@ -0,0 +1,23 @@ +mod helpers; + +use std::time::Duration; + +use criterion::{criterion_group, criterion_main, Criterion}; + +use crate::helpers::sync; + +fn announce_once(c: &mut Criterion) { + let _rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("http_tracker_handle_announce_once"); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("handle_announce_data", |b| { + b.iter(|| sync::return_announce_data_once(100)); + }); +} + +criterion_group!(benches, announce_once); +criterion_main!(benches); diff --git a/packages/udp-tracker-core/Cargo.toml b/packages/udp-tracker-core/Cargo.toml index fc8e2328c..88bab51c1 100644 --- a/packages/udp-tracker-core/Cargo.toml +++ b/packages/udp-tracker-core/Cargo.toml @@ -21,6 +21,7 @@ bittorrent-udp-tracker-protocol = { version = "3.0.0-develop", path = "../udp-pr bloom = "0.3.2" blowfish = "0" cipher = "0" +criterion = { version = "0.5.1", features = ["async_tokio"] } futures = "0" lazy_static = "1" rand = "0" @@ -34,3 +35,8 @@ zerocopy = "0.7" [dev-dependencies] mockall = "0" torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } + +[[bench]] +harness = false +name = "udp_tracker_core_benchmark" + diff --git a/packages/udp-tracker-core/benches/helpers/mod.rs b/packages/udp-tracker-core/benches/helpers/mod.rs new file mode 100644 index 000000000..ea1959bb4 --- /dev/null +++ 
b/packages/udp-tracker-core/benches/helpers/mod.rs @@ -0,0 +1,2 @@ +pub mod sync; +mod utils; diff --git a/packages/udp-tracker-core/benches/helpers/sync.rs b/packages/udp-tracker-core/benches/helpers/sync.rs new file mode 100644 index 000000000..b7d8e848d --- /dev/null +++ b/packages/udp-tracker-core/benches/helpers/sync.rs @@ -0,0 +1,21 @@ +use std::sync::Arc; +use std::time::{Duration, Instant}; + +use bittorrent_udp_tracker_core::services::connect::ConnectService; +use bittorrent_udp_tracker_core::statistics; + +use crate::helpers::utils::{sample_ipv4_remote_addr, sample_issue_time}; + +#[allow(clippy::unused_async)] +pub async fn connect_once(samples: u64) -> Duration { + let (udp_core_stats_event_sender, _udp_core_stats_repository) = statistics::setup::factory(false); + let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); + let start = Instant::now(); + + for _ in 0..samples { + let _response = connect_service.handle_connect(sample_ipv4_remote_addr(), sample_issue_time()); + } + + start.elapsed() +} diff --git a/packages/udp-tracker-core/benches/helpers/utils.rs b/packages/udp-tracker-core/benches/helpers/utils.rs new file mode 100644 index 000000000..7fd6d175f --- /dev/null +++ b/packages/udp-tracker-core/benches/helpers/utils.rs @@ -0,0 +1,25 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use bittorrent_udp_tracker_core::statistics; +use futures::future::BoxFuture; +use mockall::mock; +use tokio::sync::mpsc::error::SendError; + +pub(crate) fn sample_ipv4_remote_addr() -> SocketAddr { + sample_ipv4_socket_address() +} + +pub(crate) fn sample_ipv4_socket_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) +} + +pub(crate) fn sample_issue_time() -> f64 { + 1_000_000_000_f64 +} + +mock! 
{ + pub(crate) UdpCoreStatsEventSender {} + impl statistics::event::sender::Sender for UdpCoreStatsEventSender { + fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; + } +} diff --git a/packages/udp-tracker-core/benches/udp_tracker_core_benchmark.rs b/packages/udp-tracker-core/benches/udp_tracker_core_benchmark.rs new file mode 100644 index 000000000..5bd0e27c8 --- /dev/null +++ b/packages/udp-tracker-core/benches/udp_tracker_core_benchmark.rs @@ -0,0 +1,20 @@ +mod helpers; + +use std::time::Duration; + +use criterion::{criterion_group, criterion_main, Criterion}; + +use crate::helpers::sync; + +fn bench_connect_once(c: &mut Criterion) { + let mut group = c.benchmark_group("udp_tracker/connect_once"); + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("connect_once", |b| { + b.iter(|| sync::connect_once(100)); + }); +} + +criterion_group!(benches, bench_connect_once); +criterion_main!(benches); diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index b3b86e20a..5ce9f6307 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -93,24 +93,29 @@ mod tests { use crate::statistics::repository::Repository; #[tokio::test] - async fn should_increase_the_udp_abort_counter_when_it_receives_a_udp_abort_event() { + async fn should_increase_the_number_of_aborted_requests_when_it_receives_a_udp_request_aborted_event() { let stats_repository = Repository::new(); handle_event(Event::UdpRequestAborted, &stats_repository).await; + let stats = stats_repository.get_stats().await; + assert_eq!(stats.udp_requests_aborted, 1); } + #[tokio::test] - async fn should_increase_the_udp_ban_counter_when_it_receives_a_udp_banned_event() { + async fn 
should_increase_the_number_of_banned_requests_when_it_receives_a_udp_request_banned_event() { let stats_repository = Repository::new(); handle_event(Event::UdpRequestBanned, &stats_repository).await; + let stats = stats_repository.get_stats().await; + assert_eq!(stats.udp_requests_banned, 1); } #[tokio::test] - async fn should_increase_the_udp4_requests_counter_when_it_receives_a_udp4_request_event() { + async fn should_increase_the_number_of_incoming_requests_when_it_receives_a_udp4_incoming_request_event() { let stats_repository = Repository::new(); handle_event(Event::Udp4IncomingRequest, &stats_repository).await; @@ -120,6 +125,74 @@ mod tests { assert_eq!(stats.udp4_requests, 1); } + #[tokio::test] + async fn should_increase_the_udp_abort_counter_when_it_receives_a_udp_abort_event() { + let stats_repository = Repository::new(); + + handle_event(Event::UdpRequestAborted, &stats_repository).await; + let stats = stats_repository.get_stats().await; + assert_eq!(stats.udp_requests_aborted, 1); + } + #[tokio::test] + async fn should_increase_the_udp_ban_counter_when_it_receives_a_udp_banned_event() { + let stats_repository = Repository::new(); + + handle_event(Event::UdpRequestBanned, &stats_repository).await; + let stats = stats_repository.get_stats().await; + assert_eq!(stats.udp_requests_banned, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_connect_requests_counter_when_it_receives_a_udp4_request_event_of_connect_kind() { + let stats_repository = Repository::new(); + + handle_event( + Event::Udp4Request { + kind: crate::statistics::event::UdpResponseKind::Connect, + }, + &stats_repository, + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_announce_requests_counter_when_it_receives_a_udp4_request_event_of_announce_kind() { + let stats_repository = Repository::new(); + + handle_event( + Event::Udp4Request { + kind: 
crate::statistics::event::UdpResponseKind::Announce, + }, + &stats_repository, + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_scrape_requests_counter_when_it_receives_a_udp4_request_event_of_scrape_kind() { + let stats_repository = Repository::new(); + + handle_event( + Event::Udp4Request { + kind: crate::statistics::event::UdpResponseKind::Scrape, + }, + &stats_repository, + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_scrapes_handled, 1); + } + #[tokio::test] async fn should_increase_the_udp4_responses_counter_when_it_receives_a_udp4_response_event() { let stats_repository = Repository::new(); @@ -150,14 +223,54 @@ mod tests { } #[tokio::test] - async fn should_increase_the_udp6_requests_counter_when_it_receives_a_udp6_request_event() { + async fn should_increase_the_udp6_connect_requests_counter_when_it_receives_a_udp6_request_event_of_connect_kind() { + let stats_repository = Repository::new(); + + handle_event( + Event::Udp6Request { + kind: crate::statistics::event::UdpResponseKind::Connect, + }, + &stats_repository, + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_announce_requests_counter_when_it_receives_a_udp6_request_event_of_announce_kind() { + let stats_repository = Repository::new(); + + handle_event( + Event::Udp6Request { + kind: crate::statistics::event::UdpResponseKind::Announce, + }, + &stats_repository, + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_scrape_requests_counter_when_it_receives_a_udp6_request_event_of_scrape_kind() { let stats_repository = Repository::new(); - handle_event(Event::Udp6IncomingRequest, 
&stats_repository).await; + handle_event( + Event::Udp6Request { + kind: crate::statistics::event::UdpResponseKind::Scrape, + }, + &stats_repository, + ) + .await; let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_requests, 1); + assert_eq!(stats.udp6_scrapes_handled, 1); } #[tokio::test] From 6de2dd997f8a72e2443466c437a45a97829a2507 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 11 Mar 2025 15:51:58 +0000 Subject: [PATCH 384/802] refactor: [#1371] add connection context to HTTP core events --- .../src/v1/handlers/announce.rs | 45 +++++++-- .../src/v1/handlers/scrape.rs | 39 ++++++-- .../axum-http-tracker-server/src/v1/routes.rs | 8 +- .../http-tracker-core/benches/helpers/sync.rs | 7 +- .../src/services/announce.rs | 63 +++++++++--- .../http-tracker-core/src/services/scrape.rs | 84 ++++++++++++---- .../src/statistics/event/handler.rs | 99 +++++++++++++++---- .../src/statistics/event/mod.rs | 23 +++-- .../src/statistics/keeper.rs | 13 ++- 9 files changed, 301 insertions(+), 80 deletions(-) diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 6c2e4b713..63ab96fe5 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -2,6 +2,7 @@ //! //! The handlers perform the authentication and authorization of the request, //! and resolve the client IP address. +use std::net::SocketAddr; use std::sync::Arc; use axum::extract::State; @@ -22,27 +23,27 @@ use crate::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; /// authentication (no PATH `key` parameter required). 
#[allow(clippy::unused_async)] pub async fn handle_without_key( - State(state): State>, + State(state): State<(Arc, SocketAddr)>, ExtractRequest(announce_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ) -> Response { tracing::debug!("http announce request: {:#?}", announce_request); - handle(&state, &announce_request, &client_ip_sources, None).await + handle(&state.0, &announce_request, &client_ip_sources, &state.1, None).await } /// It handles the `announce` request when the HTTP tracker requires /// authentication (PATH `key` parameter required). #[allow(clippy::unused_async)] pub async fn handle_with_key( - State(state): State>, + State(state): State<(Arc, SocketAddr)>, ExtractRequest(announce_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ExtractKey(key): ExtractKey, ) -> Response { tracing::debug!("http announce request: {:#?}", announce_request); - handle(&state, &announce_request, &client_ip_sources, Some(key)).await + handle(&state.0, &announce_request, &client_ip_sources, &state.1, Some(key)).await } /// It handles the `announce` request. 
@@ -53,9 +54,18 @@ async fn handle( announce_service: &Arc, announce_request: &Announce, client_ip_sources: &ClientIpSources, + server_socket_addr: &SocketAddr, maybe_key: Option, ) -> Response { - let announce_data = match handle_announce(announce_service, announce_request, client_ip_sources, maybe_key).await { + let announce_data = match handle_announce( + announce_service, + announce_request, + client_ip_sources, + server_socket_addr, + maybe_key, + ) + .await + { Ok(announce_data) => announce_data, Err(error) => { let error_response = responses::error::Error { @@ -71,10 +81,11 @@ async fn handle_announce( announce_service: &Arc, announce_request: &Announce, client_ip_sources: &ClientIpSources, + server_socket_addr: &SocketAddr, maybe_key: Option, ) -> Result { announce_service - .handle_announce(announce_request, client_ip_sources, maybe_key) + .handle_announce(announce_request, client_ip_sources, server_socket_addr, maybe_key) .await } @@ -196,6 +207,7 @@ mod tests { mod with_tracker_in_private_mode { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; use bittorrent_http_tracker_protocol::v1::responses; @@ -209,12 +221,15 @@ mod tests { async fn it_should_fail_when_the_authentication_key_is_missing() { let http_core_tracker_services = initialize_private_tracker(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let maybe_key = None; let response = handle_announce( &http_core_tracker_services.announce_service, &sample_announce_request(), &sample_client_ip_sources(), + &server_socket_addr, maybe_key, ) .await @@ -236,12 +251,15 @@ mod tests { let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let maybe_key = Some(unregistered_key); let response = handle_announce( &http_core_tracker_services.announce_service, &sample_announce_request(), 
&sample_client_ip_sources(), + &server_socket_addr, maybe_key, ) .await @@ -260,6 +278,8 @@ mod tests { mod with_tracker_in_listed_mode { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use bittorrent_http_tracker_protocol::v1::responses; use super::{initialize_listed_tracker, sample_announce_request, sample_client_ip_sources}; @@ -272,10 +292,13 @@ mod tests { let announce_request = sample_announce_request(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let response = handle_announce( &http_core_tracker_services.announce_service, &announce_request, &sample_client_ip_sources(), + &server_socket_addr, None, ) .await @@ -297,6 +320,8 @@ mod tests { mod with_tracker_on_reverse_proxy { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; @@ -313,10 +338,13 @@ mod tests { connection_info_ip: None, }; + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let response = handle_announce( &http_core_tracker_services.announce_service, &sample_announce_request(), &client_ip_sources, + &server_socket_addr, None, ) .await @@ -335,6 +363,8 @@ mod tests { mod with_tracker_not_on_reverse_proxy { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; @@ -351,10 +381,13 @@ mod tests { connection_info_ip: None, }; + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let response = handle_announce( &http_core_tracker_services.announce_service, &sample_announce_request(), &client_ip_sources, + &server_socket_addr, None, ) .await diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index ae3a35bd3..ca90f74c6 
100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -2,6 +2,7 @@ //! //! The handlers perform the authentication and authorization of the request, //! and resolve the client IP address. +use std::net::SocketAddr; use std::sync::Arc; use axum::extract::State; @@ -22,13 +23,13 @@ use crate::v1::extractors::scrape_request::ExtractRequest; /// to run in `public` mode. #[allow(clippy::unused_async)] pub async fn handle_without_key( - State(state): State>, + State(state): State<(Arc, SocketAddr)>, ExtractRequest(scrape_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ) -> Response { tracing::debug!("http scrape request: {:#?}", &scrape_request); - handle(&state, &scrape_request, &client_ip_sources, None).await + handle(&state.0, &scrape_request, &client_ip_sources, &state.1, None).await } /// It handles the `scrape` request when the HTTP tracker is configured @@ -37,24 +38,25 @@ pub async fn handle_without_key( /// In this case, the authentication `key` parameter is required. 
#[allow(clippy::unused_async)] pub async fn handle_with_key( - State(state): State>, + State(state): State<(Arc, SocketAddr)>, ExtractRequest(scrape_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ExtractKey(key): ExtractKey, ) -> Response { tracing::debug!("http scrape request: {:#?}", &scrape_request); - handle(&state, &scrape_request, &client_ip_sources, Some(key)).await + handle(&state.0, &scrape_request, &client_ip_sources, &state.1, Some(key)).await } async fn handle( scrape_service: &Arc, scrape_request: &Scrape, client_ip_sources: &ClientIpSources, + server_socket_addr: &SocketAddr, maybe_key: Option, ) -> Response { let scrape_data = match scrape_service - .handle_scrape(scrape_request, client_ip_sources, maybe_key) + .handle_scrape(scrape_request, client_ip_sources, server_socket_addr, maybe_key) .await { Ok(scrape_data) => scrape_data, @@ -165,6 +167,7 @@ mod tests { } mod with_tracker_in_private_mode { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; use bittorrent_http_tracker_core::services::scrape::ScrapeService; @@ -175,6 +178,8 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_missing() { + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let (core_tracker_services, core_http_tracker_services) = initialize_private_tracker(); let scrape_request = sample_scrape_request(); @@ -188,7 +193,7 @@ mod tests { ); let scrape_data = scrape_service - .handle_scrape(&scrape_request, &sample_client_ip_sources(), maybe_key) + .handle_scrape(&scrape_request, &sample_client_ip_sources(), &server_socket_addr, maybe_key) .await .unwrap(); @@ -199,6 +204,8 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_invalid() { + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let (core_tracker_services, 
core_http_tracker_services) = initialize_private_tracker(); let scrape_request = sample_scrape_request(); @@ -213,7 +220,7 @@ mod tests { ); let scrape_data = scrape_service - .handle_scrape(&scrape_request, &sample_client_ip_sources(), maybe_key) + .handle_scrape(&scrape_request, &sample_client_ip_sources(), &server_socket_addr, maybe_key) .await .unwrap(); @@ -225,6 +232,8 @@ mod tests { mod with_tracker_in_listed_mode { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use bittorrent_http_tracker_core::services::scrape::ScrapeService; use torrust_tracker_primitives::core::ScrapeData; @@ -236,6 +245,8 @@ mod tests { let scrape_request = sample_scrape_request(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let scrape_service = ScrapeService::new( core_tracker_services.core_config.clone(), core_tracker_services.scrape_handler.clone(), @@ -244,7 +255,7 @@ mod tests { ); let scrape_data = scrape_service - .handle_scrape(&scrape_request, &sample_client_ip_sources(), None) + .handle_scrape(&scrape_request, &sample_client_ip_sources(), &server_socket_addr, None) .await .unwrap(); @@ -256,6 +267,8 @@ mod tests { mod with_tracker_on_reverse_proxy { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use bittorrent_http_tracker_core::services::scrape::ScrapeService; use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; @@ -272,6 +285,8 @@ mod tests { connection_info_ip: None, }; + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let scrape_service = ScrapeService::new( core_tracker_services.core_config.clone(), core_tracker_services.scrape_handler.clone(), @@ -280,7 +295,7 @@ mod tests { ); let response = scrape_service - .handle_scrape(&sample_scrape_request(), &client_ip_sources, None) + .handle_scrape(&sample_scrape_request(), &client_ip_sources, &server_socket_addr, None) .await .unwrap_err(); @@ 
-297,6 +312,8 @@ mod tests { mod with_tracker_not_on_reverse_proxy { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use bittorrent_http_tracker_core::services::scrape::ScrapeService; use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; @@ -313,6 +330,8 @@ mod tests { connection_info_ip: None, }; + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let scrape_service = ScrapeService::new( core_tracker_services.core_config.clone(), core_tracker_services.scrape_handler.clone(), @@ -321,7 +340,7 @@ mod tests { ); let response = scrape_service - .handle_scrape(&sample_scrape_request(), &client_ip_sources, None) + .handle_scrape(&sample_scrape_request(), &client_ip_sources, &server_socket_addr, None) .await .unwrap_err(); diff --git a/packages/axum-http-tracker-server/src/v1/routes.rs b/packages/axum-http-tracker-server/src/v1/routes.rs index 5f666e9d4..d5907887e 100644 --- a/packages/axum-http-tracker-server/src/v1/routes.rs +++ b/packages/axum-http-tracker-server/src/v1/routes.rs @@ -38,20 +38,20 @@ pub fn router(http_tracker_container: Arc, server_sock // Announce request .route( "/announce", - get(announce::handle_without_key).with_state(http_tracker_container.announce_service.clone()), + get(announce::handle_without_key).with_state((http_tracker_container.announce_service.clone(), server_socket_addr)), ) .route( "/announce/{key}", - get(announce::handle_with_key).with_state(http_tracker_container.announce_service.clone()), + get(announce::handle_with_key).with_state((http_tracker_container.announce_service.clone(), server_socket_addr)), ) // Scrape request .route( "/scrape", - get(scrape::handle_without_key).with_state(http_tracker_container.scrape_service.clone()), + get(scrape::handle_without_key).with_state((http_tracker_container.scrape_service.clone(), server_socket_addr)), ) .route( "/scrape/{key}", - 
get(scrape::handle_with_key).with_state(http_tracker_container.scrape_service.clone()), + get(scrape::handle_with_key).with_state((http_tracker_container.scrape_service.clone(), server_socket_addr)), ) // Add extension to get the client IP from the connection info .layer(SecureClientIpSource::ConnectInfo.into_extension()) diff --git a/packages/http-tracker-core/benches/helpers/sync.rs b/packages/http-tracker-core/benches/helpers/sync.rs index c19943b1d..9d41c2459 100644 --- a/packages/http-tracker-core/benches/helpers/sync.rs +++ b/packages/http-tracker-core/benches/helpers/sync.rs @@ -1,3 +1,4 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::time::{Duration, Instant}; use bittorrent_http_tracker_core::services::announce::AnnounceService; @@ -20,12 +21,16 @@ pub async fn return_announce_data_once(samples: u64) -> Duration { core_http_tracker_services.http_stats_event_sender.clone(), ); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let start = Instant::now(); + for _ in 0..samples { let _announce_data = announce_service - .handle_announce(&announce_request, &client_ip_sources, None) + .handle_announce(&announce_request, &client_ip_sources, &server_socket_addr, None) .await .unwrap(); } + start.elapsed() } diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 896387b28..b027ee0d9 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -7,7 +7,7 @@ //! //! It also sends an [`http_tracker_core::statistics::event::Event`] //! because events are specific for the HTTP tracker. 
-use std::net::IpAddr; +use std::net::{IpAddr, SocketAddr}; use std::panic::Location; use std::sync::Arc; @@ -68,6 +68,7 @@ impl AnnounceService { &self, announce_request: &Announce, client_ip_sources: &ClientIpSources, + server_socket_addr: &SocketAddr, maybe_key: Option, ) -> Result { self.authenticate(maybe_key).await?; @@ -85,7 +86,7 @@ impl AnnounceService { .announce(&announce_request.info_hash, &mut peer, &remote_client_ip, &peers_wanted) .await?; - self.send_stats_event(remote_client_ip).await; + self.send_stats_event(remote_client_ip, *server_socket_addr).await; Ok(announce_data) } @@ -122,17 +123,27 @@ impl AnnounceService { } } - async fn send_stats_event(&self, peer_ip: IpAddr) { + async fn send_stats_event(&self, peer_ip: IpAddr, server_socket_addr: SocketAddr) { if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { match peer_ip { IpAddr::V4(_) => { http_stats_event_sender - .send_event(statistics::event::Event::Tcp4Announce) + .send_event(statistics::event::Event::Tcp4Announce { + connection: statistics::event::ConnectionContext { + client_ip_addr: peer_ip, + server_socket_addr, + }, + }) .await; } IpAddr::V6(_) => { http_stats_event_sender - .send_event(statistics::event::Event::Tcp6Announce) + .send_event(statistics::event::Event::Tcp6Announce { + connection: statistics::event::ConnectionContext { + client_ip_addr: peer_ip, + server_socket_addr, + }, + }) .await; } } @@ -338,6 +349,7 @@ mod tests { }; use crate::services::announce::AnnounceService; use crate::statistics; + use crate::statistics::event::ConnectionContext; #[tokio::test] async fn it_should_return_the_announce_data() { @@ -347,6 +359,8 @@ mod tests { let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let announce_service = AnnounceService::new( core_tracker_services.core_config.clone(), 
core_tracker_services.announce_handler.clone(), @@ -356,7 +370,7 @@ mod tests { ); let announce_data = announce_service - .handle_announce(&announce_request, &client_ip_sources, None) + .handle_announce(&announce_request, &client_ip_sources, &server_socket_addr, None) .await .unwrap(); @@ -375,16 +389,24 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_4_announce_event_when_the_peer_uses_ipv4() { + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Tcp4Announce)) + .with(eq(statistics::event::Event::Tcp4Announce { + connection: ConnectionContext { + client_ip_addr: IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), + server_socket_addr, + }, + })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let (core_tracker_services, mut core_http_tracker_services) = initialize_core_tracker_services(); + core_http_tracker_services.http_stats_event_sender = http_stats_event_sender; let peer = sample_peer_using_ipv4(); @@ -400,7 +422,7 @@ mod tests { ); let _announce_data = announce_service - .handle_announce(&announce_request, &client_ip_sources, None) + .handle_announce(&announce_request, &client_ip_sources, &server_socket_addr, None) .await .unwrap(); } @@ -425,11 +447,18 @@ mod tests { { // Tracker changes the peer IP to the tracker external IP when the peer is using the loopback IP. 
+ let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + // Assert that the event sent is a TCP4 event let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Tcp4Announce)) + .with(eq(statistics::event::Event::Tcp4Announce { + connection: ConnectionContext { + client_ip_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), + server_socket_addr, + }, + })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let http_stats_event_sender: Arc>> = @@ -437,6 +466,7 @@ mod tests { let (core_tracker_services, mut core_http_tracker_services) = initialize_core_tracker_services_with_config(&tracker_with_an_ipv6_external_ip()); + core_http_tracker_services.http_stats_event_sender = http_stats_event_sender; let peer = peer_with_the_ipv4_loopback_ip(); @@ -452,7 +482,7 @@ mod tests { ); let _announce_data = announce_service - .handle_announce(&announce_request, &client_ip_sources, None) + .handle_announce(&announce_request, &client_ip_sources, &server_socket_addr, None) .await .unwrap(); } @@ -460,10 +490,17 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_6_announce_event_when_the_peer_uses_ipv6_even_if_the_tracker_changes_the_peer_ip_to_ipv4() { + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Tcp6Announce)) + .with(eq(statistics::event::Event::Tcp6Announce { + connection: ConnectionContext { + client_ip_addr: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + server_socket_addr, + }, + })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let http_stats_event_sender: Arc>> = @@ -484,8 +521,10 @@ mod tests { 
core_http_tracker_services.http_stats_event_sender.clone(), ); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let _announce_data = announce_service - .handle_announce(&announce_request, &client_ip_sources, None) + .handle_announce(&announce_request, &client_ip_sources, &server_socket_addr, None) .await .unwrap(); } diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 53eed0361..607ee2a3f 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -7,7 +7,7 @@ //! //! It also sends an [`http_tracker_core::statistics::event::Event`] //! because events are specific for the HTTP tracker. -use std::net::IpAddr; +use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; @@ -20,6 +20,7 @@ use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; use crate::statistics; +use crate::statistics::event::ConnectionContext; /// The HTTP tracker `scrape` service. 
/// @@ -70,6 +71,7 @@ impl ScrapeService { &self, scrape_request: &Scrape, client_ip_sources: &ClientIpSources, + server_socket_addr: &SocketAddr, maybe_key: Option, ) -> Result { let scrape_data = if self.authentication_is_required() && !self.is_authenticated(maybe_key).await { @@ -80,7 +82,7 @@ impl ScrapeService { let remote_client_ip = self.resolve_remote_client_ip(client_ip_sources)?; - self.send_stats_event(&remote_client_ip).await; + self.send_stats_event(remote_client_ip, *server_socket_addr).await; Ok(scrape_data) } @@ -102,11 +104,21 @@ impl ScrapeService { peer_ip_resolver::invoke(self.core_config.net.on_reverse_proxy, client_ip_sources) } - async fn send_stats_event(&self, original_peer_ip: &IpAddr) { + async fn send_stats_event(&self, original_peer_ip: IpAddr, server_socket_addr: SocketAddr) { if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { let event = match original_peer_ip { - IpAddr::V4(_) => statistics::event::Event::Tcp4Scrape, - IpAddr::V6(_) => statistics::event::Event::Tcp6Scrape, + IpAddr::V4(_) => statistics::event::Event::Tcp4Scrape { + connection: ConnectionContext { + client_ip_addr: original_peer_ip, + server_socket_addr, + }, + }, + IpAddr::V6(_) => statistics::event::Event::Tcp6Scrape { + connection: ConnectionContext { + client_ip_addr: original_peer_ip, + server_socket_addr, + }, + }, }; http_stats_event_sender.send_event(event).await; } @@ -246,7 +258,7 @@ mod tests { mod with_real_data { use std::future; - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; @@ -262,6 +274,7 @@ mod tests { }; use crate::services::scrape::ScrapeService; use crate::statistics; + use crate::statistics::event::ConnectionContext; use crate::tests::sample_info_hash; #[tokio::test] @@ -295,6 +308,8 @@ mod tests { connection_info_ip: Some(original_peer_ip), }; + let server_socket_addr = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let scrape_service = Arc::new(ScrapeService::new( core_config.clone(), container.scrape_handler.clone(), @@ -303,7 +318,7 @@ mod tests { )); let scrape_data = scrape_service - .handle_scrape(&scrape_request, &client_ip_sources, None) + .handle_scrape(&scrape_request, &client_ip_sources, &server_socket_addr, None) .await .unwrap(); @@ -327,7 +342,12 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Tcp4Scrape)) + .with(eq(statistics::event::Event::Tcp4Scrape { + connection: ConnectionContext { + client_ip_addr: IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), + server_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), + }, + })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let http_stats_event_sender: Arc>> = @@ -346,6 +366,8 @@ mod tests { connection_info_ip: Some(peer_ip), }; + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let scrape_service = Arc::new(ScrapeService::new( Arc::new(config.core), container.scrape_handler.clone(), @@ -354,19 +376,26 @@ mod tests { )); scrape_service - .handle_scrape(&scrape_request, &client_ip_sources, None) + .handle_scrape(&scrape_request, &client_ip_sources, &server_socket_addr, None) .await .unwrap(); } #[tokio::test] async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let config = configuration::ephemeral(); let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Tcp6Scrape)) + .with(eq(statistics::event::Event::Tcp6Scrape { + connection: ConnectionContext { + client_ip_addr: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 
0x6969, 0x6969, 0x6969)), + server_socket_addr, + }, + })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let http_stats_event_sender: Arc>> = @@ -385,6 +414,8 @@ mod tests { connection_info_ip: Some(peer_ip), }; + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let scrape_service = Arc::new(ScrapeService::new( Arc::new(config.core), container.scrape_handler.clone(), @@ -393,7 +424,7 @@ mod tests { )); scrape_service - .handle_scrape(&scrape_request, &client_ip_sources, None) + .handle_scrape(&scrape_request, &client_ip_sources, &server_socket_addr, None) .await .unwrap(); } @@ -402,7 +433,7 @@ mod tests { mod with_zeroed_data { use std::future; - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; @@ -417,6 +448,7 @@ mod tests { }; use crate::services::scrape::ScrapeService; use crate::statistics; + use crate::statistics::event::ConnectionContext; use crate::tests::sample_info_hash; #[tokio::test] @@ -450,6 +482,8 @@ mod tests { connection_info_ip: Some(original_peer_ip), }; + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let scrape_service = Arc::new(ScrapeService::new( Arc::new(config.core), container.scrape_handler.clone(), @@ -458,7 +492,7 @@ mod tests { )); let scrape_data = scrape_service - .handle_scrape(&scrape_request, &client_ip_sources, None) + .handle_scrape(&scrape_request, &client_ip_sources, &server_socket_addr, None) .await .unwrap(); @@ -476,7 +510,12 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Tcp4Scrape)) + .with(eq(statistics::event::Event::Tcp4Scrape { + connection: ConnectionContext { + client_ip_addr: IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), + server_socket_addr: 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), + }, + })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let http_stats_event_sender: Arc>> = @@ -493,6 +532,8 @@ mod tests { connection_info_ip: Some(peer_ip), }; + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let scrape_service = Arc::new(ScrapeService::new( Arc::new(config.core), container.scrape_handler.clone(), @@ -501,13 +542,15 @@ mod tests { )); scrape_service - .handle_scrape(&scrape_request, &client_ip_sources, None) + .handle_scrape(&scrape_request, &client_ip_sources, &server_socket_addr, None) .await .unwrap(); } #[tokio::test] async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let config = configuration::ephemeral(); let container = initialize_services_with_configuration(&config); @@ -515,7 +558,12 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Tcp6Scrape)) + .with(eq(statistics::event::Event::Tcp6Scrape { + connection: ConnectionContext { + client_ip_addr: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + server_socket_addr, + }, + })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let http_stats_event_sender: Arc>> = @@ -532,6 +580,8 @@ mod tests { connection_info_ip: Some(peer_ip), }; + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let scrape_service = Arc::new(ScrapeService::new( Arc::new(config.core), container.scrape_handler.clone(), @@ -540,7 +590,7 @@ mod tests { )); scrape_service - .handle_scrape(&scrape_request, &client_ip_sources, None) + .handle_scrape(&scrape_request, &client_ip_sources, &server_socket_addr, None) .await .unwrap(); } diff --git 
a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index b0a0c186f..662c82ee2 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -1,23 +1,48 @@ +use std::net::IpAddr; + use crate::statistics::event::Event; use crate::statistics::repository::Repository; +/// # Panics +/// +/// This function panics if the client IP address is not the same as the IP +/// version of the event. pub async fn handle_event(event: Event, stats_repository: &Repository) { match event { // TCP4 - Event::Tcp4Announce => { - stats_repository.increase_tcp4_announces().await; - } - Event::Tcp4Scrape => { - stats_repository.increase_tcp4_scrapes().await; - } - + Event::Tcp4Announce { connection } => match connection.client_ip_addr { + IpAddr::V4(_) => { + stats_repository.increase_tcp4_announces().await; + } + IpAddr::V6(_) => { + panic!("A client IPv6 address was received in a TCP4 announce event"); + } + }, + Event::Tcp4Scrape { connection } => match connection.client_ip_addr { + IpAddr::V4(_) => { + stats_repository.increase_tcp4_scrapes().await; + } + IpAddr::V6(_) => { + panic!("A client IPv6 address was received in a TCP4 scrape event"); + } + }, // TCP6 - Event::Tcp6Announce => { - stats_repository.increase_tcp6_announces().await; - } - Event::Tcp6Scrape => { - stats_repository.increase_tcp6_scrapes().await; - } + Event::Tcp6Announce { connection } => match connection.client_ip_addr { + IpAddr::V4(_) => { + panic!("A client IPv4 address was received in a TCP6 announce event"); + } + IpAddr::V6(_) => { + stats_repository.increase_tcp6_announces().await; + } + }, + Event::Tcp6Scrape { connection } => match connection.client_ip_addr { + IpAddr::V4(_) => { + panic!("A client IPv4 address was received in a TCP6 scrape event"); + } + IpAddr::V6(_) => { + stats_repository.increase_tcp6_scrapes().await; + } + }, } tracing::debug!("stats: 
{:?}", stats_repository.get_stats().await); @@ -25,15 +50,26 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { #[cfg(test)] mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use crate::statistics::event::handler::handle_event; - use crate::statistics::event::Event; + use crate::statistics::event::{ConnectionContext, Event}; use crate::statistics::repository::Repository; #[tokio::test] async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { let stats_repository = Repository::new(); - handle_event(Event::Tcp4Announce, &stats_repository).await; + handle_event( + Event::Tcp4Announce { + connection: ConnectionContext { + client_ip_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), + server_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), + }, + }, + &stats_repository, + ) + .await; let stats = stats_repository.get_stats().await; @@ -44,7 +80,16 @@ mod tests { async fn should_increase_the_tcp4_scrapes_counter_when_it_receives_a_tcp4_scrape_event() { let stats_repository = Repository::new(); - handle_event(Event::Tcp4Scrape, &stats_repository).await; + handle_event( + Event::Tcp4Scrape { + connection: ConnectionContext { + client_ip_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), + server_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), + }, + }, + &stats_repository, + ) + .await; let stats = stats_repository.get_stats().await; @@ -55,7 +100,16 @@ mod tests { async fn should_increase_the_tcp6_announces_counter_when_it_receives_a_tcp6_announce_event() { let stats_repository = Repository::new(); - handle_event(Event::Tcp6Announce, &stats_repository).await; + handle_event( + Event::Tcp6Announce { + connection: ConnectionContext { + client_ip_addr: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + server_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), + }, + }, + 
&stats_repository, + ) + .await; let stats = stats_repository.get_stats().await; @@ -66,7 +120,16 @@ mod tests { async fn should_increase_the_tcp6_scrapes_counter_when_it_receives_a_tcp6_scrape_event() { let stats_repository = Repository::new(); - handle_event(Event::Tcp6Scrape, &stats_repository).await; + handle_event( + Event::Tcp6Scrape { + connection: ConnectionContext { + client_ip_addr: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + server_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), + }, + }, + &stats_repository, + ) + .await; let stats = stats_repository.get_stats().await; diff --git a/packages/http-tracker-core/src/statistics/event/mod.rs b/packages/http-tracker-core/src/statistics/event/mod.rs index e25148666..29dba0b6a 100644 --- a/packages/http-tracker-core/src/statistics/event/mod.rs +++ b/packages/http-tracker-core/src/statistics/event/mod.rs @@ -1,3 +1,5 @@ +use std::net::{IpAddr, SocketAddr}; + pub mod handler; pub mod listener; pub mod sender; @@ -5,17 +7,18 @@ pub mod sender; /// An statistics event. It is used to collect tracker metrics. /// /// - `Tcp` prefix means the event was triggered by the HTTP tracker -/// - `Udp` prefix means the event was triggered by the UDP tracker /// - `4` or `6` prefixes means the IP version used by the peer -/// - Finally the event suffix is the type of request: `announce`, `scrape` or `connection` -/// -/// > NOTE: HTTP trackers do not use `connection` requests. +/// - Finally the event suffix is the type of request: `announce` or `scrape` #[derive(Debug, PartialEq, Eq)] pub enum Event { - // code-review: consider one single event for request type with data: Event::Announce { scheme: HTTPorUDP, ip_version: V4orV6 } - // Attributes are enums too. 
- Tcp4Announce, - Tcp4Scrape, - Tcp6Announce, - Tcp6Scrape, + Tcp4Announce { connection: ConnectionContext }, + Tcp4Scrape { connection: ConnectionContext }, + Tcp6Announce { connection: ConnectionContext }, + Tcp6Scrape { connection: ConnectionContext }, +} + +#[derive(Debug, PartialEq, Eq)] +pub struct ConnectionContext { + pub client_ip_addr: IpAddr, + pub server_socket_addr: SocketAddr, } diff --git a/packages/http-tracker-core/src/statistics/keeper.rs b/packages/http-tracker-core/src/statistics/keeper.rs index ae5c3276e..bdbac8e77 100644 --- a/packages/http-tracker-core/src/statistics/keeper.rs +++ b/packages/http-tracker-core/src/statistics/keeper.rs @@ -51,7 +51,9 @@ impl Keeper { #[cfg(test)] mod tests { - use crate::statistics::event::Event; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use crate::statistics::event::{ConnectionContext, Event}; use crate::statistics::keeper::Keeper; use crate::statistics::metrics::Metrics; @@ -70,7 +72,14 @@ mod tests { let event_sender = stats_tracker.run_event_listener(); - let result = event_sender.send_event(Event::Tcp4Announce).await; + let result = event_sender + .send_event(Event::Tcp4Announce { + connection: ConnectionContext { + client_ip_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), + server_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), + }, + }) + .await; assert!(result.is_some()); } From 2de6c14bbc2967bcf45c1584e9c8b8f1786e98b5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Mar 2025 10:14:24 +0000 Subject: [PATCH 385/802] refactor: [#1373] merge HTTP stats events with different IP version --- .../src/services/announce.rs | 36 ++++++------------- .../http-tracker-core/src/services/scrape.rs | 23 +++++------- .../src/statistics/event/handler.rs | 32 ++++------------- .../src/statistics/event/mod.rs | 11 +++--- .../src/statistics/keeper.rs | 2 +- 5 files changed, 31 insertions(+), 73 deletions(-) diff --git a/packages/http-tracker-core/src/services/announce.rs 
b/packages/http-tracker-core/src/services/announce.rs index b027ee0d9..87250af30 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -125,28 +125,14 @@ impl AnnounceService { async fn send_stats_event(&self, peer_ip: IpAddr, server_socket_addr: SocketAddr) { if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { - match peer_ip { - IpAddr::V4(_) => { - http_stats_event_sender - .send_event(statistics::event::Event::Tcp4Announce { - connection: statistics::event::ConnectionContext { - client_ip_addr: peer_ip, - server_socket_addr, - }, - }) - .await; - } - IpAddr::V6(_) => { - http_stats_event_sender - .send_event(statistics::event::Event::Tcp6Announce { - connection: statistics::event::ConnectionContext { - client_ip_addr: peer_ip, - server_socket_addr, - }, - }) - .await; - } - } + http_stats_event_sender + .send_event(statistics::event::Event::TcpAnnounce { + connection: statistics::event::ConnectionContext { + client_ip_addr: peer_ip, + server_socket_addr, + }, + }) + .await; } } } @@ -394,7 +380,7 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Tcp4Announce { + .with(eq(statistics::event::Event::TcpAnnounce { connection: ConnectionContext { client_ip_addr: IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), server_socket_addr, @@ -453,7 +439,7 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Tcp4Announce { + .with(eq(statistics::event::Event::TcpAnnounce { connection: ConnectionContext { client_ip_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), server_socket_addr, @@ -495,7 +481,7 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - 
.with(eq(statistics::event::Event::Tcp6Announce { + .with(eq(statistics::event::Event::TcpAnnounce { connection: ConnectionContext { client_ip_addr: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), server_socket_addr, diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 607ee2a3f..31c2ce2c4 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -106,21 +106,14 @@ impl ScrapeService { async fn send_stats_event(&self, original_peer_ip: IpAddr, server_socket_addr: SocketAddr) { if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { - let event = match original_peer_ip { - IpAddr::V4(_) => statistics::event::Event::Tcp4Scrape { + http_stats_event_sender + .send_event(statistics::event::Event::TcpScrape { connection: ConnectionContext { client_ip_addr: original_peer_ip, server_socket_addr, }, - }, - IpAddr::V6(_) => statistics::event::Event::Tcp6Scrape { - connection: ConnectionContext { - client_ip_addr: original_peer_ip, - server_socket_addr, - }, - }, - }; - http_stats_event_sender.send_event(event).await; + }) + .await; } } } @@ -342,7 +335,7 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Tcp4Scrape { + .with(eq(statistics::event::Event::TcpScrape { connection: ConnectionContext { client_ip_addr: IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), server_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), @@ -390,7 +383,7 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Tcp6Scrape { + .with(eq(statistics::event::Event::TcpScrape { connection: ConnectionContext { client_ip_addr: 
IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), server_socket_addr, @@ -510,7 +503,7 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Tcp4Scrape { + .with(eq(statistics::event::Event::TcpScrape { connection: ConnectionContext { client_ip_addr: IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), server_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), @@ -558,7 +551,7 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Tcp6Scrape { + .with(eq(statistics::event::Event::TcpScrape { connection: ConnectionContext { client_ip_addr: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), server_socket_addr, diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index 662c82ee2..ea8cedc71 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -9,35 +9,17 @@ use crate::statistics::repository::Repository; /// version of the event. 
pub async fn handle_event(event: Event, stats_repository: &Repository) { match event { - // TCP4 - Event::Tcp4Announce { connection } => match connection.client_ip_addr { + Event::TcpAnnounce { connection } => match connection.client_ip_addr { IpAddr::V4(_) => { stats_repository.increase_tcp4_announces().await; } - IpAddr::V6(_) => { - panic!("A client IPv6 address was received in a TCP4 announce event"); - } - }, - Event::Tcp4Scrape { connection } => match connection.client_ip_addr { - IpAddr::V4(_) => { - stats_repository.increase_tcp4_scrapes().await; - } - IpAddr::V6(_) => { - panic!("A client IPv6 address was received in a TCP4 scrape event"); - } - }, - // TCP6 - Event::Tcp6Announce { connection } => match connection.client_ip_addr { - IpAddr::V4(_) => { - panic!("A client IPv4 address was received in a TCP6 announce event"); - } IpAddr::V6(_) => { stats_repository.increase_tcp6_announces().await; } }, - Event::Tcp6Scrape { connection } => match connection.client_ip_addr { + Event::TcpScrape { connection } => match connection.client_ip_addr { IpAddr::V4(_) => { - panic!("A client IPv4 address was received in a TCP6 scrape event"); + stats_repository.increase_tcp4_scrapes().await; } IpAddr::V6(_) => { stats_repository.increase_tcp6_scrapes().await; @@ -61,7 +43,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Tcp4Announce { + Event::TcpAnnounce { connection: ConnectionContext { client_ip_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), server_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), @@ -81,7 +63,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Tcp4Scrape { + Event::TcpScrape { connection: ConnectionContext { client_ip_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), server_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), @@ -101,7 +83,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Tcp6Announce { + 
Event::TcpAnnounce { connection: ConnectionContext { client_ip_addr: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), server_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), @@ -121,7 +103,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Tcp6Scrape { + Event::TcpScrape { connection: ConnectionContext { client_ip_addr: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), server_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), diff --git a/packages/http-tracker-core/src/statistics/event/mod.rs b/packages/http-tracker-core/src/statistics/event/mod.rs index 29dba0b6a..c27ce7c6d 100644 --- a/packages/http-tracker-core/src/statistics/event/mod.rs +++ b/packages/http-tracker-core/src/statistics/event/mod.rs @@ -6,15 +6,12 @@ pub mod sender; /// An statistics event. It is used to collect tracker metrics. /// -/// - `Tcp` prefix means the event was triggered by the HTTP tracker -/// - `4` or `6` prefixes means the IP version used by the peer -/// - Finally the event suffix is the type of request: `announce` or `scrape` +/// - `Tcp` prefix means the event was triggered by the HTTP tracker. +/// - The event suffix is the type of request: `announce` or `scrape`. 
#[derive(Debug, PartialEq, Eq)] pub enum Event { - Tcp4Announce { connection: ConnectionContext }, - Tcp4Scrape { connection: ConnectionContext }, - Tcp6Announce { connection: ConnectionContext }, - Tcp6Scrape { connection: ConnectionContext }, + TcpAnnounce { connection: ConnectionContext }, + TcpScrape { connection: ConnectionContext }, } #[derive(Debug, PartialEq, Eq)] diff --git a/packages/http-tracker-core/src/statistics/keeper.rs b/packages/http-tracker-core/src/statistics/keeper.rs index bdbac8e77..6f84e27b1 100644 --- a/packages/http-tracker-core/src/statistics/keeper.rs +++ b/packages/http-tracker-core/src/statistics/keeper.rs @@ -73,7 +73,7 @@ mod tests { let event_sender = stats_tracker.run_event_listener(); let result = event_sender - .send_event(Event::Tcp4Announce { + .send_event(Event::TcpAnnounce { connection: ConnectionContext { client_ip_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), server_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), From b8a3d44671cbff5486dc57ac3915ee865a1756d4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Mar 2025 10:34:27 +0000 Subject: [PATCH 386/802] refactor: [#1373] capture socket address from connection info in HTTP tracker Instead of only the IP. The port will be available in events so we can build metrics also using the client's port. 
--- .../src/v1/extractors/client_ip_sources.rs | 4 +-- .../src/v1/handlers/announce.rs | 6 ++-- .../src/v1/handlers/scrape.rs | 8 +++--- .../src/v1/services/peer_ip_resolver.rs | 28 ++++++++++--------- .../http-tracker-core/benches/helpers/util.rs | 2 +- .../src/services/announce.rs | 2 +- .../http-tracker-core/src/services/scrape.rs | 12 ++++---- .../src/statistics/event/mod.rs | 3 -- 8 files changed, 32 insertions(+), 33 deletions(-) diff --git a/packages/axum-http-tracker-server/src/v1/extractors/client_ip_sources.rs b/packages/axum-http-tracker-server/src/v1/extractors/client_ip_sources.rs index 8c7a2bf40..ed568e0b9 100644 --- a/packages/axum-http-tracker-server/src/v1/extractors/client_ip_sources.rs +++ b/packages/axum-http-tracker-server/src/v1/extractors/client_ip_sources.rs @@ -63,13 +63,13 @@ where }; let connection_info_ip = match ConnectInfo::::from_request_parts(parts, state).await { - Ok(connection_info_socket_addr) => Some(connection_info_socket_addr.0.ip()), + Ok(connection_info_socket_addr) => Some(connection_info_socket_addr.0), Err(_) => None, }; Ok(Extract(ClientIpSources { right_most_x_forwarded_for, - connection_info_ip, + connection_info_socket_address: connection_info_ip, })) } } diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 63ab96fe5..53fd38997 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -194,7 +194,7 @@ mod tests { fn sample_client_ip_sources() -> ClientIpSources { ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: None, + connection_info_socket_address: None, } } @@ -335,7 +335,7 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: None, + connection_info_socket_address: None, }; let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 
7070); @@ -378,7 +378,7 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: None, + connection_info_socket_address: None, }; let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index ca90f74c6..1ba89eaaf 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -79,7 +79,7 @@ fn build_response(scrape_data: ScrapeData) -> Response { #[cfg(test)] mod tests { - use std::net::IpAddr; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; use std::sync::Arc; @@ -155,7 +155,7 @@ mod tests { fn sample_client_ip_sources() -> ClientIpSources { ClientIpSources { right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), - connection_info_ip: Some(IpAddr::from_str("203.0.113.196").unwrap()), + connection_info_socket_address: Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 8080)), } } @@ -282,7 +282,7 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: None, + connection_info_socket_address: None, }; let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); @@ -327,7 +327,7 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: None, + connection_info_socket_address: None, }; let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); diff --git a/packages/http-protocol/src/v1/services/peer_ip_resolver.rs b/packages/http-protocol/src/v1/services/peer_ip_resolver.rs index bea93f1ba..b375694b9 100644 --- a/packages/http-protocol/src/v1/services/peer_ip_resolver.rs +++ b/packages/http-protocol/src/v1/services/peer_ip_resolver.rs @@ -20,7 
+20,7 @@ //! ``` //! //! Depending on the tracker configuration. -use std::net::IpAddr; +use std::net::{IpAddr, SocketAddr}; use std::panic::Location; use serde::{Deserialize, Serialize}; @@ -31,8 +31,9 @@ use thiserror::Error; pub struct ClientIpSources { /// The right most IP from the `X-Forwarded-For` HTTP header. pub right_most_x_forwarded_for: Option, - /// The IP from the connection info. - pub connection_info_ip: Option, + + /// The client's socket address from the connection info. + pub connection_info_socket_address: Option, } /// The error that can occur when resolving the peer IP. @@ -45,6 +46,7 @@ pub enum PeerIpResolutionError { "missing or invalid the right most X-Forwarded-For IP (mandatory on reverse proxy tracker configuration) in {location}" )] MissingRightMostXForwardedForIp { location: &'static Location<'static> }, + /// The peer IP cannot be obtained because the tracker is not configured as /// a reverse proxy but the connection info was not provided to the Axum /// framework via a route extension. 
@@ -71,7 +73,7 @@ pub enum PeerIpResolutionError { /// on_reverse_proxy, /// &ClientIpSources { /// right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), -/// connection_info_ip: None, +/// connection_info_socket_address: None, /// }, /// ) /// .unwrap(); @@ -82,7 +84,7 @@ pub enum PeerIpResolutionError { /// With the tracker non running on reverse proxy mode: /// /// ```rust -/// use std::net::IpAddr; +/// use std::net::{IpAddr,Ipv4Addr,SocketAddr}; /// use std::str::FromStr; /// /// use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; @@ -93,7 +95,7 @@ pub enum PeerIpResolutionError { /// on_reverse_proxy, /// &ClientIpSources { /// right_most_x_forwarded_for: None, -/// connection_info_ip: Some(IpAddr::from_str("203.0.113.195").unwrap()), +/// connection_info_socket_address: Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080)) /// }, /// ) /// .unwrap(); @@ -114,8 +116,8 @@ pub fn invoke(on_reverse_proxy: bool, client_ip_sources: &ClientIpSources) -> Re } fn resolve_peer_ip_without_reverse_proxy(remote_client_ip: &ClientIpSources) -> Result { - if let Some(ip) = remote_client_ip.connection_info_ip { - Ok(ip) + if let Some(socket_addr) = remote_client_ip.connection_info_socket_address { + Ok(socket_addr.ip()) } else { Err(PeerIpResolutionError::MissingClientIp { location: Location::caller(), @@ -138,7 +140,7 @@ mod tests { use super::invoke; mod working_without_reverse_proxy { - use std::net::IpAddr; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; use super::invoke; @@ -152,7 +154,7 @@ mod tests { on_reverse_proxy, &ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: Some(IpAddr::from_str("203.0.113.195").unwrap()), + connection_info_socket_address: Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080)), }, ) .unwrap(); @@ -168,7 +170,7 @@ mod tests { on_reverse_proxy, &ClientIpSources { 
right_most_x_forwarded_for: None, - connection_info_ip: None, + connection_info_socket_address: None, }, ) .unwrap_err(); @@ -191,7 +193,7 @@ mod tests { on_reverse_proxy, &ClientIpSources { right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), - connection_info_ip: None, + connection_info_socket_address: None, }, ) .unwrap(); @@ -207,7 +209,7 @@ mod tests { on_reverse_proxy, &ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: None, + connection_info_socket_address: None, }, ) .unwrap_err(); diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index f15e9db8f..169c4a56a 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -93,7 +93,7 @@ pub fn sample_announce_request_for_peer(peer: Peer) -> (Announce, ClientIpSource let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: Some(peer.peer_addr.ip()), + connection_info_socket_address: Some(SocketAddr::new(peer.peer_addr.ip(), 8080)), }; (announce_request, client_ip_sources) diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 87250af30..7eb73ff53 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -296,7 +296,7 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: Some(peer.peer_addr.ip()), + connection_info_socket_address: Some(SocketAddr::new(peer.peer_addr.ip(), 8080)), }; (announce_request, client_ip_sources) diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 31c2ce2c4..93d2688e5 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -298,7 +298,7 @@ mod tests 
{ let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: Some(original_peer_ip), + connection_info_socket_address: Some(SocketAddr::new(original_peer_ip, 8080)), }; let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); @@ -356,7 +356,7 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: Some(peer_ip), + connection_info_socket_address: Some(SocketAddr::new(peer_ip, 8080)), }; let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); @@ -404,7 +404,7 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: Some(peer_ip), + connection_info_socket_address: Some(SocketAddr::new(peer_ip, 8080)), }; let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); @@ -472,7 +472,7 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: Some(original_peer_ip), + connection_info_socket_address: Some(SocketAddr::new(original_peer_ip, 8080)), }; let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); @@ -522,7 +522,7 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: Some(peer_ip), + connection_info_socket_address: Some(SocketAddr::new(peer_ip, 8080)), }; let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); @@ -570,7 +570,7 @@ mod tests { let client_ip_sources = ClientIpSources { right_most_x_forwarded_for: None, - connection_info_ip: Some(peer_ip), + connection_info_socket_address: Some(SocketAddr::new(peer_ip, 8080)), }; let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); diff --git a/packages/http-tracker-core/src/statistics/event/mod.rs b/packages/http-tracker-core/src/statistics/event/mod.rs index 
c27ce7c6d..a6e54ce83 100644 --- a/packages/http-tracker-core/src/statistics/event/mod.rs +++ b/packages/http-tracker-core/src/statistics/event/mod.rs @@ -5,9 +5,6 @@ pub mod listener; pub mod sender; /// An statistics event. It is used to collect tracker metrics. -/// -/// - `Tcp` prefix means the event was triggered by the HTTP tracker. -/// - The event suffix is the type of request: `announce` or `scrape`. #[derive(Debug, PartialEq, Eq)] pub enum Event { TcpAnnounce { connection: ConnectionContext }, From 3969c67a67fc2f2bea7a3ad4a52ec60cf8baf898 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Mar 2025 11:36:06 +0000 Subject: [PATCH 387/802] refactor: [1373] include client's port in stats events when provided --- .../src/services/announce.rs | 48 ++++++++------ .../http-tracker-core/src/services/scrape.rs | 66 ++++++++++++------- .../src/statistics/event/handler.rs | 40 ++++++----- .../src/statistics/event/mod.rs | 37 ++++++++++- .../src/statistics/keeper.rs | 9 +-- 5 files changed, 132 insertions(+), 68 deletions(-) diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 7eb73ff53..6b8b700c9 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -75,7 +75,7 @@ impl AnnounceService { self.authorize(announce_request.info_hash).await?; - let remote_client_ip = self.resolve_remote_client_ip(client_ip_sources)?; + let (remote_client_ip, opt_remote_client_port) = self.resolve_remote_client_address(client_ip_sources)?; let mut peer = peer_from_request(announce_request, &remote_client_ip); @@ -86,7 +86,8 @@ impl AnnounceService { .announce(&announce_request.info_hash, &mut peer, &remote_client_ip, &peers_wanted) .await?; - self.send_stats_event(remote_client_ip, *server_socket_addr).await; + self.send_stats_event(remote_client_ip, opt_remote_client_port, *server_socket_addr) + .await; Ok(announce_data) } @@ -108,11 +109,24 
@@ impl AnnounceService { } /// Resolves the client's real IP address considering proxy headers - fn resolve_remote_client_ip(&self, client_ip_sources: &ClientIpSources) -> Result { - match peer_ip_resolver::invoke(self.core_config.net.on_reverse_proxy, client_ip_sources) { + fn resolve_remote_client_address( + &self, + client_ip_sources: &ClientIpSources, + ) -> Result<(IpAddr, Option), PeerIpResolutionError> { + let ip = match peer_ip_resolver::invoke(self.core_config.net.on_reverse_proxy, client_ip_sources) { Ok(peer_ip) => Ok(peer_ip), Err(error) => Err(error), - } + }?; + + let port = if client_ip_sources.connection_info_socket_address.is_some() { + client_ip_sources + .connection_info_socket_address + .map(|socket_addr| socket_addr.port()) + } else { + None + }; + + Ok((ip, port)) } /// Determines how many peers the client wants in the response @@ -123,14 +137,11 @@ impl AnnounceService { } } - async fn send_stats_event(&self, peer_ip: IpAddr, server_socket_addr: SocketAddr) { + async fn send_stats_event(&self, peer_ip: IpAddr, opt_peer_ip_port: Option, server_socket_addr: SocketAddr) { if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { http_stats_event_sender .send_event(statistics::event::Event::TcpAnnounce { - connection: statistics::event::ConnectionContext { - client_ip_addr: peer_ip, - server_socket_addr, - }, + connection: statistics::event::ConnectionContext::new(peer_ip, opt_peer_ip_port, server_socket_addr), }) .await; } @@ -381,10 +392,7 @@ mod tests { http_stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::TcpAnnounce { - connection: ConnectionContext { - client_ip_addr: IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), - server_socket_addr, - }, + connection: ConnectionContext::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), Some(8080), server_socket_addr), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); @@ -440,10 +448,7 @@ mod tests { http_stats_event_sender_mock 
.expect_send_event() .with(eq(statistics::event::Event::TcpAnnounce { - connection: ConnectionContext { - client_ip_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), - server_socket_addr, - }, + connection: ConnectionContext::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), Some(8080), server_socket_addr), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); @@ -482,10 +487,11 @@ mod tests { http_stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::TcpAnnounce { - connection: ConnectionContext { - client_ip_addr: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + connection: ConnectionContext::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + Some(8080), server_socket_addr, - }, + ), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 93d2688e5..ed927efc3 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -80,9 +80,10 @@ impl ScrapeService { self.scrape_handler.scrape(&scrape_request.info_hashes).await? }; - let remote_client_ip = self.resolve_remote_client_ip(client_ip_sources)?; + let (remote_client_ip, opt_client_port) = self.resolve_remote_client_ip(client_ip_sources)?; - self.send_stats_event(remote_client_ip, *server_socket_addr).await; + self.send_stats_event(remote_client_ip, opt_client_port, *server_socket_addr) + .await; Ok(scrape_data) } @@ -100,18 +101,33 @@ impl ScrapeService { } /// Resolves the client's real IP address considering proxy headers. 
- fn resolve_remote_client_ip(&self, client_ip_sources: &ClientIpSources) -> Result { - peer_ip_resolver::invoke(self.core_config.net.on_reverse_proxy, client_ip_sources) + fn resolve_remote_client_ip( + &self, + client_ip_sources: &ClientIpSources, + ) -> Result<(IpAddr, Option), PeerIpResolutionError> { + let ip = peer_ip_resolver::invoke(self.core_config.net.on_reverse_proxy, client_ip_sources)?; + + let port = if client_ip_sources.connection_info_socket_address.is_some() { + client_ip_sources + .connection_info_socket_address + .map(|socket_addr| socket_addr.port()) + } else { + None + }; + + Ok((ip, port)) } - async fn send_stats_event(&self, original_peer_ip: IpAddr, server_socket_addr: SocketAddr) { + async fn send_stats_event( + &self, + original_peer_ip: IpAddr, + opt_original_peer_port: Option, + server_socket_addr: SocketAddr, + ) { if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { http_stats_event_sender .send_event(statistics::event::Event::TcpScrape { - connection: ConnectionContext { - client_ip_addr: original_peer_ip, - server_socket_addr, - }, + connection: ConnectionContext::new(original_peer_ip, opt_original_peer_port, server_socket_addr), }) .await; } @@ -336,10 +352,11 @@ mod tests { http_stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::TcpScrape { - connection: ConnectionContext { - client_ip_addr: IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), - server_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), - }, + connection: ConnectionContext::new( + IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), + Some(8080), + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), + ), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); @@ -384,10 +401,11 @@ mod tests { http_stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::TcpScrape { - connection: ConnectionContext { - client_ip_addr: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 
0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + connection: ConnectionContext::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + Some(8080), server_socket_addr, - }, + ), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); @@ -504,10 +522,11 @@ mod tests { http_stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::TcpScrape { - connection: ConnectionContext { - client_ip_addr: IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), - server_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), - }, + connection: ConnectionContext::new( + IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), + Some(8080), + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), + ), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); @@ -552,10 +571,11 @@ mod tests { http_stats_event_sender_mock .expect_send_event() .with(eq(statistics::event::Event::TcpScrape { - connection: ConnectionContext { - client_ip_addr: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + connection: ConnectionContext::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + Some(8080), server_socket_addr, - }, + ), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index ea8cedc71..b8806b9d2 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -9,7 +9,7 @@ use crate::statistics::repository::Repository; /// version of the event. 
pub async fn handle_event(event: Event, stats_repository: &Repository) { match event { - Event::TcpAnnounce { connection } => match connection.client_ip_addr { + Event::TcpAnnounce { connection } => match connection.client_ip_addr() { IpAddr::V4(_) => { stats_repository.increase_tcp4_announces().await; } @@ -17,7 +17,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { stats_repository.increase_tcp6_announces().await; } }, - Event::TcpScrape { connection } => match connection.client_ip_addr { + Event::TcpScrape { connection } => match connection.client_ip_addr() { IpAddr::V4(_) => { stats_repository.increase_tcp4_scrapes().await; } @@ -44,10 +44,11 @@ mod tests { handle_event( Event::TcpAnnounce { - connection: ConnectionContext { - client_ip_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), - server_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), - }, + connection: ConnectionContext::new( + IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), + Some(8080), + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), + ), }, &stats_repository, ) @@ -64,10 +65,11 @@ mod tests { handle_event( Event::TcpScrape { - connection: ConnectionContext { - client_ip_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), - server_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), - }, + connection: ConnectionContext::new( + IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), + Some(8080), + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), + ), }, &stats_repository, ) @@ -84,10 +86,11 @@ mod tests { handle_event( Event::TcpAnnounce { - connection: ConnectionContext { - client_ip_addr: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), - server_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), - }, + connection: ConnectionContext::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + Some(8080), + 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), + ), }, &stats_repository, ) @@ -104,10 +107,11 @@ mod tests { handle_event( Event::TcpScrape { - connection: ConnectionContext { - client_ip_addr: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), - server_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), - }, + connection: ConnectionContext::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + Some(8080), + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), + ), }, &stats_repository, ) diff --git a/packages/http-tracker-core/src/statistics/event/mod.rs b/packages/http-tracker-core/src/statistics/event/mod.rs index a6e54ce83..7520e1a97 100644 --- a/packages/http-tracker-core/src/statistics/event/mod.rs +++ b/packages/http-tracker-core/src/statistics/event/mod.rs @@ -13,6 +13,39 @@ pub enum Event { #[derive(Debug, PartialEq, Eq)] pub struct ConnectionContext { - pub client_ip_addr: IpAddr, - pub server_socket_addr: SocketAddr, + client: ClientConnectionContext, + server: ServerConnectionContext, +} + +impl ConnectionContext { + #[must_use] + pub fn new(client_ip_addr: IpAddr, opt_client_port: Option, server_socket_addr: SocketAddr) -> Self { + Self { + client: ClientConnectionContext { + ip_addr: client_ip_addr, + port: opt_client_port, + }, + server: ServerConnectionContext { + socket_addr: server_socket_addr, + }, + } + } + + #[must_use] + pub fn client_ip_addr(&self) -> IpAddr { + self.client.ip_addr + } +} + +#[derive(Debug, PartialEq, Eq)] +pub struct ClientConnectionContext { + ip_addr: IpAddr, + + /// It's provided if you use the `torrust-axum-http-tracker-server` crate. 
+ port: Option, +} + +#[derive(Debug, PartialEq, Eq)] +pub struct ServerConnectionContext { + socket_addr: SocketAddr, } diff --git a/packages/http-tracker-core/src/statistics/keeper.rs b/packages/http-tracker-core/src/statistics/keeper.rs index 6f84e27b1..783309eff 100644 --- a/packages/http-tracker-core/src/statistics/keeper.rs +++ b/packages/http-tracker-core/src/statistics/keeper.rs @@ -74,10 +74,11 @@ mod tests { let result = event_sender .send_event(Event::TcpAnnounce { - connection: ConnectionContext { - client_ip_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), - server_socket_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), - }, + connection: ConnectionContext::new( + IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), + Some(8080), + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), + ), }) .await; From 1f30f8ef3d734051bc5f79d76200f0a5c786a737 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Mar 2025 12:00:03 +0000 Subject: [PATCH 388/802] ci: update git hooks scripts To also run doctests. 
--- contrib/dev-tools/git/hooks/pre-commit.sh | 1 + contrib/dev-tools/git/hooks/pre-push.sh | 1 + 2 files changed, 2 insertions(+) diff --git a/contrib/dev-tools/git/hooks/pre-commit.sh b/contrib/dev-tools/git/hooks/pre-commit.sh index 37b80bb8a..c1b183fde 100755 --- a/contrib/dev-tools/git/hooks/pre-commit.sh +++ b/contrib/dev-tools/git/hooks/pre-commit.sh @@ -6,4 +6,5 @@ cargo +nightly fmt --check && cargo +nightly machete && cargo +stable build && CARGO_INCREMENTAL=0 cargo +stable clippy --no-deps --tests --benches --examples --workspace --all-targets --all-features -- -D clippy::correctness -D clippy::suspicious -D clippy::complexity -D clippy::perf -D clippy::style -D clippy::pedantic && + cargo +stable test --doc --workspace && cargo +stable test --tests --benches --examples --workspace --all-targets --all-features diff --git a/contrib/dev-tools/git/hooks/pre-push.sh b/contrib/dev-tools/git/hooks/pre-push.sh index c1a724156..593068cee 100755 --- a/contrib/dev-tools/git/hooks/pre-push.sh +++ b/contrib/dev-tools/git/hooks/pre-push.sh @@ -6,5 +6,6 @@ cargo +nightly fmt --check && cargo +nightly machete && cargo +stable build && CARGO_INCREMENTAL=0 cargo +stable clippy --no-deps --tests --benches --examples --workspace --all-targets --all-features -- -D clippy::correctness -D clippy::suspicious -D clippy::complexity -D clippy::perf -D clippy::style -D clippy::pedantic && + cargo +stable test --doc --workspace && cargo +stable test --tests --benches --examples --workspace --all-targets --all-features && cargo +stable run --bin e2e_tests_runner -- --config-toml-path "./share/default/config/tracker.e2e.container.sqlite3.toml" From 2be682e1779088432126e7a2c6ee39dd4f4b7094 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Mar 2025 17:07:55 +0000 Subject: [PATCH 389/802] refactor: [#1380] refactor: [#1371] add connection context to UDP core events --- .../udp-tracker-core/benches/helpers/sync.rs | 6 +- .../udp-tracker-core/src/services/announce.rs | 22 ++- 
.../udp-tracker-core/src/services/connect.rs | 60 ++++++-- .../udp-tracker-core/src/services/scrape.rs | 20 ++- .../src/statistics/event/handler.rs | 139 ++++++++++++++---- .../src/statistics/event/mod.rs | 35 +++-- .../udp-tracker-core/src/statistics/keeper.rs | 13 +- .../src/handlers/announce.rs | 135 ++++++++++------- .../src/handlers/connect.rs | 38 ++++- .../udp-tracker-server/src/handlers/mod.rs | 22 ++- .../udp-tracker-server/src/handlers/scrape.rs | 70 +++++---- 11 files changed, 401 insertions(+), 159 deletions(-) diff --git a/packages/udp-tracker-core/benches/helpers/sync.rs b/packages/udp-tracker-core/benches/helpers/sync.rs index b7d8e848d..ca459c640 100644 --- a/packages/udp-tracker-core/benches/helpers/sync.rs +++ b/packages/udp-tracker-core/benches/helpers/sync.rs @@ -1,3 +1,4 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use std::time::{Duration, Instant}; @@ -8,13 +9,16 @@ use crate::helpers::utils::{sample_ipv4_remote_addr, sample_issue_time}; #[allow(clippy::unused_async)] pub async fn connect_once(samples: u64) -> Duration { + let client_socket_addr = sample_ipv4_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let (udp_core_stats_event_sender, _udp_core_stats_repository) = statistics::setup::factory(false); let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); let start = Instant::now(); for _ in 0..samples { - let _response = connect_service.handle_connect(sample_ipv4_remote_addr(), sample_issue_time()); + let _response = connect_service.handle_connect(client_socket_addr, server_socket_addr, sample_issue_time()); } start.elapsed() diff --git a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index 698f5fba6..22bc05a9e 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ 
b/packages/udp-tracker-core/src/services/announce.rs @@ -21,6 +21,7 @@ use torrust_tracker_primitives::core::AnnounceData; use crate::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieError}; use crate::statistics; +use crate::statistics::event::ConnectionContext; /// The `AnnounceService` is responsible for handling the `announce` requests. /// @@ -57,17 +58,18 @@ impl AnnounceService { /// whitelist. pub async fn handle_announce( &self, - remote_addr: SocketAddr, + client_socket_addr: SocketAddr, + server_socket_addr: SocketAddr, request: &AnnounceRequest, cookie_valid_range: Range, ) -> Result { - Self::authenticate(remote_addr, request, cookie_valid_range)?; + Self::authenticate(client_socket_addr, request, cookie_valid_range)?; let info_hash = request.info_hash.into(); self.authorize(&info_hash).await?; - let remote_client_ip = remote_addr.ip(); + let remote_client_ip = client_socket_addr.ip(); let mut peer = peer_builder::from_request(request, &remote_client_ip); @@ -78,7 +80,7 @@ impl AnnounceService { .announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted) .await?; - self.send_stats_event(remote_client_ip).await; + self.send_stats_event(client_socket_addr, server_socket_addr).await; Ok(announce_data) } @@ -99,11 +101,15 @@ impl AnnounceService { self.whitelist_authorization.authorize(info_hash).await } - async fn send_stats_event(&self, peer_ip: IpAddr) { + async fn send_stats_event(&self, client_socket_addr: SocketAddr, server_socket_addr: SocketAddr) { if let Some(udp_stats_event_sender) = self.opt_udp_core_stats_event_sender.as_deref() { - let event = match peer_ip { - IpAddr::V4(_) => statistics::event::Event::Udp4Announce, - IpAddr::V6(_) => statistics::event::Event::Udp6Announce, + let event = match client_socket_addr.ip() { + IpAddr::V4(_) => statistics::event::Event::Udp4Announce { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + }, + IpAddr::V6(_) => statistics::event::Event::Udp6Announce { + 
context: ConnectionContext::new(client_socket_addr, server_socket_addr), + }, }; udp_stats_event_sender.send_event(event).await; diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index 14a3068e4..5309a79d3 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -8,6 +8,7 @@ use aquatic_udp_protocol::ConnectionId; use crate::connection_cookie::{gen_remote_fingerprint, make}; use crate::statistics; +use crate::statistics::event::ConnectionContext; /// The `ConnectService` is responsible for handling the `connect` requests. /// @@ -30,16 +31,30 @@ impl ConnectService { /// # Panics /// /// It will panic if there was an error making the connection cookie. - pub async fn handle_connect(&self, remote_addr: SocketAddr, cookie_issue_time: f64) -> ConnectionId { - let connection_id = make(gen_remote_fingerprint(&remote_addr), cookie_issue_time).expect("it should be a normal value"); + pub async fn handle_connect( + &self, + client_socket_addr: SocketAddr, + server_socket_addr: SocketAddr, + cookie_issue_time: f64, + ) -> ConnectionId { + let connection_id = + make(gen_remote_fingerprint(&client_socket_addr), cookie_issue_time).expect("it should be a normal value"); if let Some(udp_stats_event_sender) = self.opt_udp_core_stats_event_sender.as_deref() { - match remote_addr { + match client_socket_addr { SocketAddr::V4(_) => { - udp_stats_event_sender.send_event(statistics::event::Event::Udp4Connect).await; + udp_stats_event_sender + .send_event(statistics::event::Event::Udp4Connect { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + }) + .await; } SocketAddr::V6(_) => { - udp_stats_event_sender.send_event(statistics::event::Event::Udp6Connect).await; + udp_stats_event_sender + .send_event(statistics::event::Event::Udp6Connect { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + }) + .await; } } 
} @@ -54,6 +69,7 @@ mod tests { mod connect_request { use std::future; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use mockall::predicate::eq; @@ -65,16 +81,19 @@ mod tests { sample_ipv6_remote_addr_fingerprint, sample_issue_time, MockUdpCoreStatsEventSender, }; use crate::statistics; + use crate::statistics::event::ConnectionContext; #[tokio::test] async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let (udp_core_stats_event_sender, _udp_core_stats_repository) = statistics::setup::factory(false); let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); let response = connect_service - .handle_connect(sample_ipv4_remote_addr(), sample_issue_time()) + .handle_connect(sample_ipv4_remote_addr(), server_socket_addr, sample_issue_time()) .await; assert_eq!( @@ -85,13 +104,15 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id() { + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let (udp_core_stats_event_sender, _udp_core_stats_repository) = statistics::setup::factory(false); let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); let response = connect_service - .handle_connect(sample_ipv4_remote_addr(), sample_issue_time()) + .handle_connect(sample_ipv4_remote_addr(), server_socket_addr, sample_issue_time()) .await; assert_eq!( @@ -102,13 +123,16 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { + let client_socket_addr = sample_ipv6_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let (udp_core_stats_event_sender, 
_udp_core_stats_repository) = statistics::setup::factory(false); let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); let response = connect_service - .handle_connect(sample_ipv6_remote_addr(), sample_issue_time()) + .handle_connect(client_socket_addr, server_socket_addr, sample_issue_time()) .await; assert_eq!( @@ -119,30 +143,38 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { + let client_socket_addr = sample_ipv4_socket_address(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let mut udp_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Udp4Connect)) + .with(eq(statistics::event::Event::Udp4Connect { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let opt_udp_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_stats_event_sender_mock))); - let client_socket_address = sample_ipv4_socket_address(); - let connect_service = Arc::new(ConnectService::new(opt_udp_stats_event_sender)); connect_service - .handle_connect(client_socket_address, sample_issue_time()) + .handle_connect(client_socket_addr, server_socket_addr, sample_issue_time()) .await; } #[tokio::test] async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { + let client_socket_addr = sample_ipv6_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let mut udp_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Udp6Connect)) + 
.with(eq(statistics::event::Event::Udp6Connect { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let opt_udp_stats_event_sender: Arc>> = @@ -151,7 +183,7 @@ mod tests { let connect_service = Arc::new(ConnectService::new(opt_udp_stats_event_sender)); connect_service - .handle_connect(sample_ipv6_remote_addr(), sample_issue_time()) + .handle_connect(client_socket_addr, server_socket_addr, sample_issue_time()) .await; } } diff --git a/packages/udp-tracker-core/src/services/scrape.rs b/packages/udp-tracker-core/src/services/scrape.rs index 61301cd43..0f1ab14d8 100644 --- a/packages/udp-tracker-core/src/services/scrape.rs +++ b/packages/udp-tracker-core/src/services/scrape.rs @@ -19,6 +19,7 @@ use torrust_tracker_primitives::core::ScrapeData; use crate::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieError}; use crate::statistics; +use crate::statistics::event::ConnectionContext; /// The `ScrapeService` is responsible for handling the `scrape` requests. /// @@ -49,18 +50,19 @@ impl ScrapeService { /// It will return an error if the tracker core scrape handler returns an error. 
pub async fn handle_scrape( &self, - remote_client_addr: SocketAddr, + client_socket_addr: SocketAddr, + server_socket_addr: SocketAddr, request: &ScrapeRequest, cookie_valid_range: Range, ) -> Result { - Self::authenticate(remote_client_addr, request, cookie_valid_range)?; + Self::authenticate(client_socket_addr, request, cookie_valid_range)?; let scrape_data = self .scrape_handler .scrape(&Self::convert_from_aquatic(&request.info_hashes)) .await?; - self.send_stats_event(remote_client_addr).await; + self.send_stats_event(client_socket_addr, server_socket_addr).await; Ok(scrape_data) } @@ -81,11 +83,15 @@ impl ScrapeService { aquatic_infohashes.iter().map(|&x| x.into()).collect() } - async fn send_stats_event(&self, remote_addr: SocketAddr) { + async fn send_stats_event(&self, client_socket_addr: SocketAddr, server_socket_addr: SocketAddr) { if let Some(udp_stats_event_sender) = self.opt_udp_stats_event_sender.as_deref() { - let event = match remote_addr { - SocketAddr::V4(_) => statistics::event::Event::Udp4Scrape, - SocketAddr::V6(_) => statistics::event::Event::Udp6Scrape, + let event = match client_socket_addr { + SocketAddr::V4(_) => statistics::event::Event::Udp4Scrape { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + }, + SocketAddr::V6(_) => statistics::event::Event::Udp6Scrape { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + }, }; udp_stats_event_sender.send_event(event).await; } diff --git a/packages/udp-tracker-core/src/statistics/event/handler.rs b/packages/udp-tracker-core/src/statistics/event/handler.rs index 096059b91..1f8a64a88 100644 --- a/packages/udp-tracker-core/src/statistics/event/handler.rs +++ b/packages/udp-tracker-core/src/statistics/event/handler.rs @@ -1,29 +1,62 @@ use crate::statistics::event::Event; use crate::statistics::repository::Repository; +/// # Panics +/// +/// This function panics if the IP version does not match the event type. 
pub async fn handle_event(event: Event, stats_repository: &Repository) { match event { // UDP4 - Event::Udp4Connect => { - stats_repository.increase_udp4_connections().await; - } - Event::Udp4Announce => { - stats_repository.increase_udp4_announces().await; - } - Event::Udp4Scrape => { - stats_repository.increase_udp4_scrapes().await; - } + Event::Udp4Connect { context } => match context.client_socket_addr.ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_connections().await; + } + std::net::IpAddr::V6(_) => { + panic!("IP Version 6 does not match the event type for connect"); + } + }, + Event::Udp4Announce { context } => match context.client_socket_addr.ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_announces().await; + } + std::net::IpAddr::V6(_) => { + panic!("IP Version 6 does not match the event type for announce"); + } + }, + Event::Udp4Scrape { context } => match context.client_socket_addr.ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_scrapes().await; + } + std::net::IpAddr::V6(_) => { + panic!("IP Version 6 does not match the event type for scrape"); + } + }, // UDP6 - Event::Udp6Connect => { - stats_repository.increase_udp6_connections().await; - } - Event::Udp6Announce => { - stats_repository.increase_udp6_announces().await; - } - Event::Udp6Scrape => { - stats_repository.increase_udp6_scrapes().await; - } + Event::Udp6Connect { context } => match context.client_socket_addr.ip() { + std::net::IpAddr::V4(_) => { + panic!("IP Version 4 does not match the event type for connect"); + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_connections().await; + } + }, + Event::Udp6Announce { context } => match context.client_socket_addr.ip() { + std::net::IpAddr::V4(_) => { + panic!("IP Version 4 does not match the event type for announce"); + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_announces().await; + } + }, + Event::Udp6Scrape { context } => match 
context.client_socket_addr.ip() { + std::net::IpAddr::V4(_) => { + panic!("IP Version 4 does not match the event type for scrape"); + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_scrapes().await; + } + }, } tracing::debug!("stats: {:?}", stats_repository.get_stats().await); @@ -31,15 +64,26 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { #[cfg(test)] mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use crate::statistics::event::handler::handle_event; - use crate::statistics::event::Event; + use crate::statistics::event::{ConnectionContext, Event}; use crate::statistics::repository::Repository; #[tokio::test] async fn should_increase_the_udp4_connections_counter_when_it_receives_a_udp4_connect_event() { let stats_repository = Repository::new(); - handle_event(Event::Udp4Connect, &stats_repository).await; + handle_event( + Event::Udp4Connect { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ), + }, + &stats_repository, + ) + .await; let stats = stats_repository.get_stats().await; @@ -50,7 +94,16 @@ mod tests { async fn should_increase_the_udp4_announces_counter_when_it_receives_a_udp4_announce_event() { let stats_repository = Repository::new(); - handle_event(Event::Udp4Announce, &stats_repository).await; + handle_event( + Event::Udp4Announce { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ), + }, + &stats_repository, + ) + .await; let stats = stats_repository.get_stats().await; @@ -61,7 +114,16 @@ mod tests { async fn should_increase_the_udp4_scrapes_counter_when_it_receives_a_udp4_scrape_event() { let stats_repository = Repository::new(); - handle_event(Event::Udp4Scrape, &stats_repository).await; + handle_event( + Event::Udp4Scrape { + 
context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ), + }, + &stats_repository, + ) + .await; let stats = stats_repository.get_stats().await; @@ -72,7 +134,16 @@ mod tests { async fn should_increase_the_udp6_connections_counter_when_it_receives_a_udp6_connect_event() { let stats_repository = Repository::new(); - handle_event(Event::Udp6Connect, &stats_repository).await; + handle_event( + Event::Udp6Connect { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ), + }, + &stats_repository, + ) + .await; let stats = stats_repository.get_stats().await; @@ -83,7 +154,16 @@ mod tests { async fn should_increase_the_udp6_announces_counter_when_it_receives_a_udp6_announce_event() { let stats_repository = Repository::new(); - handle_event(Event::Udp6Announce, &stats_repository).await; + handle_event( + Event::Udp6Announce { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ), + }, + &stats_repository, + ) + .await; let stats = stats_repository.get_stats().await; @@ -94,7 +174,16 @@ mod tests { async fn should_increase_the_udp6_scrapes_counter_when_it_receives_a_udp6_scrape_event() { let stats_repository = Repository::new(); - handle_event(Event::Udp6Scrape, &stats_repository).await; + handle_event( + Event::Udp6Scrape { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ), + }, + &stats_repository, + ) + .await; let stats = stats_repository.get_stats().await; diff --git 
a/packages/udp-tracker-core/src/statistics/event/mod.rs b/packages/udp-tracker-core/src/statistics/event/mod.rs index bfc733657..f460f0113 100644 --- a/packages/udp-tracker-core/src/statistics/event/mod.rs +++ b/packages/udp-tracker-core/src/statistics/event/mod.rs @@ -1,23 +1,36 @@ +use std::net::SocketAddr; + pub mod handler; pub mod listener; pub mod sender; /// An statistics event. It is used to collect tracker metrics. /// -/// - `Tcp` prefix means the event was triggered by the HTTP tracker /// - `Udp` prefix means the event was triggered by the UDP tracker /// - `4` or `6` prefixes means the IP version used by the peer /// - Finally the event suffix is the type of request: `announce`, `scrape` or `connection` -/// -/// > NOTE: HTTP trackers do not use `connection` requests. #[derive(Debug, PartialEq, Eq)] pub enum Event { - // code-review: consider one single event for request type with data: Event::Announce { scheme: HTTPorUDP, ip_version: V4orV6 } - // Attributes are enums too. 
- Udp4Connect, - Udp4Announce, - Udp4Scrape, - Udp6Connect, - Udp6Announce, - Udp6Scrape, + Udp4Connect { context: ConnectionContext }, + Udp4Announce { context: ConnectionContext }, + Udp4Scrape { context: ConnectionContext }, + Udp6Connect { context: ConnectionContext }, + Udp6Announce { context: ConnectionContext }, + Udp6Scrape { context: ConnectionContext }, +} + +#[derive(Debug, PartialEq, Eq)] +pub struct ConnectionContext { + client_socket_addr: SocketAddr, + server_socket_addr: SocketAddr, +} + +impl ConnectionContext { + #[must_use] + pub fn new(client_socket_addr: SocketAddr, server_socket_addr: SocketAddr) -> Self { + Self { + client_socket_addr, + server_socket_addr, + } + } } diff --git a/packages/udp-tracker-core/src/statistics/keeper.rs b/packages/udp-tracker-core/src/statistics/keeper.rs index dac7e7541..9d0768e31 100644 --- a/packages/udp-tracker-core/src/statistics/keeper.rs +++ b/packages/udp-tracker-core/src/statistics/keeper.rs @@ -51,7 +51,9 @@ impl Keeper { #[cfg(test)] mod tests { - use crate::statistics::event::Event; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use crate::statistics::event::{ConnectionContext, Event}; use crate::statistics::keeper::Keeper; use crate::statistics::metrics::Metrics; @@ -70,7 +72,14 @@ mod tests { let event_sender = stats_tracker.run_event_listener(); - let result = event_sender.send_event(Event::Udp4Connect).await; + let result = event_sender + .send_event(Event::Udp4Connect { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ), + }) + .await; assert!(result.is_some()); } diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index e56e1d831..d18a81329 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -26,7 +26,8 @@ use 
crate::statistics::event::UdpResponseKind; #[instrument(fields(transaction_id, connection_id, info_hash), skip(announce_service, opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_announce( announce_service: &Arc, - remote_addr: SocketAddr, + client_socket_addr: SocketAddr, + server_socket_addr: SocketAddr, request: &AnnounceRequest, core_config: &Arc, opt_udp_server_stats_event_sender: &Arc>>, @@ -40,7 +41,7 @@ pub async fn handle_announce( tracing::trace!("handle announce"); if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { - match remote_addr.ip() { + match client_socket_addr.ip() { IpAddr::V4(_) => { udp_server_stats_event_sender .send_event(server_statistics::event::Event::Udp4Request { @@ -59,11 +60,11 @@ pub async fn handle_announce( } let announce_data = announce_service - .handle_announce(remote_addr, request, cookie_valid_range) + .handle_announce(client_socket_addr, server_socket_addr, request, cookie_valid_range) .await .map_err(|e| (e.into(), request.transaction_id))?; - Ok(build_response(remote_addr, request, core_config, &announce_data)) + Ok(build_response(client_socket_addr, request, core_config, &announce_data)) } fn build_response( @@ -237,10 +238,11 @@ mod tests { let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); - let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + let client_socket_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) @@ -249,7 +251,8 @@ mod tests { handle_announce( 
&core_udp_tracker_services.announce_service, - remote_addr, + client_socket_addr, + server_socket_addr, &request, &core_tracker_services.core_config, &server_udp_tracker_services.udp_server_stats_event_sender, @@ -276,15 +279,17 @@ mod tests { let (core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + let client_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) .into(); let response = handle_announce( &core_udp_tracker_services.announce_service, - remote_addr, + client_socket_addr, + server_socket_addr, &request, &core_tracker_services.core_config, &server_udp_tracker_services.udp_server_stats_event_sender, @@ -325,10 +330,11 @@ mod tests { let remote_client_port = 8081; let peer_address = Ipv4Addr::new(126, 0, 0, 2); - let remote_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); + let client_socket_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) @@ -337,7 +343,8 @@ mod tests { handle_announce( &core_udp_tracker_services.announce_service, - remote_addr, + 
client_socket_addr, + server_socket_addr, &request, &core_tracker_services.core_config, &server_udp_tracker_services.udp_server_stats_event_sender, @@ -381,14 +388,17 @@ mod tests { let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); - let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + let client_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) .into(); handle_announce( &core_udp_tracker_services.announce_service, - remote_addr, + client_socket_addr, + server_socket_addr, &request, &core_tracker_services.core_config, &udp_server_stats_event_sender, @@ -433,9 +443,13 @@ mod tests { let (core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); + let client_socket_addr = sample_ipv4_socket_address(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + handle_announce( &core_udp_tracker_services.announce_service, - sample_ipv4_socket_address(), + client_socket_addr, + server_socket_addr, &AnnounceRequestBuilder::default().into(), &core_tracker_services.core_config, &udp_server_stats_event_sender, @@ -469,10 +483,11 @@ mod tests { let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); - let remote_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + let client_socket_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); + let server_socket_addr = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip) @@ -481,7 +496,8 @@ mod tests { handle_announce( &core_udp_tracker_services.announce_service, - remote_addr, + client_socket_addr, + server_socket_addr, &request, &core_tracker_services.core_config, &server_udp_tracker_services.udp_server_stats_event_sender, @@ -510,7 +526,7 @@ mod tests { mod using_ipv6 { use std::future; - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{ @@ -546,10 +562,11 @@ mod tests { let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); - let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + let client_socket_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) @@ -558,7 +575,8 @@ mod tests { handle_announce( &core_udp_tracker_services.announce_service, - remote_addr, + client_socket_addr, + server_socket_addr, &request, &core_tracker_services.core_config, &server_udp_tracker_services.udp_server_stats_event_sender, @@ -588,15 +606,17 @@ mod tests { let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); - let remote_addr 
= SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); + let client_socket_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); + let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) .into(); let response = handle_announce( &core_udp_tracker_services.announce_service, - remote_addr, + client_socket_addr, + server_socket_addr, &request, &core_tracker_services.core_config, &server_udp_tracker_services.udp_server_stats_event_sender, @@ -637,10 +657,11 @@ mod tests { let remote_client_port = 8081; let peer_address = "126.0.0.1".parse().unwrap(); - let remote_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); + let client_socket_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); + let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) .with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(peer_address) @@ -649,7 +670,8 @@ mod tests { handle_announce( &core_udp_tracker_services.announce_service, - remote_addr, + client_socket_addr, + server_socket_addr, &request, &core_tracker_services.core_config, &server_udp_tracker_service.udp_server_stats_event_sender, @@ -697,9 +719,12 @@ mod tests { let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); let client_port = 8080; - let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + + let client_socket_addr = 
SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); + let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) .into(); let announce_service = Arc::new(AnnounceService::new( @@ -710,7 +735,8 @@ mod tests { handle_announce( &announce_service, - remote_addr, + client_socket_addr, + server_socket_addr, &request, &core_config, &udp_server_stats_event_sender, @@ -759,15 +785,17 @@ mod tests { let (core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); - let remote_addr = sample_ipv6_remote_addr(); + let client_socket_addr = sample_ipv6_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); let announce_request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) .into(); handle_announce( &core_udp_tracker_services.announce_service, - remote_addr, + client_socket_addr, + server_socket_addr, &announce_request, &core_tracker_services.core_config, &udp_server_stats_event_sender, @@ -791,6 +819,7 @@ mod tests { use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use bittorrent_udp_tracker_core::services::announce::AnnounceService; + use bittorrent_udp_tracker_core::statistics::event::ConnectionContext; use bittorrent_udp_tracker_core::{self, statistics as core_statistics}; use mockall::predicate::eq; @@ -807,6 +836,19 @@ mod 
tests { async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let config = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); + let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); + let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + + let client_ip_v4 = loopback_ipv4; + let client_ip_v6 = loopback_ipv6; + let client_port = 8080; + + let info_hash = AquaticInfoHash([0u8; 20]); + let peer_id = AquaticPeerId([255u8; 20]); + + let client_socket_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); + let server_socket_addr = config.udp_trackers.clone().unwrap()[0].bind_address; + let database = initialize_database(&config.core); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = @@ -817,7 +859,9 @@ mod tests { let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock .expect_send_event() - .with(eq(core_statistics::event::Event::Udp6Announce)) + .with(eq(core_statistics::event::Event::Udp6Announce { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let udp_core_stats_event_sender: Arc>> = @@ -841,20 +885,8 @@ mod tests { &db_torrent_repository, )); - let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); - let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); - - let client_ip_v4 = loopback_ipv4; - let client_ip_v6 = loopback_ipv6; - let client_port = 8080; - - let info_hash = AquaticInfoHash([0u8; 20]); - let peer_id = AquaticPeerId([255u8; 20]); - - let remote_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap()) + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) 
.with_info_hash(info_hash) .with_peer_id(peer_id) .with_ip_address(client_ip_v4) @@ -871,7 +903,8 @@ mod tests { handle_announce( &announce_service, - remote_addr, + client_socket_addr, + server_socket_addr, &request, &core_config, &udp_server_stats_event_sender, diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index 93d3bb6f1..e3070264d 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -13,6 +13,7 @@ use crate::statistics::event::UdpResponseKind; #[instrument(fields(transaction_id), skip(connect_service, opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_connect( remote_addr: SocketAddr, + server_addr: SocketAddr, request: &ConnectRequest, connect_service: &Arc, opt_udp_server_stats_event_sender: &Arc>>, @@ -40,7 +41,9 @@ pub async fn handle_connect( } } - let connection_id = connect_service.handle_connect(remote_addr, cookie_issue_time).await; + let connection_id = connect_service + .handle_connect(remote_addr, server_addr, cookie_issue_time) + .await; build_response(*request, connection_id) } @@ -60,12 +63,14 @@ mod tests { mod connect_request { use std::future; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; use bittorrent_udp_tracker_core::connection_cookie::make; use bittorrent_udp_tracker_core::services::connect::ConnectService; use bittorrent_udp_tracker_core::statistics as core_statistics; + use bittorrent_udp_tracker_core::statistics::event::ConnectionContext; use mockall::predicate::eq; use crate::handlers::handle_connect; @@ -84,6 +89,8 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let 
(udp_core_stats_event_sender, _udp_core_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); @@ -99,6 +106,7 @@ mod tests { let response = handle_connect( sample_ipv4_remote_addr(), + server_socket_addr, &request, &connect_service, &udp_server_stats_event_sender, @@ -117,6 +125,8 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id() { + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let (udp_core_stats_event_sender, _udp_core_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); @@ -132,6 +142,7 @@ mod tests { let response = handle_connect( sample_ipv4_remote_addr(), + server_socket_addr, &request, &connect_service, &udp_server_stats_event_sender, @@ -150,6 +161,8 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let (udp_core_stats_event_sender, _udp_core_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); @@ -165,6 +178,7 @@ mod tests { let response = handle_connect( sample_ipv6_remote_addr(), + server_socket_addr, &request, &connect_service, &udp_server_stats_event_sender, @@ -183,10 +197,15 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { + let client_socket_addr = sample_ipv4_socket_address(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock .expect_send_event() - 
.with(eq(core_statistics::event::Event::Udp4Connect)) + .with(eq(core_statistics::event::Event::Udp4Connect { + context: core_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), + })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let udp_core_stats_event_sender: Arc>> = @@ -203,12 +222,11 @@ mod tests { let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); - let client_socket_address = sample_ipv4_socket_address(); - let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); handle_connect( - client_socket_address, + client_socket_addr, + server_socket_addr, &sample_connect_request(), &connect_service, &udp_server_stats_event_sender, @@ -219,10 +237,15 @@ mod tests { #[tokio::test] async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { + let client_socket_addr = sample_ipv6_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock .expect_send_event() - .with(eq(core_statistics::event::Event::Udp6Connect)) + .with(eq(core_statistics::event::Event::Udp6Connect { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); let udp_core_stats_event_sender: Arc>> = @@ -242,7 +265,8 @@ mod tests { let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); handle_connect( - sample_ipv6_remote_addr(), + client_socket_addr, + server_socket_addr, &sample_connect_request(), &connect_service, &udp_server_stats_event_sender, diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 165b307e0..162af3020 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ 
b/packages/udp-tracker-server/src/handlers/mod.rs @@ -58,7 +58,7 @@ pub(crate) async fn handle_packet( udp_request: RawRequest, udp_tracker_core_container: Arc, udp_tracker_server_container: Arc, - local_addr: SocketAddr, + server_socket_addr: SocketAddr, cookie_time_values: CookieTimeValues, ) -> Response { let request_id = Uuid::new_v4(); @@ -73,6 +73,7 @@ pub(crate) async fn handle_packet( Ok(request) => match handle_request( request, udp_request.from, + server_socket_addr, udp_tracker_core_container.clone(), udp_tracker_server_container.clone(), cookie_time_values.clone(), @@ -92,7 +93,7 @@ pub(crate) async fn handle_packet( handle_error( udp_request.from, - local_addr, + server_socket_addr, request_id, &udp_tracker_server_container.udp_server_stats_event_sender, cookie_time_values.valid_range.clone(), @@ -105,7 +106,7 @@ pub(crate) async fn handle_packet( Err(e) => { handle_error( udp_request.from, - local_addr, + server_socket_addr, request_id, &udp_tracker_server_container.udp_server_stats_event_sender, cookie_time_values.valid_range.clone(), @@ -129,14 +130,16 @@ pub(crate) async fn handle_packet( /// If a error happens in the `handle_request` function, it will just return the `ServerError`. 
#[instrument(skip( request, - remote_addr, + client_socket_addr, + server_socket_addr, udp_tracker_core_container, udp_tracker_server_container, cookie_time_values ))] pub async fn handle_request( request: Request, - remote_addr: SocketAddr, + client_socket_addr: SocketAddr, + server_socket_addr: SocketAddr, udp_tracker_core_container: Arc, udp_tracker_server_container: Arc, cookie_time_values: CookieTimeValues, @@ -145,7 +148,8 @@ pub async fn handle_request( match request { Request::Connect(connect_request) => Ok(handle_connect( - remote_addr, + client_socket_addr, + server_socket_addr, &connect_request, &udp_tracker_core_container.connect_service, &udp_tracker_server_container.udp_server_stats_event_sender, @@ -155,7 +159,8 @@ pub async fn handle_request( Request::Announce(announce_request) => { handle_announce( &udp_tracker_core_container.announce_service, - remote_addr, + client_socket_addr, + server_socket_addr, &announce_request, &udp_tracker_core_container.core_config, &udp_tracker_server_container.udp_server_stats_event_sender, @@ -166,7 +171,8 @@ pub async fn handle_request( Request::Scrape(scrape_request) => { handle_scrape( &udp_tracker_core_container.scrape_service, - remote_addr, + client_socket_addr, + server_socket_addr, &scrape_request, &udp_tracker_server_container.udp_server_stats_event_sender, cookie_time_values.valid_range, diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index c385718a2..e820b2e96 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -24,7 +24,8 @@ use crate::statistics::event::UdpResponseKind; #[instrument(fields(transaction_id, connection_id), skip(scrape_service, opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_scrape( scrape_service: &Arc, - remote_addr: SocketAddr, + client_socket_addr: SocketAddr, + server_socket_addr: SocketAddr, request: 
&ScrapeRequest, opt_udp_server_stats_event_sender: &Arc>>, cookie_valid_range: Range, @@ -36,7 +37,7 @@ pub async fn handle_scrape( tracing::trace!("handle scrape"); if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { - match remote_addr.ip() { + match client_socket_addr.ip() { IpAddr::V4(_) => { udp_server_stats_event_sender .send_event(server_statistics::event::Event::Udp4Request { @@ -55,7 +56,7 @@ pub async fn handle_scrape( } let scrape_data = scrape_service - .handle_scrape(remote_addr, request, cookie_valid_range) + .handle_scrape(client_socket_addr, server_socket_addr, request, cookie_valid_range) .await .map_err(|e| (e.into(), request.transaction_id))?; @@ -92,7 +93,7 @@ fn build_response(request: &ScrapeRequest, scrape_data: &ScrapeData) -> Response mod tests { mod scrape_request { - use std::net::SocketAddr; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{ @@ -121,20 +122,22 @@ mod tests { let (_core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - let remote_addr = sample_ipv4_remote_addr(); + let client_socket_addr = sample_ipv4_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let info_hash = InfoHash([0u8; 20]); let info_hashes = vec![info_hash]; let request = ScrapeRequest { - connection_id: make(gen_remote_fingerprint(&remote_addr), sample_issue_time()).unwrap(), + connection_id: make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap(), transaction_id: TransactionId(0i32.into()), info_hashes, }; let response = handle_scrape( &core_udp_tracker_services.scrape_service, - remote_addr, + client_socket_addr, + server_socket_addr, &request, &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), @@ -186,21 +189,24 @@ mod tests { let (udp_server_stats_event_sender, 
_udp_server_stats_repository) = crate::statistics::setup::factory(false); let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); - let remote_addr = sample_ipv4_remote_addr(); + let client_socket_addr = sample_ipv4_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let info_hash = InfoHash([0u8; 20]); add_a_seeder( core_tracker_services.in_memory_torrent_repository.clone(), - &remote_addr, + &client_socket_addr, &info_hash, ) .await; - let request = build_scrape_request(&remote_addr, &info_hash); + let request = build_scrape_request(&client_socket_addr, &info_hash); handle_scrape( &core_udp_tracker_services.scrape_service, - remote_addr, + client_socket_addr, + server_socket_addr, &request, &udp_server_stats_event_sender, sample_cookie_valid_range(), @@ -242,6 +248,8 @@ mod tests { } mod with_a_whitelisted_tracker { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; use crate::handlers::handle_scrape; @@ -257,24 +265,27 @@ mod tests { let (core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = initialize_core_tracker_services_for_listed_tracker(); - let remote_addr = sample_ipv4_remote_addr(); + let client_socket_addr = sample_ipv4_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let info_hash = InfoHash([0u8; 20]); add_a_seeder( core_tracker_services.in_memory_torrent_repository.clone(), - &remote_addr, + &client_socket_addr, &info_hash, ) .await; core_tracker_services.in_memory_whitelist.add(&info_hash.0.into()).await; - let request = build_scrape_request(&remote_addr, &info_hash); + let request = build_scrape_request(&client_socket_addr, &info_hash); let torrent_stats = match_scrape_response( handle_scrape( &core_udp_tracker_services.scrape_service, - remote_addr, + client_socket_addr, + 
server_socket_addr, &request, &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), @@ -298,22 +309,25 @@ mod tests { let (core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = initialize_core_tracker_services_for_listed_tracker(); - let remote_addr = sample_ipv4_remote_addr(); + let client_socket_addr = sample_ipv4_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let info_hash = InfoHash([0u8; 20]); add_a_seeder( core_tracker_services.in_memory_torrent_repository.clone(), - &remote_addr, + &client_socket_addr, &info_hash, ) .await; - let request = build_scrape_request(&remote_addr, &info_hash); + let request = build_scrape_request(&client_socket_addr, &info_hash); let torrent_stats = match_scrape_response( handle_scrape( &core_udp_tracker_services.scrape_service, - remote_addr, + client_socket_addr, + server_socket_addr, &request, &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), @@ -342,6 +356,7 @@ mod tests { mod using_ipv4 { use std::future; + use std::net::{IpAddr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use mockall::predicate::eq; @@ -367,15 +382,17 @@ mod tests { let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); - let remote_addr = sample_ipv4_remote_addr(); + let client_socket_addr = sample_ipv4_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); let (_core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); handle_scrape( &core_udp_tracker_services.scrape_service, - remote_addr, - &sample_scrape_request(&remote_addr), + client_socket_addr, + server_socket_addr, + &sample_scrape_request(&client_socket_addr), &udp_server_stats_event_sender, sample_cookie_valid_range(), ) @@ 
-386,6 +403,7 @@ mod tests { mod using_ipv6 { use std::future; + use std::net::{IpAddr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use mockall::predicate::eq; @@ -411,15 +429,17 @@ mod tests { let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); - let remote_addr = sample_ipv6_remote_addr(); + let client_socket_addr = sample_ipv6_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); let (_core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); handle_scrape( &core_udp_tracker_services.scrape_service, - remote_addr, - &sample_scrape_request(&remote_addr), + client_socket_addr, + server_socket_addr, + &sample_scrape_request(&client_socket_addr), &udp_server_stats_event_sender, sample_cookie_valid_range(), ) From 8603f8b871bb10c3d86449d9ff471d1af5d26c92 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Mar 2025 17:25:41 +0000 Subject: [PATCH 390/802] refactor: [#1380] refactor: [#1373] merge UDP stats events with different IP version --- .../udp-tracker-core/src/services/announce.rs | 15 ++---- .../udp-tracker-core/src/services/connect.rs | 25 +++------- .../udp-tracker-core/src/services/scrape.rs | 12 ++--- .../src/statistics/event/handler.rs | 49 +++++-------------- .../src/statistics/event/mod.rs | 22 +++++---- .../udp-tracker-core/src/statistics/keeper.rs | 2 +- .../src/handlers/announce.rs | 2 +- .../src/handlers/connect.rs | 4 +- 8 files changed, 44 insertions(+), 87 deletions(-) diff --git a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index 22bc05a9e..f745a90fd 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -7,7 +7,7 @@ //! //! It also sends an [`udp_tracker_core::statistics::event::Event`] //! 
because events are specific for the HTTP tracker. -use std::net::{IpAddr, SocketAddr}; +use std::net::SocketAddr; use std::ops::Range; use std::sync::Arc; @@ -103,16 +103,11 @@ impl AnnounceService { async fn send_stats_event(&self, client_socket_addr: SocketAddr, server_socket_addr: SocketAddr) { if let Some(udp_stats_event_sender) = self.opt_udp_core_stats_event_sender.as_deref() { - let event = match client_socket_addr.ip() { - IpAddr::V4(_) => statistics::event::Event::Udp4Announce { + udp_stats_event_sender + .send_event(statistics::event::Event::UdpAnnounce { context: ConnectionContext::new(client_socket_addr, server_socket_addr), - }, - IpAddr::V6(_) => statistics::event::Event::Udp6Announce { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), - }, - }; - - udp_stats_event_sender.send_event(event).await; + }) + .await; } } } diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index 5309a79d3..c3c2459cd 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -41,22 +41,11 @@ impl ConnectService { make(gen_remote_fingerprint(&client_socket_addr), cookie_issue_time).expect("it should be a normal value"); if let Some(udp_stats_event_sender) = self.opt_udp_core_stats_event_sender.as_deref() { - match client_socket_addr { - SocketAddr::V4(_) => { - udp_stats_event_sender - .send_event(statistics::event::Event::Udp4Connect { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), - }) - .await; - } - SocketAddr::V6(_) => { - udp_stats_event_sender - .send_event(statistics::event::Event::Udp6Connect { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), - }) - .await; - } - } + udp_stats_event_sender + .send_event(statistics::event::Event::UdpConnect { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + }) + .await; } connection_id @@ -149,7 +138,7 @@ 
mod tests { let mut udp_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Udp4Connect { + .with(eq(statistics::event::Event::UdpConnect { context: ConnectionContext::new(client_socket_addr, server_socket_addr), })) .times(1) @@ -172,7 +161,7 @@ mod tests { let mut udp_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::Udp6Connect { + .with(eq(statistics::event::Event::UdpConnect { context: ConnectionContext::new(client_socket_addr, server_socket_addr), })) .times(1) diff --git a/packages/udp-tracker-core/src/services/scrape.rs b/packages/udp-tracker-core/src/services/scrape.rs index 0f1ab14d8..446c1182f 100644 --- a/packages/udp-tracker-core/src/services/scrape.rs +++ b/packages/udp-tracker-core/src/services/scrape.rs @@ -85,15 +85,11 @@ impl ScrapeService { async fn send_stats_event(&self, client_socket_addr: SocketAddr, server_socket_addr: SocketAddr) { if let Some(udp_stats_event_sender) = self.opt_udp_stats_event_sender.as_deref() { - let event = match client_socket_addr { - SocketAddr::V4(_) => statistics::event::Event::Udp4Scrape { + udp_stats_event_sender + .send_event(statistics::event::Event::UdpScrape { context: ConnectionContext::new(client_socket_addr, server_socket_addr), - }, - SocketAddr::V6(_) => statistics::event::Event::Udp6Scrape { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), - }, - }; - udp_stats_event_sender.send_event(event).await; + }) + .await; } } } diff --git a/packages/udp-tracker-core/src/statistics/event/handler.rs b/packages/udp-tracker-core/src/statistics/event/handler.rs index 1f8a64a88..98860592f 100644 --- a/packages/udp-tracker-core/src/statistics/event/handler.rs +++ b/packages/udp-tracker-core/src/statistics/event/handler.rs @@ -6,52 +6,25 @@ use crate::statistics::repository::Repository; /// This function 
panics if the IP version does not match the event type. pub async fn handle_event(event: Event, stats_repository: &Repository) { match event { - // UDP4 - Event::Udp4Connect { context } => match context.client_socket_addr.ip() { + Event::UdpConnect { context } => match context.client_socket_addr.ip() { std::net::IpAddr::V4(_) => { stats_repository.increase_udp4_connections().await; } - std::net::IpAddr::V6(_) => { - panic!("IP Version 6 does not match the event type for connect"); - } - }, - Event::Udp4Announce { context } => match context.client_socket_addr.ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_announces().await; - } - std::net::IpAddr::V6(_) => { - panic!("IP Version 6 does not match the event type for announce"); - } - }, - Event::Udp4Scrape { context } => match context.client_socket_addr.ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_scrapes().await; - } - std::net::IpAddr::V6(_) => { - panic!("IP Version 6 does not match the event type for scrape"); - } - }, - - // UDP6 - Event::Udp6Connect { context } => match context.client_socket_addr.ip() { - std::net::IpAddr::V4(_) => { - panic!("IP Version 4 does not match the event type for connect"); - } std::net::IpAddr::V6(_) => { stats_repository.increase_udp6_connections().await; } }, - Event::Udp6Announce { context } => match context.client_socket_addr.ip() { + Event::UdpAnnounce { context } => match context.client_socket_addr.ip() { std::net::IpAddr::V4(_) => { - panic!("IP Version 4 does not match the event type for announce"); + stats_repository.increase_udp4_announces().await; } std::net::IpAddr::V6(_) => { stats_repository.increase_udp6_announces().await; } }, - Event::Udp6Scrape { context } => match context.client_socket_addr.ip() { + Event::UdpScrape { context } => match context.client_socket_addr.ip() { std::net::IpAddr::V4(_) => { - panic!("IP Version 4 does not match the event type for scrape"); + stats_repository.increase_udp4_scrapes().await; } 
std::net::IpAddr::V6(_) => { stats_repository.increase_udp6_scrapes().await; @@ -75,7 +48,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Udp4Connect { + Event::UdpConnect { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), @@ -95,7 +68,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Udp4Announce { + Event::UdpAnnounce { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), @@ -115,7 +88,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Udp4Scrape { + Event::UdpScrape { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), @@ -135,7 +108,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Udp6Connect { + Event::UdpConnect { context: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), @@ -155,7 +128,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Udp6Announce { + Event::UdpAnnounce { context: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), @@ -175,7 +148,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Udp6Scrape { + Event::UdpScrape { context: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), diff --git 
a/packages/udp-tracker-core/src/statistics/event/mod.rs b/packages/udp-tracker-core/src/statistics/event/mod.rs index f460f0113..05de5d118 100644 --- a/packages/udp-tracker-core/src/statistics/event/mod.rs +++ b/packages/udp-tracker-core/src/statistics/event/mod.rs @@ -6,17 +6,13 @@ pub mod sender; /// An statistics event. It is used to collect tracker metrics. /// -/// - `Udp` prefix means the event was triggered by the UDP tracker -/// - `4` or `6` prefixes means the IP version used by the peer -/// - Finally the event suffix is the type of request: `announce`, `scrape` or `connection` +/// - `Udp` prefix means the event was triggered by the UDP tracker. +/// - The event suffix is the type of request: `announce`, `scrape` or `connection`. #[derive(Debug, PartialEq, Eq)] pub enum Event { - Udp4Connect { context: ConnectionContext }, - Udp4Announce { context: ConnectionContext }, - Udp4Scrape { context: ConnectionContext }, - Udp6Connect { context: ConnectionContext }, - Udp6Announce { context: ConnectionContext }, - Udp6Scrape { context: ConnectionContext }, + UdpConnect { context: ConnectionContext }, + UdpAnnounce { context: ConnectionContext }, + UdpScrape { context: ConnectionContext }, } #[derive(Debug, PartialEq, Eq)] @@ -33,4 +29,12 @@ impl ConnectionContext { server_socket_addr, } } + + pub fn client_socket_addr(&self) -> SocketAddr { + self.client_socket_addr + } + + pub fn server_socket_addr(&self) -> SocketAddr { + self.server_socket_addr + } } diff --git a/packages/udp-tracker-core/src/statistics/keeper.rs b/packages/udp-tracker-core/src/statistics/keeper.rs index 9d0768e31..e46e634e8 100644 --- a/packages/udp-tracker-core/src/statistics/keeper.rs +++ b/packages/udp-tracker-core/src/statistics/keeper.rs @@ -73,7 +73,7 @@ mod tests { let event_sender = stats_tracker.run_event_listener(); let result = event_sender - .send_event(Event::Udp4Connect { + .send_event(Event::UdpConnect { context: ConnectionContext::new( 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index d18a81329..a0aabb765 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -859,7 +859,7 @@ mod tests { let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock .expect_send_event() - .with(eq(core_statistics::event::Event::Udp6Announce { + .with(eq(core_statistics::event::Event::UdpAnnounce { context: ConnectionContext::new(client_socket_addr, server_socket_addr), })) .times(1) diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index e3070264d..bac3d7961 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -203,7 +203,7 @@ mod tests { let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock .expect_send_event() - .with(eq(core_statistics::event::Event::Udp4Connect { + .with(eq(core_statistics::event::Event::UdpConnect { context: core_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), })) .times(1) @@ -243,7 +243,7 @@ mod tests { let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock .expect_send_event() - .with(eq(core_statistics::event::Event::Udp6Connect { + .with(eq(core_statistics::event::Event::UdpConnect { context: ConnectionContext::new(client_socket_addr, server_socket_addr), })) .times(1) From 74ffa4cfd2549fdc28484f3c4cb8844eaf6c61cf Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 17 Mar 2025 18:30:09 +0000 Subject: [PATCH 391/802] refactor: [#1382] error request kind in UDP req does 
not make sense --- .../src/statistics/event/mod.rs | 2 + .../src/handlers/announce.rs | 18 ++--- .../src/handlers/connect.rs | 12 ++-- .../udp-tracker-server/src/handlers/scrape.rs | 10 +-- .../src/server/processor.rs | 12 +++- .../src/statistics/event/handler.rs | 70 ++++++++++--------- .../src/statistics/event/mod.rs | 13 ++-- 7 files changed, 77 insertions(+), 60 deletions(-) diff --git a/packages/udp-tracker-core/src/statistics/event/mod.rs b/packages/udp-tracker-core/src/statistics/event/mod.rs index 05de5d118..216562506 100644 --- a/packages/udp-tracker-core/src/statistics/event/mod.rs +++ b/packages/udp-tracker-core/src/statistics/event/mod.rs @@ -30,10 +30,12 @@ impl ConnectionContext { } } + #[must_use] pub fn client_socket_addr(&self) -> SocketAddr { self.client_socket_addr } + #[must_use] pub fn server_socket_addr(&self) -> SocketAddr { self.server_socket_addr } diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index a0aabb765..38fe5acc6 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -16,7 +16,7 @@ use zerocopy::network_endian::I32; use crate::error::Error; use crate::statistics as server_statistics; -use crate::statistics::event::UdpResponseKind; +use crate::statistics::event::UdpRequestKind; /// It handles the `Announce` request. 
/// @@ -45,14 +45,14 @@ pub async fn handle_announce( IpAddr::V4(_) => { udp_server_stats_event_sender .send_event(server_statistics::event::Event::Udp4Request { - kind: UdpResponseKind::Announce, + kind: UdpRequestKind::Announce, }) .await; } IpAddr::V6(_) => { udp_server_stats_event_sender .send_event(server_statistics::event::Event::Udp6Request { - kind: UdpResponseKind::Announce, + kind: UdpRequestKind::Announce, }) .await; } @@ -226,7 +226,7 @@ mod tests { TorrentPeerBuilder, }; use crate::statistics as server_statistics; - use crate::statistics::event::UdpResponseKind; + use crate::statistics::event::UdpRequestKind; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -433,7 +433,7 @@ mod tests { udp_server_stats_event_sender_mock .expect_send_event() .with(eq(server_statistics::event::Event::Udp4Request { - kind: UdpResponseKind::Announce, + kind: UdpRequestKind::Announce, })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); @@ -549,7 +549,7 @@ mod tests { sample_issue_time, MockUdpServerStatsEventSender, TorrentPeerBuilder, }; use crate::statistics as server_statistics; - use crate::statistics::event::UdpResponseKind; + use crate::statistics::event::UdpRequestKind; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -775,7 +775,7 @@ mod tests { udp_server_stats_event_sender_mock .expect_send_event() .with(eq(server_statistics::event::Event::Udp6Request { - kind: UdpResponseKind::Announce, + kind: UdpRequestKind::Announce, })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); @@ -830,7 +830,7 @@ mod tests { TrackerConfigurationBuilder, }; use crate::statistics as server_statistics; - use crate::statistics::event::UdpResponseKind; + use crate::statistics::event::UdpRequestKind; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { @@ -871,7 +871,7 @@ mod tests { udp_server_stats_event_sender_mock .expect_send_event() 
.with(eq(server_statistics::event::Event::Udp6Request { - kind: UdpResponseKind::Announce, + kind: UdpRequestKind::Announce, })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index bac3d7961..2111e7584 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -7,7 +7,7 @@ use bittorrent_udp_tracker_core::services::connect::ConnectService; use tracing::{instrument, Level}; use crate::statistics as server_statistics; -use crate::statistics::event::UdpResponseKind; +use crate::statistics::event::UdpRequestKind; /// It handles the `Connect` request. #[instrument(fields(transaction_id), skip(connect_service, opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] @@ -27,14 +27,14 @@ pub async fn handle_connect( IpAddr::V4(_) => { udp_server_stats_event_sender .send_event(server_statistics::event::Event::Udp4Request { - kind: UdpResponseKind::Connect, + kind: UdpRequestKind::Connect, }) .await; } IpAddr::V6(_) => { udp_server_stats_event_sender .send_event(server_statistics::event::Event::Udp6Request { - kind: UdpResponseKind::Connect, + kind: UdpRequestKind::Connect, }) .await; } @@ -79,7 +79,7 @@ mod tests { sample_ipv6_remote_addr_fingerprint, sample_issue_time, MockUdpCoreStatsEventSender, MockUdpServerStatsEventSender, }; use crate::statistics as server_statistics; - use crate::statistics::event::UdpResponseKind; + use crate::statistics::event::UdpRequestKind; fn sample_connect_request() -> ConnectRequest { ConnectRequest { @@ -215,7 +215,7 @@ mod tests { udp_server_stats_event_sender_mock .expect_send_event() .with(eq(server_statistics::event::Event::Udp4Request { - kind: UdpResponseKind::Connect, + kind: UdpRequestKind::Connect, })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); @@ -255,7 +255,7 @@ mod tests { 
udp_server_stats_event_sender_mock .expect_send_event() .with(eq(server_statistics::event::Event::Udp6Request { - kind: UdpResponseKind::Connect, + kind: UdpRequestKind::Connect, })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index e820b2e96..137c8a3cb 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -14,7 +14,7 @@ use zerocopy::network_endian::I32; use crate::error::Error; use crate::statistics as server_statistics; -use crate::statistics::event::UdpResponseKind; +use crate::statistics::event::UdpRequestKind; /// It handles the `Scrape` request. /// @@ -41,14 +41,14 @@ pub async fn handle_scrape( IpAddr::V4(_) => { udp_server_stats_event_sender .send_event(server_statistics::event::Event::Udp4Request { - kind: UdpResponseKind::Scrape, + kind: UdpRequestKind::Scrape, }) .await; } IpAddr::V6(_) => { udp_server_stats_event_sender .send_event(server_statistics::event::Event::Udp6Request { - kind: UdpResponseKind::Scrape, + kind: UdpRequestKind::Scrape, }) .await; } @@ -375,7 +375,7 @@ mod tests { udp_server_stats_event_sender_mock .expect_send_event() .with(eq(server_statistics::event::Event::Udp4Request { - kind: server_statistics::event::UdpResponseKind::Scrape, + kind: server_statistics::event::UdpRequestKind::Scrape, })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); @@ -422,7 +422,7 @@ mod tests { udp_server_stats_event_sender_mock .expect_send_event() .with(eq(server_statistics::event::Event::Udp6Request { - kind: server_statistics::event::UdpResponseKind::Scrape, + kind: server_statistics::event::UdpRequestKind::Scrape, })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); diff --git a/packages/udp-tracker-server/src/server/processor.rs b/packages/udp-tracker-server/src/server/processor.rs index 44b543571..52188c4c2 
100644 --- a/packages/udp-tracker-server/src/server/processor.rs +++ b/packages/udp-tracker-server/src/server/processor.rs @@ -69,9 +69,15 @@ impl Processor { }; let udp_response_kind = match &response { - Response::Connect(_) => statistics::event::UdpResponseKind::Connect, - Response::AnnounceIpv4(_) | Response::AnnounceIpv6(_) => statistics::event::UdpResponseKind::Announce, - Response::Scrape(_) => statistics::event::UdpResponseKind::Scrape, + Response::Connect(_) => statistics::event::UdpResponseKind::Ok { + req_kind: statistics::event::UdpRequestKind::Connect, + }, + Response::AnnounceIpv4(_) | Response::AnnounceIpv6(_) => statistics::event::UdpResponseKind::Ok { + req_kind: statistics::event::UdpRequestKind::Announce, + }, + Response::Scrape(_) => statistics::event::UdpResponseKind::Ok { + req_kind: statistics::event::UdpRequestKind::Scrape, + }, Response::Error(_e) => statistics::event::UdpResponseKind::Error, }; diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index 5ce9f6307..7c7e4a8e7 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -1,4 +1,4 @@ -use crate::statistics::event::{Event, UdpResponseKind}; +use crate::statistics::event::{Event, UdpRequestKind, UdpResponseKind}; use crate::statistics::repository::Repository; pub async fn handle_event(event: Event, stats_repository: &Repository) { @@ -16,16 +16,15 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { stats_repository.increase_udp4_requests().await; } Event::Udp4Request { kind } => match kind { - UdpResponseKind::Connect => { + UdpRequestKind::Connect => { stats_repository.increase_udp4_connections().await; } - UdpResponseKind::Announce => { + UdpRequestKind::Announce => { stats_repository.increase_udp4_announces().await; } - UdpResponseKind::Scrape => { + UdpRequestKind::Scrape => { 
stats_repository.increase_udp4_scrapes().await; } - UdpResponseKind::Error => {} }, Event::Udp4Response { kind, @@ -34,21 +33,23 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { stats_repository.increase_udp4_responses().await; match kind { - UdpResponseKind::Connect => { - stats_repository - .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) - .await; - } - UdpResponseKind::Announce => { - stats_repository - .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) - .await; - } - UdpResponseKind::Scrape => { - stats_repository - .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) - .await; - } + UdpResponseKind::Ok { req_kind } => match req_kind { + UdpRequestKind::Connect => { + stats_repository + .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) + .await; + } + UdpRequestKind::Announce => { + stats_repository + .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) + .await; + } + UdpRequestKind::Scrape => { + stats_repository + .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) + .await; + } + }, UdpResponseKind::Error => {} } } @@ -61,16 +62,15 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { stats_repository.increase_udp6_requests().await; } Event::Udp6Request { kind } => match kind { - UdpResponseKind::Connect => { + UdpRequestKind::Connect => { stats_repository.increase_udp6_connections().await; } - UdpResponseKind::Announce => { + UdpRequestKind::Announce => { stats_repository.increase_udp6_announces().await; } - UdpResponseKind::Scrape => { + UdpRequestKind::Scrape => { stats_repository.increase_udp6_scrapes().await; } - UdpResponseKind::Error => {} }, Event::Udp6Response { kind: _, @@ -89,7 +89,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { #[cfg(test)] mod tests { use crate::statistics::event::handler::handle_event; - use crate::statistics::event::Event; + use 
crate::statistics::event::{Event, UdpRequestKind}; use crate::statistics::repository::Repository; #[tokio::test] @@ -148,7 +148,7 @@ mod tests { handle_event( Event::Udp4Request { - kind: crate::statistics::event::UdpResponseKind::Connect, + kind: crate::statistics::event::UdpRequestKind::Connect, }, &stats_repository, ) @@ -165,7 +165,7 @@ mod tests { handle_event( Event::Udp4Request { - kind: crate::statistics::event::UdpResponseKind::Announce, + kind: crate::statistics::event::UdpRequestKind::Announce, }, &stats_repository, ) @@ -182,7 +182,7 @@ mod tests { handle_event( Event::Udp4Request { - kind: crate::statistics::event::UdpResponseKind::Scrape, + kind: crate::statistics::event::UdpRequestKind::Scrape, }, &stats_repository, ) @@ -199,7 +199,9 @@ mod tests { handle_event( Event::Udp4Response { - kind: crate::statistics::event::UdpResponseKind::Announce, + kind: crate::statistics::event::UdpResponseKind::Ok { + req_kind: UdpRequestKind::Announce, + }, req_processing_time: std::time::Duration::from_secs(1), }, &stats_repository, @@ -228,7 +230,7 @@ mod tests { handle_event( Event::Udp6Request { - kind: crate::statistics::event::UdpResponseKind::Connect, + kind: crate::statistics::event::UdpRequestKind::Connect, }, &stats_repository, ) @@ -245,7 +247,7 @@ mod tests { handle_event( Event::Udp6Request { - kind: crate::statistics::event::UdpResponseKind::Announce, + kind: crate::statistics::event::UdpRequestKind::Announce, }, &stats_repository, ) @@ -262,7 +264,7 @@ mod tests { handle_event( Event::Udp6Request { - kind: crate::statistics::event::UdpResponseKind::Scrape, + kind: crate::statistics::event::UdpRequestKind::Scrape, }, &stats_repository, ) @@ -279,7 +281,9 @@ mod tests { handle_event( Event::Udp6Response { - kind: crate::statistics::event::UdpResponseKind::Announce, + kind: crate::statistics::event::UdpResponseKind::Ok { + req_kind: UdpRequestKind::Announce, + }, req_processing_time: std::time::Duration::from_secs(1), }, &stats_repository, diff --git 
a/packages/udp-tracker-server/src/statistics/event/mod.rs b/packages/udp-tracker-server/src/statistics/event/mod.rs index 6a48b9449..3b14806aa 100644 --- a/packages/udp-tracker-server/src/statistics/event/mod.rs +++ b/packages/udp-tracker-server/src/statistics/event/mod.rs @@ -22,7 +22,7 @@ pub enum Event { // UDP4 Udp4IncomingRequest, Udp4Request { - kind: UdpResponseKind, + kind: UdpRequestKind, }, Udp4Response { kind: UdpResponseKind, @@ -33,7 +33,7 @@ pub enum Event { // UDP6 Udp6IncomingRequest, Udp6Request { - kind: UdpResponseKind, + kind: UdpRequestKind, }, Udp6Response { kind: UdpResponseKind, @@ -43,9 +43,14 @@ pub enum Event { } #[derive(Debug, PartialEq, Eq)] -pub enum UdpResponseKind { +pub enum UdpRequestKind { Connect, Announce, Scrape, - Error, +} + +#[derive(Debug, PartialEq, Eq)] +pub enum UdpResponseKind { + Ok { req_kind: UdpRequestKind }, + Error, // todo: add the request kind `{ req_kind: Option(UdpRequestKind) }` when we know it. } From e4c6000645bca78ba6d69900574931603b4581f1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Mar 2025 08:46:29 +0000 Subject: [PATCH 392/802] refactor: [#1382] add connection context to UDP server events --- .../src/handlers/announce.rs | 22 +- .../src/handlers/connect.rs | 17 +- .../udp-tracker-server/src/handlers/error.rs | 21 +- .../udp-tracker-server/src/handlers/scrape.rs | 20 +- .../udp-tracker-server/src/server/launcher.rs | 23 +- .../src/server/processor.rs | 13 +- .../src/statistics/event/handler.rs | 273 ++++++++++++++---- .../src/statistics/event/mod.rs | 60 +++- .../src/statistics/keeper.rs | 13 +- 9 files changed, 357 insertions(+), 105 deletions(-) diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 38fe5acc6..41e40695d 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -16,7 +16,7 @@ use zerocopy::network_endian::I32; use 
crate::error::Error; use crate::statistics as server_statistics; -use crate::statistics::event::UdpRequestKind; +use crate::statistics::event::{ConnectionContext, UdpRequestKind}; /// It handles the `Announce` request. /// @@ -45,6 +45,7 @@ pub async fn handle_announce( IpAddr::V4(_) => { udp_server_stats_event_sender .send_event(server_statistics::event::Event::Udp4Request { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Announce, }) .await; @@ -52,6 +53,7 @@ pub async fn handle_announce( IpAddr::V6(_) => { udp_server_stats_event_sender .send_event(server_statistics::event::Event::Udp6Request { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Announce, }) .await; @@ -429,10 +431,14 @@ mod tests { #[tokio::test] async fn should_send_the_upd4_announce_event() { + let client_socket_addr = sample_ipv4_socket_address(); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() .with(eq(server_statistics::event::Event::Udp4Request { + context: server_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Announce, })) .times(1) @@ -443,9 +449,6 @@ mod tests { let (core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); - let client_socket_addr = sample_ipv4_socket_address(); - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); - handle_announce( &core_udp_tracker_services.announce_service, client_socket_addr, @@ -771,10 +774,14 @@ mod tests { #[tokio::test] async fn should_send_the_upd6_announce_event() { + let client_socket_addr = sample_ipv6_remote_addr(); + let server_socket_addr = 
SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); + let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() .with(eq(server_statistics::event::Event::Udp6Request { + context: server_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Announce, })) .times(1) @@ -785,9 +792,6 @@ mod tests { let (core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); - let client_socket_addr = sample_ipv6_remote_addr(); - let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); - let announce_request = AnnounceRequestBuilder::default() .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) .into(); @@ -819,7 +823,6 @@ mod tests { use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use bittorrent_udp_tracker_core::services::announce::AnnounceService; - use bittorrent_udp_tracker_core::statistics::event::ConnectionContext; use bittorrent_udp_tracker_core::{self, statistics as core_statistics}; use mockall::predicate::eq; @@ -860,7 +863,7 @@ mod tests { udp_core_stats_event_sender_mock .expect_send_event() .with(eq(core_statistics::event::Event::UdpAnnounce { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: core_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); @@ -871,6 +874,7 @@ mod tests { udp_server_stats_event_sender_mock .expect_send_event() .with(eq(server_statistics::event::Event::Udp6Request { + context: server_statistics::event::ConnectionContext::new(client_socket_addr, 
server_socket_addr), kind: UdpRequestKind::Announce, })) .times(1) diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index 2111e7584..3e0012d7d 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -7,13 +7,13 @@ use bittorrent_udp_tracker_core::services::connect::ConnectService; use tracing::{instrument, Level}; use crate::statistics as server_statistics; -use crate::statistics::event::UdpRequestKind; +use crate::statistics::event::{ConnectionContext, UdpRequestKind}; /// It handles the `Connect` request. #[instrument(fields(transaction_id), skip(connect_service, opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_connect( - remote_addr: SocketAddr, - server_addr: SocketAddr, + client_socket_addr: SocketAddr, + server_socket_addr: SocketAddr, request: &ConnectRequest, connect_service: &Arc, opt_udp_server_stats_event_sender: &Arc>>, @@ -23,10 +23,11 @@ pub async fn handle_connect( tracing::trace!("handle connect"); if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { - match remote_addr.ip() { + match client_socket_addr.ip() { IpAddr::V4(_) => { udp_server_stats_event_sender .send_event(server_statistics::event::Event::Udp4Request { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Connect, }) .await; @@ -34,6 +35,7 @@ pub async fn handle_connect( IpAddr::V6(_) => { udp_server_stats_event_sender .send_event(server_statistics::event::Event::Udp6Request { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Connect, }) .await; @@ -42,7 +44,7 @@ pub async fn handle_connect( } let connection_id = connect_service - .handle_connect(remote_addr, server_addr, cookie_issue_time) + .handle_connect(client_socket_addr, server_socket_addr, cookie_issue_time) .await; 
build_response(*request, connection_id) @@ -70,7 +72,6 @@ mod tests { use bittorrent_udp_tracker_core::connection_cookie::make; use bittorrent_udp_tracker_core::services::connect::ConnectService; use bittorrent_udp_tracker_core::statistics as core_statistics; - use bittorrent_udp_tracker_core::statistics::event::ConnectionContext; use mockall::predicate::eq; use crate::handlers::handle_connect; @@ -215,6 +216,7 @@ mod tests { udp_server_stats_event_sender_mock .expect_send_event() .with(eq(server_statistics::event::Event::Udp4Request { + context: server_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Connect, })) .times(1) @@ -244,7 +246,7 @@ mod tests { udp_core_stats_event_sender_mock .expect_send_event() .with(eq(core_statistics::event::Event::UdpConnect { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: core_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(()))))); @@ -255,6 +257,7 @@ mod tests { udp_server_stats_event_sender_mock .expect_send_event() .with(eq(server_statistics::event::Event::Udp6Request { + context: server_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Connect, })) .times(1) diff --git a/packages/udp-tracker-server/src/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs index e4bd382da..df553be9f 100644 --- a/packages/udp-tracker-server/src/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -12,12 +12,13 @@ use zerocopy::network_endian::I32; use crate::error::Error; use crate::statistics as server_statistics; +use crate::statistics::event::ConnectionContext; #[allow(clippy::too_many_arguments)] #[instrument(fields(transaction_id), skip(opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_error( - remote_addr: SocketAddr, - 
local_addr: SocketAddr, + client_socket_addr: SocketAddr, + server_socket_addr: SocketAddr, request_id: Uuid, opt_udp_server_stats_event_sender: &Arc>>, cookie_valid_range: Range, @@ -29,10 +30,10 @@ pub async fn handle_error( match transaction_id { Some(transaction_id) => { let transaction_id = transaction_id.0.to_string(); - tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %e, %remote_addr, %local_addr, %request_id, %transaction_id, "response error"); + tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %e, %client_socket_addr, %server_socket_addr, %request_id, %transaction_id, "response error"); } None => { - tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %e, %remote_addr, %local_addr, %request_id, "response error"); + tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %e, %client_socket_addr, %server_socket_addr, %request_id, "response error"); } } @@ -43,7 +44,7 @@ pub async fn handle_error( transaction_id, err, } => { - if let Err(e) = check(connection_id, gen_remote_fingerprint(&remote_addr), cookie_valid_range) { + if let Err(e) = check(connection_id, gen_remote_fingerprint(&client_socket_addr), cookie_valid_range) { (e.to_string(), Some(*transaction_id)) } else { ((*err).to_string(), Some(*transaction_id)) @@ -57,15 +58,19 @@ pub async fn handle_error( if e.1.is_some() { if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { - match remote_addr { + match client_socket_addr { SocketAddr::V4(_) => { udp_server_stats_event_sender - .send_event(server_statistics::event::Event::Udp4Error) + .send_event(server_statistics::event::Event::Udp4Error { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + }) .await; } SocketAddr::V6(_) => { udp_server_stats_event_sender - .send_event(server_statistics::event::Event::Udp6Error) + .send_event(server_statistics::event::Event::Udp6Error { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + }) .await; } } diff 
--git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index 137c8a3cb..5f33f55ad 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -14,7 +14,7 @@ use zerocopy::network_endian::I32; use crate::error::Error; use crate::statistics as server_statistics; -use crate::statistics::event::UdpRequestKind; +use crate::statistics::event::{ConnectionContext, UdpRequestKind}; /// It handles the `Scrape` request. /// @@ -41,6 +41,7 @@ pub async fn handle_scrape( IpAddr::V4(_) => { udp_server_stats_event_sender .send_event(server_statistics::event::Event::Udp4Request { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Scrape, }) .await; @@ -48,6 +49,7 @@ pub async fn handle_scrape( IpAddr::V6(_) => { udp_server_stats_event_sender .send_event(server_statistics::event::Event::Udp6Request { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Scrape, }) .await; @@ -368,13 +370,18 @@ mod tests { sample_ipv4_remote_addr, MockUdpServerStatsEventSender, }; use crate::statistics as server_statistics; + use crate::statistics::event::ConnectionContext; #[tokio::test] async fn should_send_the_upd4_scrape_event() { + let client_socket_addr = sample_ipv4_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); + let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() .with(eq(server_statistics::event::Event::Udp4Request { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: server_statistics::event::UdpRequestKind::Scrape, })) .times(1) @@ -382,9 +389,6 @@ mod tests { let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); - let client_socket_addr = 
sample_ipv4_remote_addr(); - let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); - let (_core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); @@ -415,13 +419,18 @@ mod tests { sample_ipv6_remote_addr, MockUdpServerStatsEventSender, }; use crate::statistics as server_statistics; + use crate::statistics::event::ConnectionContext; #[tokio::test] async fn should_send_the_upd6_scrape_event() { + let client_socket_addr = sample_ipv6_remote_addr(); + let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); + let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() .with(eq(server_statistics::event::Event::Udp6Request { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: server_statistics::event::UdpRequestKind::Scrape, })) .times(1) @@ -429,9 +438,6 @@ mod tests { let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); - let client_socket_addr = sample_ipv6_remote_addr(); - let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); - let (_core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); diff --git a/packages/udp-tracker-server/src/server/launcher.rs b/packages/udp-tracker-server/src/server/launcher.rs index acd214ab0..0dfbba174 100644 --- a/packages/udp-tracker-server/src/server/launcher.rs +++ b/packages/udp-tracker-server/src/server/launcher.rs @@ -21,6 +21,7 @@ use crate::server::bound_socket::BoundSocket; use crate::server::processor::Processor; use crate::server::receiver::Receiver; use crate::statistics; +use crate::statistics::event::ConnectionContext; const 
IP_BANS_RESET_INTERVAL_IN_SECS: u64 = 3600; @@ -129,9 +130,9 @@ impl Launcher { ) { let active_requests = &mut ActiveRequests::default(); - let addr = receiver.bound_socket_address(); + let server_socket_addr = receiver.bound_socket_address(); - let local_addr = format!("udp://{addr}"); + let local_addr = format!("udp://{server_socket_addr}"); let cookie_lifetime = cookie_lifetime.as_secs_f64(); @@ -167,17 +168,23 @@ impl Launcher { } }; + let client_socket_addr = req.from; + if let Some(udp_server_stats_event_sender) = udp_tracker_server_container.udp_server_stats_event_sender.as_deref() { match req.from.ip() { IpAddr::V4(_) => { udp_server_stats_event_sender - .send_event(statistics::event::Event::Udp4IncomingRequest) + .send_event(statistics::event::Event::Udp4IncomingRequest { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + }) .await; } IpAddr::V6(_) => { udp_server_stats_event_sender - .send_event(statistics::event::Event::Udp6IncomingRequest) + .send_event(statistics::event::Event::Udp6IncomingRequest { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + }) .await; } } @@ -190,7 +197,9 @@ impl Launcher { udp_tracker_server_container.udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(statistics::event::Event::UdpRequestBanned) + .send_event(statistics::event::Event::UdpRequestBanned { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + }) .await; } @@ -230,7 +239,9 @@ impl Launcher { udp_tracker_server_container.udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(statistics::event::Event::UdpRequestAborted) + .send_event(statistics::event::Event::UdpRequestAborted { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + }) .await; } } diff --git a/packages/udp-tracker-server/src/server/processor.rs b/packages/udp-tracker-server/src/server/processor.rs index 52188c4c2..999d74d00 100644 --- 
a/packages/udp-tracker-server/src/server/processor.rs +++ b/packages/udp-tracker-server/src/server/processor.rs @@ -12,6 +12,7 @@ use tracing::{instrument, Level}; use super::bound_socket::BoundSocket; use crate::container::UdpTrackerServerContainer; use crate::handlers::CookieTimeValues; +use crate::statistics::event::ConnectionContext; use crate::{handlers, statistics, RawRequest}; pub struct Processor { @@ -38,7 +39,7 @@ impl Processor { #[instrument(skip(self, request))] pub async fn process_request(self, request: RawRequest) { - let from = request.from; + let client_socket_addr = request.from; let start_time = Instant::now(); @@ -53,11 +54,11 @@ impl Processor { let elapsed_time = start_time.elapsed(); - self.send_response(from, response, elapsed_time).await; + self.send_response(client_socket_addr, response, elapsed_time).await; } #[instrument(skip(self))] - async fn send_response(self, target: SocketAddr, response: Response, req_processing_time: Duration) { + async fn send_response(self, client_socket_addr: SocketAddr, response: Response, req_processing_time: Duration) { tracing::debug!("send response"); let response_type = match &response { @@ -88,7 +89,7 @@ impl Processor { let bytes_count = writer.get_ref().len(); let payload = writer.get_ref(); - let () = match self.send_packet(&target, payload).await { + let () = match self.send_packet(&client_socket_addr, payload).await { Ok(sent_bytes) => { if tracing::event_enabled!(Level::TRACE) { tracing::debug!(%bytes_count, %sent_bytes, ?payload, "sent {response_type}"); @@ -99,10 +100,11 @@ impl Processor { if let Some(udp_server_stats_event_sender) = self.udp_tracker_server_container.udp_server_stats_event_sender.as_deref() { - match target.ip() { + match client_socket_addr.ip() { IpAddr::V4(_) => { udp_server_stats_event_sender .send_event(statistics::event::Event::Udp4Response { + context: ConnectionContext::new(client_socket_addr, self.socket.address()), kind: udp_response_kind, req_processing_time, }) @@ 
-111,6 +113,7 @@ impl Processor { IpAddr::V6(_) => { udp_server_stats_event_sender .send_event(statistics::event::Event::Udp6Response { + context: ConnectionContext::new(client_socket_addr, self.socket.address()), kind: udp_response_kind, req_processing_time, }) diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index 7c7e4a8e7..bda07f678 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -1,85 +1,161 @@ use crate::statistics::event::{Event, UdpRequestKind, UdpResponseKind}; use crate::statistics::repository::Repository; +/// # Panics +/// +/// This function panics if the client IP version does not match the expected +/// version. +#[allow(clippy::too_many_lines)] pub async fn handle_event(event: Event, stats_repository: &Repository) { match event { // UDP - Event::UdpRequestAborted => { + Event::UdpRequestAborted { .. } => { stats_repository.increase_udp_requests_aborted().await; } - Event::UdpRequestBanned => { + Event::UdpRequestBanned { .. 
} => { stats_repository.increase_udp_requests_banned().await; } // UDP4 - Event::Udp4IncomingRequest => { - stats_repository.increase_udp4_requests().await; + Event::Udp4IncomingRequest { context } => { + if context.client_socket_addr.is_ipv4() { + stats_repository.increase_udp4_requests().await; + } else { + panic!("Client IP version does not match the expected version IPv4 for incoming request"); + } } - Event::Udp4Request { kind } => match kind { + Event::Udp4Request { context, kind } => match kind { UdpRequestKind::Connect => { - stats_repository.increase_udp4_connections().await; + if context.client_socket_addr.is_ipv4() { + stats_repository.increase_udp4_connections().await; + } else { + panic!("Client IP version does not match the expected version IPv4 for connect request"); + } } UdpRequestKind::Announce => { - stats_repository.increase_udp4_announces().await; + if context.client_socket_addr.is_ipv4() { + stats_repository.increase_udp4_announces().await; + } else { + panic!("Client IP version does not match the expected version IPv4 for announce request"); + } } UdpRequestKind::Scrape => { - stats_repository.increase_udp4_scrapes().await; + if context.client_socket_addr.is_ipv4() { + stats_repository.increase_udp4_scrapes().await; + } else { + panic!("Client IP version does not match the expected version IPv4 for scrape request"); + } } }, Event::Udp4Response { + context, kind, req_processing_time, } => { - stats_repository.increase_udp4_responses().await; - - match kind { - UdpResponseKind::Ok { req_kind } => match req_kind { - UdpRequestKind::Connect => { - stats_repository - .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) - .await; - } - UdpRequestKind::Announce => { - stats_repository - .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) - .await; - } - UdpRequestKind::Scrape => { - stats_repository - .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) - .await; - } - }, - UdpResponseKind::Error => 
{} + if context.client_socket_addr.is_ipv4() { + stats_repository.increase_udp4_responses().await; + + match kind { + UdpResponseKind::Ok { req_kind } => match req_kind { + UdpRequestKind::Connect => { + stats_repository + .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) + .await; + } + UdpRequestKind::Announce => { + stats_repository + .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) + .await; + } + UdpRequestKind::Scrape => { + stats_repository + .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) + .await; + } + }, + UdpResponseKind::Error => {} + } + } else { + panic!("Client IP version does not match the expected version IPv4 for response"); } } - Event::Udp4Error => { - stats_repository.increase_udp4_errors().await; + Event::Udp4Error { context } => { + if context.client_socket_addr.is_ipv4() { + stats_repository.increase_udp4_errors().await; + } else { + panic!("Client IP version does not match the expected version IPv4 for error"); + } } // UDP6 - Event::Udp6IncomingRequest => { - stats_repository.increase_udp6_requests().await; + Event::Udp6IncomingRequest { context } => { + if context.client_socket_addr.is_ipv6() { + stats_repository.increase_udp6_requests().await; + } else { + panic!("Client IP version does not match the expected version IPv6 for incoming request"); + } } - Event::Udp6Request { kind } => match kind { + Event::Udp6Request { context, kind } => match kind { UdpRequestKind::Connect => { - stats_repository.increase_udp6_connections().await; + if context.client_socket_addr.is_ipv6() { + stats_repository.increase_udp6_connections().await; + } else { + panic!("Client IP version does not match the expected version IPv6 for connect request"); + } } UdpRequestKind::Announce => { - stats_repository.increase_udp6_announces().await; + if context.client_socket_addr.is_ipv6() { + stats_repository.increase_udp6_announces().await; + } else { + panic!("Client IP version does not match the expected 
version IPv6 for announce request"); + } } UdpRequestKind::Scrape => { - stats_repository.increase_udp6_scrapes().await; + if context.client_socket_addr.is_ipv6() { + stats_repository.increase_udp6_scrapes().await; + } else { + panic!("Client IP version does not match the expected version IPv6 for scrape request"); + } } }, Event::Udp6Response { - kind: _, - req_processing_time: _, + context, + kind, + req_processing_time, } => { - stats_repository.increase_udp6_responses().await; + if context.client_socket_addr.is_ipv6() { + stats_repository.increase_udp6_responses().await; + + match kind { + UdpResponseKind::Ok { req_kind } => match req_kind { + UdpRequestKind::Connect => { + stats_repository + .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) + .await; + } + UdpRequestKind::Announce => { + stats_repository + .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) + .await; + } + UdpRequestKind::Scrape => { + stats_repository + .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) + .await; + } + }, + UdpResponseKind::Error => {} + } + } else { + panic!("Client IP version does not match the expected version IPv6 for response"); + } } - Event::Udp6Error => { - stats_repository.increase_udp6_errors().await; + Event::Udp6Error { context } => { + if context.client_socket_addr.is_ipv6() { + stats_repository.increase_udp6_errors().await; + } else { + panic!("Client IP version does not match the expected version IPv6 for error"); + } } } @@ -88,15 +164,26 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { #[cfg(test)] mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use crate::statistics::event::handler::handle_event; - use crate::statistics::event::{Event, UdpRequestKind}; + use crate::statistics::event::{ConnectionContext, Event, UdpRequestKind}; use crate::statistics::repository::Repository; #[tokio::test] async fn 
should_increase_the_number_of_aborted_requests_when_it_receives_a_udp_request_aborted_event() { let stats_repository = Repository::new(); - handle_event(Event::UdpRequestAborted, &stats_repository).await; + handle_event( + Event::UdpRequestAborted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ), + }, + &stats_repository, + ) + .await; let stats = stats_repository.get_stats().await; @@ -107,7 +194,16 @@ mod tests { async fn should_increase_the_number_of_banned_requests_when_it_receives_a_udp_request_banned_event() { let stats_repository = Repository::new(); - handle_event(Event::UdpRequestBanned, &stats_repository).await; + handle_event( + Event::UdpRequestBanned { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ), + }, + &stats_repository, + ) + .await; let stats = stats_repository.get_stats().await; @@ -118,7 +214,16 @@ mod tests { async fn should_increase_the_number_of_incoming_requests_when_it_receives_a_udp4_incoming_request_event() { let stats_repository = Repository::new(); - handle_event(Event::Udp4IncomingRequest, &stats_repository).await; + handle_event( + Event::Udp4IncomingRequest { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ), + }, + &stats_repository, + ) + .await; let stats = stats_repository.get_stats().await; @@ -129,7 +234,16 @@ mod tests { async fn should_increase_the_udp_abort_counter_when_it_receives_a_udp_abort_event() { let stats_repository = Repository::new(); - handle_event(Event::UdpRequestAborted, &stats_repository).await; + handle_event( + Event::UdpRequestAborted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 
113, 195)), 8080), + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ), + }, + &stats_repository, + ) + .await; let stats = stats_repository.get_stats().await; assert_eq!(stats.udp_requests_aborted, 1); } @@ -137,7 +251,16 @@ mod tests { async fn should_increase_the_udp_ban_counter_when_it_receives_a_udp_banned_event() { let stats_repository = Repository::new(); - handle_event(Event::UdpRequestBanned, &stats_repository).await; + handle_event( + Event::UdpRequestBanned { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ), + }, + &stats_repository, + ) + .await; let stats = stats_repository.get_stats().await; assert_eq!(stats.udp_requests_banned, 1); } @@ -148,6 +271,10 @@ mod tests { handle_event( Event::Udp4Request { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ), kind: crate::statistics::event::UdpRequestKind::Connect, }, &stats_repository, @@ -165,6 +292,10 @@ mod tests { handle_event( Event::Udp4Request { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ), kind: crate::statistics::event::UdpRequestKind::Announce, }, &stats_repository, @@ -182,6 +313,10 @@ mod tests { handle_event( Event::Udp4Request { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ), kind: crate::statistics::event::UdpRequestKind::Scrape, }, &stats_repository, @@ -199,6 +334,10 @@ mod tests { handle_event( Event::Udp4Response { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ), kind: crate::statistics::event::UdpResponseKind::Ok { req_kind: UdpRequestKind::Announce, }, @@ -217,7 +356,16 @@ mod tests { async fn should_increase_the_udp4_errors_counter_when_it_receives_a_udp4_error_event() { let stats_repository = Repository::new(); - handle_event(Event::Udp4Error, &stats_repository).await; + handle_event( + Event::Udp4Error { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ), + }, + &stats_repository, + ) + .await; let stats = stats_repository.get_stats().await; @@ -230,6 +378,10 @@ mod tests { handle_event( Event::Udp6Request { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ), kind: crate::statistics::event::UdpRequestKind::Connect, }, &stats_repository, @@ -247,6 +399,10 @@ mod tests { handle_event( Event::Udp6Request { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ), kind: crate::statistics::event::UdpRequestKind::Announce, }, &stats_repository, @@ -264,6 +420,10 @@ mod tests { handle_event( Event::Udp6Request { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ), kind: crate::statistics::event::UdpRequestKind::Scrape, }, &stats_repository, @@ -281,6 +441,10 @@ mod tests { handle_event( Event::Udp6Response { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 
113, 196)), 6969), + ), kind: crate::statistics::event::UdpResponseKind::Ok { req_kind: UdpRequestKind::Announce, }, @@ -298,7 +462,16 @@ mod tests { async fn should_increase_the_udp6_errors_counter_when_it_receives_a_udp6_error_event() { let stats_repository = Repository::new(); - handle_event(Event::Udp6Error, &stats_repository).await; + handle_event( + Event::Udp6Error { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ), + }, + &stats_repository, + ) + .await; let stats = stats_repository.get_stats().await; diff --git a/packages/udp-tracker-server/src/statistics/event/mod.rs b/packages/udp-tracker-server/src/statistics/event/mod.rs index 3b14806aa..64e2cb9c1 100644 --- a/packages/udp-tracker-server/src/statistics/event/mod.rs +++ b/packages/udp-tracker-server/src/statistics/event/mod.rs @@ -1,3 +1,4 @@ +use std::net::SocketAddr; use std::time::Duration; pub mod handler; @@ -6,40 +7,51 @@ pub mod sender; /// An statistics event. It is used to collect tracker metrics. /// -/// - `Tcp` prefix means the event was triggered by the HTTP tracker /// - `Udp` prefix means the event was triggered by the UDP tracker /// - `4` or `6` prefixes means the IP version used by the peer /// - Finally the event suffix is the type of request: `announce`, `scrape` or `connection` -/// -/// > NOTE: HTTP trackers do not use `connection` requests. #[derive(Debug, PartialEq, Eq)] pub enum Event { - // code-review: consider one single event for request type with data: Event::Announce { scheme: HTTPorUDP, ip_version: V4orV6 } - // Attributes are enums too. 
- UdpRequestAborted, - UdpRequestBanned, + UdpRequestAborted { + context: ConnectionContext, + }, + UdpRequestBanned { + context: ConnectionContext, + }, // UDP4 - Udp4IncomingRequest, + Udp4IncomingRequest { + context: ConnectionContext, + }, Udp4Request { + context: ConnectionContext, kind: UdpRequestKind, }, Udp4Response { + context: ConnectionContext, kind: UdpResponseKind, req_processing_time: Duration, }, - Udp4Error, + Udp4Error { + context: ConnectionContext, + }, // UDP6 - Udp6IncomingRequest, + Udp6IncomingRequest { + context: ConnectionContext, + }, Udp6Request { + context: ConnectionContext, kind: UdpRequestKind, }, Udp6Response { + context: ConnectionContext, kind: UdpResponseKind, req_processing_time: Duration, }, - Udp6Error, + Udp6Error { + context: ConnectionContext, + }, } #[derive(Debug, PartialEq, Eq)] @@ -54,3 +66,29 @@ pub enum UdpResponseKind { Ok { req_kind: UdpRequestKind }, Error, // todo: add the request kind `{ req_kind: Option(UdpRequestKind) }` when we know it. 
} + +#[derive(Debug, PartialEq, Eq)] +pub struct ConnectionContext { + client_socket_addr: SocketAddr, + server_socket_addr: SocketAddr, +} + +impl ConnectionContext { + #[must_use] + pub fn new(client_socket_addr: SocketAddr, server_socket_addr: SocketAddr) -> Self { + Self { + client_socket_addr, + server_socket_addr, + } + } + + #[must_use] + pub fn client_socket_addr(&self) -> SocketAddr { + self.client_socket_addr + } + + #[must_use] + pub fn server_socket_addr(&self) -> SocketAddr { + self.server_socket_addr + } +} diff --git a/packages/udp-tracker-server/src/statistics/keeper.rs b/packages/udp-tracker-server/src/statistics/keeper.rs index ae80e7970..a6e6dde70 100644 --- a/packages/udp-tracker-server/src/statistics/keeper.rs +++ b/packages/udp-tracker-server/src/statistics/keeper.rs @@ -51,7 +51,9 @@ impl Keeper { #[cfg(test)] mod tests { - use crate::statistics::event::Event; + use std::net::{IpAddr, Ipv6Addr, SocketAddr}; + + use crate::statistics::event::{ConnectionContext, Event}; use crate::statistics::keeper::Keeper; use crate::statistics::metrics::Metrics; @@ -70,7 +72,14 @@ mod tests { let event_sender = stats_tracker.run_event_listener(); - let result = event_sender.send_event(Event::Udp4IncomingRequest).await; + let result = event_sender + .send_event(Event::Udp4IncomingRequest { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ), + }) + .await; assert!(result.is_some()); } From 203a1b45e4e34103b9788393533e79613aab3dfa Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Mar 2025 10:36:50 +0000 Subject: [PATCH 393/802] refactor: [#1382] merge UDP server stats events with different IP version --- .../src/handlers/announce.rs | 30 +-- .../src/handlers/connect.rs | 30 +-- .../udp-tracker-server/src/handlers/error.rs | 21 +- .../udp-tracker-server/src/handlers/scrape.rs | 30 +-- 
.../udp-tracker-server/src/server/launcher.rs | 23 +- .../src/server/processor.rs | 29 +-- .../src/statistics/event/handler.rs | 200 ++++++------------ .../src/statistics/event/mod.rs | 35 +-- .../src/statistics/keeper.rs | 2 +- 9 files changed, 120 insertions(+), 280 deletions(-) diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 41e40695d..6b5cbb42b 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -41,24 +41,12 @@ pub async fn handle_announce( tracing::trace!("handle announce"); if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { - match client_socket_addr.ip() { - IpAddr::V4(_) => { - udp_server_stats_event_sender - .send_event(server_statistics::event::Event::Udp4Request { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), - kind: UdpRequestKind::Announce, - }) - .await; - } - IpAddr::V6(_) => { - udp_server_stats_event_sender - .send_event(server_statistics::event::Event::Udp6Request { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), - kind: UdpRequestKind::Announce, - }) - .await; - } - } + udp_server_stats_event_sender + .send_event(server_statistics::event::Event::UdpRequest { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + kind: UdpRequestKind::Announce, + }) + .await; } let announce_data = announce_service @@ -437,7 +425,7 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::Udp4Request { + .with(eq(server_statistics::event::Event::UdpRequest { context: server_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Announce, })) @@ -780,7 +768,7 @@ mod tests { let mut 
udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::Udp6Request { + .with(eq(server_statistics::event::Event::UdpRequest { context: server_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Announce, })) @@ -873,7 +861,7 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::Udp6Request { + .with(eq(server_statistics::event::Event::UdpRequest { context: server_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Announce, })) diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index 3e0012d7d..7d96f4cbd 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -1,5 +1,5 @@ //! UDP tracker connect handler. 
-use std::net::{IpAddr, SocketAddr}; +use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, ConnectionId, Response}; @@ -23,24 +23,12 @@ pub async fn handle_connect( tracing::trace!("handle connect"); if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { - match client_socket_addr.ip() { - IpAddr::V4(_) => { - udp_server_stats_event_sender - .send_event(server_statistics::event::Event::Udp4Request { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), - kind: UdpRequestKind::Connect, - }) - .await; - } - IpAddr::V6(_) => { - udp_server_stats_event_sender - .send_event(server_statistics::event::Event::Udp6Request { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), - kind: UdpRequestKind::Connect, - }) - .await; - } - } + udp_server_stats_event_sender + .send_event(server_statistics::event::Event::UdpRequest { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + kind: UdpRequestKind::Connect, + }) + .await; } let connection_id = connect_service @@ -215,7 +203,7 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::Udp4Request { + .with(eq(server_statistics::event::Event::UdpRequest { context: server_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Connect, })) @@ -256,7 +244,7 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::Udp6Request { + .with(eq(server_statistics::event::Event::UdpRequest { context: server_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Connect, })) diff --git 
a/packages/udp-tracker-server/src/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs index df553be9f..cb341bc5c 100644 --- a/packages/udp-tracker-server/src/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -58,22 +58,11 @@ pub async fn handle_error( if e.1.is_some() { if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { - match client_socket_addr { - SocketAddr::V4(_) => { - udp_server_stats_event_sender - .send_event(server_statistics::event::Event::Udp4Error { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), - }) - .await; - } - SocketAddr::V6(_) => { - udp_server_stats_event_sender - .send_event(server_statistics::event::Event::Udp6Error { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), - }) - .await; - } - } + udp_server_stats_event_sender + .send_event(server_statistics::event::Event::UdpError { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + }) + .await; } } diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index 5f33f55ad..7597c9b8e 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -1,5 +1,5 @@ //! UDP tracker scrape handler. 
-use std::net::{IpAddr, SocketAddr}; +use std::net::SocketAddr; use std::ops::Range; use std::sync::Arc; @@ -37,24 +37,12 @@ pub async fn handle_scrape( tracing::trace!("handle scrape"); if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { - match client_socket_addr.ip() { - IpAddr::V4(_) => { - udp_server_stats_event_sender - .send_event(server_statistics::event::Event::Udp4Request { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), - kind: UdpRequestKind::Scrape, - }) - .await; - } - IpAddr::V6(_) => { - udp_server_stats_event_sender - .send_event(server_statistics::event::Event::Udp6Request { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), - kind: UdpRequestKind::Scrape, - }) - .await; - } - } + udp_server_stats_event_sender + .send_event(server_statistics::event::Event::UdpRequest { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + kind: UdpRequestKind::Scrape, + }) + .await; } let scrape_data = scrape_service @@ -380,7 +368,7 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::Udp4Request { + .with(eq(server_statistics::event::Event::UdpRequest { context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: server_statistics::event::UdpRequestKind::Scrape, })) @@ -429,7 +417,7 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::Udp6Request { + .with(eq(server_statistics::event::Event::UdpRequest { context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: server_statistics::event::UdpRequestKind::Scrape, })) diff --git a/packages/udp-tracker-server/src/server/launcher.rs 
b/packages/udp-tracker-server/src/server/launcher.rs index 0dfbba174..a3da6a2a8 100644 --- a/packages/udp-tracker-server/src/server/launcher.rs +++ b/packages/udp-tracker-server/src/server/launcher.rs @@ -1,4 +1,4 @@ -use std::net::{IpAddr, SocketAddr}; +use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; @@ -172,22 +172,11 @@ impl Launcher { if let Some(udp_server_stats_event_sender) = udp_tracker_server_container.udp_server_stats_event_sender.as_deref() { - match req.from.ip() { - IpAddr::V4(_) => { - udp_server_stats_event_sender - .send_event(statistics::event::Event::Udp4IncomingRequest { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), - }) - .await; - } - IpAddr::V6(_) => { - udp_server_stats_event_sender - .send_event(statistics::event::Event::Udp6IncomingRequest { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), - }) - .await; - } - } + udp_server_stats_event_sender + .send_event(statistics::event::Event::UdpIncomingRequest { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), + }) + .await; } if udp_tracker_core_container.ban_service.read().await.is_banned(&req.from.ip()) { diff --git a/packages/udp-tracker-server/src/server/processor.rs b/packages/udp-tracker-server/src/server/processor.rs index 999d74d00..acf8e8ae3 100644 --- a/packages/udp-tracker-server/src/server/processor.rs +++ b/packages/udp-tracker-server/src/server/processor.rs @@ -1,5 +1,5 @@ use std::io::Cursor; -use std::net::{IpAddr, SocketAddr}; +use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; @@ -100,26 +100,13 @@ impl Processor { if let Some(udp_server_stats_event_sender) = self.udp_tracker_server_container.udp_server_stats_event_sender.as_deref() { - match client_socket_addr.ip() { - IpAddr::V4(_) => { - udp_server_stats_event_sender - .send_event(statistics::event::Event::Udp4Response { - context: ConnectionContext::new(client_socket_addr, self.socket.address()), - kind: 
udp_response_kind, - req_processing_time, - }) - .await; - } - IpAddr::V6(_) => { - udp_server_stats_event_sender - .send_event(statistics::event::Event::Udp6Response { - context: ConnectionContext::new(client_socket_addr, self.socket.address()), - kind: udp_response_kind, - req_processing_time, - }) - .await; - } - } + udp_server_stats_event_sender + .send_event(statistics::event::Event::UdpResponse { + context: ConnectionContext::new(client_socket_addr, self.socket.address()), + kind: udp_response_kind, + req_processing_time, + }) + .await; } } Err(error) => tracing::warn!(%bytes_count, %error, ?payload, "failed to send"), diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index bda07f678..5200561c7 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -8,155 +8,89 @@ use crate::statistics::repository::Repository; #[allow(clippy::too_many_lines)] pub async fn handle_event(event: Event, stats_repository: &Repository) { match event { - // UDP Event::UdpRequestAborted { .. } => { stats_repository.increase_udp_requests_aborted().await; } Event::UdpRequestBanned { .. 
} => { stats_repository.increase_udp_requests_banned().await; } - - // UDP4 - Event::Udp4IncomingRequest { context } => { - if context.client_socket_addr.is_ipv4() { + Event::UdpIncomingRequest { context } => match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { stats_repository.increase_udp4_requests().await; - } else { - panic!("Client IP version does not match the expected version IPv4 for incoming request"); - } - } - Event::Udp4Request { context, kind } => match kind { - UdpRequestKind::Connect => { - if context.client_socket_addr.is_ipv4() { - stats_repository.increase_udp4_connections().await; - } else { - panic!("Client IP version does not match the expected version IPv4 for connect request"); - } } - UdpRequestKind::Announce => { - if context.client_socket_addr.is_ipv4() { - stats_repository.increase_udp4_announces().await; - } else { - panic!("Client IP version does not match the expected version IPv4 for announce request"); - } - } - UdpRequestKind::Scrape => { - if context.client_socket_addr.is_ipv4() { - stats_repository.increase_udp4_scrapes().await; - } else { - panic!("Client IP version does not match the expected version IPv4 for scrape request"); - } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_requests().await; } }, - Event::Udp4Response { - context, - kind, - req_processing_time, - } => { - if context.client_socket_addr.is_ipv4() { - stats_repository.increase_udp4_responses().await; - - match kind { - UdpResponseKind::Ok { req_kind } => match req_kind { - UdpRequestKind::Connect => { - stats_repository - .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) - .await; - } - UdpRequestKind::Announce => { - stats_repository - .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) - .await; - } - UdpRequestKind::Scrape => { - stats_repository - .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) - .await; - } - }, - UdpResponseKind::Error => {} + Event::UdpRequest { 
context, kind } => match kind { + UdpRequestKind::Connect => match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_connections().await; } - } else { - panic!("Client IP version does not match the expected version IPv4 for response"); - } - } - Event::Udp4Error { context } => { - if context.client_socket_addr.is_ipv4() { - stats_repository.increase_udp4_errors().await; - } else { - panic!("Client IP version does not match the expected version IPv4 for error"); - } - } - - // UDP6 - Event::Udp6IncomingRequest { context } => { - if context.client_socket_addr.is_ipv6() { - stats_repository.increase_udp6_requests().await; - } else { - panic!("Client IP version does not match the expected version IPv6 for incoming request"); - } - } - Event::Udp6Request { context, kind } => match kind { - UdpRequestKind::Connect => { - if context.client_socket_addr.is_ipv6() { + std::net::IpAddr::V6(_) => { stats_repository.increase_udp6_connections().await; - } else { - panic!("Client IP version does not match the expected version IPv6 for connect request"); } - } - UdpRequestKind::Announce => { - if context.client_socket_addr.is_ipv6() { + }, + UdpRequestKind::Announce => match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_announces().await; + } + std::net::IpAddr::V6(_) => { stats_repository.increase_udp6_announces().await; - } else { - panic!("Client IP version does not match the expected version IPv6 for announce request"); } - } - UdpRequestKind::Scrape => { - if context.client_socket_addr.is_ipv6() { + }, + UdpRequestKind::Scrape => match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_scrapes().await; + } + std::net::IpAddr::V6(_) => { stats_repository.increase_udp6_scrapes().await; - } else { - panic!("Client IP version does not match the expected version IPv6 for scrape request"); } - } + }, }, - Event::Udp6Response { + 
Event::UdpResponse { context, kind, req_processing_time, } => { - if context.client_socket_addr.is_ipv6() { - stats_repository.increase_udp6_responses().await; - - match kind { - UdpResponseKind::Ok { req_kind } => match req_kind { - UdpRequestKind::Connect => { - stats_repository - .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) - .await; - } - UdpRequestKind::Announce => { - stats_repository - .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) - .await; - } - UdpRequestKind::Scrape => { - stats_repository - .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) - .await; - } - }, - UdpResponseKind::Error => {} + match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_responses().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_responses().await; } - } else { - panic!("Client IP version does not match the expected version IPv6 for response"); + } + + match kind { + UdpResponseKind::Ok { req_kind } => match req_kind { + UdpRequestKind::Connect => { + stats_repository + .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) + .await; + } + UdpRequestKind::Announce => { + stats_repository + .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) + .await; + } + UdpRequestKind::Scrape => { + stats_repository + .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) + .await; + } + }, + UdpResponseKind::Error => {} } } - Event::Udp6Error { context } => { - if context.client_socket_addr.is_ipv6() { + Event::UdpError { context } => match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_errors().await; + } + std::net::IpAddr::V6(_) => { stats_repository.increase_udp6_errors().await; - } else { - panic!("Client IP version does not match the expected version IPv6 for error"); } - } + }, } tracing::debug!("stats: {:?}", stats_repository.get_stats().await); @@ 
-215,7 +149,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Udp4IncomingRequest { + Event::UdpIncomingRequest { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), @@ -270,7 +204,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Udp4Request { + Event::UdpRequest { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), @@ -291,7 +225,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Udp4Request { + Event::UdpRequest { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), @@ -312,7 +246,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Udp4Request { + Event::UdpRequest { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), @@ -333,7 +267,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Udp4Response { + Event::UdpResponse { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), @@ -357,7 +291,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Udp4Error { + Event::UdpError { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), @@ -377,7 +311,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Udp6Request { + Event::UdpRequest { context: ConnectionContext::new( 
SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), @@ -398,7 +332,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Udp6Request { + Event::UdpRequest { context: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), @@ -419,7 +353,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Udp6Request { + Event::UdpRequest { context: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), @@ -440,7 +374,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Udp6Response { + Event::UdpResponse { context: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), @@ -463,7 +397,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::Udp6Error { + Event::UdpError { context: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), diff --git a/packages/udp-tracker-server/src/statistics/event/mod.rs b/packages/udp-tracker-server/src/statistics/event/mod.rs index 64e2cb9c1..b22cd455d 100644 --- a/packages/udp-tracker-server/src/statistics/event/mod.rs +++ b/packages/udp-tracker-server/src/statistics/event/mod.rs @@ -6,50 +6,27 @@ pub mod listener; pub mod sender; /// An statistics event. It is used to collect tracker metrics. 
-/// -/// - `Udp` prefix means the event was triggered by the UDP tracker -/// - `4` or `6` prefixes means the IP version used by the peer -/// - Finally the event suffix is the type of request: `announce`, `scrape` or `connection` #[derive(Debug, PartialEq, Eq)] pub enum Event { - UdpRequestAborted { + UdpIncomingRequest { context: ConnectionContext, }, - UdpRequestBanned { - context: ConnectionContext, - }, - - // UDP4 - Udp4IncomingRequest { - context: ConnectionContext, - }, - Udp4Request { - context: ConnectionContext, - kind: UdpRequestKind, - }, - Udp4Response { - context: ConnectionContext, - kind: UdpResponseKind, - req_processing_time: Duration, - }, - Udp4Error { + UdpRequestAborted { context: ConnectionContext, }, - - // UDP6 - Udp6IncomingRequest { + UdpRequestBanned { context: ConnectionContext, }, - Udp6Request { + UdpRequest { context: ConnectionContext, kind: UdpRequestKind, }, - Udp6Response { + UdpResponse { context: ConnectionContext, kind: UdpResponseKind, req_processing_time: Duration, }, - Udp6Error { + UdpError { context: ConnectionContext, }, } diff --git a/packages/udp-tracker-server/src/statistics/keeper.rs b/packages/udp-tracker-server/src/statistics/keeper.rs index a6e6dde70..c29dcb1b2 100644 --- a/packages/udp-tracker-server/src/statistics/keeper.rs +++ b/packages/udp-tracker-server/src/statistics/keeper.rs @@ -73,7 +73,7 @@ mod tests { let event_sender = stats_tracker.run_event_listener(); let result = event_sender - .send_event(Event::Udp4IncomingRequest { + .send_event(Event::UdpIncomingRequest { context: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), From 625d20adf7a502ecbab01b21bbaf5002635418f6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Mar 2025 10:50:33 +0000 Subject: [PATCH 394/802] refactor: [#1382] rename torrust_udp_tracker_server::statistics::event::Event::UdpRequest --- 
.../udp-tracker-server/src/handlers/announce.rs | 8 ++++---- .../udp-tracker-server/src/handlers/connect.rs | 6 +++--- packages/udp-tracker-server/src/handlers/scrape.rs | 6 +++--- .../src/statistics/event/handler.rs | 14 +++++++------- .../udp-tracker-server/src/statistics/event/mod.rs | 2 +- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 6b5cbb42b..32c9e0cbd 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -42,7 +42,7 @@ pub async fn handle_announce( if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(server_statistics::event::Event::UdpRequest { + .send_event(server_statistics::event::Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Announce, }) @@ -425,7 +425,7 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::UdpRequest { + .with(eq(server_statistics::event::Event::UdpRequestAccepted { context: server_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Announce, })) @@ -768,7 +768,7 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::UdpRequest { + .with(eq(server_statistics::event::Event::UdpRequestAccepted { context: server_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Announce, })) @@ -861,7 +861,7 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); 
udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::UdpRequest { + .with(eq(server_statistics::event::Event::UdpRequestAccepted { context: server_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Announce, })) diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index 7d96f4cbd..c38eb56e5 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -24,7 +24,7 @@ pub async fn handle_connect( if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(server_statistics::event::Event::UdpRequest { + .send_event(server_statistics::event::Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Connect, }) @@ -203,7 +203,7 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::UdpRequest { + .with(eq(server_statistics::event::Event::UdpRequestAccepted { context: server_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Connect, })) @@ -244,7 +244,7 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::UdpRequest { + .with(eq(server_statistics::event::Event::UdpRequestAccepted { context: server_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Connect, })) diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index 7597c9b8e..aeca7bd12 100644 --- 
a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -38,7 +38,7 @@ pub async fn handle_scrape( if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(server_statistics::event::Event::UdpRequest { + .send_event(server_statistics::event::Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Scrape, }) @@ -368,7 +368,7 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::UdpRequest { + .with(eq(server_statistics::event::Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: server_statistics::event::UdpRequestKind::Scrape, })) @@ -417,7 +417,7 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::UdpRequest { + .with(eq(server_statistics::event::Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: server_statistics::event::UdpRequestKind::Scrape, })) diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index 5200561c7..03bfeae65 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -22,7 +22,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { stats_repository.increase_udp6_requests().await; } }, - Event::UdpRequest { context, kind } => match kind { + Event::UdpRequestAccepted { context, kind } => match kind { UdpRequestKind::Connect => match context.client_socket_addr().ip() { 
std::net::IpAddr::V4(_) => { stats_repository.increase_udp4_connections().await; @@ -204,7 +204,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::UdpRequest { + Event::UdpRequestAccepted { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), @@ -225,7 +225,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::UdpRequest { + Event::UdpRequestAccepted { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), @@ -246,7 +246,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::UdpRequest { + Event::UdpRequestAccepted { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), @@ -311,7 +311,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::UdpRequest { + Event::UdpRequestAccepted { context: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), @@ -332,7 +332,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::UdpRequest { + Event::UdpRequestAccepted { context: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), @@ -353,7 +353,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::UdpRequest { + Event::UdpRequestAccepted { context: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 
6969), diff --git a/packages/udp-tracker-server/src/statistics/event/mod.rs b/packages/udp-tracker-server/src/statistics/event/mod.rs index b22cd455d..207916846 100644 --- a/packages/udp-tracker-server/src/statistics/event/mod.rs +++ b/packages/udp-tracker-server/src/statistics/event/mod.rs @@ -17,7 +17,7 @@ pub enum Event { UdpRequestBanned { context: ConnectionContext, }, - UdpRequest { + UdpRequestAccepted { context: ConnectionContext, kind: UdpRequestKind, }, From 27e2db4b8f7515cf9ae5c08232431ae5719f8b7a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Mar 2025 11:31:33 +0000 Subject: [PATCH 395/802] refactor: [#1382] include req kin in UDP error response if it's known It could be unkown if the request couldb be parsed succesfully. --- .../src/handlers/announce.rs | 4 +- .../udp-tracker-server/src/handlers/error.rs | 3 +- .../udp-tracker-server/src/handlers/mod.rs | 60 ++++++++++++------- .../udp-tracker-server/src/handlers/scrape.rs | 4 +- .../src/server/processor.rs | 17 ++++-- .../src/statistics/event/handler.rs | 2 +- .../src/statistics/event/mod.rs | 19 ++++-- 7 files changed, 71 insertions(+), 38 deletions(-) diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 32c9e0cbd..5cc3cf3c8 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -32,7 +32,7 @@ pub async fn handle_announce( core_config: &Arc, opt_udp_server_stats_event_sender: &Arc>>, cookie_valid_range: Range, -) -> Result { +) -> Result { tracing::Span::current() .record("transaction_id", request.transaction_id.0.to_string()) .record("connection_id", request.connection_id.0.to_string()) @@ -52,7 +52,7 @@ pub async fn handle_announce( let announce_data = announce_service .handle_announce(client_socket_addr, server_socket_addr, request, cookie_valid_range) .await - .map_err(|e| (e.into(), request.transaction_id))?; + .map_err(|e| (e.into(), 
request.transaction_id, UdpRequestKind::Announce))?; Ok(build_response(client_socket_addr, request, core_config, &announce_data)) } diff --git a/packages/udp-tracker-server/src/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs index cb341bc5c..d1ffe2fd4 100644 --- a/packages/udp-tracker-server/src/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -12,11 +12,12 @@ use zerocopy::network_endian::I32; use crate::error::Error; use crate::statistics as server_statistics; -use crate::statistics::event::ConnectionContext; +use crate::statistics::event::{ConnectionContext, UdpRequestKind}; #[allow(clippy::too_many_arguments)] #[instrument(fields(transaction_id), skip(opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_error( + req_kind: Option, client_socket_addr: SocketAddr, server_socket_addr: SocketAddr, request_id: Uuid, diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 162af3020..e346d1953 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -24,6 +24,7 @@ use uuid::Uuid; use super::RawRequest; use crate::container::UdpTrackerServerContainer; use crate::error::Error; +use crate::statistics::event::UdpRequestKind; use crate::CurrentClock; #[derive(Debug, Clone, PartialEq)] @@ -60,7 +61,7 @@ pub(crate) async fn handle_packet( udp_tracker_server_container: Arc, server_socket_addr: SocketAddr, cookie_time_values: CookieTimeValues, -) -> Response { +) -> (Response, Option) { let request_id = Uuid::new_v4(); tracing::Span::current().record("request_id", request_id.to_string()); @@ -68,7 +69,7 @@ pub(crate) async fn handle_packet( let start_time = Instant::now(); - let response = + let (response, opt_req_kind) = match Request::parse_bytes(&udp_request.payload[..udp_request.payload.len()], MAX_SCRAPE_TORRENTS).map_err(Error::from) { Ok(request) => match handle_request( 
request, @@ -80,8 +81,8 @@ pub(crate) async fn handle_packet( ) .await { - Ok(response) => return response, - Err((error, transaction_id)) => { + Ok((response, req_kid)) => return (response, Some(req_kid)), + Err((error, transaction_id, req_kind)) => { if let Error::UdpAnnounceError { source: UdpAnnounceError::ConnectionCookieError { .. }, } = error @@ -91,7 +92,8 @@ pub(crate) async fn handle_packet( ban_service.increase_counter(&udp_request.from.ip()); } - handle_error( + let response = handle_error( + Some(req_kind.clone()), udp_request.from, server_socket_addr, request_id, @@ -100,11 +102,14 @@ pub(crate) async fn handle_packet( &error, Some(transaction_id), ) - .await + .await; + + (response, Some(req_kind)) } }, Err(e) => { - handle_error( + let response = handle_error( + None, udp_request.from, server_socket_addr, request_id, @@ -113,14 +118,16 @@ pub(crate) async fn handle_packet( &e, None, ) - .await + .await; + + (response, None) } }; let latency = start_time.elapsed(); tracing::trace!(?latency, "responded"); - response + (response, opt_req_kind) } /// It dispatches the request to the correct handler. 
@@ -143,21 +150,24 @@ pub async fn handle_request( udp_tracker_core_container: Arc, udp_tracker_server_container: Arc, cookie_time_values: CookieTimeValues, -) -> Result { +) -> Result<(Response, UdpRequestKind), (Error, TransactionId, UdpRequestKind)> { tracing::trace!("handle request"); match request { - Request::Connect(connect_request) => Ok(handle_connect( - client_socket_addr, - server_socket_addr, - &connect_request, - &udp_tracker_core_container.connect_service, - &udp_tracker_server_container.udp_server_stats_event_sender, - cookie_time_values.issue_time, - ) - .await), + Request::Connect(connect_request) => Ok(( + handle_connect( + client_socket_addr, + server_socket_addr, + &connect_request, + &udp_tracker_core_container.connect_service, + &udp_tracker_server_container.udp_server_stats_event_sender, + cookie_time_values.issue_time, + ) + .await, + UdpRequestKind::Connect, + )), Request::Announce(announce_request) => { - handle_announce( + match handle_announce( &udp_tracker_core_container.announce_service, client_socket_addr, server_socket_addr, @@ -167,9 +177,13 @@ pub async fn handle_request( cookie_time_values.valid_range, ) .await + { + Ok(response) => Ok((response, UdpRequestKind::Announce)), + Err(err) => Err(err), + } } Request::Scrape(scrape_request) => { - handle_scrape( + match handle_scrape( &udp_tracker_core_container.scrape_service, client_socket_addr, server_socket_addr, @@ -178,6 +192,10 @@ pub async fn handle_request( cookie_time_values.valid_range, ) .await + { + Ok(response) => Ok((response, UdpRequestKind::Scrape)), + Err(err) => Err(err), + } } } } diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index aeca7bd12..db6b4a18b 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -29,7 +29,7 @@ pub async fn handle_scrape( request: &ScrapeRequest, opt_udp_server_stats_event_sender: &Arc>>, 
cookie_valid_range: Range, -) -> Result { +) -> Result { tracing::Span::current() .record("transaction_id", request.transaction_id.0.to_string()) .record("connection_id", request.connection_id.0.to_string()); @@ -48,7 +48,7 @@ pub async fn handle_scrape( let scrape_data = scrape_service .handle_scrape(client_socket_addr, server_socket_addr, request, cookie_valid_range) .await - .map_err(|e| (e.into(), request.transaction_id))?; + .map_err(|e| (e.into(), request.transaction_id, UdpRequestKind::Scrape))?; Ok(build_response(request, &scrape_data)) } diff --git a/packages/udp-tracker-server/src/server/processor.rs b/packages/udp-tracker-server/src/server/processor.rs index acf8e8ae3..59d21673f 100644 --- a/packages/udp-tracker-server/src/server/processor.rs +++ b/packages/udp-tracker-server/src/server/processor.rs @@ -12,7 +12,7 @@ use tracing::{instrument, Level}; use super::bound_socket::BoundSocket; use crate::container::UdpTrackerServerContainer; use crate::handlers::CookieTimeValues; -use crate::statistics::event::ConnectionContext; +use crate::statistics::event::{ConnectionContext, UdpRequestKind}; use crate::{handlers, statistics, RawRequest}; pub struct Processor { @@ -43,7 +43,7 @@ impl Processor { let start_time = Instant::now(); - let response = handlers::handle_packet( + let (response, opt_req_kind) = handlers::handle_packet( request, self.udp_tracker_core_container.clone(), self.udp_tracker_server_container.clone(), @@ -54,11 +54,18 @@ impl Processor { let elapsed_time = start_time.elapsed(); - self.send_response(client_socket_addr, response, elapsed_time).await; + self.send_response(client_socket_addr, response, opt_req_kind, elapsed_time) + .await; } #[instrument(skip(self))] - async fn send_response(self, client_socket_addr: SocketAddr, response: Response, req_processing_time: Duration) { + async fn send_response( + self, + client_socket_addr: SocketAddr, + response: Response, + opt_req_kind: Option, + req_processing_time: Duration, + ) { 
tracing::debug!("send response"); let response_type = match &response { @@ -79,7 +86,7 @@ impl Processor { Response::Scrape(_) => statistics::event::UdpResponseKind::Ok { req_kind: statistics::event::UdpRequestKind::Scrape, }, - Response::Error(_e) => statistics::event::UdpResponseKind::Error, + Response::Error(_e) => statistics::event::UdpResponseKind::Error { opt_req_kind: None }, }; let mut writer = Cursor::new(Vec::with_capacity(200)); diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index 03bfeae65..75441f7e4 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -80,7 +80,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { .await; } }, - UdpResponseKind::Error => {} + UdpResponseKind::Error { opt_req_kind: _ } => {} } } Event::UdpError { context } => match context.client_socket_addr().ip() { diff --git a/packages/udp-tracker-server/src/statistics/event/mod.rs b/packages/udp-tracker-server/src/statistics/event/mod.rs index 207916846..1516d79c3 100644 --- a/packages/udp-tracker-server/src/statistics/event/mod.rs +++ b/packages/udp-tracker-server/src/statistics/event/mod.rs @@ -6,7 +6,7 @@ pub mod listener; pub mod sender; /// An statistics event. It is used to collect tracker metrics. -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Clone)] pub enum Event { UdpIncomingRequest { context: ConnectionContext, @@ -31,20 +31,27 @@ pub enum Event { }, } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Clone)] pub enum UdpRequestKind { Connect, Announce, Scrape, } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Clone)] pub enum UdpResponseKind { - Ok { req_kind: UdpRequestKind }, - Error, // todo: add the request kind `{ req_kind: Option(UdpRequestKind) }` when we know it. 
+ Ok { + req_kind: UdpRequestKind, + }, + + /// There was an error handling the requests. The error contains the request + /// kind if the request was parsed successfully. + Error { + opt_req_kind: Option, + }, } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Clone)] pub struct ConnectionContext { client_socket_addr: SocketAddr, server_socket_addr: SocketAddr, From 9a8a0dc0e575c127c032649de9e6b9155e1cc329 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Mar 2025 11:46:17 +0000 Subject: [PATCH 396/802] refactor: [#1382] rename UDP server event enum variants --- packages/udp-tracker-server/src/server/launcher.rs | 2 +- packages/udp-tracker-server/src/server/processor.rs | 2 +- .../udp-tracker-server/src/statistics/event/handler.rs | 10 +++++----- .../udp-tracker-server/src/statistics/event/mod.rs | 6 +++--- packages/udp-tracker-server/src/statistics/keeper.rs | 2 +- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/packages/udp-tracker-server/src/server/launcher.rs b/packages/udp-tracker-server/src/server/launcher.rs index a3da6a2a8..c6a105230 100644 --- a/packages/udp-tracker-server/src/server/launcher.rs +++ b/packages/udp-tracker-server/src/server/launcher.rs @@ -173,7 +173,7 @@ impl Launcher { if let Some(udp_server_stats_event_sender) = udp_tracker_server_container.udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(statistics::event::Event::UdpIncomingRequest { + .send_event(statistics::event::Event::UdpRequestReceived { context: ConnectionContext::new(client_socket_addr, server_socket_addr), }) .await; diff --git a/packages/udp-tracker-server/src/server/processor.rs b/packages/udp-tracker-server/src/server/processor.rs index 59d21673f..4d1e4429a 100644 --- a/packages/udp-tracker-server/src/server/processor.rs +++ b/packages/udp-tracker-server/src/server/processor.rs @@ -108,7 +108,7 @@ impl Processor { self.udp_tracker_server_container.udp_server_stats_event_sender.as_deref() { 
udp_server_stats_event_sender - .send_event(statistics::event::Event::UdpResponse { + .send_event(statistics::event::Event::UdpResponseSent { context: ConnectionContext::new(client_socket_addr, self.socket.address()), kind: udp_response_kind, req_processing_time, diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index 75441f7e4..6abf7d3c7 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -14,7 +14,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { Event::UdpRequestBanned { .. } => { stats_repository.increase_udp_requests_banned().await; } - Event::UdpIncomingRequest { context } => match context.client_socket_addr().ip() { + Event::UdpRequestReceived { context } => match context.client_socket_addr().ip() { std::net::IpAddr::V4(_) => { stats_repository.increase_udp4_requests().await; } @@ -48,7 +48,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { } }, }, - Event::UdpResponse { + Event::UdpResponseSent { context, kind, req_processing_time, @@ -149,7 +149,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::UdpIncomingRequest { + Event::UdpRequestReceived { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), @@ -267,7 +267,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::UdpResponse { + Event::UdpResponseSent { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), @@ -374,7 +374,7 @@ mod tests { let stats_repository = Repository::new(); handle_event( - Event::UdpResponse { + Event::UdpResponseSent { context: ConnectionContext::new( 
SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), diff --git a/packages/udp-tracker-server/src/statistics/event/mod.rs b/packages/udp-tracker-server/src/statistics/event/mod.rs index 1516d79c3..1b0be960b 100644 --- a/packages/udp-tracker-server/src/statistics/event/mod.rs +++ b/packages/udp-tracker-server/src/statistics/event/mod.rs @@ -8,7 +8,7 @@ pub mod sender; /// An statistics event. It is used to collect tracker metrics. #[derive(Debug, PartialEq, Eq, Clone)] pub enum Event { - UdpIncomingRequest { + UdpRequestReceived { context: ConnectionContext, }, UdpRequestAborted { @@ -21,7 +21,7 @@ pub enum Event { context: ConnectionContext, kind: UdpRequestKind, }, - UdpResponse { + UdpResponseSent { context: ConnectionContext, kind: UdpResponseKind, req_processing_time: Duration, @@ -44,7 +44,7 @@ pub enum UdpResponseKind { req_kind: UdpRequestKind, }, - /// There was an error handling the requests. The error contains the request + /// There was an error handling the request. The error contains the request /// kind if the request was parsed successfully. 
Error { opt_req_kind: Option, diff --git a/packages/udp-tracker-server/src/statistics/keeper.rs b/packages/udp-tracker-server/src/statistics/keeper.rs index c29dcb1b2..4ce832227 100644 --- a/packages/udp-tracker-server/src/statistics/keeper.rs +++ b/packages/udp-tracker-server/src/statistics/keeper.rs @@ -73,7 +73,7 @@ mod tests { let event_sender = stats_tracker.run_event_listener(); let result = event_sender - .send_event(Event::UdpIncomingRequest { + .send_event(Event::UdpRequestReceived { context: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), From e729a5fb7a53f26c722902e18e66972dad9cd309 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Mar 2025 15:29:32 +0000 Subject: [PATCH 397/802] refactor: [#1386] remove dependency on UDP core metrics from API --- .../src/v1/context/stats/handlers.rs | 10 +--------- .../src/v1/context/stats/routes.rs | 1 - .../src/statistics/services.rs | 20 ++++++++----------- 3 files changed, 9 insertions(+), 22 deletions(-) diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs index 5273df332..484c12ff9 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs @@ -43,19 +43,11 @@ pub async fn get_stats_handler( Arc, Arc>, Arc, - Arc, Arc, )>, params: Query, ) -> Response { - let metrics = get_metrics( - state.0.clone(), - state.1.clone(), - state.2.clone(), - state.3.clone(), - state.4.clone(), - ) - .await; + let metrics = get_metrics(state.0.clone(), state.1.clone(), state.2.clone(), state.3.clone()).await; match params.0.format { Some(format) => match format { diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs 
b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs index 1334c0d70..49ba9e829 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs @@ -19,7 +19,6 @@ pub fn add(prefix: &str, router: Router, http_api_container: &Arc, ban_service: Arc>, http_stats_repository: Arc, - udp_core_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> TrackerMetrics { let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); let http_stats = http_stats_repository.get_stats().await; - let udp_core_stats = udp_core_stats_repository.get_stats().await; let udp_server_stats = udp_server_stats_repository.get_stats().await; // For backward compatibility we keep the `tcp4_connections_handled` and @@ -63,16 +61,16 @@ pub async fn get_metrics( udp_avg_scrape_processing_time_ns: udp_server_stats.udp_avg_scrape_processing_time_ns, // UDPv4 udp4_requests: udp_server_stats.udp4_requests, - udp4_connections_handled: udp_core_stats.udp4_connections_handled, - udp4_announces_handled: udp_core_stats.udp4_announces_handled, - udp4_scrapes_handled: udp_core_stats.udp4_scrapes_handled, + udp4_connections_handled: udp_server_stats.udp4_connections_handled, + udp4_announces_handled: udp_server_stats.udp4_announces_handled, + udp4_scrapes_handled: udp_server_stats.udp4_scrapes_handled, udp4_responses: udp_server_stats.udp4_responses, udp4_errors_handled: udp_server_stats.udp4_errors_handled, // UDPv6 udp6_requests: udp_server_stats.udp6_requests, - udp6_connections_handled: udp_core_stats.udp6_connections_handled, - udp6_announces_handled: udp_core_stats.udp6_announces_handled, - udp6_scrapes_handled: udp_core_stats.udp6_scrapes_handled, + udp6_connections_handled: udp_server_stats.udp6_connections_handled, + udp6_announces_handled: udp_server_stats.udp6_announces_handled, + 
udp6_scrapes_handled: udp_server_stats.udp6_scrapes_handled, udp6_responses: udp_server_stats.udp6_responses, udp6_errors_handled: udp_server_stats.udp6_errors_handled, }, @@ -112,9 +110,8 @@ mod tests { let http_stats_repository = Arc::new(http_stats_repository); // UDP core stats - let (_udp_stats_event_sender, udp_stats_repository) = + let (_udp_stats_event_sender, _udp_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); - let udp_stats_repository = Arc::new(udp_stats_repository); // UDP server stats let (_udp_server_stats_event_sender, udp_server_stats_repository) = @@ -125,7 +122,6 @@ mod tests { in_memory_torrent_repository.clone(), ban_service.clone(), http_stats_repository.clone(), - udp_stats_repository.clone(), udp_server_stats_repository.clone(), ) .await; From 64c7b21fb812fd14e4865de1500a718b8bbd4929 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Mar 2025 16:19:29 +0000 Subject: [PATCH 398/802] refactor: [#1388] minor changes to HTTP core events --- .../src/statistics/event/mod.rs | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/packages/http-tracker-core/src/statistics/event/mod.rs b/packages/http-tracker-core/src/statistics/event/mod.rs index 7520e1a97..2964956d8 100644 --- a/packages/http-tracker-core/src/statistics/event/mod.rs +++ b/packages/http-tracker-core/src/statistics/event/mod.rs @@ -5,13 +5,13 @@ pub mod listener; pub mod sender; /// An statistics event. It is used to collect tracker metrics. 
-#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Clone)] pub enum Event { TcpAnnounce { connection: ConnectionContext }, TcpScrape { connection: ConnectionContext }, } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Clone)] pub struct ConnectionContext { client: ClientConnectionContext, server: ServerConnectionContext, @@ -35,9 +35,19 @@ impl ConnectionContext { pub fn client_ip_addr(&self) -> IpAddr { self.client.ip_addr } + + #[must_use] + pub fn client_port(&self) -> Option { + self.client.port + } + + #[must_use] + pub fn server_socket_addr(&self) -> SocketAddr { + self.server.socket_addr + } } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Clone)] pub struct ClientConnectionContext { ip_addr: IpAddr, @@ -45,7 +55,7 @@ pub struct ClientConnectionContext { port: Option, } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Clone)] pub struct ServerConnectionContext { socket_addr: SocketAddr, } From 5f9c4d3f2a04289055e6ccfc2f530cb82d3f47ea Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 18 Mar 2025 17:04:55 +0000 Subject: [PATCH 399/802] refactor: [#1388] change channel in HTTP core from mpsc to broadcast Stats events were introduced to collect tracker metrics. We only have global metrics (aggregate metrics for all UDP and HTTP trackers). This will change in the future. We will have: - Segregated metrics: one listeners per tracker (per socket). - Generic events: there could be other event consumers. Events will be decoupled from stats. This change allows multiple receivers in the channel. For now, we one use one listener but with this change will be easy to add more. 
--- .../http-tracker-core/benches/helpers/util.rs | 4 +- .../src/services/announce.rs | 10 ++--- .../http-tracker-core/src/services/scrape.rs | 12 +++--- .../src/statistics/event/listener.rs | 14 +++++-- .../src/statistics/event/sender.rs | 12 +++--- .../src/statistics/keeper.rs | 41 +------------------ .../http-tracker-core/src/statistics/setup.rs | 19 +++++++-- 7 files changed, 46 insertions(+), 66 deletions(-) diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index 169c4a56a..19010041e 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -108,11 +108,11 @@ pub fn sample_info_hash() -> InfoHash { use bittorrent_http_tracker_core::statistics; use futures::future::BoxFuture; use mockall::mock; -use tokio::sync::mpsc::error::SendError; +use tokio::sync::broadcast::error::SendError; mock! { HttpStatsEventSender {} impl statistics::event::sender::Sender for HttpStatsEventSender { - fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; + fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; } } diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 6b8b700c9..25fc1b861 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -315,7 +315,7 @@ mod tests { use futures::future::BoxFuture; use mockall::mock; - use tokio::sync::mpsc::error::SendError; + use tokio::sync::broadcast::error::SendError; use crate::statistics; use crate::tests::sample_info_hash; @@ -323,7 +323,7 @@ mod tests { mock! 
{ HttpStatsEventSender {} impl statistics::event::sender::Sender for HttpStatsEventSender { - fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; + fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; } } @@ -395,7 +395,7 @@ mod tests { connection: ConnectionContext::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), Some(8080), server_socket_addr), })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); @@ -451,7 +451,7 @@ mod tests { connection: ConnectionContext::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), Some(8080), server_socket_addr), })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); @@ -494,7 +494,7 @@ mod tests { ), })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index ed927efc3..6341ed301 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -203,7 +203,7 @@ mod tests { use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; use mockall::mock; - use tokio::sync::mpsc::error::SendError; + use tokio::sync::broadcast::error::SendError; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; @@ -260,7 +260,7 @@ mod tests { mock! 
{ HttpStatsEventSender {} impl statistics::event::sender::Sender for HttpStatsEventSender { - fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; + fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; } } @@ -359,7 +359,7 @@ mod tests { ), })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); @@ -408,7 +408,7 @@ mod tests { ), })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); @@ -529,7 +529,7 @@ mod tests { ), })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); @@ -578,7 +578,7 @@ mod tests { ), })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); diff --git a/packages/http-tracker-core/src/statistics/event/listener.rs b/packages/http-tracker-core/src/statistics/event/listener.rs index f1a2e25de..a70992a02 100644 --- a/packages/http-tracker-core/src/statistics/event/listener.rs +++ b/packages/http-tracker-core/src/statistics/event/listener.rs @@ -1,11 +1,17 @@ -use tokio::sync::mpsc; +use tokio::sync::broadcast; use super::handler::handle_event; use super::Event; use crate::statistics::repository::Repository; -pub async fn dispatch_events(mut receiver: mpsc::Receiver, stats_repository: Repository) { - while let Some(event) = receiver.recv().await { - handle_event(event, &stats_repository).await; +pub async fn dispatch_events(mut 
receiver: broadcast::Receiver, stats_repository: Repository) { + loop { + match receiver.recv().await { + Ok(event) => handle_event(event, &stats_repository).await, + Err(e) => { + tracing::error!("Error receiving http tracker core event: {:?}", e); + break; + } + } } } diff --git a/packages/http-tracker-core/src/statistics/event/sender.rs b/packages/http-tracker-core/src/statistics/event/sender.rs index ca4b4e210..9092a8e0b 100644 --- a/packages/http-tracker-core/src/statistics/event/sender.rs +++ b/packages/http-tracker-core/src/statistics/event/sender.rs @@ -2,15 +2,15 @@ use futures::future::BoxFuture; use futures::FutureExt; #[cfg(test)] use mockall::{automock, predicate::str}; -use tokio::sync::mpsc; -use tokio::sync::mpsc::error::SendError; +use tokio::sync::broadcast; +use tokio::sync::broadcast::error::SendError; use super::Event; /// A trait to allow sending statistics events #[cfg_attr(test, automock)] pub trait Sender: Sync + Send { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; } /// An [`statistics::EventSender`](crate::statistics::event::sender::Sender) implementation. 
@@ -19,11 +19,11 @@ pub trait Sender: Sync + Send { /// [`statistics::Keeper`](crate::statistics::keeper::Keeper) #[allow(clippy::module_name_repetitions)] pub struct ChannelSender { - pub(crate) sender: mpsc::Sender, + pub(crate) sender: broadcast::Sender, } impl Sender for ChannelSender { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { - async move { Some(self.sender.send(event).await) }.boxed() + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { + async move { Some(self.sender.send(event)) }.boxed() } } diff --git a/packages/http-tracker-core/src/statistics/keeper.rs b/packages/http-tracker-core/src/statistics/keeper.rs index 783309eff..f4428ec70 100644 --- a/packages/http-tracker-core/src/statistics/keeper.rs +++ b/packages/http-tracker-core/src/statistics/keeper.rs @@ -1,12 +1,9 @@ -use tokio::sync::mpsc; +use tokio::sync::broadcast::Receiver; use super::event::listener::dispatch_events; -use super::event::sender::{ChannelSender, Sender}; use super::event::Event; use super::repository::Repository; -const CHANNEL_BUFFER_SIZE: usize = 65_535; - /// The service responsible for keeping tracker metrics (listening to statistics events and handle them). /// /// It actively listen to new statistics events. 
When it receives a new event @@ -29,31 +26,16 @@ impl Keeper { } } - #[must_use] - pub fn new_active_instance() -> (Box, Repository) { - let mut stats_tracker = Self::new(); - - let stats_event_sender = stats_tracker.run_event_listener(); - - (stats_event_sender, stats_tracker.repository) - } - - pub fn run_event_listener(&mut self) -> Box { - let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); - + pub fn run_event_listener(&mut self, receiver: Receiver) { let stats_repository = self.repository.clone(); tokio::spawn(async move { dispatch_events(receiver, stats_repository).await }); - - Box::new(ChannelSender { sender }) } } #[cfg(test)] mod tests { - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use crate::statistics::event::{ConnectionContext, Event}; use crate::statistics::keeper::Keeper; use crate::statistics::metrics::Metrics; @@ -65,23 +47,4 @@ mod tests { assert_eq!(stats.tcp4_announces_handled, Metrics::default().tcp4_announces_handled); } - - #[tokio::test] - async fn should_create_an_event_sender_to_send_statistical_events() { - let mut stats_tracker = Keeper::new(); - - let event_sender = stats_tracker.run_event_listener(); - - let result = event_sender - .send_event(Event::TcpAnnounce { - connection: ConnectionContext::new( - IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), - Some(8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), - ), - }) - .await; - - assert!(result.is_some()); - } } diff --git a/packages/http-tracker-core/src/statistics/setup.rs b/packages/http-tracker-core/src/statistics/setup.rs index d3114a75e..a9ac751c6 100644 --- a/packages/http-tracker-core/src/statistics/setup.rs +++ b/packages/http-tracker-core/src/statistics/setup.rs @@ -1,8 +1,13 @@ //! Setup for the tracker statistics. //! //! The [`factory`] function builds the structs needed for handling the tracker metrics. 
+use tokio::sync::broadcast; + +use super::event::sender::ChannelSender; use crate::statistics; +const CHANNEL_CAPACITY: usize = 1024; + /// It builds the structs needed for handling the tracker metrics. /// /// It returns: @@ -19,15 +24,21 @@ pub fn factory( Option>, statistics::repository::Repository, ) { - let mut stats_event_sender = None; + let mut stats_event_sender: Option> = None; - let mut stats_tracker = statistics::keeper::Keeper::new(); + let mut keeper = statistics::keeper::Keeper::new(); if tracker_usage_statistics { - stats_event_sender = Some(stats_tracker.run_event_listener()); + let (sender, _) = broadcast::channel(CHANNEL_CAPACITY); + + let receiver = sender.subscribe(); + + stats_event_sender = Some(Box::new(ChannelSender { sender })); + + keeper.run_event_listener(receiver); } - (stats_event_sender, stats_tracker.repository) + (stats_event_sender, keeper.repository) } #[cfg(test)] From d2de1de84c0fc8469018c10271e4ca2f150631c9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Mar 2025 08:02:58 +0000 Subject: [PATCH 400/802] refactor: [#1389] change channel in UDP core from mpsc to broadcast --- .../udp-tracker-core/benches/helpers/utils.rs | 4 +- .../udp-tracker-core/src/services/connect.rs | 4 +- packages/udp-tracker-core/src/services/mod.rs | 4 +- .../src/statistics/event/listener.rs | 14 +++++-- .../src/statistics/event/mod.rs | 4 +- .../src/statistics/event/sender.rs | 12 +++--- .../udp-tracker-core/src/statistics/keeper.rs | 41 +------------------ .../udp-tracker-core/src/statistics/setup.rs | 19 +++++++-- .../src/handlers/announce.rs | 2 +- .../src/handlers/connect.rs | 4 +- .../udp-tracker-server/src/handlers/mod.rs | 7 ++-- 11 files changed, 48 insertions(+), 67 deletions(-) diff --git a/packages/udp-tracker-core/benches/helpers/utils.rs b/packages/udp-tracker-core/benches/helpers/utils.rs index 7fd6d175f..aed4d9542 100644 --- a/packages/udp-tracker-core/benches/helpers/utils.rs +++ 
b/packages/udp-tracker-core/benches/helpers/utils.rs @@ -3,7 +3,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use bittorrent_udp_tracker_core::statistics; use futures::future::BoxFuture; use mockall::mock; -use tokio::sync::mpsc::error::SendError; +use tokio::sync::broadcast::error::SendError; pub(crate) fn sample_ipv4_remote_addr() -> SocketAddr { sample_ipv4_socket_address() @@ -20,6 +20,6 @@ pub(crate) fn sample_issue_time() -> f64 { mock! { pub(crate) UdpCoreStatsEventSender {} impl statistics::event::sender::Sender for UdpCoreStatsEventSender { - fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; + fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; } } diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index c3c2459cd..fb28fe70b 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -142,7 +142,7 @@ mod tests { context: ConnectionContext::new(client_socket_addr, server_socket_addr), })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); let opt_udp_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_stats_event_sender_mock))); @@ -165,7 +165,7 @@ mod tests { context: ConnectionContext::new(client_socket_addr, server_socket_addr), })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); let opt_udp_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_stats_event_sender_mock))); diff --git a/packages/udp-tracker-core/src/services/mod.rs b/packages/udp-tracker-core/src/services/mod.rs index 6aa254f41..55a533a22 100644 --- a/packages/udp-tracker-core/src/services/mod.rs +++ b/packages/udp-tracker-core/src/services/mod.rs @@ -10,7 +10,7 @@ pub(crate) mod tests { use futures::future::BoxFuture; use 
mockall::mock; - use tokio::sync::mpsc::error::SendError; + use tokio::sync::broadcast::error::SendError; use crate::connection_cookie::gen_remote_fingerprint; use crate::statistics; @@ -46,7 +46,7 @@ pub(crate) mod tests { mock! { pub(crate) UdpCoreStatsEventSender {} impl statistics::event::sender::Sender for UdpCoreStatsEventSender { - fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; + fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; } } } diff --git a/packages/udp-tracker-core/src/statistics/event/listener.rs b/packages/udp-tracker-core/src/statistics/event/listener.rs index f1a2e25de..36b1e7a22 100644 --- a/packages/udp-tracker-core/src/statistics/event/listener.rs +++ b/packages/udp-tracker-core/src/statistics/event/listener.rs @@ -1,11 +1,17 @@ -use tokio::sync::mpsc; +use tokio::sync::broadcast; use super::handler::handle_event; use super::Event; use crate::statistics::repository::Repository; -pub async fn dispatch_events(mut receiver: mpsc::Receiver, stats_repository: Repository) { - while let Some(event) = receiver.recv().await { - handle_event(event, &stats_repository).await; +pub async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Repository) { + loop { + match receiver.recv().await { + Ok(event) => handle_event(event, &stats_repository).await, + Err(e) => { + tracing::error!("Error receiving udp tracker core event: {:?}", e); + break; + } + } } } diff --git a/packages/udp-tracker-core/src/statistics/event/mod.rs b/packages/udp-tracker-core/src/statistics/event/mod.rs index 216562506..2e8ae39a9 100644 --- a/packages/udp-tracker-core/src/statistics/event/mod.rs +++ b/packages/udp-tracker-core/src/statistics/event/mod.rs @@ -8,14 +8,14 @@ pub mod sender; /// /// - `Udp` prefix means the event was triggered by the UDP tracker. /// - The event suffix is the type of request: `announce`, `scrape` or `connection`. 
-#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Clone)] pub enum Event { UdpConnect { context: ConnectionContext }, UdpAnnounce { context: ConnectionContext }, UdpScrape { context: ConnectionContext }, } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Clone)] pub struct ConnectionContext { client_socket_addr: SocketAddr, server_socket_addr: SocketAddr, diff --git a/packages/udp-tracker-core/src/statistics/event/sender.rs b/packages/udp-tracker-core/src/statistics/event/sender.rs index ca4b4e210..9092a8e0b 100644 --- a/packages/udp-tracker-core/src/statistics/event/sender.rs +++ b/packages/udp-tracker-core/src/statistics/event/sender.rs @@ -2,15 +2,15 @@ use futures::future::BoxFuture; use futures::FutureExt; #[cfg(test)] use mockall::{automock, predicate::str}; -use tokio::sync::mpsc; -use tokio::sync::mpsc::error::SendError; +use tokio::sync::broadcast; +use tokio::sync::broadcast::error::SendError; use super::Event; /// A trait to allow sending statistics events #[cfg_attr(test, automock)] pub trait Sender: Sync + Send { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; } /// An [`statistics::EventSender`](crate::statistics::event::sender::Sender) implementation. 
@@ -19,11 +19,11 @@ pub trait Sender: Sync + Send { /// [`statistics::Keeper`](crate::statistics::keeper::Keeper) #[allow(clippy::module_name_repetitions)] pub struct ChannelSender { - pub(crate) sender: mpsc::Sender, + pub(crate) sender: broadcast::Sender, } impl Sender for ChannelSender { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { - async move { Some(self.sender.send(event).await) }.boxed() + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { + async move { Some(self.sender.send(event)) }.boxed() } } diff --git a/packages/udp-tracker-core/src/statistics/keeper.rs b/packages/udp-tracker-core/src/statistics/keeper.rs index e46e634e8..f06642908 100644 --- a/packages/udp-tracker-core/src/statistics/keeper.rs +++ b/packages/udp-tracker-core/src/statistics/keeper.rs @@ -1,12 +1,9 @@ -use tokio::sync::mpsc; +use tokio::sync::broadcast::Receiver; use super::event::listener::dispatch_events; -use super::event::sender::{ChannelSender, Sender}; use super::event::Event; use super::repository::Repository; -const CHANNEL_BUFFER_SIZE: usize = 65_535; - /// The service responsible for keeping tracker metrics (listening to statistics events and handle them). /// /// It actively listen to new statistics events. 
When it receives a new event @@ -29,31 +26,15 @@ impl Keeper { } } - #[must_use] - pub fn new_active_instance() -> (Box, Repository) { - let mut stats_tracker = Self::new(); - - let stats_event_sender = stats_tracker.run_event_listener(); - - (stats_event_sender, stats_tracker.repository) - } - - pub fn run_event_listener(&mut self) -> Box { - let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); - + pub fn run_event_listener(&mut self, receiver: Receiver) { let stats_repository = self.repository.clone(); tokio::spawn(async move { dispatch_events(receiver, stats_repository).await }); - - Box::new(ChannelSender { sender }) } } #[cfg(test)] mod tests { - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - - use crate::statistics::event::{ConnectionContext, Event}; use crate::statistics::keeper::Keeper; use crate::statistics::metrics::Metrics; @@ -65,22 +46,4 @@ mod tests { assert_eq!(stats.udp4_announces_handled, Metrics::default().udp4_announces_handled); } - - #[tokio::test] - async fn should_create_an_event_sender_to_send_statistical_events() { - let mut stats_tracker = Keeper::new(); - - let event_sender = stats_tracker.run_event_listener(); - - let result = event_sender - .send_event(Event::UdpConnect { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ), - }) - .await; - - assert!(result.is_some()); - } } diff --git a/packages/udp-tracker-core/src/statistics/setup.rs b/packages/udp-tracker-core/src/statistics/setup.rs index d3114a75e..a9ac751c6 100644 --- a/packages/udp-tracker-core/src/statistics/setup.rs +++ b/packages/udp-tracker-core/src/statistics/setup.rs @@ -1,8 +1,13 @@ //! Setup for the tracker statistics. //! //! The [`factory`] function builds the structs needed for handling the tracker metrics. 
+use tokio::sync::broadcast; + +use super::event::sender::ChannelSender; use crate::statistics; +const CHANNEL_CAPACITY: usize = 1024; + /// It builds the structs needed for handling the tracker metrics. /// /// It returns: @@ -19,15 +24,21 @@ pub fn factory( Option>, statistics::repository::Repository, ) { - let mut stats_event_sender = None; + let mut stats_event_sender: Option> = None; - let mut stats_tracker = statistics::keeper::Keeper::new(); + let mut keeper = statistics::keeper::Keeper::new(); if tracker_usage_statistics { - stats_event_sender = Some(stats_tracker.run_event_listener()); + let (sender, _) = broadcast::channel(CHANNEL_CAPACITY); + + let receiver = sender.subscribe(); + + stats_event_sender = Some(Box::new(ChannelSender { sender })); + + keeper.run_event_listener(receiver); } - (stats_event_sender, stats_tracker.repository) + (stats_event_sender, keeper.repository) } #[cfg(test)] diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 5cc3cf3c8..1988f3d79 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -854,7 +854,7 @@ mod tests { context: core_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); let udp_core_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index c38eb56e5..7e96ce37a 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -196,7 +196,7 @@ mod tests { context: core_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), })) .times(1) - .returning(|_| 
Box::pin(future::ready(Some(Ok(()))))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); let udp_core_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); @@ -237,7 +237,7 @@ mod tests { context: core_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); let udp_core_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index e346d1953..771147b4a 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -222,7 +222,8 @@ pub(crate) mod tests { use bittorrent_udp_tracker_core::{self, statistics as core_statistics}; use futures::future::BoxFuture; use mockall::mock; - use tokio::sync::mpsc::error::SendError; + use tokio::sync::broadcast::error::SendError; + use tokio::sync::mpsc::error::SendError as MpscSendError; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; @@ -422,14 +423,14 @@ pub(crate) mod tests { mock! { pub(crate) UdpCoreStatsEventSender {} impl core_statistics::event::sender::Sender for UdpCoreStatsEventSender { - fn send_event(&self, event: core_statistics::event::Event) -> BoxFuture<'static,Option > > > ; + fn send_event(&self, event: core_statistics::event::Event) -> BoxFuture<'static,Option > > > ; } } mock! 
{ pub(crate) UdpServerStatsEventSender {} impl server_statistics::event::sender::Sender for UdpServerStatsEventSender { - fn send_event(&self, event: server_statistics::event::Event) -> BoxFuture<'static,Option > > > ; + fn send_event(&self, event: server_statistics::event::Event) -> BoxFuture<'static,Option > > > ; } } } From 37c8f2bc8187197e81b1d7a338e490b739fe438e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Mar 2025 09:03:10 +0000 Subject: [PATCH 401/802] refactor: [#1390] change channel in UDP server from mpsc to broadcast --- .../src/handlers/announce.rs | 6 +-- .../src/handlers/connect.rs | 4 +- .../udp-tracker-server/src/handlers/mod.rs | 3 +- .../udp-tracker-server/src/handlers/scrape.rs | 4 +- .../src/statistics/event/listener.rs | 14 +++++-- .../src/statistics/event/sender.rs | 12 +++--- .../src/statistics/keeper.rs | 41 +------------------ .../src/statistics/setup.rs | 19 +++++++-- 8 files changed, 41 insertions(+), 62 deletions(-) diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 1988f3d79..a2cb55e59 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -430,7 +430,7 @@ mod tests { kind: UdpRequestKind::Announce, })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); @@ -773,7 +773,7 @@ mod tests { kind: UdpRequestKind::Announce, })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); @@ -866,7 +866,7 @@ mod tests { kind: UdpRequestKind::Announce, })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + .returning(|_| 
Box::pin(future::ready(Some(Ok(1))))); let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index 7e96ce37a..992ef459d 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -208,7 +208,7 @@ mod tests { kind: UdpRequestKind::Connect, })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); @@ -249,7 +249,7 @@ mod tests { kind: UdpRequestKind::Connect, })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 771147b4a..e573cc184 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -223,7 +223,6 @@ pub(crate) mod tests { use futures::future::BoxFuture; use mockall::mock; use tokio::sync::broadcast::error::SendError; - use tokio::sync::mpsc::error::SendError as MpscSendError; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; @@ -430,7 +429,7 @@ pub(crate) mod tests { mock! 
{ pub(crate) UdpServerStatsEventSender {} impl server_statistics::event::sender::Sender for UdpServerStatsEventSender { - fn send_event(&self, event: server_statistics::event::Event) -> BoxFuture<'static,Option > > > ; + fn send_event(&self, event: server_statistics::event::Event) -> BoxFuture<'static,Option > > > ; } } } diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index db6b4a18b..fbf2b7c43 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -373,7 +373,7 @@ mod tests { kind: server_statistics::event::UdpRequestKind::Scrape, })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); @@ -422,7 +422,7 @@ mod tests { kind: server_statistics::event::UdpRequestKind::Scrape, })) .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(()))))); + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); diff --git a/packages/udp-tracker-server/src/statistics/event/listener.rs b/packages/udp-tracker-server/src/statistics/event/listener.rs index f1a2e25de..b755cbf18 100644 --- a/packages/udp-tracker-server/src/statistics/event/listener.rs +++ b/packages/udp-tracker-server/src/statistics/event/listener.rs @@ -1,11 +1,17 @@ -use tokio::sync::mpsc; +use tokio::sync::broadcast; use super::handler::handle_event; use super::Event; use crate::statistics::repository::Repository; -pub async fn dispatch_events(mut receiver: mpsc::Receiver, stats_repository: Repository) { - while let Some(event) = receiver.recv().await { - handle_event(event, &stats_repository).await; +pub async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Repository) { + loop { 
+ match receiver.recv().await { + Ok(event) => handle_event(event, &stats_repository).await, + Err(e) => { + tracing::error!("Error receiving udp tracker server event: {:?}", e); + break; + } + } } } diff --git a/packages/udp-tracker-server/src/statistics/event/sender.rs b/packages/udp-tracker-server/src/statistics/event/sender.rs index ca4b4e210..9092a8e0b 100644 --- a/packages/udp-tracker-server/src/statistics/event/sender.rs +++ b/packages/udp-tracker-server/src/statistics/event/sender.rs @@ -2,15 +2,15 @@ use futures::future::BoxFuture; use futures::FutureExt; #[cfg(test)] use mockall::{automock, predicate::str}; -use tokio::sync::mpsc; -use tokio::sync::mpsc::error::SendError; +use tokio::sync::broadcast; +use tokio::sync::broadcast::error::SendError; use super::Event; /// A trait to allow sending statistics events #[cfg_attr(test, automock)] pub trait Sender: Sync + Send { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; } /// An [`statistics::EventSender`](crate::statistics::event::sender::Sender) implementation. 
@@ -19,11 +19,11 @@ pub trait Sender: Sync + Send { /// [`statistics::Keeper`](crate::statistics::keeper::Keeper) #[allow(clippy::module_name_repetitions)] pub struct ChannelSender { - pub(crate) sender: mpsc::Sender, + pub(crate) sender: broadcast::Sender, } impl Sender for ChannelSender { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { - async move { Some(self.sender.send(event).await) }.boxed() + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { + async move { Some(self.sender.send(event)) }.boxed() } } diff --git a/packages/udp-tracker-server/src/statistics/keeper.rs b/packages/udp-tracker-server/src/statistics/keeper.rs index 4ce832227..099e0d0aa 100644 --- a/packages/udp-tracker-server/src/statistics/keeper.rs +++ b/packages/udp-tracker-server/src/statistics/keeper.rs @@ -1,12 +1,9 @@ -use tokio::sync::mpsc; +use tokio::sync::broadcast::Receiver; use super::event::listener::dispatch_events; -use super::event::sender::{ChannelSender, Sender}; use super::event::Event; use super::repository::Repository; -const CHANNEL_BUFFER_SIZE: usize = 65_535; - /// The service responsible for keeping tracker metrics (listening to statistics events and handle them). /// /// It actively listen to new statistics events. 
When it receives a new event @@ -29,31 +26,15 @@ impl Keeper { } } - #[must_use] - pub fn new_active_instance() -> (Box, Repository) { - let mut stats_tracker = Self::new(); - - let stats_event_sender = stats_tracker.run_event_listener(); - - (stats_event_sender, stats_tracker.repository) - } - - pub fn run_event_listener(&mut self) -> Box { - let (sender, receiver) = mpsc::channel::(CHANNEL_BUFFER_SIZE); - + pub fn run_event_listener(&mut self, receiver: Receiver) { let stats_repository = self.repository.clone(); tokio::spawn(async move { dispatch_events(receiver, stats_repository).await }); - - Box::new(ChannelSender { sender }) } } #[cfg(test)] mod tests { - use std::net::{IpAddr, Ipv6Addr, SocketAddr}; - - use crate::statistics::event::{ConnectionContext, Event}; use crate::statistics::keeper::Keeper; use crate::statistics::metrics::Metrics; @@ -65,22 +46,4 @@ mod tests { assert_eq!(stats.udp4_requests, Metrics::default().udp4_requests); } - - #[tokio::test] - async fn should_create_an_event_sender_to_send_statistical_events() { - let mut stats_tracker = Keeper::new(); - - let event_sender = stats_tracker.run_event_listener(); - - let result = event_sender - .send_event(Event::UdpRequestReceived { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), - ), - }) - .await; - - assert!(result.is_some()); - } } diff --git a/packages/udp-tracker-server/src/statistics/setup.rs b/packages/udp-tracker-server/src/statistics/setup.rs index d3114a75e..a9ac751c6 100644 --- a/packages/udp-tracker-server/src/statistics/setup.rs +++ b/packages/udp-tracker-server/src/statistics/setup.rs @@ -1,8 +1,13 @@ //! Setup for the tracker statistics. //! //! The [`factory`] function builds the structs needed for handling the tracker metrics. 
+use tokio::sync::broadcast; + +use super::event::sender::ChannelSender; use crate::statistics; +const CHANNEL_CAPACITY: usize = 1024; + /// It builds the structs needed for handling the tracker metrics. /// /// It returns: @@ -19,15 +24,21 @@ pub fn factory( Option>, statistics::repository::Repository, ) { - let mut stats_event_sender = None; + let mut stats_event_sender: Option> = None; - let mut stats_tracker = statistics::keeper::Keeper::new(); + let mut keeper = statistics::keeper::Keeper::new(); if tracker_usage_statistics { - stats_event_sender = Some(stats_tracker.run_event_listener()); + let (sender, _) = broadcast::channel(CHANNEL_CAPACITY); + + let receiver = sender.subscribe(); + + stats_event_sender = Some(Box::new(ChannelSender { sender })); + + keeper.run_event_listener(receiver); } - (stats_event_sender, stats_tracker.repository) + (stats_event_sender, keeper.repository) } #[cfg(test)] From 3d2243b9b032318568ae2dfdd311d2239bacfcc9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Mar 2025 11:57:12 +0000 Subject: [PATCH 402/802] refactor: [#1396] extract event module in HTTP core --- .../src/v1/handlers/scrape.rs | 2 +- .../http-tracker-core/benches/helpers/util.rs | 9 +-- packages/http-tracker-core/src/container.rs | 4 +- packages/http-tracker-core/src/event/mod.rs | 59 +++++++++++++++++++ .../src/{statistics => }/event/sender.rs | 7 +-- packages/http-tracker-core/src/lib.rs | 1 + .../src/services/announce.rs | 38 ++++++------ .../http-tracker-core/src/services/scrape.rs | 40 ++++++------- .../src/statistics/event/handler.rs | 4 +- .../src/statistics/event/listener.rs | 2 +- .../src/statistics/event/mod.rs | 59 ------------------- .../src/statistics/keeper.rs | 2 +- .../http-tracker-core/src/statistics/setup.rs | 13 ++-- src/container.rs | 2 +- 14 files changed, 119 insertions(+), 123 deletions(-) create mode 100644 packages/http-tracker-core/src/event/mod.rs rename packages/http-tracker-core/src/{statistics => }/event/sender.rs (70%) diff 
--git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index 1ba89eaaf..e9544c983 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -103,7 +103,7 @@ mod tests { } struct CoreHttpTrackerServices { - pub http_stats_event_sender: Arc>>, + pub http_stats_event_sender: Arc>>, } fn initialize_private_tracker() -> (CoreTrackerServices, CoreHttpTrackerServices) { diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index 19010041e..dff516063 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -26,7 +26,7 @@ pub struct CoreTrackerServices { } pub struct CoreHttpTrackerServices { - pub http_stats_event_sender: Arc>>, + pub http_stats_event_sender: Arc>>, } pub fn initialize_core_tracker_services() -> (CoreTrackerServices, CoreHttpTrackerServices) { @@ -105,14 +105,15 @@ pub fn sample_info_hash() -> InfoHash { .expect("String should be a valid info hash") } -use bittorrent_http_tracker_core::statistics; +use bittorrent_http_tracker_core::event::Event; +use bittorrent_http_tracker_core::{event, statistics}; use futures::future::BoxFuture; use mockall::mock; use tokio::sync::broadcast::error::SendError; mock! 
{ HttpStatsEventSender {} - impl statistics::event::sender::Sender for HttpStatsEventSender { - fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; + impl event::sender::Sender for HttpStatsEventSender { + fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; } } diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 448dce246..bb9b5014c 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -9,7 +9,7 @@ use torrust_tracker_configuration::{Core, HttpTracker}; use crate::services::announce::AnnounceService; use crate::services::scrape::ScrapeService; -use crate::statistics; +use crate::{event, statistics}; pub struct HttpTrackerCoreContainer { // todo: replace with TrackerCoreContainer @@ -20,7 +20,7 @@ pub struct HttpTrackerCoreContainer { pub authentication_service: Arc, pub http_tracker_config: Arc, - pub http_stats_event_sender: Arc>>, + pub http_stats_event_sender: Arc>>, pub http_stats_repository: Arc, pub announce_service: Arc, pub scrape_service: Arc, diff --git a/packages/http-tracker-core/src/event/mod.rs b/packages/http-tracker-core/src/event/mod.rs new file mode 100644 index 000000000..da824c240 --- /dev/null +++ b/packages/http-tracker-core/src/event/mod.rs @@ -0,0 +1,59 @@ +use std::net::{IpAddr, SocketAddr}; + +pub mod sender; + +/// An event. 
+#[derive(Debug, PartialEq, Eq, Clone)] +pub enum Event { + TcpAnnounce { connection: ConnectionContext }, + TcpScrape { connection: ConnectionContext }, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct ConnectionContext { + client: ClientConnectionContext, + server: ServerConnectionContext, +} + +impl ConnectionContext { + #[must_use] + pub fn new(client_ip_addr: IpAddr, opt_client_port: Option, server_socket_addr: SocketAddr) -> Self { + Self { + client: ClientConnectionContext { + ip_addr: client_ip_addr, + port: opt_client_port, + }, + server: ServerConnectionContext { + socket_addr: server_socket_addr, + }, + } + } + + #[must_use] + pub fn client_ip_addr(&self) -> IpAddr { + self.client.ip_addr + } + + #[must_use] + pub fn client_port(&self) -> Option { + self.client.port + } + + #[must_use] + pub fn server_socket_addr(&self) -> SocketAddr { + self.server.socket_addr + } +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct ClientConnectionContext { + ip_addr: IpAddr, + + /// It's provided if you use the `torrust-axum-http-tracker-server` crate. + port: Option, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct ServerConnectionContext { + socket_addr: SocketAddr, +} diff --git a/packages/http-tracker-core/src/statistics/event/sender.rs b/packages/http-tracker-core/src/event/sender.rs similarity index 70% rename from packages/http-tracker-core/src/statistics/event/sender.rs rename to packages/http-tracker-core/src/event/sender.rs index 9092a8e0b..59ab4496b 100644 --- a/packages/http-tracker-core/src/statistics/event/sender.rs +++ b/packages/http-tracker-core/src/event/sender.rs @@ -7,16 +7,13 @@ use tokio::sync::broadcast::error::SendError; use super::Event; -/// A trait to allow sending statistics events +/// A trait to allow sending events. 
#[cfg_attr(test, automock)] pub trait Sender: Sync + Send { fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; } -/// An [`statistics::EventSender`](crate::statistics::event::sender::Sender) implementation. -/// -/// It uses a channel sender to send the statistic events. The channel is created by a -/// [`statistics::Keeper`](crate::statistics::keeper::Keeper) +/// An event sender implementation using a broadcast channel. #[allow(clippy::module_name_repetitions)] pub struct ChannelSender { pub(crate) sender: broadcast::Sender, diff --git a/packages/http-tracker-core/src/lib.rs b/packages/http-tracker-core/src/lib.rs index b42b99f8e..0b0b3ba78 100644 --- a/packages/http-tracker-core/src/lib.rs +++ b/packages/http-tracker-core/src/lib.rs @@ -1,4 +1,5 @@ pub mod container; +pub mod event; pub mod services; pub mod statistics; diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 25fc1b861..cd7417e98 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -5,7 +5,7 @@ //! It delegates the `announce` logic to the [`AnnounceHandler`] and it returns //! the [`AnnounceData`]. //! -//! It also sends an [`http_tracker_core::statistics::event::Event`] +//! It also sends an [`http_tracker_core::event::Event`] //! because events are specific for the HTTP tracker. use std::net::{IpAddr, SocketAddr}; use std::panic::Location; @@ -22,7 +22,8 @@ use bittorrent_tracker_core::whitelist; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; -use crate::statistics; +use crate::event; +use crate::event::Event; /// The HTTP tracker `announce` service. 
/// @@ -35,7 +36,7 @@ pub struct AnnounceService { announce_handler: Arc, authentication_service: Arc, whitelist_authorization: Arc, - opt_http_stats_event_sender: Arc>>, + opt_http_stats_event_sender: Arc>>, } impl AnnounceService { @@ -45,7 +46,7 @@ impl AnnounceService { announce_handler: Arc, authentication_service: Arc, whitelist_authorization: Arc, - opt_http_stats_event_sender: Arc>>, + opt_http_stats_event_sender: Arc>>, ) -> Self { Self { core_config, @@ -140,8 +141,8 @@ impl AnnounceService { async fn send_stats_event(&self, peer_ip: IpAddr, opt_peer_ip_port: Option, server_socket_addr: SocketAddr) { if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { http_stats_event_sender - .send_event(statistics::event::Event::TcpAnnounce { - connection: statistics::event::ConnectionContext::new(peer_ip, opt_peer_ip_port, server_socket_addr), + .send_event(Event::TcpAnnounce { + connection: event::ConnectionContext::new(peer_ip, opt_peer_ip_port, server_socket_addr), }) .await; } @@ -227,7 +228,7 @@ mod tests { } struct CoreHttpTrackerServices { - pub http_stats_event_sender: Arc>>, + pub http_stats_event_sender: Arc>>, } fn initialize_core_tracker_services() -> (CoreTrackerServices, CoreHttpTrackerServices) { @@ -317,13 +318,14 @@ mod tests { use mockall::mock; use tokio::sync::broadcast::error::SendError; - use crate::statistics; + use crate::event::Event; use crate::tests::sample_info_hash; + use crate::{event, statistics}; mock! 
{ HttpStatsEventSender {} - impl statistics::event::sender::Sender for HttpStatsEventSender { - fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; + impl event::sender::Sender for HttpStatsEventSender { + fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; } } @@ -340,13 +342,13 @@ mod tests { use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; + use crate::event; + use crate::event::{ConnectionContext, Event}; use crate::services::announce::tests::{ initialize_core_tracker_services, initialize_core_tracker_services_with_config, sample_announce_request_for_peer, sample_peer, MockHttpStatsEventSender, }; use crate::services::announce::AnnounceService; - use crate::statistics; - use crate::statistics::event::ConnectionContext; #[tokio::test] async fn it_should_return_the_announce_data() { @@ -391,12 +393,12 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::TcpAnnounce { + .with(eq(Event::TcpAnnounce { connection: ConnectionContext::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), Some(8080), server_socket_addr), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let http_stats_event_sender: Arc>> = + let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let (core_tracker_services, mut core_http_tracker_services) = initialize_core_tracker_services(); @@ -447,12 +449,12 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::TcpAnnounce { + .with(eq(Event::TcpAnnounce { connection: ConnectionContext::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), Some(8080), server_socket_addr), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let 
http_stats_event_sender: Arc>> = + let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let (core_tracker_services, mut core_http_tracker_services) = @@ -486,7 +488,7 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::TcpAnnounce { + .with(eq(Event::TcpAnnounce { connection: ConnectionContext::new( IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), Some(8080), @@ -495,7 +497,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let http_stats_event_sender: Arc>> = + let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let (core_tracker_services, mut core_http_tracker_services) = initialize_core_tracker_services(); diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 6341ed301..1f4c14b5a 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -19,8 +19,8 @@ use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; -use crate::statistics; -use crate::statistics::event::ConnectionContext; +use crate::event; +use crate::event::{ConnectionContext, Event}; /// The HTTP tracker `scrape` service. 
/// @@ -38,7 +38,7 @@ pub struct ScrapeService { core_config: Arc, scrape_handler: Arc, authentication_service: Arc, - opt_http_stats_event_sender: Arc>>, + opt_http_stats_event_sender: Arc>>, } impl ScrapeService { @@ -47,7 +47,7 @@ impl ScrapeService { core_config: Arc, scrape_handler: Arc, authentication_service: Arc, - opt_http_stats_event_sender: Arc>>, + opt_http_stats_event_sender: Arc>>, ) -> Self { Self { core_config, @@ -126,7 +126,7 @@ impl ScrapeService { ) { if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { http_stats_event_sender - .send_event(statistics::event::Event::TcpScrape { + .send_event(Event::TcpScrape { connection: ConnectionContext::new(original_peer_ip, opt_original_peer_port, server_socket_addr), }) .await; @@ -207,7 +207,7 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; - use crate::statistics; + use crate::event::{self, Event}; use crate::tests::sample_info_hash; struct Container { @@ -259,8 +259,8 @@ mod tests { mock! 
{ HttpStatsEventSender {} - impl statistics::event::sender::Sender for HttpStatsEventSender { - fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; + impl event::sender::Sender for HttpStatsEventSender { + fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; } } @@ -278,13 +278,13 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; + use crate::event::{ConnectionContext, Event}; use crate::services::scrape::tests::{ initialize_services_with_configuration, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; use crate::services::scrape::ScrapeService; - use crate::statistics; - use crate::statistics::event::ConnectionContext; use crate::tests::sample_info_hash; + use crate::{event, statistics}; #[tokio::test] async fn it_should_return_the_scrape_data_for_a_torrent() { @@ -351,7 +351,7 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::TcpScrape { + .with(eq(Event::TcpScrape { connection: ConnectionContext::new( IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), Some(8080), @@ -360,7 +360,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let http_stats_event_sender: Arc>> = + let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let container = initialize_services_with_configuration(&config); @@ -400,7 +400,7 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::TcpScrape { + .with(eq(Event::TcpScrape { connection: ConnectionContext::new( IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), Some(8080), @@ -409,7 +409,7 @@ mod tests { })) .times(1) .returning(|_| 
Box::pin(future::ready(Some(Ok(1))))); - let http_stats_event_sender: Arc>> = + let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let container = initialize_services_with_configuration(&config); @@ -454,13 +454,13 @@ mod tests { use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_test_helpers::configuration; + use crate::event::{ConnectionContext, Event}; use crate::services::scrape::tests::{ initialize_services_with_configuration, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; use crate::services::scrape::ScrapeService; - use crate::statistics; - use crate::statistics::event::ConnectionContext; use crate::tests::sample_info_hash; + use crate::{event, statistics}; #[tokio::test] async fn it_should_return_the_zeroed_scrape_data_when_the_tracker_is_running_in_private_mode_and_the_peer_is_not_authenticated( @@ -521,7 +521,7 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::TcpScrape { + .with(eq(Event::TcpScrape { connection: ConnectionContext::new( IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), Some(8080), @@ -530,7 +530,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let http_stats_event_sender: Arc>> = + let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); @@ -570,7 +570,7 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::TcpScrape { + .with(eq(Event::TcpScrape { connection: ConnectionContext::new( IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), Some(8080), @@ -579,7 +579,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let 
http_stats_event_sender: Arc>> = + let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index b8806b9d2..700e39476 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -1,6 +1,6 @@ use std::net::IpAddr; -use crate::statistics::event::Event; +use crate::event::Event; use crate::statistics::repository::Repository; /// # Panics @@ -34,8 +34,8 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use crate::event::{ConnectionContext, Event}; use crate::statistics::event::handler::handle_event; - use crate::statistics::event::{ConnectionContext, Event}; use crate::statistics::repository::Repository; #[tokio::test] diff --git a/packages/http-tracker-core/src/statistics/event/listener.rs b/packages/http-tracker-core/src/statistics/event/listener.rs index a70992a02..a03a56a21 100644 --- a/packages/http-tracker-core/src/statistics/event/listener.rs +++ b/packages/http-tracker-core/src/statistics/event/listener.rs @@ -1,7 +1,7 @@ use tokio::sync::broadcast; use super::handler::handle_event; -use super::Event; +use crate::event::Event; use crate::statistics::repository::Repository; pub async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Repository) { diff --git a/packages/http-tracker-core/src/statistics/event/mod.rs b/packages/http-tracker-core/src/statistics/event/mod.rs index 2964956d8..dae683398 100644 --- a/packages/http-tracker-core/src/statistics/event/mod.rs +++ b/packages/http-tracker-core/src/statistics/event/mod.rs @@ -1,61 +1,2 @@ -use std::net::{IpAddr, SocketAddr}; - pub mod handler; pub 
mod listener; -pub mod sender; - -/// An statistics event. It is used to collect tracker metrics. -#[derive(Debug, PartialEq, Eq, Clone)] -pub enum Event { - TcpAnnounce { connection: ConnectionContext }, - TcpScrape { connection: ConnectionContext }, -} - -#[derive(Debug, PartialEq, Eq, Clone)] -pub struct ConnectionContext { - client: ClientConnectionContext, - server: ServerConnectionContext, -} - -impl ConnectionContext { - #[must_use] - pub fn new(client_ip_addr: IpAddr, opt_client_port: Option, server_socket_addr: SocketAddr) -> Self { - Self { - client: ClientConnectionContext { - ip_addr: client_ip_addr, - port: opt_client_port, - }, - server: ServerConnectionContext { - socket_addr: server_socket_addr, - }, - } - } - - #[must_use] - pub fn client_ip_addr(&self) -> IpAddr { - self.client.ip_addr - } - - #[must_use] - pub fn client_port(&self) -> Option { - self.client.port - } - - #[must_use] - pub fn server_socket_addr(&self) -> SocketAddr { - self.server.socket_addr - } -} - -#[derive(Debug, PartialEq, Eq, Clone)] -pub struct ClientConnectionContext { - ip_addr: IpAddr, - - /// It's provided if you use the `torrust-axum-http-tracker-server` crate. - port: Option, -} - -#[derive(Debug, PartialEq, Eq, Clone)] -pub struct ServerConnectionContext { - socket_addr: SocketAddr, -} diff --git a/packages/http-tracker-core/src/statistics/keeper.rs b/packages/http-tracker-core/src/statistics/keeper.rs index f4428ec70..01a7a1569 100644 --- a/packages/http-tracker-core/src/statistics/keeper.rs +++ b/packages/http-tracker-core/src/statistics/keeper.rs @@ -1,8 +1,8 @@ use tokio::sync::broadcast::Receiver; use super::event::listener::dispatch_events; -use super::event::Event; use super::repository::Repository; +use crate::event::Event; /// The service responsible for keeping tracker metrics (listening to statistics events and handle them). 
/// diff --git a/packages/http-tracker-core/src/statistics/setup.rs b/packages/http-tracker-core/src/statistics/setup.rs index a9ac751c6..ca31e5d52 100644 --- a/packages/http-tracker-core/src/statistics/setup.rs +++ b/packages/http-tracker-core/src/statistics/setup.rs @@ -3,8 +3,8 @@ //! The [`factory`] function builds the structs needed for handling the tracker metrics. use tokio::sync::broadcast; -use super::event::sender::ChannelSender; -use crate::statistics; +use crate::event::sender::ChannelSender; +use crate::{event, statistics}; const CHANNEL_CAPACITY: usize = 1024; @@ -18,13 +18,8 @@ const CHANNEL_CAPACITY: usize = 1024; /// When the input argument `tracker_usage_statistics`is false the setup does not run the event listeners, consequently the statistics /// events are sent are received but not dispatched to the handler. #[must_use] -pub fn factory( - tracker_usage_statistics: bool, -) -> ( - Option>, - statistics::repository::Repository, -) { - let mut stats_event_sender: Option> = None; +pub fn factory(tracker_usage_statistics: bool) -> (Option>, statistics::repository::Repository) { + let mut stats_event_sender: Option> = None; let mut keeper = statistics::keeper::Keeper::new(); diff --git a/src/container.rs b/src/container.rs index 07c30d604..1c8c9c1d3 100644 --- a/src/container.rs +++ b/src/container.rs @@ -60,7 +60,7 @@ pub struct AppContainer { pub udp_scrape_service: Arc, // HTTP Tracker Core Services - pub http_stats_event_sender: Arc>>, + pub http_stats_event_sender: Arc>>, pub http_stats_repository: Arc, pub http_announce_service: Arc, pub http_scrape_service: Arc, From 7e364d14ca468105054e42a92b19719209c63e38 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Mar 2025 16:18:17 +0000 Subject: [PATCH 403/802] refactor: [#1396] move event channel creation to events mod in HTTP tracker core --- .../http-tracker-core/src/event/sender.rs | 23 +++++++++--- .../http-tracker-core/src/statistics/setup.rs | 35 +++++++++---------- 2 files changed, 35 
insertions(+), 23 deletions(-) diff --git a/packages/http-tracker-core/src/event/sender.rs b/packages/http-tracker-core/src/event/sender.rs index 59ab4496b..e9431abf2 100644 --- a/packages/http-tracker-core/src/event/sender.rs +++ b/packages/http-tracker-core/src/event/sender.rs @@ -7,20 +7,35 @@ use tokio::sync::broadcast::error::SendError; use super::Event; -/// A trait to allow sending events. +const CHANNEL_CAPACITY: usize = 1024; + +/// A trait for sending sending. #[cfg_attr(test, automock)] pub trait Sender: Sync + Send { fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; } /// An event sender implementation using a broadcast channel. -#[allow(clippy::module_name_repetitions)] -pub struct ChannelSender { +pub struct Broadcaster { pub(crate) sender: broadcast::Sender, } -impl Sender for ChannelSender { +impl Sender for Broadcaster { fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { async move { Some(self.sender.send(event)) }.boxed() } } + +impl Default for Broadcaster { + fn default() -> Self { + let (sender, _) = broadcast::channel(CHANNEL_CAPACITY); + Self { sender } + } +} + +impl Broadcaster { + #[must_use] + pub fn subscribe(&self) -> broadcast::Receiver { + self.sender.subscribe() + } +} diff --git a/packages/http-tracker-core/src/statistics/setup.rs b/packages/http-tracker-core/src/statistics/setup.rs index ca31e5d52..e2974e4c0 100644 --- a/packages/http-tracker-core/src/statistics/setup.rs +++ b/packages/http-tracker-core/src/statistics/setup.rs @@ -1,39 +1,36 @@ //! Setup for the tracker statistics. //! //! The [`factory`] function builds the structs needed for handling the tracker metrics. -use tokio::sync::broadcast; - -use crate::event::sender::ChannelSender; +use crate::event::sender::Broadcaster; use crate::{event, statistics}; -const CHANNEL_CAPACITY: usize = 1024; - /// It builds the structs needed for handling the tracker metrics. 
/// /// It returns: /// -/// - An statistics event [`Sender`](crate::statistics::event::sender::Sender) that allows you to send events related to statistics. -/// - An statistics [`Repository`](crate::statistics::repository::Repository) which is an in-memory repository for the tracker metrics. +/// - An event [`Sender`](crate::event::sender::Sender) that allows you to send +/// events related to statistics. +/// - An statistics [`Repository`](crate::statistics::repository::Repository) +/// which is an in-memory repository for the tracker metrics. /// -/// When the input argument `tracker_usage_statistics`is false the setup does not run the event listeners, consequently the statistics -/// events are sent are received but not dispatched to the handler. +/// When the input argument `tracker_usage_statistics`is false the setup does +/// not run the event listeners, consequently the statistics events are sent are +/// received but not dispatched to the handler. #[must_use] pub fn factory(tracker_usage_statistics: bool) -> (Option>, statistics::repository::Repository) { - let mut stats_event_sender: Option> = None; - let mut keeper = statistics::keeper::Keeper::new(); - if tracker_usage_statistics { - let (sender, _) = broadcast::channel(CHANNEL_CAPACITY); + let opt_event_sender: Option> = if tracker_usage_statistics { + let broadcaster = Broadcaster::default(); - let receiver = sender.subscribe(); + keeper.run_event_listener(broadcaster.subscribe()); - stats_event_sender = Some(Box::new(ChannelSender { sender })); - - keeper.run_event_listener(receiver); - } + Some(Box::new(broadcaster)) + } else { + None + }; - (stats_event_sender, keeper.repository) + (opt_event_sender, keeper.repository) } #[cfg(test)] From ed9383610337492ca3d6f7f7c499fd4ba735cbc6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Mar 2025 16:44:43 +0000 Subject: [PATCH 404/802] refactor: [#1397] extract event module in UDP core --- .../udp-tracker-core/benches/helpers/utils.rs | 7 +-- 
packages/udp-tracker-core/src/container.rs | 4 +- packages/udp-tracker-core/src/event/mod.rs | 40 +++++++++++++++++ .../src/{statistics => }/event/sender.rs | 28 ++++++++---- packages/udp-tracker-core/src/lib.rs | 1 + .../udp-tracker-core/src/services/announce.rs | 9 ++-- .../udp-tracker-core/src/services/connect.rs | 21 +++++---- packages/udp-tracker-core/src/services/mod.rs | 7 +-- .../udp-tracker-core/src/services/scrape.rs | 9 ++-- .../src/statistics/event/handler.rs | 4 +- .../src/statistics/event/listener.rs | 2 +- .../src/statistics/event/mod.rs | 40 ----------------- .../udp-tracker-core/src/statistics/keeper.rs | 2 +- .../src/statistics/services.rs | 2 +- .../udp-tracker-core/src/statistics/setup.rs | 44 ++++++++----------- .../src/handlers/announce.rs | 8 ++-- .../src/handlers/connect.rs | 14 +++--- .../udp-tracker-server/src/handlers/mod.rs | 6 +-- src/container.rs | 2 +- 19 files changed, 127 insertions(+), 123 deletions(-) create mode 100644 packages/udp-tracker-core/src/event/mod.rs rename packages/udp-tracker-core/src/{statistics => }/event/sender.rs (54%) diff --git a/packages/udp-tracker-core/benches/helpers/utils.rs b/packages/udp-tracker-core/benches/helpers/utils.rs index aed4d9542..f6c2f6fad 100644 --- a/packages/udp-tracker-core/benches/helpers/utils.rs +++ b/packages/udp-tracker-core/benches/helpers/utils.rs @@ -1,6 +1,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use bittorrent_udp_tracker_core::statistics; +use bittorrent_udp_tracker_core::event; +use bittorrent_udp_tracker_core::event::Event; use futures::future::BoxFuture; use mockall::mock; use tokio::sync::broadcast::error::SendError; @@ -19,7 +20,7 @@ pub(crate) fn sample_issue_time() -> f64 { mock! 
{ pub(crate) UdpCoreStatsEventSender {} - impl statistics::event::sender::Sender for UdpCoreStatsEventSender { - fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; + impl event::sender::Sender for UdpCoreStatsEventSender { + fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; } } diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index c4cce3dc1..aaa07f150 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -11,7 +11,7 @@ use crate::services::announce::AnnounceService; use crate::services::banning::BanService; use crate::services::connect::ConnectService; use crate::services::scrape::ScrapeService; -use crate::{statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; +use crate::{event, statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; pub struct UdpTrackerCoreContainer { // todo: replace with TrackerCoreContainer @@ -21,7 +21,7 @@ pub struct UdpTrackerCoreContainer { pub whitelist_authorization: Arc, pub udp_tracker_config: Arc, - pub udp_core_stats_event_sender: Arc>>, + pub udp_core_stats_event_sender: Arc>>, pub udp_core_stats_repository: Arc, pub ban_service: Arc>, pub connect_service: Arc, diff --git a/packages/udp-tracker-core/src/event/mod.rs b/packages/udp-tracker-core/src/event/mod.rs new file mode 100644 index 000000000..48a5b501b --- /dev/null +++ b/packages/udp-tracker-core/src/event/mod.rs @@ -0,0 +1,40 @@ +use std::net::SocketAddr; + +pub mod sender; + +/// An statistics event. It is used to collect tracker metrics. +/// +/// - `Udp` prefix means the event was triggered by the UDP tracker. +/// - The event suffix is the type of request: `announce`, `scrape` or `connection`. 
+#[derive(Debug, PartialEq, Eq, Clone)] +pub enum Event { + UdpConnect { context: ConnectionContext }, + UdpAnnounce { context: ConnectionContext }, + UdpScrape { context: ConnectionContext }, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct ConnectionContext { + pub client_socket_addr: SocketAddr, + pub server_socket_addr: SocketAddr, +} + +impl ConnectionContext { + #[must_use] + pub fn new(client_socket_addr: SocketAddr, server_socket_addr: SocketAddr) -> Self { + Self { + client_socket_addr, + server_socket_addr, + } + } + + #[must_use] + pub fn client_socket_addr(&self) -> SocketAddr { + self.client_socket_addr + } + + #[must_use] + pub fn server_socket_addr(&self) -> SocketAddr { + self.server_socket_addr + } +} diff --git a/packages/udp-tracker-core/src/statistics/event/sender.rs b/packages/udp-tracker-core/src/event/sender.rs similarity index 54% rename from packages/udp-tracker-core/src/statistics/event/sender.rs rename to packages/udp-tracker-core/src/event/sender.rs index 9092a8e0b..e9431abf2 100644 --- a/packages/udp-tracker-core/src/statistics/event/sender.rs +++ b/packages/udp-tracker-core/src/event/sender.rs @@ -7,23 +7,35 @@ use tokio::sync::broadcast::error::SendError; use super::Event; -/// A trait to allow sending statistics events +const CHANNEL_CAPACITY: usize = 1024; + +/// A trait for sending sending. #[cfg_attr(test, automock)] pub trait Sender: Sync + Send { fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; } -/// An [`statistics::EventSender`](crate::statistics::event::sender::Sender) implementation. -/// -/// It uses a channel sender to send the statistic events. The channel is created by a -/// [`statistics::Keeper`](crate::statistics::keeper::Keeper) -#[allow(clippy::module_name_repetitions)] -pub struct ChannelSender { +/// An event sender implementation using a broadcast channel. 
+pub struct Broadcaster { pub(crate) sender: broadcast::Sender, } -impl Sender for ChannelSender { +impl Sender for Broadcaster { fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { async move { Some(self.sender.send(event)) }.boxed() } } + +impl Default for Broadcaster { + fn default() -> Self { + let (sender, _) = broadcast::channel(CHANNEL_CAPACITY); + Self { sender } + } +} + +impl Broadcaster { + #[must_use] + pub fn subscribe(&self) -> broadcast::Receiver { + self.sender.subscribe() + } +} diff --git a/packages/udp-tracker-core/src/lib.rs b/packages/udp-tracker-core/src/lib.rs index 5aa714d35..94ce93068 100644 --- a/packages/udp-tracker-core/src/lib.rs +++ b/packages/udp-tracker-core/src/lib.rs @@ -1,6 +1,7 @@ pub mod connection_cookie; pub mod container; pub mod crypto; +pub mod event; pub mod services; pub mod statistics; diff --git a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index f745a90fd..bba9b51fc 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -20,8 +20,7 @@ use bittorrent_udp_tracker_protocol::peer_builder; use torrust_tracker_primitives::core::AnnounceData; use crate::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieError}; -use crate::statistics; -use crate::statistics::event::ConnectionContext; +use crate::event::{self, ConnectionContext, Event}; /// The `AnnounceService` is responsible for handling the `announce` requests. 
/// @@ -31,7 +30,7 @@ use crate::statistics::event::ConnectionContext; pub struct AnnounceService { announce_handler: Arc, whitelist_authorization: Arc, - opt_udp_core_stats_event_sender: Arc>>, + opt_udp_core_stats_event_sender: Arc>>, } impl AnnounceService { @@ -39,7 +38,7 @@ impl AnnounceService { pub fn new( announce_handler: Arc, whitelist_authorization: Arc, - opt_udp_core_stats_event_sender: Arc>>, + opt_udp_core_stats_event_sender: Arc>>, ) -> Self { Self { announce_handler, @@ -104,7 +103,7 @@ impl AnnounceService { async fn send_stats_event(&self, client_socket_addr: SocketAddr, server_socket_addr: SocketAddr) { if let Some(udp_stats_event_sender) = self.opt_udp_core_stats_event_sender.as_deref() { udp_stats_event_sender - .send_event(statistics::event::Event::UdpAnnounce { + .send_event(Event::UdpAnnounce { context: ConnectionContext::new(client_socket_addr, server_socket_addr), }) .await; diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index fb28fe70b..e543fbb1e 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -7,20 +7,19 @@ use std::sync::Arc; use aquatic_udp_protocol::ConnectionId; use crate::connection_cookie::{gen_remote_fingerprint, make}; -use crate::statistics; -use crate::statistics::event::ConnectionContext; +use crate::event::{self, ConnectionContext, Event}; /// The `ConnectService` is responsible for handling the `connect` requests. /// /// It is responsible for generating the connection cookie and sending the /// appropriate statistics events. 
pub struct ConnectService { - pub opt_udp_core_stats_event_sender: Arc>>, + pub opt_udp_core_stats_event_sender: Arc>>, } impl ConnectService { #[must_use] - pub fn new(opt_udp_core_stats_event_sender: Arc>>) -> Self { + pub fn new(opt_udp_core_stats_event_sender: Arc>>) -> Self { Self { opt_udp_core_stats_event_sender, } @@ -42,7 +41,7 @@ impl ConnectService { if let Some(udp_stats_event_sender) = self.opt_udp_core_stats_event_sender.as_deref() { udp_stats_event_sender - .send_event(statistics::event::Event::UdpConnect { + .send_event(Event::UdpConnect { context: ConnectionContext::new(client_socket_addr, server_socket_addr), }) .await; @@ -64,13 +63,13 @@ mod tests { use mockall::predicate::eq; use crate::connection_cookie::make; + use crate::event::{ConnectionContext, Event}; use crate::services::connect::ConnectService; use crate::services::tests::{ sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv4_socket_address, sample_ipv6_remote_addr, sample_ipv6_remote_addr_fingerprint, sample_issue_time, MockUdpCoreStatsEventSender, }; - use crate::statistics; - use crate::statistics::event::ConnectionContext; + use crate::{event, statistics}; #[tokio::test] async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { @@ -138,12 +137,12 @@ mod tests { let mut udp_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::UdpConnect { + .with(eq(Event::UdpConnect { context: ConnectionContext::new(client_socket_addr, server_socket_addr), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let opt_udp_stats_event_sender: Arc>> = + let opt_udp_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_stats_event_sender_mock))); let connect_service = Arc::new(ConnectService::new(opt_udp_stats_event_sender)); @@ -161,12 +160,12 @@ mod tests { let mut udp_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); 
udp_stats_event_sender_mock .expect_send_event() - .with(eq(statistics::event::Event::UdpConnect { + .with(eq(Event::UdpConnect { context: ConnectionContext::new(client_socket_addr, server_socket_addr), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let opt_udp_stats_event_sender: Arc>> = + let opt_udp_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_stats_event_sender_mock))); let connect_service = Arc::new(ConnectService::new(opt_udp_stats_event_sender)); diff --git a/packages/udp-tracker-core/src/services/mod.rs b/packages/udp-tracker-core/src/services/mod.rs index 55a533a22..ac82d71e8 100644 --- a/packages/udp-tracker-core/src/services/mod.rs +++ b/packages/udp-tracker-core/src/services/mod.rs @@ -13,7 +13,8 @@ pub(crate) mod tests { use tokio::sync::broadcast::error::SendError; use crate::connection_cookie::gen_remote_fingerprint; - use crate::statistics; + use crate::event; + use crate::event::Event; pub(crate) fn sample_ipv4_remote_addr() -> SocketAddr { sample_ipv4_socket_address() @@ -45,8 +46,8 @@ pub(crate) mod tests { mock! 
{ pub(crate) UdpCoreStatsEventSender {} - impl statistics::event::sender::Sender for UdpCoreStatsEventSender { - fn send_event(&self, event: statistics::event::Event) -> BoxFuture<'static,Option > > > ; + impl event::sender::Sender for UdpCoreStatsEventSender { + fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; } } } diff --git a/packages/udp-tracker-core/src/services/scrape.rs b/packages/udp-tracker-core/src/services/scrape.rs index 446c1182f..9f0941c2a 100644 --- a/packages/udp-tracker-core/src/services/scrape.rs +++ b/packages/udp-tracker-core/src/services/scrape.rs @@ -18,8 +18,7 @@ use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use torrust_tracker_primitives::core::ScrapeData; use crate::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieError}; -use crate::statistics; -use crate::statistics::event::ConnectionContext; +use crate::event::{self, ConnectionContext, Event}; /// The `ScrapeService` is responsible for handling the `scrape` requests. /// @@ -28,14 +27,14 @@ use crate::statistics::event::ConnectionContext; /// - The number of UDP `scrape` requests handled by the UDP tracker. 
pub struct ScrapeService { scrape_handler: Arc, - opt_udp_stats_event_sender: Arc>>, + opt_udp_stats_event_sender: Arc>>, } impl ScrapeService { #[must_use] pub fn new( scrape_handler: Arc, - opt_udp_stats_event_sender: Arc>>, + opt_udp_stats_event_sender: Arc>>, ) -> Self { Self { scrape_handler, @@ -86,7 +85,7 @@ impl ScrapeService { async fn send_stats_event(&self, client_socket_addr: SocketAddr, server_socket_addr: SocketAddr) { if let Some(udp_stats_event_sender) = self.opt_udp_stats_event_sender.as_deref() { udp_stats_event_sender - .send_event(statistics::event::Event::UdpScrape { + .send_event(Event::UdpScrape { context: ConnectionContext::new(client_socket_addr, server_socket_addr), }) .await; diff --git a/packages/udp-tracker-core/src/statistics/event/handler.rs b/packages/udp-tracker-core/src/statistics/event/handler.rs index 98860592f..a9ac0dade 100644 --- a/packages/udp-tracker-core/src/statistics/event/handler.rs +++ b/packages/udp-tracker-core/src/statistics/event/handler.rs @@ -1,4 +1,4 @@ -use crate::statistics::event::Event; +use crate::event::Event; use crate::statistics::repository::Repository; /// # Panics @@ -39,8 +39,8 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use crate::event::{ConnectionContext, Event}; use crate::statistics::event::handler::handle_event; - use crate::statistics::event::{ConnectionContext, Event}; use crate::statistics::repository::Repository; #[tokio::test] diff --git a/packages/udp-tracker-core/src/statistics/event/listener.rs b/packages/udp-tracker-core/src/statistics/event/listener.rs index 36b1e7a22..f3afafc4f 100644 --- a/packages/udp-tracker-core/src/statistics/event/listener.rs +++ b/packages/udp-tracker-core/src/statistics/event/listener.rs @@ -1,7 +1,7 @@ use tokio::sync::broadcast; use super::handler::handle_event; -use super::Event; +use crate::event::Event; use crate::statistics::repository::Repository; pub 
async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Repository) { diff --git a/packages/udp-tracker-core/src/statistics/event/mod.rs b/packages/udp-tracker-core/src/statistics/event/mod.rs index 2e8ae39a9..dae683398 100644 --- a/packages/udp-tracker-core/src/statistics/event/mod.rs +++ b/packages/udp-tracker-core/src/statistics/event/mod.rs @@ -1,42 +1,2 @@ -use std::net::SocketAddr; - pub mod handler; pub mod listener; -pub mod sender; - -/// An statistics event. It is used to collect tracker metrics. -/// -/// - `Udp` prefix means the event was triggered by the UDP tracker. -/// - The event suffix is the type of request: `announce`, `scrape` or `connection`. -#[derive(Debug, PartialEq, Eq, Clone)] -pub enum Event { - UdpConnect { context: ConnectionContext }, - UdpAnnounce { context: ConnectionContext }, - UdpScrape { context: ConnectionContext }, -} - -#[derive(Debug, PartialEq, Eq, Clone)] -pub struct ConnectionContext { - client_socket_addr: SocketAddr, - server_socket_addr: SocketAddr, -} - -impl ConnectionContext { - #[must_use] - pub fn new(client_socket_addr: SocketAddr, server_socket_addr: SocketAddr) -> Self { - Self { - client_socket_addr, - server_socket_addr, - } - } - - #[must_use] - pub fn client_socket_addr(&self) -> SocketAddr { - self.client_socket_addr - } - - #[must_use] - pub fn server_socket_addr(&self) -> SocketAddr { - self.server_socket_addr - } -} diff --git a/packages/udp-tracker-core/src/statistics/keeper.rs b/packages/udp-tracker-core/src/statistics/keeper.rs index f06642908..16ea51aac 100644 --- a/packages/udp-tracker-core/src/statistics/keeper.rs +++ b/packages/udp-tracker-core/src/statistics/keeper.rs @@ -1,8 +1,8 @@ use tokio::sync::broadcast::Receiver; use super::event::listener::dispatch_events; -use super::event::Event; use super::repository::Repository; +use crate::event::Event; /// The service responsible for keeping tracker metrics (listening to statistics events and handle them). 
/// diff --git a/packages/udp-tracker-core/src/statistics/services.rs b/packages/udp-tracker-core/src/statistics/services.rs index 56814f5d5..d3c1d4710 100644 --- a/packages/udp-tracker-core/src/statistics/services.rs +++ b/packages/udp-tracker-core/src/statistics/services.rs @@ -9,7 +9,7 @@ //! //! The factory function builds two structs: //! -//! - An statistics event [`Sender`](crate::statistics::event::sender::Sender) +//! - An event [`Sender`](crate::event::sender::Sender) //! - An statistics [`Repository`] //! //! ```text diff --git a/packages/udp-tracker-core/src/statistics/setup.rs b/packages/udp-tracker-core/src/statistics/setup.rs index a9ac751c6..e2974e4c0 100644 --- a/packages/udp-tracker-core/src/statistics/setup.rs +++ b/packages/udp-tracker-core/src/statistics/setup.rs @@ -1,44 +1,36 @@ //! Setup for the tracker statistics. //! //! The [`factory`] function builds the structs needed for handling the tracker metrics. -use tokio::sync::broadcast; - -use super::event::sender::ChannelSender; -use crate::statistics; - -const CHANNEL_CAPACITY: usize = 1024; +use crate::event::sender::Broadcaster; +use crate::{event, statistics}; /// It builds the structs needed for handling the tracker metrics. /// /// It returns: /// -/// - An statistics event [`Sender`](crate::statistics::event::sender::Sender) that allows you to send events related to statistics. -/// - An statistics [`Repository`](crate::statistics::repository::Repository) which is an in-memory repository for the tracker metrics. +/// - An event [`Sender`](crate::event::sender::Sender) that allows you to send +/// events related to statistics. +/// - An statistics [`Repository`](crate::statistics::repository::Repository) +/// which is an in-memory repository for the tracker metrics. /// -/// When the input argument `tracker_usage_statistics`is false the setup does not run the event listeners, consequently the statistics -/// events are sent are received but not dispatched to the handler. 
+/// When the input argument `tracker_usage_statistics`is false the setup does +/// not run the event listeners, consequently the statistics events are sent are +/// received but not dispatched to the handler. #[must_use] -pub fn factory( - tracker_usage_statistics: bool, -) -> ( - Option>, - statistics::repository::Repository, -) { - let mut stats_event_sender: Option> = None; - +pub fn factory(tracker_usage_statistics: bool) -> (Option>, statistics::repository::Repository) { let mut keeper = statistics::keeper::Keeper::new(); - if tracker_usage_statistics { - let (sender, _) = broadcast::channel(CHANNEL_CAPACITY); + let opt_event_sender: Option> = if tracker_usage_statistics { + let broadcaster = Broadcaster::default(); - let receiver = sender.subscribe(); + keeper.run_event_listener(broadcaster.subscribe()); - stats_event_sender = Some(Box::new(ChannelSender { sender })); - - keeper.run_event_listener(receiver); - } + Some(Box::new(broadcaster)) + } else { + None + }; - (stats_event_sender, keeper.repository) + (opt_event_sender, keeper.repository) } #[cfg(test)] diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index a2cb55e59..a26961a05 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -811,7 +811,7 @@ mod tests { use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use bittorrent_udp_tracker_core::services::announce::AnnounceService; - use bittorrent_udp_tracker_core::{self, statistics as core_statistics}; + use bittorrent_udp_tracker_core::{self, event as core_event}; use mockall::predicate::eq; use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; @@ -850,12 +850,12 @@ mod tests { let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); 
udp_core_stats_event_sender_mock .expect_send_event() - .with(eq(core_statistics::event::Event::UdpAnnounce { - context: core_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), + .with(eq(core_event::Event::UdpAnnounce { + context: core_event::ConnectionContext::new(client_socket_addr, server_socket_addr), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let udp_core_stats_event_sender: Arc>> = + let udp_core_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index 992ef459d..aae9f1136 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -58,8 +58,8 @@ mod tests { use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; use bittorrent_udp_tracker_core::connection_cookie::make; + use bittorrent_udp_tracker_core::event as core_event; use bittorrent_udp_tracker_core::services::connect::ConnectService; - use bittorrent_udp_tracker_core::statistics as core_statistics; use mockall::predicate::eq; use crate::handlers::handle_connect; @@ -192,12 +192,12 @@ mod tests { let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock .expect_send_event() - .with(eq(core_statistics::event::Event::UdpConnect { - context: core_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), + .with(eq(core_event::Event::UdpConnect { + context: core_event::ConnectionContext::new(client_socket_addr, server_socket_addr), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let udp_core_stats_event_sender: Arc>> = + let udp_core_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); 
let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); @@ -233,12 +233,12 @@ mod tests { let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock .expect_send_event() - .with(eq(core_statistics::event::Event::UdpConnect { - context: core_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), + .with(eq(core_event::Event::UdpConnect { + context: core_event::ConnectionContext::new(client_socket_addr, server_socket_addr), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let udp_core_stats_event_sender: Arc>> = + let udp_core_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index e573cc184..98f7a2fa2 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -219,7 +219,7 @@ pub(crate) mod tests { use bittorrent_udp_tracker_core::connection_cookie::gen_remote_fingerprint; use bittorrent_udp_tracker_core::services::announce::AnnounceService; use bittorrent_udp_tracker_core::services::scrape::ScrapeService; - use bittorrent_udp_tracker_core::{self, statistics as core_statistics}; + use bittorrent_udp_tracker_core::{self, event as core_event}; use futures::future::BoxFuture; use mockall::mock; use tokio::sync::broadcast::error::SendError; @@ -421,8 +421,8 @@ pub(crate) mod tests { mock! 
{ pub(crate) UdpCoreStatsEventSender {} - impl core_statistics::event::sender::Sender for UdpCoreStatsEventSender { - fn send_event(&self, event: core_statistics::event::Event) -> BoxFuture<'static,Option > > > ; + impl core_event::sender::Sender for UdpCoreStatsEventSender { + fn send_event(&self, event: core_event::Event) -> BoxFuture<'static,Option > > > ; } } diff --git a/src/container.rs b/src/container.rs index 1c8c9c1d3..7822b5d61 100644 --- a/src/container.rs +++ b/src/container.rs @@ -52,7 +52,7 @@ pub struct AppContainer { pub torrents_manager: Arc, // UDP Tracker Core Services - pub udp_core_stats_event_sender: Arc>>, + pub udp_core_stats_event_sender: Arc>>, pub udp_core_stats_repository: Arc, pub udp_ban_service: Arc>, pub udp_connect_service: Arc, From d8f1696141c065ac41ae81182752da4e1c7714de Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Mar 2025 17:03:47 +0000 Subject: [PATCH 405/802] refactor: [#1398] extract event module in UDP server --- packages/udp-tracker-server/src/container.rs | 4 +- packages/udp-tracker-server/src/event/mod.rs | 76 +++++++++++++++++++ .../src/{statistics => }/event/sender.rs | 28 +++++-- .../src/handlers/announce.rs | 34 ++++----- .../src/handlers/connect.rs | 22 +++--- .../udp-tracker-server/src/handlers/error.rs | 7 +- .../udp-tracker-server/src/handlers/mod.rs | 10 +-- .../udp-tracker-server/src/handlers/scrape.rs | 26 +++---- packages/udp-tracker-server/src/lib.rs | 1 + .../udp-tracker-server/src/server/launcher.rs | 9 +-- .../src/server/processor.rs | 20 ++--- .../src/statistics/event/handler.rs | 20 ++--- .../src/statistics/event/listener.rs | 2 +- .../src/statistics/event/mod.rs | 76 ------------------- .../src/statistics/keeper.rs | 2 +- .../src/statistics/setup.rs | 47 +++++------- src/container.rs | 2 +- 17 files changed, 191 insertions(+), 195 deletions(-) create mode 100644 packages/udp-tracker-server/src/event/mod.rs rename packages/udp-tracker-server/src/{statistics => }/event/sender.rs (54%) diff 
--git a/packages/udp-tracker-server/src/container.rs b/packages/udp-tracker-server/src/container.rs index 36ad0e671..0c8039b26 100644 --- a/packages/udp-tracker-server/src/container.rs +++ b/packages/udp-tracker-server/src/container.rs @@ -2,10 +2,10 @@ use std::sync::Arc; use torrust_tracker_configuration::Core; -use crate::statistics; +use crate::{event, statistics}; pub struct UdpTrackerServerContainer { - pub udp_server_stats_event_sender: Arc>>, + pub udp_server_stats_event_sender: Arc>>, pub udp_server_stats_repository: Arc, } diff --git a/packages/udp-tracker-server/src/event/mod.rs b/packages/udp-tracker-server/src/event/mod.rs new file mode 100644 index 000000000..adc1396cc --- /dev/null +++ b/packages/udp-tracker-server/src/event/mod.rs @@ -0,0 +1,76 @@ +use std::net::SocketAddr; +use std::time::Duration; + +pub mod sender; + +/// An statistics event. It is used to collect tracker metrics. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum Event { + UdpRequestReceived { + context: ConnectionContext, + }, + UdpRequestAborted { + context: ConnectionContext, + }, + UdpRequestBanned { + context: ConnectionContext, + }, + UdpRequestAccepted { + context: ConnectionContext, + kind: UdpRequestKind, + }, + UdpResponseSent { + context: ConnectionContext, + kind: UdpResponseKind, + req_processing_time: Duration, + }, + UdpError { + context: ConnectionContext, + }, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum UdpRequestKind { + Connect, + Announce, + Scrape, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum UdpResponseKind { + Ok { + req_kind: UdpRequestKind, + }, + + /// There was an error handling the request. The error contains the request + /// kind if the request was parsed successfully. 
+ Error { + opt_req_kind: Option, + }, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct ConnectionContext { + client_socket_addr: SocketAddr, + server_socket_addr: SocketAddr, +} + +impl ConnectionContext { + #[must_use] + pub fn new(client_socket_addr: SocketAddr, server_socket_addr: SocketAddr) -> Self { + Self { + client_socket_addr, + server_socket_addr, + } + } + + #[must_use] + pub fn client_socket_addr(&self) -> SocketAddr { + self.client_socket_addr + } + + #[must_use] + pub fn server_socket_addr(&self) -> SocketAddr { + self.server_socket_addr + } +} diff --git a/packages/udp-tracker-server/src/statistics/event/sender.rs b/packages/udp-tracker-server/src/event/sender.rs similarity index 54% rename from packages/udp-tracker-server/src/statistics/event/sender.rs rename to packages/udp-tracker-server/src/event/sender.rs index 9092a8e0b..e9431abf2 100644 --- a/packages/udp-tracker-server/src/statistics/event/sender.rs +++ b/packages/udp-tracker-server/src/event/sender.rs @@ -7,23 +7,35 @@ use tokio::sync::broadcast::error::SendError; use super::Event; -/// A trait to allow sending statistics events +const CHANNEL_CAPACITY: usize = 1024; + +/// A trait for sending sending. #[cfg_attr(test, automock)] pub trait Sender: Sync + Send { fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; } -/// An [`statistics::EventSender`](crate::statistics::event::sender::Sender) implementation. -/// -/// It uses a channel sender to send the statistic events. The channel is created by a -/// [`statistics::Keeper`](crate::statistics::keeper::Keeper) -#[allow(clippy::module_name_repetitions)] -pub struct ChannelSender { +/// An event sender implementation using a broadcast channel. 
+pub struct Broadcaster { pub(crate) sender: broadcast::Sender, } -impl Sender for ChannelSender { +impl Sender for Broadcaster { fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { async move { Some(self.sender.send(event)) }.boxed() } } + +impl Default for Broadcaster { + fn default() -> Self { + let (sender, _) = broadcast::channel(CHANNEL_CAPACITY); + Self { sender } + } +} + +impl Broadcaster { + #[must_use] + pub fn subscribe(&self) -> broadcast::Receiver { + self.sender.subscribe() + } +} diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index a26961a05..5df46125d 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -15,8 +15,7 @@ use tracing::{instrument, Level}; use zerocopy::network_endian::I32; use crate::error::Error; -use crate::statistics as server_statistics; -use crate::statistics::event::{ConnectionContext, UdpRequestKind}; +use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; /// It handles the `Announce` request. 
/// @@ -30,7 +29,7 @@ pub async fn handle_announce( server_socket_addr: SocketAddr, request: &AnnounceRequest, core_config: &Arc, - opt_udp_server_stats_event_sender: &Arc>>, + opt_udp_server_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { tracing::Span::current() @@ -42,7 +41,7 @@ pub async fn handle_announce( if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(server_statistics::event::Event::UdpRequestAccepted { + .send_event(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Announce, }) @@ -207,6 +206,7 @@ mod tests { use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use mockall::predicate::eq; + use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::handlers::handle_announce; use crate::handlers::tests::{ @@ -215,8 +215,6 @@ mod tests { sample_issue_time, CoreTrackerServices, CoreUdpTrackerServices, MockUdpServerStatsEventSender, TorrentPeerBuilder, }; - use crate::statistics as server_statistics; - use crate::statistics::event::UdpRequestKind; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -425,13 +423,13 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::UdpRequestAccepted { - context: server_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), + .with(eq(Event::UdpRequestAccepted { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Announce, })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let udp_server_stats_event_sender: Arc>> = + let udp_server_stats_event_sender: Arc>> = 
Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); let (core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = @@ -532,6 +530,7 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_configuration::Core; + use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::handlers::handle_announce; use crate::handlers::tests::{ @@ -539,8 +538,6 @@ mod tests { initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, sample_issue_time, MockUdpServerStatsEventSender, TorrentPeerBuilder, }; - use crate::statistics as server_statistics; - use crate::statistics::event::UdpRequestKind; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -768,13 +765,13 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::UdpRequestAccepted { - context: server_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), + .with(eq(Event::UdpRequestAccepted { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Announce, })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let udp_server_stats_event_sender: Arc>> = + let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); let (core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = @@ -814,14 +811,13 @@ mod tests { use bittorrent_udp_tracker_core::{self, event as core_event}; use mockall::predicate::eq; + use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::handlers::handle_announce; use crate::handlers::tests::{ 
sample_cookie_valid_range, sample_issue_time, MockUdpCoreStatsEventSender, MockUdpServerStatsEventSender, TrackerConfigurationBuilder, }; - use crate::statistics as server_statistics; - use crate::statistics::event::UdpRequestKind; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { @@ -861,13 +857,13 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::UdpRequestAccepted { - context: server_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), + .with(eq(Event::UdpRequestAccepted { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Announce, })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let udp_server_stats_event_sender: Arc>> = + let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); let announce_handler = Arc::new(AnnounceHandler::new( diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index aae9f1136..a0fbaead3 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -6,8 +6,7 @@ use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, ConnectionId, Respon use bittorrent_udp_tracker_core::services::connect::ConnectService; use tracing::{instrument, Level}; -use crate::statistics as server_statistics; -use crate::statistics::event::{ConnectionContext, UdpRequestKind}; +use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; /// It handles the `Connect` request. 
#[instrument(fields(transaction_id), skip(connect_service, opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] @@ -16,7 +15,7 @@ pub async fn handle_connect( server_socket_addr: SocketAddr, request: &ConnectRequest, connect_service: &Arc, - opt_udp_server_stats_event_sender: &Arc>>, + opt_udp_server_stats_event_sender: &Arc>>, cookie_issue_time: f64, ) -> Response { tracing::Span::current().record("transaction_id", request.transaction_id.0.to_string()); @@ -24,7 +23,7 @@ pub async fn handle_connect( if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(server_statistics::event::Event::UdpRequestAccepted { + .send_event(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Connect, }) @@ -62,13 +61,12 @@ mod tests { use bittorrent_udp_tracker_core::services::connect::ConnectService; use mockall::predicate::eq; + use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; use crate::handlers::handle_connect; use crate::handlers::tests::{ sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv4_socket_address, sample_ipv6_remote_addr, sample_ipv6_remote_addr_fingerprint, sample_issue_time, MockUdpCoreStatsEventSender, MockUdpServerStatsEventSender, }; - use crate::statistics as server_statistics; - use crate::statistics::event::UdpRequestKind; fn sample_connect_request() -> ConnectRequest { ConnectRequest { @@ -203,13 +201,13 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::UdpRequestAccepted { - context: server_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), + .with(eq(Event::UdpRequestAccepted { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Connect, 
})) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let udp_server_stats_event_sender: Arc>> = + let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); @@ -244,13 +242,13 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::UdpRequestAccepted { - context: server_statistics::event::ConnectionContext::new(client_socket_addr, server_socket_addr), + .with(eq(Event::UdpRequestAccepted { + context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Connect, })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let udp_server_stats_event_sender: Arc>> = + let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); diff --git a/packages/udp-tracker-server/src/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs index d1ffe2fd4..70c33b5ba 100644 --- a/packages/udp-tracker-server/src/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -11,8 +11,7 @@ use uuid::Uuid; use zerocopy::network_endian::I32; use crate::error::Error; -use crate::statistics as server_statistics; -use crate::statistics::event::{ConnectionContext, UdpRequestKind}; +use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; #[allow(clippy::too_many_arguments)] #[instrument(fields(transaction_id), skip(opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] @@ -21,7 +20,7 @@ pub async fn handle_error( client_socket_addr: SocketAddr, server_socket_addr: SocketAddr, request_id: Uuid, - opt_udp_server_stats_event_sender: &Arc>>, + opt_udp_server_stats_event_sender: &Arc>>, 
cookie_valid_range: Range, e: &Error, transaction_id: Option, @@ -60,7 +59,7 @@ pub async fn handle_error( if e.1.is_some() { if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(server_statistics::event::Event::UdpError { + .send_event(Event::UdpError { context: ConnectionContext::new(client_socket_addr, server_socket_addr), }) .await; diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 98f7a2fa2..61f7bb187 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -24,7 +24,7 @@ use uuid::Uuid; use super::RawRequest; use crate::container::UdpTrackerServerContainer; use crate::error::Error; -use crate::statistics::event::UdpRequestKind; +use crate::event::UdpRequestKind; use crate::CurrentClock; #[derive(Debug, Clone, PartialEq)] @@ -228,7 +228,7 @@ pub(crate) mod tests { use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; - use crate::{statistics as server_statistics, CurrentClock}; + use crate::{event as server_event, CurrentClock}; pub(crate) struct CoreTrackerServices { pub core_config: Arc, @@ -244,7 +244,7 @@ pub(crate) mod tests { } pub(crate) struct ServerUdpTrackerServices { - pub udp_server_stats_event_sender: Arc>>, + pub udp_server_stats_event_sender: Arc>>, } fn default_testing_tracker_configuration() -> Configuration { @@ -428,8 +428,8 @@ pub(crate) mod tests { mock! 
{ pub(crate) UdpServerStatsEventSender {} - impl server_statistics::event::sender::Sender for UdpServerStatsEventSender { - fn send_event(&self, event: server_statistics::event::Event) -> BoxFuture<'static,Option > > > ; + impl server_event::sender::Sender for UdpServerStatsEventSender { + fn send_event(&self, event: server_event::Event) -> BoxFuture<'static,Option > > > ; } } } diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index fbf2b7c43..ac0faef61 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -13,8 +13,7 @@ use tracing::{instrument, Level}; use zerocopy::network_endian::I32; use crate::error::Error; -use crate::statistics as server_statistics; -use crate::statistics::event::{ConnectionContext, UdpRequestKind}; +use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; /// It handles the `Scrape` request. /// @@ -27,7 +26,7 @@ pub async fn handle_scrape( client_socket_addr: SocketAddr, server_socket_addr: SocketAddr, request: &ScrapeRequest, - opt_udp_server_stats_event_sender: &Arc>>, + opt_udp_server_stats_event_sender: &Arc>>, cookie_valid_range: Range, ) -> Result { tracing::Span::current() @@ -38,7 +37,7 @@ pub async fn handle_scrape( if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(server_statistics::event::Event::UdpRequestAccepted { + .send_event(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_socket_addr), kind: UdpRequestKind::Scrape, }) @@ -352,13 +351,13 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; + use crate::event; + use crate::event::{ConnectionContext, Event, UdpRequestKind}; use crate::handlers::handle_scrape; use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, 
sample_ipv4_remote_addr, MockUdpServerStatsEventSender, }; - use crate::statistics as server_statistics; - use crate::statistics::event::ConnectionContext; #[tokio::test] async fn should_send_the_upd4_scrape_event() { @@ -368,13 +367,13 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::UdpRequestAccepted { + .with(eq(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_socket_addr), - kind: server_statistics::event::UdpRequestKind::Scrape, + kind: UdpRequestKind::Scrape, })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let udp_server_stats_event_sender: Arc>> = + let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); let (_core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = @@ -401,13 +400,12 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; + use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; use crate::handlers::handle_scrape; use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, sample_ipv6_remote_addr, MockUdpServerStatsEventSender, }; - use crate::statistics as server_statistics; - use crate::statistics::event::ConnectionContext; #[tokio::test] async fn should_send_the_upd6_scrape_event() { @@ -417,13 +415,13 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() - .with(eq(server_statistics::event::Event::UdpRequestAccepted { + .with(eq(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_socket_addr), - kind: server_statistics::event::UdpRequestKind::Scrape, + kind: UdpRequestKind::Scrape, })) .times(1) .returning(|_| 
Box::pin(future::ready(Some(Ok(1))))); - let udp_server_stats_event_sender: Arc>> = + let udp_server_stats_event_sender: Arc>> = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); let (_core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = diff --git a/packages/udp-tracker-server/src/lib.rs b/packages/udp-tracker-server/src/lib.rs index 9e013bf81..ff53adcfb 100644 --- a/packages/udp-tracker-server/src/lib.rs +++ b/packages/udp-tracker-server/src/lib.rs @@ -637,6 +637,7 @@ pub mod container; pub mod environment; pub mod error; +pub mod event; pub mod handlers; pub mod server; pub mod statistics; diff --git a/packages/udp-tracker-server/src/server/launcher.rs b/packages/udp-tracker-server/src/server/launcher.rs index c6a105230..c98db0500 100644 --- a/packages/udp-tracker-server/src/server/launcher.rs +++ b/packages/udp-tracker-server/src/server/launcher.rs @@ -17,11 +17,10 @@ use tracing::instrument; use super::request_buffer::ActiveRequests; use crate::container::UdpTrackerServerContainer; +use crate::event::{ConnectionContext, Event}; use crate::server::bound_socket::BoundSocket; use crate::server::processor::Processor; use crate::server::receiver::Receiver; -use crate::statistics; -use crate::statistics::event::ConnectionContext; const IP_BANS_RESET_INTERVAL_IN_SECS: u64 = 3600; @@ -173,7 +172,7 @@ impl Launcher { if let Some(udp_server_stats_event_sender) = udp_tracker_server_container.udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(statistics::event::Event::UdpRequestReceived { + .send_event(Event::UdpRequestReceived { context: ConnectionContext::new(client_socket_addr, server_socket_addr), }) .await; @@ -186,7 +185,7 @@ impl Launcher { udp_tracker_server_container.udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(statistics::event::Event::UdpRequestBanned { + .send_event(Event::UdpRequestBanned { context: ConnectionContext::new(client_socket_addr, 
server_socket_addr), }) .await; @@ -228,7 +227,7 @@ impl Launcher { udp_tracker_server_container.udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(statistics::event::Event::UdpRequestAborted { + .send_event(Event::UdpRequestAborted { context: ConnectionContext::new(client_socket_addr, server_socket_addr), }) .await; diff --git a/packages/udp-tracker-server/src/server/processor.rs b/packages/udp-tracker-server/src/server/processor.rs index 4d1e4429a..02e084356 100644 --- a/packages/udp-tracker-server/src/server/processor.rs +++ b/packages/udp-tracker-server/src/server/processor.rs @@ -11,9 +11,9 @@ use tracing::{instrument, Level}; use super::bound_socket::BoundSocket; use crate::container::UdpTrackerServerContainer; +use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; use crate::handlers::CookieTimeValues; -use crate::statistics::event::{ConnectionContext, UdpRequestKind}; -use crate::{handlers, statistics, RawRequest}; +use crate::{handlers, RawRequest}; pub struct Processor { socket: Arc, @@ -77,16 +77,16 @@ impl Processor { }; let udp_response_kind = match &response { - Response::Connect(_) => statistics::event::UdpResponseKind::Ok { - req_kind: statistics::event::UdpRequestKind::Connect, + Response::Connect(_) => event::UdpResponseKind::Ok { + req_kind: event::UdpRequestKind::Connect, }, - Response::AnnounceIpv4(_) | Response::AnnounceIpv6(_) => statistics::event::UdpResponseKind::Ok { - req_kind: statistics::event::UdpRequestKind::Announce, + Response::AnnounceIpv4(_) | Response::AnnounceIpv6(_) => event::UdpResponseKind::Ok { + req_kind: event::UdpRequestKind::Announce, }, - Response::Scrape(_) => statistics::event::UdpResponseKind::Ok { - req_kind: statistics::event::UdpRequestKind::Scrape, + Response::Scrape(_) => event::UdpResponseKind::Ok { + req_kind: event::UdpRequestKind::Scrape, }, - Response::Error(_e) => statistics::event::UdpResponseKind::Error { opt_req_kind: None }, + Response::Error(_e) => 
event::UdpResponseKind::Error { opt_req_kind: None }, }; let mut writer = Cursor::new(Vec::with_capacity(200)); @@ -108,7 +108,7 @@ impl Processor { self.udp_tracker_server_container.udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(statistics::event::Event::UdpResponseSent { + .send_event(Event::UdpResponseSent { context: ConnectionContext::new(client_socket_addr, self.socket.address()), kind: udp_response_kind, req_processing_time, diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index 6abf7d3c7..f65a1e567 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -1,4 +1,4 @@ -use crate::statistics::event::{Event, UdpRequestKind, UdpResponseKind}; +use crate::event::{Event, UdpRequestKind, UdpResponseKind}; use crate::statistics::repository::Repository; /// # Panics @@ -100,8 +100,8 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use crate::event::{ConnectionContext, Event, UdpRequestKind}; use crate::statistics::event::handler::handle_event; - use crate::statistics::event::{ConnectionContext, Event, UdpRequestKind}; use crate::statistics::repository::Repository; #[tokio::test] @@ -209,7 +209,7 @@ mod tests { SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), ), - kind: crate::statistics::event::UdpRequestKind::Connect, + kind: crate::event::UdpRequestKind::Connect, }, &stats_repository, ) @@ -230,7 +230,7 @@ mod tests { SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), ), - kind: crate::statistics::event::UdpRequestKind::Announce, + kind: crate::event::UdpRequestKind::Announce, }, 
&stats_repository, ) @@ -251,7 +251,7 @@ mod tests { SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), ), - kind: crate::statistics::event::UdpRequestKind::Scrape, + kind: crate::event::UdpRequestKind::Scrape, }, &stats_repository, ) @@ -272,7 +272,7 @@ mod tests { SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), ), - kind: crate::statistics::event::UdpResponseKind::Ok { + kind: crate::event::UdpResponseKind::Ok { req_kind: UdpRequestKind::Announce, }, req_processing_time: std::time::Duration::from_secs(1), @@ -316,7 +316,7 @@ mod tests { SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), ), - kind: crate::statistics::event::UdpRequestKind::Connect, + kind: crate::event::UdpRequestKind::Connect, }, &stats_repository, ) @@ -337,7 +337,7 @@ mod tests { SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), ), - kind: crate::statistics::event::UdpRequestKind::Announce, + kind: crate::event::UdpRequestKind::Announce, }, &stats_repository, ) @@ -358,7 +358,7 @@ mod tests { SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), ), - kind: crate::statistics::event::UdpRequestKind::Scrape, + kind: crate::event::UdpRequestKind::Scrape, }, &stats_repository, ) @@ -379,7 +379,7 @@ mod tests { SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), ), - kind: crate::statistics::event::UdpResponseKind::Ok { + kind: crate::event::UdpResponseKind::Ok { req_kind: UdpRequestKind::Announce, }, 
req_processing_time: std::time::Duration::from_secs(1), diff --git a/packages/udp-tracker-server/src/statistics/event/listener.rs b/packages/udp-tracker-server/src/statistics/event/listener.rs index b755cbf18..b23260747 100644 --- a/packages/udp-tracker-server/src/statistics/event/listener.rs +++ b/packages/udp-tracker-server/src/statistics/event/listener.rs @@ -1,7 +1,7 @@ use tokio::sync::broadcast; use super::handler::handle_event; -use super::Event; +use crate::event::Event; use crate::statistics::repository::Repository; pub async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Repository) { diff --git a/packages/udp-tracker-server/src/statistics/event/mod.rs b/packages/udp-tracker-server/src/statistics/event/mod.rs index 1b0be960b..dae683398 100644 --- a/packages/udp-tracker-server/src/statistics/event/mod.rs +++ b/packages/udp-tracker-server/src/statistics/event/mod.rs @@ -1,78 +1,2 @@ -use std::net::SocketAddr; -use std::time::Duration; - pub mod handler; pub mod listener; -pub mod sender; - -/// An statistics event. It is used to collect tracker metrics. -#[derive(Debug, PartialEq, Eq, Clone)] -pub enum Event { - UdpRequestReceived { - context: ConnectionContext, - }, - UdpRequestAborted { - context: ConnectionContext, - }, - UdpRequestBanned { - context: ConnectionContext, - }, - UdpRequestAccepted { - context: ConnectionContext, - kind: UdpRequestKind, - }, - UdpResponseSent { - context: ConnectionContext, - kind: UdpResponseKind, - req_processing_time: Duration, - }, - UdpError { - context: ConnectionContext, - }, -} - -#[derive(Debug, PartialEq, Eq, Clone)] -pub enum UdpRequestKind { - Connect, - Announce, - Scrape, -} - -#[derive(Debug, PartialEq, Eq, Clone)] -pub enum UdpResponseKind { - Ok { - req_kind: UdpRequestKind, - }, - - /// There was an error handling the request. The error contains the request - /// kind if the request was parsed successfully. 
- Error { - opt_req_kind: Option, - }, -} - -#[derive(Debug, PartialEq, Eq, Clone)] -pub struct ConnectionContext { - client_socket_addr: SocketAddr, - server_socket_addr: SocketAddr, -} - -impl ConnectionContext { - #[must_use] - pub fn new(client_socket_addr: SocketAddr, server_socket_addr: SocketAddr) -> Self { - Self { - client_socket_addr, - server_socket_addr, - } - } - - #[must_use] - pub fn client_socket_addr(&self) -> SocketAddr { - self.client_socket_addr - } - - #[must_use] - pub fn server_socket_addr(&self) -> SocketAddr { - self.server_socket_addr - } -} diff --git a/packages/udp-tracker-server/src/statistics/keeper.rs b/packages/udp-tracker-server/src/statistics/keeper.rs index 099e0d0aa..62216ce88 100644 --- a/packages/udp-tracker-server/src/statistics/keeper.rs +++ b/packages/udp-tracker-server/src/statistics/keeper.rs @@ -1,8 +1,8 @@ use tokio::sync::broadcast::Receiver; use super::event::listener::dispatch_events; -use super::event::Event; use super::repository::Repository; +use crate::event::Event; /// The service responsible for keeping tracker metrics (listening to statistics events and handle them). /// diff --git a/packages/udp-tracker-server/src/statistics/setup.rs b/packages/udp-tracker-server/src/statistics/setup.rs index a9ac751c6..d8cc7bca9 100644 --- a/packages/udp-tracker-server/src/statistics/setup.rs +++ b/packages/udp-tracker-server/src/statistics/setup.rs @@ -1,44 +1,37 @@ //! Setup for the tracker statistics. //! -//! The [`factory`] function builds the structs needed for handling the tracker metrics. -use tokio::sync::broadcast; - -use super::event::sender::ChannelSender; -use crate::statistics; - -const CHANNEL_CAPACITY: usize = 1024; +//! The [`factory`] function builds the structs needed for handling the tracker +//! metrics. +use crate::event::sender::Broadcaster; +use crate::{event, statistics}; /// It builds the structs needed for handling the tracker metrics. 
/// /// It returns: /// -/// - An statistics event [`Sender`](crate::statistics::event::sender::Sender) that allows you to send events related to statistics. -/// - An statistics [`Repository`](crate::statistics::repository::Repository) which is an in-memory repository for the tracker metrics. +/// - An event [`Sender`](crate::event::sender::Sender) that allows you to send +/// events related to statistics. +/// - An statistics [`Repository`](crate::statistics::repository::Repository) +/// which is an in-memory repository for the tracker metrics. /// -/// When the input argument `tracker_usage_statistics`is false the setup does not run the event listeners, consequently the statistics -/// events are sent are received but not dispatched to the handler. +/// When the input argument `tracker_usage_statistics`is false the setup does +/// not run the event listeners, consequently the statistics events are sent are +/// received but not dispatched to the handler. #[must_use] -pub fn factory( - tracker_usage_statistics: bool, -) -> ( - Option>, - statistics::repository::Repository, -) { - let mut stats_event_sender: Option> = None; - +pub fn factory(tracker_usage_statistics: bool) -> (Option>, statistics::repository::Repository) { let mut keeper = statistics::keeper::Keeper::new(); - if tracker_usage_statistics { - let (sender, _) = broadcast::channel(CHANNEL_CAPACITY); + let opt_event_sender: Option> = if tracker_usage_statistics { + let broadcaster = Broadcaster::default(); - let receiver = sender.subscribe(); + keeper.run_event_listener(broadcaster.subscribe()); - stats_event_sender = Some(Box::new(ChannelSender { sender })); - - keeper.run_event_listener(receiver); - } + Some(Box::new(broadcaster)) + } else { + None + }; - (stats_event_sender, keeper.repository) + (opt_event_sender, keeper.repository) } #[cfg(test)] diff --git a/src/container.rs b/src/container.rs index 7822b5d61..3fcda55f0 100644 --- a/src/container.rs +++ b/src/container.rs @@ -66,7 +66,7 @@ pub 
struct AppContainer { pub http_scrape_service: Arc, // UDP Tracker Server Services - pub udp_server_stats_event_sender: Arc>>, + pub udp_server_stats_event_sender: Arc>>, pub udp_server_stats_repository: Arc, } From 055db4e67dd89183a9e838ba101c0567479de45c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Mar 2025 17:06:18 +0000 Subject: [PATCH 406/802] docs: [#1395] minor changes in comments --- packages/http-tracker-core/src/event/mod.rs | 2 +- packages/udp-tracker-core/src/event/mod.rs | 5 +---- packages/udp-tracker-server/src/event/mod.rs | 2 +- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/packages/http-tracker-core/src/event/mod.rs b/packages/http-tracker-core/src/event/mod.rs index da824c240..3db258238 100644 --- a/packages/http-tracker-core/src/event/mod.rs +++ b/packages/http-tracker-core/src/event/mod.rs @@ -2,7 +2,7 @@ use std::net::{IpAddr, SocketAddr}; pub mod sender; -/// An event. +/// A HTTP core event. #[derive(Debug, PartialEq, Eq, Clone)] pub enum Event { TcpAnnounce { connection: ConnectionContext }, diff --git a/packages/udp-tracker-core/src/event/mod.rs b/packages/udp-tracker-core/src/event/mod.rs index 48a5b501b..04b3170e2 100644 --- a/packages/udp-tracker-core/src/event/mod.rs +++ b/packages/udp-tracker-core/src/event/mod.rs @@ -2,10 +2,7 @@ use std::net::SocketAddr; pub mod sender; -/// An statistics event. It is used to collect tracker metrics. -/// -/// - `Udp` prefix means the event was triggered by the UDP tracker. -/// - The event suffix is the type of request: `announce`, `scrape` or `connection`. +/// A UDP core event. 
#[derive(Debug, PartialEq, Eq, Clone)] pub enum Event { UdpConnect { context: ConnectionContext }, diff --git a/packages/udp-tracker-server/src/event/mod.rs b/packages/udp-tracker-server/src/event/mod.rs index adc1396cc..0adf29c8b 100644 --- a/packages/udp-tracker-server/src/event/mod.rs +++ b/packages/udp-tracker-server/src/event/mod.rs @@ -3,7 +3,7 @@ use std::time::Duration; pub mod sender; -/// An statistics event. It is used to collect tracker metrics. +/// A UDP server event. #[derive(Debug, PartialEq, Eq, Clone)] pub enum Event { UdpRequestReceived { From 9eba80fa62f0f8e971c3c6fd726c681eb1fdd2fa Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 19 Mar 2025 17:07:31 +0000 Subject: [PATCH 407/802] refactor: [#1395] rename send_stats_event to send_event Events are now generic even if they are only used for stats for now. --- packages/http-tracker-core/src/services/announce.rs | 4 ++-- packages/http-tracker-core/src/services/scrape.rs | 10 ++-------- packages/udp-tracker-core/src/services/announce.rs | 4 ++-- packages/udp-tracker-core/src/services/scrape.rs | 4 ++-- 4 files changed, 8 insertions(+), 14 deletions(-) diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index cd7417e98..f8d2e0b11 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -87,7 +87,7 @@ impl AnnounceService { .announce(&announce_request.info_hash, &mut peer, &remote_client_ip, &peers_wanted) .await?; - self.send_stats_event(remote_client_ip, opt_remote_client_port, *server_socket_addr) + self.send_event(remote_client_ip, opt_remote_client_port, *server_socket_addr) .await; Ok(announce_data) @@ -138,7 +138,7 @@ impl AnnounceService { } } - async fn send_stats_event(&self, peer_ip: IpAddr, opt_peer_ip_port: Option, server_socket_addr: SocketAddr) { + async fn send_event(&self, peer_ip: IpAddr, opt_peer_ip_port: Option, server_socket_addr: SocketAddr) 
{ if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { http_stats_event_sender .send_event(Event::TcpAnnounce { diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 1f4c14b5a..c9b3182f8 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -82,8 +82,7 @@ impl ScrapeService { let (remote_client_ip, opt_client_port) = self.resolve_remote_client_ip(client_ip_sources)?; - self.send_stats_event(remote_client_ip, opt_client_port, *server_socket_addr) - .await; + self.send_event(remote_client_ip, opt_client_port, *server_socket_addr).await; Ok(scrape_data) } @@ -118,12 +117,7 @@ impl ScrapeService { Ok((ip, port)) } - async fn send_stats_event( - &self, - original_peer_ip: IpAddr, - opt_original_peer_port: Option, - server_socket_addr: SocketAddr, - ) { + async fn send_event(&self, original_peer_ip: IpAddr, opt_original_peer_port: Option, server_socket_addr: SocketAddr) { if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { http_stats_event_sender .send_event(Event::TcpScrape { diff --git a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index bba9b51fc..d99618316 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -79,7 +79,7 @@ impl AnnounceService { .announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted) .await?; - self.send_stats_event(client_socket_addr, server_socket_addr).await; + self.send_event(client_socket_addr, server_socket_addr).await; Ok(announce_data) } @@ -100,7 +100,7 @@ impl AnnounceService { self.whitelist_authorization.authorize(info_hash).await } - async fn send_stats_event(&self, client_socket_addr: SocketAddr, server_socket_addr: SocketAddr) { + async fn send_event(&self, client_socket_addr: SocketAddr, 
server_socket_addr: SocketAddr) { if let Some(udp_stats_event_sender) = self.opt_udp_core_stats_event_sender.as_deref() { udp_stats_event_sender .send_event(Event::UdpAnnounce { diff --git a/packages/udp-tracker-core/src/services/scrape.rs b/packages/udp-tracker-core/src/services/scrape.rs index 9f0941c2a..3b6898311 100644 --- a/packages/udp-tracker-core/src/services/scrape.rs +++ b/packages/udp-tracker-core/src/services/scrape.rs @@ -61,7 +61,7 @@ impl ScrapeService { .scrape(&Self::convert_from_aquatic(&request.info_hashes)) .await?; - self.send_stats_event(client_socket_addr, server_socket_addr).await; + self.send_event(client_socket_addr, server_socket_addr).await; Ok(scrape_data) } @@ -82,7 +82,7 @@ impl ScrapeService { aquatic_infohashes.iter().map(|&x| x.into()).collect() } - async fn send_stats_event(&self, client_socket_addr: SocketAddr, server_socket_addr: SocketAddr) { + async fn send_event(&self, client_socket_addr: SocketAddr, server_socket_addr: SocketAddr) { if let Some(udp_stats_event_sender) = self.opt_udp_stats_event_sender.as_deref() { udp_stats_event_sender .send_event(Event::UdpScrape { From 57d884d8dd6128a019ad5253eb094507689cb83b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Mar 2025 09:52:31 +0000 Subject: [PATCH 408/802] refactor: [#1401] add config option to enable/disable tracker usage stats per server It does not have any effect yet. 
--- packages/configuration/src/v2_0_0/core.rs | 1 + packages/configuration/src/v2_0_0/http_tracker.rs | 9 +++++++++ packages/configuration/src/v2_0_0/udp_tracker.rs | 9 +++++++++ packages/test-helpers/src/configuration.rs | 2 ++ 4 files changed, 21 insertions(+) diff --git a/packages/configuration/src/v2_0_0/core.rs b/packages/configuration/src/v2_0_0/core.rs index ed3e6aeb7..32dac8b3c 100644 --- a/packages/configuration/src/v2_0_0/core.rs +++ b/packages/configuration/src/v2_0_0/core.rs @@ -103,6 +103,7 @@ impl Core { fn default_tracker_policy() -> TrackerPolicy { TrackerPolicy::default() } + fn default_tracker_usage_statistics() -> bool { true } diff --git a/packages/configuration/src/v2_0_0/http_tracker.rs b/packages/configuration/src/v2_0_0/http_tracker.rs index 42ec02bf2..b3b21bda8 100644 --- a/packages/configuration/src/v2_0_0/http_tracker.rs +++ b/packages/configuration/src/v2_0_0/http_tracker.rs @@ -19,6 +19,10 @@ pub struct HttpTracker { /// TSL config. #[serde(default = "HttpTracker::default_tsl_config")] pub tsl_config: Option, + + /// Weather the tracker should collect statistics about tracker usage. + #[serde(default = "HttpTracker::default_tracker_usage_statistics")] + pub tracker_usage_statistics: bool, } impl Default for HttpTracker { @@ -26,6 +30,7 @@ impl Default for HttpTracker { Self { bind_address: Self::default_bind_address(), tsl_config: Self::default_tsl_config(), + tracker_usage_statistics: Self::default_tracker_usage_statistics(), } } } @@ -38,4 +43,8 @@ impl HttpTracker { fn default_tsl_config() -> Option { None } + + fn default_tracker_usage_statistics() -> bool { + false + } } diff --git a/packages/configuration/src/v2_0_0/udp_tracker.rs b/packages/configuration/src/v2_0_0/udp_tracker.rs index 0eee87700..9918bc1fa 100644 --- a/packages/configuration/src/v2_0_0/udp_tracker.rs +++ b/packages/configuration/src/v2_0_0/udp_tracker.rs @@ -16,12 +16,17 @@ pub struct UdpTracker { /// the client as the `ConnectionId`. 
#[serde(default = "UdpTracker::default_cookie_lifetime")] pub cookie_lifetime: Duration, + + /// Weather the tracker should collect statistics about tracker usage. + #[serde(default = "UdpTracker::default_tracker_usage_statistics")] + pub tracker_usage_statistics: bool, } impl Default for UdpTracker { fn default() -> Self { Self { bind_address: Self::default_bind_address(), cookie_lifetime: Self::default_cookie_lifetime(), + tracker_usage_statistics: Self::default_tracker_usage_statistics(), } } } @@ -34,4 +39,8 @@ impl UdpTracker { fn default_cookie_lifetime() -> Duration { Duration::from_secs(120) } + + fn default_tracker_usage_statistics() -> bool { + false + } } diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index 130820334..986981b1f 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -55,6 +55,7 @@ pub fn ephemeral() -> Configuration { config.udp_trackers = Some(vec![UdpTracker { bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), udp_port), cookie_lifetime: Duration::from_secs(120), + tracker_usage_statistics: true, }]); // Ephemeral socket address for HTTP tracker @@ -62,6 +63,7 @@ pub fn ephemeral() -> Configuration { config.http_trackers = Some(vec![HttpTracker { bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), http_port), tsl_config: None, + tracker_usage_statistics: true, }]); let temp_file = ephemeral_sqlite_database(); From 82bacf042ecb92cbf97c492f0f64a88654ccc954 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Mar 2025 16:33:12 +0000 Subject: [PATCH 409/802] chore: enable seggregate stats for dev env by default --- share/default/config/tracker.development.sqlite3.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index 96addaf87..d07868c51 100644 --- 
a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -12,9 +12,11 @@ private = false [[udp_trackers]] bind_address = "0.0.0.0:6969" +tracker_usage_statistics = true [[http_trackers]] bind_address = "0.0.0.0:7070" +tracker_usage_statistics = true [http_api] bind_address = "0.0.0.0:1212" From c7297c16ae7e8a71b0ea1aa45bef77246be469bc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Mar 2025 16:34:45 +0000 Subject: [PATCH 410/802] chore: add a second HTTP and UDP tracker in dev env We will start making changes in the `AppContainer` and services. Having more than one server of the same type could help to detect bugs prematurely. --- share/default/config/tracker.development.sqlite3.toml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index d07868c51..333c6d66c 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -10,6 +10,10 @@ threshold = "info" listed = false private = false +[[udp_trackers]] +bind_address = "0.0.0.0:6868" +tracker_usage_statistics = true + [[udp_trackers]] bind_address = "0.0.0.0:6969" tracker_usage_statistics = true @@ -18,6 +22,10 @@ tracker_usage_statistics = true bind_address = "0.0.0.0:7070" tracker_usage_statistics = true +[[http_trackers]] +bind_address = "0.0.0.0:7171" +tracker_usage_statistics = true + [http_api] bind_address = "0.0.0.0:1212" From 9cee15ee22230e304588bf4f1312abd0bc567bf9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Mar 2025 17:05:50 +0000 Subject: [PATCH 411/802] refactor: encapsule fiel in AppContainer for TrackerCoreContainer --- src/app.rs | 7 ++++- src/container.rs | 69 ++++++++++++------------------------------------ 2 files changed, 23 insertions(+), 53 deletions(-) diff --git a/src/app.rs b/src/app.rs index 60e907a88..fb8a459ea 100644 --- 
a/src/app.rs +++ b/src/app.rs @@ -53,6 +53,7 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> // Load peer keys if config.core.private { app_container + .tracker_core_container .keys_handler .load_peer_keys_from_database() .await @@ -62,6 +63,7 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> // Load whitelisted torrents if config.core.listed { app_container + .tracker_core_container .whitelist_manager .load_whitelist_from_database() .await @@ -130,7 +132,10 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> // Start runners to remove torrents without peers, every interval if config.core.inactive_peer_cleanup_interval > 0 { - jobs.push(torrent_cleanup::start_job(&config.core, &app_container.torrents_manager)); + jobs.push(torrent_cleanup::start_job( + &config.core, + &app_container.tracker_core_container.torrents_manager, + )); } // Start Health Check API diff --git a/src/container.rs b/src/container.rs index 3fcda55f0..b02dc8811 100644 --- a/src/container.rs +++ b/src/container.rs @@ -3,24 +3,13 @@ use std::sync::Arc; use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use bittorrent_http_tracker_core::services::announce::AnnounceService; use bittorrent_http_tracker_core::services::scrape::ScrapeService; -use bittorrent_tracker_core::announce_handler::AnnounceHandler; -use bittorrent_tracker_core::authentication::handler::KeysHandler; -use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::container::TrackerCoreContainer; -use bittorrent_tracker_core::databases::Database; -use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_tracker_core::torrent::manager::TorrentsManager; -use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; -use bittorrent_tracker_core::whitelist; -use 
bittorrent_tracker_core::whitelist::manager::WhitelistManager; -use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self, MAX_CONNECTION_ID_ERRORS_PER_IP}; use tokio::sync::RwLock; use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; -use torrust_tracker_configuration::{Configuration, Core, HttpApi, HttpTracker, UdpTracker}; +use torrust_tracker_configuration::{Configuration, HttpApi, HttpTracker, UdpTracker}; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; use tracing::instrument; @@ -28,7 +17,6 @@ use tracing::instrument; Use containers from packages as AppContainer fields: - - bittorrent_tracker_core::container::TrackerCoreContainer - bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer - bittorrent_http_tracker_core::container::HttpTrackerCoreContainer - torrust_udp_tracker_server::container::UdpTrackerServerContainer @@ -37,19 +25,7 @@ use tracing::instrument; */ pub struct AppContainer { - // Tracker Core Services - pub core_config: Arc, - pub database: Arc>, - pub announce_handler: Arc, - pub scrape_handler: Arc, - pub keys_handler: Arc, - pub authentication_service: Arc, - pub in_memory_whitelist: Arc, - pub whitelist_authorization: Arc, - pub whitelist_manager: Arc, - pub in_memory_torrent_repository: Arc, - pub db_torrent_repository: Arc, - pub torrents_manager: Arc, + pub tracker_core_container: TrackerCoreContainer, // UDP Tracker Core Services pub udp_core_stats_event_sender: Arc>>, @@ -122,19 +98,7 @@ impl AppContainer { let udp_server_stats_repository = Arc::new(udp_server_stats_repository); AppContainer { - // Tracker Core Services - core_config, - database: tracker_core_container.database, - announce_handler: tracker_core_container.announce_handler, - scrape_handler: 
tracker_core_container.scrape_handler, - keys_handler: tracker_core_container.keys_handler, - authentication_service: tracker_core_container.authentication_service, - in_memory_whitelist: tracker_core_container.in_memory_whitelist, - whitelist_authorization: tracker_core_container.whitelist_authorization, - whitelist_manager: tracker_core_container.whitelist_manager, - in_memory_torrent_repository: tracker_core_container.in_memory_torrent_repository, - db_torrent_repository: tracker_core_container.db_torrent_repository, - torrents_manager: tracker_core_container.torrents_manager, + tracker_core_container, // UDP Tracker Core Services udp_core_stats_event_sender, @@ -159,11 +123,11 @@ impl AppContainer { #[must_use] pub fn http_tracker_container(&self, http_tracker_config: &Arc) -> HttpTrackerCoreContainer { HttpTrackerCoreContainer { - core_config: self.core_config.clone(), - announce_handler: self.announce_handler.clone(), - scrape_handler: self.scrape_handler.clone(), - whitelist_authorization: self.whitelist_authorization.clone(), - authentication_service: self.authentication_service.clone(), + core_config: self.tracker_core_container.core_config.clone(), + announce_handler: self.tracker_core_container.announce_handler.clone(), + scrape_handler: self.tracker_core_container.scrape_handler.clone(), + whitelist_authorization: self.tracker_core_container.whitelist_authorization.clone(), + authentication_service: self.tracker_core_container.authentication_service.clone(), http_tracker_config: http_tracker_config.clone(), http_stats_event_sender: self.http_stats_event_sender.clone(), @@ -176,10 +140,10 @@ impl AppContainer { #[must_use] pub fn udp_tracker_container(&self, udp_tracker_config: &Arc) -> UdpTrackerCoreContainer { UdpTrackerCoreContainer { - core_config: self.core_config.clone(), - announce_handler: self.announce_handler.clone(), - scrape_handler: self.scrape_handler.clone(), - whitelist_authorization: self.whitelist_authorization.clone(), + core_config: 
self.tracker_core_container.core_config.clone(), + announce_handler: self.tracker_core_container.announce_handler.clone(), + scrape_handler: self.tracker_core_container.scrape_handler.clone(), + whitelist_authorization: self.tracker_core_container.whitelist_authorization.clone(), udp_tracker_config: udp_tracker_config.clone(), udp_core_stats_event_sender: self.udp_core_stats_event_sender.clone(), @@ -194,11 +158,12 @@ impl AppContainer { #[must_use] pub fn tracker_http_api_container(&self, http_api_config: &Arc) -> TrackerHttpApiCoreContainer { TrackerHttpApiCoreContainer { + core_config: self.tracker_core_container.core_config.clone(), + in_memory_torrent_repository: self.tracker_core_container.in_memory_torrent_repository.clone(), + keys_handler: self.tracker_core_container.keys_handler.clone(), + whitelist_manager: self.tracker_core_container.whitelist_manager.clone(), + http_api_config: http_api_config.clone(), - core_config: self.core_config.clone(), - in_memory_torrent_repository: self.in_memory_torrent_repository.clone(), - keys_handler: self.keys_handler.clone(), - whitelist_manager: self.whitelist_manager.clone(), ban_service: self.udp_ban_service.clone(), http_stats_repository: self.http_stats_repository.clone(), udp_core_stats_repository: self.udp_core_stats_repository.clone(), From 0d42586c6034e535ce2a9f812bb2327f6fd3b80b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Mar 2025 17:29:29 +0000 Subject: [PATCH 412/802] refactor: encapsule field TrackerCoreContainer in HttpTrackerCoreContainer --- .../axum-http-tracker-server/src/server.rs | 51 ++++--------------- packages/http-tracker-core/src/container.rs | 19 +------ src/container.rs | 11 ++-- 3 files changed, 15 insertions(+), 66 deletions(-) diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index ea8003a4f..f14a33602 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ 
-241,15 +241,7 @@ mod tests { use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use bittorrent_http_tracker_core::services::announce::AnnounceService; use bittorrent_http_tracker_core::services::scrape::ScrapeService; - use bittorrent_tracker_core::announce_handler::AnnounceHandler; - use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; - use bittorrent_tracker_core::authentication::service; - use bittorrent_tracker_core::databases::setup::initialize_database; - use bittorrent_tracker_core::scrape_handler::ScrapeHandler; - use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; - use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; - use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use bittorrent_tracker_core::container::TrackerCoreContainer; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; @@ -275,48 +267,25 @@ mod tests { let http_stats_event_sender = Arc::new(http_stats_event_sender); let http_stats_repository = Arc::new(http_stats_repository); - let database = initialize_database(&configuration.core); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&configuration.core, &in_memory_whitelist.clone())); - let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); - let authentication_service = Arc::new(service::AuthenticationService::new( - &configuration.core, - &in_memory_key_repository, - )); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - - let announce_handler = 
Arc::new(AnnounceHandler::new( - &configuration.core, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); let announce_service = Arc::new(AnnounceService::new( - core_config.clone(), - announce_handler.clone(), - authentication_service.clone(), - whitelist_authorization.clone(), + tracker_core_container.core_config.clone(), + tracker_core_container.announce_handler.clone(), + tracker_core_container.authentication_service.clone(), + tracker_core_container.whitelist_authorization.clone(), http_stats_event_sender.clone(), )); let scrape_service = Arc::new(ScrapeService::new( - core_config.clone(), - scrape_handler.clone(), - authentication_service.clone(), + tracker_core_container.core_config.clone(), + tracker_core_container.scrape_handler.clone(), + tracker_core_container.authentication_service.clone(), http_stats_event_sender.clone(), )); HttpTrackerCoreContainer { - core_config, - announce_handler, - scrape_handler, - whitelist_authorization, - authentication_service, - + tracker_core_container, http_tracker_config, http_stats_event_sender, http_stats_repository, diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index bb9b5014c..ce577f1d8 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -1,10 +1,6 @@ use std::sync::Arc; -use bittorrent_tracker_core::announce_handler::AnnounceHandler; -use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::container::TrackerCoreContainer; -use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_tracker_core::whitelist; use torrust_tracker_configuration::{Core, HttpTracker}; use 
crate::services::announce::AnnounceService; @@ -12,13 +8,7 @@ use crate::services::scrape::ScrapeService; use crate::{event, statistics}; pub struct HttpTrackerCoreContainer { - // todo: replace with TrackerCoreContainer - pub core_config: Arc, - pub announce_handler: Arc, - pub scrape_handler: Arc, - pub whitelist_authorization: Arc, - pub authentication_service: Arc, - + pub tracker_core_container: Arc, pub http_tracker_config: Arc, pub http_stats_event_sender: Arc>>, pub http_stats_repository: Arc, @@ -59,12 +49,7 @@ impl HttpTrackerCoreContainer { )); Arc::new(Self { - core_config: tracker_core_container.core_config.clone(), - announce_handler: tracker_core_container.announce_handler.clone(), - scrape_handler: tracker_core_container.scrape_handler.clone(), - whitelist_authorization: tracker_core_container.whitelist_authorization.clone(), - authentication_service: tracker_core_container.authentication_service.clone(), - + tracker_core_container: tracker_core_container.clone(), http_tracker_config: http_tracker_config.clone(), http_stats_event_sender: http_stats_event_sender.clone(), http_stats_repository: http_stats_repository.clone(), diff --git a/src/container.rs b/src/container.rs index b02dc8811..ce236dc58 100644 --- a/src/container.rs +++ b/src/container.rs @@ -25,7 +25,7 @@ use tracing::instrument; */ pub struct AppContainer { - pub tracker_core_container: TrackerCoreContainer, + pub tracker_core_container: Arc, // UDP Tracker Core Services pub udp_core_stats_event_sender: Arc>>, @@ -51,7 +51,7 @@ impl AppContainer { pub fn initialize(configuration: &Configuration) -> AppContainer { let core_config = Arc::new(configuration.core.clone()); - let tracker_core_container = TrackerCoreContainer::initialize(&core_config); + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); // HTTP Tracker Core Services let (http_stats_event_sender, http_stats_repository) = @@ -123,12 +123,7 @@ impl AppContainer { #[must_use] pub fn 
http_tracker_container(&self, http_tracker_config: &Arc) -> HttpTrackerCoreContainer { HttpTrackerCoreContainer { - core_config: self.tracker_core_container.core_config.clone(), - announce_handler: self.tracker_core_container.announce_handler.clone(), - scrape_handler: self.tracker_core_container.scrape_handler.clone(), - whitelist_authorization: self.tracker_core_container.whitelist_authorization.clone(), - authentication_service: self.tracker_core_container.authentication_service.clone(), - + tracker_core_container: self.tracker_core_container.clone(), http_tracker_config: http_tracker_config.clone(), http_stats_event_sender: self.http_stats_event_sender.clone(), http_stats_repository: self.http_stats_repository.clone(), From 239f352ab590ac4e1e908dd5b123ed09e60b59a9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Mar 2025 17:35:58 +0000 Subject: [PATCH 413/802] refactor: encapsule field TrackerCoreContainer in UdpTrackerCoreContainer --- packages/udp-tracker-core/src/container.rs | 16 ++-------------- packages/udp-tracker-server/src/handlers/mod.rs | 2 +- .../udp-tracker-server/src/server/launcher.rs | 2 +- src/container.rs | 6 +----- 4 files changed, 5 insertions(+), 21 deletions(-) diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index aaa07f150..2ab578151 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -1,9 +1,6 @@ use std::sync::Arc; -use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::container::TrackerCoreContainer; -use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_tracker_core::whitelist; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, UdpTracker}; @@ -14,12 +11,7 @@ use crate::services::scrape::ScrapeService; use crate::{event, statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; pub struct UdpTrackerCoreContainer { - // todo: replace with 
TrackerCoreContainer - pub core_config: Arc, - pub announce_handler: Arc, - pub scrape_handler: Arc, - pub whitelist_authorization: Arc, - + pub tracker_core_container: Arc, pub udp_tracker_config: Arc, pub udp_core_stats_event_sender: Arc>>, pub udp_core_stats_repository: Arc, @@ -58,11 +50,7 @@ impl UdpTrackerCoreContainer { )); Arc::new(UdpTrackerCoreContainer { - core_config: tracker_core_container.core_config.clone(), - announce_handler: tracker_core_container.announce_handler.clone(), - scrape_handler: tracker_core_container.scrape_handler.clone(), - whitelist_authorization: tracker_core_container.whitelist_authorization.clone(), - + tracker_core_container: tracker_core_container.clone(), udp_tracker_config: udp_tracker_config.clone(), udp_core_stats_event_sender: udp_core_stats_event_sender.clone(), udp_core_stats_repository: udp_core_stats_repository.clone(), diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 61f7bb187..34ac374fa 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -172,7 +172,7 @@ pub async fn handle_request( client_socket_addr, server_socket_addr, &announce_request, - &udp_tracker_core_container.core_config, + &udp_tracker_core_container.tracker_core_container.core_config, &udp_tracker_server_container.udp_server_stats_event_sender, cookie_time_values.valid_range, ) diff --git a/packages/udp-tracker-server/src/server/launcher.rs b/packages/udp-tracker-server/src/server/launcher.rs index c98db0500..b21ac11ba 100644 --- a/packages/udp-tracker-server/src/server/launcher.rs +++ b/packages/udp-tracker-server/src/server/launcher.rs @@ -47,7 +47,7 @@ impl Launcher { ) { tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting on: {bind_to}"); - if udp_tracker_core_container.core_config.private { + if udp_tracker_core_container.tracker_core_container.core_config.private { tracing::error!("udp services cannot be used 
for private trackers"); panic!("it should not use udp if using authentication"); } diff --git a/src/container.rs b/src/container.rs index ce236dc58..bb872d98f 100644 --- a/src/container.rs +++ b/src/container.rs @@ -135,11 +135,7 @@ impl AppContainer { #[must_use] pub fn udp_tracker_container(&self, udp_tracker_config: &Arc) -> UdpTrackerCoreContainer { UdpTrackerCoreContainer { - core_config: self.tracker_core_container.core_config.clone(), - announce_handler: self.tracker_core_container.announce_handler.clone(), - scrape_handler: self.tracker_core_container.scrape_handler.clone(), - whitelist_authorization: self.tracker_core_container.whitelist_authorization.clone(), - + tracker_core_container: self.tracker_core_container.clone(), udp_tracker_config: udp_tracker_config.clone(), udp_core_stats_event_sender: self.udp_core_stats_event_sender.clone(), udp_core_stats_repository: self.udp_core_stats_repository.clone(), From c785d545771921d8dc66287493231cc6252da968 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 21 Mar 2025 17:40:42 +0000 Subject: [PATCH 414/802] refactor: encapsule field TrackerCoreContainer in TrackerHttpApiCoreContainer --- .../src/v1/context/stats/routes.rs | 2 +- .../src/v1/routes.rs | 18 +++++++++++++--- .../rest-tracker-api-core/src/container.rs | 21 ++----------------- src/container.rs | 17 +-------------- 4 files changed, 19 insertions(+), 39 deletions(-) diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs index 49ba9e829..e92b5b34d 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs @@ -16,7 +16,7 @@ pub fn add(prefix: &str, router: Router, http_api_container: &Arc) -> Router { let v1_prefix = format!("{prefix}/v1"); - let router = auth_key::routes::add(&v1_prefix, router, &http_api_container.keys_handler.clone()); + let router = 
auth_key::routes::add( + &v1_prefix, + router, + &http_api_container.tracker_core_container.keys_handler.clone(), + ); let router = stats::routes::add(&v1_prefix, router, http_api_container); - let router = whitelist::routes::add(&v1_prefix, router, &http_api_container.whitelist_manager); + let router = whitelist::routes::add( + &v1_prefix, + router, + &http_api_container.tracker_core_container.whitelist_manager, + ); - torrent::routes::add(&v1_prefix, router, &http_api_container.in_memory_torrent_repository.clone()) + torrent::routes::add( + &v1_prefix, + router, + &http_api_container.tracker_core_container.in_memory_torrent_repository.clone(), + ) } diff --git a/packages/rest-tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs index eb770c1c5..c6a46a195 100644 --- a/packages/rest-tracker-api-core/src/container.rs +++ b/packages/rest-tracker-api-core/src/container.rs @@ -1,10 +1,7 @@ use std::sync::Arc; use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; -use bittorrent_tracker_core::authentication::handler::KeysHandler; use bittorrent_tracker_core::container::TrackerCoreContainer; -use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_tracker_core::whitelist::manager::WhitelistManager; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self}; @@ -13,22 +10,11 @@ use torrust_tracker_configuration::{Core, HttpApi, HttpTracker, UdpTracker}; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; pub struct TrackerHttpApiCoreContainer { - // todo: replace with TrackerCoreContainer - pub core_config: Arc, - pub in_memory_torrent_repository: Arc, - pub keys_handler: Arc, - pub whitelist_manager: Arc, - - // todo: replace with HttpTrackerCoreContainer + pub tracker_core_container: Arc, pub http_stats_repository: Arc, - - // todo: replace with 
UdpTrackerCoreContainer pub ban_service: Arc>, pub udp_core_stats_repository: Arc, - - // todo: replace with UdpTrackerServerContainer pub udp_server_stats_repository: Arc, - pub http_api_config: Arc, } @@ -63,10 +49,7 @@ impl TrackerHttpApiCoreContainer { http_api_config: &Arc, ) -> Arc { Arc::new(TrackerHttpApiCoreContainer { - core_config: tracker_core_container.core_config.clone(), - in_memory_torrent_repository: tracker_core_container.in_memory_torrent_repository.clone(), - keys_handler: tracker_core_container.keys_handler.clone(), - whitelist_manager: tracker_core_container.whitelist_manager.clone(), + tracker_core_container: tracker_core_container.clone(), http_stats_repository: http_tracker_core_container.http_stats_repository.clone(), diff --git a/src/container.rs b/src/container.rs index bb872d98f..d3253b5d9 100644 --- a/src/container.rs +++ b/src/container.rs @@ -13,17 +13,6 @@ use torrust_tracker_configuration::{Configuration, HttpApi, HttpTracker, UdpTrac use torrust_udp_tracker_server::container::UdpTrackerServerContainer; use tracing::instrument; -/* todo: remove duplicate code. - - Use containers from packages as AppContainer fields: - - - bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer - - bittorrent_http_tracker_core::container::HttpTrackerCoreContainer - - torrust_udp_tracker_server::container::UdpTrackerServerContainer - - Container initialization is duplicated. 
-*/ - pub struct AppContainer { pub tracker_core_container: Arc, @@ -149,11 +138,7 @@ impl AppContainer { #[must_use] pub fn tracker_http_api_container(&self, http_api_config: &Arc) -> TrackerHttpApiCoreContainer { TrackerHttpApiCoreContainer { - core_config: self.tracker_core_container.core_config.clone(), - in_memory_torrent_repository: self.tracker_core_container.in_memory_torrent_repository.clone(), - keys_handler: self.tracker_core_container.keys_handler.clone(), - whitelist_manager: self.tracker_core_container.whitelist_manager.clone(), - + tracker_core_container: self.tracker_core_container.clone(), http_api_config: http_api_config.clone(), ban_service: self.udp_ban_service.clone(), http_stats_repository: self.http_stats_repository.clone(), From c7c87a20d0a176f3d5f1a26c82aee594ece04845 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 24 Mar 2025 07:11:45 +0000 Subject: [PATCH 415/802] chore(deps): udpate dependencies ``` cargo update Updating crates.io index Locking 53 packages to latest compatible versions Updating async-compression v0.4.20 -> v0.4.21 Updating async-std v1.13.0 -> v1.13.1 Updating async-trait v0.1.87 -> v0.1.88 Updating axum-server v0.7.1 -> v0.7.2 Updating borsh v1.5.5 -> v1.5.6 Updating borsh-derive v1.5.5 -> v1.5.6 Updating cc v1.2.16 -> v1.2.17 Updating deranged v0.3.11 -> v0.4.1 Updating foldhash v0.1.4 -> v0.1.5 Adding fs-err v3.1.0 Updating getrandom v0.3.1 -> v0.3.2 Updating half v2.4.1 -> v2.5.0 Updating http v1.2.0 -> v1.3.1 Updating http-body-util v0.1.2 -> v0.1.3 Updating iana-time-zone v0.1.61 -> v0.1.62 Updating libz-sys v1.1.21 -> v1.1.22 Updating linux-raw-sys v0.9.2 -> v0.9.3 Updating once_cell v1.21.0 -> v1.21.1 Removing pin-project v1.1.10 Removing pin-project-internal v1.1.10 Updating quote v1.0.39 -> v1.0.40 Adding r-efi v5.2.0 Updating reqwest v0.12.12 -> v0.12.15 Updating ring v0.17.13 -> v0.17.14 Updating rust_decimal v1.36.0 -> v1.37.1 Updating rustix v1.0.2 -> v1.0.3 Updating rustls v0.23.23 -> v0.23.25 
Updating rustls-webpki v0.102.8 -> v0.103.0 Updating tempfile v3.18.0 -> v3.19.1 Updating time v0.3.39 -> v0.3.41 Updating time-core v0.1.3 -> v0.1.4 Updating time-macros v0.2.20 -> v0.2.22 Updating tokio v1.44.0 -> v1.44.1 Updating tokio-util v0.7.13 -> v0.7.14 Removing tower v0.4.13 Updating uuid v1.15.1 -> v1.16.0 Updating wasi v0.13.3+wasi-0.2.2 -> v0.14.2+wasi-0.2.4 Updating windows-link v0.1.0 -> v0.1.1 Updating windows-registry v0.2.0 -> v0.4.0 Updating windows-result v0.2.0 -> v0.3.2 Updating windows-strings v0.1.0 -> v0.3.1 Adding windows-targets v0.53.0 Adding windows_aarch64_gnullvm v0.53.0 Adding windows_aarch64_msvc v0.53.0 Adding windows_i686_gnu v0.53.0 Adding windows_i686_gnullvm v0.53.0 Adding windows_i686_msvc v0.53.0 Adding windows_x86_64_gnu v0.53.0 Adding windows_x86_64_gnullvm v0.53.0 Adding windows_x86_64_msvc v0.53.0 Updating winnow v0.7.3 -> v0.7.4 Updating wit-bindgen-rt v0.33.0 -> v0.39.0 Updating zerocopy v0.8.23 -> v0.8.24 Updating zerocopy-derive v0.8.23 -> v0.8.24 Updating zstd-safe v7.2.3 -> v7.2.4 Updating zstd-sys v2.0.14+zstd.1.5.7 -> v2.0.15+zstd.1.5.7 ``` --- Cargo.lock | 334 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 188 insertions(+), 146 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4d5157055..076449944 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -208,9 +208,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.20" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "310c9bcae737a48ef5cdee3174184e6d548b292739ede61a1f955ef76a738861" +checksum = "c0cf008e5e1a9e9e22a7d3c9a4992e21a350290069e36d8fb72304ed17e8f2d2" dependencies = [ "brotli", "flate2", @@ -283,9 +283,9 @@ dependencies = [ [[package]] name = "async-std" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615" +checksum = 
"730294c1c08c2e0f85759590518f6333f0d5a0a766a27d519c1b244c3dfd8a24" dependencies = [ "async-attributes", "async-channel 1.9.0", @@ -316,9 +316,9 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.87" +version = "0.1.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d556ec1359574147ec0c4fc5eb525f3f23263a592b1a9c07e0a75b427de55c97" +checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", @@ -375,7 +375,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper", "tokio", - "tower 0.5.2", + "tower", "tower-layer", "tower-service", "tracing", @@ -431,7 +431,7 @@ dependencies = [ "serde", "serde_html_form", "serde_path_to_error", - "tower 0.5.2", + "tower", "tower-layer", "tower-service", ] @@ -449,16 +449,15 @@ dependencies = [ [[package]] name = "axum-server" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56bac90848f6a9393ac03c63c640925c4b7c8ca21654de40d53f55964667c7d8" +checksum = "495c05f60d6df0093e8fb6e74aa5846a0ad06abaf96d76166283720bf740f8ab" dependencies = [ "arc-swap", "bytes", - "futures-util", + "fs-err", "http", "http-body", - "http-body-util", "hyper", "hyper-util", "pin-project-lite", @@ -467,7 +466,6 @@ dependencies = [ "rustls-pki-types", "tokio", "tokio-rustls", - "tower 0.4.13", "tower-service", ] @@ -798,9 +796,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.5.5" +version = "1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5430e3be710b68d984d1391c854eb431a9d548640711faa54eecb1df93db91cc" +checksum = "b2b74d67a0fc0af8e9823b79fd1c43a0900e5a8f0e0f4cc9210796bf3a820126" dependencies = [ "borsh-derive", "cfg_aliases", @@ -808,9 +806,9 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.5.5" +version = "1.5.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8b668d39970baad5356d7c83a86fee3a539e6f93bf6764c97368243e17a0487" +checksum = "2d37ed1b2c9b78421218a0b4f6d8349132d6ec2cfeba1cfb0118b0a8e268df9e" dependencies = [ "once_cell", "proc-macro-crate", @@ -927,9 +925,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.16" +version = "1.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" +checksum = "1fcb57c740ae1daf453ae85f16e37396f672b039e00d9d866e07ddb24e328e3a" dependencies = [ "jobserver", "libc", @@ -1310,9 +1308,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.11" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "28cfac68e08048ae1883171632c2aef3ebc555621ae56fbccce1cbf22dd7f058" dependencies = [ "powerfmt", "serde", @@ -1532,9 +1530,9 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" [[package]] name = "foreign-types" @@ -1632,6 +1630,16 @@ dependencies = [ "syn 2.0.100", ] +[[package]] +name = "fs-err" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f89bda4c2a21204059a977ed3bfe746677dfd137b83c339e702b0ac91d482aa" +dependencies = [ + "autocfg", + "tokio", +] + [[package]] name = "funty" version = "2.0.0" @@ -1769,14 +1777,14 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" dependencies = [ "cfg-if", "libc", - "wasi 0.13.3+wasi-0.2.2", - "windows-targets 0.52.6", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", ] [[package]] @@ -1824,9 +1832,9 @@ dependencies = [ [[package]] name = "half" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +checksum = "7db2ff139bba50379da6aa0766b52fdcb62cb5b263009b09ed58ba604e14bbd1" dependencies = [ "cfg-if", "crunchy", @@ -1908,9 +1916,9 @@ dependencies = [ [[package]] name = "http" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", "fnv", @@ -1929,12 +1937,12 @@ dependencies = [ [[package]] name = "http-body-util" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", - "futures-util", + "futures-core", "http", "http-body", "pin-project-lite", @@ -2057,14 +2065,15 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.61" +version = "0.1.62" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" +checksum = "b2fd658b06e56721792c5df4475705b6cda790e9298d19d2f8af083457bcd127" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", + "log", "wasm-bindgen", "windows-core", ] @@ -2396,9 +2405,9 @@ dependencies = [ [[package]] name = 
"libz-sys" -version = "1.1.21" +version = "1.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" +checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" dependencies = [ "cc", "pkg-config", @@ -2413,9 +2422,9 @@ checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "linux-raw-sys" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9c683daf087dc577b7506e9695b3d556a9f3849903fa28186283afd6809e9" +checksum = "fe7db12097d22ec582439daf8618b8fdd1a7bef6270e9af3b1ebcd30893cf413" [[package]] name = "litemap" @@ -2746,9 +2755,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.21.0" +version = "1.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde51589ab56b20a6f686b2c68f7a0bd6add753d697abf720d63f8db3ab7b1ad" +checksum = "d75b0bedcc4fe52caa0e03d9f1151a323e4aa5e2d78ba3580400cd3c9e2bc4bc" [[package]] name = "oorandom" @@ -2937,26 +2946,6 @@ dependencies = [ "siphasher", ] -[[package]] -name = "pin-project" -version = "1.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" -dependencies = [ - "pin-project-internal", -] - -[[package]] -name = "pin-project-internal" -version = "1.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.100", -] - [[package]] name = "pin-project-lite" version = "0.2.16" @@ -3047,7 +3036,7 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.8.23", + "zerocopy 0.8.24", ] [[package]] @@ 
-3162,13 +3151,19 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.39" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1f1914ce909e1658d9907913b4b91947430c7d9be598b15a1912935b8c04801" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" + [[package]] name = "r2d2" version = "0.8.10" @@ -3226,7 +3221,7 @@ checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", - "zerocopy 0.8.23", + "zerocopy 0.8.24", ] [[package]] @@ -3264,7 +3259,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.1", + "getrandom 0.3.2", ] [[package]] @@ -3351,9 +3346,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.12" +version = "0.12.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43e734407157c3c2034e0258f5e4473ddb361b1e85f95a66690d67264d7cd1da" +checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb" dependencies = [ "base64 0.22.1", "bytes", @@ -3384,7 +3379,7 @@ dependencies = [ "system-configuration", "tokio", "tokio-native-tls", - "tower 0.5.2", + "tower", "tower-service", "url", "wasm-bindgen", @@ -3395,9 +3390,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.13" +version = "0.17.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ac5d832aa16abd7d1def883a8545280c20a60f523a370aa3a9617c2b8550ee" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", @@ -3492,9 +3487,9 @@ 
dependencies = [ [[package]] name = "rust_decimal" -version = "1.36.0" +version = "1.37.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b082d80e3e3cc52b2ed634388d436fe1f4de6af5786cc2de9ba9737527bdf555" +checksum = "faa7de2ba56ac291bd90c6b9bece784a52ae1411f9506544b3eae36dd2356d50" dependencies = [ "arrayvec", "borsh", @@ -3542,22 +3537,22 @@ dependencies = [ [[package]] name = "rustix" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7178faa4b75a30e269c71e61c353ce2748cf3d76f0c44c393f4e60abf49b825" +checksum = "e56a18552996ac8d29ecc3b190b4fdbb2d91ca4ec396de7bbffaf43f3d637e96" dependencies = [ "bitflags 2.9.0", "errno", "libc", - "linux-raw-sys 0.9.2", + "linux-raw-sys 0.9.3", "windows-sys 0.59.0", ] [[package]] name = "rustls" -version = "0.23.23" +version = "0.23.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" +checksum = "822ee9188ac4ec04a2f0531e55d035fb2de73f18b41a63c70c2712503b6fb13c" dependencies = [ "once_cell", "ring", @@ -3596,9 +3591,9 @@ checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" [[package]] name = "rustls-webpki" -version = "0.102.8" +version = "0.103.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" +checksum = "0aa4eeac2588ffff23e9d7a7e9b3f971c5fb5b7ebc9452745e0c232c64f83b2f" dependencies = [ "ring", "rustls-pki-types", @@ -4069,15 +4064,14 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.18.0" +version = "3.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c317e0a526ee6120d8dabad239c8dadca62b24b6f168914bbbc8e2fb1f0e567" +checksum = "7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf" dependencies = [ - "cfg-if", "fastrand", - "getrandom 0.3.1", + "getrandom 
0.3.2", "once_cell", - "rustix 1.0.2", + "rustix 1.0.3", "windows-sys 0.59.0", ] @@ -4177,9 +4171,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.39" +version = "0.3.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dad298b01a40a23aac4580b67e3dbedb7cc8402f3592d7f49469de2ea4aecdd8" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" dependencies = [ "deranged", "itoa", @@ -4192,15 +4186,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765c97a5b985b7c11d7bc27fa927dc4fe6af3a6dfb021d28deb60d3bf51e76ef" +checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" [[package]] name = "time-macros" -version = "0.2.20" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8093bc3e81c3bc5f7879de09619d06c9a5a5e45ca44dfeeb7225bae38005c5c" +checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" dependencies = [ "num-conv", "time-core", @@ -4243,9 +4237,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.44.0" +version = "1.44.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9975ea0f48b5aa3972bf2d888c238182458437cc2a19374b81b25cdf1023fb3a" +checksum = "f382da615b842244d4b8738c82ed1275e6c5dd90c459a30941cd07080b06c91a" dependencies = [ "backtrace", "bytes", @@ -4317,9 +4311,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.13" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" +checksum = "6b9590b93e6fcc1739458317cccd391ad3955e2bde8913edf6f95f9e65a8f034" dependencies = [ "bytes", "futures-core", @@ -4418,7 +4412,7 @@ dependencies = [ "torrust-tracker-configuration", 
"torrust-tracker-primitives", "torrust-tracker-test-helpers", - "tower 0.5.2", + "tower", "tower-http", "tracing", "uuid", @@ -4457,7 +4451,7 @@ dependencies = [ "torrust-tracker-primitives", "torrust-tracker-test-helpers", "torrust-udp-tracker-server", - "tower 0.5.2", + "tower", "tower-http", "tracing", "url", @@ -4480,7 +4474,7 @@ dependencies = [ "torrust-server-lib", "torrust-tracker-configuration", "torrust-tracker-located-error", - "tower 0.5.2", + "tower", "tracing", ] @@ -4701,21 +4695,6 @@ dependencies = [ "zerocopy 0.7.35", ] -[[package]] -name = "tower" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" -dependencies = [ - "futures-core", - "futures-util", - "pin-project", - "pin-project-lite", - "tower-layer", - "tower-service", - "tracing", -] - [[package]] name = "tower" version = "0.5.2" @@ -4918,11 +4897,11 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.15.1" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0f540e3240398cce6128b64ba83fdbdd86129c16a3aa1a3a252efd66eb3d587" +checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" dependencies = [ - "getrandom 0.3.1", + "getrandom 0.3.2", "rand 0.9.0", ] @@ -4977,9 +4956,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasi" -version = "0.13.3+wasi-0.2.2" +version = "0.14.2+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" dependencies = [ "wit-bindgen-rt", ] @@ -5107,38 +5086,37 @@ dependencies = [ [[package]] name = "windows-link" -version = "0.1.0" +version = "0.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dccfd733ce2b1753b03b6d3c65edf020262ea35e20ccdf3e288043e6dd620e3" +checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" [[package]] name = "windows-registry" -version = "0.2.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" dependencies = [ "windows-result", "windows-strings", - "windows-targets 0.52.6", + "windows-targets 0.53.0", ] [[package]] name = "windows-result" -version = "0.2.0" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" dependencies = [ - "windows-targets 0.52.6", + "windows-link", ] [[package]] name = "windows-strings" -version = "0.1.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" dependencies = [ - "windows-result", - "windows-targets 0.52.6", + "windows-link", ] [[package]] @@ -5192,13 +5170,29 @@ dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm", + "windows_i686_gnullvm 0.52.6", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] +[[package]] +name = "windows-targets" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +dependencies = [ + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + 
"windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -5211,6 +5205,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + [[package]] name = "windows_aarch64_msvc" version = "0.48.5" @@ -5223,6 +5223,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + [[package]] name = "windows_i686_gnu" version = "0.48.5" @@ -5235,12 +5241,24 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + [[package]] name = 
"windows_i686_msvc" version = "0.48.5" @@ -5253,6 +5271,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + [[package]] name = "windows_x86_64_gnu" version = "0.48.5" @@ -5265,6 +5289,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" @@ -5277,6 +5307,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + [[package]] name = "windows_x86_64_msvc" version = "0.48.5" @@ -5289,20 +5325,26 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + [[package]] name = "winnow" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0e7f4ea97f6f78012141bcdb6a216b2609f0979ada50b20ca5b52dde2eac2bb1" +checksum = "0e97b544156e9bebe1a0ffbc03484fc1ffe3100cbce3ffb17eac35f7cdd7ab36" dependencies = [ "memchr", ] [[package]] name = "wit-bindgen-rt" -version = "0.33.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ "bitflags 2.9.0", ] @@ -5335,7 +5377,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d65cbf2f12c15564212d48f4e3dfb87923d25d611f2aed18f4cb23f0413d89e" dependencies = [ "libc", - "rustix 1.0.2", + "rustix 1.0.3", ] [[package]] @@ -5380,11 +5422,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.23" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd97444d05a4328b90e75e503a34bad781f14e28a823ad3557f0750df1ebcbc6" +checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879" dependencies = [ - "zerocopy-derive 0.8.23", + "zerocopy-derive 0.8.24", ] [[package]] @@ -5400,9 +5442,9 @@ dependencies = [ [[package]] name = "zerocopy-derive" -version = "0.8.23" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6352c01d0edd5db859a63e2605f4ea3183ddbd15e2c4a9e7d32184df75e4f154" +checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" dependencies = [ "proc-macro2", "quote", @@ -5469,18 +5511,18 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "7.2.3" +version = "7.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3051792fbdc2e1e143244dc28c60f73d8470e93f3f9cbd0ead44da5ed802722" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.14+zstd.1.5.7" +version = 
"2.0.15+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb060d4926e4ac3a3ad15d864e99ceb5f343c6b34f5bd6d81ae6ed417311be5" +checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" dependencies = [ "cc", "pkg-config", From a6608cbb8bc0aa1d3207d8a69bdcfa785cc463fe Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 24 Mar 2025 08:26:15 +0000 Subject: [PATCH 416/802] fix: clippy errors --- packages/rest-tracker-api-client/src/v1/client.rs | 11 ++++++----- packages/tracker-client/src/http/client/mod.rs | 13 +++++++------ src/bootstrap/jobs/health_check_api.rs | 2 +- src/bootstrap/jobs/tracker_apis.rs | 2 +- 4 files changed, 15 insertions(+), 13 deletions(-) diff --git a/packages/rest-tracker-api-client/src/v1/client.rs b/packages/rest-tracker-api-client/src/v1/client.rs index da1b709da..3137b8b41 100644 --- a/packages/rest-tracker-api-client/src/v1/client.rs +++ b/packages/rest-tracker-api-client/src/v1/client.rs @@ -16,10 +16,11 @@ const API_PATH: &str = "api/v1/"; const DEFAULT_REQUEST_TIMEOUT_IN_SECS: u64 = 5; /// API Client +#[allow(clippy::struct_field_names)] pub struct Client { connection_info: ConnectionInfo, base_path: String, - client: reqwest::Client, + http_client: reqwest::Client, } impl Client { @@ -34,7 +35,7 @@ impl Client { Ok(Self { connection_info, base_path: API_PATH.to_string(), - client, + http_client: client, }) } @@ -92,7 +93,7 @@ impl Client { /// /// Will panic if the request can't be sent pub async fn post_empty(&self, path: &str, headers: Option) -> Response { - let builder = self.client.post(self.base_url(path).clone()); + let builder = self.http_client.post(self.base_url(path).clone()); let builder = match headers { Some(headers) => builder.headers(headers), @@ -111,7 +112,7 @@ impl Client { /// /// Will panic if the request can't be sent pub async fn post_form(&self, path: &str, form: &T, headers: Option) -> Response { - let builder = 
self.client.post(self.base_url(path).clone()).json(&form); + let builder = self.http_client.post(self.base_url(path).clone()).json(&form); let builder = match headers { Some(headers) => builder.headers(headers), @@ -130,7 +131,7 @@ impl Client { /// /// Will panic if the request can't be sent async fn delete(&self, path: &str, headers: Option) -> Response { - let builder = self.client.delete(self.base_url(path).clone()); + let builder = self.http_client.delete(self.base_url(path).clone()); let builder = match headers { Some(headers) => builder.headers(headers), diff --git a/packages/tracker-client/src/http/client/mod.rs b/packages/tracker-client/src/http/client/mod.rs index 3c904a7c9..50e979c79 100644 --- a/packages/tracker-client/src/http/client/mod.rs +++ b/packages/tracker-client/src/http/client/mod.rs @@ -23,8 +23,9 @@ pub enum Error { } /// HTTP Tracker Client +#[allow(clippy::struct_field_names)] pub struct Client { - client: reqwest::Client, + http_client: reqwest::Client, base_url: Url, key: Option, } @@ -49,7 +50,7 @@ impl Client { Ok(Self { base_url, - client, + http_client: client, key: None, }) } @@ -68,7 +69,7 @@ impl Client { Ok(Self { base_url, - client, + http_client: client, key: None, }) } @@ -84,7 +85,7 @@ impl Client { Ok(Self { base_url, - client, + http_client: client, key: Some(key), }) } @@ -159,7 +160,7 @@ impl Client { /// /// This method fails if there was an error while sending request. pub async fn get(&self, path: &str) -> Result { - self.client + self.http_client .get(self.build_url(path)) .send() .await @@ -170,7 +171,7 @@ impl Client { /// /// This method fails if there was an error while sending request. 
pub async fn get_with_header(&self, path: &str, key: &str, value: &str) -> Result { - self.client + self.http_client .get(self.build_url(path)) .header(key, value) .send() diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs index 5d342a7f0..7c529fadd 100644 --- a/src/bootstrap/jobs/health_check_api.rs +++ b/src/bootstrap/jobs/health_check_api.rs @@ -3,7 +3,7 @@ //! The [`health_check_api::start_job`](crate::bootstrap::jobs::health_check_api::start_job) //! function starts the Health Check REST API. //! -//! The [`health_check_api::start_job`](crate::bootstrap::jobs::health_check_api::start_job) +//! The [`health_check_api::start_job`](crate::bootstrap::jobs::health_check_api::start_job) //! function spawns a new asynchronous task, that tasks is the "**launcher**". //! The "**launcher**" starts the actual server and sends a message back //! to the main application. diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index d152e853f..9f3964c20 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -7,7 +7,7 @@ //! > versions. API consumers can choose which version to use. The API version is //! > part of the URL, for example: `http://localhost:1212/api/v1/stats`. //! -//! The [`tracker_apis::start_job`](crate::bootstrap::jobs::tracker_apis::start_job) +//! The [`tracker_apis::start_job`](crate::bootstrap::jobs::tracker_apis::start_job) //! function spawns a new asynchronous task, that tasks is the "**launcher**". //! The "**launcher**" starts the actual server and sends a message back //! to the main application. 
The main application waits until receives From 85109900a9e8c6d943246e0cfd8fc32ddcf22d47 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 24 Mar 2025 12:15:47 +0000 Subject: [PATCH 417/802] test: [#1407] add test for global metrics with 2 http trackers A new integration test that checks that the global metrics are udapted when you run 2 HTTP trackers. Ony one metric is checked in this test. It uses fixed port that migth conflict with other running instances in the future. We should use a random free port if we run more integration tests like this in the future. --- Cargo.lock | 2 + Cargo.toml | 2 + src/app.rs | 2 + tests/servers/api/contract/mod.rs | 1 + tests/servers/api/contract/stats/mod.rs | 95 +++++++++++++++++++++++++ tests/servers/api/mod.rs | 1 + tests/servers/mod.rs | 1 + 7 files changed, 104 insertions(+) create mode 100644 tests/servers/api/contract/mod.rs create mode 100644 tests/servers/api/contract/stats/mod.rs create mode 100644 tests/servers/api/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 076449944..055c02a9e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4521,6 +4521,8 @@ dependencies = [ "anyhow", "axum-server", "bittorrent-http-tracker-core", + "bittorrent-primitives", + "bittorrent-tracker-client", "bittorrent-tracker-core", "bittorrent-udp-tracker-core", "chrono", diff --git a/Cargo.toml b/Cargo.toml index bcac4bf66..91393ad72 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -60,6 +60,8 @@ tracing = "0" tracing-subscriber = { version = "0", features = ["json"] } [dev-dependencies] +bittorrent-primitives = "0.1.0" +bittorrent-tracker-client = { version = "3.0.0-develop", path = "packages/tracker-client" } local-ip-address = "0" mockall = "0" torrust-rest-tracker-api-client = { version = "3.0.0-develop", path = "packages/rest-tracker-api-client" } diff --git a/src/app.rs b/src/app.rs index fb8a459ea..fcd650c24 100644 --- a/src/app.rs +++ b/src/app.rs @@ -138,6 +138,8 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> )); } + 
println!("Registar entries: {:?}", registar.entries()); + // Start Health Check API jobs.push(health_check_api::start_job(&config.health_check_api, registar.entries()).await); diff --git a/tests/servers/api/contract/mod.rs b/tests/servers/api/contract/mod.rs new file mode 100644 index 000000000..9d34677fc --- /dev/null +++ b/tests/servers/api/contract/mod.rs @@ -0,0 +1 @@ +pub mod stats; diff --git a/tests/servers/api/contract/stats/mod.rs b/tests/servers/api/contract/stats/mod.rs new file mode 100644 index 000000000..fa7b4e6aa --- /dev/null +++ b/tests/servers/api/contract/stats/mod.rs @@ -0,0 +1,95 @@ +use std::env; +use std::str::FromStr as _; +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_client::http::client::requests::announce::QueryBuilder; +use bittorrent_tracker_client::http::client::Client as HttpTrackerClient; +use reqwest::Url; +use serde::Deserialize; +use tokio::time::Duration; +use torrust_rest_tracker_api_client::connection_info::{ConnectionInfo, Origin}; +use torrust_rest_tracker_api_client::v1::client::Client as TrackerApiClient; +use torrust_tracker_lib::{app, bootstrap}; + +#[tokio::test] +async fn the_stats_api_endpoint_should_return_the_global_stats() { + // Logging must be OFF otherwise your will get the following error: + // `Unable to install global subscriber: SetGlobalDefaultError("a global default trace dispatcher has already been set")` + // That's because we can't initialize the logger twice. + // You can enable it if you run only this test. 
+ let config_with_two_http_trackers = r#" + [metadata] + app = "torrust-tracker" + purpose = "configuration" + schema_version = "2.0.0" + + [logging] + threshold = "off" + + [core] + listed = false + private = false + + [[http_trackers]] + bind_address = "0.0.0.0:7272" + tracker_usage_statistics = true + + [[http_trackers]] + bind_address = "0.0.0.0:7373" + tracker_usage_statistics = true + + [http_api] + bind_address = "0.0.0.0:1414" + + [http_api.access_tokens] + admin = "MyAccessToken" + "#; + + env::set_var("TORRUST_TRACKER_CONFIG_TOML", config_with_two_http_trackers); + + let (config, app_container) = bootstrap::app::setup(); + + let app_container = Arc::new(app_container); + + let _jobs = app::start(&config, &app_container).await; + + announce_to_tracker("http://127.0.0.1:7272").await; + announce_to_tracker("http://127.0.0.1:7373").await; + + let partial_metrics = get_partial_metrics("http://127.0.0.1:1414", "MyAccessToken").await; + + assert_eq!(partial_metrics.tcp4_announces_handled, 2); +} + +/// Make a sample announce request to the tracker. +async fn announce_to_tracker(tracker_url: &str) { + let response = HttpTrackerClient::new(Url::parse(tracker_url).unwrap(), Duration::from_secs(1)) + .unwrap() + .announce( + &QueryBuilder::with_default_values() + .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) // DevSkim: ignore DS173237 + .query(), + ) + .await; + + assert!(response.is_ok()); +} + +/// Metrics only relevant to the test. 
+#[derive(Deserialize)] +struct PartialMetrics { + tcp4_announces_handled: u64, +} + +async fn get_partial_metrics(aip_url: &str, token: &str) -> PartialMetrics { + let response = TrackerApiClient::new(ConnectionInfo::authenticated(Origin::new(aip_url).unwrap(), token)) + .unwrap() + .get_tracker_statistics(None) + .await; + + response + .json::() + .await + .expect("Failed to parse JSON response") +} diff --git a/tests/servers/api/mod.rs b/tests/servers/api/mod.rs new file mode 100644 index 000000000..2943dbb50 --- /dev/null +++ b/tests/servers/api/mod.rs @@ -0,0 +1 @@ +pub mod contract; diff --git a/tests/servers/mod.rs b/tests/servers/mod.rs index 7aeefeec4..0bbd5c433 100644 --- a/tests/servers/mod.rs +++ b/tests/servers/mod.rs @@ -1 +1,2 @@ +pub mod api; pub mod health_check_api; From eeea77a9c61d59ef90124639519b8c945b36d1e4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 24 Mar 2025 12:24:26 +0000 Subject: [PATCH 418/802] chore: remove sample integration test now that we have a real one. --- tests/integration.rs | 7 ++++--- tests/servers/health_check_api.rs | 32 ------------------------------- tests/servers/mod.rs | 1 - 3 files changed, 4 insertions(+), 36 deletions(-) delete mode 100644 tests/servers/health_check_api.rs diff --git a/tests/integration.rs b/tests/integration.rs index 6a139e047..92289c415 100644 --- a/tests/integration.rs +++ b/tests/integration.rs @@ -1,13 +1,14 @@ //! Scaffolding for integration tests. //! +//! Integration tests are used to test the interaction between multiple modules, +//! multiple running trackers, etc. Tests for one specific module should be in +//! the corresponding package. +//! //! ```text //! cargo test --test integration //! ``` mod servers; -// todo: there is only one test example that was copied from other package. -// We have to add tests for the whole app. - use torrust_tracker_clock::clock; /// This code needs to be copied into each crate. 
diff --git a/tests/servers/health_check_api.rs b/tests/servers/health_check_api.rs deleted file mode 100644 index 0e66014da..000000000 --- a/tests/servers/health_check_api.rs +++ /dev/null @@ -1,32 +0,0 @@ -use reqwest::Response; -use torrust_axum_health_check_api_server::environment::Started; -use torrust_axum_health_check_api_server::resources::{Report, Status}; -use torrust_server_lib::registar::Registar; -use torrust_tracker_test_helpers::{configuration, logging}; - -pub async fn get(path: &str) -> Response { - reqwest::Client::builder().build().unwrap().get(path).send().await.unwrap() -} - -#[tokio::test] -async fn the_health_check_endpoint_should_return_status_ok_when_there_is_not_any_service_registered() { - logging::setup(); - - let configuration = configuration::ephemeral_with_no_services(); - - let env = Started::new(&configuration.health_check_api.into(), Registar::default()).await; - - let response = get(&format!("http://{}/health_check", env.state.binding)).await; // DevSkim: ignore DS137138 - - assert_eq!(response.status(), 200); - assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); - - let report = response - .json::() - .await - .expect("it should be able to get the report as json"); - - assert_eq!(report.status, Status::None); - - env.stop().await.expect("it should stop the service"); -} diff --git a/tests/servers/mod.rs b/tests/servers/mod.rs index 0bbd5c433..e5fdf85ee 100644 --- a/tests/servers/mod.rs +++ b/tests/servers/mod.rs @@ -1,2 +1 @@ pub mod api; -pub mod health_check_api; From 398ad9bad7fa7af67ddfe8de5dbc356871ed51b4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 24 Mar 2025 12:33:50 +0000 Subject: [PATCH 419/802] refactor: remove duplicate code --- src/app.rs | 15 +++++++++++++-- src/console/profiling.rs | 2 +- src/main.rs | 10 ++-------- tests/servers/api/contract/stats/mod.rs | 9 ++------- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/src/app.rs b/src/app.rs index 
fcd650c24..007eb16d0 100644 --- a/src/app.rs +++ b/src/app.rs @@ -28,9 +28,20 @@ use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::Configuration; use tracing::instrument; +use crate::bootstrap; use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; use crate::container::AppContainer; +pub async fn run() -> (Arc, Vec>, Registar) { + let (config, app_container) = bootstrap::app::setup(); + + let app_container = Arc::new(app_container); + + let (jobs, registar) = start(&config, &app_container).await; + + (app_container, jobs, registar) +} + /// # Panics /// /// Will panic if: @@ -38,7 +49,7 @@ use crate::container::AppContainer; /// - Can't retrieve tracker keys from database. /// - Can't load whitelist from database. #[instrument(skip(config, app_container))] -pub async fn start(config: &Configuration, app_container: &Arc) -> Vec> { +pub async fn start(config: &Configuration, app_container: &Arc) -> (Vec>, Registar) { if config.http_api.is_none() && (config.udp_trackers.is_none() || config.udp_trackers.as_ref().map_or(true, std::vec::Vec::is_empty)) && (config.http_trackers.is_none() || config.http_trackers.as_ref().map_or(true, std::vec::Vec::is_empty)) @@ -143,5 +154,5 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> // Start Health Check API jobs.push(health_check_api::start_job(&config.health_check_api, registar.entries()).await); - jobs + (jobs, registar) } diff --git a/src/console/profiling.rs b/src/console/profiling.rs index f3829c073..ffbd835fb 100644 --- a/src/console/profiling.rs +++ b/src/console/profiling.rs @@ -184,7 +184,7 @@ pub async fn run() { let app_container = Arc::new(app_container); - let jobs = app::start(&config, &app_container).await; + let (jobs, _registar) = app::start(&config, &app_container).await; // Run the tracker for a fixed duration let run_duration = sleep(Duration::from_secs(duration_secs)); diff --git a/src/main.rs b/src/main.rs 
index 77f6e32a3..cc7c202c4 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,14 +1,8 @@ -use std::sync::Arc; - -use torrust_tracker_lib::{app, bootstrap}; +use torrust_tracker_lib::app; #[tokio::main] async fn main() { - let (config, app_container) = bootstrap::app::setup(); - - let app_container = Arc::new(app_container); - - let jobs = app::start(&config, &app_container).await; + let (_app_container, jobs, _registar) = app::run().await; // handle the signals tokio::select! { diff --git a/tests/servers/api/contract/stats/mod.rs b/tests/servers/api/contract/stats/mod.rs index fa7b4e6aa..c31ab1907 100644 --- a/tests/servers/api/contract/stats/mod.rs +++ b/tests/servers/api/contract/stats/mod.rs @@ -1,6 +1,5 @@ use std::env; use std::str::FromStr as _; -use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_client::http::client::requests::announce::QueryBuilder; @@ -10,7 +9,7 @@ use serde::Deserialize; use tokio::time::Duration; use torrust_rest_tracker_api_client::connection_info::{ConnectionInfo, Origin}; use torrust_rest_tracker_api_client::v1::client::Client as TrackerApiClient; -use torrust_tracker_lib::{app, bootstrap}; +use torrust_tracker_lib::app; #[tokio::test] async fn the_stats_api_endpoint_should_return_the_global_stats() { @@ -48,11 +47,7 @@ async fn the_stats_api_endpoint_should_return_the_global_stats() { env::set_var("TORRUST_TRACKER_CONFIG_TOML", config_with_two_http_trackers); - let (config, app_container) = bootstrap::app::setup(); - - let app_container = Arc::new(app_container); - - let _jobs = app::start(&config, &app_container).await; + let (_app_container, _jobs, _registar) = app::run().await; announce_to_tracker("http://127.0.0.1:7272").await; announce_to_tracker("http://127.0.0.1:7373").await; From 4e59dd7879b96a8f07c49725b2ab930d241d834b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 24 Mar 2025 12:36:35 +0000 Subject: [PATCH 420/802] refactor: [#1407] rename --- 
tests/servers/api/contract/stats/mod.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/servers/api/contract/stats/mod.rs b/tests/servers/api/contract/stats/mod.rs index c31ab1907..a645fd7e1 100644 --- a/tests/servers/api/contract/stats/mod.rs +++ b/tests/servers/api/contract/stats/mod.rs @@ -52,9 +52,9 @@ async fn the_stats_api_endpoint_should_return_the_global_stats() { announce_to_tracker("http://127.0.0.1:7272").await; announce_to_tracker("http://127.0.0.1:7373").await; - let partial_metrics = get_partial_metrics("http://127.0.0.1:1414", "MyAccessToken").await; + let global_stats = get_tracker_statistics("http://127.0.0.1:1414", "MyAccessToken").await; - assert_eq!(partial_metrics.tcp4_announces_handled, 2); + assert_eq!(global_stats.tcp4_announces_handled, 2); } /// Make a sample announce request to the tracker. @@ -71,20 +71,20 @@ async fn announce_to_tracker(tracker_url: &str) { assert!(response.is_ok()); } -/// Metrics only relevant to the test. +/// Global statistics with only metrics relevant to the test. 
#[derive(Deserialize)] -struct PartialMetrics { +struct PartialGlobalStatistics { tcp4_announces_handled: u64, } -async fn get_partial_metrics(aip_url: &str, token: &str) -> PartialMetrics { +async fn get_tracker_statistics(aip_url: &str, token: &str) -> PartialGlobalStatistics { let response = TrackerApiClient::new(ConnectionInfo::authenticated(Origin::new(aip_url).unwrap(), token)) .unwrap() .get_tracker_statistics(None) .await; response - .json::() + .json::() .await .expect("Failed to parse JSON response") } From aff065cbbde622decf8555bb870e07efb9b3dde6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 24 Mar 2025 13:10:21 +0000 Subject: [PATCH 421/802] fix: [#1407] docker build after adding new integration test --- .gitignore | 1 + tests/servers/api/contract/stats/mod.rs | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/.gitignore b/.gitignore index 8bfa717b7..fd83ee918 100644 --- a/.gitignore +++ b/.gitignore @@ -14,6 +14,7 @@ /tracker.toml callgrind.out codecov.json +integration_tests_sqlite3.db lcov.info perf.data* rustc-ice-*.txt diff --git a/tests/servers/api/contract/stats/mod.rs b/tests/servers/api/contract/stats/mod.rs index a645fd7e1..016a372dd 100644 --- a/tests/servers/api/contract/stats/mod.rs +++ b/tests/servers/api/contract/stats/mod.rs @@ -30,6 +30,10 @@ async fn the_stats_api_endpoint_should_return_the_global_stats() { listed = false private = false + [core.database] + driver = "sqlite3" + path = "./integration_tests_sqlite3.db" + [[http_trackers]] bind_address = "0.0.0.0:7272" tracker_usage_statistics = true From b53da0736078719364fa98e07071ce8adc90b63c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 24 Mar 2025 13:11:28 +0000 Subject: [PATCH 422/802] refactor: [#1407] remove duplicate code --- src/console/profiling.rs | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/src/console/profiling.rs b/src/console/profiling.rs index ffbd835fb..426712c34 100644 --- a/src/console/profiling.rs +++ 
b/src/console/profiling.rs @@ -157,12 +157,11 @@ //! kcachegrind callgrind.out //! ``` use std::env; -use std::sync::Arc; use std::time::Duration; use tokio::time::sleep; -use crate::{app, bootstrap}; +use crate::app; pub async fn run() { // Parse command line arguments @@ -180,11 +179,7 @@ pub async fn run() { return; }; - let (config, app_container) = bootstrap::app::setup(); - - let app_container = Arc::new(app_container); - - let (jobs, _registar) = app::start(&config, &app_container).await; + let (_app_container, jobs, _registar) = app::run().await; // Run the tracker for a fixed duration let run_duration = sleep(Duration::from_secs(duration_secs)); From af80adaa053d4b19ee8e65bffd0c256ed72e5469 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Mar 2025 10:16:24 +0000 Subject: [PATCH 423/802] refactor: [#1411] store instance containers in app container --- Cargo.lock | 1 + Cargo.toml | 1 + .../rest-tracker-api-core/src/container.rs | 2 +- src/app.rs | 16 +-- src/container.rs | 131 ++++++++++++++---- 5 files changed, 113 insertions(+), 38 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 055c02a9e..c4755225f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4535,6 +4535,7 @@ dependencies = [ "reqwest", "serde", "serde_json", + "thiserror 2.0.12", "tokio", "torrust-axum-health-check-api-server", "torrust-axum-http-tracker-server", diff --git a/Cargo.toml b/Cargo.toml index 91393ad72..9243ed483 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,6 +46,7 @@ regex = "1" reqwest = { version = "0", features = ["json"] } serde = { version = "1", features = ["derive"] } serde_json = { version = "1", features = ["preserve_order"] } +thiserror = "2.0.12" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-axum-health-check-api-server = { version = "3.0.0-develop", path = "packages/axum-health-check-api-server" } torrust-axum-http-tracker-server = { version = "3.0.0-develop", path = "packages/axum-http-tracker-server" } 
diff --git a/packages/rest-tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs index c6a46a195..be5e9d7f6 100644 --- a/packages/rest-tracker-api-core/src/container.rs +++ b/packages/rest-tracker-api-core/src/container.rs @@ -10,12 +10,12 @@ use torrust_tracker_configuration::{Core, HttpApi, HttpTracker, UdpTracker}; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; pub struct TrackerHttpApiCoreContainer { + pub http_api_config: Arc, pub tracker_core_container: Arc, pub http_stats_repository: Arc, pub ban_service: Arc>, pub udp_core_stats_repository: Arc, pub udp_server_stats_repository: Arc, - pub http_api_config: Arc, } impl TrackerHttpApiCoreContainer { diff --git a/src/app.rs b/src/app.rs index 007eb16d0..5eb162e18 100644 --- a/src/app.rs +++ b/src/app.rs @@ -90,9 +90,10 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> udp_tracker_config.bind_address ); } else { - let udp_tracker_config = Arc::new(udp_tracker_config.clone()); - let udp_tracker_container = Arc::new(app_container.udp_tracker_container(&udp_tracker_config)); - let udp_tracker_server_container = Arc::new(app_container.udp_tracker_server_container()); + let udp_tracker_container = app_container + .udp_tracker_container(udp_tracker_config.bind_address) + .expect("Could not create UDP tracker container"); + let udp_tracker_server_container = app_container.udp_tracker_server_container(); jobs.push( udp_tracker::start_job(udp_tracker_container, udp_tracker_server_container, registar.give_form()).await, @@ -106,8 +107,9 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> // Start the HTTP blocks if let Some(http_trackers) = &config.http_trackers { for http_tracker_config in http_trackers { - let http_tracker_config = Arc::new(http_tracker_config.clone()); - let http_tracker_container = Arc::new(app_container.http_tracker_container(&http_tracker_config)); + let http_tracker_container = app_container + 
.http_tracker_container(http_tracker_config.bind_address) + .expect("Could not create HTTP tracker container"); if let Some(job) = http_tracker::start_job( http_tracker_container, @@ -126,7 +128,7 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> // Start HTTP API if let Some(http_api_config) = &config.http_api { let http_api_config = Arc::new(http_api_config.clone()); - let http_api_container = Arc::new(app_container.tracker_http_api_container(&http_api_config)); + let http_api_container = app_container.tracker_http_api_container(&http_api_config); if let Some(job) = tracker_apis::start_job( http_api_container, @@ -149,8 +151,6 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> )); } - println!("Registar entries: {:?}", registar.entries()); - // Start Health Check API jobs.push(health_check_api::start_job(&config.health_check_api, registar.entries()).await); diff --git a/src/container.rs b/src/container.rs index d3253b5d9..e55a1d2f8 100644 --- a/src/container.rs +++ b/src/container.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; +use std::net::SocketAddr; use std::sync::Arc; use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; @@ -9,12 +11,22 @@ use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self, MAX_CONNECTION_ID_ERRORS_PER_IP}; use tokio::sync::RwLock; use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; -use torrust_tracker_configuration::{Configuration, HttpApi, HttpTracker, UdpTracker}; +use torrust_tracker_configuration::{Configuration, HttpApi}; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; use tracing::instrument; +#[derive(thiserror::Error, Debug, Clone)] +pub enum Error { + #[error("There is not a HTTP tracker server instance bound to the socket address: {bind_address}")] + MissingHttpTrackerCoreContainer { bind_address: SocketAddr }, + + #[error("There is not a UDP tracker server instance bound to 
the socket address: {bind_address}")] + MissingUdpTrackerCoreContainer { bind_address: SocketAddr }, +} + pub struct AppContainer { pub tracker_core_container: Arc, + pub http_api_config: Arc>, // UDP Tracker Core Services pub udp_core_stats_event_sender: Arc>>, @@ -33,6 +45,13 @@ pub struct AppContainer { // UDP Tracker Server Services pub udp_server_stats_event_sender: Arc>>, pub udp_server_stats_repository: Arc, + + // UDP Tracker Server Container + pub udp_tracker_server_container: Arc, + + // Tracker Instance Containers + pub http_tracker_containers: Arc>>, + pub udp_tracker_containers: Arc>>, } impl AppContainer { @@ -40,6 +59,8 @@ impl AppContainer { pub fn initialize(configuration: &Configuration) -> AppContainer { let core_config = Arc::new(configuration.core.clone()); + let http_api_config = Arc::new(configuration.http_api.clone()); + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); // HTTP Tracker Core Services @@ -86,8 +107,59 @@ impl AppContainer { let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); let udp_server_stats_repository = Arc::new(udp_server_stats_repository); + // UDP Tracker Server Container + let udp_tracker_server_container = Arc::new(UdpTrackerServerContainer { + udp_server_stats_event_sender: udp_server_stats_event_sender.clone(), + udp_server_stats_repository: udp_server_stats_repository.clone(), + }); + + // Tracker Instance Containers + + let mut http_tracker_containers = HashMap::new(); + + if let Some(http_trackers) = &configuration.http_trackers { + for http_tracker_config in http_trackers { + http_tracker_containers.insert( + http_tracker_config.bind_address, + Arc::new(HttpTrackerCoreContainer { + tracker_core_container: tracker_core_container.clone(), + http_tracker_config: Arc::new(http_tracker_config.clone()), + http_stats_event_sender: http_stats_event_sender.clone(), + http_stats_repository: http_stats_repository.clone(), + announce_service: 
http_announce_service.clone(), + scrape_service: http_scrape_service.clone(), + }), + ); + } + } + + let http_tracker_containers = Arc::new(http_tracker_containers); + + let mut udp_tracker_containers = HashMap::new(); + + if let Some(udp_trackers) = &configuration.udp_trackers { + for udp_tracker_config in udp_trackers { + udp_tracker_containers.insert( + udp_tracker_config.bind_address, + Arc::new(UdpTrackerCoreContainer { + tracker_core_container: tracker_core_container.clone(), + udp_tracker_config: Arc::new(udp_tracker_config.clone()), + udp_core_stats_event_sender: udp_core_stats_event_sender.clone(), + udp_core_stats_repository: udp_core_stats_repository.clone(), + ban_service: udp_ban_service.clone(), + connect_service: udp_connect_service.clone(), + announce_service: udp_announce_service.clone(), + scrape_service: udp_scrape_service.clone(), + }), + ); + } + } + + let udp_tracker_containers = Arc::new(udp_tracker_containers); + AppContainer { tracker_core_container, + http_api_config, // UDP Tracker Core Services udp_core_stats_event_sender, @@ -106,37 +178,45 @@ impl AppContainer { // UDP Tracker Server Services udp_server_stats_event_sender, udp_server_stats_repository, + + // UDP Tracker Server Container + udp_tracker_server_container, + + // Tracker Instance Containers + http_tracker_containers, + udp_tracker_containers, } } #[must_use] - pub fn http_tracker_container(&self, http_tracker_config: &Arc) -> HttpTrackerCoreContainer { - HttpTrackerCoreContainer { - tracker_core_container: self.tracker_core_container.clone(), - http_tracker_config: http_tracker_config.clone(), - http_stats_event_sender: self.http_stats_event_sender.clone(), - http_stats_repository: self.http_stats_repository.clone(), - announce_service: self.http_announce_service.clone(), - scrape_service: self.http_scrape_service.clone(), + pub fn udp_tracker_server_container(&self) -> Arc { + self.udp_tracker_server_container.clone() + } + + /// # Errors + /// + /// Return an error if 
there is no HTTP tracker server instance bound to the + /// socket address. + pub fn http_tracker_container(&self, bind_address: SocketAddr) -> Result, Error> { + match self.http_tracker_containers.get(&bind_address) { + Some(http_tracker_container) => Ok(http_tracker_container.clone()), + None => Err(Error::MissingHttpTrackerCoreContainer { bind_address }), } } - #[must_use] - pub fn udp_tracker_container(&self, udp_tracker_config: &Arc) -> UdpTrackerCoreContainer { - UdpTrackerCoreContainer { - tracker_core_container: self.tracker_core_container.clone(), - udp_tracker_config: udp_tracker_config.clone(), - udp_core_stats_event_sender: self.udp_core_stats_event_sender.clone(), - udp_core_stats_repository: self.udp_core_stats_repository.clone(), - ban_service: self.udp_ban_service.clone(), - connect_service: self.udp_connect_service.clone(), - announce_service: self.udp_announce_service.clone(), - scrape_service: self.udp_scrape_service.clone(), + /// # Errors + /// + /// Return an error if there is no UDP tracker server instance bound to the + /// socket address. 
+ pub fn udp_tracker_container(&self, bind_address: SocketAddr) -> Result, Error> { + match self.udp_tracker_containers.get(&bind_address) { + Some(udp_tracker_container) => Ok(udp_tracker_container.clone()), + None => Err(Error::MissingUdpTrackerCoreContainer { bind_address }), } } #[must_use] - pub fn tracker_http_api_container(&self, http_api_config: &Arc) -> TrackerHttpApiCoreContainer { + pub fn tracker_http_api_container(&self, http_api_config: &Arc) -> Arc { TrackerHttpApiCoreContainer { tracker_core_container: self.tracker_core_container.clone(), http_api_config: http_api_config.clone(), @@ -145,13 +225,6 @@ impl AppContainer { udp_core_stats_repository: self.udp_core_stats_repository.clone(), udp_server_stats_repository: self.udp_server_stats_repository.clone(), } - } - - #[must_use] - pub fn udp_tracker_server_container(&self) -> UdpTrackerServerContainer { - UdpTrackerServerContainer { - udp_server_stats_event_sender: self.udp_server_stats_event_sender.clone(), - udp_server_stats_repository: self.udp_server_stats_repository.clone(), - } + .into() } } From fdf2055ef0ffa98af765e272e4fa2c56a9ee09a9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Mar 2025 11:01:00 +0000 Subject: [PATCH 424/802] refactor: [#1411] remove duplicate code for HttpTrackerCoreServices initialization --- .../src/environment.rs | 3 +- .../src/environment.rs | 2 +- packages/http-tracker-core/src/container.rs | 58 ++++++++++++++----- .../rest-tracker-api-core/src/container.rs | 3 +- src/container.rs | 52 ++++------------- 5 files changed, 59 insertions(+), 59 deletions(-) diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index 81f0a1ef3..a89d9af08 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -115,7 +115,8 @@ impl EnvContainer { let http_tracker_config = Arc::new(http_tracker_config[0].clone()); let tracker_core_container = 
Arc::new(TrackerCoreContainer::initialize(&core_config)); - let http_tracker_container = HttpTrackerCoreContainer::initialize_from(&tracker_core_container, &http_tracker_config); + let http_tracker_container = + HttpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &http_tracker_config); Self { tracker_core_container, diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index c2d89e064..96295a5d3 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -174,7 +174,7 @@ impl EnvContainer { let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); let http_tracker_core_container = - HttpTrackerCoreContainer::initialize_from(&tracker_core_container, &http_tracker_config); + HttpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &http_tracker_config); let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from(&tracker_core_container, &udp_tracker_config); let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index ce577f1d8..7fc2f48a6 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -5,11 +5,14 @@ use torrust_tracker_configuration::{Core, HttpTracker}; use crate::services::announce::AnnounceService; use crate::services::scrape::ScrapeService; -use crate::{event, statistics}; +use crate::{event, services, statistics}; pub struct HttpTrackerCoreContainer { - pub tracker_core_container: Arc, pub http_tracker_config: Arc, + + pub tracker_core_container: Arc, + + // `HttpTrackerCoreServices` pub http_stats_event_sender: Arc>>, pub http_stats_repository: Arc, pub announce_service: Arc, @@ -20,28 +23,57 @@ impl HttpTrackerCoreContainer 
{ #[must_use] pub fn initialize(core_config: &Arc, http_tracker_config: &Arc) -> Arc { let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(core_config)); - Self::initialize_from(&tracker_core_container, http_tracker_config) + Self::initialize_from_tracker_core(&tracker_core_container, http_tracker_config) } #[must_use] - pub fn initialize_from( + pub fn initialize_from_tracker_core( tracker_core_container: &Arc, http_tracker_config: &Arc, ) -> Arc { + let http_tracker_core_services = HttpTrackerCoreServices::initialize_from(tracker_core_container); + Self::initialize_from_services(tracker_core_container, &http_tracker_core_services, http_tracker_config) + } + + #[must_use] + pub fn initialize_from_services( + tracker_core_container: &Arc, + http_tracker_core_services: &Arc, + http_tracker_config: &Arc, + ) -> Arc { + Arc::new(Self { + tracker_core_container: tracker_core_container.clone(), + http_tracker_config: http_tracker_config.clone(), + http_stats_event_sender: http_tracker_core_services.http_stats_event_sender.clone(), + http_stats_repository: http_tracker_core_services.http_stats_repository.clone(), + announce_service: http_tracker_core_services.http_announce_service.clone(), + scrape_service: http_tracker_core_services.http_scrape_service.clone(), + }) + } +} + +pub struct HttpTrackerCoreServices { + pub http_stats_event_sender: Arc>>, + pub http_stats_repository: Arc, + pub http_announce_service: Arc, + pub http_scrape_service: Arc, +} + +impl HttpTrackerCoreServices { + #[must_use] + pub fn initialize_from(tracker_core_container: &Arc) -> Arc { let (http_stats_event_sender, http_stats_repository) = statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); let http_stats_event_sender = Arc::new(http_stats_event_sender); let http_stats_repository = Arc::new(http_stats_repository); - - let announce_service = Arc::new(AnnounceService::new( + let http_announce_service = Arc::new(AnnounceService::new( 
tracker_core_container.core_config.clone(), tracker_core_container.announce_handler.clone(), tracker_core_container.authentication_service.clone(), tracker_core_container.whitelist_authorization.clone(), http_stats_event_sender.clone(), )); - - let scrape_service = Arc::new(ScrapeService::new( + let http_scrape_service = Arc::new(ScrapeService::new( tracker_core_container.core_config.clone(), tracker_core_container.scrape_handler.clone(), tracker_core_container.authentication_service.clone(), @@ -49,12 +81,10 @@ impl HttpTrackerCoreContainer { )); Arc::new(Self { - tracker_core_container: tracker_core_container.clone(), - http_tracker_config: http_tracker_config.clone(), - http_stats_event_sender: http_stats_event_sender.clone(), - http_stats_repository: http_stats_repository.clone(), - announce_service: announce_service.clone(), - scrape_service: scrape_service.clone(), + http_stats_event_sender, + http_stats_repository, + http_announce_service, + http_scrape_service, }) } } diff --git a/packages/rest-tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs index be5e9d7f6..040c16b26 100644 --- a/packages/rest-tracker-api-core/src/container.rs +++ b/packages/rest-tracker-api-core/src/container.rs @@ -27,7 +27,8 @@ impl TrackerHttpApiCoreContainer { http_api_config: &Arc, ) -> Arc { let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(core_config)); - let http_tracker_core_container = HttpTrackerCoreContainer::initialize_from(&tracker_core_container, http_tracker_config); + let http_tracker_core_container = + HttpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, http_tracker_config); let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from(&tracker_core_container, udp_tracker_config); let udp_tracker_server_container = UdpTrackerServerContainer::initialize(core_config); diff --git a/src/container.rs b/src/container.rs index e55a1d2f8..f311453d1 100644 --- a/src/container.rs +++ 
b/src/container.rs @@ -2,9 +2,7 @@ use std::collections::HashMap; use std::net::SocketAddr; use std::sync::Arc; -use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; -use bittorrent_http_tracker_core::services::announce::AnnounceService; -use bittorrent_http_tracker_core::services::scrape::ScrapeService; +use bittorrent_http_tracker_core::container::{HttpTrackerCoreContainer, HttpTrackerCoreServices}; use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::services::banning::BanService; @@ -27,6 +25,7 @@ pub enum Error { pub struct AppContainer { pub tracker_core_container: Arc, pub http_api_config: Arc>, + pub http_tracker_core_services: Arc, // UDP Tracker Core Services pub udp_core_stats_event_sender: Arc>>, @@ -36,12 +35,6 @@ pub struct AppContainer { pub udp_announce_service: Arc, pub udp_scrape_service: Arc, - // HTTP Tracker Core Services - pub http_stats_event_sender: Arc>>, - pub http_stats_repository: Arc, - pub http_announce_service: Arc, - pub http_scrape_service: Arc, - // UDP Tracker Server Services pub udp_server_stats_event_sender: Arc>>, pub udp_server_stats_repository: Arc, @@ -63,24 +56,7 @@ impl AppContainer { let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); - // HTTP Tracker Core Services - let (http_stats_event_sender, http_stats_repository) = - bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); - let http_stats_event_sender = Arc::new(http_stats_event_sender); - let http_stats_repository = Arc::new(http_stats_repository); - let http_announce_service = Arc::new(AnnounceService::new( - tracker_core_container.core_config.clone(), - tracker_core_container.announce_handler.clone(), - tracker_core_container.authentication_service.clone(), - tracker_core_container.whitelist_authorization.clone(), - http_stats_event_sender.clone(), - )); 
- let http_scrape_service = Arc::new(ScrapeService::new( - tracker_core_container.core_config.clone(), - tracker_core_container.scrape_handler.clone(), - tracker_core_container.authentication_service.clone(), - http_stats_event_sender.clone(), - )); + let http_tracker_core_services = HttpTrackerCoreServices::initialize_from(&tracker_core_container); // UDP Tracker Core Services let (udp_core_stats_event_sender, udp_core_stats_repository) = @@ -121,14 +97,11 @@ impl AppContainer { for http_tracker_config in http_trackers { http_tracker_containers.insert( http_tracker_config.bind_address, - Arc::new(HttpTrackerCoreContainer { - tracker_core_container: tracker_core_container.clone(), - http_tracker_config: Arc::new(http_tracker_config.clone()), - http_stats_event_sender: http_stats_event_sender.clone(), - http_stats_repository: http_stats_repository.clone(), - announce_service: http_announce_service.clone(), - scrape_service: http_scrape_service.clone(), - }), + HttpTrackerCoreContainer::initialize_from_services( + &tracker_core_container, + &http_tracker_core_services, + &Arc::new(http_tracker_config.clone()), + ), ); } } @@ -160,6 +133,7 @@ impl AppContainer { AppContainer { tracker_core_container, http_api_config, + http_tracker_core_services, // UDP Tracker Core Services udp_core_stats_event_sender, @@ -169,12 +143,6 @@ impl AppContainer { udp_announce_service, udp_scrape_service, - // HTTP Tracker Core Services - http_stats_event_sender, - http_stats_repository, - http_announce_service, - http_scrape_service, - // UDP Tracker Server Services udp_server_stats_event_sender, udp_server_stats_repository, @@ -221,7 +189,7 @@ impl AppContainer { tracker_core_container: self.tracker_core_container.clone(), http_api_config: http_api_config.clone(), ban_service: self.udp_ban_service.clone(), - http_stats_repository: self.http_stats_repository.clone(), + http_stats_repository: self.http_tracker_core_services.http_stats_repository.clone(), udp_core_stats_repository: 
self.udp_core_stats_repository.clone(), udp_server_stats_repository: self.udp_server_stats_repository.clone(), } From 4c7feb5397690fb598f3795beaf68f9f9911d50a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Mar 2025 11:19:14 +0000 Subject: [PATCH 425/802] refactor: [#1411] remove duplicate code for UdpTrackerCoreServices initialization --- .../src/environment.rs | 3 +- .../rest-tracker-api-core/src/container.rs | 3 +- packages/udp-tracker-core/src/container.rs | 65 +++++++++++++++---- .../udp-tracker-server/src/environment.rs | 3 +- src/container.rs | 62 ++++-------------- 5 files changed, 70 insertions(+), 66 deletions(-) diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index 96295a5d3..275d72574 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -175,7 +175,8 @@ impl EnvContainer { let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); let http_tracker_core_container = HttpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &http_tracker_config); - let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from(&tracker_core_container, &udp_tracker_config); + let udp_tracker_core_container = + UdpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &udp_tracker_config); let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); let tracker_http_api_core_container = TrackerHttpApiCoreContainer::initialize_from( diff --git a/packages/rest-tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs index 040c16b26..329c77eed 100644 --- a/packages/rest-tracker-api-core/src/container.rs +++ b/packages/rest-tracker-api-core/src/container.rs @@ -29,7 +29,8 @@ impl TrackerHttpApiCoreContainer { let tracker_core_container = 
Arc::new(TrackerCoreContainer::initialize(core_config)); let http_tracker_core_container = HttpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, http_tracker_config); - let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from(&tracker_core_container, udp_tracker_config); + let udp_tracker_core_container = + UdpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, udp_tracker_config); let udp_tracker_server_container = UdpTrackerServerContainer::initialize(core_config); Self::initialize_from( diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index 2ab578151..79ce15d01 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -8,11 +8,14 @@ use crate::services::announce::AnnounceService; use crate::services::banning::BanService; use crate::services::connect::ConnectService; use crate::services::scrape::ScrapeService; -use crate::{event, statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; +use crate::{event, services, statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; pub struct UdpTrackerCoreContainer { - pub tracker_core_container: Arc, pub udp_tracker_config: Arc, + + pub tracker_core_container: Arc, + + // `UdpTrackerCoreServices` pub udp_core_stats_event_sender: Arc>>, pub udp_core_stats_repository: Arc, pub ban_service: Arc>, @@ -25,14 +28,52 @@ impl UdpTrackerCoreContainer { #[must_use] pub fn initialize(core_config: &Arc, udp_tracker_config: &Arc) -> Arc { let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(core_config)); - Self::initialize_from(&tracker_core_container, udp_tracker_config) + Self::initialize_from_tracker_core(&tracker_core_container, udp_tracker_config) } #[must_use] - pub fn initialize_from( + pub fn initialize_from_tracker_core( tracker_core_container: &Arc, udp_tracker_config: &Arc, ) -> Arc { + let udp_tracker_core_services = 
UdpTrackerCoreServices::initialize_from(tracker_core_container); + Self::initialize_from_services(tracker_core_container, &udp_tracker_core_services, udp_tracker_config) + } + + #[must_use] + pub fn initialize_from_services( + tracker_core_container: &Arc, + udp_tracker_core_services: &Arc, + udp_tracker_config: &Arc, + ) -> Arc { + Arc::new(Self { + udp_tracker_config: udp_tracker_config.clone(), + + tracker_core_container: tracker_core_container.clone(), + + // `UdpTrackerCoreServices` + udp_core_stats_event_sender: udp_tracker_core_services.udp_core_stats_event_sender.clone(), + udp_core_stats_repository: udp_tracker_core_services.udp_core_stats_repository.clone(), + ban_service: udp_tracker_core_services.udp_ban_service.clone(), + connect_service: udp_tracker_core_services.udp_connect_service.clone(), + announce_service: udp_tracker_core_services.udp_announce_service.clone(), + scrape_service: udp_tracker_core_services.udp_scrape_service.clone(), + }) + } +} + +pub struct UdpTrackerCoreServices { + pub udp_core_stats_event_sender: Arc>>, + pub udp_core_stats_repository: Arc, + pub udp_ban_service: Arc>, + pub udp_connect_service: Arc, + pub udp_announce_service: Arc, + pub udp_scrape_service: Arc, +} + +impl UdpTrackerCoreServices { + #[must_use] + pub fn initialize_from(tracker_core_container: &Arc) -> Arc { let (udp_core_stats_event_sender, udp_core_stats_repository) = statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); @@ -49,15 +90,13 @@ impl UdpTrackerCoreContainer { udp_core_stats_event_sender.clone(), )); - Arc::new(UdpTrackerCoreContainer { - tracker_core_container: tracker_core_container.clone(), - udp_tracker_config: udp_tracker_config.clone(), - udp_core_stats_event_sender: udp_core_stats_event_sender.clone(), - udp_core_stats_repository: udp_core_stats_repository.clone(), - ban_service: ban_service.clone(), - connect_service: 
connect_service.clone(), - announce_service: announce_service.clone(), - scrape_service: scrape_service.clone(), + Arc::new(Self { + udp_core_stats_event_sender, + udp_core_stats_repository, + udp_ban_service: ban_service, + udp_connect_service: connect_service, + udp_announce_service: announce_service, + udp_scrape_service: scrape_service, }) } } diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 158e39a7e..b97da90ad 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -131,7 +131,8 @@ impl EnvContainer { let udp_tracker_config = Arc::new(udp_tracker_configurations[0].clone()); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); - let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from(&tracker_core_container, &udp_tracker_config); + let udp_tracker_core_container = + UdpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &udp_tracker_config); let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); Self { diff --git a/src/container.rs b/src/container.rs index f311453d1..ce5eb8ae9 100644 --- a/src/container.rs +++ b/src/container.rs @@ -4,10 +4,8 @@ use std::sync::Arc; use bittorrent_http_tracker_core::container::{HttpTrackerCoreContainer, HttpTrackerCoreServices}; use bittorrent_tracker_core::container::TrackerCoreContainer; -use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; -use bittorrent_udp_tracker_core::services::banning::BanService; -use bittorrent_udp_tracker_core::{self, MAX_CONNECTION_ID_ERRORS_PER_IP}; -use tokio::sync::RwLock; +use bittorrent_udp_tracker_core::container::{UdpTrackerCoreContainer, UdpTrackerCoreServices}; +use bittorrent_udp_tracker_core::{self}; use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_tracker_configuration::{Configuration, HttpApi}; use 
torrust_udp_tracker_server::container::UdpTrackerServerContainer; @@ -26,14 +24,7 @@ pub struct AppContainer { pub tracker_core_container: Arc, pub http_api_config: Arc>, pub http_tracker_core_services: Arc, - - // UDP Tracker Core Services - pub udp_core_stats_event_sender: Arc>>, - pub udp_core_stats_repository: Arc, - pub udp_ban_service: Arc>, - pub udp_connect_service: Arc, - pub udp_announce_service: Arc, - pub udp_scrape_service: Arc, + pub udp_tracker_core_services: Arc, // UDP Tracker Server Services pub udp_server_stats_event_sender: Arc>>, @@ -58,24 +49,7 @@ impl AppContainer { let http_tracker_core_services = HttpTrackerCoreServices::initialize_from(&tracker_core_container); - // UDP Tracker Core Services - let (udp_core_stats_event_sender, udp_core_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); - let udp_core_stats_repository = Arc::new(udp_core_stats_repository); - let udp_ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let udp_connect_service = Arc::new(bittorrent_udp_tracker_core::services::connect::ConnectService::new( - udp_core_stats_event_sender.clone(), - )); - let udp_announce_service = Arc::new(bittorrent_udp_tracker_core::services::announce::AnnounceService::new( - tracker_core_container.announce_handler.clone(), - tracker_core_container.whitelist_authorization.clone(), - udp_core_stats_event_sender.clone(), - )); - let udp_scrape_service = Arc::new(bittorrent_udp_tracker_core::services::scrape::ScrapeService::new( - tracker_core_container.scrape_handler.clone(), - udp_core_stats_event_sender.clone(), - )); + let udp_tracker_core_services = UdpTrackerCoreServices::initialize_from(&tracker_core_container); // UDP Tracker Server Services let (udp_server_stats_event_sender, udp_server_stats_repository) = @@ -114,16 +88,11 @@ impl AppContainer { for 
udp_tracker_config in udp_trackers { udp_tracker_containers.insert( udp_tracker_config.bind_address, - Arc::new(UdpTrackerCoreContainer { - tracker_core_container: tracker_core_container.clone(), - udp_tracker_config: Arc::new(udp_tracker_config.clone()), - udp_core_stats_event_sender: udp_core_stats_event_sender.clone(), - udp_core_stats_repository: udp_core_stats_repository.clone(), - ban_service: udp_ban_service.clone(), - connect_service: udp_connect_service.clone(), - announce_service: udp_announce_service.clone(), - scrape_service: udp_scrape_service.clone(), - }), + UdpTrackerCoreContainer::initialize_from_services( + &tracker_core_container, + &udp_tracker_core_services, + &Arc::new(udp_tracker_config.clone()), + ), ); } } @@ -134,14 +103,7 @@ impl AppContainer { tracker_core_container, http_api_config, http_tracker_core_services, - - // UDP Tracker Core Services - udp_core_stats_event_sender, - udp_core_stats_repository, - udp_ban_service, - udp_connect_service, - udp_announce_service, - udp_scrape_service, + udp_tracker_core_services, // UDP Tracker Server Services udp_server_stats_event_sender, @@ -188,9 +150,9 @@ impl AppContainer { TrackerHttpApiCoreContainer { tracker_core_container: self.tracker_core_container.clone(), http_api_config: http_api_config.clone(), - ban_service: self.udp_ban_service.clone(), + ban_service: self.udp_tracker_core_services.udp_ban_service.clone(), http_stats_repository: self.http_tracker_core_services.http_stats_repository.clone(), - udp_core_stats_repository: self.udp_core_stats_repository.clone(), + udp_core_stats_repository: self.udp_tracker_core_services.udp_core_stats_repository.clone(), udp_server_stats_repository: self.udp_server_stats_repository.clone(), } .into() From 60ed2e4a6894da61a9bcec9b7139866b000a9093 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Mar 2025 11:34:51 +0000 Subject: [PATCH 426/802] refactor: [#1411] remove duplicate code for UdpTrackerServerServices initialization --- 
packages/udp-tracker-server/src/container.rs | 17 ++++++++++++ src/container.rs | 28 +++++--------------- 2 files changed, 23 insertions(+), 22 deletions(-) diff --git a/packages/udp-tracker-server/src/container.rs b/packages/udp-tracker-server/src/container.rs index 0c8039b26..2b1ce8c99 100644 --- a/packages/udp-tracker-server/src/container.rs +++ b/packages/udp-tracker-server/src/container.rs @@ -10,6 +10,23 @@ pub struct UdpTrackerServerContainer { } impl UdpTrackerServerContainer { + #[must_use] + pub fn initialize(core_config: &Arc) -> Arc { + let udp_tracker_server_services = UdpTrackerServerServices::initialize(core_config); + + Arc::new(Self { + udp_server_stats_event_sender: udp_tracker_server_services.udp_server_stats_event_sender.clone(), + udp_server_stats_repository: udp_tracker_server_services.udp_server_stats_repository.clone(), + }) + } +} + +pub struct UdpTrackerServerServices { + pub udp_server_stats_event_sender: Arc>>, + pub udp_server_stats_repository: Arc, +} + +impl UdpTrackerServerServices { #[must_use] pub fn initialize(core_config: &Arc) -> Arc { let (udp_server_stats_event_sender, udp_server_stats_repository) = diff --git a/src/container.rs b/src/container.rs index ce5eb8ae9..aa832cac4 100644 --- a/src/container.rs +++ b/src/container.rs @@ -21,15 +21,12 @@ pub enum Error { } pub struct AppContainer { - pub tracker_core_container: Arc, pub http_api_config: Arc>, + + pub tracker_core_container: Arc, pub http_tracker_core_services: Arc, pub udp_tracker_core_services: Arc, - // UDP Tracker Server Services - pub udp_server_stats_event_sender: Arc>>, - pub udp_server_stats_repository: Arc, - // UDP Tracker Server Container pub udp_tracker_server_container: Arc, @@ -51,17 +48,7 @@ impl AppContainer { let udp_tracker_core_services = UdpTrackerCoreServices::initialize_from(&tracker_core_container); - // UDP Tracker Server Services - let (udp_server_stats_event_sender, udp_server_stats_repository) = - 
torrust_udp_tracker_server::statistics::setup::factory(configuration.core.tracker_usage_statistics); - let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); - let udp_server_stats_repository = Arc::new(udp_server_stats_repository); - - // UDP Tracker Server Container - let udp_tracker_server_container = Arc::new(UdpTrackerServerContainer { - udp_server_stats_event_sender: udp_server_stats_event_sender.clone(), - udp_server_stats_repository: udp_server_stats_repository.clone(), - }); + let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); // Tracker Instance Containers @@ -100,15 +87,12 @@ impl AppContainer { let udp_tracker_containers = Arc::new(udp_tracker_containers); AppContainer { - tracker_core_container, http_api_config, + + tracker_core_container, http_tracker_core_services, udp_tracker_core_services, - // UDP Tracker Server Services - udp_server_stats_event_sender, - udp_server_stats_repository, - // UDP Tracker Server Container udp_tracker_server_container, @@ -153,7 +137,7 @@ impl AppContainer { ban_service: self.udp_tracker_core_services.udp_ban_service.clone(), http_stats_repository: self.http_tracker_core_services.http_stats_repository.clone(), udp_core_stats_repository: self.udp_tracker_core_services.udp_core_stats_repository.clone(), - udp_server_stats_repository: self.udp_server_stats_repository.clone(), + udp_server_stats_repository: self.udp_tracker_server_container.udp_server_stats_repository.clone(), } .into() } From ef5dc322b58aa6fa17997ca5f34bdb715911862a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Mar 2025 11:42:47 +0000 Subject: [PATCH 427/802] refactor: [#1411] reorganize fields in AppContainer --- src/container.rs | 62 ++++++++++++++++++++++++++++-------------------- 1 file changed, 36 insertions(+), 26 deletions(-) diff --git a/src/container.rs b/src/container.rs index aa832cac4..918a6ea03 100644 --- a/src/container.rs +++ b/src/container.rs @@ -21,42 +21,44 @@ pub enum 
Error { } pub struct AppContainer { + // Configuration pub http_api_config: Arc>, + // Core pub tracker_core_container: Arc, + + // HTTP pub http_tracker_core_services: Arc, - pub udp_tracker_core_services: Arc, + pub http_tracker_instance_containers: Arc>>, - // UDP Tracker Server Container + // UDP + pub udp_tracker_core_services: Arc, pub udp_tracker_server_container: Arc, - - // Tracker Instance Containers - pub http_tracker_containers: Arc>>, - pub udp_tracker_containers: Arc>>, + pub udp_tracker_instance_containers: Arc>>, } impl AppContainer { #[instrument(skip())] pub fn initialize(configuration: &Configuration) -> AppContainer { + // Configuration + let core_config = Arc::new(configuration.core.clone()); let http_api_config = Arc::new(configuration.http_api.clone()); - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); + // Core - let http_tracker_core_services = HttpTrackerCoreServices::initialize_from(&tracker_core_container); - - let udp_tracker_core_services = UdpTrackerCoreServices::initialize_from(&tracker_core_container); + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); - let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); + // HTTP - // Tracker Instance Containers + let http_tracker_core_services = HttpTrackerCoreServices::initialize_from(&tracker_core_container); - let mut http_tracker_containers = HashMap::new(); + let mut http_tracker_instance_containers = HashMap::new(); if let Some(http_trackers) = &configuration.http_trackers { for http_tracker_config in http_trackers { - http_tracker_containers.insert( + http_tracker_instance_containers.insert( http_tracker_config.bind_address, HttpTrackerCoreContainer::initialize_from_services( &tracker_core_container, @@ -67,13 +69,19 @@ impl AppContainer { } } - let http_tracker_containers = Arc::new(http_tracker_containers); + let http_tracker_instance_containers = 
Arc::new(http_tracker_instance_containers); + + // UDP + + let udp_tracker_core_services = UdpTrackerCoreServices::initialize_from(&tracker_core_container); + + let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); - let mut udp_tracker_containers = HashMap::new(); + let mut udp_tracker_instance_containers = HashMap::new(); if let Some(udp_trackers) = &configuration.udp_trackers { for udp_tracker_config in udp_trackers { - udp_tracker_containers.insert( + udp_tracker_instance_containers.insert( udp_tracker_config.bind_address, UdpTrackerCoreContainer::initialize_from_services( &tracker_core_container, @@ -84,21 +92,23 @@ impl AppContainer { } } - let udp_tracker_containers = Arc::new(udp_tracker_containers); + let udp_tracker_instance_containers = Arc::new(udp_tracker_instance_containers); AppContainer { + // Configuration http_api_config, + // Core tracker_core_container, + + // HTTP http_tracker_core_services, - udp_tracker_core_services, + http_tracker_instance_containers, - // UDP Tracker Server Container + // UDP + udp_tracker_core_services, udp_tracker_server_container, - - // Tracker Instance Containers - http_tracker_containers, - udp_tracker_containers, + udp_tracker_instance_containers, } } @@ -112,7 +122,7 @@ impl AppContainer { /// Return an error if there is no HTTP tracker server instance bound to the /// socket address. pub fn http_tracker_container(&self, bind_address: SocketAddr) -> Result, Error> { - match self.http_tracker_containers.get(&bind_address) { + match self.http_tracker_instance_containers.get(&bind_address) { Some(http_tracker_container) => Ok(http_tracker_container.clone()), None => Err(Error::MissingHttpTrackerCoreContainer { bind_address }), } @@ -123,7 +133,7 @@ impl AppContainer { /// Return an error if there is no UDP tracker server instance bound to the /// socket address. 
pub fn udp_tracker_container(&self, bind_address: SocketAddr) -> Result, Error> { - match self.udp_tracker_containers.get(&bind_address) { + match self.udp_tracker_instance_containers.get(&bind_address) { Some(udp_tracker_container) => Ok(udp_tracker_container.clone()), None => Err(Error::MissingUdpTrackerCoreContainer { bind_address }), } From 299d0f3ea7566a73aa1edeedef839d92d9545896 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 25 Mar 2025 11:56:10 +0000 Subject: [PATCH 428/802] refactor: [#1411] extract static methods in AppContainer --- src/container.rs | 87 ++++++++++++++++++++++++++++++------------------ 1 file changed, 55 insertions(+), 32 deletions(-) diff --git a/src/container.rs b/src/container.rs index 918a6ea03..7742d8e40 100644 --- a/src/container.rs +++ b/src/container.rs @@ -54,22 +54,11 @@ impl AppContainer { let http_tracker_core_services = HttpTrackerCoreServices::initialize_from(&tracker_core_container); - let mut http_tracker_instance_containers = HashMap::new(); - - if let Some(http_trackers) = &configuration.http_trackers { - for http_tracker_config in http_trackers { - http_tracker_instance_containers.insert( - http_tracker_config.bind_address, - HttpTrackerCoreContainer::initialize_from_services( - &tracker_core_container, - &http_tracker_core_services, - &Arc::new(http_tracker_config.clone()), - ), - ); - } - } - - let http_tracker_instance_containers = Arc::new(http_tracker_instance_containers); + let http_tracker_instance_containers = Self::initialize_http_tracker_instance_containers( + configuration, + &tracker_core_container, + &http_tracker_core_services, + ); // UDP @@ -77,22 +66,8 @@ impl AppContainer { let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); - let mut udp_tracker_instance_containers = HashMap::new(); - - if let Some(udp_trackers) = &configuration.udp_trackers { - for udp_tracker_config in udp_trackers { - udp_tracker_instance_containers.insert( - 
udp_tracker_config.bind_address, - UdpTrackerCoreContainer::initialize_from_services( - &tracker_core_container, - &udp_tracker_core_services, - &Arc::new(udp_tracker_config.clone()), - ), - ); - } - } - - let udp_tracker_instance_containers = Arc::new(udp_tracker_instance_containers); + let udp_tracker_instance_containers = + Self::initialize_udp_tracker_instance_containers(configuration, &tracker_core_container, &udp_tracker_core_services); AppContainer { // Configuration @@ -151,4 +126,52 @@ impl AppContainer { } .into() } + + #[must_use] + fn initialize_http_tracker_instance_containers( + configuration: &Configuration, + tracker_core_container: &Arc, + http_tracker_core_services: &Arc, + ) -> Arc>> { + let mut http_tracker_instance_containers = HashMap::new(); + + if let Some(http_trackers) = &configuration.http_trackers { + for http_tracker_config in http_trackers { + http_tracker_instance_containers.insert( + http_tracker_config.bind_address, + HttpTrackerCoreContainer::initialize_from_services( + tracker_core_container, + http_tracker_core_services, + &Arc::new(http_tracker_config.clone()), + ), + ); + } + } + + Arc::new(http_tracker_instance_containers) + } + + #[must_use] + fn initialize_udp_tracker_instance_containers( + configuration: &Configuration, + tracker_core_container: &Arc, + udp_tracker_core_services: &Arc, + ) -> Arc>> { + let mut udp_tracker_instance_containers = HashMap::new(); + + if let Some(udp_trackers) = &configuration.udp_trackers { + for udp_tracker_config in udp_trackers { + udp_tracker_instance_containers.insert( + udp_tracker_config.bind_address, + UdpTrackerCoreContainer::initialize_from_services( + tracker_core_container, + udp_tracker_core_services, + &Arc::new(udp_tracker_config.clone()), + ), + ); + } + } + + Arc::new(udp_tracker_instance_containers) + } } From 99aa2594751dcb0a307817b21871eb5bd2fe8cff Mon Sep 17 00:00:00 2001 From: nuts_rice Date: Tue, 25 Mar 2025 11:56:25 -0400 Subject: [PATCH 429/802] health_check: 
service_type field --- packages/axum-health-check-api-server/src/handlers.rs | 1 + packages/axum-health-check-api-server/src/resources.rs | 1 + packages/axum-http-tracker-server/src/server.rs | 3 ++- packages/axum-rest-tracker-api-server/src/server.rs | 4 +++- packages/server-lib/src/registar.rs | 1 + packages/udp-tracker-server/src/server/launcher.rs | 3 ++- 6 files changed, 10 insertions(+), 3 deletions(-) diff --git a/packages/axum-health-check-api-server/src/handlers.rs b/packages/axum-health-check-api-server/src/handlers.rs index 0af2ab05d..7e54d36ec 100644 --- a/packages/axum-health-check-api-server/src/handlers.rs +++ b/packages/axum-health-check-api-server/src/handlers.rs @@ -33,6 +33,7 @@ pub(crate) async fn health_check_handler(State(register): State CheckReport { binding: c.binding, info: c.info.clone(), + service_type: c.service_type, result: c.job.await.expect("it should be able to join into the checking function"), } }) diff --git a/packages/axum-health-check-api-server/src/resources.rs b/packages/axum-health-check-api-server/src/resources.rs index 3302fb966..c01547fad 100644 --- a/packages/axum-health-check-api-server/src/resources.rs +++ b/packages/axum-health-check-api-server/src/resources.rs @@ -12,6 +12,7 @@ pub enum Status { #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct CheckReport { pub binding: SocketAddr, + pub service_type: String, pub info: String, pub result: Result, } diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index f14a33602..cc965e1d7 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -18,6 +18,7 @@ use tracing::instrument; use super::v1::routes::router; use crate::HTTP_TRACKER_LOG_TARGET; +const TYPE_STRING: &str = "http_tracker"; /// Error that can occur when starting or stopping the HTTP server. 
/// /// Some errors triggered while starting the server are: @@ -231,7 +232,7 @@ pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { } }); - ServiceHealthCheckJob::new(*binding, info, job) + ServiceHealthCheckJob::new(*binding, info, TYPE_STRING.to_string(), job) } #[cfg(test)] diff --git a/packages/axum-rest-tracker-api-server/src/server.rs b/packages/axum-rest-tracker-api-server/src/server.rs index fd8f92944..4d41347ab 100644 --- a/packages/axum-rest-tracker-api-server/src/server.rs +++ b/packages/axum-rest-tracker-api-server/src/server.rs @@ -45,6 +45,8 @@ use tracing::{instrument, Level}; use super::routes::router; use crate::API_LOG_TARGET; +const TYPE_STRING: &str = "tracker_rest_api"; + /// Errors that can occur when starting or stopping the API server. #[derive(Debug, Error)] pub enum Error { @@ -204,7 +206,7 @@ pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { Err(err) => Err(err.to_string()), } }); - ServiceHealthCheckJob::new(*binding, info, job) + ServiceHealthCheckJob::new(*binding, info, TYPE_STRING.to_string(), job) } /// A struct responsible for starting the API server. diff --git a/packages/server-lib/src/registar.rs b/packages/server-lib/src/registar.rs index 6b67188dc..0a0e9ada0 100644 --- a/packages/server-lib/src/registar.rs +++ b/packages/server-lib/src/registar.rs @@ -18,6 +18,7 @@ pub type ServiceHeathCheckResult = Result; pub struct ServiceHealthCheckJob { pub binding: SocketAddr, pub info: String, + pub service_type: String, pub job: JoinHandle, } diff --git a/packages/udp-tracker-server/src/server/launcher.rs b/packages/udp-tracker-server/src/server/launcher.rs index b21ac11ba..6473f300b 100644 --- a/packages/udp-tracker-server/src/server/launcher.rs +++ b/packages/udp-tracker-server/src/server/launcher.rs @@ -24,6 +24,7 @@ use crate::server::receiver::Receiver; const IP_BANS_RESET_INTERVAL_IN_SECS: u64 = 3600; +const TYPE_STRING: &str = "udp_tracker"; /// A UDP server instance launcher. 
#[derive(Constructor)] pub struct Launcher; @@ -117,7 +118,7 @@ impl Launcher { let job = tokio::spawn(async move { check(&binding).await }); - ServiceHealthCheckJob::new(binding, info, job) + ServiceHealthCheckJob::new(binding, info, TYPE_STRING.to_string(), job) } #[instrument(skip(receiver, udp_tracker_core_container, udp_tracker_server_container))] From abfb7331cfb01f2104804a8c7db25a79555c80a7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 26 Mar 2025 17:06:56 +0000 Subject: [PATCH 430/802] feat: [#1409] add listen_url field to the health check API ```json { "status": "Ok", "message": "", "details": [ { "listen_url": "http://0.0.0.0:7070/", "binding": "0.0.0.0:7070", "service_type": "http_tracker", "info": "checking http tracker health check at: http://0.0.0.0:7070/health_check", "result": { "Ok": "200 OK" } } ] } ``` The `binding` is not removed for back compatibility. --- Cargo.lock | 2 ++ .../axum-health-check-api-server/Cargo.toml | 1 + .../src/handlers.rs | 1 + .../src/resources.rs | 2 ++ .../axum-health-check-api-server/src/server.rs | 6 +++++- .../axum-http-tracker-server/src/server.rs | 18 ++++++++++++------ .../axum-rest-tracker-api-server/Cargo.toml | 1 + .../axum-rest-tracker-api-server/src/server.rs | 15 +++++++++------ packages/server-lib/Cargo.toml | 1 + packages/server-lib/src/logging.rs | 2 +- packages/server-lib/src/registar.rs | 7 +++++-- packages/server-lib/src/signals.rs | 2 ++ .../udp-tracker-server/src/server/launcher.rs | 8 +++++--- .../udp-tracker-server/src/server/states.rs | 7 +++++-- src/console/ci/e2e/logs_parser.rs | 8 ++++---- 15 files changed, 56 insertions(+), 25 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c4755225f..c2d68639a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4380,6 +4380,7 @@ dependencies = [ "tower-http", "tracing", "tracing-subscriber", + "url", ] [[package]] @@ -4512,6 +4513,7 @@ dependencies = [ "tokio", "tower-http", "tracing", + "url", ] [[package]] diff --git 
a/packages/axum-health-check-api-server/Cargo.toml b/packages/axum-health-check-api-server/Cargo.toml index e24e609bf..6766ce587 100644 --- a/packages/axum-health-check-api-server/Cargo.toml +++ b/packages/axum-health-check-api-server/Cargo.toml @@ -26,6 +26,7 @@ torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } tracing = "0" +url = "2.5.4" [dev-dependencies] reqwest = { version = "0", features = ["json"] } diff --git a/packages/axum-health-check-api-server/src/handlers.rs b/packages/axum-health-check-api-server/src/handlers.rs index 7e54d36ec..66390f089 100644 --- a/packages/axum-health-check-api-server/src/handlers.rs +++ b/packages/axum-health-check-api-server/src/handlers.rs @@ -31,6 +31,7 @@ pub(crate) async fn health_check_handler(State(register): State let jobs = checks.drain(..).map(|c| { tokio::spawn(async move { CheckReport { + listen_url: c.listen_url.clone(), binding: c.binding, info: c.info.clone(), service_type: c.service_type, diff --git a/packages/axum-health-check-api-server/src/resources.rs b/packages/axum-health-check-api-server/src/resources.rs index c01547fad..24079b00f 100644 --- a/packages/axum-health-check-api-server/src/resources.rs +++ b/packages/axum-health-check-api-server/src/resources.rs @@ -1,6 +1,7 @@ use std::net::SocketAddr; use serde::{Deserialize, Serialize}; +use url::Url; #[derive(Copy, Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] pub enum Status { @@ -11,6 +12,7 @@ pub enum Status { #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct CheckReport { + pub listen_url: Url, pub binding: SocketAddr, pub service_type: String, pub info: String, diff --git a/packages/axum-health-check-api-server/src/server.rs b/packages/axum-health-check-api-server/src/server.rs index 
733fec3a0..cc721f5eb 100644 --- a/packages/axum-health-check-api-server/src/server.rs +++ b/packages/axum-health-check-api-server/src/server.rs @@ -25,6 +25,7 @@ use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; use tower_http::trace::{DefaultMakeSpan, TraceLayer}; use tower_http::LatencyUnit; use tracing::{instrument, Level, Span}; +use url::Url; use crate::handlers::health_check_handler; use crate::HEALTH_CHECK_API_LOG_TARGET; @@ -101,6 +102,9 @@ pub fn start( let socket = std::net::TcpListener::bind(bind_to).expect("Could not bind tcp_listener to address."); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); + let protocol = "http"; // The health check API only supports HTTP directly now. Use a reverse proxy for HTTPS. + let listen_url = + Url::parse(&format!("{protocol}://{address}")).expect("Could not parse internal service url for health check API."); let handle = Handle::new(); @@ -116,7 +120,7 @@ pub fn start( .handle(handle) .serve(router.into_make_service_with_connect_info::()); - tx.send(Started { address }) + tx.send(Started { listen_url, address }) .expect("the Health Check API server should not be dropped"); running diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index cc965e1d7..eefb124e8 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -7,6 +7,7 @@ use axum_server::Handle; use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use derive_more::Constructor; use futures::future::BoxFuture; +use reqwest::Url; use tokio::sync::oneshot::{Receiver, Sender}; use torrust_axum_server::custom_axum_server::{self, TimeoutAcceptor}; use torrust_axum_server::signals::graceful_shutdown; @@ -63,8 +64,10 @@ impl Launcher { let tls = self.tls.clone(); let protocol = if tls.is_some() { "https" } else { "http" }; + let listen_url = + 
Url::parse(&format!("{protocol}://{address}")).expect("Could not parse internal service url for HTTP tracker."); - tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting on: {protocol}://{}", address); + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting on: {protocol}://{address}"); let app = router(http_tracker_container, address); @@ -90,7 +93,7 @@ impl Launcher { tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "{STARTED_ON}: {protocol}://{}", address); tx_start - .send(Started { address }) + .send(Started { listen_url, address }) .expect("the HTTP(s) Tracker service should not be dropped"); running @@ -177,9 +180,12 @@ impl HttpServer { launcher }); - let binding = rx_start.await.expect("it should be able to start the service").address; + let started = rx_start.await.expect("it should be able to start the service"); - form.send(ServiceRegistration::new(binding, check_fn)) + let listen_url = started.listen_url; + let binding = started.address; + + form.send(ServiceRegistration::new(listen_url, binding, check_fn)) .expect("it should be able to send service registration"); Ok(HttpServer { @@ -220,7 +226,7 @@ impl HttpServer { /// This function will return an error if unable to connect. /// Or if the request returns an error. 
#[must_use] -pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { +pub fn check_fn(listen_url: &Url, binding: &SocketAddr) -> ServiceHealthCheckJob { let url = format!("http://{binding}/health_check"); // DevSkim: ignore DS137138 let info = format!("checking http tracker health check at: {url}"); @@ -232,7 +238,7 @@ pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { } }); - ServiceHealthCheckJob::new(*binding, info, TYPE_STRING.to_string(), job) + ServiceHealthCheckJob::new(listen_url.clone(), *binding, info, TYPE_STRING.to_string(), job) } #[cfg(test)] diff --git a/packages/axum-rest-tracker-api-server/Cargo.toml b/packages/axum-rest-tracker-api-server/Cargo.toml index 9c0d2bc2f..42fe68584 100644 --- a/packages/axum-rest-tracker-api-server/Cargo.toml +++ b/packages/axum-rest-tracker-api-server/Cargo.toml @@ -42,6 +42,7 @@ torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } tracing = "0" +url = "2" [dev-dependencies] local-ip-address = "0" diff --git a/packages/axum-rest-tracker-api-server/src/server.rs b/packages/axum-rest-tracker-api-server/src/server.rs index 4d41347ab..20775dbc1 100644 --- a/packages/axum-rest-tracker-api-server/src/server.rs +++ b/packages/axum-rest-tracker-api-server/src/server.rs @@ -41,6 +41,7 @@ use torrust_server_lib::registar::{ServiceHealthCheckJob, ServiceRegistration, S use torrust_server_lib::signals::{Halted, Started}; use torrust_tracker_configuration::AccessTokens; use tracing::{instrument, Level}; +use url::Url; use super::routes::router; use crate::API_LOG_TARGET; @@ -148,7 +149,7 @@ impl ApiServer { let api_server = match rx_start.await { Ok(started) => { - form.send(ServiceRegistration::new(started.address, check_fn)) + form.send(ServiceRegistration::new(started.listen_url, started.address, check_fn)) 
.expect("it should be able to send service registration"); ApiServer { @@ -195,7 +196,7 @@ impl ApiServer { /// Or if there request returns an error code. #[must_use] #[instrument(skip())] -pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { +pub fn check_fn(listen_url: &Url, binding: &SocketAddr) -> ServiceHealthCheckJob { let url = format!("http://{binding}/api/health_check"); // DevSkim: ignore DS137138 let info = format!("checking api health check at: {url}"); @@ -206,7 +207,7 @@ pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { Err(err) => Err(err.to_string()), } }); - ServiceHealthCheckJob::new(*binding, info, TYPE_STRING.to_string(), job) + ServiceHealthCheckJob::new(listen_url.clone(), *binding, info, TYPE_STRING.to_string(), job) } /// A struct responsible for starting the API server. @@ -260,8 +261,10 @@ impl Launcher { let tls = self.tls.clone(); let protocol = if tls.is_some() { "https" } else { "http" }; + let listen_url = + Url::parse(&format!("{protocol}://{address}")).expect("Could not parse internal service url for tracker API."); - tracing::info!(target: API_LOG_TARGET, "Starting on {protocol}://{}", address); + tracing::info!(target: API_LOG_TARGET, "Starting on: {protocol}://{address}"); let running = Box::pin(async { match tls { @@ -282,10 +285,10 @@ impl Launcher { } }); - tracing::info!(target: API_LOG_TARGET, "{STARTED_ON} {protocol}://{}", address); + tracing::info!(target: API_LOG_TARGET, "{STARTED_ON}: {protocol}://{}", address); tx_start - .send(Started { address }) + .send(Started { listen_url, address }) .expect("the HTTP(s) Tracker API service should not be dropped"); running diff --git a/packages/server-lib/Cargo.toml b/packages/server-lib/Cargo.toml index b8514fbf4..514828953 100644 --- a/packages/server-lib/Cargo.toml +++ b/packages/server-lib/Cargo.toml @@ -18,5 +18,6 @@ derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } tokio = { version = "1", features = ["macros", "net", 
"rt-multi-thread", "signal", "sync"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } tracing = "0" +url = "2.5.4" [dev-dependencies] diff --git a/packages/server-lib/src/logging.rs b/packages/server-lib/src/logging.rs index c503cfd35..c63ba3caf 100644 --- a/packages/server-lib/src/logging.rs +++ b/packages/server-lib/src/logging.rs @@ -10,7 +10,7 @@ use tower_http::LatencyUnit; /// ```text /// 2024-06-25T12:36:25.025312Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 /// 2024-06-25T12:36:25.025445Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 -/// 2024-06-25T12:36:25.025527Z INFO API: Started on http://0.0.0.0:1212 +/// 2024-06-25T12:36:25.025527Z INFO API: Started on: http://0.0.0.0:1212 /// 2024-06-25T12:36:25.025580Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 /// ``` pub const STARTED_ON: &str = "Started on"; diff --git a/packages/server-lib/src/registar.rs b/packages/server-lib/src/registar.rs index 0a0e9ada0..9d36cf1fe 100644 --- a/packages/server-lib/src/registar.rs +++ b/packages/server-lib/src/registar.rs @@ -7,6 +7,7 @@ use std::sync::Arc; use derive_more::Constructor; use tokio::sync::Mutex; use tokio::task::JoinHandle; +use url::Url; /// A [`ServiceHeathCheckResult`] is returned by a completed health check. pub type ServiceHeathCheckResult = Result; @@ -16,6 +17,7 @@ pub type ServiceHeathCheckResult = Result; /// The `job` awaits a [`ServiceHeathCheckResult`]. #[derive(Debug, Constructor)] pub struct ServiceHealthCheckJob { + pub listen_url: Url, pub binding: SocketAddr, pub info: String, pub service_type: String, @@ -25,13 +27,14 @@ pub struct ServiceHealthCheckJob { /// The function specification [`FnSpawnServiceHeathCheck`]. /// /// A function fulfilling this specification will spawn a new [`ServiceHealthCheckJob`]. 
-pub type FnSpawnServiceHeathCheck = fn(&SocketAddr) -> ServiceHealthCheckJob; +pub type FnSpawnServiceHeathCheck = fn(&Url, &SocketAddr) -> ServiceHealthCheckJob; /// A [`ServiceRegistration`] is provided to the [`Registar`] for registration. /// /// Each registration includes a function that fulfils the [`FnSpawnServiceHeathCheck`] specification. #[derive(Clone, Debug, Constructor)] pub struct ServiceRegistration { + listen_url: Url, binding: SocketAddr, check_fn: FnSpawnServiceHeathCheck, } @@ -39,7 +42,7 @@ pub struct ServiceRegistration { impl ServiceRegistration { #[must_use] pub fn spawn_check(&self) -> ServiceHealthCheckJob { - (self.check_fn)(&self.binding) + (self.check_fn)(&self.listen_url, &self.binding) } } diff --git a/packages/server-lib/src/signals.rs b/packages/server-lib/src/signals.rs index 63f7554c8..94ee474ea 100644 --- a/packages/server-lib/src/signals.rs +++ b/packages/server-lib/src/signals.rs @@ -1,12 +1,14 @@ //! This module contains functions to handle signals. use derive_more::Display; use tracing::instrument; +use url::Url; /// This is the message that the "launcher" spawned task sends to the main /// application process to notify the service was successfully started. 
/// #[derive(Debug)] pub struct Started { + pub listen_url: Url, pub address: std::net::SocketAddr, } diff --git a/packages/udp-tracker-server/src/server/launcher.rs b/packages/udp-tracker-server/src/server/launcher.rs index 6473f300b..67e2ceed6 100644 --- a/packages/udp-tracker-server/src/server/launcher.rs +++ b/packages/udp-tracker-server/src/server/launcher.rs @@ -14,6 +14,7 @@ use torrust_server_lib::logging::STARTED_ON; use torrust_server_lib::registar::ServiceHealthCheckJob; use torrust_server_lib::signals::{shutdown_signal_with_message, Halted, Started}; use tracing::instrument; +use url::Url; use super::request_buffer::ActiveRequests; use crate::container::UdpTrackerServerContainer; @@ -65,6 +66,7 @@ impl Launcher { } }; + let listen_url = bound_socket.url().clone(); let address = bound_socket.address(); let local_udp_url = bound_socket.url().to_string(); @@ -89,7 +91,7 @@ impl Launcher { }; tx_start - .send(Started { address }) + .send(Started { listen_url, address }) .expect("the UDP Tracker service should not be dropped"); tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (started)"); @@ -112,13 +114,13 @@ impl Launcher { #[must_use] #[instrument(skip(binding))] - pub fn check(binding: &SocketAddr) -> ServiceHealthCheckJob { + pub fn check(listen_url: &Url, binding: &SocketAddr) -> ServiceHealthCheckJob { let binding = *binding; let info = format!("checking the udp tracker health check at: {binding}"); let job = tokio::spawn(async move { check(&binding).await }); - ServiceHealthCheckJob::new(binding, info, TYPE_STRING.to_string(), job) + ServiceHealthCheckJob::new(listen_url.clone(), binding, info, TYPE_STRING.to_string(), job) } #[instrument(skip(receiver, udp_tracker_core_container, udp_tracker_server_container))] diff --git a/packages/udp-tracker-server/src/server/states.rs b/packages/udp-tracker-server/src/server/states.rs index 4d1c97167..f10a02fb7 100644 --- 
a/packages/udp-tracker-server/src/server/states.rs +++ b/packages/udp-tracker-server/src/server/states.rs @@ -83,9 +83,12 @@ impl Server { rx_halt, ); - let local_addr = rx_start.await.expect("it should be able to start the service").address; + let started = rx_start.await.expect("it should be able to start the service"); - form.send(ServiceRegistration::new(local_addr, Launcher::check)) + let listen_url = started.listen_url; + let local_addr = started.address; + + form.send(ServiceRegistration::new(listen_url, local_addr, Launcher::check)) .expect("it should be able to send service registration"); let running_udp_server: Server = Server { diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs index c406fa7a5..e8b6b3b8f 100644 --- a/src/console/ci/e2e/logs_parser.rs +++ b/src/console/ci/e2e/logs_parser.rs @@ -31,8 +31,8 @@ impl RunningServices { /// 2024-06-10T16:07:39.990303Z INFO HTTP TRACKER: Starting on: http://0.0.0.0:7070 /// 2024-06-10T16:07:39.990439Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 /// 2024-06-10T16:07:39.990448Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled - /// 2024-06-10T16:07:39.990563Z INFO API: Starting on http://127.0.0.1:1212 - /// 2024-06-10T16:07:39.990565Z INFO API: Started on http://127.0.0.1:1212 + /// 2024-06-10T16:07:39.990563Z INFO API: Starting on: http://127.0.0.1:1212 + /// 2024-06-10T16:07:39.990565Z INFO API: Started on: http://127.0.0.1:1212 /// 2024-06-10T16:07:39.990577Z INFO HEALTH CHECK API: Starting on: http://127.0.0.1:1313 /// 2024-06-10T16:07:39.990638Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 /// ``` @@ -122,8 +122,8 @@ mod tests { 2024-06-10T16:07:39.990303Z INFO HTTP TRACKER: Starting on: http://0.0.0.0:7070 2024-06-10T16:07:39.990439Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 2024-06-10T16:07:39.990448Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled - 2024-06-10T16:07:39.990563Z INFO API: Starting on http://127.0.0.1:1212 - 
2024-06-10T16:07:39.990565Z INFO API: Started on http://127.0.0.1:1212 + 2024-06-10T16:07:39.990563Z INFO API: Starting on: http://127.0.0.1:1212 + 2024-06-10T16:07:39.990565Z INFO API: Started on: http://127.0.0.1:1212 2024-06-10T16:07:39.990577Z INFO HEALTH CHECK API: Starting on: http://127.0.0.1:1313 2024-06-10T16:07:39.990638Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 "; From 376c1df2d63e7f29ba17d23f10d763a68918fa6f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 28 Mar 2025 12:24:45 +0000 Subject: [PATCH 431/802] fix: missing feature in package cargo config --- packages/server-lib/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/server-lib/Cargo.toml b/packages/server-lib/Cargo.toml index 514828953..25a63e99d 100644 --- a/packages/server-lib/Cargo.toml +++ b/packages/server-lib/Cargo.toml @@ -14,7 +14,7 @@ rust-version.workspace = true version.workspace = true [dependencies] -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = ["as_ref", "constructor", "display", "from"] } tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } tracing = "0" From 29fa324417201eb6a8ec0103e7fb200ca46670d2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 28 Mar 2025 17:12:01 +0000 Subject: [PATCH 432/802] refactor: [#1422] extract type ServiceBinding Refactor: extract type `ServiceBinding`. It's the protocol + socket binding address for a service. - Protocols are only: udp, http and https (protocols used by the tracker now). - The port must be greater than zero (already assigned). - Any protocol and port combination is possible. For example: http protocol with a port different than 80. 
--- Cargo.lock | 6 +- .../axum-health-check-api-server/Cargo.toml | 1 + .../src/handlers.rs | 4 +- .../src/resources.rs | 2 +- .../src/server.rs | 14 +- .../axum-http-tracker-server/src/server.rs | 22 +- .../src/server.rs | 20 +- packages/primitives/Cargo.toml | 4 + packages/primitives/src/lib.rs | 1 + packages/primitives/src/service_binding.rs | 188 ++++++++++++++++++ packages/server-lib/Cargo.toml | 3 +- packages/server-lib/src/registar.rs | 17 +- packages/server-lib/src/signals.rs | 4 +- packages/tracker-client/src/udp/client.rs | 7 +- .../src/server/bound_socket.rs | 10 + .../udp-tracker-server/src/server/launcher.rs | 22 +- .../udp-tracker-server/src/server/states.rs | 4 +- 17 files changed, 274 insertions(+), 55 deletions(-) create mode 100644 packages/primitives/src/service_binding.rs diff --git a/Cargo.lock b/Cargo.lock index c2d68639a..328e2db93 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4375,6 +4375,7 @@ dependencies = [ "torrust-server-lib", "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-primitives", "torrust-tracker-test-helpers", "torrust-udp-tracker-server", "tower-http", @@ -4510,10 +4511,11 @@ name = "torrust-server-lib" version = "3.0.0-develop" dependencies = [ "derive_more", + "rstest", "tokio", + "torrust-tracker-primitives", "tower-http", "tracing", - "url", ] [[package]] @@ -4632,11 +4634,13 @@ dependencies = [ "binascii", "bittorrent-primitives", "derive_more", + "rstest", "serde", "tdyne-peer-id", "tdyne-peer-id-registry", "thiserror 2.0.12", "torrust-tracker-configuration", + "url", "zerocopy 0.7.35", ] diff --git a/packages/axum-health-check-api-server/Cargo.toml b/packages/axum-health-check-api-server/Cargo.toml index 6766ce587..e0504f7df 100644 --- a/packages/axum-health-check-api-server/Cargo.toml +++ b/packages/axum-health-check-api-server/Cargo.toml @@ -24,6 +24,7 @@ tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal torrust-axum-server = { version = "3.0.0-develop", path = 
"../axum-server" } torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } tracing = "0" url = "2.5.4" diff --git a/packages/axum-health-check-api-server/src/handlers.rs b/packages/axum-health-check-api-server/src/handlers.rs index 66390f089..a26c901d7 100644 --- a/packages/axum-health-check-api-server/src/handlers.rs +++ b/packages/axum-health-check-api-server/src/handlers.rs @@ -31,8 +31,8 @@ pub(crate) async fn health_check_handler(State(register): State let jobs = checks.drain(..).map(|c| { tokio::spawn(async move { CheckReport { - listen_url: c.listen_url.clone(), - binding: c.binding, + service_binding: c.service_binding.url(), + binding: c.service_binding.bind_address(), info: c.info.clone(), service_type: c.service_type, result: c.job.await.expect("it should be able to join into the checking function"), diff --git a/packages/axum-health-check-api-server/src/resources.rs b/packages/axum-health-check-api-server/src/resources.rs index 24079b00f..44e64b24c 100644 --- a/packages/axum-health-check-api-server/src/resources.rs +++ b/packages/axum-health-check-api-server/src/resources.rs @@ -12,7 +12,7 @@ pub enum Status { #[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct CheckReport { - pub listen_url: Url, + pub service_binding: Url, pub binding: SocketAddr, pub service_type: String, pub info: String, diff --git a/packages/axum-health-check-api-server/src/server.rs b/packages/axum-health-check-api-server/src/server.rs index cc721f5eb..8b37f1828 100644 --- a/packages/axum-health-check-api-server/src/server.rs +++ b/packages/axum-health-check-api-server/src/server.rs @@ -18,6 +18,7 @@ use torrust_axum_server::signals::graceful_shutdown; use 
torrust_server_lib::logging::Latency; use torrust_server_lib::registar::ServiceRegistry; use torrust_server_lib::signals::{Halted, Started}; +use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use tower_http::classify::ServerErrorsFailureClass; use tower_http::compression::CompressionLayer; use tower_http::propagate_header::PropagateHeaderLayer; @@ -25,7 +26,6 @@ use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; use tower_http::trace::{DefaultMakeSpan, TraceLayer}; use tower_http::LatencyUnit; use tracing::{instrument, Level, Span}; -use url::Url; use crate::handlers::health_check_handler; use crate::HEALTH_CHECK_API_LOG_TARGET; @@ -102,9 +102,8 @@ pub fn start( let socket = std::net::TcpListener::bind(bind_to).expect("Could not bind tcp_listener to address."); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); - let protocol = "http"; // The health check API only supports HTTP directly now. Use a reverse proxy for HTTPS. - let listen_url = - Url::parse(&format!("{protocol}://{address}")).expect("Could not parse internal service url for health check API."); + let protocol = Protocol::HTTP; // The health check API only supports HTTP directly now. Use a reverse proxy for HTTPS. 
+ let service_binding = ServiceBinding::new(protocol.clone(), address).expect("Service binding creation failed"); let handle = Handle::new(); @@ -120,8 +119,11 @@ pub fn start( .handle(handle) .serve(router.into_make_service_with_connect_info::()); - tx.send(Started { listen_url, address }) - .expect("the Health Check API server should not be dropped"); + tx.send(Started { + service_binding, + address, + }) + .expect("the Health Check API server should not be dropped"); running } diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index eefb124e8..610f70020 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -7,13 +7,13 @@ use axum_server::Handle; use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use derive_more::Constructor; use futures::future::BoxFuture; -use reqwest::Url; use tokio::sync::oneshot::{Receiver, Sender}; use torrust_axum_server::custom_axum_server::{self, TimeoutAcceptor}; use torrust_axum_server::signals::graceful_shutdown; use torrust_server_lib::logging::STARTED_ON; use torrust_server_lib::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; use torrust_server_lib::signals::{Halted, Started}; +use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use tracing::instrument; use super::v1::routes::router; @@ -63,9 +63,8 @@ impl Launcher { )); let tls = self.tls.clone(); - let protocol = if tls.is_some() { "https" } else { "http" }; - let listen_url = - Url::parse(&format!("{protocol}://{address}")).expect("Could not parse internal service url for HTTP tracker."); + let protocol = if tls.is_some() { Protocol::HTTPS } else { Protocol::HTTP }; + let service_binding = ServiceBinding::new(protocol.clone(), address).expect("Service binding creation failed"); tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting on: {protocol}://{address}"); @@ -93,7 +92,10 
@@ impl Launcher { tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "{STARTED_ON}: {protocol}://{}", address); tx_start - .send(Started { listen_url, address }) + .send(Started { + service_binding, + address, + }) .expect("the HTTP(s) Tracker service should not be dropped"); running @@ -182,10 +184,10 @@ impl HttpServer { let started = rx_start.await.expect("it should be able to start the service"); - let listen_url = started.listen_url; + let listen_url = started.service_binding; let binding = started.address; - form.send(ServiceRegistration::new(listen_url, binding, check_fn)) + form.send(ServiceRegistration::new(listen_url, check_fn)) .expect("it should be able to send service registration"); Ok(HttpServer { @@ -226,8 +228,8 @@ impl HttpServer { /// This function will return an error if unable to connect. /// Or if the request returns an error. #[must_use] -pub fn check_fn(listen_url: &Url, binding: &SocketAddr) -> ServiceHealthCheckJob { - let url = format!("http://{binding}/health_check"); // DevSkim: ignore DS137138 +pub fn check_fn(service_binding: &ServiceBinding) -> ServiceHealthCheckJob { + let url = format!("http://{}/health_check", service_binding.bind_address()); // DevSkim: ignore DS137138 let info = format!("checking http tracker health check at: {url}"); @@ -238,7 +240,7 @@ pub fn check_fn(listen_url: &Url, binding: &SocketAddr) -> ServiceHealthCheckJob } }); - ServiceHealthCheckJob::new(listen_url.clone(), *binding, info, TYPE_STRING.to_string(), job) + ServiceHealthCheckJob::new(service_binding.clone(), info, TYPE_STRING.to_string(), job) } #[cfg(test)] diff --git a/packages/axum-rest-tracker-api-server/src/server.rs b/packages/axum-rest-tracker-api-server/src/server.rs index 20775dbc1..cbd4948ff 100644 --- a/packages/axum-rest-tracker-api-server/src/server.rs +++ b/packages/axum-rest-tracker-api-server/src/server.rs @@ -40,8 +40,8 @@ use torrust_server_lib::logging::STARTED_ON; use torrust_server_lib::registar::{ServiceHealthCheckJob, 
ServiceRegistration, ServiceRegistrationForm}; use torrust_server_lib::signals::{Halted, Started}; use torrust_tracker_configuration::AccessTokens; +use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use tracing::{instrument, Level}; -use url::Url; use super::routes::router; use crate::API_LOG_TARGET; @@ -149,7 +149,7 @@ impl ApiServer { let api_server = match rx_start.await { Ok(started) => { - form.send(ServiceRegistration::new(started.listen_url, started.address, check_fn)) + form.send(ServiceRegistration::new(started.service_binding, check_fn)) .expect("it should be able to send service registration"); ApiServer { @@ -196,8 +196,8 @@ impl ApiServer { /// Or if there request returns an error code. #[must_use] #[instrument(skip())] -pub fn check_fn(listen_url: &Url, binding: &SocketAddr) -> ServiceHealthCheckJob { - let url = format!("http://{binding}/api/health_check"); // DevSkim: ignore DS137138 +pub fn check_fn(service_binding: &ServiceBinding) -> ServiceHealthCheckJob { + let url = format!("http://{}/api/health_check", service_binding.bind_address()); // DevSkim: ignore DS137138 let info = format!("checking api health check at: {url}"); @@ -207,7 +207,7 @@ pub fn check_fn(listen_url: &Url, binding: &SocketAddr) -> ServiceHealthCheckJob Err(err) => Err(err.to_string()), } }); - ServiceHealthCheckJob::new(listen_url.clone(), *binding, info, TYPE_STRING.to_string(), job) + ServiceHealthCheckJob::new(service_binding.clone(), info, TYPE_STRING.to_string(), job) } /// A struct responsible for starting the API server. 
@@ -260,9 +260,8 @@ impl Launcher { )); let tls = self.tls.clone(); - let protocol = if tls.is_some() { "https" } else { "http" }; - let listen_url = - Url::parse(&format!("{protocol}://{address}")).expect("Could not parse internal service url for tracker API."); + let protocol = if tls.is_some() { Protocol::HTTPS } else { Protocol::HTTP }; + let service_binding = ServiceBinding::new(protocol.clone(), address).expect("Service binding creation failed"); tracing::info!(target: API_LOG_TARGET, "Starting on: {protocol}://{address}"); @@ -288,7 +287,10 @@ impl Launcher { tracing::info!(target: API_LOG_TARGET, "{STARTED_ON}: {protocol}://{}", address); tx_start - .send(Started { listen_url, address }) + .send(Started { + service_binding, + address, + }) .expect("the HTTP(s) Tracker API service should not be dropped"); running diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index 1396d8bc8..21fab09bf 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -24,4 +24,8 @@ tdyne-peer-id = "1" tdyne-peer-id-registry = "0" thiserror = "2" torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +url = "2.5.4" zerocopy = "0.7" + +[dev-dependencies] +rstest = "0.25.0" diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index b50516893..c901e5276 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -7,6 +7,7 @@ pub mod core; pub mod pagination; pub mod peer; +pub mod service_binding; pub mod swarm_metadata; use std::collections::BTreeMap; diff --git a/packages/primitives/src/service_binding.rs b/packages/primitives/src/service_binding.rs new file mode 100644 index 000000000..dbbb32fd5 --- /dev/null +++ b/packages/primitives/src/service_binding.rs @@ -0,0 +1,188 @@ +use std::fmt; +use std::net::SocketAddr; + +use serde::{Deserialize, Serialize}; +use url::Url; + +/// Represents the supported network protocols. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] +pub enum Protocol { + UDP, + HTTP, + HTTPS, +} + +impl fmt::Display for Protocol { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let proto_str = match self { + Protocol::UDP => "udp", + Protocol::HTTP => "http", + Protocol::HTTPS => "https", + }; + write!(f, "{proto_str}") + } +} + +#[derive(thiserror::Error, Debug, Clone)] +pub enum Error { + #[error("The port number cannot be zero. It must be an assigned valid port.")] + PortZeroNotAllowed, +} + +/// Represents a network service binding, encapsulating protocol and socket +/// address. +/// +/// This struct is used to define how a service binds to a network interface and +/// port. +/// +/// It's an URL without path and some restrictions: +/// +/// - Only some schemes are accepted: `udp`, `http`, `https`. +/// - The port number must be greater than zero. The service should be already +/// listening on that port. +/// - The authority part of the URL must be a valid socket address (wildcard is +/// accepted). +/// +/// Besides it accepts some non well-formed URLs, like: +/// or . Those URLs are not valid because they use non +/// standard ports (80 and 443). +/// +/// NOTICE: It does not represent a public valid URL clients can connect to. It +/// represents the service's internal URL configuration after assigning a port. +/// If the port in the configuration is not zero, it's basically the same +/// information you get from the configuration (binding address + protocol). 
+/// +/// # Examples +/// +/// ``` +/// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +/// use torrust_tracker_primitives::service_binding::{ServiceBinding, Protocol}; +/// +/// let service_binding = ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(); +/// +/// assert_eq!(service_binding.url().to_string(), "http://127.0.0.1:7070/".to_string()); +/// ``` +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] +pub struct ServiceBinding { + /// The network protocol used by the service (UDP, HTTP, HTTPS). + protocol: Protocol, + + /// The socket address (IP and port) to which the service binds. + bind_address: SocketAddr, +} + +impl ServiceBinding { + /// # Errors + /// + /// This function will return an error if the port number is zero. + pub fn new(protocol: Protocol, bind_address: SocketAddr) -> Result { + if bind_address.port() == 0 { + return Err(Error::PortZeroNotAllowed); + } + + Ok(Self { protocol, bind_address }) + } + + #[must_use] + pub fn bind_address(&self) -> SocketAddr { + self.bind_address + } + + /// # Panics + /// + /// It never panics because the URL is always valid. 
+ #[must_use] + pub fn url(&self) -> Url { + Url::parse(&format!("{}://{}", self.protocol, self.bind_address)) + .expect("Service binding can always be parsed into a URL") + } +} + +impl From for Url { + fn from(service_binding: ServiceBinding) -> Self { + service_binding.url() + } +} + +impl fmt::Display for ServiceBinding { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.url()) + } +} + +#[cfg(test)] +mod tests { + + mod the_service_binding { + use std::net::SocketAddr; + use std::str::FromStr; + + use rstest::rstest; + use url::Url; + + use crate::service_binding::{Error, Protocol, ServiceBinding}; + + #[rstest] + #[case("wildcard_ip", Protocol::UDP, SocketAddr::from_str("0.0.0.0:6969").unwrap())] + #[case("udp_service", Protocol::UDP, SocketAddr::from_str("127.0.0.1:6969").unwrap())] + #[case("http_service", Protocol::HTTP, SocketAddr::from_str("127.0.0.1:7070").unwrap())] + #[case("https_service", Protocol::HTTPS, SocketAddr::from_str("127.0.0.1:7070").unwrap())] + fn should_allow_a_subset_of_urls(#[case] case: &str, #[case] protocol: Protocol, #[case] bind_address: SocketAddr) { + let service_binding = ServiceBinding::new(protocol.clone(), bind_address); + + assert!(service_binding.is_ok(), "{}", format!("{case} failed: {service_binding:?}")); + } + + #[test] + fn should_not_allow_undefined_port_zero() { + let service_binding = ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("127.0.0.1:0").unwrap()); + + assert!(matches!(service_binding, Err(Error::PortZeroNotAllowed))); + } + + #[test] + fn should_return_the_bind_address() { + let service_binding = ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("127.0.0.1:6969").unwrap()).unwrap(); + + assert_eq!( + service_binding.bind_address(), + SocketAddr::from_str("127.0.0.1:6969").unwrap() + ); + } + + #[test] + fn should_return_the_corresponding_url() { + let service_binding = ServiceBinding::new(Protocol::UDP, 
SocketAddr::from_str("127.0.0.1:6969").unwrap()).unwrap(); + + assert_eq!(service_binding.url(), Url::parse("udp://127.0.0.1:6969").unwrap()); + } + + #[test] + fn should_be_converted_into_an_url() { + let service_binding = ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("127.0.0.1:6969").unwrap()).unwrap(); + + let url: Url = service_binding.clone().into(); + + assert_eq!(url, Url::parse("udp://127.0.0.1:6969").unwrap()); + } + + #[rstest] + #[case("udp_service", Protocol::UDP, SocketAddr::from_str("127.0.0.1:6969").unwrap(), "udp://127.0.0.1:6969")] + #[case("http_service", Protocol::HTTP, SocketAddr::from_str("127.0.0.1:7070").unwrap(), "http://127.0.0.1:7070/")] + #[case("https_service", Protocol::HTTPS, SocketAddr::from_str("127.0.0.1:7070").unwrap(), "https://127.0.0.1:7070/")] + fn should_always_have_a_corresponding_unique_url( + #[case] case: &str, + #[case] protocol: Protocol, + #[case] bind_address: SocketAddr, + #[case] expected_url: String, + ) { + let service_binding = ServiceBinding::new(protocol.clone(), bind_address).unwrap(); + + assert_eq!( + service_binding.url().to_string(), + expected_url, + "{case} failed: {service_binding:?}", + ); + } + } +} diff --git a/packages/server-lib/Cargo.toml b/packages/server-lib/Cargo.toml index 25a63e99d..1d30e7fb5 100644 --- a/packages/server-lib/Cargo.toml +++ b/packages/server-lib/Cargo.toml @@ -16,8 +16,9 @@ version.workspace = true [dependencies] derive_more = { version = "2", features = ["as_ref", "constructor", "display", "from"] } tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } tracing = "0" -url = "2.5.4" [dev-dependencies] +rstest = "0.25.0" diff --git a/packages/server-lib/src/registar.rs b/packages/server-lib/src/registar.rs index 9d36cf1fe..efa94034b 
100644 --- a/packages/server-lib/src/registar.rs +++ b/packages/server-lib/src/registar.rs @@ -1,13 +1,12 @@ //! Registar. Registers Services for Health Check. use std::collections::HashMap; -use std::net::SocketAddr; use std::sync::Arc; use derive_more::Constructor; use tokio::sync::Mutex; use tokio::task::JoinHandle; -use url::Url; +use torrust_tracker_primitives::service_binding::ServiceBinding; /// A [`ServiceHeathCheckResult`] is returned by a completed health check. pub type ServiceHeathCheckResult = Result; @@ -17,8 +16,7 @@ pub type ServiceHeathCheckResult = Result; /// The `job` awaits a [`ServiceHeathCheckResult`]. #[derive(Debug, Constructor)] pub struct ServiceHealthCheckJob { - pub listen_url: Url, - pub binding: SocketAddr, + pub service_binding: ServiceBinding, pub info: String, pub service_type: String, pub job: JoinHandle, @@ -27,22 +25,21 @@ pub struct ServiceHealthCheckJob { /// The function specification [`FnSpawnServiceHeathCheck`]. /// /// A function fulfilling this specification will spawn a new [`ServiceHealthCheckJob`]. -pub type FnSpawnServiceHeathCheck = fn(&Url, &SocketAddr) -> ServiceHealthCheckJob; +pub type FnSpawnServiceHeathCheck = fn(&ServiceBinding) -> ServiceHealthCheckJob; /// A [`ServiceRegistration`] is provided to the [`Registar`] for registration. /// /// Each registration includes a function that fulfils the [`FnSpawnServiceHeathCheck`] specification. #[derive(Clone, Debug, Constructor)] pub struct ServiceRegistration { - listen_url: Url, - binding: SocketAddr, + service_binding: ServiceBinding, check_fn: FnSpawnServiceHeathCheck, } impl ServiceRegistration { #[must_use] pub fn spawn_check(&self) -> ServiceHealthCheckJob { - (self.check_fn)(&self.listen_url, &self.binding) + (self.check_fn)(&self.service_binding) } } @@ -50,7 +47,7 @@ impl ServiceRegistration { pub type ServiceRegistrationForm = tokio::sync::oneshot::Sender; /// The [`ServiceRegistry`] contains each unique [`ServiceRegistration`] by it's [`SocketAddr`]. 
-pub type ServiceRegistry = Arc>>; +pub type ServiceRegistry = Arc>>; /// The [`Registar`] manages the [`ServiceRegistry`]. #[derive(Clone, Debug)] @@ -93,7 +90,7 @@ impl Registar { let mut mutex = self.registry.lock().await; - mutex.insert(service_registration.binding, service_registration); + mutex.insert(service_registration.service_binding.clone(), service_registration); } /// Returns the [`ServiceRegistry`] of services diff --git a/packages/server-lib/src/signals.rs b/packages/server-lib/src/signals.rs index 94ee474ea..581729e57 100644 --- a/packages/server-lib/src/signals.rs +++ b/packages/server-lib/src/signals.rs @@ -1,14 +1,14 @@ //! This module contains functions to handle signals. use derive_more::Display; +use torrust_tracker_primitives::service_binding::ServiceBinding; use tracing::instrument; -use url::Url; /// This is the message that the "launcher" spawned task sends to the main /// application process to notify the service was successfully started. /// #[derive(Debug)] pub struct Started { - pub listen_url: Url, + pub service_binding: ServiceBinding, pub address: std::net::SocketAddr, } diff --git a/packages/tracker-client/src/udp/client.rs b/packages/tracker-client/src/udp/client.rs index 89a33726d..1c5ffd901 100644 --- a/packages/tracker-client/src/udp/client.rs +++ b/packages/tracker-client/src/udp/client.rs @@ -8,6 +8,7 @@ use aquatic_udp_protocol::{ConnectRequest, Request, Response, TransactionId}; use tokio::net::UdpSocket; use tokio::time; use torrust_tracker_configuration::DEFAULT_TIMEOUT; +use torrust_tracker_primitives::service_binding::ServiceBinding; use zerocopy::network_endian::I32; use super::Error; @@ -230,10 +231,12 @@ impl UdpTrackerClient { /// /// # Errors /// -pub async fn check(remote_addr: &SocketAddr) -> Result { +pub async fn check(service_binding: &ServiceBinding) -> Result { + let remote_addr = service_binding.bind_address(); + tracing::debug!("Checking Service (detail): {remote_addr:?}."); - match 
UdpTrackerClient::new(*remote_addr, DEFAULT_TIMEOUT).await { + match UdpTrackerClient::new(remote_addr, DEFAULT_TIMEOUT).await { Ok(client) => { let connect_request = ConnectRequest { transaction_id: TransactionId(I32::new(123)), diff --git a/packages/udp-tracker-server/src/server/bound_socket.rs b/packages/udp-tracker-server/src/server/bound_socket.rs index 988bfb67f..6b81545d2 100644 --- a/packages/udp-tracker-server/src/server/bound_socket.rs +++ b/packages/udp-tracker-server/src/server/bound_socket.rs @@ -3,6 +3,7 @@ use std::net::SocketAddr; use std::ops::Deref; use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; +use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use url::Url; /// Wrapper for Tokio [`UdpSocket`][`tokio::net::UdpSocket`] that is bound to a particular socket. @@ -47,6 +48,15 @@ impl BoundSocket { pub fn url(&self) -> Url { Url::parse(&format!("udp://{}", self.address())).expect("UDP socket address should be valid") } + + /// # Panics + /// + /// It should never panic because the conversion to a [`ServiceBinding`] + /// is infallible. 
+ #[must_use] + pub fn service_binding(&self) -> ServiceBinding { + ServiceBinding::new(Protocol::UDP, self.address()).expect("Conversion to ServiceBinding should not fail") + } } impl Deref for BoundSocket { diff --git a/packages/udp-tracker-server/src/server/launcher.rs b/packages/udp-tracker-server/src/server/launcher.rs index 67e2ceed6..fd689a96f 100644 --- a/packages/udp-tracker-server/src/server/launcher.rs +++ b/packages/udp-tracker-server/src/server/launcher.rs @@ -13,8 +13,8 @@ use tokio::time::interval; use torrust_server_lib::logging::STARTED_ON; use torrust_server_lib::registar::ServiceHealthCheckJob; use torrust_server_lib::signals::{shutdown_signal_with_message, Halted, Started}; +use torrust_tracker_primitives::service_binding::ServiceBinding; use tracing::instrument; -use url::Url; use super::request_buffer::ActiveRequests; use crate::container::UdpTrackerServerContainer; @@ -66,7 +66,7 @@ impl Launcher { } }; - let listen_url = bound_socket.url().clone(); + let service_binding = bound_socket.service_binding().clone(); let address = bound_socket.address(); let local_udp_url = bound_socket.url().to_string(); @@ -91,7 +91,10 @@ impl Launcher { }; tx_start - .send(Started { listen_url, address }) + .send(Started { + service_binding, + address, + }) .expect("the UDP Tracker service should not be dropped"); tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (started)"); @@ -113,14 +116,15 @@ impl Launcher { } #[must_use] - #[instrument(skip(binding))] - pub fn check(listen_url: &Url, binding: &SocketAddr) -> ServiceHealthCheckJob { - let binding = *binding; - let info = format!("checking the udp tracker health check at: {binding}"); + #[instrument(skip(service_binding))] + pub fn check(service_binding: &ServiceBinding) -> ServiceHealthCheckJob { + let info = format!("checking the udp tracker health check at: {}", service_binding.bind_address()); + + let service_binding_clone = service_binding.clone(); - let 
job = tokio::spawn(async move { check(&binding).await }); + let job = tokio::spawn(async move { check(&service_binding_clone).await }); - ServiceHealthCheckJob::new(listen_url.clone(), binding, info, TYPE_STRING.to_string(), job) + ServiceHealthCheckJob::new(service_binding.clone(), info, TYPE_STRING.to_string(), job) } #[instrument(skip(receiver, udp_tracker_core_container, udp_tracker_server_container))] diff --git a/packages/udp-tracker-server/src/server/states.rs b/packages/udp-tracker-server/src/server/states.rs index f10a02fb7..4ad059095 100644 --- a/packages/udp-tracker-server/src/server/states.rs +++ b/packages/udp-tracker-server/src/server/states.rs @@ -85,10 +85,10 @@ impl Server { let started = rx_start.await.expect("it should be able to start the service"); - let listen_url = started.listen_url; + let service_binding = started.service_binding; let local_addr = started.address; - form.send(ServiceRegistration::new(listen_url, local_addr, Launcher::check)) + form.send(ServiceRegistration::new(service_binding, Launcher::check)) .expect("it should be able to send service registration"); let running_udp_server: Server = Server { From 8c4403300418f3bb67ab738e9015fbad7e6f0e37 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 31 Mar 2025 11:55:00 +0100 Subject: [PATCH 433/802] feat: [#1424] add ServiceBinding to HTTP core events --- .../axum-http-tracker-server/src/server.rs | 2 +- .../src/v1/handlers/announce.rs | 33 ++++++++----- .../src/v1/handlers/scrape.rs | 39 +++++++++++---- .../axum-http-tracker-server/src/v1/routes.rs | 24 ++++++--- .../http-tracker-core/benches/helpers/sync.rs | 4 +- packages/http-tracker-core/src/event/mod.rs | 10 ++-- .../src/services/announce.rs | 39 ++++++++++----- .../http-tracker-core/src/services/scrape.rs | 49 +++++++++++++------ .../src/statistics/event/handler.rs | 10 ++-- 9 files changed, 144 insertions(+), 66 deletions(-) diff --git a/packages/axum-http-tracker-server/src/server.rs 
b/packages/axum-http-tracker-server/src/server.rs index 610f70020..eea00c142 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -68,7 +68,7 @@ impl Launcher { tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting on: {protocol}://{address}"); - let app = router(http_tracker_container, address); + let app = router(http_tracker_container, service_binding.clone()); let running = Box::pin(async { match tls { diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 53fd38997..296cefcd5 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -2,7 +2,6 @@ //! //! The handlers perform the authentication and authorization of the request, //! and resolve the client IP address. -use std::net::SocketAddr; use std::sync::Arc; use axum::extract::State; @@ -14,6 +13,7 @@ use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSo use bittorrent_tracker_core::authentication::Key; use hyper::StatusCode; use torrust_tracker_primitives::core::AnnounceData; +use torrust_tracker_primitives::service_binding::ServiceBinding; use crate::v1::extractors::announce_request::ExtractRequest; use crate::v1::extractors::authentication_key::Extract as ExtractKey; @@ -23,7 +23,7 @@ use crate::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; /// authentication (no PATH `key` parameter required). #[allow(clippy::unused_async)] pub async fn handle_without_key( - State(state): State<(Arc, SocketAddr)>, + State(state): State<(Arc, ServiceBinding)>, ExtractRequest(announce_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ) -> Response { @@ -36,7 +36,7 @@ pub async fn handle_without_key( /// authentication (PATH `key` parameter required). 
#[allow(clippy::unused_async)] pub async fn handle_with_key( - State(state): State<(Arc, SocketAddr)>, + State(state): State<(Arc, ServiceBinding)>, ExtractRequest(announce_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ExtractKey(key): ExtractKey, @@ -54,14 +54,14 @@ async fn handle( announce_service: &Arc, announce_request: &Announce, client_ip_sources: &ClientIpSources, - server_socket_addr: &SocketAddr, + server_service_binding: &ServiceBinding, maybe_key: Option, ) -> Response { let announce_data = match handle_announce( announce_service, announce_request, client_ip_sources, - server_socket_addr, + server_service_binding, maybe_key, ) .await @@ -81,11 +81,11 @@ async fn handle_announce( announce_service: &Arc, announce_request: &Announce, client_ip_sources: &ClientIpSources, - server_socket_addr: &SocketAddr, + server_service_binding: &ServiceBinding, maybe_key: Option, ) -> Result { announce_service - .handle_announce(announce_request, client_ip_sources, server_socket_addr, maybe_key) + .handle_announce(announce_request, client_ip_sources, server_service_binding, maybe_key) .await } @@ -212,6 +212,7 @@ mod tests { use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_tracker_core::authentication; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::{initialize_private_tracker, sample_announce_request, sample_client_ip_sources}; use crate::v1::handlers::announce::handle_announce; @@ -222,6 +223,7 @@ mod tests { let http_core_tracker_services = initialize_private_tracker(); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let maybe_key = None; @@ -229,7 +231,7 @@ mod tests { &http_core_tracker_services.announce_service, &sample_announce_request(), &sample_client_ip_sources(), - &server_socket_addr, + &server_service_binding, 
maybe_key, ) .await @@ -252,6 +254,7 @@ mod tests { let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let maybe_key = Some(unregistered_key); @@ -259,7 +262,7 @@ mod tests { &http_core_tracker_services.announce_service, &sample_announce_request(), &sample_client_ip_sources(), - &server_socket_addr, + &server_service_binding, maybe_key, ) .await @@ -281,6 +284,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use bittorrent_http_tracker_protocol::v1::responses; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::{initialize_listed_tracker, sample_announce_request, sample_client_ip_sources}; use crate::v1::handlers::announce::handle_announce; @@ -293,12 +297,13 @@ mod tests { let announce_request = sample_announce_request(); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let response = handle_announce( &http_core_tracker_services.announce_service, &announce_request, &sample_client_ip_sources(), - &server_socket_addr, + &server_service_binding, None, ) .await @@ -324,6 +329,7 @@ mod tests { use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::{initialize_tracker_on_reverse_proxy, sample_announce_request}; use crate::v1::handlers::announce::handle_announce; @@ -339,12 +345,13 @@ mod tests { }; let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); 
let response = handle_announce( &http_core_tracker_services.announce_service, &sample_announce_request(), &client_ip_sources, - &server_socket_addr, + &server_service_binding, None, ) .await @@ -367,6 +374,7 @@ mod tests { use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::{initialize_tracker_not_on_reverse_proxy, sample_announce_request}; use crate::v1::handlers::announce::handle_announce; @@ -382,12 +390,13 @@ mod tests { }; let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let response = handle_announce( &http_core_tracker_services.announce_service, &sample_announce_request(), &client_ip_sources, - &server_socket_addr, + &server_service_binding, None, ) .await diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index e9544c983..e5d94a072 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -2,7 +2,6 @@ //! //! The handlers perform the authentication and authorization of the request, //! and resolve the client IP address. 
-use std::net::SocketAddr; use std::sync::Arc; use axum::extract::State; @@ -14,6 +13,7 @@ use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSo use bittorrent_tracker_core::authentication::Key; use hyper::StatusCode; use torrust_tracker_primitives::core::ScrapeData; +use torrust_tracker_primitives::service_binding::ServiceBinding; use crate::v1::extractors::authentication_key::Extract as ExtractKey; use crate::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; @@ -23,7 +23,7 @@ use crate::v1::extractors::scrape_request::ExtractRequest; /// to run in `public` mode. #[allow(clippy::unused_async)] pub async fn handle_without_key( - State(state): State<(Arc, SocketAddr)>, + State(state): State<(Arc, ServiceBinding)>, ExtractRequest(scrape_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ) -> Response { @@ -38,7 +38,7 @@ pub async fn handle_without_key( /// In this case, the authentication `key` parameter is required. 
#[allow(clippy::unused_async)] pub async fn handle_with_key( - State(state): State<(Arc, SocketAddr)>, + State(state): State<(Arc, ServiceBinding)>, ExtractRequest(scrape_request): ExtractRequest, ExtractClientIpSources(client_ip_sources): ExtractClientIpSources, ExtractKey(key): ExtractKey, @@ -52,11 +52,11 @@ async fn handle( scrape_service: &Arc, scrape_request: &Scrape, client_ip_sources: &ClientIpSources, - server_socket_addr: &SocketAddr, + server_service_binding: &ServiceBinding, maybe_key: Option, ) -> Response { let scrape_data = match scrape_service - .handle_scrape(scrape_request, client_ip_sources, server_socket_addr, maybe_key) + .handle_scrape(scrape_request, client_ip_sources, server_service_binding, maybe_key) .await { Ok(scrape_data) => scrape_data, @@ -173,12 +173,14 @@ mod tests { use bittorrent_http_tracker_core::services::scrape::ScrapeService; use bittorrent_tracker_core::authentication; use torrust_tracker_primitives::core::ScrapeData; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::{initialize_private_tracker, sample_client_ip_sources, sample_scrape_request}; #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_missing() { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let (core_tracker_services, core_http_tracker_services) = initialize_private_tracker(); @@ -193,7 +195,12 @@ mod tests { ); let scrape_data = scrape_service - .handle_scrape(&scrape_request, &sample_client_ip_sources(), &server_socket_addr, maybe_key) + .handle_scrape( + &scrape_request, + &sample_client_ip_sources(), + &server_service_binding, + maybe_key, + ) .await .unwrap(); @@ -205,6 +212,7 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_invalid() { let server_socket_addr = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let (core_tracker_services, core_http_tracker_services) = initialize_private_tracker(); @@ -220,7 +228,12 @@ mod tests { ); let scrape_data = scrape_service - .handle_scrape(&scrape_request, &sample_client_ip_sources(), &server_socket_addr, maybe_key) + .handle_scrape( + &scrape_request, + &sample_client_ip_sources(), + &server_service_binding, + maybe_key, + ) .await .unwrap(); @@ -236,6 +249,7 @@ mod tests { use bittorrent_http_tracker_core::services::scrape::ScrapeService; use torrust_tracker_primitives::core::ScrapeData; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::{initialize_listed_tracker, sample_client_ip_sources, sample_scrape_request}; @@ -246,6 +260,7 @@ mod tests { let scrape_request = sample_scrape_request(); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = ScrapeService::new( core_tracker_services.core_config.clone(), @@ -255,7 +270,7 @@ mod tests { ); let scrape_data = scrape_service - .handle_scrape(&scrape_request, &sample_client_ip_sources(), &server_socket_addr, None) + .handle_scrape(&scrape_request, &sample_client_ip_sources(), &server_service_binding, None) .await .unwrap(); @@ -272,6 +287,7 @@ mod tests { use bittorrent_http_tracker_core::services::scrape::ScrapeService; use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::{initialize_tracker_on_reverse_proxy, sample_scrape_request}; use crate::v1::handlers::scrape::tests::assert_error_response; @@ -286,6 +302,7 @@ mod tests { }; let server_socket_addr = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = ScrapeService::new( core_tracker_services.core_config.clone(), @@ -295,7 +312,7 @@ mod tests { ); let response = scrape_service - .handle_scrape(&sample_scrape_request(), &client_ip_sources, &server_socket_addr, None) + .handle_scrape(&sample_scrape_request(), &client_ip_sources, &server_service_binding, None) .await .unwrap_err(); @@ -317,6 +334,7 @@ mod tests { use bittorrent_http_tracker_core::services::scrape::ScrapeService; use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::{initialize_tracker_not_on_reverse_proxy, sample_scrape_request}; use crate::v1::handlers::scrape::tests::assert_error_response; @@ -331,6 +349,7 @@ mod tests { }; let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = ScrapeService::new( core_tracker_services.core_config.clone(), @@ -340,7 +359,7 @@ mod tests { ); let response = scrape_service - .handle_scrape(&sample_scrape_request(), &client_ip_sources, &server_socket_addr, None) + .handle_scrape(&sample_scrape_request(), &client_ip_sources, &server_service_binding, None) .await .unwrap_err(); diff --git a/packages/axum-http-tracker-server/src/v1/routes.rs b/packages/axum-http-tracker-server/src/v1/routes.rs index d5907887e..3fe467a0d 100644 --- a/packages/axum-http-tracker-server/src/v1/routes.rs +++ b/packages/axum-http-tracker-server/src/v1/routes.rs @@ -1,5 +1,4 @@ //! HTTP server routes for version `v1`. 
-use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; @@ -13,6 +12,7 @@ use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use hyper::{Request, StatusCode}; use torrust_server_lib::logging::Latency; use torrust_tracker_configuration::DEFAULT_TIMEOUT; +use torrust_tracker_primitives::service_binding::ServiceBinding; use tower::timeout::TimeoutLayer; use tower::ServiceBuilder; use tower_http::classify::ServerErrorsFailureClass; @@ -30,28 +30,38 @@ use crate::HTTP_TRACKER_LOG_TARGET; /// /// > **NOTICE**: it's added a layer to get the client IP from the connection /// > info. The tracker could use the connection info to get the client IP. -#[instrument(skip(http_tracker_container, server_socket_addr))] -pub fn router(http_tracker_container: Arc, server_socket_addr: SocketAddr) -> Router { +#[instrument(skip(http_tracker_container, server_service_binding))] +pub fn router(http_tracker_container: Arc, server_service_binding: ServiceBinding) -> Router { + let server_socket_addr = server_service_binding.bind_address(); + Router::new() // Health check .route("/health_check", get(health_check::handler)) // Announce request .route( "/announce", - get(announce::handle_without_key).with_state((http_tracker_container.announce_service.clone(), server_socket_addr)), + get(announce::handle_without_key).with_state(( + http_tracker_container.announce_service.clone(), + server_service_binding.clone(), + )), ) .route( "/announce/{key}", - get(announce::handle_with_key).with_state((http_tracker_container.announce_service.clone(), server_socket_addr)), + get(announce::handle_with_key).with_state(( + http_tracker_container.announce_service.clone(), + server_service_binding.clone(), + )), ) // Scrape request .route( "/scrape", - get(scrape::handle_without_key).with_state((http_tracker_container.scrape_service.clone(), server_socket_addr)), + get(scrape::handle_without_key) + .with_state((http_tracker_container.scrape_service.clone(), 
server_service_binding.clone())), ) .route( "/scrape/{key}", - get(scrape::handle_with_key).with_state((http_tracker_container.scrape_service.clone(), server_socket_addr)), + get(scrape::handle_with_key) + .with_state((http_tracker_container.scrape_service.clone(), server_service_binding.clone())), ) // Add extension to get the client IP from the connection info .layer(SecureClientIpSource::ConnectInfo.into_extension()) diff --git a/packages/http-tracker-core/benches/helpers/sync.rs b/packages/http-tracker-core/benches/helpers/sync.rs index 9d41c2459..e0f022108 100644 --- a/packages/http-tracker-core/benches/helpers/sync.rs +++ b/packages/http-tracker-core/benches/helpers/sync.rs @@ -2,6 +2,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::time::{Duration, Instant}; use bittorrent_http_tracker_core::services::announce::AnnounceService; +use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::helpers::util::{initialize_core_tracker_services, sample_announce_request_for_peer, sample_peer}; @@ -22,12 +23,13 @@ pub async fn return_announce_data_once(samples: u64) -> Duration { ); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let start = Instant::now(); for _ in 0..samples { let _announce_data = announce_service - .handle_announce(&announce_request, &client_ip_sources, &server_socket_addr, None) + .handle_announce(&announce_request, &client_ip_sources, &server_service_binding, None) .await .unwrap(); } diff --git a/packages/http-tracker-core/src/event/mod.rs b/packages/http-tracker-core/src/event/mod.rs index 3db258238..7caf8a596 100644 --- a/packages/http-tracker-core/src/event/mod.rs +++ b/packages/http-tracker-core/src/event/mod.rs @@ -1,5 +1,7 @@ use std::net::{IpAddr, SocketAddr}; +use torrust_tracker_primitives::service_binding::ServiceBinding; + pub mod sender; /// A HTTP core event. 
@@ -17,14 +19,14 @@ pub struct ConnectionContext { impl ConnectionContext { #[must_use] - pub fn new(client_ip_addr: IpAddr, opt_client_port: Option, server_socket_addr: SocketAddr) -> Self { + pub fn new(client_ip_addr: IpAddr, opt_client_port: Option, server_service_binding: ServiceBinding) -> Self { Self { client: ClientConnectionContext { ip_addr: client_ip_addr, port: opt_client_port, }, server: ServerConnectionContext { - socket_addr: server_socket_addr, + service_binding: server_service_binding, }, } } @@ -41,7 +43,7 @@ impl ConnectionContext { #[must_use] pub fn server_socket_addr(&self) -> SocketAddr { - self.server.socket_addr + self.server.service_binding.bind_address() } } @@ -55,5 +57,5 @@ pub struct ClientConnectionContext { #[derive(Debug, PartialEq, Eq, Clone)] pub struct ServerConnectionContext { - socket_addr: SocketAddr, + service_binding: ServiceBinding, } diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index f8d2e0b11..c249cb4db 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -7,7 +7,7 @@ //! //! It also sends an [`http_tracker_core::event::Event`] //! because events are specific for the HTTP tracker. 
-use std::net::{IpAddr, SocketAddr}; +use std::net::IpAddr; use std::panic::Location; use std::sync::Arc; @@ -21,6 +21,7 @@ use bittorrent_tracker_core::error::{AnnounceError, TrackerCoreError, WhitelistE use bittorrent_tracker_core::whitelist; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; +use torrust_tracker_primitives::service_binding::ServiceBinding; use crate::event; use crate::event::Event; @@ -69,7 +70,7 @@ impl AnnounceService { &self, announce_request: &Announce, client_ip_sources: &ClientIpSources, - server_socket_addr: &SocketAddr, + server_service_binding: &ServiceBinding, maybe_key: Option, ) -> Result { self.authenticate(maybe_key).await?; @@ -87,7 +88,7 @@ impl AnnounceService { .announce(&announce_request.info_hash, &mut peer, &remote_client_ip, &peers_wanted) .await?; - self.send_event(remote_client_ip, opt_remote_client_port, *server_socket_addr) + self.send_event(remote_client_ip, opt_remote_client_port, server_service_binding.clone()) .await; Ok(announce_data) @@ -138,11 +139,11 @@ impl AnnounceService { } } - async fn send_event(&self, peer_ip: IpAddr, opt_peer_ip_port: Option, server_socket_addr: SocketAddr) { + async fn send_event(&self, peer_ip: IpAddr, opt_peer_ip_port: Option, server_service_binding: ServiceBinding) { if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { http_stats_event_sender .send_event(Event::TcpAnnounce { - connection: event::ConnectionContext::new(peer_ip, opt_peer_ip_port, server_socket_addr), + connection: event::ConnectionContext::new(peer_ip, opt_peer_ip_port, server_service_binding), }) .await; } @@ -338,6 +339,7 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use 
torrust_tracker_test_helpers::configuration; @@ -359,6 +361,7 @@ mod tests { let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let announce_service = AnnounceService::new( core_tracker_services.core_config.clone(), @@ -369,7 +372,7 @@ mod tests { ); let announce_data = announce_service - .handle_announce(&announce_request, &client_ip_sources, &server_socket_addr, None) + .handle_announce(&announce_request, &client_ip_sources, &server_service_binding, None) .await .unwrap(); @@ -389,12 +392,17 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_4_announce_event_when_the_peer_uses_ipv4() { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() .with(eq(Event::TcpAnnounce { - connection: ConnectionContext::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), Some(8080), server_socket_addr), + connection: ConnectionContext::new( + IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), + Some(8080), + server_service_binding.clone(), + ), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -418,7 +426,7 @@ mod tests { ); let _announce_data = announce_service - .handle_announce(&announce_request, &client_ip_sources, &server_socket_addr, None) + .handle_announce(&announce_request, &client_ip_sources, &server_service_binding, None) .await .unwrap(); } @@ -444,13 +452,18 @@ mod tests { // Tracker changes the peer IP to the tracker external IP when the peer is using the loopback IP. 
let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); // Assert that the event sent is a TCP4 event let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() .with(eq(Event::TcpAnnounce { - connection: ConnectionContext::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), Some(8080), server_socket_addr), + connection: ConnectionContext::new( + IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), + Some(8080), + server_service_binding.clone(), + ), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -475,7 +488,7 @@ mod tests { ); let _announce_data = announce_service - .handle_announce(&announce_request, &client_ip_sources, &server_socket_addr, None) + .handle_announce(&announce_request, &client_ip_sources, &server_service_binding, None) .await .unwrap(); } @@ -484,6 +497,7 @@ mod tests { async fn it_should_send_the_tcp_6_announce_event_when_the_peer_uses_ipv6_even_if_the_tracker_changes_the_peer_ip_to_ipv4() { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock @@ -492,7 +506,7 @@ mod tests { connection: ConnectionContext::new( IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), Some(8080), - server_socket_addr, + server_service_binding, ), })) .times(1) @@ -516,9 +530,10 @@ mod tests { ); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let _announce_data = announce_service - .handle_announce(&announce_request, &client_ip_sources, &server_socket_addr, None) + 
.handle_announce(&announce_request, &client_ip_sources, &server_service_binding, None) .await .unwrap(); } diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index c9b3182f8..baa406e63 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -7,7 +7,7 @@ //! //! It also sends an [`http_tracker_core::statistics::event::Event`] //! because events are specific for the HTTP tracker. -use std::net::{IpAddr, SocketAddr}; +use std::net::IpAddr; use std::sync::Arc; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; @@ -18,6 +18,7 @@ use bittorrent_tracker_core::error::{ScrapeError, TrackerCoreError, WhitelistErr use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; +use torrust_tracker_primitives::service_binding::ServiceBinding; use crate::event; use crate::event::{ConnectionContext, Event}; @@ -71,7 +72,7 @@ impl ScrapeService { &self, scrape_request: &Scrape, client_ip_sources: &ClientIpSources, - server_socket_addr: &SocketAddr, + server_service_binding: &ServiceBinding, maybe_key: Option, ) -> Result { let scrape_data = if self.authentication_is_required() && !self.is_authenticated(maybe_key).await { @@ -82,7 +83,8 @@ impl ScrapeService { let (remote_client_ip, opt_client_port) = self.resolve_remote_client_ip(client_ip_sources)?; - self.send_event(remote_client_ip, opt_client_port, *server_socket_addr).await; + self.send_event(remote_client_ip, opt_client_port, server_service_binding.clone()) + .await; Ok(scrape_data) } @@ -117,11 +119,16 @@ impl ScrapeService { Ok((ip, port)) } - async fn send_event(&self, original_peer_ip: IpAddr, opt_original_peer_port: Option, server_socket_addr: SocketAddr) { + async fn send_event( + &self, + original_peer_ip: IpAddr, + opt_original_peer_port: Option, + server_service_binding: 
ServiceBinding, + ) { if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { http_stats_event_sender .send_event(Event::TcpScrape { - connection: ConnectionContext::new(original_peer_ip, opt_original_peer_port, server_socket_addr), + connection: ConnectionContext::new(original_peer_ip, opt_original_peer_port, server_service_binding), }) .await; } @@ -269,6 +276,7 @@ mod tests { use bittorrent_tracker_core::announce_handler::PeersWanted; use mockall::predicate::eq; use torrust_tracker_primitives::core::ScrapeData; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; @@ -312,6 +320,7 @@ mod tests { }; let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = Arc::new(ScrapeService::new( core_config.clone(), @@ -321,7 +330,7 @@ mod tests { )); let scrape_data = scrape_service - .handle_scrape(&scrape_request, &client_ip_sources, &server_socket_addr, None) + .handle_scrape(&scrape_request, &client_ip_sources, &server_service_binding, None) .await .unwrap(); @@ -349,7 +358,8 @@ mod tests { connection: ConnectionContext::new( IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), Some(8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)) + .unwrap(), ), })) .times(1) @@ -371,6 +381,7 @@ mod tests { }; let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = Arc::new(ScrapeService::new( Arc::new(config.core), @@ -380,7 +391,7 @@ mod tests { )); scrape_service - .handle_scrape(&scrape_request, 
&client_ip_sources, &server_socket_addr, None) + .handle_scrape(&scrape_request, &client_ip_sources, &server_service_binding, None) .await .unwrap(); } @@ -388,6 +399,7 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let config = configuration::ephemeral(); @@ -398,7 +410,7 @@ mod tests { connection: ConnectionContext::new( IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), Some(8080), - server_socket_addr, + server_service_binding, ), })) .times(1) @@ -420,6 +432,7 @@ mod tests { }; let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = Arc::new(ScrapeService::new( Arc::new(config.core), @@ -429,7 +442,7 @@ mod tests { )); scrape_service - .handle_scrape(&scrape_request, &client_ip_sources, &server_socket_addr, None) + .handle_scrape(&scrape_request, &client_ip_sources, &server_service_binding, None) .await .unwrap(); } @@ -446,6 +459,7 @@ mod tests { use bittorrent_tracker_core::announce_handler::PeersWanted; use mockall::predicate::eq; use torrust_tracker_primitives::core::ScrapeData; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use torrust_tracker_test_helpers::configuration; use crate::event::{ConnectionContext, Event}; @@ -488,6 +502,7 @@ mod tests { }; let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = Arc::new(ScrapeService::new( Arc::new(config.core), @@ -497,7 +512,7 @@ mod tests { )); let scrape_data = scrape_service - 
.handle_scrape(&scrape_request, &client_ip_sources, &server_socket_addr, None) + .handle_scrape(&scrape_request, &client_ip_sources, &server_service_binding, None) .await .unwrap(); @@ -519,7 +534,8 @@ mod tests { connection: ConnectionContext::new( IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), Some(8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)) + .unwrap(), ), })) .times(1) @@ -539,6 +555,7 @@ mod tests { }; let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = Arc::new(ScrapeService::new( Arc::new(config.core), @@ -548,7 +565,7 @@ mod tests { )); scrape_service - .handle_scrape(&scrape_request, &client_ip_sources, &server_socket_addr, None) + .handle_scrape(&scrape_request, &client_ip_sources, &server_service_binding, None) .await .unwrap(); } @@ -556,6 +573,7 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let config = configuration::ephemeral(); @@ -568,7 +586,7 @@ mod tests { connection: ConnectionContext::new( IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), Some(8080), - server_socket_addr, + server_service_binding, ), })) .times(1) @@ -588,6 +606,7 @@ mod tests { }; let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = Arc::new(ScrapeService::new( Arc::new(config.core), @@ -597,7 +616,7 @@ mod tests { )); scrape_service - .handle_scrape(&scrape_request, 
&client_ip_sources, &server_socket_addr, None) + .handle_scrape(&scrape_request, &client_ip_sources, &server_service_binding, None) .await .unwrap(); } diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index 700e39476..0df1c41d3 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -34,6 +34,8 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + use crate::event::{ConnectionContext, Event}; use crate::statistics::event::handler::handle_event; use crate::statistics::repository::Repository; @@ -47,7 +49,7 @@ mod tests { connection: ConnectionContext::new( IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), Some(8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), }, &stats_repository, @@ -68,7 +70,7 @@ mod tests { connection: ConnectionContext::new( IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), Some(8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), }, &stats_repository, @@ -89,7 +91,7 @@ mod tests { connection: ConnectionContext::new( IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), Some(8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), }, &stats_repository, @@ -110,7 +112,7 @@ mod tests { connection: ConnectionContext::new( IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 
0x6969)), Some(8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), }, &stats_repository, From 0784a9e4b3054a0f27564c37c1f782288feb51e6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 31 Mar 2025 13:02:42 +0100 Subject: [PATCH 434/802] feat: [#1424] add ServiceBinding to UDP events --- .../udp-tracker-core/benches/helpers/sync.rs | 4 +- packages/udp-tracker-core/src/event/mod.rs | 10 +- .../udp-tracker-core/src/services/announce.rs | 9 +- .../udp-tracker-core/src/services/connect.rs | 25 +++-- .../udp-tracker-core/src/services/scrape.rs | 9 +- .../src/statistics/event/handler.rs | 38 ++++++-- packages/udp-tracker-server/src/event/mod.rs | 10 +- .../src/handlers/announce.rs | 61 ++++++++---- .../src/handlers/connect.rs | 31 ++++--- .../udp-tracker-server/src/handlers/error.rs | 7 +- .../udp-tracker-server/src/handlers/mod.rs | 19 ++-- .../udp-tracker-server/src/handlers/scrape.rs | 33 ++++--- .../udp-tracker-server/src/server/launcher.rs | 16 +++- .../src/server/processor.rs | 14 ++- .../src/statistics/event/handler.rs | 92 ++++++++++++++++--- 15 files changed, 270 insertions(+), 108 deletions(-) diff --git a/packages/udp-tracker-core/benches/helpers/sync.rs b/packages/udp-tracker-core/benches/helpers/sync.rs index ca459c640..b61204586 100644 --- a/packages/udp-tracker-core/benches/helpers/sync.rs +++ b/packages/udp-tracker-core/benches/helpers/sync.rs @@ -4,6 +4,7 @@ use std::time::{Duration, Instant}; use bittorrent_udp_tracker_core::services::connect::ConnectService; use bittorrent_udp_tracker_core::statistics; +use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::helpers::utils::{sample_ipv4_remote_addr, sample_issue_time}; @@ -11,6 +12,7 @@ use crate::helpers::utils::{sample_ipv4_remote_addr, sample_issue_time}; pub async fn connect_once(samples: u64) -> Duration { let client_socket_addr 
= sample_ipv4_remote_addr(); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let (udp_core_stats_event_sender, _udp_core_stats_repository) = statistics::setup::factory(false); let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); @@ -18,7 +20,7 @@ pub async fn connect_once(samples: u64) -> Duration { let start = Instant::now(); for _ in 0..samples { - let _response = connect_service.handle_connect(client_socket_addr, server_socket_addr, sample_issue_time()); + let _response = connect_service.handle_connect(client_socket_addr, server_service_binding.clone(), sample_issue_time()); } start.elapsed() diff --git a/packages/udp-tracker-core/src/event/mod.rs b/packages/udp-tracker-core/src/event/mod.rs index 04b3170e2..e25f557e2 100644 --- a/packages/udp-tracker-core/src/event/mod.rs +++ b/packages/udp-tracker-core/src/event/mod.rs @@ -1,5 +1,7 @@ use std::net::SocketAddr; +use torrust_tracker_primitives::service_binding::ServiceBinding; + pub mod sender; /// A UDP core event. 
@@ -13,15 +15,15 @@ pub enum Event { #[derive(Debug, PartialEq, Eq, Clone)] pub struct ConnectionContext { pub client_socket_addr: SocketAddr, - pub server_socket_addr: SocketAddr, + pub server_service_binding: ServiceBinding, } impl ConnectionContext { #[must_use] - pub fn new(client_socket_addr: SocketAddr, server_socket_addr: SocketAddr) -> Self { + pub fn new(client_socket_addr: SocketAddr, server_service_binding: ServiceBinding) -> Self { Self { client_socket_addr, - server_socket_addr, + server_service_binding, } } @@ -32,6 +34,6 @@ impl ConnectionContext { #[must_use] pub fn server_socket_addr(&self) -> SocketAddr { - self.server_socket_addr + self.server_service_binding.bind_address() } } diff --git a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index d99618316..0a9bf6b82 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -18,6 +18,7 @@ use bittorrent_tracker_core::error::{AnnounceError, WhitelistError}; use bittorrent_tracker_core::whitelist; use bittorrent_udp_tracker_protocol::peer_builder; use torrust_tracker_primitives::core::AnnounceData; +use torrust_tracker_primitives::service_binding::ServiceBinding; use crate::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieError}; use crate::event::{self, ConnectionContext, Event}; @@ -58,7 +59,7 @@ impl AnnounceService { pub async fn handle_announce( &self, client_socket_addr: SocketAddr, - server_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, request: &AnnounceRequest, cookie_valid_range: Range, ) -> Result { @@ -79,7 +80,7 @@ impl AnnounceService { .announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted) .await?; - self.send_event(client_socket_addr, server_socket_addr).await; + self.send_event(client_socket_addr, server_service_binding).await; Ok(announce_data) } @@ -100,11 +101,11 @@ impl AnnounceService { 
self.whitelist_authorization.authorize(info_hash).await } - async fn send_event(&self, client_socket_addr: SocketAddr, server_socket_addr: SocketAddr) { + async fn send_event(&self, client_socket_addr: SocketAddr, server_service_binding: ServiceBinding) { if let Some(udp_stats_event_sender) = self.opt_udp_core_stats_event_sender.as_deref() { udp_stats_event_sender .send_event(Event::UdpAnnounce { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: ConnectionContext::new(client_socket_addr, server_service_binding), }) .await; } diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index e543fbb1e..92bcd299f 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -5,6 +5,7 @@ use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::ConnectionId; +use torrust_tracker_primitives::service_binding::ServiceBinding; use crate::connection_cookie::{gen_remote_fingerprint, make}; use crate::event::{self, ConnectionContext, Event}; @@ -33,7 +34,7 @@ impl ConnectService { pub async fn handle_connect( &self, client_socket_addr: SocketAddr, - server_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, cookie_issue_time: f64, ) -> ConnectionId { let connection_id = @@ -42,7 +43,7 @@ impl ConnectService { if let Some(udp_stats_event_sender) = self.opt_udp_core_stats_event_sender.as_deref() { udp_stats_event_sender .send_event(Event::UdpConnect { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: ConnectionContext::new(client_socket_addr, server_service_binding), }) .await; } @@ -61,6 +62,7 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::connection_cookie::make; use crate::event::{ConnectionContext, Event}; @@ -74,6 +76,7 @@ mod tests { #[tokio::test] async 
fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let (udp_core_stats_event_sender, _udp_core_stats_repository) = statistics::setup::factory(false); let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); @@ -81,7 +84,7 @@ mod tests { let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); let response = connect_service - .handle_connect(sample_ipv4_remote_addr(), server_socket_addr, sample_issue_time()) + .handle_connect(sample_ipv4_remote_addr(), server_service_binding, sample_issue_time()) .await; assert_eq!( @@ -93,6 +96,7 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id() { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let (udp_core_stats_event_sender, _udp_core_stats_repository) = statistics::setup::factory(false); let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); @@ -100,7 +104,7 @@ mod tests { let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); let response = connect_service - .handle_connect(sample_ipv4_remote_addr(), server_socket_addr, sample_issue_time()) + .handle_connect(sample_ipv4_remote_addr(), server_service_binding, sample_issue_time()) .await; assert_eq!( @@ -113,6 +117,7 @@ mod tests { async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { let client_socket_addr = sample_ipv6_remote_addr(); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let (udp_core_stats_event_sender, 
_udp_core_stats_repository) = statistics::setup::factory(false); let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); @@ -120,7 +125,7 @@ mod tests { let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); let response = connect_service - .handle_connect(client_socket_addr, server_socket_addr, sample_issue_time()) + .handle_connect(client_socket_addr, server_service_binding, sample_issue_time()) .await; assert_eq!( @@ -133,12 +138,13 @@ mod tests { async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { let client_socket_addr = sample_ipv4_socket_address(); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let mut udp_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_stats_event_sender_mock .expect_send_event() .with(eq(Event::UdpConnect { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -148,7 +154,7 @@ mod tests { let connect_service = Arc::new(ConnectService::new(opt_udp_stats_event_sender)); connect_service - .handle_connect(client_socket_addr, server_socket_addr, sample_issue_time()) + .handle_connect(client_socket_addr, server_service_binding, sample_issue_time()) .await; } @@ -156,12 +162,13 @@ mod tests { async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { let client_socket_addr = sample_ipv6_remote_addr(); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let mut udp_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); 
udp_stats_event_sender_mock .expect_send_event() .with(eq(Event::UdpConnect { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -171,7 +178,7 @@ mod tests { let connect_service = Arc::new(ConnectService::new(opt_udp_stats_event_sender)); connect_service - .handle_connect(client_socket_addr, server_socket_addr, sample_issue_time()) + .handle_connect(client_socket_addr, server_service_binding, sample_issue_time()) .await; } } diff --git a/packages/udp-tracker-core/src/services/scrape.rs b/packages/udp-tracker-core/src/services/scrape.rs index 3b6898311..6ee64111c 100644 --- a/packages/udp-tracker-core/src/services/scrape.rs +++ b/packages/udp-tracker-core/src/services/scrape.rs @@ -16,6 +16,7 @@ use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::error::{ScrapeError, WhitelistError}; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use torrust_tracker_primitives::core::ScrapeData; +use torrust_tracker_primitives::service_binding::ServiceBinding; use crate::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieError}; use crate::event::{self, ConnectionContext, Event}; @@ -50,7 +51,7 @@ impl ScrapeService { pub async fn handle_scrape( &self, client_socket_addr: SocketAddr, - server_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, request: &ScrapeRequest, cookie_valid_range: Range, ) -> Result { @@ -61,7 +62,7 @@ impl ScrapeService { .scrape(&Self::convert_from_aquatic(&request.info_hashes)) .await?; - self.send_event(client_socket_addr, server_socket_addr).await; + self.send_event(client_socket_addr, server_service_binding).await; Ok(scrape_data) } @@ -82,11 +83,11 @@ impl ScrapeService { aquatic_infohashes.iter().map(|&x| x.into()).collect() } - async fn send_event(&self, client_socket_addr: SocketAddr, 
server_socket_addr: SocketAddr) { + async fn send_event(&self, client_socket_addr: SocketAddr, server_service_binding: ServiceBinding) { if let Some(udp_stats_event_sender) = self.opt_udp_stats_event_sender.as_deref() { udp_stats_event_sender .send_event(Event::UdpScrape { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: ConnectionContext::new(client_socket_addr, server_service_binding), }) .await; } diff --git a/packages/udp-tracker-core/src/statistics/event/handler.rs b/packages/udp-tracker-core/src/statistics/event/handler.rs index a9ac0dade..3968ca4e7 100644 --- a/packages/udp-tracker-core/src/statistics/event/handler.rs +++ b/packages/udp-tracker-core/src/statistics/event/handler.rs @@ -39,6 +39,8 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + use crate::event::{ConnectionContext, Event}; use crate::statistics::event::handler::handle_event; use crate::statistics::repository::Repository; @@ -51,7 +53,11 @@ mod tests { Event::UdpConnect { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), ), }, &stats_repository, @@ -71,7 +77,11 @@ mod tests { Event::UdpAnnounce { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), ), }, &stats_repository, @@ -91,7 +101,11 @@ mod tests { Event::UdpScrape { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 
0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), ), }, &stats_repository, @@ -111,7 +125,11 @@ mod tests { Event::UdpConnect { context: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), ), }, &stats_repository, @@ -131,7 +149,11 @@ mod tests { Event::UdpAnnounce { context: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), ), }, &stats_repository, @@ -151,7 +173,11 @@ mod tests { Event::UdpScrape { context: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), ), }, &stats_repository, diff --git a/packages/udp-tracker-server/src/event/mod.rs b/packages/udp-tracker-server/src/event/mod.rs index 0adf29c8b..68f07cfd6 100644 --- a/packages/udp-tracker-server/src/event/mod.rs +++ b/packages/udp-tracker-server/src/event/mod.rs @@ -1,6 +1,8 @@ use std::net::SocketAddr; use std::time::Duration; +use torrust_tracker_primitives::service_binding::ServiceBinding; + pub mod sender; /// A UDP server event. 
@@ -52,15 +54,15 @@ pub enum UdpResponseKind { #[derive(Debug, PartialEq, Eq, Clone)] pub struct ConnectionContext { client_socket_addr: SocketAddr, - server_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, } impl ConnectionContext { #[must_use] - pub fn new(client_socket_addr: SocketAddr, server_socket_addr: SocketAddr) -> Self { + pub fn new(client_socket_addr: SocketAddr, server_service_binding: ServiceBinding) -> Self { Self { client_socket_addr, - server_socket_addr, + server_service_binding, } } @@ -71,6 +73,6 @@ impl ConnectionContext { #[must_use] pub fn server_socket_addr(&self) -> SocketAddr { - self.server_socket_addr + self.server_service_binding.bind_address() } } diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 5df46125d..1cf0f0b7d 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -11,6 +11,7 @@ use bittorrent_primitives::info_hash::InfoHash; use bittorrent_udp_tracker_core::services::announce::AnnounceService; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; +use torrust_tracker_primitives::service_binding::ServiceBinding; use tracing::{instrument, Level}; use zerocopy::network_endian::I32; @@ -26,7 +27,7 @@ use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; pub async fn handle_announce( announce_service: &Arc, client_socket_addr: SocketAddr, - server_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, request: &AnnounceRequest, core_config: &Arc, opt_udp_server_stats_event_sender: &Arc>>, @@ -42,14 +43,14 @@ pub async fn handle_announce( if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender .send_event(Event::UdpRequestAccepted { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: 
ConnectionContext::new(client_socket_addr, server_service_binding.clone()), kind: UdpRequestKind::Announce, }) .await; } let announce_data = announce_service - .handle_announce(client_socket_addr, server_socket_addr, request, cookie_valid_range) + .handle_announce(client_socket_addr, server_service_binding, request, cookie_valid_range) .await .map_err(|e| (e.into(), request.transaction_id, UdpRequestKind::Announce))?; @@ -205,6 +206,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use mockall::predicate::eq; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; @@ -228,6 +230,7 @@ mod tests { let client_socket_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let request = AnnounceRequestBuilder::default() .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) @@ -240,7 +243,7 @@ mod tests { handle_announce( &core_udp_tracker_services.announce_service, client_socket_addr, - server_socket_addr, + server_service_binding, &request, &core_tracker_services.core_config, &server_udp_tracker_services.udp_server_stats_event_sender, @@ -269,6 +272,7 @@ mod tests { let client_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let request = AnnounceRequestBuilder::default() 
.with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) @@ -277,7 +281,7 @@ mod tests { let response = handle_announce( &core_udp_tracker_services.announce_service, client_socket_addr, - server_socket_addr, + server_service_binding, &request, &core_tracker_services.core_config, &server_udp_tracker_services.udp_server_stats_event_sender, @@ -320,6 +324,7 @@ mod tests { let client_socket_addr = SocketAddr::new(IpAddr::V4(remote_client_ip), remote_client_port); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let request = AnnounceRequestBuilder::default() .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) @@ -332,7 +337,7 @@ mod tests { handle_announce( &core_udp_tracker_services.announce_service, client_socket_addr, - server_socket_addr, + server_service_binding, &request, &core_tracker_services.core_config, &server_udp_tracker_services.udp_server_stats_event_sender, @@ -378,6 +383,7 @@ mod tests { let client_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let request = AnnounceRequestBuilder::default() .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) @@ -386,7 +392,7 @@ mod tests { handle_announce( &core_udp_tracker_services.announce_service, client_socket_addr, - server_socket_addr, + server_service_binding, &request, &core_tracker_services.core_config, &udp_server_stats_event_sender, @@ -419,12 +425,13 @@ mod tests { async fn should_send_the_upd4_announce_event() { let client_socket_addr = sample_ipv4_socket_address(); let server_socket_addr = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() .with(eq(Event::UdpRequestAccepted { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), kind: UdpRequestKind::Announce, })) .times(1) @@ -438,7 +445,7 @@ mod tests { handle_announce( &core_udp_tracker_services.announce_service, client_socket_addr, - server_socket_addr, + server_service_binding, &AnnounceRequestBuilder::default().into(), &core_tracker_services.core_config, &udp_server_stats_event_sender, @@ -454,6 +461,7 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::handlers::handle_announce; @@ -474,6 +482,7 @@ mod tests { let client_socket_addr = SocketAddr::new(IpAddr::V4(client_ip), client_port); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let request = AnnounceRequestBuilder::default() .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) @@ -486,7 +495,7 @@ mod tests { handle_announce( &core_udp_tracker_services.announce_service, client_socket_addr, - server_socket_addr, + server_service_binding, &request, &core_tracker_services.core_config, &server_udp_tracker_services.udp_server_stats_event_sender, @@ -529,6 +538,7 @@ mod tests { use 
bittorrent_udp_tracker_core::services::announce::AnnounceService; use mockall::predicate::eq; use torrust_tracker_configuration::Core; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; @@ -552,6 +562,7 @@ mod tests { let client_socket_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let request = AnnounceRequestBuilder::default() .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) @@ -564,7 +575,7 @@ mod tests { handle_announce( &core_udp_tracker_services.announce_service, client_socket_addr, - server_socket_addr, + server_service_binding, &request, &core_tracker_services.core_config, &server_udp_tracker_services.udp_server_stats_event_sender, @@ -596,6 +607,7 @@ mod tests { let client_socket_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), 8080); let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let request = AnnounceRequestBuilder::default() .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) @@ -604,7 +616,7 @@ mod tests { let response = handle_announce( &core_udp_tracker_services.announce_service, client_socket_addr, - server_socket_addr, + server_service_binding, &request, &core_tracker_services.core_config, &server_udp_tracker_services.udp_server_stats_event_sender, @@ -647,6 +659,7 @@ mod tests { let client_socket_addr = SocketAddr::new(IpAddr::V6(remote_client_ip), remote_client_port); let server_socket_addr = 
SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let request = AnnounceRequestBuilder::default() .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) @@ -659,7 +672,7 @@ mod tests { handle_announce( &core_udp_tracker_services.announce_service, client_socket_addr, - server_socket_addr, + server_service_binding, &request, &core_tracker_services.core_config, &server_udp_tracker_service.udp_server_stats_event_sender, @@ -710,6 +723,7 @@ mod tests { let client_socket_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let request = AnnounceRequestBuilder::default() .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) @@ -724,7 +738,7 @@ mod tests { handle_announce( &announce_service, client_socket_addr, - server_socket_addr, + server_service_binding, &request, &core_config, &udp_server_stats_event_sender, @@ -761,12 +775,13 @@ mod tests { async fn should_send_the_upd6_announce_event() { let client_socket_addr = sample_ipv6_remote_addr(); let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() .with(eq(Event::UdpRequestAccepted { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), kind: UdpRequestKind::Announce, })) .times(1) @@ -784,7 +799,7 @@ mod tests { 
handle_announce( &core_udp_tracker_services.announce_service, client_socket_addr, - server_socket_addr, + server_service_binding, &announce_request, &core_tracker_services.core_config, &udp_server_stats_event_sender, @@ -810,6 +825,7 @@ mod tests { use bittorrent_udp_tracker_core::services::announce::AnnounceService; use bittorrent_udp_tracker_core::{self, event as core_event}; use mockall::predicate::eq; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; @@ -834,7 +850,12 @@ mod tests { let peer_id = AquaticPeerId([255u8; 20]); let client_socket_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); - let server_socket_addr = config.udp_trackers.clone().unwrap()[0].bind_address; + let mut server_socket_addr = config.udp_trackers.clone().unwrap()[0].bind_address; + if server_socket_addr.port() == 0 { + // Port 0 cannot be use in service binding + server_socket_addr.set_port(6969); + } + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let database = initialize_database(&config.core); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); @@ -847,7 +868,7 @@ mod tests { udp_core_stats_event_sender_mock .expect_send_event() .with(eq(core_event::Event::UdpAnnounce { - context: core_event::ConnectionContext::new(client_socket_addr, server_socket_addr), + context: core_event::ConnectionContext::new(client_socket_addr, server_service_binding.clone()), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -858,7 +879,7 @@ mod tests { udp_server_stats_event_sender_mock .expect_send_event() .with(eq(Event::UdpRequestAccepted { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), kind: UdpRequestKind::Announce, })) 
.times(1) @@ -892,7 +913,7 @@ mod tests { handle_announce( &announce_service, client_socket_addr, - server_socket_addr, + server_service_binding, &request, &core_config, &udp_server_stats_event_sender, diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index a0fbaead3..88f0b7f3a 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -4,6 +4,7 @@ use std::sync::Arc; use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, ConnectionId, Response}; use bittorrent_udp_tracker_core::services::connect::ConnectService; +use torrust_tracker_primitives::service_binding::ServiceBinding; use tracing::{instrument, Level}; use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; @@ -12,7 +13,7 @@ use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; #[instrument(fields(transaction_id), skip(connect_service, opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] pub async fn handle_connect( client_socket_addr: SocketAddr, - server_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, request: &ConnectRequest, connect_service: &Arc, opt_udp_server_stats_event_sender: &Arc>>, @@ -24,14 +25,14 @@ pub async fn handle_connect( if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender .send_event(Event::UdpRequestAccepted { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), kind: UdpRequestKind::Connect, }) .await; } let connection_id = connect_service - .handle_connect(client_socket_addr, server_socket_addr, cookie_issue_time) + .handle_connect(client_socket_addr, server_service_binding, cookie_issue_time) .await; build_response(*request, connection_id) @@ -60,6 +61,7 @@ mod tests { use bittorrent_udp_tracker_core::event as 
core_event; use bittorrent_udp_tracker_core::services::connect::ConnectService; use mockall::predicate::eq; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; use crate::handlers::handle_connect; @@ -77,6 +79,7 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let (udp_core_stats_event_sender, _udp_core_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); @@ -93,7 +96,7 @@ mod tests { let response = handle_connect( sample_ipv4_remote_addr(), - server_socket_addr, + server_service_binding, &request, &connect_service, &udp_server_stats_event_sender, @@ -113,6 +116,7 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id() { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let (udp_core_stats_event_sender, _udp_core_stats_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); @@ -129,7 +133,7 @@ mod tests { let response = handle_connect( sample_ipv4_remote_addr(), - server_socket_addr, + server_service_binding, &request, &connect_service, &udp_server_stats_event_sender, @@ -149,6 +153,7 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id_ipv6() { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let (udp_core_stats_event_sender, _udp_core_stats_repository) = 
bittorrent_udp_tracker_core::statistics::setup::factory(false); @@ -165,7 +170,7 @@ mod tests { let response = handle_connect( sample_ipv6_remote_addr(), - server_socket_addr, + server_service_binding, &request, &connect_service, &udp_server_stats_event_sender, @@ -186,12 +191,13 @@ mod tests { async fn it_should_send_the_upd4_connect_event_when_a_client_tries_to_connect_using_a_ip4_socket_address() { let client_socket_addr = sample_ipv4_socket_address(); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock .expect_send_event() .with(eq(core_event::Event::UdpConnect { - context: core_event::ConnectionContext::new(client_socket_addr, server_socket_addr), + context: core_event::ConnectionContext::new(client_socket_addr, server_service_binding.clone()), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -202,7 +208,7 @@ mod tests { udp_server_stats_event_sender_mock .expect_send_event() .with(eq(Event::UdpRequestAccepted { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), kind: UdpRequestKind::Connect, })) .times(1) @@ -214,7 +220,7 @@ mod tests { handle_connect( client_socket_addr, - server_socket_addr, + server_service_binding, &sample_connect_request(), &connect_service, &udp_server_stats_event_sender, @@ -227,12 +233,13 @@ mod tests { async fn it_should_send_the_upd6_connect_event_when_a_client_tries_to_connect_using_a_ip6_socket_address() { let client_socket_addr = sample_ipv6_remote_addr(); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let mut 
udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock .expect_send_event() .with(eq(core_event::Event::UdpConnect { - context: core_event::ConnectionContext::new(client_socket_addr, server_socket_addr), + context: core_event::ConnectionContext::new(client_socket_addr, server_service_binding.clone()), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -243,7 +250,7 @@ mod tests { udp_server_stats_event_sender_mock .expect_send_event() .with(eq(Event::UdpRequestAccepted { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), kind: UdpRequestKind::Connect, })) .times(1) @@ -255,7 +262,7 @@ mod tests { handle_connect( client_socket_addr, - server_socket_addr, + server_service_binding, &sample_connect_request(), &connect_service, &udp_server_stats_event_sender, diff --git a/packages/udp-tracker-server/src/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs index 70c33b5ba..6a1bce51c 100644 --- a/packages/udp-tracker-server/src/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -6,6 +6,7 @@ use std::sync::Arc; use aquatic_udp_protocol::{ErrorResponse, RequestParseError, Response, TransactionId}; use bittorrent_udp_tracker_core::connection_cookie::{check, gen_remote_fingerprint}; use bittorrent_udp_tracker_core::{self, UDP_TRACKER_LOG_TARGET}; +use torrust_tracker_primitives::service_binding::ServiceBinding; use tracing::{instrument, Level}; use uuid::Uuid; use zerocopy::network_endian::I32; @@ -18,7 +19,7 @@ use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; pub async fn handle_error( req_kind: Option, client_socket_addr: SocketAddr, - server_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, request_id: Uuid, opt_udp_server_stats_event_sender: &Arc>>, cookie_valid_range: Range, @@ -27,6 +28,8 @@ pub async fn 
handle_error( ) -> Response { tracing::trace!("handle error"); + let server_socket_addr = server_service_binding.bind_address(); + match transaction_id { Some(transaction_id) => { let transaction_id = transaction_id.0.to_string(); @@ -60,7 +63,7 @@ pub async fn handle_error( if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender .send_event(Event::UdpError { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: ConnectionContext::new(client_socket_addr, server_service_binding), }) .await; } diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 34ac374fa..f8ca9d8ea 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -18,6 +18,7 @@ use connect::handle_connect; use error::handle_error; use scrape::handle_scrape; use torrust_tracker_clock::clock::Time; +use torrust_tracker_primitives::service_binding::ServiceBinding; use tracing::{instrument, Level}; use uuid::Uuid; @@ -59,7 +60,7 @@ pub(crate) async fn handle_packet( udp_request: RawRequest, udp_tracker_core_container: Arc, udp_tracker_server_container: Arc, - server_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, cookie_time_values: CookieTimeValues, ) -> (Response, Option) { let request_id = Uuid::new_v4(); @@ -74,7 +75,7 @@ pub(crate) async fn handle_packet( Ok(request) => match handle_request( request, udp_request.from, - server_socket_addr, + server_service_binding.clone(), udp_tracker_core_container.clone(), udp_tracker_server_container.clone(), cookie_time_values.clone(), @@ -95,7 +96,7 @@ pub(crate) async fn handle_packet( let response = handle_error( Some(req_kind.clone()), udp_request.from, - server_socket_addr, + server_service_binding, request_id, &udp_tracker_server_container.udp_server_stats_event_sender, cookie_time_values.valid_range.clone(), @@ -111,7 +112,7 
@@ pub(crate) async fn handle_packet( let response = handle_error( None, udp_request.from, - server_socket_addr, + server_service_binding, request_id, &udp_tracker_server_container.udp_server_stats_event_sender, cookie_time_values.valid_range.clone(), @@ -138,7 +139,7 @@ pub(crate) async fn handle_packet( #[instrument(skip( request, client_socket_addr, - server_socket_addr, + server_service_binding, udp_tracker_core_container, udp_tracker_server_container, cookie_time_values @@ -146,7 +147,7 @@ pub(crate) async fn handle_packet( pub async fn handle_request( request: Request, client_socket_addr: SocketAddr, - server_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, udp_tracker_core_container: Arc, udp_tracker_server_container: Arc, cookie_time_values: CookieTimeValues, @@ -157,7 +158,7 @@ pub async fn handle_request( Request::Connect(connect_request) => Ok(( handle_connect( client_socket_addr, - server_socket_addr, + server_service_binding, &connect_request, &udp_tracker_core_container.connect_service, &udp_tracker_server_container.udp_server_stats_event_sender, @@ -170,7 +171,7 @@ pub async fn handle_request( match handle_announce( &udp_tracker_core_container.announce_service, client_socket_addr, - server_socket_addr, + server_service_binding, &announce_request, &udp_tracker_core_container.tracker_core_container.core_config, &udp_tracker_server_container.udp_server_stats_event_sender, @@ -186,7 +187,7 @@ pub async fn handle_request( match handle_scrape( &udp_tracker_core_container.scrape_service, client_socket_addr, - server_socket_addr, + server_service_binding, &scrape_request, &udp_tracker_server_container.udp_server_stats_event_sender, cookie_time_values.valid_range, diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index ac0faef61..35b5ee65c 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -9,6 +9,7 @@ 
use aquatic_udp_protocol::{ use bittorrent_udp_tracker_core::services::scrape::ScrapeService; use bittorrent_udp_tracker_core::{self}; use torrust_tracker_primitives::core::ScrapeData; +use torrust_tracker_primitives::service_binding::ServiceBinding; use tracing::{instrument, Level}; use zerocopy::network_endian::I32; @@ -24,7 +25,7 @@ use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; pub async fn handle_scrape( scrape_service: &Arc, client_socket_addr: SocketAddr, - server_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, request: &ScrapeRequest, opt_udp_server_stats_event_sender: &Arc>>, cookie_valid_range: Range, @@ -38,14 +39,14 @@ pub async fn handle_scrape( if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender .send_event(Event::UdpRequestAccepted { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), kind: UdpRequestKind::Scrape, }) .await; } let scrape_data = scrape_service - .handle_scrape(client_socket_addr, server_socket_addr, request, cookie_valid_range) + .handle_scrape(client_socket_addr, server_service_binding, request, cookie_valid_range) .await .map_err(|e| (e.into(), request.transaction_id, UdpRequestKind::Scrape))?; @@ -91,6 +92,7 @@ mod tests { }; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::handlers::handle_scrape; use crate::handlers::tests::{ @@ -113,6 +115,7 @@ mod tests { let client_socket_addr = sample_ipv4_remote_addr(); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let 
info_hash = InfoHash([0u8; 20]); let info_hashes = vec![info_hash]; @@ -126,7 +129,7 @@ mod tests { let response = handle_scrape( &core_udp_tracker_services.scrape_service, client_socket_addr, - server_socket_addr, + server_service_binding, &request, &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), @@ -180,6 +183,7 @@ mod tests { let client_socket_addr = sample_ipv4_remote_addr(); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let info_hash = InfoHash([0u8; 20]); @@ -195,7 +199,7 @@ mod tests { handle_scrape( &core_udp_tracker_services.scrape_service, client_socket_addr, - server_socket_addr, + server_service_binding, &request, &udp_server_stats_event_sender, sample_cookie_valid_range(), @@ -240,6 +244,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use aquatic_udp_protocol::{InfoHash, NumberOfDownloads, NumberOfPeers, TorrentScrapeStatistics}; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::handlers::handle_scrape; use crate::handlers::scrape::tests::scrape_request::{ @@ -256,6 +261,7 @@ mod tests { let client_socket_addr = sample_ipv4_remote_addr(); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let info_hash = InfoHash([0u8; 20]); @@ -274,7 +280,7 @@ mod tests { handle_scrape( &core_udp_tracker_services.scrape_service, client_socket_addr, - server_socket_addr, + server_service_binding, &request, &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), @@ -300,6 +306,7 @@ mod tests { let client_socket_addr = sample_ipv4_remote_addr(); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); + let server_service_binding = 
ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let info_hash = InfoHash([0u8; 20]); @@ -316,7 +323,7 @@ mod tests { handle_scrape( &core_udp_tracker_services.scrape_service, client_socket_addr, - server_socket_addr, + server_service_binding, &request, &server_udp_tracker_services.udp_server_stats_event_sender, sample_cookie_valid_range(), @@ -349,6 +356,7 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::sample_scrape_request; use crate::event; @@ -363,12 +371,13 @@ mod tests { async fn should_send_the_upd4_scrape_event() { let client_socket_addr = sample_ipv4_remote_addr(); let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() .with(eq(Event::UdpRequestAccepted { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), kind: UdpRequestKind::Scrape, })) .times(1) @@ -382,7 +391,7 @@ mod tests { handle_scrape( &core_udp_tracker_services.scrape_service, client_socket_addr, - server_socket_addr, + server_service_binding, &sample_scrape_request(&client_socket_addr), &udp_server_stats_event_sender, sample_cookie_valid_range(), @@ -398,6 +407,7 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::sample_scrape_request; use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; @@ -411,12 +421,13 @@ mod tests { async fn should_send_the_upd6_scrape_event() { let client_socket_addr = sample_ipv6_remote_addr(); let server_socket_addr = 
SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); + let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send_event() .with(eq(Event::UdpRequestAccepted { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), kind: UdpRequestKind::Scrape, })) .times(1) @@ -430,7 +441,7 @@ mod tests { handle_scrape( &core_udp_tracker_services.scrape_service, client_socket_addr, - server_socket_addr, + server_service_binding, &sample_scrape_request(&client_socket_addr), &udp_server_stats_event_sender, sample_cookie_valid_range(), diff --git a/packages/udp-tracker-server/src/server/launcher.rs b/packages/udp-tracker-server/src/server/launcher.rs index fd689a96f..5de41066f 100644 --- a/packages/udp-tracker-server/src/server/launcher.rs +++ b/packages/udp-tracker-server/src/server/launcher.rs @@ -13,7 +13,7 @@ use tokio::time::interval; use torrust_server_lib::logging::STARTED_ON; use torrust_server_lib::registar::ServiceHealthCheckJob; use torrust_server_lib::signals::{shutdown_signal_with_message, Halted, Started}; -use torrust_tracker_primitives::service_binding::ServiceBinding; +use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use tracing::instrument; use super::request_buffer::ActiveRequests; @@ -138,7 +138,10 @@ impl Launcher { let server_socket_addr = receiver.bound_socket_address(); - let local_addr = format!("udp://{server_socket_addr}"); + let server_service_binding = + ServiceBinding::new(Protocol::UDP, server_socket_addr).expect("Bound socket to service binding should not fail"); + + let local_addr = server_service_binding.clone().to_string(); let cookie_lifetime = cookie_lifetime.as_secs_f64(); @@ -156,6 +159,9 @@ impl Launcher { }); loop { + let 
server_service_binding = + ServiceBinding::new(Protocol::UDP, server_socket_addr).expect("Bound socket to service binding should not fail"); + if let Some(req) = { tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server (wait for request)"); receiver.next().await @@ -180,7 +186,7 @@ impl Launcher { { udp_server_stats_event_sender .send_event(Event::UdpRequestReceived { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), }) .await; } @@ -193,7 +199,7 @@ impl Launcher { { udp_server_stats_event_sender .send_event(Event::UdpRequestBanned { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), }) .await; } @@ -235,7 +241,7 @@ impl Launcher { { udp_server_stats_event_sender .send_event(Event::UdpRequestAborted { - context: ConnectionContext::new(client_socket_addr, server_socket_addr), + context: ConnectionContext::new(client_socket_addr, server_service_binding), }) .await; } diff --git a/packages/udp-tracker-server/src/server/processor.rs b/packages/udp-tracker-server/src/server/processor.rs index 02e084356..5e98b0361 100644 --- a/packages/udp-tracker-server/src/server/processor.rs +++ b/packages/udp-tracker-server/src/server/processor.rs @@ -7,6 +7,7 @@ use aquatic_udp_protocol::Response; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use bittorrent_udp_tracker_core::{self}; use tokio::time::Instant; +use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use tracing::{instrument, Level}; use super::bound_socket::BoundSocket; @@ -20,20 +21,29 @@ pub struct Processor { udp_tracker_core_container: Arc, udp_tracker_server_container: Arc, cookie_lifetime: f64, + server_service_binding: ServiceBinding, } impl Processor { + /// # Panics + /// + /// It will panic if a bound socket address port 
is 0. It should never + /// happen. pub fn new( socket: Arc, udp_tracker_core_container: Arc, udp_tracker_server_container: Arc, cookie_lifetime: f64, ) -> Self { + let server_service_binding = + ServiceBinding::new(Protocol::UDP, socket.address()).expect("Bound socket port should't be 0"); + Self { socket, udp_tracker_core_container, udp_tracker_server_container, cookie_lifetime, + server_service_binding, } } @@ -47,7 +57,7 @@ impl Processor { request, self.udp_tracker_core_container.clone(), self.udp_tracker_server_container.clone(), - self.socket.address(), + self.server_service_binding.clone(), CookieTimeValues::new(self.cookie_lifetime), ) .await; @@ -109,7 +119,7 @@ impl Processor { { udp_server_stats_event_sender .send_event(Event::UdpResponseSent { - context: ConnectionContext::new(client_socket_addr, self.socket.address()), + context: ConnectionContext::new(client_socket_addr, self.server_service_binding), kind: udp_response_kind, req_processing_time, }) diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index f65a1e567..b06c8d725 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -100,6 +100,8 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + use crate::event::{ConnectionContext, Event, UdpRequestKind}; use crate::statistics::event::handler::handle_event; use crate::statistics::repository::Repository; @@ -112,7 +114,11 @@ mod tests { Event::UdpRequestAborted { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), ), }, &stats_repository, @@ -132,7 +138,11 @@ mod tests { Event::UdpRequestBanned { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), ), }, &stats_repository, @@ -152,7 +162,11 @@ mod tests { Event::UdpRequestReceived { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), ), }, &stats_repository, @@ -172,7 +186,11 @@ mod tests { Event::UdpRequestAborted { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), ), }, &stats_repository, @@ -189,7 +207,11 @@ mod tests { Event::UdpRequestBanned { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), ), }, &stats_repository, @@ -207,7 +229,11 @@ mod tests { Event::UdpRequestAccepted { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), 
), kind: crate::event::UdpRequestKind::Connect, }, @@ -228,7 +254,11 @@ mod tests { Event::UdpRequestAccepted { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), ), kind: crate::event::UdpRequestKind::Announce, }, @@ -249,7 +279,11 @@ mod tests { Event::UdpRequestAccepted { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), ), kind: crate::event::UdpRequestKind::Scrape, }, @@ -270,7 +304,11 @@ mod tests { Event::UdpResponseSent { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), ), kind: crate::event::UdpResponseKind::Ok { req_kind: UdpRequestKind::Announce, @@ -294,7 +332,11 @@ mod tests { Event::UdpError { context: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), ), }, &stats_repository, @@ -314,7 +356,11 @@ mod tests { Event::UdpRequestAccepted { context: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + 
SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), ), kind: crate::event::UdpRequestKind::Connect, }, @@ -335,7 +381,11 @@ mod tests { Event::UdpRequestAccepted { context: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), ), kind: crate::event::UdpRequestKind::Announce, }, @@ -356,7 +406,11 @@ mod tests { Event::UdpRequestAccepted { context: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), ), kind: crate::event::UdpRequestKind::Scrape, }, @@ -377,7 +431,11 @@ mod tests { Event::UdpResponseSent { context: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), ), kind: crate::event::UdpResponseKind::Ok { req_kind: UdpRequestKind::Announce, @@ -400,7 +458,11 @@ mod tests { Event::UdpError { context: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), ), }, &stats_repository, From a46011637ab11a0aef09ac70985069efe50364f9 Mon Sep 17 00:00:00 2001 From: 
Victor Bjelkholm Date: Fri, 4 Apr 2025 13:56:49 +0200 Subject: [PATCH 435/802] Change rustdoc docs to only compile+run in release mode As-is, it asks the user to do a release mode build first, then after creating the directories, then do a debug mode build + run. This commit simplifies it and avoids compiling the tracker in both debug and release mode, and instead only compiles+run with release mode --- src/lib.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 0aaf34fe4..b26960899 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -138,7 +138,6 @@ //! ```text //! git clone https://github.com/torrust/torrust-tracker.git \ //! && cd torrust-tracker \ -//! && cargo build --release \ //! && mkdir -p ./storage/tracker/etc \ //! && mkdir -p ./storage/tracker/lib/database \ //! && mkdir -p ./storage/tracker/lib/tls \ @@ -149,7 +148,7 @@ //! compile and after being compiled it will start running the tracker. //! //! ```text -//! cargo run +//! cargo run --release //! ``` //! //! ## Run with docker From 8a169b1f48f662e941a5ca7f4911b365325bebe1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 8 Apr 2025 17:59:00 +0100 Subject: [PATCH 436/802] feat: add protocol method to ServiceBinding --- packages/primitives/src/service_binding.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/packages/primitives/src/service_binding.rs b/packages/primitives/src/service_binding.rs index dbbb32fd5..30eb1aa9e 100644 --- a/packages/primitives/src/service_binding.rs +++ b/packages/primitives/src/service_binding.rs @@ -83,6 +83,12 @@ impl ServiceBinding { Ok(Self { protocol, bind_address }) } + /// Returns the protocol used by the service. 
+ #[must_use] + pub fn protocol(&self) -> Protocol { + self.protocol.clone() + } + #[must_use] pub fn bind_address(&self) -> SocketAddr { self.bind_address From 730de9ff633fe0b444cac784d33adae204a13d2d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 8 Apr 2025 18:01:34 +0100 Subject: [PATCH 437/802] feat: [#1403] add new package for extendable labeled metrics This package allow creating collection of metrics that can have labels. It's similar to the `metrics` crate. There are two types of metrics: - Counter - Gauge For example, you can increase a counter with: ```rust let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); let mut metric_collection = MetricCollection::new( // Collection of counter-type metrics MetricKindCollection::new(vec![ Metric::new( MetricName::new("test_counter"), SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())])) ]), // Empty colelction of gauge-type metrics MetricKindCollection::new(vec![]) ); metric_collection.increase_counter(&MetricName::new("test_counter"), &label_set, time); ``` Metric colelctions are serializable into JSON and exportable to Prometheus format. 
--- .github/workflows/deployment.yaml | 1 + cSpell.json | 6 + packages/metrics/.gitignore | 1 + packages/metrics/Cargo.toml | 29 + packages/metrics/LICENSE | 661 +++++++++++++++ packages/metrics/README.md | 15 + packages/metrics/src/counter.rs | 81 ++ packages/metrics/src/gauge.rs | 83 ++ packages/metrics/src/label/mod.rs | 9 + packages/metrics/src/label/name.rs | 117 +++ packages/metrics/src/label/pair.rs | 29 + packages/metrics/src/label/set.rs | 340 ++++++++ packages/metrics/src/label/value.rs | 32 + packages/metrics/src/lib.rs | 29 + packages/metrics/src/metric/description.rs | 29 + packages/metrics/src/metric/mod.rs | 192 +++++ packages/metrics/src/metric/name.rs | 92 +++ packages/metrics/src/metric_collection.rs | 759 ++++++++++++++++++ packages/metrics/src/prometheus.rs | 15 + packages/metrics/src/sample.rs | 355 ++++++++ packages/metrics/src/sample_collection.rs | 411 ++++++++++ .../src/thread_safe_metric_collection.rs | 92 +++ packages/metrics/src/unit.rs | 25 + 23 files changed, 3403 insertions(+) create mode 100644 packages/metrics/.gitignore create mode 100644 packages/metrics/Cargo.toml create mode 100644 packages/metrics/LICENSE create mode 100644 packages/metrics/README.md create mode 100644 packages/metrics/src/counter.rs create mode 100644 packages/metrics/src/gauge.rs create mode 100644 packages/metrics/src/label/mod.rs create mode 100644 packages/metrics/src/label/name.rs create mode 100644 packages/metrics/src/label/pair.rs create mode 100644 packages/metrics/src/label/set.rs create mode 100644 packages/metrics/src/label/value.rs create mode 100644 packages/metrics/src/lib.rs create mode 100644 packages/metrics/src/metric/description.rs create mode 100644 packages/metrics/src/metric/mod.rs create mode 100644 packages/metrics/src/metric/name.rs create mode 100644 packages/metrics/src/metric_collection.rs create mode 100644 packages/metrics/src/prometheus.rs create mode 100644 packages/metrics/src/sample.rs create mode 100644 
packages/metrics/src/sample_collection.rs create mode 100644 packages/metrics/src/thread_safe_metric_collection.rs create mode 100644 packages/metrics/src/unit.rs diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 1422ec394..983817273 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -74,6 +74,7 @@ jobs: cargo publish -p torrust-tracker-configuration cargo publish -p torrust-tracker-contrib-bencode cargo publish -p torrust-tracker-located-error + cargo publish -p torrust-tracker-metrics cargo publish -p torrust-tracker-primitives cargo publish -p torrust-tracker-test-helpers cargo publish -p torrust-tracker-torrent-repository diff --git a/cSpell.json b/cSpell.json index 3121d6175..e384a08d9 100644 --- a/cSpell.json +++ b/cSpell.json @@ -59,9 +59,11 @@ "Eray", "filesd", "flamegraph", + "formatjson", "Freebox", "Frostegård", "gecos", + "Gibibytes", "Grcov", "hasher", "healthcheck", @@ -86,6 +88,7 @@ "kcachegrind", "kexec", "keyout", + "Kibibytes", "kptr", "lcov", "leecher", @@ -96,12 +99,14 @@ "LOGNAME", "Lphant", "matchmakes", + "Mebibytes", "metainfo", "middlewares", "misresolved", "mockall", "multimap", "myacicontext", + "ñaca", "Naim", "nanos", "newkey", @@ -157,6 +162,7 @@ "Swiftbit", "taiki", "tdyne", + "Tebibytes", "tempfile", "testcontainers", "thiserror", diff --git a/packages/metrics/.gitignore b/packages/metrics/.gitignore new file mode 100644 index 000000000..0b1372e5c --- /dev/null +++ b/packages/metrics/.gitignore @@ -0,0 +1 @@ +./.coverage diff --git a/packages/metrics/Cargo.toml b/packages/metrics/Cargo.toml new file mode 100644 index 000000000..6520cf244 --- /dev/null +++ b/packages/metrics/Cargo.toml @@ -0,0 +1,29 @@ +[package] +description = "A library with the primitive types shared by the Torrust tracker packages." 
+keywords = ["api", "library", "metrics"] +name = "torrust-tracker-metrics" +readme = "README.md" + +authors.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +chrono = { version = "0", default-features = false, features = ["clock"] } +derive_more = { version = "2", features = ["constructor"] } +serde = { version = "1", features = ["derive"] } +serde_json = "1.0.140" +thiserror = "2" +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } + +[dev-dependencies] +approx = "0.5.1" +formatjson = "0.3.1" +pretty_assertions = "1.4.1" +rstest = "0.25.0" diff --git a/packages/metrics/LICENSE b/packages/metrics/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/metrics/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. 
+ + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. 
+ + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/packages/metrics/README.md b/packages/metrics/README.md new file mode 100644 index 000000000..627640eec --- /dev/null +++ b/packages/metrics/README.md @@ -0,0 +1,15 @@ +# Torrust Tracker Metrics + +A library with the metrics types used by the [Torrust Tracker](https://github.com/torrust/torrust-tracker) packages. + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-metrics). + +## Acknowledgements + +We copied some parts like units or function names and signatures from the crate [metrics](https://crates.io/crates/metrics) because we wanted to make it compatible as much as possible with it. In the future, we may consider using the `metrics` crate directly instead of maintaining our own version. + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). 
diff --git a/packages/metrics/src/counter.rs b/packages/metrics/src/counter.rs new file mode 100644 index 000000000..3a816c75b --- /dev/null +++ b/packages/metrics/src/counter.rs @@ -0,0 +1,81 @@ +use derive_more::Display; +use serde::{Deserialize, Serialize}; + +use super::prometheus::PrometheusSerializable; + +#[derive(Debug, Display, Clone, Default, PartialEq, Serialize, Deserialize)] +pub struct Counter(u64); + +impl Counter { + #[must_use] + pub fn new(value: u64) -> Self { + Self(value) + } + + #[must_use] + pub fn value(&self) -> u64 { + self.0 + } + + pub fn increment(&mut self, value: u64) { + self.0 += value; + } +} + +impl From for Counter { + fn from(value: u64) -> Self { + Self(value) + } +} + +impl From for u64 { + fn from(counter: Counter) -> Self { + counter.value() + } +} + +impl PrometheusSerializable for Counter { + fn to_prometheus(&self) -> String { + format!("{}", self.value()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn it_should_be_created_from_integer_values() { + let counter = Counter::new(0); + assert_eq!(counter.value(), 0); + } + + #[test] + fn it_could_be_converted_from_u64() { + let counter: Counter = 42.into(); + assert_eq!(counter.value(), 42); + } + + #[test] + fn it_could_be_converted_into_u64() { + let counter = Counter::new(42); + let value: u64 = counter.into(); + assert_eq!(value, 42); + } + + #[test] + fn it_could_be_incremented() { + let mut counter = Counter::new(0); + counter.increment(1); + assert_eq!(counter.value(), 1); + + counter.increment(2); + assert_eq!(counter.value(), 3); + } + + #[test] + fn it_serializes_to_prometheus() { + let counter = Counter::new(42); + assert_eq!(counter.to_prometheus(), "42"); + } +} diff --git a/packages/metrics/src/gauge.rs b/packages/metrics/src/gauge.rs new file mode 100644 index 000000000..61ff3024c --- /dev/null +++ b/packages/metrics/src/gauge.rs @@ -0,0 +1,83 @@ +use derive_more::Display; +use serde::{Deserialize, Serialize}; + +use 
super::prometheus::PrometheusSerializable; + +#[derive(Debug, Display, Clone, Default, PartialEq, Serialize, Deserialize)] +pub struct Gauge(f64); + +impl Gauge { + #[must_use] + pub fn new(value: f64) -> Self { + Self(value) + } + + #[must_use] + pub fn value(&self) -> f64 { + self.0 + } + + pub fn set(&mut self, value: f64) { + self.0 = value; + } +} + +impl From for Gauge { + fn from(value: f64) -> Self { + Self(value) + } +} + +impl From for f64 { + fn from(counter: Gauge) -> Self { + counter.value() + } +} + +impl PrometheusSerializable for Gauge { + fn to_prometheus(&self) -> String { + format!("{}", self.value()) + } +} + +#[cfg(test)] +mod tests { + use approx::assert_relative_eq; + + use super::*; + + #[test] + fn it_should_be_created_from_integer_values() { + let gauge = Gauge::new(0.0); + assert_relative_eq!(gauge.value(), 0.0); + } + + #[test] + fn it_could_be_converted_from_u64() { + let gauge: Gauge = 42.0.into(); + assert_relative_eq!(gauge.value(), 42.0); + } + + #[test] + fn it_could_be_converted_into_i64() { + let gauge = Gauge::new(42.0); + let value: f64 = gauge.into(); + assert_relative_eq!(value, 42.0); + } + + #[test] + fn it_could_be_set() { + let mut gauge = Gauge::new(0.0); + gauge.set(1.0); + assert_relative_eq!(gauge.value(), 1.0); + } + + #[test] + fn it_serializes_to_prometheus() { + let counter = Gauge::new(42.0); + assert_eq!(counter.to_prometheus(), "42"); + + let counter = Gauge::new(42.1); + assert_eq!(counter.to_prometheus(), "42.1"); + } +} diff --git a/packages/metrics/src/label/mod.rs b/packages/metrics/src/label/mod.rs new file mode 100644 index 000000000..b5fd3b745 --- /dev/null +++ b/packages/metrics/src/label/mod.rs @@ -0,0 +1,9 @@ +mod name; +mod pair; +mod set; +mod value; + +pub type LabelName = name::LabelName; +pub type LabelValue = value::LabelValue; +pub type LabelPair = pair::LabelPair; +pub type LabelSet = set::LabelSet; diff --git a/packages/metrics/src/label/name.rs b/packages/metrics/src/label/name.rs new file 
mode 100644 index 000000000..22e75572f --- /dev/null +++ b/packages/metrics/src/label/name.rs @@ -0,0 +1,117 @@ +use derive_more::Display; +use serde::{Deserialize, Serialize}; + +use crate::prometheus::PrometheusSerializable; + +#[derive(Debug, Display, Clone, Eq, PartialEq, Default, Deserialize, Serialize, Hash, Ord, PartialOrd)] +pub struct LabelName(String); + +impl LabelName { + /// Creates a new `LabelName` instance. + /// + /// # Panics + /// + /// Panics if the provided name is empty. + #[must_use] + pub fn new(name: &str) -> Self { + assert!( + !name.is_empty(), + "Label name cannot be empty. It must have at least one character." + ); + + Self(name.to_owned()) + } +} + +impl PrometheusSerializable for LabelName { + /// In Prometheus: + /// + /// - Labels may contain ASCII letters, numbers, as well as underscores. + /// They must match the regex [a-zA-Z_][a-zA-Z0-9_]*. + /// - Label names beginning with __ (two "_") are reserved for internal + /// use. + /// - Label values may contain any Unicode characters. + /// - Labels with an empty label value are considered equivalent to + /// labels that do not exist. + /// + /// The label name is changed: + /// + /// - If a label name starts with, or contains, an invalid character: + /// replace character with underscore. 
+ /// - If th label name starts with two underscores: + /// add additional underscore (three underscores total) + fn to_prometheus(&self) -> String { + // Replace invalid characters with underscore + let processed: String = self + .0 + .chars() + .enumerate() + .map(|(i, c)| { + if i == 0 { + if c.is_ascii_alphabetic() || c == '_' { + c + } else { + '_' + } + } else if c.is_ascii_alphanumeric() || c == '_' { + c + } else { + '_' + } + }) + .collect(); + + // If the label name starts with two underscores, add an additional + if processed.starts_with("__") && !processed.starts_with("___") { + format!("_{processed}") + } else { + processed + } + } +} +#[cfg(test)] +mod tests { + mod serialization_of_label_name_to_prometheus { + use rstest::rstest; + + use crate::label::LabelName; + use crate::prometheus::PrometheusSerializable; + + #[rstest] + #[case("1 valid name", "valid_name", "valid_name")] + #[case("2 leading underscore", "_leading_underscore", "_leading_underscore")] + #[case("3 leading lowercase", "v123", "v123")] + #[case("4 leading uppercase", "V123", "V123")] + fn valid_names_in_prometheus(#[case] case: &str, #[case] input: &str, #[case] output: &str) { + assert_eq!(LabelName::new(input).to_prometheus(), output, "{case} failed: {input:?}"); + } + + #[rstest] + #[case("1 invalid start 1", "9invalid_start", "_invalid_start")] + #[case("2 invalid start 2", "@test", "_test")] + #[case("3 invalid dash", "invalid-char", "invalid_char")] + #[case("4 invalid spaces", "spaces are bad", "spaces_are_bad")] + #[case("5 invalid special chars", "a!b@c#d$e%f^g&h*i(j)", "a_b_c_d_e_f_g_h_i_j_")] + #[case("6 invalid colon", "my:metric/version", "my_metric_version")] + #[case("7 all invalid characters", "!@#$%^&*()", "__________")] + #[case("8 non_ascii_characters", "ñaca©", "_aca_")] + fn names_that_need_changes_in_prometheus(#[case] case: &str, #[case] input: &str, #[case] output: &str) { + assert_eq!(LabelName::new(input).to_prometheus(), output, "{case} failed: 
{input:?}"); + } + + #[rstest] + #[case("1 double underscore start", "__private", "___private")] + #[case("2 double underscore only", "__", "___")] + #[case("3 processed to double underscore", "^^name", "___name")] + #[case("4 processed to double underscore after first char", "0__name", "___name")] + fn names_starting_with_double_underscore(#[case] case: &str, #[case] input: &str, #[case] output: &str) { + assert_eq!(LabelName::new(input).to_prometheus(), output, "{case} failed: {input:?}"); + } + + #[test] + #[should_panic(expected = "Label name cannot be empty. It must have at least one character.")] + fn empty_name() { + let _name = LabelName::new(""); + } + } +} diff --git a/packages/metrics/src/label/pair.rs b/packages/metrics/src/label/pair.rs new file mode 100644 index 000000000..c89c726bd --- /dev/null +++ b/packages/metrics/src/label/pair.rs @@ -0,0 +1,29 @@ +use super::{LabelName, LabelValue}; +use crate::prometheus::PrometheusSerializable; + +pub type LabelPair = (LabelName, LabelValue); + +// Generic implementation for any tuple (A, B) where A and B implement PrometheusSerializable +impl PrometheusSerializable for (A, B) { + fn to_prometheus(&self) -> String { + format!("{}=\"{}\"", self.0.to_prometheus(), self.1.to_prometheus()) + } +} + +#[cfg(test)] +mod tests { + mod serialization_of_label_pair_to_prometheus { + use super::super::LabelName; + use crate::label::LabelValue; + use crate::prometheus::PrometheusSerializable; + + #[test] + fn test_label_pair_serialization_to_prometheus() { + let label_pair = (LabelName::new("label_name"), LabelValue::new("value")); + assert_eq!(label_pair.to_prometheus(), r#"label_name="value""#); + + let label_pair = (&LabelName::new("label_name"), &LabelValue::new("value")); + assert_eq!(label_pair.to_prometheus(), r#"label_name="value""#); + } + } +} diff --git a/packages/metrics/src/label/set.rs b/packages/metrics/src/label/set.rs new file mode 100644 index 000000000..f46b01095 --- /dev/null +++ 
b/packages/metrics/src/label/set.rs @@ -0,0 +1,340 @@ +use std::collections::BTreeMap; +use std::fmt::Display; + +use serde::{Deserialize, Deserializer, Serialize, Serializer}; + +use super::{LabelName, LabelPair, LabelValue}; +use crate::prometheus::PrometheusSerializable; + +#[derive(Debug, Clone, Eq, PartialEq, Default, Ord, PartialOrd, Hash)] +pub struct LabelSet { + items: BTreeMap, +} + +impl LabelSet { + /// Insert a new label pair or update the value of an existing label. + pub fn upsert(&mut self, key: LabelName, value: LabelValue) { + self.items.insert(key, value); + } +} + +impl Display for LabelSet { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let items = self + .items + .iter() + .map(|(key, value)| format!("{key}=\"{value}\"")) + .collect::>() + .join(","); + + write!(f, "{{{items}}}") + } +} + +impl From> for LabelSet { + fn from(values: BTreeMap) -> Self { + Self { items: values } + } +} + +impl From> for LabelSet { + fn from(vec: Vec<(&str, &str)>) -> Self { + let mut items = BTreeMap::new(); + + for (name, value) in vec { + items.insert(LabelName::new(name), LabelValue::new(value)); + } + + Self { items } + } +} + +impl From> for LabelSet { + fn from(vec: Vec<(String, String)>) -> Self { + let mut items = BTreeMap::new(); + + for (name, value) in vec { + items.insert(LabelName::new(&name), LabelValue::new(&value)); + } + + Self { items } + } +} + +impl From> for LabelSet { + fn from(vec: Vec) -> Self { + let mut items = BTreeMap::new(); + + for (key, value) in vec { + items.insert(key, value); + } + + Self { items } + } +} + +impl From> for LabelSet { + fn from(vec: Vec) -> Self { + let mut items = BTreeMap::new(); + + for serialized_label in vec { + items.insert(serialized_label.name, serialized_label.value); + } + + Self { items } + } +} + +impl From<[LabelPair; N]> for LabelSet { + fn from(arr: [LabelPair; N]) -> Self { + let values = BTreeMap::from(arr); + Self { items: values } + } +} + +impl From<[(String, 
String); N]> for LabelSet { + fn from(arr: [(String, String); N]) -> Self { + let values = arr + .iter() + .map(|(name, value)| (LabelName::new(name), LabelValue::new(value))) + .collect::>(); + Self { items: values } + } +} + +impl From<[(&str, &str); N]> for LabelSet { + fn from(arr: [(&str, &str); N]) -> Self { + let values = arr + .iter() + .map(|(name, value)| (LabelName::new(name), LabelValue::new(value))) + .collect::>(); + Self { items: values } + } +} + +impl From for LabelSet { + fn from(label_pair: LabelPair) -> Self { + let mut set = BTreeMap::new(); + + set.insert(label_pair.0, label_pair.1); + + Self { items: set } + } +} + +#[derive(Debug, Clone, Eq, PartialEq, Default, Deserialize, Serialize)] +struct SerializedLabel { + name: LabelName, + value: LabelValue, +} + +impl Serialize for LabelSet { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + self.items + .iter() + .map(|(key, value)| SerializedLabel { + name: key.clone(), + value: value.clone(), + }) + .collect::>() + .serialize(serializer) + } +} + +impl<'de> Deserialize<'de> for LabelSet { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let serialized_labels = Vec::::deserialize(deserializer)?; + + Ok(LabelSet::from(serialized_labels)) + } +} + +impl PrometheusSerializable for LabelSet { + fn to_prometheus(&self) -> String { + let items = self.items.iter().fold(String::new(), |mut output, label_pair| { + if !output.is_empty() { + output.push(','); + } + + output.push_str(&label_pair.to_prometheus()); + + output + }); + + format!("{{{items}}}") + } +} + +#[cfg(test)] +mod tests { + + use std::collections::BTreeMap; + + use pretty_assertions::assert_eq; + + use super::{LabelName, LabelValue}; + use crate::label::LabelSet; + use crate::prometheus::PrometheusSerializable; + + fn sample_vec_of_label_pairs() -> Vec<(LabelName, LabelValue)> { + sample_array_of_label_pairs().into() + } + + fn sample_array_of_label_pairs() -> [(LabelName, 
LabelValue); 3] { + [ + (LabelName::new("server_service_binding_protocol"), LabelValue::new("http")), + (LabelName::new("server_service_binding_ip"), LabelValue::new("0.0.0.0")), + (LabelName::new("server_service_binding_port"), LabelValue::new("7070")), + ] + } + + #[test] + fn it_should_allow_instantiation_from_an_array_of_label_pairs() { + let label_set: LabelSet = sample_array_of_label_pairs().into(); + + assert_eq!( + label_set, + LabelSet { + items: BTreeMap::from(sample_array_of_label_pairs()) + } + ); + } + + #[test] + fn it_should_allow_instantiation_from_a_vec_of_label_pairs() { + let label_set: LabelSet = sample_vec_of_label_pairs().into(); + + assert_eq!( + label_set, + LabelSet { + items: BTreeMap::from(sample_array_of_label_pairs()) + } + ); + } + + #[test] + fn it_should_allow_instantiation_from_a_b_tree_map() { + let label_set: LabelSet = BTreeMap::from(sample_array_of_label_pairs()).into(); + + assert_eq!( + label_set, + LabelSet { + items: BTreeMap::from(sample_array_of_label_pairs()) + } + ); + } + + #[test] + fn it_should_allow_instantiation_from_a_label_pair() { + let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + + assert_eq!( + label_set, + LabelSet { + items: BTreeMap::from([(LabelName::new("label_name"), LabelValue::new("value"))]) + } + ); + } + + #[test] + fn it_should_allow_inserting_a_new_label_pair() { + let mut label_set = LabelSet::default(); + + label_set.upsert(LabelName::new("label_name"), LabelValue::new("value")); + + assert_eq!( + label_set.items.get(&LabelName::new("label_name")).unwrap(), + &LabelValue::new("value") + ); + } + + #[test] + fn it_should_allow_updating_a_label_value() { + let mut label_set = LabelSet::default(); + + label_set.upsert(LabelName::new("label_name"), LabelValue::new("old value")); + label_set.upsert(LabelName::new("label_name"), LabelValue::new("new value")); + + assert_eq!( + label_set.items.get(&LabelName::new("label_name")).unwrap(), + 
&LabelValue::new("new value") + ); + } + + #[test] + fn it_should_allow_serializing_to_json_as_an_array_of_label_objects() { + let label_set = LabelSet::from((LabelName::new("label_name"), LabelValue::new("label value"))); + + let json = serde_json::to_string(&label_set).unwrap(); + + assert_eq!( + formatjson::format_json(&json).unwrap(), + formatjson::format_json( + r#" + [ + { + "name": "label_name", + "value": "label value" + } + ] + "# + ) + .unwrap() + ); + } + + #[test] + fn it_should_allow_deserializing_from_json_as_an_array_of_label_objects() { + let json = formatjson::format_json( + r#" + [ + { + "name": "label_name", + "value": "label value" + } + ] + "#, + ) + .unwrap(); + + let label_set: LabelSet = serde_json::from_str(&json).unwrap(); + + assert_eq!( + label_set, + LabelSet::from((LabelName::new("label_name"), LabelValue::new("label value"))) + ); + } + + #[test] + fn it_should_allow_serializing_to_prometheus_format() { + let label_set = LabelSet::from((LabelName::new("label_name"), LabelValue::new("label value"))); + + assert_eq!(label_set.to_prometheus(), r#"{label_name="label value"}"#); + } + + #[test] + fn it_should_alphabetically_order_labels_in_prometheus_format() { + let label_set = LabelSet::from([ + (LabelName::new("b_label_name"), LabelValue::new("b label value")), + (LabelName::new("a_label_name"), LabelValue::new("a label value")), + ]); + + assert_eq!( + label_set.to_prometheus(), + r#"{a_label_name="a label value",b_label_name="b label value"}"# + ); + } + + #[test] + fn it_should_allow_displaying() { + let label_set = LabelSet::from((LabelName::new("label_name"), LabelValue::new("label value"))); + + assert_eq!(label_set.to_string(), r#"{label_name="label value"}"#); + } +} diff --git a/packages/metrics/src/label/value.rs b/packages/metrics/src/label/value.rs new file mode 100644 index 000000000..ce657250c --- /dev/null +++ b/packages/metrics/src/label/value.rs @@ -0,0 +1,32 @@ +use derive_more::Display; +use serde::{Deserialize, 
Serialize}; + +use crate::prometheus::PrometheusSerializable; + +#[derive(Debug, Display, Clone, Eq, PartialEq, Default, Deserialize, Serialize, Hash, Ord, PartialOrd)] +pub struct LabelValue(String); + +impl LabelValue { + #[must_use] + pub fn new(value: &str) -> Self { + Self(value.to_owned()) + } +} + +impl PrometheusSerializable for LabelValue { + fn to_prometheus(&self) -> String { + self.0.clone() + } +} + +#[cfg(test)] +mod tests { + use crate::label::value::LabelValue; + use crate::prometheus::PrometheusSerializable; + + #[test] + fn it_serializes_to_prometheus() { + let label_value = LabelValue::new("value"); + assert_eq!(label_value.to_prometheus(), "value"); + } +} diff --git a/packages/metrics/src/lib.rs b/packages/metrics/src/lib.rs new file mode 100644 index 000000000..1cb0df195 --- /dev/null +++ b/packages/metrics/src/lib.rs @@ -0,0 +1,29 @@ +pub mod counter; +pub mod gauge; +pub mod label; +pub mod metric; +pub mod metric_collection; +pub mod prometheus; +pub mod sample; +pub mod sample_collection; +pub mod thread_safe_metric_collection; +pub mod unit; + +#[cfg(test)] +mod tests { + /// It removes leading and trailing whitespace from each line, and empty lines. 
+ pub fn format_prometheus_output(output: &str) -> String { + output + .lines() + .map(str::trim) + .filter(|line| !line.is_empty()) + .collect::>() + .join("\n") + } + + pub fn sort_lines(s: &str) -> String { + let mut lines: Vec<&str> = s.split('\n').collect(); + lines.sort_unstable(); + lines.join("\n") + } +} diff --git a/packages/metrics/src/metric/description.rs b/packages/metrics/src/metric/description.rs new file mode 100644 index 000000000..8a50dee90 --- /dev/null +++ b/packages/metrics/src/metric/description.rs @@ -0,0 +1,29 @@ +use derive_more::Display; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Display, Clone, Eq, PartialEq, Default, Deserialize, Serialize, Hash, Ord, PartialOrd)] +pub struct MetricDescription(String); + +impl MetricDescription { + #[must_use] + pub fn new(name: &str) -> Self { + Self(name.to_owned()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn it_should_be_created_from_a_string_reference() { + let metric = MetricDescription::new("Metric description"); + assert_eq!(metric.0, "Metric description"); + } + + #[test] + fn it_should_be_displayed() { + let metric = MetricDescription::new("Metric description"); + assert_eq!(metric.to_string(), "Metric description"); + } +} diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs new file mode 100644 index 000000000..0d79a24d3 --- /dev/null +++ b/packages/metrics/src/metric/mod.rs @@ -0,0 +1,192 @@ +pub mod description; +pub mod name; + +use serde::{Deserialize, Serialize}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::counter::Counter; +use super::label::LabelSet; +use super::prometheus::PrometheusSerializable; +use super::sample::Sample; +use super::sample_collection::SampleCollection; +use crate::gauge::Gauge; + +pub type MetricName = name::MetricName; + +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +pub struct Metric { + name: MetricName, + + #[serde(rename = "samples")] + 
sample_collection: SampleCollection, +} + +impl Metric { + #[must_use] + pub fn new(name: MetricName, samples: SampleCollection) -> Self { + Self { + name, + sample_collection: samples, + } + } + + #[must_use] + pub fn name(&self) -> &MetricName { + &self.name + } + + #[must_use] + pub fn get_sample(&self, label_set: &LabelSet) -> Option<&Sample> { + self.sample_collection.get(label_set) + } + + #[must_use] + pub fn number_of_samples(&self) -> usize { + self.sample_collection.len() + } + + #[must_use] + pub fn is_empty(&self) -> bool { + self.sample_collection.is_empty() + } +} + +impl Metric { + pub fn increment(&mut self, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + self.sample_collection.increment(label_set, time); + } +} + +impl Metric { + pub fn set(&mut self, label_set: &LabelSet, value: f64, time: DurationSinceUnixEpoch) { + self.sample_collection.set(label_set, value, time); + } +} + +impl PrometheusSerializable for Metric { + fn to_prometheus(&self) -> String { + let samples: Vec = self + .sample_collection + .iter() + .map(|(_label_set, sample)| { + format!( + "{}{} {}", + self.name.to_prometheus(), + sample.labels().to_prometheus(), + sample.value().to_prometheus() + ) + }) + .collect(); + samples.join("\n") + } +} + +#[cfg(test)] +mod tests { + mod for_generic_metrics { + use super::super::*; + use crate::gauge::Gauge; + use crate::label::{LabelName, LabelValue}; + + #[test] + fn it_should_be_empty_when_it_does_not_have_any_sample() { + let name = MetricName::new("test_metric"); + + let samples = SampleCollection::::default(); + + let metric = Metric::::new(name.clone(), samples); + + assert!(metric.is_empty()); + } + + fn counter_metric_with_one_sample() -> Metric { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + + let name = MetricName::new("test_metric"); + + let label_set: LabelSet = [(LabelName::new("server_binding_protocol"), LabelValue::new("http"))].into(); + + let samples = 
SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set.clone())]); + + Metric::::new(name.clone(), samples) + } + + #[test] + fn it_should_return_the_number_of_samples() { + assert_eq!(counter_metric_with_one_sample().number_of_samples(), 1); + } + + #[test] + fn it_should_return_zero_number_of_samples_for_an_empty_metric() { + let name = MetricName::new("test_metric"); + + let samples = SampleCollection::::default(); + + let metric = Metric::::new(name.clone(), samples); + + assert_eq!(metric.number_of_samples(), 0); + } + } + + mod for_counter_metrics { + use super::super::*; + use crate::counter::Counter; + use crate::label::{LabelName, LabelValue}; + + #[test] + fn it_should_be_created_from_its_name_and_a_collection_of_samples() { + let name = MetricName::new("test_metric"); + + let samples = SampleCollection::::default(); + + let _metric = Metric::::new(name, samples); + } + + #[test] + fn it_should_allow_incrementing_a_sample() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + + let name = MetricName::new("test_metric"); + + let label_set: LabelSet = [(LabelName::new("server_binding_protocol"), LabelValue::new("http"))].into(); + + let samples = SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set.clone())]); + + let metric = Metric::::new(name.clone(), samples); + + assert_eq!(metric.get_sample(&label_set).unwrap().value().value(), 1); + } + } + + mod for_gauge_metrics { + use approx::assert_relative_eq; + + use super::super::*; + use crate::gauge::Gauge; + use crate::label::{LabelName, LabelValue}; + + #[test] + fn it_should_be_created_from_its_name_and_a_collection_of_samples() { + let name = MetricName::new("test_metric"); + + let samples = SampleCollection::::default(); + + let _metric = Metric::::new(name, samples); + } + + #[test] + fn it_should_allow_setting_a_sample() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + + let name = MetricName::new("test_metric"); + + let label_set: 
LabelSet = [(LabelName::new("server_binding_protocol"), LabelValue::new("http"))].into(); + + let samples = SampleCollection::new(vec![Sample::new(Gauge::new(1.0), time, label_set.clone())]); + + let metric = Metric::::new(name.clone(), samples); + + assert_relative_eq!(metric.get_sample(&label_set).unwrap().value().value(), 1.0); + } + } +} diff --git a/packages/metrics/src/metric/name.rs b/packages/metrics/src/metric/name.rs new file mode 100644 index 000000000..c904f34d3 --- /dev/null +++ b/packages/metrics/src/metric/name.rs @@ -0,0 +1,92 @@ +use derive_more::Display; +use serde::{Deserialize, Serialize}; + +use crate::prometheus::PrometheusSerializable; + +#[derive(Debug, Display, Clone, Eq, PartialEq, Default, Deserialize, Serialize, Hash, Ord, PartialOrd)] +pub struct MetricName(String); + +impl MetricName { + /// Creates a new `MetricName` instance. + /// + /// # Panics + /// + /// Panics if the provided name is empty. + #[must_use] + pub fn new(name: &str) -> Self { + assert!( + !name.is_empty(), + "Metric name cannot be empty. It must have at least one character." + ); + + Self(name.to_owned()) + } +} + +impl PrometheusSerializable for MetricName { + fn to_prometheus(&self) -> String { + // Metric names may contain ASCII letters, digits, underscores, and + // colons. It must match the regex [a-zA-Z_:][a-zA-Z0-9_:]*. + // If the metric name starts with, or contains, an invalid character: + // replace character with underscore. 
+ + self.0 + .chars() + .enumerate() + .map(|(i, c)| { + if i == 0 { + if c.is_ascii_alphabetic() || c == '_' || c == ':' { + c + } else { + '_' + } + } else if c.is_ascii_alphanumeric() || c == '_' || c == ':' { + c + } else { + '_' + } + }) + .collect() + } +} + +#[cfg(test)] +mod tests { + + mod serialization_of_metric_name_to_prometheus { + + use rstest::rstest; + + use crate::metric::MetricName; + use crate::prometheus::PrometheusSerializable; + + #[rstest] + #[case("valid name", "valid_name", "valid_name")] + #[case("leading underscore", "_leading_underscore", "_leading_underscore")] + #[case("leading colon", ":leading_colon", ":leading_colon")] + #[case("leading lowercase", "v123", "v123")] + #[case("leading uppercase", "V123", "V123")] + fn valid_names_in_prometheus(#[case] case: &str, #[case] input: &str, #[case] output: &str) { + assert_eq!(MetricName::new(input).to_prometheus(), output, "{case} failed: {input:?}"); + } + + #[rstest] + #[case("invalid start 1", "9invalid_start", "_invalid_start")] + #[case("invalid start 2", "@test", "_test")] + #[case("invalid dash", "invalid-char", "invalid_char")] + #[case("invalid spaces", "spaces are bad", "spaces_are_bad")] + #[case("invalid special chars", "a!b@c#d$e%f^g&h*i(j)", "a_b_c_d_e_f_g_h_i_j_")] + #[case("invalid slash", "my:metric/version", "my:metric_version")] + #[case("all invalid characters", "!@#$%^&*()", "__________")] + #[case("non_ascii_characters", "ñaca©", "_aca_")] + fn names_that_need_changes_in_prometheus(#[case] case: &str, #[case] input: &str, #[case] output: &str) { + assert_eq!(MetricName::new(input).to_prometheus(), output, "{case} failed: {input:?}"); + } + + #[test] + #[should_panic(expected = "Metric name cannot be empty. 
It must have at least one character.")] + fn empty_name() { + let _name = MetricName::new(""); + } + } +} diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs new file mode 100644 index 000000000..588194e5f --- /dev/null +++ b/packages/metrics/src/metric_collection.rs @@ -0,0 +1,759 @@ +use std::collections::{HashMap, HashSet}; + +use serde::ser::{SerializeSeq, Serializer}; +use serde::{Deserialize, Deserializer, Serialize}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::counter::Counter; +use super::gauge::Gauge; +use super::label::LabelSet; +use super::metric::{Metric, MetricName}; +use super::prometheus::PrometheusSerializable; +use crate::metric::description::MetricDescription; +use crate::sample_collection::SampleCollection; +use crate::unit::Unit; + +// todo: serialize in a deterministic order. For example: +// - First the counter metrics ordered by name. +// - Then the gauge metrics ordered by name. + +/// Use this type only when behind a lock that guarantees thread-safety. +/// Otherwise, there could be race conditions that lead to duplicate metric +/// names in different metric types. +#[derive(Debug, Clone, Default, PartialEq)] +pub struct MetricCollection { + counters: MetricKindCollection, + gauges: MetricKindCollection, +} + +impl MetricCollection { + /// # Panics + /// + /// Panics if there are duplicate metric names across counters and gauges. + #[must_use] + pub fn new(counters: MetricKindCollection, gauges: MetricKindCollection) -> Self { + // Check for name collisions across metric types + let counter_names: HashSet<_> = counters.names().collect(); + let gauge_names: HashSet<_> = gauges.names().collect(); + + assert!( + counter_names.is_disjoint(&gauge_names), + "Metric names must be unique across counters and gauges" + ); + + Self { counters, gauges } + } + + /// Merges another `MetricCollection` into this one. 
+ /// + /// # Errors + /// + /// Returns an error if a metric name already exists in the current collection. + pub fn merge(&mut self, other: &Self) -> Result<(), MergeError> { + self.counters.merge(&other.counters)?; + self.gauges.merge(&other.gauges)?; + Ok(()) + } + + // Counter-specific methods + + pub fn describe_counter(&mut self, name: &MetricName, _opt_unit: Option, _opt_description: Option) { + self.counters.ensure_metric_exists(name); + } + + #[must_use] + pub fn get_counter_value(&self, name: &MetricName, label_set: &LabelSet) -> Counter { + self.counters.get_value(name, label_set) + } + + /// # Panics + /// + /// Panics if a gauge with the same name already exists. + pub fn increase_counter(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + assert!( + !self.gauges.metrics.contains_key(name), + "Cannot create counter with name '{name}': a gauge with this name already exists", + ); + + self.counters.increment(name, label_set, time); + } + + pub fn ensure_counter_exists(&mut self, name: &MetricName) { + self.counters.ensure_metric_exists(name); + } + + // Gauge-specific methods + + pub fn describe_gauge(&mut self, name: &MetricName, _opt_unit: Option, _opt_description: Option) { + self.gauges.ensure_metric_exists(name); + } + + #[must_use] + pub fn get_gauge_value(&self, name: &MetricName, label_set: &LabelSet) -> Gauge { + self.gauges.get_value(name, label_set) + } + + /// # Panics + /// + /// Panics if a counter with the same name already exists. 
+ pub fn set_gauge(&mut self, name: &MetricName, label_set: &LabelSet, value: f64, time: DurationSinceUnixEpoch) { + assert!( + !self.counters.metrics.contains_key(name), + "Cannot create gauge with name '{name}': a counter with this name already exists" + ); + + self.gauges.set(name, label_set, value, time); + } + + pub fn ensure_gauge_exists(&mut self, name: &MetricName) { + self.gauges.ensure_metric_exists(name); + } +} + +#[derive(thiserror::Error, Debug, Clone)] +pub enum MergeError { + #[error("Cannot merge metric '{metric_name}': it already exists in the current collection")] + MetricNameAlreadyExists { metric_name: MetricName }, +} + +/// Implements serialization for `MetricCollection`. +impl Serialize for MetricCollection { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + #[derive(Serialize)] + #[serde(tag = "kind", rename_all = "lowercase")] + enum SerializableMetric<'a> { + Counter(&'a Metric), + Gauge(&'a Metric), + } + + let mut seq = serializer.serialize_seq(Some(self.counters.metrics.len() + self.gauges.metrics.len()))?; + + for metric in self.counters.metrics.values() { + seq.serialize_element(&SerializableMetric::Counter(metric))?; + } + + for metric in self.gauges.metrics.values() { + seq.serialize_element(&SerializableMetric::Gauge(metric))?; + } + + seq.end() + } +} + +impl<'de> Deserialize<'de> for MetricCollection { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + #[serde(tag = "kind", rename_all = "lowercase")] + enum MetricPayload { + Counter(Metric), + Gauge(Metric), + } + + let payload = Vec::::deserialize(deserializer)?; + + let mut counters = Vec::new(); + let mut gauges = Vec::new(); + + for metric in payload { + match metric { + MetricPayload::Counter(counter) => counters.push(counter), + MetricPayload::Gauge(gauge) => gauges.push(gauge), + } + } + + Ok(MetricCollection::new( + MetricKindCollection::new(counters), + MetricKindCollection::new(gauges), 
+ )) + } +} + +impl PrometheusSerializable for MetricCollection { + fn to_prometheus(&self) -> String { + self.counters + .metrics + .values() + .filter(|metric| !metric.is_empty()) + .map(Metric::::to_prometheus) + .chain( + self.gauges + .metrics + .values() + .filter(|metric| !metric.is_empty()) + .map(Metric::::to_prometheus), + ) + .collect::>() + .join("\n") + } +} + +#[derive(Debug, Clone, Default, PartialEq)] +pub struct MetricKindCollection { + metrics: HashMap>, +} + +impl MetricKindCollection { + /// Creates a new `MetricKindCollection` from a vector of metrics + /// + /// # Panics + /// + /// Panics if duplicate metric names are found + #[must_use] + pub fn new(metrics: Vec>) -> Self { + let mut map = HashMap::with_capacity(metrics.len()); + + for metric in metrics { + assert!( + map.insert(metric.name().clone(), metric).is_none(), + "Duplicate MetricName found in MetricKindCollection" + ); + } + Self { metrics: map } + } + + /// Returns an iterator over all metric names in this collection. + pub fn names(&self) -> impl Iterator { + self.metrics.keys() + } + + pub fn ensure_metric_exists(&mut self, name: &MetricName) { + if !self.metrics.contains_key(name) { + self.metrics + .insert(name.clone(), Metric::new(name.clone(), SampleCollection::new(vec![]))); + } + } +} + +impl MetricKindCollection { + /// Merges another `MetricKindCollection` into this one. + /// + /// # Errors + /// + /// Returns an error if a metric name already exists in the current collection. 
+ pub fn merge(&mut self, other: &Self) -> Result<(), MergeError> { + // Check for name collisions + for metric_name in other.metrics.keys() { + if self.metrics.contains_key(metric_name) { + return Err(MergeError::MetricNameAlreadyExists { + metric_name: metric_name.clone(), + }); + } + } + + for (metric_name, metric) in &other.metrics { + if self.metrics.insert(metric_name.clone(), metric.clone()).is_some() { + return Err(MergeError::MetricNameAlreadyExists { + metric_name: metric_name.clone(), + }); + } + } + + Ok(()) + } +} + +impl MetricKindCollection { + /// Increments the counter for the given metric name and labels. + /// + /// If the metric name does not exist, it will be created. + /// + /// # Panics + /// + /// Panics if the metric does not exist and it could not be created. + pub fn increment(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + self.ensure_metric_exists(name); + + let metric = self.metrics.get_mut(name).expect("Counter metric should exist"); + + metric.increment(label_set, time); + } + + #[must_use] + pub fn get_value(&self, name: &MetricName, label_set: &LabelSet) -> Counter { + self.metrics + .get(name) + .and_then(|metric| metric.get_sample(label_set)) + .map_or(Counter::default(), |sample| sample.value().clone()) + } +} + +impl MetricKindCollection { + /// Sets the gauge for the given metric name and labels. + /// + /// If the metric name does not exist, it will be created. + /// + /// # Panics + /// + /// Panics if the metric does not exist and it could not be created. 
+ pub fn set(&mut self, name: &MetricName, label_set: &LabelSet, value: f64, time: DurationSinceUnixEpoch) { + self.ensure_metric_exists(name); + + let metric = self.metrics.get_mut(name).expect("Gauge metric should exist"); + + metric.set(label_set, value, time); + } + + #[must_use] + pub fn get_value(&self, name: &MetricName, label_set: &LabelSet) -> Gauge { + self.metrics + .get(name) + .and_then(|metric| metric.get_sample(label_set)) + .map_or(Gauge::default(), |sample| sample.value().clone()) + } +} + +#[cfg(test)] +mod tests { + + use pretty_assertions::assert_eq; + + use super::*; + use crate::label::{LabelName, LabelValue}; + use crate::sample::Sample; + use crate::tests::{format_prometheus_output, sort_lines}; + + /// Fixture for testing serialization and deserialization of `MetricCollection`. + /// + /// It contains a default `MetricCollection` object, its JSON representation, + /// and its Prometheus format representation. + struct MetricCollectionFixture { + pub object: MetricCollection, + pub json: String, + pub prometheus: String, + } + + impl Default for MetricCollectionFixture { + fn default() -> Self { + Self { + object: Self::object(), + json: Self::json(), + prometheus: Self::prometheus(), + } + } + } + + impl MetricCollectionFixture { + fn deconstruct(&self) -> (MetricCollection, String, String) { + (self.object.clone(), self.json.clone(), self.prometheus.clone()) + } + + fn object() -> MetricCollection { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + + let label_set_1: LabelSet = [ + (LabelName::new("server_binding_protocol"), LabelValue::new("http")), + (LabelName::new("server_binding_ip"), LabelValue::new("0.0.0.0")), + (LabelName::new("server_binding_port"), LabelValue::new("7070")), + ] + .into(); + + MetricCollection::new( + MetricKindCollection::new(vec![Metric::new( + MetricName::new("http_tracker_core_announce_requests_received_total"), + SampleCollection::new(vec![Sample::new(Counter::new(1), time, 
label_set_1.clone())]), + )]), + MetricKindCollection::new(vec![Metric::new( + MetricName::new("udp_tracker_server_performance_avg_announce_processing_time_ns"), + SampleCollection::new(vec![Sample::new(Gauge::new(1.0), time, label_set_1.clone())]), + )]), + ) + } + + fn json() -> String { + r#" + [ + { + "kind":"counter", + "name":"http_tracker_core_announce_requests_received_total", + "samples":[ + { + "value":1, + "update_at":"2025-04-02T00:00:00+00:00", + "labels":[ + { + "name":"server_binding_ip", + "value":"0.0.0.0" + }, + { + "name":"server_binding_port", + "value":"7070" + }, + { + "name":"server_binding_protocol", + "value":"http" + } + ] + } + ] + }, + { + "kind":"gauge", + "name":"udp_tracker_server_performance_avg_announce_processing_time_ns", + "samples":[ + { + "value":1.0, + "update_at":"2025-04-02T00:00:00+00:00", + "labels":[ + { + "name":"server_binding_ip", + "value":"0.0.0.0" + }, + { + "name":"server_binding_port", + "value":"7070" + }, + { + "name":"server_binding_protocol", + "value":"http" + } + ] + } + ] + } + ] + "# + .to_owned() + } + + fn prometheus() -> String { + format_prometheus_output( + r#" + http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 + udp_tracker_server_performance_avg_announce_processing_time_ns{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 + "#, + ) + } + } + + #[test] + #[should_panic(expected = "Metric names must be unique across counters and gauges")] + fn it_should_not_allow_duplicate_names_across_types() { + let counter = MetricKindCollection::new(vec![Metric::new( + MetricName::new("test_metric"), + SampleCollection::new(vec![]), + )]); + + let gauge = MetricKindCollection::new(vec![Metric::new( + MetricName::new("test_metric"), + SampleCollection::new(vec![]), + )]); + + let _unused = MetricCollection::new(counter, gauge); + } + + #[test] + #[should_panic(expected = "Cannot create 
gauge with name 'test_metric': a counter with this name already exists")] + fn it_should_not_allow_creating_a_gauge_with_the_same_name_as_a_counter() { + let mut collection = MetricCollection::default(); + let label_set = LabelSet::default(); + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + + // First create a counter + collection.increase_counter(&MetricName::new("test_metric"), &label_set, time); + + // Then try to create a gauge with the same name - this should panic + collection.set_gauge(&MetricName::new("test_metric"), &label_set, 1.0, time); + } + + #[test] + #[should_panic(expected = "Cannot create counter with name 'test_metric': a gauge with this name already exists")] + fn it_should_not_allow_creating_a_counter_with_the_same_name_as_a_gauge() { + let mut collection = MetricCollection::default(); + let label_set = LabelSet::default(); + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + + // First set the gauge + collection.set_gauge(&MetricName::new("test_metric"), &label_set, 1.0, time); + + // Then try to create a counter with the same name - this should panic + collection.increase_counter(&MetricName::new("test_metric"), &label_set, time); + } + + #[test] + fn it_should_allow_serializing_to_json() { + // todo: this test does work with metric with multiple samples becuase + // samples are not serialized in the same order as they are created. 
+ let (metric_collection, expected_json, _expected_prometheus) = MetricCollectionFixture::default().deconstruct(); + + let json = serde_json::to_string_pretty(&metric_collection).unwrap(); + + assert_eq!( + serde_json::from_str::(&json).unwrap(), + serde_json::from_str::(&expected_json).unwrap() + ); + } + + #[test] + fn it_should_allow_deserializing_from_json() { + let (expected_metric_collection, metric_collection_json, _expected_prometheus) = + MetricCollectionFixture::default().deconstruct(); + + let metric_collection: MetricCollection = serde_json::from_str(&metric_collection_json).unwrap(); + + assert_eq!(metric_collection, expected_metric_collection); + } + + #[test] + fn it_should_allow_serializing_to_prometheus_format() { + let (metric_collection, _expected_json, expected_prometheus) = MetricCollectionFixture::default().deconstruct(); + + let prometheus_output = metric_collection.to_prometheus(); + + assert_eq!(prometheus_output, expected_prometheus); + } + + #[test] + fn it_should_allow_serializing_to_prometheus_format_with_multiple_samples_per_metric() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + + let label_set_1: LabelSet = [ + (LabelName::new("server_binding_protocol"), LabelValue::new("http")), + (LabelName::new("server_binding_ip"), LabelValue::new("0.0.0.0")), + (LabelName::new("server_binding_port"), LabelValue::new("7070")), + ] + .into(); + + let label_set_2: LabelSet = [ + (LabelName::new("server_binding_protocol"), LabelValue::new("http")), + (LabelName::new("server_binding_ip"), LabelValue::new("0.0.0.0")), + (LabelName::new("server_binding_port"), LabelValue::new("7171")), + ] + .into(); + + let metric_collection = MetricCollection::new( + MetricKindCollection::new(vec![Metric::new( + MetricName::new("http_tracker_core_announce_requests_received_total"), + SampleCollection::new(vec![ + Sample::new(Counter::new(1), time, label_set_1.clone()), + Sample::new(Counter::new(2), time, label_set_2.clone()), + ]), + )]), + 
MetricKindCollection::new(vec![]), + ); + + let prometheus_output = metric_collection.to_prometheus(); + + let expected_prometheus_output = format_prometheus_output( + r#" + http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7171",server_binding_protocol="http"} 2 + http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 + "#, + ); + + // code-review: samples are not serialized in the same order as they are created. + // Should we use a deterministic order? + + assert_eq!(sort_lines(&prometheus_output), sort_lines(&expected_prometheus_output)); + } + + #[test] + fn it_should_exclude_metrics_without_samples_from_prometheus_format() { + let mut counters = MetricKindCollection::new(vec![]); + let mut gauges = MetricKindCollection::new(vec![]); + + counters.ensure_metric_exists(&MetricName::new("test_counter")); + gauges.ensure_metric_exists(&MetricName::new("test_gauge")); + + let metric_collection = MetricCollection::new(counters, gauges); + + let prometheus_output = metric_collection.to_prometheus(); + + assert_eq!(prometheus_output, ""); + } + + mod for_counters { + + use pretty_assertions::assert_eq; + + use super::*; + use crate::label::{LabelName, LabelValue}; + use crate::sample::Sample; + + #[test] + fn it_should_increase_a_preexistent_counter() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + + let mut metric_collection = MetricCollection::new( + MetricKindCollection::new(vec![Metric::new( + MetricName::new("test_counter"), + SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]), + )]), + MetricKindCollection::new(vec![]), + ); + + metric_collection.increase_counter(&MetricName::new("test_counter"), &label_set, time); + metric_collection.increase_counter(&MetricName::new("test_counter"), 
&label_set, time); + + assert_eq!( + metric_collection.get_counter_value(&MetricName::new("test_counter"), &label_set), + Counter::new(2) + ); + } + + #[test] + fn it_should_automatically_create_a_counter_when_increasing_if_it_does_not_exist() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + + let mut metric_collection = + MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); + + metric_collection.increase_counter(&MetricName::new("test_counter"), &label_set, time); + metric_collection.increase_counter(&MetricName::new("test_counter"), &label_set, time); + + assert_eq!( + metric_collection.get_counter_value(&MetricName::new("test_counter"), &label_set), + Counter::new(2) + ); + } + + #[test] + fn it_should_allow_making_sure_a_counter_exists_without_increasing_it() { + let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + + let mut metric_collection = + MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); + + metric_collection.ensure_counter_exists(&MetricName::new("test_counter")); + + assert_eq!( + metric_collection.get_counter_value(&MetricName::new("test_counter"), &label_set), + Counter::default() + ); + } + + #[test] + fn it_should_allow_describing_a_counter_before_using_it() { + let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + + let mut metric_collection = + MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); + + metric_collection.describe_counter(&MetricName::new("test_counter"), None, None); + + assert_eq!( + metric_collection.get_counter_value(&MetricName::new("test_counter"), &label_set), + Counter::default() + ); + } + + #[test] + #[should_panic(expected = "Duplicate MetricName found in MetricKindCollection")] + fn 
it_should_not_allow_duplicate_metric_names_when_instantiating() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + + let _unused = MetricKindCollection::new(vec![ + Metric::new( + MetricName::new("test_counter"), + SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]), + ), + Metric::new( + MetricName::new("test_counter"), + SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]), + ), + ]); + } + } + + mod for_gauges { + + use pretty_assertions::assert_eq; + + use super::*; + use crate::label::{LabelName, LabelValue}; + use crate::sample::Sample; + + #[test] + fn it_should_set_a_preexistent_gauge() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + + let mut metric_collection = MetricCollection::new( + MetricKindCollection::new(vec![]), + MetricKindCollection::new(vec![Metric::new( + MetricName::new("test_gauge"), + SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]), + )]), + ); + + metric_collection.set_gauge(&MetricName::new("test_gauge"), &label_set, 1.0, time); + + assert_eq!( + metric_collection.get_gauge_value(&MetricName::new("test_gauge"), &label_set), + Gauge::new(1.0) + ); + } + + #[test] + fn it_should_automatically_create_a_gauge_when_setting_if_it_does_not_exist() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + + let mut metric_collection = + MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); + + metric_collection.set_gauge(&MetricName::new("test_gauge"), &label_set, 1.0, time); + + assert_eq!( + metric_collection.get_gauge_value(&MetricName::new("test_gauge"), &label_set), + Gauge::new(1.0) + ); + } 
+ + #[test] + fn it_should_allow_making_sure_a_gauge_exists_without_increasing_it() { + let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + + let mut metric_collection = + MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); + + metric_collection.ensure_gauge_exists(&MetricName::new("test_gauge")); + + assert_eq!( + metric_collection.get_gauge_value(&MetricName::new("test_gauge"), &label_set), + Gauge::default() + ); + } + + #[test] + fn it_should_allow_describing_a_gauge_before_using_it() { + let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + + let mut metric_collection = + MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); + + metric_collection.describe_gauge(&MetricName::new("test_gauge"), None, None); + + assert_eq!( + metric_collection.get_gauge_value(&MetricName::new("test_gauge"), &label_set), + Gauge::default() + ); + } + + #[test] + #[should_panic(expected = "Duplicate MetricName found in MetricKindCollection")] + fn it_should_not_allow_duplicate_metric_names_when_instantiating() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + + let _unused = MetricKindCollection::new(vec![ + Metric::new( + MetricName::new("test_gauge"), + SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]), + ), + Metric::new( + MetricName::new("test_gauge"), + SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]), + ), + ]); + } + } +} diff --git a/packages/metrics/src/prometheus.rs b/packages/metrics/src/prometheus.rs new file mode 100644 index 000000000..bf058e442 --- /dev/null +++ b/packages/metrics/src/prometheus.rs @@ -0,0 +1,15 @@ +pub trait PrometheusSerializable { + /// Convert the implementing type into a Prometheus exposition format string. 
+ /// + /// # Returns + /// + /// A `String` containing the serialized representation. + fn to_prometheus(&self) -> String; +} + +// Blanket implementation for references +impl PrometheusSerializable for &T { + fn to_prometheus(&self) -> String { + (*self).to_prometheus() + } +} diff --git a/packages/metrics/src/sample.rs b/packages/metrics/src/sample.rs new file mode 100644 index 000000000..eddb2eefc --- /dev/null +++ b/packages/metrics/src/sample.rs @@ -0,0 +1,355 @@ +use chrono::{DateTime, Utc}; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::counter::Counter; +use super::gauge::Gauge; +use super::label::LabelSet; +use super::prometheus::PrometheusSerializable; + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct Sample { + value: T, + + #[serde(serialize_with = "serialize_duration", deserialize_with = "deserialize_duration")] + update_at: DurationSinceUnixEpoch, + + #[serde(rename = "labels")] + label_set: LabelSet, +} + +impl Sample { + #[must_use] + pub fn new(value: T, update_at: DurationSinceUnixEpoch, label_set: LabelSet) -> Self { + Self { + value, + update_at, + label_set, + } + } + + #[must_use] + pub fn labels(&self) -> &LabelSet { + &self.label_set + } + + #[must_use] + pub fn value(&self) -> &T { + &self.value + } + + #[must_use] + pub fn update_at(&self) -> DurationSinceUnixEpoch { + self.update_at + } + + fn set_update_at(&mut self, time: DurationSinceUnixEpoch) { + self.update_at = time; + } +} + +impl PrometheusSerializable for Sample { + fn to_prometheus(&self) -> String { + format!("{} {}", self.label_set.to_prometheus(), self.value.to_prometheus()) + } +} + +impl Sample { + pub fn increment(&mut self, time: DurationSinceUnixEpoch) { + self.value.increment(1); + self.set_update_at(time); + } +} + +impl Sample { + pub fn set(&mut self, value: f64, time: DurationSinceUnixEpoch) { + self.value.set(value); + self.set_update_at(time); + } 
+} + +/// Serializes the `update_at` field as a string in ISO 8601 format (RFC 3339). +/// +/// # Errors +/// +/// Returns an error if: +/// - The conversion from `u64` to `i64` fails. +/// - The timestamp is invalid. +fn serialize_duration(duration: &DurationSinceUnixEpoch, serializer: S) -> Result +where + S: Serializer, +{ + let secs = i64::try_from(duration.as_secs()).map_err(|_| serde::ser::Error::custom("Timestamp too large"))?; + let nanos = duration.subsec_nanos(); + + let datetime = DateTime::from_timestamp(secs, nanos).ok_or_else(|| serde::ser::Error::custom("Invalid timestamp"))?; + + serializer.serialize_str(&datetime.to_rfc3339()) // Serializes as ISO 8601 (RFC 3339) +} + +fn deserialize_duration<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + // Deserialize theISO 8601 (RFC 3339) formatted string + let datetime_str = String::deserialize(deserializer)?; + + let datetime = + DateTime::parse_from_rfc3339(&datetime_str).map_err(|e| de::Error::custom(format!("Invalid datetime format: {e}")))?; + + let datetime_utc = datetime.with_timezone(&Utc); + + let secs = u64::try_from(datetime_utc.timestamp()).map_err(|_| de::Error::custom("Timestamp out of range"))?; + + Ok(DurationSinceUnixEpoch::new(secs, datetime_utc.timestamp_subsec_nanos())) +} + +#[cfg(test)] +mod tests { + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use super::*; + + // Helper function to create a sample update time. 
+ fn updated_at_time() -> DurationSinceUnixEpoch { + DurationSinceUnixEpoch::from_secs(1_743_552_000) + } + + #[test] + fn it_should_have_a_value() { + let sample = Sample::new( + 42, + DurationSinceUnixEpoch::from_secs(1_743_552_000), + LabelSet::from(vec![("test", "label")]), + ); + + assert_eq!(sample.value(), &42); + } + + #[test] + fn it_should_record_the_latest_update_time() { + let sample = Sample::new( + 42, + DurationSinceUnixEpoch::from_secs(1_743_552_000), + LabelSet::from(vec![("test", "label")]), + ); + + assert_eq!(sample.update_at(), updated_at_time()); + } + + #[test] + fn it_should_include_a_label_set() { + let sample = Sample::new( + 42, + DurationSinceUnixEpoch::from_secs(1_743_552_000), + LabelSet::from(vec![("test", "label")]), + ); + + assert_eq!(sample.labels(), &LabelSet::from(vec![("test", "label")])); + } + + mod for_counter_type_sample { + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::label::LabelSet; + use crate::prometheus::PrometheusSerializable; + use crate::sample::tests::updated_at_time; + use crate::sample::{Counter, Sample}; + + #[test] + fn it_should_allow_a_counter_type_value() { + let sample = Sample::new( + Counter::new(42), + DurationSinceUnixEpoch::from_secs(1_743_552_000), + LabelSet::from(vec![("label_name", "label vale")]), + ); + + assert_eq!(sample.value(), &Counter::new(42)); + } + + #[test] + fn it_should_allow_incrementing_the_counter() { + let mut sample = Sample::new(Counter::default(), DurationSinceUnixEpoch::default(), LabelSet::default()); + + sample.increment(updated_at_time()); + + assert_eq!(sample.value(), &Counter::new(1)); + } + + #[test] + fn it_should_record_the_latest_update_time_when_the_counter_is_incremented() { + let mut sample = Sample::new(Counter::default(), DurationSinceUnixEpoch::default(), LabelSet::default()); + + let time = updated_at_time(); + + sample.increment(time); + + assert_eq!(sample.update_at(), time); + } + + #[test] + fn 
it_should_allow_exporting_to_prometheus_format() { + let counter = Counter::new(42); + + let labels = LabelSet::from(vec![("label_name", "label_value"), ("method", "GET")]); + + let sample = Sample::new(counter, DurationSinceUnixEpoch::default(), labels); + + assert_eq!(sample.to_prometheus(), r#"{label_name="label_value",method="GET"} 42"#); + } + } + mod for_gauge_type_sample { + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::label::LabelSet; + use crate::prometheus::PrometheusSerializable; + use crate::sample::tests::updated_at_time; + use crate::sample::{Gauge, Sample}; + + #[test] + fn it_should_allow_a_counter_type_value() { + let sample = Sample::new( + Gauge::new(42.0), + DurationSinceUnixEpoch::from_secs(1_743_552_000), + LabelSet::from(vec![("label_name", "label vale")]), + ); + + assert_eq!(sample.value(), &Gauge::new(42.0)); + } + + #[test] + fn it_should_allow_incrementing_the_counter() { + let mut sample = Sample::new(Gauge::default(), DurationSinceUnixEpoch::default(), LabelSet::default()); + + sample.set(1.0, updated_at_time()); + + assert_eq!(sample.value(), &Gauge::new(1.0)); + } + + #[test] + fn it_should_record_the_latest_update_time_when_the_counter_is_incremented() { + let mut sample = Sample::new(Gauge::default(), DurationSinceUnixEpoch::default(), LabelSet::default()); + + let time = updated_at_time(); + + sample.set(1.0, time); + + assert_eq!(sample.update_at(), time); + } + + #[test] + fn it_should_allow_exporting_to_prometheus_format() { + let counter = Gauge::new(42.0); + + let labels = LabelSet::from(vec![("label_name", "label_value"), ("method", "GET")]); + + let sample = Sample::new(counter, DurationSinceUnixEpoch::default(), labels); + + assert_eq!(sample.to_prometheus(), r#"{label_name="label_value",method="GET"} 42"#); + } + } + + mod serialization_to_json { + use pretty_assertions::assert_eq; + use serde_json::json; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::label::LabelSet; + 
use crate::sample::tests::updated_at_time; + use crate::sample::Sample; + + #[test] + fn test_serialization_round_trip() { + let original = Sample { + value: 42, + update_at: updated_at_time(), + label_set: LabelSet::from(vec![("test", "serialization")]), + }; + + let json = serde_json::to_string(&original).unwrap(); + let deserialized: Sample = serde_json::from_str(&json).unwrap(); + + assert_eq!(original.value, deserialized.value); + assert_eq!(original.update_at, deserialized.update_at); + assert_eq!(original.label_set, deserialized.label_set); + } + + #[test] + fn test_rfc3339_serialization_format_for_update_time() { + let sample = Sample::new( + 42, + DurationSinceUnixEpoch::new(1_743_552_000, 100), + LabelSet::from(vec![("label_name", "label value")]), + ); + + let json = serde_json::to_string(&sample).unwrap(); + + let expected_json = r#" + { + "value": 42, + "update_at": "2025-04-02T00:00:00.000000100+00:00", + "labels": [ + { + "name": "label_name", + "value": "label value" + } + ] + } + "#; + + assert_eq!( + serde_json::from_str::(&json).unwrap(), + serde_json::from_str::(expected_json).unwrap() + ); + } + + #[test] + fn test_invalid_update_timestamp_serialization() { + let timestamp_too_large = DurationSinceUnixEpoch::new(i64::MAX as u64 + 1, 0); + + let sample = Sample::new(42, timestamp_too_large, LabelSet::from(vec![("label_name", "label value")])); + + let result = serde_json::to_string(&sample); + + assert!(result.is_err()); + assert!(result.unwrap_err().to_string().contains("Timestamp too large")); + } + + #[test] + fn test_invalid_update_datetime_deserialization() { + let invalid_json = json!( + r#" + { + "value": 42, + "update_at": "1-1-2023T25:00:00Z", + "labels": [ + { + "name": "label_name", + "value": "label value" + } + ] + } + "# + ); + + let result: Result = serde_json::from_value(invalid_json); + + assert!(result.unwrap_err().to_string().contains("invalid type")); + } + + #[test] + fn test_update_datetime_high_precision_nanoseconds() { + 
let sample = Sample::new( + 42, + DurationSinceUnixEpoch::new(1_743_552_000, 100), + LabelSet::from(vec![("label_name", "label value")]), + ); + + let json = serde_json::to_string(&sample).unwrap(); + + let deserialized: Sample = serde_json::from_str(&json).unwrap(); + + assert_eq!(deserialized, sample); + } + } +} diff --git a/packages/metrics/src/sample_collection.rs b/packages/metrics/src/sample_collection.rs new file mode 100644 index 000000000..02977597f --- /dev/null +++ b/packages/metrics/src/sample_collection.rs @@ -0,0 +1,411 @@ +use std::collections::hash_map::Iter; +use std::collections::{HashMap, HashSet}; + +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::counter::Counter; +use super::gauge::Gauge; +use super::label::LabelSet; +use super::prometheus::PrometheusSerializable; +use super::sample::Sample; + +#[derive(Debug, Clone, Default, PartialEq)] +pub struct SampleCollection { + samples: HashMap>, +} + +impl SampleCollection { + // IMPORTANT: It should never allow mutation of the samples because it would + // break the invariants. If the sample's `LabelSet` is changed, it can + // create duplicate `LabelSet`s even if the `LabelSet` in the `HashMap` key + // is unique. + + /// # Panics + /// + /// Panics if there are duplicate `LabelSets` in the provided samples. 
+ #[must_use] + pub fn new(samples: Vec>) -> Self { + let mut map = HashMap::with_capacity(samples.len()); + + for sample in samples { + assert!( + map.insert(sample.labels().clone(), sample).is_none(), + "Duplicate LabelSet found in SampleCollection" + ); + } + + Self { samples: map } + } + + #[must_use] + pub fn get(&self, label: &LabelSet) -> Option<&Sample> { + self.samples.get(label) + } + + #[must_use] + pub fn len(&self) -> usize { + self.samples.len() + } + + #[must_use] + pub fn is_empty(&self) -> bool { + self.samples.is_empty() + } + + #[must_use] + #[allow(clippy::iter_without_into_iter)] + pub fn iter(&self) -> Iter<'_, LabelSet, Sample> { + self.samples.iter() + } +} + +impl SampleCollection { + pub fn increment(&mut self, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + let sample = self + .samples + .entry(label_set.clone()) + .or_insert_with(|| Sample::new(Counter::default(), time, label_set.clone())); + + sample.increment(time); + } +} + +impl SampleCollection { + pub fn set(&mut self, label_set: &LabelSet, value: f64, time: DurationSinceUnixEpoch) { + let sample = self + .samples + .entry(label_set.clone()) + .or_insert_with(|| Sample::new(Gauge::default(), time, label_set.clone())); + + sample.set(value, time); + } +} + +impl Serialize for SampleCollection { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let samples: Vec<&Sample> = self.samples.values().collect(); + samples.serialize(serializer) + } +} + +impl<'de, T> Deserialize<'de> for SampleCollection +where + T: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + // First deserialize into a temporary Vec + let samples = Vec::>::deserialize(deserializer)?; + + // Check for duplicate label sets + let mut seen_labels = HashSet::new(); + + for sample in &samples { + if !seen_labels.insert(sample.labels()) { + return Err(de::Error::custom(format!("Duplicate label set found: {}", sample.labels()))); + } + } + 
+ // Convert to HashMap-based storage + Ok(SampleCollection::new(samples)) + } +} + +impl PrometheusSerializable for SampleCollection { + fn to_prometheus(&self) -> String { + let mut output = String::new(); + + for sample in self.samples.values() { + output.push_str(&sample.to_prometheus()); + } + + output + } +} + +#[cfg(test)] +mod tests { + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::counter::Counter; + use crate::label::LabelSet; + use crate::prometheus::PrometheusSerializable; + use crate::sample::Sample; + use crate::sample_collection::SampleCollection; + use crate::tests::format_prometheus_output; + + fn sample_update_time() -> DurationSinceUnixEpoch { + DurationSinceUnixEpoch::from_secs(1_743_552_000) + } + + #[test] + #[should_panic(expected = "Duplicate LabelSet found in SampleCollection")] + fn it_should_fail_trying_to_create_a_sample_collection_with_duplicate_label_sets() { + let samples = vec![ + Sample::new(Counter::default(), sample_update_time(), LabelSet::default()), + Sample::new(Counter::default(), sample_update_time(), LabelSet::default()), + ]; + + let _unused = SampleCollection::new(samples); + } + + #[test] + fn it_should_return_a_sample_searching_by_label_set_with_one_empty_label_set() { + let label_set = LabelSet::default(); + + let sample = Sample::new(Counter::default(), sample_update_time(), label_set.clone()); + + let collection = SampleCollection::new(vec![sample.clone()]); + + let retrieved = collection.get(&label_set); + + assert_eq!(retrieved.unwrap(), &sample); + } + + #[test] + fn it_should_return_a_sample_searching_by_label_set_with_two_label_sets() { + let label_set_1 = LabelSet::from(vec![("label_name_1", "label value 1")]); + let label_set_2 = LabelSet::from(vec![("label_name_2", "label value 2")]); + + let sample_1 = Sample::new(Counter::new(1), sample_update_time(), label_set_1.clone()); + let sample_2 = Sample::new(Counter::new(2), sample_update_time(), label_set_2.clone()); + + let collection = 
SampleCollection::new(vec![sample_1.clone(), sample_2.clone()]); + + let retrieved = collection.get(&label_set_1); + assert_eq!(retrieved.unwrap(), &sample_1); + + let retrieved = collection.get(&label_set_2); + assert_eq!(retrieved.unwrap(), &sample_2); + } + + #[test] + fn it_should_return_the_number_of_samples_in_the_collection() { + let samples = vec![Sample::new(Counter::default(), sample_update_time(), LabelSet::default())]; + let collection = SampleCollection::new(samples); + assert_eq!(collection.len(), 1); + } + + #[test] + fn it_should_return_zero_number_of_samples_when_empty() { + let empty = SampleCollection::::default(); + assert_eq!(empty.len(), 0); + } + + #[test] + fn it_should_indicate_is_it_is_empty() { + let empty = SampleCollection::::default(); + assert!(empty.is_empty()); + + let samples = vec![Sample::new(Counter::default(), sample_update_time(), LabelSet::default())]; + let collection = SampleCollection::new(samples); + assert!(!collection.is_empty()); + } + + #[test] + fn it_should_be_serializable_and_deserializable_for_json_format() { + let sample = Sample::new(Counter::default(), sample_update_time(), LabelSet::default()); + let collection = SampleCollection::new(vec![sample]); + + let serialized = serde_json::to_string(&collection).unwrap(); + let deserialized: SampleCollection = serde_json::from_str(&serialized).unwrap(); + + assert_eq!(deserialized, collection); + } + + #[test] + fn it_should_fail_deserializing_from_json_with_duplicate_label_sets() { + let samples = vec![ + Sample::new(Counter::default(), sample_update_time(), LabelSet::default()), + Sample::new(Counter::default(), sample_update_time(), LabelSet::default()), + ]; + + let serialized = serde_json::to_string(&samples).unwrap(); + + let result: Result, _> = serde_json::from_str(&serialized); + + assert!(result.is_err()); + } + + #[test] + fn it_should_be_exportable_to_prometheus_format_when_empty() { + let sample = Sample::new(Counter::default(), sample_update_time(), 
LabelSet::default()); + let collection = SampleCollection::new(vec![sample]); + + let prometheus_output = collection.to_prometheus(); + + assert!(!prometheus_output.is_empty()); + } + + #[test] + fn it_should_be_exportable_to_prometheus_format() { + let sample = Sample::new( + Counter::new(1), + sample_update_time(), + LabelSet::from(vec![("labe_name_1", "label value value 1")]), + ); + + let collection = SampleCollection::new(vec![sample]); + + let prometheus_output = collection.to_prometheus(); + + let expected_prometheus_output = format_prometheus_output("{labe_name_1=\"label value value 1\"} 1"); + + assert_eq!(prometheus_output, expected_prometheus_output); + } + + #[cfg(test)] + mod for_counters { + + use std::ops::Add; + + use super::super::LabelSet; + use super::*; + + #[test] + fn it_should_increment_the_counter_for_a_preexisting_label_set() { + let label_set = LabelSet::default(); + let mut collection = SampleCollection::default(); + + // Initialize the sample + collection.increment(&label_set, sample_update_time()); + + // Verify initial state + let sample = collection.get(&label_set).unwrap(); + assert_eq!(sample.value(), &Counter::new(1)); + + // Increment again + collection.increment(&label_set, sample_update_time()); + let sample = collection.get(&label_set).unwrap(); + assert_eq!(*sample.value(), Counter::new(2)); + } + + #[test] + fn it_should_allow_increment_the_counter_for_a_non_existent_label_set() { + let label_set = LabelSet::default(); + let mut collection = SampleCollection::default(); + + // Increment a non-existent label + collection.increment(&label_set, sample_update_time()); + + // Verify the label exists + assert!(collection.get(&label_set).is_some()); + let sample = collection.get(&label_set).unwrap(); + assert_eq!(*sample.value(), Counter::new(1)); + } + + #[test] + fn it_should_update_the_latest_update_time_when_incremented() { + let label_set = LabelSet::default(); + let initial_time = sample_update_time(); + + let mut collection = 
SampleCollection::default(); + collection.increment(&label_set, initial_time); + + // Increment with a new time + let new_time = initial_time.add(DurationSinceUnixEpoch::from_secs(1)); + collection.increment(&label_set, new_time); + + let sample = collection.get(&label_set).unwrap(); + assert_eq!(sample.update_at(), new_time); + assert_eq!(*sample.value(), Counter::new(2)); + } + + #[test] + fn it_should_increment_the_counter_for_multiple_labels() { + let label1 = LabelSet::from([("name", "value1")]); + let label2 = LabelSet::from([("name", "value2")]); + let now = sample_update_time(); + + let mut collection = SampleCollection::default(); + + collection.increment(&label1, now); + collection.increment(&label2, now); + + assert_eq!(collection.get(&label1).unwrap().value(), &Counter::new(1)); + assert_eq!(collection.get(&label2).unwrap().value(), &Counter::new(1)); + assert_eq!(collection.len(), 2); + } + } + + #[cfg(test)] + mod for_gauges { + + use std::ops::Add; + + use super::super::LabelSet; + use super::*; + use crate::gauge::Gauge; + + #[test] + fn it_should_increment_the_gauge_for_a_preexisting_label_set() { + let label_set = LabelSet::default(); + let mut collection = SampleCollection::default(); + + // Initialize the sample + collection.set(&label_set, 1.0, sample_update_time()); + + // Verify initial state + let sample = collection.get(&label_set).unwrap(); + assert_eq!(sample.value(), &Gauge::new(1.0)); + + // Set again + collection.set(&label_set, 2.0, sample_update_time()); + let sample = collection.get(&label_set).unwrap(); + assert_eq!(*sample.value(), Gauge::new(2.0)); + } + + #[test] + fn it_should_allow_increment_the_gauge_for_a_non_existent_label_set() { + let label_set = LabelSet::default(); + let mut collection = SampleCollection::default(); + + // Set a non-existent label + collection.set(&label_set, 1.0, sample_update_time()); + + // Verify the label exists + assert!(collection.get(&label_set).is_some()); + let sample = 
collection.get(&label_set).unwrap(); + assert_eq!(*sample.value(), Gauge::new(1.0)); + } + + #[test] + fn it_should_update_the_latest_update_time_when_incremented() { + let label_set = LabelSet::default(); + let initial_time = sample_update_time(); + + let mut collection = SampleCollection::default(); + collection.set(&label_set, 1.0, initial_time); + + // Set with a new time + let new_time = initial_time.add(DurationSinceUnixEpoch::from_secs(1)); + collection.set(&label_set, 2.0, new_time); + + let sample = collection.get(&label_set).unwrap(); + assert_eq!(sample.update_at(), new_time); + assert_eq!(*sample.value(), Gauge::new(2.0)); + } + + #[test] + fn it_should_increment_the_gauge_for_multiple_labels() { + let label1 = LabelSet::from([("name", "value1")]); + let label2 = LabelSet::from([("name", "value2")]); + let now = sample_update_time(); + + let mut collection = SampleCollection::default(); + + collection.set(&label1, 1.0, now); + collection.set(&label2, 2.0, now); + + assert_eq!(collection.get(&label1).unwrap().value(), &Gauge::new(1.0)); + assert_eq!(collection.get(&label2).unwrap().value(), &Gauge::new(2.0)); + assert_eq!(collection.len(), 2); + } + } +} diff --git a/packages/metrics/src/thread_safe_metric_collection.rs b/packages/metrics/src/thread_safe_metric_collection.rs new file mode 100644 index 000000000..d9774c9af --- /dev/null +++ b/packages/metrics/src/thread_safe_metric_collection.rs @@ -0,0 +1,92 @@ +use std::sync::RwLock; + +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::counter::Counter; +use crate::gauge::Gauge; +use crate::label::LabelSet; +use crate::metric::description::MetricDescription; +use crate::metric::MetricName; +use crate::metric_collection::{MetricCollection, MetricKindCollection}; +use crate::unit::Unit; + +/* code-review: + + This might be not necessary, since the `MetricCollection` doesn't expose + any method to mutate the collection items directly. 
+ +*/ + +/// A thread-safe wrapper around `MetricCollection` that allows concurrent +/// access to the metrics collection. +/// +/// It protects the `MetricCollection` invariant: +/// +/// "Metric's names must be unique in the collection for all types of metrics." +#[derive(Debug, Default)] +pub struct ThreadSafeMetricCollection { + inner: RwLock, +} + +impl ThreadSafeMetricCollection { + #[must_use] + pub fn new(counters: MetricKindCollection, gauges: MetricKindCollection) -> Self { + Self { + inner: RwLock::new(MetricCollection::new(counters, gauges)), + } + } + + // Counter-specific methods + + /// # Panics + /// + /// Panics if it can't get write access to the inner collection. + pub fn describe_counter(&mut self, name: &MetricName, _opt_unit: Option, _opt_description: Option) { + self.inner.write().unwrap().ensure_counter_exists(name); + } + + /// It allows to describe a counter metric so the metrics appear in the JSON + /// response even if there are no samples yet. + /// + /// # Panics + /// + /// Panics if it can't get read access to the inner collection. + #[must_use] + pub fn get_counter_value(&self, name: &MetricName, label_set: &LabelSet) -> Counter { + self.inner.read().unwrap().get_counter_value(name, label_set) + } + + /// # Panics + /// + /// Panics if it can't get write access to the inner collection. + pub fn increase_counter(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + self.inner.write().unwrap().increase_counter(name, label_set, time); + } + + // Gauge-specific methods + + /// It allows to describe a gauge metric so the metrics appear in the JSON + /// response even if there are no samples yet. + /// + /// # Panics + /// + /// Panics if it can't get write access to the inner collection. 
+ pub fn describe_gauge(&mut self, name: &MetricName, _opt_unit: Option, _opt_description: Option) { + self.inner.write().unwrap().ensure_gauge_exists(name); + } + + /// # Panics + /// + /// Panics if it can't get read access to the inner collection. + #[must_use] + pub fn get_gauge_value(&self, name: &MetricName, label_set: &LabelSet) -> Gauge { + self.inner.read().unwrap().get_gauge_value(name, label_set) + } + + /// # Panics + /// + /// Panics if it can't get write access to the inner collection. + pub fn set_gauge(&mut self, name: &MetricName, label_set: &LabelSet, value: f64, time: DurationSinceUnixEpoch) { + self.inner.write().unwrap().set_gauge(name, label_set, value, time); + } +} diff --git a/packages/metrics/src/unit.rs b/packages/metrics/src/unit.rs new file mode 100644 index 000000000..b98e6836d --- /dev/null +++ b/packages/metrics/src/unit.rs @@ -0,0 +1,25 @@ +//! This module defines the `Unit` enum, which represents various units of +//! measurement. +//! +//! The `Unit` enum is used to specify the unit of measurement for metrics. +//! +//! They were copied from the `metrics` crate, to allow future compatibility. 
+pub enum Unit { + Count, + Percent, + Seconds, + Milliseconds, + Microseconds, + Nanoseconds, + Tebibytes, + Gibibytes, + Mebibytes, + Kibibytes, + Bytes, + TerabitsPerSecond, + GigabitsPerSecond, + MegabitsPerSecond, + KilobitsPerSecond, + BitsPerSecond, + CountPerSecond, +} From d7178180dedb65318ec88429beb3b7c684d2ee6d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 8 Apr 2025 18:10:59 +0100 Subject: [PATCH 438/802] feat: [#1403] add extendable-labeled metrics to http-tracker-core and expose in REST API **URL:** http://0.0.0.0:1212/api/v1/metrics?token=MyAccessToken **Sample response:** ```json { "metrics":[ { "kind":"counter", "name":"http_tracker_core_announce_requests_received_total", "samples":[ { "value":1, "update_at":"2025-04-02T00:00:00+00:00", "labels":[ { "name":"server_binding_ip", "value":"0.0.0.0" }, { "name":"server_binding_port", "value":"7070" }, { "name":"server_binding_protocol", "value":"http" } ] } ] }, { "kind":"gauge", "name":"udp_tracker_server_performance_avg_announce_processing_time_ns", "samples":[ { "value":1.0, "update_at":"2025-04-02T00:00:00+00:00", "labels":[ { "name":"server_binding_ip", "value":"0.0.0.0" }, { "name":"server_binding_port", "value":"7070" }, { "name":"server_binding_protocol", "value":"http" } ] } ] } ] } ``` **URL:** http://0.0.0.0:1212/api/v1/stats?token=MyAccessToken&format=prometheus ``` http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 udp_tracker_server_performance_avg_announce_processing_time_ns{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 ``` --- Cargo.lock | 169 ++++++++++++++++++ .../axum-rest-tracker-api-server/Cargo.toml | 1 + .../src/v1/context/stats/handlers.rs | 33 +++- .../src/v1/context/stats/resources.rs | 18 +- .../src/v1/context/stats/responses.rs | 16 +- .../src/v1/context/stats/routes.rs | 30 ++-- packages/http-tracker-core/Cargo.toml | 6 +- 
packages/http-tracker-core/src/event/mod.rs | 20 +++ packages/http-tracker-core/src/lib.rs | 13 ++ .../src/statistics/event/handler.rs | 68 +++++-- .../src/statistics/event/listener.rs | 4 +- .../src/statistics/metrics.rs | 28 ++- .../http-tracker-core/src/statistics/mod.rs | 24 +++ .../src/statistics/repository.rs | 16 +- .../src/statistics/services.rs | 6 +- packages/rest-tracker-api-core/Cargo.toml | 1 + .../src/statistics/services.rs | 25 +++ 17 files changed, 432 insertions(+), 46 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 328e2db93..fdba742dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -135,6 +135,15 @@ version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" +[[package]] +name = "approx" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6" +dependencies = [ + "num-traits", +] + [[package]] name = "aquatic_peer_id" version = "0.9.0" @@ -484,6 +493,15 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "backtrace-ext" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "537beee3be4a18fb023b570f80e3ae28003db9167a751266b259926e25539d50" +dependencies = [ + "backtrace", +] + [[package]] name = "base64" version = "0.21.7" @@ -560,11 +578,16 @@ dependencies = [ "bittorrent-primitives", "bittorrent-tracker-core", "criterion", + "formatjson", "futures", "mockall", + "serde", + "serde_json", "thiserror 2.0.12", "tokio", + "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", "tracing", @@ -1348,6 +1371,12 @@ dependencies = [ "syn 2.0.100", ] +[[package]] +name = "diff" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" + [[package]] name = "digest" version = "0.10.7" @@ -1558,6 +1587,16 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "formatjson" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d3ba17cfe2aff8969f35b2bffec13b34756c51ea53eadcc5d5446f71370e2ed" +dependencies = [ + "miette", + "thiserror 1.0.69", +] + [[package]] name = "forwarded-header-value" version = "0.1.1" @@ -2295,6 +2334,12 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "is_ci" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7655c9839580ee829dfacba1d1278c2b7883e50a277ff7541299489d6bdfdc45" + [[package]] name = "is_terminal_polyfill" version = "1.70.1" @@ -2484,6 +2529,37 @@ version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +[[package]] +name = "miette" +version = "7.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a955165f87b37fd1862df2a59547ac542c77ef6d17c666f619d1ad22dd89484" +dependencies = [ + "backtrace", + "backtrace-ext", + "cfg-if", + "miette-derive", + "owo-colors", + "supports-color", + "supports-hyperlinks", + "supports-unicode", + "terminal_size", + "textwrap", + "thiserror 1.0.69", + "unicode-width 0.1.14", +] + +[[package]] +name = "miette-derive" +version = "7.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf45bf44ab49be92fd1227a3be6fc6f617f1a337c06af54981048574d8783147" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + [[package]] name = "mime" version = "0.3.17" @@ -2815,6 +2891,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "owo-colors" +version = "4.2.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1036865bb9422d3300cf723f657c2851d0e9ab12567854b1f4eba3d77decf564" + [[package]] name = "parking" version = "2.2.1" @@ -3065,6 +3147,16 @@ dependencies = [ "termtree", ] +[[package]] +name = "pretty_assertions" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" +dependencies = [ + "diff", + "yansi", +] + [[package]] name = "proc-macro-crate" version = "3.3.0" @@ -3976,6 +4068,27 @@ version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" +[[package]] +name = "supports-color" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c64fc7232dd8d2e4ac5ce4ef302b1d81e0b80d055b9d77c7c4f51f6aa4c867d6" +dependencies = [ + "is_ci", +] + +[[package]] +name = "supports-hyperlinks" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "804f44ed3c63152de6a9f90acbea1a110441de43006ea51bcce8f436196a288b" + +[[package]] +name = "supports-unicode" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7401a30af6cb5818bb64852270bb722533397edcfc7344954a38f420819ece2" + [[package]] name = "syn" version = "1.0.109" @@ -4084,6 +4197,16 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "terminal_size" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed" +dependencies = [ + "rustix 1.0.3", + "windows-sys 0.59.0", +] + [[package]] name = "termtree" version = "0.5.1" @@ -4119,6 +4242,16 @@ dependencies = [ "url", ] +[[package]] +name = "textwrap" +version = "0.16.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c13547615a44dc9c452a8a534638acdf07120d4b6847c8178705da06306a3057" +dependencies = [ + "unicode-linebreak", + "unicode-width 0.2.0", +] + [[package]] name = "thiserror" version = "1.0.69" @@ -4450,6 +4583,7 @@ dependencies = [ "torrust-server-lib", "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", "torrust-udp-tracker-server", @@ -4501,6 +4635,7 @@ dependencies = [ "bittorrent-udp-tracker-core", "tokio", "torrust-tracker-configuration", + "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", "torrust-udp-tracker-server", @@ -4626,6 +4761,22 @@ dependencies = [ "tracing", ] +[[package]] +name = "torrust-tracker-metrics" +version = "3.0.0-develop" +dependencies = [ + "approx", + "chrono", + "derive_more", + "formatjson", + "pretty_assertions", + "rstest", + "serde", + "serde_json", + "thiserror 2.0.12", + "torrust-tracker-primitives", +] + [[package]] name = "torrust-tracker-primitives" version = "3.0.0-develop" @@ -4862,6 +5013,24 @@ version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +[[package]] +name = "unicode-linebreak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" + +[[package]] +name = "unicode-width" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" + +[[package]] +name = "unicode-width" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" + [[package]] name = "unicode-xid" version = "0.2.6" diff --git a/packages/axum-rest-tracker-api-server/Cargo.toml 
b/packages/axum-rest-tracker-api-server/Cargo.toml index 42fe68584..d1491c96e 100644 --- a/packages/axum-rest-tracker-api-server/Cargo.toml +++ b/packages/axum-rest-tracker-api-server/Cargo.toml @@ -37,6 +37,7 @@ torrust-rest-tracker-api-core = { version = "3.0.0-develop", path = "../rest-tra torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } tower = { version = "0", features = ["timeout"] } diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs index 484c12ff9..26c812037 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs @@ -9,9 +9,9 @@ use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepo use bittorrent_udp_tracker_core::services::banning::BanService; use serde::Deserialize; use tokio::sync::RwLock; -use torrust_rest_tracker_api_core::statistics::services::get_metrics; +use torrust_rest_tracker_api_core::statistics::services::{get_labeled_metrics, get_metrics}; -use super::responses::{metrics_response, stats_response}; +use super::responses::{labeled_metrics_response, labeled_stats_response, metrics_response, stats_response}; #[derive(Deserialize, Debug, Default)] #[serde(rename_all = "lowercase")] @@ -28,7 +28,7 @@ pub struct QueryParams { pub format: Option, } -/// It handles the request to get the tracker statistics. +/// It handles the request to get the tracker global metrics. 
/// /// By default it returns a `200` response with the stats in JSON format. /// @@ -57,3 +57,30 @@ pub async fn get_stats_handler( None => stats_response(metrics), } } + +/// It handles the request to get the tracker extendable metrics. +/// +/// By default it returns a `200` response with the stats in JSON format. +/// +/// You can add the GET parameter `format=prometheus` to get the stats in +/// Prometheus Text Exposition Format. +#[allow(clippy::type_complexity)] +pub async fn get_metrics_handler( + State(state): State<( + Arc, + Arc>, + Arc, + Arc, + )>, + params: Query, +) -> Response { + let metrics = get_labeled_metrics(state.0.clone(), state.1.clone(), state.2.clone(), state.3.clone()).await; + + match params.0.format { + Some(format) => match format { + Format::Json => labeled_stats_response(metrics), + Format::Prometheus => labeled_metrics_response(&metrics), + }, + None => labeled_stats_response(metrics), + } +} diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs index d9480259e..8fcfd1be0 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs @@ -1,7 +1,8 @@ //! API resources for the [`stats`](crate::v1::context::stats) //! API context. use serde::{Deserialize, Serialize}; -use torrust_rest_tracker_api_core::statistics::services::TrackerMetrics; +use torrust_rest_tracker_api_core::statistics::services::{TrackerLabeledMetrics, TrackerMetrics}; +use torrust_tracker_metrics::metric_collection::MetricCollection; /// It contains all the statistics generated by the tracker. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -116,6 +117,21 @@ impl From for Stats { } } +/// It contains all the statistics generated by the tracker. 
+#[derive(Serialize, Debug, PartialEq)] +pub struct LabeledStats { + metrics: MetricCollection, +} + +impl From for LabeledStats { + #[allow(deprecated)] + fn from(metrics: TrackerLabeledMetrics) -> Self { + Self { + metrics: metrics.metrics, + } + } +} + #[cfg(test)] mod tests { use torrust_rest_tracker_api_core::statistics::metrics::Metrics; diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/responses.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/responses.rs index 853fdd2e2..e79f7e562 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/responses.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/responses.rs @@ -1,9 +1,21 @@ //! API responses for the [`stats`](crate::v1::context::stats) //! API context. use axum::response::{IntoResponse, Json, Response}; -use torrust_rest_tracker_api_core::statistics::services::TrackerMetrics; +use torrust_rest_tracker_api_core::statistics::services::{TrackerLabeledMetrics, TrackerMetrics}; +use torrust_tracker_metrics::prometheus::PrometheusSerializable; -use super::resources::Stats; +use super::resources::{LabeledStats, Stats}; + +/// `200` response that contains the [`LabeledStats`] resource as json. +#[must_use] +pub fn labeled_stats_response(tracker_metrics: TrackerLabeledMetrics) -> Response { + Json(LabeledStats::from(tracker_metrics)).into_response() +} + +#[must_use] +pub fn labeled_metrics_response(tracker_metrics: &TrackerLabeledMetrics) -> Response { + tracker_metrics.metrics.to_prometheus().into_response() +} /// `200` response that contains the [`Stats`] resource as json. 
#[must_use] diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs index e92b5b34d..d516e5ffb 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs @@ -9,17 +9,27 @@ use axum::routing::get; use axum::Router; use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; -use super::handlers::get_stats_handler; +use super::handlers::{get_metrics_handler, get_stats_handler}; /// It adds the routes to the router for the [`stats`](crate::v1::context::stats) API context. pub fn add(prefix: &str, router: Router, http_api_container: &Arc) -> Router { - router.route( - &format!("{prefix}/stats"), - get(get_stats_handler).with_state(( - http_api_container.tracker_core_container.in_memory_torrent_repository.clone(), - http_api_container.ban_service.clone(), - http_api_container.http_stats_repository.clone(), - http_api_container.udp_server_stats_repository.clone(), - )), - ) + router + .route( + &format!("{prefix}/stats"), + get(get_stats_handler).with_state(( + http_api_container.tracker_core_container.in_memory_torrent_repository.clone(), + http_api_container.ban_service.clone(), + http_api_container.http_stats_repository.clone(), + http_api_container.udp_server_stats_repository.clone(), + )), + ) + .route( + &format!("{prefix}/metrics"), + get(get_metrics_handler).with_state(( + http_api_container.tracker_core_container.in_memory_torrent_repository.clone(), + http_api_container.ban_service.clone(), + http_api_container.http_stats_repository.clone(), + http_api_container.udp_server_stats_repository.clone(), + )), + ) } diff --git a/packages/http-tracker-core/Cargo.toml b/packages/http-tracker-core/Cargo.toml index aaf982b04..8bd54a483 100644 --- a/packages/http-tracker-core/Cargo.toml +++ b/packages/http-tracker-core/Cargo.toml @@ -20,17 +20,21 @@ 
bittorrent-primitives = "0.1.0" bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } criterion = { version = "0.5.1", features = ["async_tokio"] } futures = "0" +serde = "1.0.219" thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } tracing = "0" [dev-dependencies] +formatjson = "0.3.1" mockall = "0" +serde_json = "1.0.140" torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } [[bench]] harness = false name = "http_tracker_core_benchmark" - diff --git a/packages/http-tracker-core/src/event/mod.rs b/packages/http-tracker-core/src/event/mod.rs index 7caf8a596..d235c179f 100644 --- a/packages/http-tracker-core/src/event/mod.rs +++ b/packages/http-tracker-core/src/event/mod.rs @@ -1,5 +1,6 @@ use std::net::{IpAddr, SocketAddr}; +use torrust_tracker_metrics::label::{LabelName, LabelSet, LabelValue}; use torrust_tracker_primitives::service_binding::ServiceBinding; pub mod sender; @@ -59,3 +60,22 @@ pub struct ClientConnectionContext { pub struct ServerConnectionContext { service_binding: ServiceBinding, } + +impl From for LabelSet { + fn from(connection_context: ConnectionContext) -> Self { + LabelSet::from([ + ( + LabelName::new("server_binding_protocol"), + LabelValue::new(&connection_context.server.service_binding.protocol().to_string()), + ), + ( + LabelName::new("server_binding_ip"), + LabelValue::new(&connection_context.server.service_binding.bind_address().ip().to_string()), + ), + ( + LabelName::new("server_binding_port"), + LabelValue::new(&connection_context.server.service_binding.bind_address().port().to_string()), + ), + ]) + 
} +} diff --git a/packages/http-tracker-core/src/lib.rs b/packages/http-tracker-core/src/lib.rs index 0b0b3ba78..cdb26ca89 100644 --- a/packages/http-tracker-core/src/lib.rs +++ b/packages/http-tracker-core/src/lib.rs @@ -1,8 +1,21 @@ +use torrust_tracker_clock::clock; + pub mod container; pub mod event; pub mod services; pub mod statistics; +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; + #[cfg(test)] pub(crate) mod tests { use bittorrent_primitives::info_hash::InfoHash; diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index 0df1c41d3..046cb7775 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -1,5 +1,9 @@ use std::net::IpAddr; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + use crate::event::Event; use crate::statistics::repository::Repository; @@ -7,24 +11,52 @@ use crate::statistics::repository::Repository; /// /// This function panics if the client IP address is not the same as the IP /// version of the event. 
-pub async fn handle_event(event: Event, stats_repository: &Repository) { +pub async fn handle_event(event: Event, stats_repository: &Repository, now: DurationSinceUnixEpoch) { match event { - Event::TcpAnnounce { connection } => match connection.client_ip_addr() { - IpAddr::V4(_) => { - stats_repository.increase_tcp4_announces().await; - } - IpAddr::V6(_) => { - stats_repository.increase_tcp6_announces().await; - } - }, - Event::TcpScrape { connection } => match connection.client_ip_addr() { - IpAddr::V4(_) => { - stats_repository.increase_tcp4_scrapes().await; + Event::TcpAnnounce { connection } => { + // Global fixed metrics + + match connection.client_ip_addr() { + IpAddr::V4(_) => { + stats_repository.increase_tcp4_announces().await; + } + IpAddr::V6(_) => { + stats_repository.increase_tcp6_announces().await; + } } - IpAddr::V6(_) => { - stats_repository.increase_tcp6_scrapes().await; + + // Extendable metrics + + stats_repository + .increase_counter( + &MetricName::new("http_tracker_core_announce_requests_received_total"), + &LabelSet::from(connection), + now, + ) + .await; + } + Event::TcpScrape { connection } => { + // Global fixed metrics + + match connection.client_ip_addr() { + IpAddr::V4(_) => { + stats_repository.increase_tcp4_scrapes().await; + } + IpAddr::V6(_) => { + stats_repository.increase_tcp6_scrapes().await; + } } - }, + + // Extendable metrics + + stats_repository + .increase_counter( + &MetricName::new("http_tracker_core_scrape_requests_received_total"), + &LabelSet::from(connection), + now, + ) + .await; + } } tracing::debug!("stats: {:?}", stats_repository.get_stats().await); @@ -34,11 +66,13 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event}; use 
crate::statistics::event::handler::handle_event; use crate::statistics::repository::Repository; + use crate::CurrentClock; #[tokio::test] async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { @@ -53,6 +87,7 @@ mod tests { ), }, &stats_repository, + CurrentClock::now(), ) .await; @@ -74,6 +109,7 @@ mod tests { ), }, &stats_repository, + CurrentClock::now(), ) .await; @@ -95,6 +131,7 @@ mod tests { ), }, &stats_repository, + CurrentClock::now(), ) .await; @@ -116,6 +153,7 @@ mod tests { ), }, &stats_repository, + CurrentClock::now(), ) .await; diff --git a/packages/http-tracker-core/src/statistics/event/listener.rs b/packages/http-tracker-core/src/statistics/event/listener.rs index a03a56a21..ca53a20bb 100644 --- a/packages/http-tracker-core/src/statistics/event/listener.rs +++ b/packages/http-tracker-core/src/statistics/event/listener.rs @@ -1,13 +1,15 @@ use tokio::sync::broadcast; +use torrust_tracker_clock::clock::Time; use super::handler::handle_event; use crate::event::Event; use crate::statistics::repository::Repository; +use crate::CurrentClock; pub async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Repository) { loop { match receiver.recv().await { - Ok(event) => handle_event(event, &stats_repository).await, + Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, Err(e) => { tracing::error!("Error receiving http tracker core event: {:?}", e); break; diff --git a/packages/http-tracker-core/src/statistics/metrics.rs b/packages/http-tracker-core/src/statistics/metrics.rs index 6c102770b..0b442c1cb 100644 --- a/packages/http-tracker-core/src/statistics/metrics.rs +++ b/packages/http-tracker-core/src/statistics/metrics.rs @@ -1,12 +1,11 @@ +use serde::Serialize; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::MetricCollection; +use 
torrust_tracker_primitives::DurationSinceUnixEpoch; + /// Metrics collected by the tracker. -/// -/// - Number of connections handled -/// - Number of `announce` requests handled -/// - Number of `scrape` request handled -/// -/// These metrics are collected for each connection type: UDP and HTTP -/// and also for each IP version used by the peers: IPv4 and IPv6. -#[derive(Debug, PartialEq, Default)] +#[derive(Debug, Clone, PartialEq, Default, Serialize)] pub struct Metrics { /// Total number of TCP (HTTP tracker) `announce` requests from IPv4 peers. pub tcp4_announces_handled: u64, @@ -19,4 +18,17 @@ pub struct Metrics { /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. pub tcp6_scrapes_handled: u64, + + /// A collection of metrics. + pub metric_collection: MetricCollection, +} + +impl Metrics { + pub fn increase_counter(&mut self, metric_name: &MetricName, labels: &LabelSet, now: DurationSinceUnixEpoch) { + self.metric_collection.increase_counter(metric_name, labels, now); + } + + pub fn set_gauge(&mut self, metric_name: &MetricName, labels: &LabelSet, value: f64, now: DurationSinceUnixEpoch) { + self.metric_collection.set_gauge(metric_name, labels, value, now); + } } diff --git a/packages/http-tracker-core/src/statistics/mod.rs b/packages/http-tracker-core/src/statistics/mod.rs index 939a41061..dd365495d 100644 --- a/packages/http-tracker-core/src/statistics/mod.rs +++ b/packages/http-tracker-core/src/statistics/mod.rs @@ -4,3 +4,27 @@ pub mod metrics; pub mod repository; pub mod services; pub mod setup; + +use metrics::Metrics; +use torrust_tracker_metrics::metric::description::MetricDescription; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::unit::Unit; + +#[must_use] +pub fn describe_metrics() -> Metrics { + let mut metrics = Metrics::default(); + + metrics.metric_collection.describe_counter( + &MetricName::new("http_tracker_core_announce_requests_received_total"), + Some(Unit::Count), + 
Some(MetricDescription::new("Total number of announce requests received")), + ); + + metrics.metric_collection.describe_counter( + &MetricName::new("http_tracker_core_scrape_requests_received_total"), + Some(Unit::Count), + Some(MetricDescription::new("Total number of scrape requests received")), + ); + + metrics +} diff --git a/packages/http-tracker-core/src/statistics/repository.rs b/packages/http-tracker-core/src/statistics/repository.rs index 5e15fc298..88345722b 100644 --- a/packages/http-tracker-core/src/statistics/repository.rs +++ b/packages/http-tracker-core/src/statistics/repository.rs @@ -1,7 +1,11 @@ use std::sync::Arc; use tokio::sync::{RwLock, RwLockReadGuard}; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use super::describe_metrics; use super::metrics::Metrics; /// A repository for the tracker metrics. @@ -19,9 +23,9 @@ impl Default for Repository { impl Repository { #[must_use] pub fn new() -> Self { - Self { - stats: Arc::new(RwLock::new(Metrics::default())), - } + let stats = Arc::new(RwLock::new(describe_metrics())); + + Self { stats } } pub async fn get_stats(&self) -> RwLockReadGuard<'_, Metrics> { @@ -51,4 +55,10 @@ impl Repository { stats_lock.tcp6_scrapes_handled += 1; drop(stats_lock); } + + pub async fn increase_counter(&self, metric_name: &MetricName, labels: &LabelSet, now: DurationSinceUnixEpoch) { + let mut stats_lock = self.stats.write().await; + stats_lock.increase_counter(metric_name, labels, now); + drop(stats_lock); + } } diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index dce7098b9..418b0d082 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -59,6 +59,8 @@ pub async fn get_metrics( // TCPv6 tcp6_announces_handled: stats.tcp6_announces_handled, tcp6_scrapes_handled: 
stats.tcp6_scrapes_handled, + // Samples + metric_collection: stats.metric_collection.clone(), }, } } @@ -73,8 +75,8 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; use torrust_tracker_test_helpers::configuration; - use crate::statistics; use crate::statistics::services::{get_metrics, TrackerMetrics}; + use crate::statistics::{self, describe_metrics}; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() @@ -95,7 +97,7 @@ mod tests { tracker_metrics, TrackerMetrics { torrents_metrics: AggregateSwarmMetadata::default(), - protocol_metrics: statistics::metrics::Metrics::default(), + protocol_metrics: describe_metrics(), } ); } diff --git a/packages/rest-tracker-api-core/Cargo.toml b/packages/rest-tracker-api-core/Cargo.toml index d9ccb5d3f..0077572fb 100644 --- a/packages/rest-tracker-api-core/Cargo.toml +++ b/packages/rest-tracker-api-core/Cargo.toml @@ -19,6 +19,7 @@ bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "../udp-tracker-core" } tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index c40f7c82e..d18f6598d 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -4,6 +4,7 @@ use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepo use bittorrent_udp_tracker_core::services::banning::BanService; 
use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; +use torrust_tracker_metrics::metric_collection::MetricCollection; use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; use torrust_udp_tracker_server::statistics as udp_server_statistics; @@ -77,6 +78,30 @@ pub async fn get_metrics( } } +#[derive(Debug, PartialEq)] +pub struct TrackerLabeledMetrics { + pub metrics: MetricCollection, +} + +/// It returns all the [`TrackerLabeledMetrics`] +#[allow(deprecated)] +pub async fn get_labeled_metrics( + in_memory_torrent_repository: Arc, + ban_service: Arc>, + http_stats_repository: Arc, + udp_server_stats_repository: Arc, +) -> TrackerLabeledMetrics { + let _torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); + let _udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); + let _udp_server_stats = udp_server_stats_repository.get_stats().await; + + let http_stats = http_stats_repository.get_stats().await; + + TrackerLabeledMetrics { + metrics: http_stats.metric_collection.clone(), + } +} + #[cfg(test)] mod tests { use std::sync::Arc; From 9d0933712f35d7555d7bdfe2e92e1607b2cbff92 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 9 Apr 2025 13:01:58 +0100 Subject: [PATCH 439/802] feat: [#1403] add extendable-labeled metrics to udp-tracker-core --- Cargo.lock | 3 + packages/http-tracker-core/src/lib.rs | 4 +- .../http-tracker-core/src/statistics/mod.rs | 4 +- packages/udp-tracker-core/Cargo.toml | 4 +- packages/udp-tracker-core/src/event/mod.rs | 20 ++++ packages/udp-tracker-core/src/lib.rs | 13 +++ .../src/statistics/event/handler.rs | 98 ++++++++++++++----- .../src/statistics/event/listener.rs | 4 +- .../src/statistics/metrics.rs | 21 +++- .../udp-tracker-core/src/statistics/mod.rs | 30 ++++++ .../src/statistics/repository.rs | 12 ++- .../src/statistics/services.rs | 6 +- 12 files changed, 187 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fdba742dc..700781fcf 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -696,9 +696,12 @@ dependencies = [ "lazy_static", "mockall", "rand 0.9.0", + "serde", "thiserror 2.0.12", "tokio", + "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", "tracing", diff --git a/packages/http-tracker-core/src/lib.rs b/packages/http-tracker-core/src/lib.rs index cdb26ca89..2260242e0 100644 --- a/packages/http-tracker-core/src/lib.rs +++ b/packages/http-tracker-core/src/lib.rs @@ -1,10 +1,10 @@ -use torrust_tracker_clock::clock; - pub mod container; pub mod event; pub mod services; pub mod statistics; +use torrust_tracker_clock::clock; + /// This code needs to be copied into each crate. /// Working version, for production. #[cfg(not(test))] diff --git a/packages/http-tracker-core/src/statistics/mod.rs b/packages/http-tracker-core/src/statistics/mod.rs index dd365495d..8148df3c1 100644 --- a/packages/http-tracker-core/src/statistics/mod.rs +++ b/packages/http-tracker-core/src/statistics/mod.rs @@ -17,13 +17,13 @@ pub fn describe_metrics() -> Metrics { metrics.metric_collection.describe_counter( &MetricName::new("http_tracker_core_announce_requests_received_total"), Some(Unit::Count), - Some(MetricDescription::new("Total number of announce requests received")), + Some(MetricDescription::new("Total number of HTTP announce requests received")), ); metrics.metric_collection.describe_counter( &MetricName::new("http_tracker_core_scrape_requests_received_total"), Some(Unit::Count), - Some(MetricDescription::new("Total number of scrape requests received")), + Some(MetricDescription::new("Total number of HTTP scrape requests received")), ); metrics diff --git a/packages/udp-tracker-core/Cargo.toml b/packages/udp-tracker-core/Cargo.toml index 88bab51c1..0354777db 100644 --- a/packages/udp-tracker-core/Cargo.toml +++ b/packages/udp-tracker-core/Cargo.toml @@ -25,9 +25,12 @@ criterion = { version = "0.5.1", features = ["async_tokio"] } 
futures = "0" lazy_static = "1" rand = "0" +serde = "1.0.219" thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync", "time"] } +torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } tracing = "0" zerocopy = "0.7" @@ -39,4 +42,3 @@ torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-help [[bench]] harness = false name = "udp_tracker_core_benchmark" - diff --git a/packages/udp-tracker-core/src/event/mod.rs b/packages/udp-tracker-core/src/event/mod.rs index e25f557e2..6cb43e5a1 100644 --- a/packages/udp-tracker-core/src/event/mod.rs +++ b/packages/udp-tracker-core/src/event/mod.rs @@ -1,5 +1,6 @@ use std::net::SocketAddr; +use torrust_tracker_metrics::label::{LabelName, LabelSet, LabelValue}; use torrust_tracker_primitives::service_binding::ServiceBinding; pub mod sender; @@ -37,3 +38,22 @@ impl ConnectionContext { self.server_service_binding.bind_address() } } + +impl From for LabelSet { + fn from(connection_context: ConnectionContext) -> Self { + LabelSet::from([ + ( + LabelName::new("server_binding_protocol"), + LabelValue::new(&connection_context.server_service_binding.protocol().to_string()), + ), + ( + LabelName::new("server_binding_ip"), + LabelValue::new(&connection_context.server_service_binding.bind_address().ip().to_string()), + ), + ( + LabelName::new("server_binding_port"), + LabelValue::new(&connection_context.server_service_binding.bind_address().port().to_string()), + ), + ]) + } +} diff --git a/packages/udp-tracker-core/src/lib.rs b/packages/udp-tracker-core/src/lib.rs index 94ce93068..8e937e79c 100644 --- a/packages/udp-tracker-core/src/lib.rs +++ b/packages/udp-tracker-core/src/lib.rs @@ -5,6 +5,19 @@ pub mod 
event; pub mod services; pub mod statistics; +use torrust_tracker_clock::clock; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; + use crypto::ephemeral_instance_keys; use tracing::instrument; diff --git a/packages/udp-tracker-core/src/statistics/event/handler.rs b/packages/udp-tracker-core/src/statistics/event/handler.rs index 3968ca4e7..a910d9373 100644 --- a/packages/udp-tracker-core/src/statistics/event/handler.rs +++ b/packages/udp-tracker-core/src/statistics/event/handler.rs @@ -1,35 +1,81 @@ +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + use crate::event::Event; use crate::statistics::repository::Repository; /// # Panics /// /// This function panics if the IP version does not match the event type. 
-pub async fn handle_event(event: Event, stats_repository: &Repository) { +pub async fn handle_event(event: Event, stats_repository: &Repository, now: DurationSinceUnixEpoch) { match event { - Event::UdpConnect { context } => match context.client_socket_addr.ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_connections().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_connections().await; - } - }, - Event::UdpAnnounce { context } => match context.client_socket_addr.ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_announces().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_announces().await; + Event::UdpConnect { context } => { + // Global fixed metrics + + match context.client_socket_addr.ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_connections().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_connections().await; + } } - }, - Event::UdpScrape { context } => match context.client_socket_addr.ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_scrapes().await; + + // Extendable metrics + + stats_repository + .increase_counter( + &MetricName::new("udp_tracker_core_connect_requests_received_total"), + &LabelSet::from(context), + now, + ) + .await; + } + Event::UdpAnnounce { context } => { + // Global fixed metrics + + match context.client_socket_addr.ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_announces().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_announces().await; + } } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_scrapes().await; + + // Extendable metrics + + stats_repository + .increase_counter( + &MetricName::new("udp_tracker_core_announce_requests_received_total"), + &LabelSet::from(context), + now, + ) + .await; + } + Event::UdpScrape { context } => { + // Global fixed metrics + + match context.client_socket_addr.ip() { 
+ std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_scrapes().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_scrapes().await; + } } - }, + + // Extendable metrics + + stats_repository + .increase_counter( + &MetricName::new("udp_tracker_core_scrape_requests_received_total"), + &LabelSet::from(context), + now, + ) + .await; + } } tracing::debug!("stats: {:?}", stats_repository.get_stats().await); @@ -39,11 +85,13 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event}; use crate::statistics::event::handler::handle_event; use crate::statistics::repository::Repository; + use crate::CurrentClock; #[tokio::test] async fn should_increase_the_udp4_connections_counter_when_it_receives_a_udp4_connect_event() { @@ -61,6 +109,7 @@ mod tests { ), }, &stats_repository, + CurrentClock::now(), ) .await; @@ -85,6 +134,7 @@ mod tests { ), }, &stats_repository, + CurrentClock::now(), ) .await; @@ -109,6 +159,7 @@ mod tests { ), }, &stats_repository, + CurrentClock::now(), ) .await; @@ -133,6 +184,7 @@ mod tests { ), }, &stats_repository, + CurrentClock::now(), ) .await; @@ -157,6 +209,7 @@ mod tests { ), }, &stats_repository, + CurrentClock::now(), ) .await; @@ -181,6 +234,7 @@ mod tests { ), }, &stats_repository, + CurrentClock::now(), ) .await; diff --git a/packages/udp-tracker-core/src/statistics/event/listener.rs b/packages/udp-tracker-core/src/statistics/event/listener.rs index f3afafc4f..8fc82fbcb 100644 --- a/packages/udp-tracker-core/src/statistics/event/listener.rs +++ b/packages/udp-tracker-core/src/statistics/event/listener.rs @@ -1,13 +1,15 @@ use tokio::sync::broadcast; +use torrust_tracker_clock::clock::Time; use super::handler::handle_event; use crate::event::Event; use 
crate::statistics::repository::Repository; +use crate::CurrentClock; pub async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Repository) { loop { match receiver.recv().await { - Ok(event) => handle_event(event, &stats_repository).await, + Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, Err(e) => { tracing::error!("Error receiving udp tracker core event: {:?}", e); break; diff --git a/packages/udp-tracker-core/src/statistics/metrics.rs b/packages/udp-tracker-core/src/statistics/metrics.rs index 1b3805288..23cec8036 100644 --- a/packages/udp-tracker-core/src/statistics/metrics.rs +++ b/packages/udp-tracker-core/src/statistics/metrics.rs @@ -1,3 +1,9 @@ +use serde::Serialize; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::MetricCollection; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + /// Metrics collected by the tracker. /// /// - Number of connections handled @@ -6,7 +12,7 @@ /// /// These metrics are collected for each connection type: UDP and HTTP /// and also for each IP version used by the peers: IPv4 and IPv6. -#[derive(Debug, PartialEq, Default)] +#[derive(Debug, PartialEq, Default, Serialize)] pub struct Metrics { /// Total number of UDP (UDP tracker) connections from IPv4 peers. pub udp4_connections_handled: u64, @@ -25,4 +31,17 @@ pub struct Metrics { /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. pub udp6_scrapes_handled: u64, + + /// A collection of metrics. 
+ pub metric_collection: MetricCollection, +} + +impl Metrics { + pub fn increase_counter(&mut self, metric_name: &MetricName, labels: &LabelSet, now: DurationSinceUnixEpoch) { + self.metric_collection.increase_counter(metric_name, labels, now); + } + + pub fn set_gauge(&mut self, metric_name: &MetricName, labels: &LabelSet, value: f64, now: DurationSinceUnixEpoch) { + self.metric_collection.set_gauge(metric_name, labels, value, now); + } } diff --git a/packages/udp-tracker-core/src/statistics/mod.rs b/packages/udp-tracker-core/src/statistics/mod.rs index 939a41061..cdba76df3 100644 --- a/packages/udp-tracker-core/src/statistics/mod.rs +++ b/packages/udp-tracker-core/src/statistics/mod.rs @@ -4,3 +4,33 @@ pub mod metrics; pub mod repository; pub mod services; pub mod setup; + +use metrics::Metrics; +use torrust_tracker_metrics::metric::description::MetricDescription; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::unit::Unit; + +#[must_use] +pub fn describe_metrics() -> Metrics { + let mut metrics = Metrics::default(); + + metrics.metric_collection.describe_counter( + &MetricName::new("udp_tracker_core_connect_requests_received_total"), + Some(Unit::Count), + Some(MetricDescription::new("Total number of UDP connect requests received")), + ); + + metrics.metric_collection.describe_counter( + &MetricName::new("udp_tracker_core_announce_requests_received_total"), + Some(Unit::Count), + Some(MetricDescription::new("Total number of UDP announce requests received")), + ); + + metrics.metric_collection.describe_counter( + &MetricName::new("udp_tracker_core_scrape_requests_received_total"), + Some(Unit::Count), + Some(MetricDescription::new("Total number of UDP scrape requests received")), + ); + + metrics +} diff --git a/packages/udp-tracker-core/src/statistics/repository.rs b/packages/udp-tracker-core/src/statistics/repository.rs index f7609e5c2..49c91c751 100644 --- a/packages/udp-tracker-core/src/statistics/repository.rs +++ 
b/packages/udp-tracker-core/src/statistics/repository.rs @@ -1,7 +1,11 @@ use std::sync::Arc; use tokio::sync::{RwLock, RwLockReadGuard}; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use super::describe_metrics; use super::metrics::Metrics; /// A repository for the tracker metrics. @@ -20,7 +24,7 @@ impl Repository { #[must_use] pub fn new() -> Self { Self { - stats: Arc::new(RwLock::new(Metrics::default())), + stats: Arc::new(RwLock::new(describe_metrics())), } } @@ -63,4 +67,10 @@ impl Repository { stats_lock.udp6_scrapes_handled += 1; drop(stats_lock); } + + pub async fn increase_counter(&self, metric_name: &MetricName, labels: &LabelSet, now: DurationSinceUnixEpoch) { + let mut stats_lock = self.stats.write().await; + stats_lock.increase_counter(metric_name, labels, now); + drop(stats_lock); + } } diff --git a/packages/udp-tracker-core/src/statistics/services.rs b/packages/udp-tracker-core/src/statistics/services.rs index d3c1d4710..7dbbfc947 100644 --- a/packages/udp-tracker-core/src/statistics/services.rs +++ b/packages/udp-tracker-core/src/statistics/services.rs @@ -77,6 +77,8 @@ pub async fn get_metrics( udp6_connections_handled: stats.udp6_connections_handled, udp6_announces_handled: stats.udp6_announces_handled, udp6_scrapes_handled: stats.udp6_scrapes_handled, + // Samples + metric_collection: stats.metric_collection.clone(), }, } } @@ -91,7 +93,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; use torrust_tracker_test_helpers::configuration; - use crate::statistics; + use crate::statistics::describe_metrics; use crate::statistics::services::{get_metrics, TrackerMetrics}; pub fn tracker_configuration() -> Configuration { @@ -114,7 +116,7 @@ mod tests { tracker_metrics, TrackerMetrics { torrents_metrics: AggregateSwarmMetadata::default(), - protocol_metrics: statistics::metrics::Metrics::default(), + 
protocol_metrics: describe_metrics(), } ); } From 3f51afcd49c67fe1816abada3293d6b1808ef74e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 9 Apr 2025 13:46:48 +0100 Subject: [PATCH 440/802] feat: [#1403] expose udp-tracker-core metrics expose in REST API --- .../src/v1/context/stats/handlers.rs | 10 ++++++++- .../src/v1/context/stats/routes.rs | 1 + .../src/statistics/services.rs | 22 +++++++++++++++---- 3 files changed, 28 insertions(+), 5 deletions(-) diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs index 26c812037..17d3e4f2d 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs @@ -70,11 +70,19 @@ pub async fn get_metrics_handler( Arc, Arc>, Arc, + Arc, Arc, )>, params: Query, ) -> Response { - let metrics = get_labeled_metrics(state.0.clone(), state.1.clone(), state.2.clone(), state.3.clone()).await; + let metrics = get_labeled_metrics( + state.0.clone(), + state.1.clone(), + state.2.clone(), + state.3.clone(), + state.4.clone(), + ) + .await; match params.0.format { Some(format) => match format { diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs index d516e5ffb..c19f08b2a 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs @@ -29,6 +29,7 @@ pub fn add(prefix: &str, router: Router, http_api_container: &Arc, ban_service: Arc>, http_stats_repository: Arc, + udp_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> TrackerLabeledMetrics { let _torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); @@ -96,10 +102,18 @@ pub async fn get_labeled_metrics( let _udp_server_stats = 
udp_server_stats_repository.get_stats().await; let http_stats = http_stats_repository.get_stats().await; - - TrackerLabeledMetrics { - metrics: http_stats.metric_collection.clone(), - } + let udp_stats_repository = udp_stats_repository.get_stats().await; + + // Merge the metrics from the HTTP and UDP metrics + let mut metrics = MetricCollection::default(); + metrics + .merge(&http_stats.metric_collection) + .expect("msg: failed to merge HTTP core metrics"); + metrics + .merge(&udp_stats_repository.metric_collection) + .expect("failed to merge UDP core metrics"); + + TrackerLabeledMetrics { metrics } } #[cfg(test)] From 786f6f0cf646bf0d435d99403bc0fdff4b6f8d7f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 9 Apr 2025 16:28:59 +0100 Subject: [PATCH 441/802] feat: [#1403] add extendable-labeled metrics to udp-tracker-server --- Cargo.lock | 2 + packages/metrics/src/label/value.rs | 6 + .../src/statistics/services.rs | 2 +- packages/udp-tracker-server/Cargo.toml | 2 + packages/udp-tracker-server/src/event/mod.rs | 32 +++ .../src/statistics/event/handler.rs | 222 ++++++++++++++---- .../src/statistics/event/listener.rs | 4 +- .../src/statistics/metrics.rs | 21 +- .../udp-tracker-server/src/statistics/mod.rs | 72 ++++++ .../src/statistics/repository.rs | 30 ++- .../src/statistics/services.rs | 6 +- 11 files changed, 344 insertions(+), 55 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 700781fcf..5feea957d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4844,12 +4844,14 @@ dependencies = [ "mockall", "rand 0.9.0", "ringbuf", + "serde", "thiserror 2.0.12", "tokio", "torrust-server-lib", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-located-error", + "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", "tracing", diff --git a/packages/metrics/src/label/value.rs b/packages/metrics/src/label/value.rs index ce657250c..528a0e2ab 100644 --- a/packages/metrics/src/label/value.rs +++ 
b/packages/metrics/src/label/value.rs @@ -11,6 +11,12 @@ impl LabelValue { pub fn new(value: &str) -> Self { Self(value.to_owned()) } + + /// Empty label values are ignored in Prometheus. + #[must_use] + pub fn ignore() -> Self { + Self(String::default()) + } } impl PrometheusSerializable for LabelValue { diff --git a/packages/udp-tracker-core/src/statistics/services.rs b/packages/udp-tracker-core/src/statistics/services.rs index 7dbbfc947..d9b016b0d 100644 --- a/packages/udp-tracker-core/src/statistics/services.rs +++ b/packages/udp-tracker-core/src/statistics/services.rs @@ -77,7 +77,7 @@ pub async fn get_metrics( udp6_connections_handled: stats.udp6_connections_handled, udp6_announces_handled: stats.udp6_announces_handled, udp6_scrapes_handled: stats.udp6_scrapes_handled, - // Samples + // Extendable metrics metric_collection: stats.metric_collection.clone(), }, } diff --git a/packages/udp-tracker-server/Cargo.toml b/packages/udp-tracker-server/Cargo.toml index f8fcd2def..23719d141 100644 --- a/packages/udp-tracker-server/Cargo.toml +++ b/packages/udp-tracker-server/Cargo.toml @@ -23,12 +23,14 @@ derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } futures = "0" futures-util = "0" ringbuf = "0" +serde = "1.0.219" thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } +torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } tracing = "0" url = { version = "2", features = ["serde"] } diff --git a/packages/udp-tracker-server/src/event/mod.rs 
b/packages/udp-tracker-server/src/event/mod.rs index 68f07cfd6..316e1a414 100644 --- a/packages/udp-tracker-server/src/event/mod.rs +++ b/packages/udp-tracker-server/src/event/mod.rs @@ -1,6 +1,8 @@ +use std::fmt; use std::net::SocketAddr; use std::time::Duration; +use torrust_tracker_metrics::label::{LabelName, LabelSet, LabelValue}; use torrust_tracker_primitives::service_binding::ServiceBinding; pub mod sender; @@ -38,6 +40,17 @@ pub enum UdpRequestKind { Scrape, } +impl fmt::Display for UdpRequestKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let proto_str = match self { + UdpRequestKind::Connect => "connect", + UdpRequestKind::Announce => "announce", + UdpRequestKind::Scrape => "scrape", + }; + write!(f, "{proto_str}") + } +} + #[derive(Debug, PartialEq, Eq, Clone)] pub enum UdpResponseKind { Ok { @@ -76,3 +89,22 @@ impl ConnectionContext { self.server_service_binding.bind_address() } } + +impl From for LabelSet { + fn from(connection_context: ConnectionContext) -> Self { + LabelSet::from([ + ( + LabelName::new("server_binding_protocol"), + LabelValue::new(&connection_context.server_service_binding.protocol().to_string()), + ), + ( + LabelName::new("server_binding_ip"), + LabelValue::new(&connection_context.server_service_binding.bind_address().ip().to_string()), + ), + ( + LabelName::new("server_binding_port"), + LabelValue::new(&connection_context.server_service_binding.bind_address().port().to_string()), + ), + ]) + } +} diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index b06c8d725..91f5cef0c 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -1,3 +1,7 @@ +use torrust_tracker_metrics::label::{LabelName, LabelSet, LabelValue}; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + use crate::event::{Event, 
UdpRequestKind, UdpResponseKind}; use crate::statistics::repository::Repository; @@ -6,53 +10,103 @@ use crate::statistics::repository::Repository; /// This function panics if the client IP version does not match the expected /// version. #[allow(clippy::too_many_lines)] -pub async fn handle_event(event: Event, stats_repository: &Repository) { +pub async fn handle_event(event: Event, stats_repository: &Repository, now: DurationSinceUnixEpoch) { match event { - Event::UdpRequestAborted { .. } => { + Event::UdpRequestAborted { context } => { + // Global fixed metrics stats_repository.increase_udp_requests_aborted().await; + + // Extendable metrics + stats_repository + .increase_counter( + &MetricName::new("udp_tracker_server_requests_aborted_total"), + &LabelSet::from(context), + now, + ) + .await; } - Event::UdpRequestBanned { .. } => { + Event::UdpRequestBanned { context } => { + // Global fixed metrics stats_repository.increase_udp_requests_banned().await; + + // Extendable metrics + stats_repository + .increase_counter( + &MetricName::new("udp_tracker_server_requests_banned_total"), + &LabelSet::from(context), + now, + ) + .await; } - Event::UdpRequestReceived { context } => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_requests().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_requests().await; - } - }, - Event::UdpRequestAccepted { context, kind } => match kind { - UdpRequestKind::Connect => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_connections().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_connections().await; - } - }, - UdpRequestKind::Announce => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_announces().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_announces().await; - } - }, - 
UdpRequestKind::Scrape => match context.client_socket_addr().ip() { + Event::UdpRequestReceived { context } => { + // Global fixed metrics + match context.client_socket_addr().ip() { std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_scrapes().await; + stats_repository.increase_udp4_requests().await; } std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_scrapes().await; + stats_repository.increase_udp6_requests().await; } - }, - }, + } + + // Extendable metrics + stats_repository + .increase_counter( + &MetricName::new("udp_tracker_server_requests_received_total"), + &LabelSet::from(context), + now, + ) + .await; + } + Event::UdpRequestAccepted { context, kind } => { + // Global fixed metrics + match kind { + UdpRequestKind::Connect => match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_connections().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_connections().await; + } + }, + UdpRequestKind::Announce => match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_announces().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_announces().await; + } + }, + UdpRequestKind::Scrape => match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_scrapes().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_scrapes().await; + } + }, + } + + // Extendable metrics + + let mut label_set = LabelSet::from(context); + + label_set.upsert(LabelName::new("kind"), LabelValue::new(&kind.to_string())); + + stats_repository + .increase_counter( + &MetricName::new("udp_tracker_server_requests_accepted_total"), + &label_set, + now, + ) + .await; + } Event::UdpResponseSent { context, kind, req_processing_time, } => { + // Global fixed metrics match context.client_socket_addr().ip() { std::net::IpAddr::V4(_) => { 
stats_repository.increase_udp4_responses().await; @@ -62,35 +116,94 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { } } - match kind { + let (result_label_value, kind_label_value) = match kind { UdpResponseKind::Ok { req_kind } => match req_kind { UdpRequestKind::Connect => { - stats_repository + let new_avg = stats_repository .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) .await; + + // Extendable metrics + stats_repository + .set_gauge( + &MetricName::new("udp_tracker_server_performance_avg_connect_processing_time_ns"), + &LabelSet::from(context.clone()), + new_avg, + now, + ) + .await; + + (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Connect.to_string())) } UdpRequestKind::Announce => { - stats_repository + let new_avg = stats_repository .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) .await; + + // Extendable metrics + stats_repository + .set_gauge( + &MetricName::new("udp_tracker_server_performance_avg_announce_processing_time_ns"), + &LabelSet::from(context.clone()), + new_avg, + now, + ) + .await; + + (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Announce.to_string())) } UdpRequestKind::Scrape => { - stats_repository + let new_avg = stats_repository .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) .await; + + // Extendable metrics + stats_repository + .set_gauge( + &MetricName::new("udp_tracker_server_performance_avg_scrape_processing_time_ns"), + &LabelSet::from(context.clone()), + new_avg, + now, + ) + .await; + + (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Scrape.to_string())) } }, - UdpResponseKind::Error { opt_req_kind: _ } => {} - } + UdpResponseKind::Error { opt_req_kind: _ } => (LabelValue::new("error"), LabelValue::ignore()), + }; + + // Extendable metrics + + let mut label_set = LabelSet::from(context); + + label_set.upsert(LabelName::new("result"), result_label_value); + label_set.upsert(LabelName::new("kind"), 
kind_label_value); + + stats_repository + .increase_counter(&MetricName::new("udp_tracker_server_responses_sent_total"), &label_set, now) + .await; } - Event::UdpError { context } => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_errors().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_errors().await; + Event::UdpError { context } => { + // Global fixed metrics + match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_errors().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_errors().await; + } } - }, + + // Extendable metrics + stats_repository + .increase_counter( + &MetricName::new("udp_tracker_server_errors_total"), + &LabelSet::from(context), + now, + ) + .await; + } } tracing::debug!("stats: {:?}", stats_repository.get_stats().await); @@ -100,11 +213,13 @@ pub async fn handle_event(event: Event, stats_repository: &Repository) { mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event, UdpRequestKind}; use crate::statistics::event::handler::handle_event; use crate::statistics::repository::Repository; + use crate::CurrentClock; #[tokio::test] async fn should_increase_the_number_of_aborted_requests_when_it_receives_a_udp_request_aborted_event() { @@ -122,6 +237,7 @@ mod tests { ), }, &stats_repository, + CurrentClock::now(), ) .await; @@ -146,6 +262,7 @@ mod tests { ), }, &stats_repository, + CurrentClock::now(), ) .await; @@ -170,6 +287,7 @@ mod tests { ), }, &stats_repository, + CurrentClock::now(), ) .await; @@ -194,6 +312,7 @@ mod tests { ), }, &stats_repository, + CurrentClock::now(), ) .await; let stats = stats_repository.get_stats().await; @@ -215,6 +334,7 @@ mod tests { ), }, &stats_repository, + CurrentClock::now(), ) 
.await; let stats = stats_repository.get_stats().await; @@ -238,6 +358,7 @@ mod tests { kind: crate::event::UdpRequestKind::Connect, }, &stats_repository, + CurrentClock::now(), ) .await; @@ -263,6 +384,7 @@ mod tests { kind: crate::event::UdpRequestKind::Announce, }, &stats_repository, + CurrentClock::now(), ) .await; @@ -288,6 +410,7 @@ mod tests { kind: crate::event::UdpRequestKind::Scrape, }, &stats_repository, + CurrentClock::now(), ) .await; @@ -316,6 +439,7 @@ mod tests { req_processing_time: std::time::Duration::from_secs(1), }, &stats_repository, + CurrentClock::now(), ) .await; @@ -340,6 +464,7 @@ mod tests { ), }, &stats_repository, + CurrentClock::now(), ) .await; @@ -365,6 +490,7 @@ mod tests { kind: crate::event::UdpRequestKind::Connect, }, &stats_repository, + CurrentClock::now(), ) .await; @@ -390,6 +516,7 @@ mod tests { kind: crate::event::UdpRequestKind::Announce, }, &stats_repository, + CurrentClock::now(), ) .await; @@ -415,6 +542,7 @@ mod tests { kind: crate::event::UdpRequestKind::Scrape, }, &stats_repository, + CurrentClock::now(), ) .await; @@ -443,6 +571,7 @@ mod tests { req_processing_time: std::time::Duration::from_secs(1), }, &stats_repository, + CurrentClock::now(), ) .await; @@ -466,6 +595,7 @@ mod tests { ), }, &stats_repository, + CurrentClock::now(), ) .await; diff --git a/packages/udp-tracker-server/src/statistics/event/listener.rs b/packages/udp-tracker-server/src/statistics/event/listener.rs index b23260747..c50ce70c9 100644 --- a/packages/udp-tracker-server/src/statistics/event/listener.rs +++ b/packages/udp-tracker-server/src/statistics/event/listener.rs @@ -1,13 +1,15 @@ use tokio::sync::broadcast; +use torrust_tracker_clock::clock::Time; use super::handler::handle_event; use crate::event::Event; use crate::statistics::repository::Repository; +use crate::CurrentClock; pub async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Repository) { loop { match receiver.recv().await { - Ok(event) => 
handle_event(event, &stats_repository).await, + Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, Err(e) => { tracing::error!("Error receiving udp tracker server event: {:?}", e); break; diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index cce618d74..4fe07e7da 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -1,5 +1,11 @@ +use serde::Serialize; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::MetricCollection; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + /// Metrics collected by the UDP tracker server. -#[derive(Debug, PartialEq, Default)] +#[derive(Debug, PartialEq, Default, Serialize)] pub struct Metrics { // UDP /// Total number of UDP (UDP tracker) requests aborted. @@ -57,4 +63,17 @@ pub struct Metrics { /// Total number of UDP (UDP tracker) `error` requests from IPv6 peers. pub udp6_errors_handled: u64, + + /// A collection of metrics. 
+ pub metric_collection: MetricCollection, +} + +impl Metrics { + pub fn increase_counter(&mut self, metric_name: &MetricName, labels: &LabelSet, now: DurationSinceUnixEpoch) { + self.metric_collection.increase_counter(metric_name, labels, now); + } + + pub fn set_gauge(&mut self, metric_name: &MetricName, labels: &LabelSet, value: f64, now: DurationSinceUnixEpoch) { + self.metric_collection.set_gauge(metric_name, labels, value, now); + } } diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index 939a41061..535031483 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -4,3 +4,75 @@ pub mod metrics; pub mod repository; pub mod services; pub mod setup; + +use metrics::Metrics; +use torrust_tracker_metrics::metric::description::MetricDescription; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::unit::Unit; + +#[must_use] +pub fn describe_metrics() -> Metrics { + let mut metrics = Metrics::default(); + + metrics.metric_collection.describe_counter( + &MetricName::new("udp_tracker_server_requests_aborted_total"), + Some(Unit::Count), + Some(MetricDescription::new("Total number of UDP requests aborted")), + ); + + metrics.metric_collection.describe_counter( + &MetricName::new("udp_tracker_server_requests_banned_total"), + Some(Unit::Count), + Some(MetricDescription::new("Total number of UDP requests banned")), + ); + + metrics.metric_collection.describe_counter( + &MetricName::new("udp_tracker_server_requests_received_total"), + Some(Unit::Count), + Some(MetricDescription::new("Total number of UDP requests received")), + ); + + metrics.metric_collection.describe_counter( + &MetricName::new("udp_tracker_server_requests_accepted_total"), + Some(Unit::Count), + Some(MetricDescription::new("Total number of UDP requests accepted")), + ); + + metrics.metric_collection.describe_counter( + 
&MetricName::new("udp_tracker_server_responses_sent_total"), + Some(Unit::Count), + Some(MetricDescription::new("Total number of UDP responses sent")), + ); + + metrics.metric_collection.describe_counter( + &MetricName::new("udp_tracker_server_errors_total"), + Some(Unit::Count), + Some(MetricDescription::new("Total number of errors processing UDP requests")), + ); + + metrics.metric_collection.describe_gauge( + &MetricName::new("udp_tracker_server_performance_avg_connect_processing_time_ns"), + Some(Unit::Nanoseconds), + Some(MetricDescription::new( + "Average time to process a UDP connect request in nanoseconds", + )), + ); + + metrics.metric_collection.describe_gauge( + &MetricName::new("udp_tracker_server_performance_avg_announce_processing_time_ns"), + Some(Unit::Nanoseconds), + Some(MetricDescription::new( + "Average time to process a UDP announce request in nanoseconds", + )), + ); + + metrics.metric_collection.describe_gauge( + &MetricName::new("udp_tracker_server_performance_avg_scrape_processing_time_ns"), + Some(Unit::Nanoseconds), + Some(MetricDescription::new( + "Average time to process a UDP scrape request in nanoseconds", + )), + ); + + metrics +} diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index 22e793036..c33c1231c 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -2,7 +2,11 @@ use std::sync::Arc; use std::time::Duration; use tokio::sync::{RwLock, RwLockReadGuard}; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use super::describe_metrics; use super::metrics::Metrics; /// A repository for the tracker metrics. 
@@ -21,7 +25,7 @@ impl Repository { #[must_use] pub fn new() -> Self { Self { - stats: Arc::new(RwLock::new(Metrics::default())), + stats: Arc::new(RwLock::new(describe_metrics())), } } @@ -80,7 +84,7 @@ impl Repository { #[allow(clippy::cast_precision_loss)] #[allow(clippy::cast_possible_truncation)] #[allow(clippy::cast_sign_loss)] - pub async fn recalculate_udp_avg_connect_processing_time_ns(&self, req_processing_time: Duration) { + pub async fn recalculate_udp_avg_connect_processing_time_ns(&self, req_processing_time: Duration) -> f64 { let mut stats_lock = self.stats.write().await; let req_processing_time = req_processing_time.as_nanos() as f64; @@ -94,12 +98,14 @@ impl Repository { stats_lock.udp_avg_connect_processing_time_ns = new_avg.ceil() as u64; drop(stats_lock); + + new_avg } #[allow(clippy::cast_precision_loss)] #[allow(clippy::cast_possible_truncation)] #[allow(clippy::cast_sign_loss)] - pub async fn recalculate_udp_avg_announce_processing_time_ns(&self, req_processing_time: Duration) { + pub async fn recalculate_udp_avg_announce_processing_time_ns(&self, req_processing_time: Duration) -> f64 { let mut stats_lock = self.stats.write().await; let req_processing_time = req_processing_time.as_nanos() as f64; @@ -114,12 +120,14 @@ impl Repository { stats_lock.udp_avg_announce_processing_time_ns = new_avg.ceil() as u64; drop(stats_lock); + + new_avg } #[allow(clippy::cast_precision_loss)] #[allow(clippy::cast_possible_truncation)] #[allow(clippy::cast_sign_loss)] - pub async fn recalculate_udp_avg_scrape_processing_time_ns(&self, req_processing_time: Duration) { + pub async fn recalculate_udp_avg_scrape_processing_time_ns(&self, req_processing_time: Duration) -> f64 { let mut stats_lock = self.stats.write().await; let req_processing_time = req_processing_time.as_nanos() as f64; @@ -133,6 +141,8 @@ impl Repository { stats_lock.udp_avg_scrape_processing_time_ns = new_avg.ceil() as u64; drop(stats_lock); + + new_avg } pub async fn 
increase_udp6_requests(&self) { @@ -170,4 +180,16 @@ impl Repository { stats_lock.udp6_errors_handled += 1; drop(stats_lock); } + + pub async fn increase_counter(&self, metric_name: &MetricName, labels: &LabelSet, now: DurationSinceUnixEpoch) { + let mut stats_lock = self.stats.write().await; + stats_lock.increase_counter(metric_name, labels, now); + drop(stats_lock); + } + + pub async fn set_gauge(&self, metric_name: &MetricName, labels: &LabelSet, value: f64, now: DurationSinceUnixEpoch) { + let mut stats_lock = self.stats.write().await; + stats_lock.set_gauge(metric_name, labels, value, now); + drop(stats_lock); + } } diff --git a/packages/udp-tracker-server/src/statistics/services.rs b/packages/udp-tracker-server/src/statistics/services.rs index a16685077..b84bf4cd0 100644 --- a/packages/udp-tracker-server/src/statistics/services.rs +++ b/packages/udp-tracker-server/src/statistics/services.rs @@ -94,6 +94,8 @@ pub async fn get_metrics( udp6_scrapes_handled: stats.udp6_scrapes_handled, udp6_responses: stats.udp6_responses, udp6_errors_handled: stats.udp6_errors_handled, + // Extendable metrics + metric_collection: stats.metric_collection.clone(), }, } } @@ -111,8 +113,8 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; use torrust_tracker_test_helpers::configuration; - use crate::statistics; use crate::statistics::services::{get_metrics, TrackerMetrics}; + use crate::statistics::{self, describe_metrics}; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() @@ -140,7 +142,7 @@ mod tests { tracker_metrics, TrackerMetrics { torrents_metrics: AggregateSwarmMetadata::default(), - protocol_metrics: statistics::metrics::Metrics::default(), + protocol_metrics: describe_metrics(), } ); } From af8dbfa665feafcf12942dfdc7b672db1fd60416 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 9 Apr 2025 16:37:47 +0100 Subject: [PATCH 442/802] feat: [#1403] expose udp-tracker-server metrics in REST API --- 
packages/rest-tracker-api-core/src/statistics/services.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 037563b11..9277df92b 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -99,12 +99,12 @@ pub async fn get_labeled_metrics( ) -> TrackerLabeledMetrics { let _torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); let _udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); - let _udp_server_stats = udp_server_stats_repository.get_stats().await; let http_stats = http_stats_repository.get_stats().await; let udp_stats_repository = udp_stats_repository.get_stats().await; + let udp_server_stats = udp_server_stats_repository.get_stats().await; - // Merge the metrics from the HTTP and UDP metrics + // Merge all the metrics into a single collection let mut metrics = MetricCollection::default(); metrics .merge(&http_stats.metric_collection) @@ -112,6 +112,9 @@ pub async fn get_labeled_metrics( metrics .merge(&udp_stats_repository.metric_collection) .expect("failed to merge UDP core metrics"); + metrics + .merge(&udp_server_stats.metric_collection) + .expect("failed to merge UDP server metrics"); TrackerLabeledMetrics { metrics } } From 017d977344408eb4e1295ed2c7c26916aea20fdd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 10 Apr 2025 12:58:28 +0100 Subject: [PATCH 443/802] refactor: [#1403] remove unused code After discussing with @da2ce7 we don't think this is necessary. 
--- packages/metrics/src/lib.rs | 1 - .../src/thread_safe_metric_collection.rs | 92 ------------------- 2 files changed, 93 deletions(-) delete mode 100644 packages/metrics/src/thread_safe_metric_collection.rs diff --git a/packages/metrics/src/lib.rs b/packages/metrics/src/lib.rs index 1cb0df195..fd677b891 100644 --- a/packages/metrics/src/lib.rs +++ b/packages/metrics/src/lib.rs @@ -6,7 +6,6 @@ pub mod metric_collection; pub mod prometheus; pub mod sample; pub mod sample_collection; -pub mod thread_safe_metric_collection; pub mod unit; #[cfg(test)] diff --git a/packages/metrics/src/thread_safe_metric_collection.rs b/packages/metrics/src/thread_safe_metric_collection.rs deleted file mode 100644 index d9774c9af..000000000 --- a/packages/metrics/src/thread_safe_metric_collection.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::sync::RwLock; - -use torrust_tracker_primitives::DurationSinceUnixEpoch; - -use crate::counter::Counter; -use crate::gauge::Gauge; -use crate::label::LabelSet; -use crate::metric::description::MetricDescription; -use crate::metric::MetricName; -use crate::metric_collection::{MetricCollection, MetricKindCollection}; -use crate::unit::Unit; - -/* code-review: - - This might be not necessary, since the `MetricCollection` doesn't expose - any method to mutate the collection items directly. - -*/ - -/// A thread-safe wrapper around `MetricCollection` that allows concurrent -/// access to the metrics collection. -/// -/// It protects the `MetricCollection` invariant: -/// -/// "Metric's names must be unique in the collection for all types of metrics." 
-#[derive(Debug, Default)] -pub struct ThreadSafeMetricCollection { - inner: RwLock, -} - -impl ThreadSafeMetricCollection { - #[must_use] - pub fn new(counters: MetricKindCollection, gauges: MetricKindCollection) -> Self { - Self { - inner: RwLock::new(MetricCollection::new(counters, gauges)), - } - } - - // Counter-specific methods - - /// # Panics - /// - /// Panics if it can't get write access to the inner collection. - pub fn describe_counter(&mut self, name: &MetricName, _opt_unit: Option, _opt_description: Option) { - self.inner.write().unwrap().ensure_counter_exists(name); - } - - /// It allows to describe a counter metric so the metrics appear in the JSON - /// response even if there are no samples yet. - /// - /// # Panics - /// - /// Panics if it can't get read access to the inner collection. - #[must_use] - pub fn get_counter_value(&self, name: &MetricName, label_set: &LabelSet) -> Counter { - self.inner.read().unwrap().get_counter_value(name, label_set) - } - - /// # Panics - /// - /// Panics if it can't get write access to the inner collection. - pub fn increase_counter(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { - self.inner.write().unwrap().increase_counter(name, label_set, time); - } - - // Gauge-specific methods - - /// It allows to describe a gauge metric so the metrics appear in the JSON - /// response even if there are no samples yet. - /// - /// # Panics - /// - /// Panics if it can't get write access to the inner collection. - pub fn describe_gauge(&mut self, name: &MetricName, _opt_unit: Option, _opt_description: Option) { - self.inner.write().unwrap().ensure_gauge_exists(name); - } - - /// # Panics - /// - /// Panics if it can't get read access to the inner collection. 
- #[must_use] - pub fn get_gauge_value(&self, name: &MetricName, label_set: &LabelSet) -> Gauge { - self.inner.read().unwrap().get_gauge_value(name, label_set) - } - - /// # Panics - /// - /// Panics if it can't get write access to the inner collection. - pub fn set_gauge(&mut self, name: &MetricName, label_set: &LabelSet, value: f64, time: DurationSinceUnixEpoch) { - self.inner.write().unwrap().set_gauge(name, label_set, value, time); - } -} From 5099e90816324e3104684cf07b386714fe25388a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 10 Apr 2025 16:34:59 +0100 Subject: [PATCH 444/802] refactor: [#1403] extract Measurement struct To remove duplicate data. LabelSet is the HashMap key and it was also included in the HashMap value. --- packages/metrics/src/metric/mod.rs | 15 ++-- packages/metrics/src/metric_collection.rs | 4 +- packages/metrics/src/sample.rs | 85 ++++++++++++++++++----- packages/metrics/src/sample_collection.rs | 40 ++++++----- 4 files changed, 101 insertions(+), 43 deletions(-) diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index 0d79a24d3..edea035bb 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -7,9 +7,9 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::counter::Counter; use super::label::LabelSet; use super::prometheus::PrometheusSerializable; -use super::sample::Sample; use super::sample_collection::SampleCollection; use crate::gauge::Gauge; +use crate::sample::Measurement; pub type MetricName = name::MetricName; @@ -36,7 +36,7 @@ impl Metric { } #[must_use] - pub fn get_sample(&self, label_set: &LabelSet) -> Option<&Sample> { + pub fn get_sample_data(&self, label_set: &LabelSet) -> Option<&Measurement> { self.sample_collection.get(label_set) } @@ -68,11 +68,11 @@ impl PrometheusSerializable for Metric { let samples: Vec = self .sample_collection .iter() - .map(|(_label_set, sample)| { + .map(|(label_set, sample)| { format!( "{}{} {}", 
self.name.to_prometheus(), - sample.labels().to_prometheus(), + label_set.to_prometheus(), sample.value().to_prometheus() ) }) @@ -87,6 +87,7 @@ mod tests { use super::super::*; use crate::gauge::Gauge; use crate::label::{LabelName, LabelValue}; + use crate::sample::Sample; #[test] fn it_should_be_empty_when_it_does_not_have_any_sample() { @@ -132,6 +133,7 @@ mod tests { use super::super::*; use crate::counter::Counter; use crate::label::{LabelName, LabelValue}; + use crate::sample::Sample; #[test] fn it_should_be_created_from_its_name_and_a_collection_of_samples() { @@ -154,7 +156,7 @@ mod tests { let metric = Metric::::new(name.clone(), samples); - assert_eq!(metric.get_sample(&label_set).unwrap().value().value(), 1); + assert_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 1); } } @@ -164,6 +166,7 @@ mod tests { use super::super::*; use crate::gauge::Gauge; use crate::label::{LabelName, LabelValue}; + use crate::sample::Sample; #[test] fn it_should_be_created_from_its_name_and_a_collection_of_samples() { @@ -186,7 +189,7 @@ mod tests { let metric = Metric::::new(name.clone(), samples); - assert_relative_eq!(metric.get_sample(&label_set).unwrap().value().value(), 1.0); + assert_relative_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 1.0); } } } diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 588194e5f..ac62a7e8a 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -278,7 +278,7 @@ impl MetricKindCollection { pub fn get_value(&self, name: &MetricName, label_set: &LabelSet) -> Counter { self.metrics .get(name) - .and_then(|metric| metric.get_sample(label_set)) + .and_then(|metric| metric.get_sample_data(label_set)) .map_or(Counter::default(), |sample| sample.value().clone()) } } @@ -303,7 +303,7 @@ impl MetricKindCollection { pub fn get_value(&self, name: &MetricName, label_set: &LabelSet) -> Gauge { self.metrics 
.get(name) - .and_then(|metric| metric.get_sample(label_set)) + .and_then(|metric| metric.get_sample_data(label_set)) .map_or(Gauge::default(), |sample| sample.value().clone()) } } diff --git a/packages/metrics/src/sample.rs b/packages/metrics/src/sample.rs index eddb2eefc..2b1fb4cc2 100644 --- a/packages/metrics/src/sample.rs +++ b/packages/metrics/src/sample.rs @@ -9,10 +9,8 @@ use super::prometheus::PrometheusSerializable; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct Sample { - value: T, - - #[serde(serialize_with = "serialize_duration", deserialize_with = "deserialize_duration")] - update_at: DurationSinceUnixEpoch, + #[serde(flatten)] + measurement: Measurement, #[serde(rename = "labels")] label_set: LabelSet, @@ -21,17 +19,68 @@ pub struct Sample { impl Sample { #[must_use] pub fn new(value: T, update_at: DurationSinceUnixEpoch, label_set: LabelSet) -> Self { + let data = Measurement { value, update_at }; + Self { - value, - update_at, + measurement: data, label_set, } } + #[must_use] + pub fn measurement(&self) -> &Measurement { + &self.measurement + } + + #[must_use] + pub fn value(&self) -> &T { + &self.measurement.value + } + + #[must_use] + pub fn update_at(&self) -> DurationSinceUnixEpoch { + self.measurement.update_at + } + #[must_use] pub fn labels(&self) -> &LabelSet { &self.label_set } +} + +impl PrometheusSerializable for Sample { + fn to_prometheus(&self) -> String { + format!("{} {}", self.label_set.to_prometheus(), self.measurement.to_prometheus()) + } +} + +impl Sample { + pub fn increment(&mut self, time: DurationSinceUnixEpoch) { + self.measurement.increment(time); + } +} + +impl Sample { + pub fn set(&mut self, value: f64, time: DurationSinceUnixEpoch) { + self.measurement.set(value, time); + } +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct Measurement { + /// The value of the sample. + value: T, + + /// The time when the sample was last updated. 
+ #[serde(serialize_with = "serialize_duration", deserialize_with = "deserialize_duration")] + update_at: DurationSinceUnixEpoch, +} + +impl Measurement { + #[must_use] + pub fn new(value: T, update_at: DurationSinceUnixEpoch) -> Self { + Self { value, update_at } + } #[must_use] pub fn value(&self) -> &T { @@ -48,20 +97,26 @@ impl Sample { } } -impl PrometheusSerializable for Sample { +impl From> for (LabelSet, Measurement) { + fn from(sample: Sample) -> Self { + (sample.label_set, sample.measurement) + } +} + +impl PrometheusSerializable for Measurement { fn to_prometheus(&self) -> String { - format!("{} {}", self.label_set.to_prometheus(), self.value.to_prometheus()) + self.value.to_prometheus() } } -impl Sample { +impl Measurement { pub fn increment(&mut self, time: DurationSinceUnixEpoch) { self.value.increment(1); self.set_update_at(time); } } -impl Sample { +impl Measurement { pub fn set(&mut self, value: f64, time: DurationSinceUnixEpoch) { self.value.set(value); self.set_update_at(time); @@ -260,17 +315,13 @@ mod tests { #[test] fn test_serialization_round_trip() { - let original = Sample { - value: 42, - update_at: updated_at_time(), - label_set: LabelSet::from(vec![("test", "serialization")]), - }; + let original = Sample::new(42, updated_at_time(), LabelSet::from(vec![("test", "serialization")])); let json = serde_json::to_string(&original).unwrap(); let deserialized: Sample = serde_json::from_str(&json).unwrap(); - assert_eq!(original.value, deserialized.value); - assert_eq!(original.update_at, deserialized.update_at); + assert_eq!(original.measurement.value, deserialized.measurement.value); + assert_eq!(original.measurement.update_at, deserialized.measurement.update_at); assert_eq!(original.label_set, deserialized.label_set); } diff --git a/packages/metrics/src/sample_collection.rs b/packages/metrics/src/sample_collection.rs index 02977597f..c6dc4e27d 100644 --- a/packages/metrics/src/sample_collection.rs +++ 
b/packages/metrics/src/sample_collection.rs @@ -1,5 +1,6 @@ use std::collections::hash_map::Iter; use std::collections::{HashMap, HashSet}; +use std::fmt::Write as _; use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use torrust_tracker_primitives::DurationSinceUnixEpoch; @@ -9,28 +10,26 @@ use super::gauge::Gauge; use super::label::LabelSet; use super::prometheus::PrometheusSerializable; use super::sample::Sample; +use crate::sample::Measurement; #[derive(Debug, Clone, Default, PartialEq)] pub struct SampleCollection { - samples: HashMap>, + samples: HashMap>, } impl SampleCollection { - // IMPORTANT: It should never allow mutation of the samples because it would - // break the invariants. If the sample's `LabelSet` is changed, it can - // create duplicate `LabelSet`s even if the `LabelSet` in the `HashMap` key - // is unique. - /// # Panics /// /// Panics if there are duplicate `LabelSets` in the provided samples. #[must_use] pub fn new(samples: Vec>) -> Self { - let mut map = HashMap::with_capacity(samples.len()); + let mut map: HashMap> = HashMap::with_capacity(samples.len()); for sample in samples { + let (label_set, sample_data): (LabelSet, Measurement) = sample.into(); + assert!( - map.insert(sample.labels().clone(), sample).is_none(), + map.insert(label_set, sample_data).is_none(), "Duplicate LabelSet found in SampleCollection" ); } @@ -39,7 +38,7 @@ impl SampleCollection { } #[must_use] - pub fn get(&self, label: &LabelSet) -> Option<&Sample> { + pub fn get(&self, label: &LabelSet) -> Option<&Measurement> { self.samples.get(label) } @@ -55,7 +54,7 @@ impl SampleCollection { #[must_use] #[allow(clippy::iter_without_into_iter)] - pub fn iter(&self) -> Iter<'_, LabelSet, Sample> { + pub fn iter(&self) -> Iter<'_, LabelSet, Measurement> { self.samples.iter() } } @@ -65,7 +64,7 @@ impl SampleCollection { let sample = self .samples .entry(label_set.clone()) - .or_insert_with(|| Sample::new(Counter::default(), time, label_set.clone())); + 
.or_insert_with(|| Measurement::new(Counter::default(), time)); sample.increment(time); } @@ -76,7 +75,7 @@ impl SampleCollection { let sample = self .samples .entry(label_set.clone()) - .or_insert_with(|| Sample::new(Gauge::default(), time, label_set.clone())); + .or_insert_with(|| Measurement::new(Gauge::default(), time)); sample.set(value, time); } @@ -87,7 +86,12 @@ impl Serialize for SampleCollection { where S: Serializer, { - let samples: Vec<&Sample> = self.samples.values().collect(); + let mut samples: Vec> = vec![]; + + for (label_set, sample_data) in &self.samples { + samples.push(Sample::new(sample_data.value(), sample_data.update_at(), label_set.clone())); + } + samples.serialize(serializer) } } @@ -121,8 +125,8 @@ impl PrometheusSerializable for SampleCollection { fn to_prometheus(&self) -> String { let mut output = String::new(); - for sample in self.samples.values() { - output.push_str(&sample.to_prometheus()); + for (label_set, sample_data) in &self.samples { + let _ = write!(output, "{} {}", label_set.to_prometheus(), sample_data.to_prometheus()); } output @@ -165,7 +169,7 @@ mod tests { let retrieved = collection.get(&label_set); - assert_eq!(retrieved.unwrap(), &sample); + assert_eq!(retrieved.unwrap(), sample.measurement()); } #[test] @@ -179,10 +183,10 @@ mod tests { let collection = SampleCollection::new(vec![sample_1.clone(), sample_2.clone()]); let retrieved = collection.get(&label_set_1); - assert_eq!(retrieved.unwrap(), &sample_1); + assert_eq!(retrieved.unwrap(), sample_1.measurement()); let retrieved = collection.get(&label_set_2); - assert_eq!(retrieved.unwrap(), &sample_2); + assert_eq!(retrieved.unwrap(), sample_2.measurement()); } #[test] From e3b84a4e4a5b12f26132f74405483a6448da017a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 10 Apr 2025 16:45:56 +0100 Subject: [PATCH 445/802] feat: [#1403] rename field update_at to recorded_at in metrics Sample The new name is more common in the context of metrics and time-series data 
packages like Prometheus. --- packages/metrics/src/metric_collection.rs | 4 +-- packages/metrics/src/sample.rs | 40 +++++++++++------------ packages/metrics/src/sample_collection.rs | 6 ++-- 3 files changed, 25 insertions(+), 25 deletions(-) diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index ac62a7e8a..d0ed96554 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -374,7 +374,7 @@ mod tests { "samples":[ { "value":1, - "update_at":"2025-04-02T00:00:00+00:00", + "recorded_at":"2025-04-02T00:00:00+00:00", "labels":[ { "name":"server_binding_ip", @@ -398,7 +398,7 @@ mod tests { "samples":[ { "value":1.0, - "update_at":"2025-04-02T00:00:00+00:00", + "recorded_at":"2025-04-02T00:00:00+00:00", "labels":[ { "name":"server_binding_ip", diff --git a/packages/metrics/src/sample.rs b/packages/metrics/src/sample.rs index 2b1fb4cc2..5567dffec 100644 --- a/packages/metrics/src/sample.rs +++ b/packages/metrics/src/sample.rs @@ -18,8 +18,8 @@ pub struct Sample { impl Sample { #[must_use] - pub fn new(value: T, update_at: DurationSinceUnixEpoch, label_set: LabelSet) -> Self { - let data = Measurement { value, update_at }; + pub fn new(value: T, recorded_at: DurationSinceUnixEpoch, label_set: LabelSet) -> Self { + let data = Measurement { value, recorded_at }; Self { measurement: data, @@ -38,8 +38,8 @@ impl Sample { } #[must_use] - pub fn update_at(&self) -> DurationSinceUnixEpoch { - self.measurement.update_at + pub fn recorded_at(&self) -> DurationSinceUnixEpoch { + self.measurement.recorded_at } #[must_use] @@ -73,13 +73,13 @@ pub struct Measurement { /// The time when the sample was last updated. 
#[serde(serialize_with = "serialize_duration", deserialize_with = "deserialize_duration")] - update_at: DurationSinceUnixEpoch, + recorded_at: DurationSinceUnixEpoch, } impl Measurement { #[must_use] - pub fn new(value: T, update_at: DurationSinceUnixEpoch) -> Self { - Self { value, update_at } + pub fn new(value: T, recorded_at: DurationSinceUnixEpoch) -> Self { + Self { value, recorded_at } } #[must_use] @@ -88,12 +88,12 @@ impl Measurement { } #[must_use] - pub fn update_at(&self) -> DurationSinceUnixEpoch { - self.update_at + pub fn recorded_at(&self) -> DurationSinceUnixEpoch { + self.recorded_at } - fn set_update_at(&mut self, time: DurationSinceUnixEpoch) { - self.update_at = time; + fn set_recorded_at(&mut self, time: DurationSinceUnixEpoch) { + self.recorded_at = time; } } @@ -112,18 +112,18 @@ impl PrometheusSerializable for Measurement { impl Measurement { pub fn increment(&mut self, time: DurationSinceUnixEpoch) { self.value.increment(1); - self.set_update_at(time); + self.set_recorded_at(time); } } impl Measurement { pub fn set(&mut self, value: f64, time: DurationSinceUnixEpoch) { self.value.set(value); - self.set_update_at(time); + self.set_recorded_at(time); } } -/// Serializes the `update_at` field as a string in ISO 8601 format (RFC 3339). +/// Serializes the `recorded_at` field as a string in ISO 8601 format (RFC 3339). 
/// /// # Errors /// @@ -189,7 +189,7 @@ mod tests { LabelSet::from(vec![("test", "label")]), ); - assert_eq!(sample.update_at(), updated_at_time()); + assert_eq!(sample.recorded_at(), updated_at_time()); } #[test] @@ -239,7 +239,7 @@ mod tests { sample.increment(time); - assert_eq!(sample.update_at(), time); + assert_eq!(sample.recorded_at(), time); } #[test] @@ -289,7 +289,7 @@ mod tests { sample.set(1.0, time); - assert_eq!(sample.update_at(), time); + assert_eq!(sample.recorded_at(), time); } #[test] @@ -321,7 +321,7 @@ mod tests { let deserialized: Sample = serde_json::from_str(&json).unwrap(); assert_eq!(original.measurement.value, deserialized.measurement.value); - assert_eq!(original.measurement.update_at, deserialized.measurement.update_at); + assert_eq!(original.measurement.recorded_at, deserialized.measurement.recorded_at); assert_eq!(original.label_set, deserialized.label_set); } @@ -338,7 +338,7 @@ mod tests { let expected_json = r#" { "value": 42, - "update_at": "2025-04-02T00:00:00.000000100+00:00", + "recorded_at": "2025-04-02T00:00:00.000000100+00:00", "labels": [ { "name": "label_name", @@ -372,7 +372,7 @@ mod tests { r#" { "value": 42, - "update_at": "1-1-2023T25:00:00Z", + "recorded_at": "1-1-2023T25:00:00Z", "labels": [ { "name": "label_name", diff --git a/packages/metrics/src/sample_collection.rs b/packages/metrics/src/sample_collection.rs index c6dc4e27d..436a4bc7d 100644 --- a/packages/metrics/src/sample_collection.rs +++ b/packages/metrics/src/sample_collection.rs @@ -89,7 +89,7 @@ impl Serialize for SampleCollection { let mut samples: Vec> = vec![]; for (label_set, sample_data) in &self.samples { - samples.push(Sample::new(sample_data.value(), sample_data.update_at(), label_set.clone())); + samples.push(Sample::new(sample_data.value(), sample_data.recorded_at(), label_set.clone())); } samples.serialize(serializer) @@ -317,7 +317,7 @@ mod tests { collection.increment(&label_set, new_time); let sample = collection.get(&label_set).unwrap(); - 
assert_eq!(sample.update_at(), new_time); + assert_eq!(sample.recorded_at(), new_time); assert_eq!(*sample.value(), Counter::new(2)); } @@ -392,7 +392,7 @@ mod tests { collection.set(&label_set, 2.0, new_time); let sample = collection.get(&label_set).unwrap(); - assert_eq!(sample.update_at(), new_time); + assert_eq!(sample.recorded_at(), new_time); assert_eq!(*sample.value(), Gauge::new(2.0)); } From 3ef9e13f0eb8f91bec853ce7f57df8acec5af976 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 11 Apr 2025 11:20:15 +0100 Subject: [PATCH 446/802] chore(deps): udpate dependencies ```output cargo update Updating crates.io index Locking 48 packages to latest compatible versions Updating async-compression v0.4.21 -> v0.4.22 Updating axum v0.8.1 -> v0.8.3 Updating axum-core v0.5.0 -> v0.5.2 Updating axum-extra v0.10.0 -> v0.10.1 Updating bigdecimal v0.4.7 -> v0.4.8 Updating borsh v1.5.6 -> v1.5.7 Updating borsh-derive v1.5.6 -> v1.5.7 Updating cc v1.2.17 -> v1.2.19 Updating clap v4.5.32 -> v4.5.35 Updating clap_builder v4.5.32 -> v4.5.35 Updating crossbeam-channel v0.5.14 -> v0.5.15 Updating darling v0.20.10 -> v0.20.11 Updating darling_core v0.20.10 -> v0.20.11 Updating darling_macro v0.20.10 -> v0.20.11 Downgrading deranged v0.4.1 -> v0.4.0 Updating errno v0.3.10 -> v0.3.11 Updating event-listener-strategy v0.5.3 -> v0.5.4 Updating flate2 v1.1.0 -> v1.1.1 Updating fragile v2.0.0 -> v2.0.1 Updating half v2.5.0 -> v2.6.0 Updating hyper-util v0.1.10 -> v0.1.11 Updating iana-time-zone v0.1.62 -> v0.1.63 Updating icu_locid_transform_data v1.5.0 -> v1.5.1 Updating icu_normalizer_data v1.5.0 -> v1.5.1 Updating icu_properties_data v1.5.0 -> v1.5.1 Updating indexmap v2.8.0 -> v2.9.0 Updating jobserver v0.1.32 -> v0.1.33 Updating linux-raw-sys v0.9.3 -> v0.9.4 Updating log v0.4.26 -> v0.4.27 Updating miniz_oxide v0.8.5 -> v0.8.8 Updating once_cell v1.21.1 -> v1.21.3 Updating openssl v0.10.71 -> v0.10.72 Updating openssl-sys v0.9.106 -> v0.9.107 Adding portable-atomic-util v0.2.4 
Updating redox_syscall v0.5.10 -> v0.5.11 Updating ringbuf v0.4.7 -> v0.4.8 Updating rustix v1.0.3 -> v1.0.5 Updating rustls v0.23.25 -> v0.23.26 Updating rustls-webpki v0.103.0 -> v0.103.1 Updating smallvec v1.14.0 -> v1.15.0 Updating socket2 v0.5.8 -> v0.5.9 Updating tokio v1.44.1 -> v1.44.2 Updating value-bag v1.10.0 -> v1.11.1 Updating windows-core v0.52.0 -> v0.61.0 Adding windows-implement v0.60.0 Adding windows-interface v0.59.1 Adding windows-strings v0.4.0 Updating winnow v0.7.4 -> v0.7.6 ``` --- Cargo.lock | 252 +++++++++++++++++++++++++++++++---------------------- 1 file changed, 150 insertions(+), 102 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5feea957d..c3fb651ef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -217,9 +217,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.21" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0cf008e5e1a9e9e22a7d3c9a4992e21a350290069e36d8fb72304ed17e8f2d2" +checksum = "59a194f9d963d8099596278594b3107448656ba73831c9d8c783e613ce86da64" dependencies = [ "brotli", "flate2", @@ -357,9 +357,9 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "axum" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d6fd624c75e18b3b4c6b9caf42b1afe24437daaee904069137d8bab077be8b8" +checksum = "de45108900e1f9b9242f7f2e254aa3e2c029c921c258fe9e6b4217eeebd54288" dependencies = [ "axum-core", "axum-macros", @@ -403,12 +403,12 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.5.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df1362f362fd16024ae199c1970ce98f9661bf5ef94b9808fee734bc3698b733" +checksum = "68464cd0412f486726fb3373129ef5d2993f90c34bc2bc1c1e9943b2f4fc7ca6" dependencies = [ "bytes", - "futures-util", + "futures-core", "http", "http-body", "http-body-util", @@ -423,9 +423,9 @@ 
dependencies = [ [[package]] name = "axum-extra" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "460fc6f625a1f7705c6cf62d0d070794e94668988b1c38111baeec177c715f7b" +checksum = "45bf463831f5131b7d3c756525b305d40f1185b688565648a92e1392ca35713d" dependencies = [ "axum", "axum-core", @@ -437,6 +437,7 @@ dependencies = [ "http-body-util", "mime", "pin-project-lite", + "rustversion", "serde", "serde_html_form", "serde_path_to_error", @@ -516,9 +517,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "bigdecimal" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f31f3af01c5c65a07985c804d3366560e6fa7883d640a122819b14ec327482c" +checksum = "1a22f228ab7a1b23027ccc6c350b72868017af7ea8356fbdf19f8d991c690013" dependencies = [ "autocfg", "libm", @@ -822,9 +823,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.5.6" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2b74d67a0fc0af8e9823b79fd1c43a0900e5a8f0e0f4cc9210796bf3a820126" +checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" dependencies = [ "borsh-derive", "cfg_aliases", @@ -832,9 +833,9 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.5.6" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d37ed1b2c9b78421218a0b4f6d8349132d6ec2cfeba1cfb0118b0a8e268df9e" +checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" dependencies = [ "once_cell", "proc-macro-crate", @@ -951,9 +952,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.17" +version = "1.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fcb57c740ae1daf453ae85f16e37396f672b039e00d9d866e07ddb24e328e3a" +checksum = 
"8e3a13707ac958681c13b39b458c073d0d9bc8a22cb1b2f4c8e55eb72c13f362" dependencies = [ "jobserver", "libc", @@ -1044,9 +1045,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.32" +version = "4.5.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6088f3ae8c3608d19260cd7445411865a485688711b78b5be70d78cd96136f83" +checksum = "d8aa86934b44c19c50f87cc2790e19f54f7a67aedb64101c2e1a2e5ecfb73944" dependencies = [ "clap_builder", "clap_derive", @@ -1054,9 +1055,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.32" +version = "4.5.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a7ef7f676155edfb82daa97f99441f3ebf4a58d5e32f295a56259f1b6facc8" +checksum = "2414dbb2dd0695280da6ea9261e327479e9d37b0630f6b53ba2a11c60c679fd9" dependencies = [ "anstream", "anstyle", @@ -1216,9 +1217,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.14" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" dependencies = [ "crossbeam-utils", ] @@ -1285,9 +1286,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.10" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" dependencies = [ "darling_core", "darling_macro", @@ -1295,9 +1296,9 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.10" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" dependencies = [ "fnv", "ident_case", @@ 
-1309,9 +1310,9 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.20.10" +version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core", "quote", @@ -1334,9 +1335,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.4.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cfac68e08048ae1883171632c2aef3ebc555621ae56fbccce1cbf22dd7f058" +checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" dependencies = [ "powerfmt", "serde", @@ -1451,9 +1452,9 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" +checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e" dependencies = [ "libc", "windows-sys 0.59.0", @@ -1489,9 +1490,9 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" +checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" dependencies = [ "event-listener 5.4.0", "pin-project-lite", @@ -1545,9 +1546,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11faaf5a5236997af9848be0bef4db95824b1d534ebc64d0f0c6cf3e67bd38dc" +checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" dependencies = [ "crc32fast", "libz-sys", @@ -1612,9 +1613,9 @@ dependencies = [ 
[[package]] name = "fragile" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" +checksum = "28dd6caf6059519a65843af8fe2a3ae298b14b80179855aeb4adc2c1934ee619" [[package]] name = "frunk" @@ -1865,7 +1866,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.8.0", + "indexmap 2.9.0", "slab", "tokio", "tokio-util", @@ -1874,9 +1875,9 @@ dependencies = [ [[package]] name = "half" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7db2ff139bba50379da6aa0766b52fdcb62cb5b263009b09ed58ba604e14bbd1" +checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" dependencies = [ "cfg-if", "crunchy", @@ -2073,9 +2074,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" +checksum = "497bbc33a26fdd4af9ed9c70d63f61cf56a938375fbb32df34db9b1cd6d643f2" dependencies = [ "bytes", "futures-channel", @@ -2083,6 +2084,7 @@ dependencies = [ "http", "http-body", "hyper", + "libc", "pin-project-lite", "socket2", "tokio", @@ -2107,9 +2109,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.62" +version = "0.1.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2fd658b06e56721792c5df4475705b6cda790e9298d19d2f8af083457bcd127" +checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2170,9 +2172,9 @@ dependencies = [ [[package]] name = "icu_locid_transform_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" +checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" [[package]] name = "icu_normalizer" @@ -2194,9 +2196,9 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" +checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" [[package]] name = "icu_properties" @@ -2215,9 +2217,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" +checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" [[package]] name = "icu_provider" @@ -2287,9 +2289,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3954d50fe15b02142bf25d3b8bdadb634ec3948f103d04ffe3031bc8fe9d7058" +checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -2375,10 +2377,11 @@ checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" dependencies = [ + "getrandom 0.3.2", "libc", ] @@ -2437,7 +2440,7 @@ checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ "bitflags 2.9.0", "libc", - "redox_syscall 0.5.10", + "redox_syscall 0.5.11", ] [[package]] @@ -2470,9 +2473,9 @@ checksum 
= "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "linux-raw-sys" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe7db12097d22ec582439daf8618b8fdd1a7bef6270e9af3b1ebcd30893cf413" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" [[package]] name = "litemap" @@ -2504,9 +2507,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.26" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" dependencies = [ "value-bag", ] @@ -2577,9 +2580,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.5" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5" +checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" dependencies = [ "adler2", ] @@ -2834,9 +2837,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.21.1" +version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d75b0bedcc4fe52caa0e03d9f1151a323e4aa5e2d78ba3580400cd3c9e2bc4bc" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "oorandom" @@ -2846,9 +2849,9 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "openssl" -version = "0.10.71" +version = "0.10.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e14130c6a98cd258fdcb0fb6d744152343ff729cbfcb28c656a9d12b999fbcd" +checksum = "fedfea7d58a1f73118430a55da6a286e7b044961736ce96a16a17068ea25e5da" dependencies = [ "bitflags 2.9.0", "cfg-if", @@ -2878,9 +2881,9 @@ 
checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-sys" -version = "0.9.106" +version = "0.9.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bb61ea9811cc39e3c2069f40b8b8e2e70d8569b361f879786cc7ed48b777cdd" +checksum = "8288979acd84749c744a9014b4382d42b8f7b2592847b5afb2ed29e5d16ede07" dependencies = [ "cc", "libc", @@ -2924,7 +2927,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.10", + "redox_syscall 0.5.11", "smallvec", "windows-targets 0.52.6", ] @@ -3109,6 +3112,15 @@ version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" +[[package]] +name = "portable-atomic-util" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" +dependencies = [ + "portable-atomic", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -3388,9 +3400,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b8c0c260b63a8219631167be35e6a988e9554dbd323f8bd08439c8ed1302bd1" +checksum = "d2f103c6d277498fbceb16e84d317e2a400f160f46904d5f5410848c829511a3" dependencies = [ "bitflags 2.9.0", ] @@ -3499,12 +3511,13 @@ dependencies = [ [[package]] name = "ringbuf" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "726bb493fe9cac765e8f96a144c3a8396bdf766dedad22e504b70b908dcbceb4" +checksum = "fe47b720588c8702e34b5979cb3271a8b1842c7cb6f57408efa70c779363488c" dependencies = [ "crossbeam-utils", "portable-atomic", + "portable-atomic-util", ] [[package]] @@ -3632,22 +3645,22 @@ dependencies = [ [[package]] name = "rustix" -version = 
"1.0.3" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e56a18552996ac8d29ecc3b190b4fdbb2d91ca4ec396de7bbffaf43f3d637e96" +checksum = "d97817398dd4bb2e6da002002db259209759911da105da92bec29ccb12cf58bf" dependencies = [ "bitflags 2.9.0", "errno", "libc", - "linux-raw-sys 0.9.3", + "linux-raw-sys 0.9.4", "windows-sys 0.59.0", ] [[package]] name = "rustls" -version = "0.23.25" +version = "0.23.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "822ee9188ac4ec04a2f0531e55d035fb2de73f18b41a63c70c2712503b6fb13c" +checksum = "df51b5869f3a441595eac5e8ff14d486ff285f7b8c0df8770e49c3b56351f0f0" dependencies = [ "once_cell", "ring", @@ -3686,9 +3699,9 @@ checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" [[package]] name = "rustls-webpki" -version = "0.103.0" +version = "0.103.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aa4eeac2588ffff23e9d7a7e9b3f971c5fb5b7ebc9452745e0c232c64f83b2f" +checksum = "fef8b8769aaccf73098557a87cd1816b4f9c7c16811c9c77142aa695c16f2c03" dependencies = [ "ring", "rustls-pki-types", @@ -3840,7 +3853,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4" dependencies = [ "form_urlencoded", - "indexmap 2.8.0", + "indexmap 2.9.0", "itoa", "ryu", "serde", @@ -3852,7 +3865,7 @@ version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" dependencies = [ - "indexmap 2.8.0", + "indexmap 2.9.0", "itoa", "memchr", "ryu", @@ -3911,7 +3924,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.8.0", + "indexmap 2.9.0", "serde", "serde_derive", "serde_json", @@ -4000,15 +4013,15 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.14.0" +version = "1.15.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" +checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" [[package]] name = "socket2" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" +checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" dependencies = [ "libc", "windows-sys 0.52.0", @@ -4187,7 +4200,7 @@ dependencies = [ "fastrand", "getrandom 0.3.2", "once_cell", - "rustix 1.0.3", + "rustix 1.0.5", "windows-sys 0.59.0", ] @@ -4206,7 +4219,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed" dependencies = [ - "rustix 1.0.3", + "rustix 1.0.5", "windows-sys 0.59.0", ] @@ -4373,9 +4386,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.44.1" +version = "1.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f382da615b842244d4b8738c82ed1275e6c5dd90c459a30941cd07080b06c91a" +checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" dependencies = [ "backtrace", "bytes", @@ -4485,7 +4498,7 @@ version = "0.22.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" dependencies = [ - "indexmap 2.8.0", + "indexmap 2.9.0", "serde", "serde_spanned", "toml_datetime", @@ -5096,9 +5109,9 @@ checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "value-bag" -version = "1.10.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ef4c4aa54d5d05a279399bfa921ec387b7aba77caf7a682ae8d86785b8fdad2" +checksum = 
"943ce29a8a743eb10d6082545d861b24f9d1b160b7d741e0f2cdf726bec909c5" [[package]] name = "vcpkg" @@ -5260,11 +5273,37 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.52.0" +version = "0.61.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +checksum = "4763c1de310c86d75a878046489e2e5ba02c649d185f21c67d4cf8a56d098980" dependencies = [ - "windows-targets 0.52.6", + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings 0.4.0", +] + +[[package]] +name = "windows-implement" +version = "0.60.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + +[[package]] +name = "windows-interface" +version = "0.59.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", ] [[package]] @@ -5280,7 +5319,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" dependencies = [ "windows-result", - "windows-strings", + "windows-strings 0.3.1", "windows-targets 0.53.0", ] @@ -5302,6 +5341,15 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-strings" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2ba9642430ee452d5a7aa78d72907ebe8cfda358e8cb7918a2050581322f97" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-sys" version = "0.48.0" @@ -5516,9 +5564,9 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.7.4" +version = 
"0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e97b544156e9bebe1a0ffbc03484fc1ffe3100cbce3ffb17eac35f7cdd7ab36" +checksum = "63d3fcd9bba44b03821e7d699eeee959f3126dcc4aa8e4ae18ec617c2a5cea10" dependencies = [ "memchr", ] @@ -5560,7 +5608,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d65cbf2f12c15564212d48f4e3dfb87923d25d611f2aed18f4cb23f0413d89e" dependencies = [ "libc", - "rustix 1.0.3", + "rustix 1.0.5", ] [[package]] From 4205c7184d32c3bf953101d8bad335ad12025a55 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 11 Apr 2025 15:58:56 +0100 Subject: [PATCH 447/802] fix: [#1441] do not reveal API token in logs --- src/container.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/container.rs b/src/container.rs index 7742d8e40..9df9c9611 100644 --- a/src/container.rs +++ b/src/container.rs @@ -38,7 +38,7 @@ pub struct AppContainer { } impl AppContainer { - #[instrument(skip())] + #[instrument(skip(configuration))] pub fn initialize(configuration: &Configuration) -> AppContainer { // Configuration From 15a34c7fa3aa3d2cf28d8e42c3529bd9135a5b92 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 11 Apr 2025 16:15:29 +0100 Subject: [PATCH 448/802] feat: [#1438] merge UDP tracker core metrics Extract `request_kind` label. Putting the request type in the metric name does not make sense. The purpose of the refactor to build the new extendable-labeled metrics was to start using labels to group metrics instead of changes in metric's names. 
From this: ``` udp_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="6969",server_binding_protocol="udp"} 619656 udp_tracker_core_connect_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="6969",server_binding_protocol="udp"} 308493 udp_tracker_core_scrape_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="6969",server_binding_protocol="udp"} 32487 ``` To this: ``` udp_tracker_core_requests_received_total{request_kind="announce",server_binding_ip="0.0.0.0",server_binding_port="6969",server_binding_protocol="udp"} 619656 udp_tracker_core_requests_received_total{request_kind="connect",server_binding_ip="0.0.0.0",server_binding_port="6969",server_binding_protocol="udp"} 308493 udp_tracker_core_requests_received_total{request_kind="scrape",server_binding_ip="0.0.0.0",server_binding_port="6969",server_binding_protocol="udp"} 32487 ``` --- .../src/statistics/event/handler.rs | 30 +++++++++---------- .../udp-tracker-core/src/statistics/mod.rs | 18 +++-------- 2 files changed, 18 insertions(+), 30 deletions(-) diff --git a/packages/udp-tracker-core/src/statistics/event/handler.rs b/packages/udp-tracker-core/src/statistics/event/handler.rs index a910d9373..59c382755 100644 --- a/packages/udp-tracker-core/src/statistics/event/handler.rs +++ b/packages/udp-tracker-core/src/statistics/event/handler.rs @@ -1,9 +1,10 @@ -use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::label::{LabelName, LabelSet, LabelValue}; use torrust_tracker_metrics::metric::MetricName; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; use crate::statistics::repository::Repository; +use crate::statistics::UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; /// # Panics /// @@ -24,12 +25,11 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics + let mut label_set = LabelSet::from(context); + 
label_set.upsert(LabelName::new("request_kind"), LabelValue::new("connect")); + stats_repository - .increase_counter( - &MetricName::new("udp_tracker_core_connect_requests_received_total"), - &LabelSet::from(context), - now, - ) + .increase_counter(&MetricName::new(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) .await; } Event::UdpAnnounce { context } => { @@ -46,12 +46,11 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics + let mut label_set = LabelSet::from(context); + label_set.upsert(LabelName::new("request_kind"), LabelValue::new("announce")); + stats_repository - .increase_counter( - &MetricName::new("udp_tracker_core_announce_requests_received_total"), - &LabelSet::from(context), - now, - ) + .increase_counter(&MetricName::new(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) .await; } Event::UdpScrape { context } => { @@ -68,12 +67,11 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics + let mut label_set = LabelSet::from(context); + label_set.upsert(LabelName::new("request_kind"), LabelValue::new("scrape")); + stats_repository - .increase_counter( - &MetricName::new("udp_tracker_core_scrape_requests_received_total"), - &LabelSet::from(context), - now, - ) + .increase_counter(&MetricName::new(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) .await; } } diff --git a/packages/udp-tracker-core/src/statistics/mod.rs b/packages/udp-tracker-core/src/statistics/mod.rs index cdba76df3..bc4d8d836 100644 --- a/packages/udp-tracker-core/src/statistics/mod.rs +++ b/packages/udp-tracker-core/src/statistics/mod.rs @@ -10,26 +10,16 @@ use torrust_tracker_metrics::metric::description::MetricDescription; use torrust_tracker_metrics::metric::MetricName; use torrust_tracker_metrics::unit::Unit; +const UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL: &str = "udp_tracker_core_requests_received_total"; + #[must_use] pub fn describe_metrics() -> 
Metrics { let mut metrics = Metrics::default(); metrics.metric_collection.describe_counter( - &MetricName::new("udp_tracker_core_connect_requests_received_total"), - Some(Unit::Count), - Some(MetricDescription::new("Total number of UDP connect requests received")), - ); - - metrics.metric_collection.describe_counter( - &MetricName::new("udp_tracker_core_announce_requests_received_total"), - Some(Unit::Count), - Some(MetricDescription::new("Total number of UDP announce requests received")), - ); - - metrics.metric_collection.describe_counter( - &MetricName::new("udp_tracker_core_scrape_requests_received_total"), + &MetricName::new(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), Some(Unit::Count), - Some(MetricDescription::new("Total number of UDP scrape requests received")), + Some(MetricDescription::new("Total number of UDP requests received")), ); metrics From 44c700d3bc8c0f8e5457b75de12fdf14cc80fb58 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 11 Apr 2025 16:27:40 +0100 Subject: [PATCH 449/802] feat: [#1438] merge UDP tracker core metrics Extract `request_kind` label. Putting the request type in the metric name does not make sense. The purpose of the refactor to build the new extendable-labeled metrics was to start using labels to group metrics instead of changes in metric's names. 
From this: ``` http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 http_tracker_core_scrape_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 ``` To this: ``` http_tracker_core_requests_received_total{request_kind="announce",server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 http_tracker_core_requests_received_total{request_kind="scrape", server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 ``` --- .../src/statistics/event/handler.rs | 21 +++++++++---------- .../http-tracker-core/src/statistics/mod.rs | 12 ++++------- 2 files changed, 14 insertions(+), 19 deletions(-) diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index 046cb7775..0baec1cd9 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -1,11 +1,12 @@ use std::net::IpAddr; -use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::label::{LabelName, LabelSet, LabelValue}; use torrust_tracker_metrics::metric::MetricName; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; use crate::statistics::repository::Repository; +use crate::statistics::HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; /// # Panics /// @@ -27,12 +28,11 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics + let mut label_set = LabelSet::from(connection); + label_set.upsert(LabelName::new("request_kind"), LabelValue::new("announce")); + stats_repository - .increase_counter( - &MetricName::new("http_tracker_core_announce_requests_received_total"), - &LabelSet::from(connection), - now, - ) + 
.increase_counter(&MetricName::new(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) .await; } Event::TcpScrape { connection } => { @@ -49,12 +49,11 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics + let mut label_set = LabelSet::from(connection); + label_set.upsert(LabelName::new("request_kind"), LabelValue::new("scrape")); + stats_repository - .increase_counter( - &MetricName::new("http_tracker_core_scrape_requests_received_total"), - &LabelSet::from(connection), - now, - ) + .increase_counter(&MetricName::new(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) .await; } } diff --git a/packages/http-tracker-core/src/statistics/mod.rs b/packages/http-tracker-core/src/statistics/mod.rs index 8148df3c1..026c435af 100644 --- a/packages/http-tracker-core/src/statistics/mod.rs +++ b/packages/http-tracker-core/src/statistics/mod.rs @@ -10,20 +10,16 @@ use torrust_tracker_metrics::metric::description::MetricDescription; use torrust_tracker_metrics::metric::MetricName; use torrust_tracker_metrics::unit::Unit; +const HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL: &str = "http_tracker_core_requests_received_total"; + #[must_use] pub fn describe_metrics() -> Metrics { let mut metrics = Metrics::default(); metrics.metric_collection.describe_counter( - &MetricName::new("http_tracker_core_announce_requests_received_total"), - Some(Unit::Count), - Some(MetricDescription::new("Total number of HTTP announce requests received")), - ); - - metrics.metric_collection.describe_counter( - &MetricName::new("http_tracker_core_scrape_requests_received_total"), + &MetricName::new(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), Some(Unit::Count), - Some(MetricDescription::new("Total number of HTTP scrape requests received")), + Some(MetricDescription::new("Total number of HTTP requests received")), ); metrics From 5f57f7889f60dc862b3bcdc05e8c88255643a82c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 11 Apr 2025 
16:53:22 +0100 Subject: [PATCH 450/802] feat: [#1438] merge UDP tracker server metrics - Rename label `kind` to `request_kind`. It's more explicit. There could be other "kind" of things in the future. - Remove the empty label `kind=""` when the response is an error. - Fix result label for error response. - Merge performance metrics in one and convert request kind into a label: ``` udp_tracker_server_performance_avg_processing_time_ns{request_kind="connect"} udp_tracker_server_performance_avg_processing_time_ns{request_kind="announce"} udp_tracker_server_performance_avg_processing_time_ns{request_kind="scrape"} ``` --- .../src/statistics/event/handler.rs | 56 ++++++++++++------- .../udp-tracker-server/src/statistics/mod.rs | 38 +++++-------- 2 files changed, 51 insertions(+), 43 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index 91f5cef0c..4c10576c0 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -4,6 +4,12 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::{Event, UdpRequestKind, UdpResponseKind}; use crate::statistics::repository::Repository; +use crate::statistics::{ + UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, + UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL, + UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL, + UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, +}; /// # Panics /// @@ -19,7 +25,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics stats_repository .increase_counter( - &MetricName::new("udp_tracker_server_requests_aborted_total"), + &MetricName::new(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &LabelSet::from(context), now, ) @@ -32,7 +38,7 @@ 
pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics stats_repository .increase_counter( - &MetricName::new("udp_tracker_server_requests_banned_total"), + &MetricName::new(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), &LabelSet::from(context), now, ) @@ -52,7 +58,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics stats_repository .increase_counter( - &MetricName::new("udp_tracker_server_requests_received_total"), + &MetricName::new(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &LabelSet::from(context), now, ) @@ -94,11 +100,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura label_set.upsert(LabelName::new("kind"), LabelValue::new(&kind.to_string())); stats_repository - .increase_counter( - &MetricName::new("udp_tracker_server_requests_accepted_total"), - &label_set, - now, - ) + .increase_counter(&MetricName::new(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &label_set, now) .await; } Event::UdpResponseSent { @@ -124,10 +126,14 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura .await; // Extendable metrics + + let mut label_set = LabelSet::from(context.clone()); + label_set.upsert(LabelName::new("request_kind"), LabelValue::new(&req_kind.to_string())); + stats_repository .set_gauge( - &MetricName::new("udp_tracker_server_performance_avg_connect_processing_time_ns"), - &LabelSet::from(context.clone()), + &MetricName::new(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &label_set, new_avg, now, ) @@ -141,16 +147,20 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura .await; // Extendable metrics + + let mut label_set = LabelSet::from(context.clone()); + label_set.upsert(LabelName::new("request_kind"), LabelValue::new(&req_kind.to_string())); + stats_repository .set_gauge( - &MetricName::new("udp_tracker_server_performance_avg_announce_processing_time_ns"), - 
&LabelSet::from(context.clone()), + &MetricName::new(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &label_set, new_avg, now, ) .await; - (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Connect.to_string())) + (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Announce.to_string())) } UdpRequestKind::Scrape => { let new_avg = stats_repository @@ -158,30 +168,36 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura .await; // Extendable metrics + + let mut label_set = LabelSet::from(context.clone()); + label_set.upsert(LabelName::new("request_kind"), LabelValue::new(&req_kind.to_string())); + stats_repository .set_gauge( - &MetricName::new("udp_tracker_server_performance_avg_scrape_processing_time_ns"), - &LabelSet::from(context.clone()), + &MetricName::new(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &label_set, new_avg, now, ) .await; - (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Connect.to_string())) + (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Scrape.to_string())) } }, - UdpResponseKind::Error { opt_req_kind: _ } => (LabelValue::new("ok"), LabelValue::ignore()), + UdpResponseKind::Error { opt_req_kind: _ } => (LabelValue::new("error"), LabelValue::ignore()), }; // Extendable metrics let mut label_set = LabelSet::from(context); + if result_label_value == LabelValue::new("ok") { + label_set.upsert(LabelName::new("request_kind"), kind_label_value); + } label_set.upsert(LabelName::new("result"), result_label_value); - label_set.upsert(LabelName::new("kind"), kind_label_value); stats_repository - .increase_counter(&MetricName::new("udp_tracker_server_responses_sent_total"), &label_set, now) + .increase_counter(&MetricName::new(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &label_set, now) .await; } Event::UdpError { context } => { @@ -198,7 +214,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics stats_repository 
.increase_counter( - &MetricName::new("udp_tracker_server_errors_total"), + &MetricName::new(UDP_TRACKER_SERVER_ERRORS_TOTAL), &LabelSet::from(context), now, ) diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index 535031483..523cd4bac 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -10,69 +10,61 @@ use torrust_tracker_metrics::metric::description::MetricDescription; use torrust_tracker_metrics::metric::MetricName; use torrust_tracker_metrics::unit::Unit; +const UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL: &str = "udp_tracker_server_requests_aborted_total"; +const UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL: &str = "udp_tracker_server_requests_banned_total"; +const UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL: &str = "udp_tracker_server_requests_received_total"; +const UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL: &str = "udp_tracker_server_requests_accepted_total"; +const UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL: &str = "udp_tracker_server_responses_sent_total"; +const UDP_TRACKER_SERVER_ERRORS_TOTAL: &str = "udp_tracker_server_errors_total"; +const UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS: &str = "udp_tracker_server_performance_avg_processing_time_ns"; + #[must_use] pub fn describe_metrics() -> Metrics { let mut metrics = Metrics::default(); metrics.metric_collection.describe_counter( - &MetricName::new("udp_tracker_server_requests_aborted_total"), + &MetricName::new(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), Some(Unit::Count), Some(MetricDescription::new("Total number of UDP requests aborted")), ); metrics.metric_collection.describe_counter( - &MetricName::new("udp_tracker_server_requests_banned_total"), + &MetricName::new(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), Some(Unit::Count), Some(MetricDescription::new("Total number of UDP requests banned")), ); metrics.metric_collection.describe_counter( - 
&MetricName::new("udp_tracker_server_requests_received_total"), + &MetricName::new(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), Some(Unit::Count), Some(MetricDescription::new("Total number of UDP requests received")), ); metrics.metric_collection.describe_counter( - &MetricName::new("udp_tracker_server_requests_accepted_total"), + &MetricName::new(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), Some(Unit::Count), Some(MetricDescription::new("Total number of UDP requests accepted")), ); metrics.metric_collection.describe_counter( - &MetricName::new("udp_tracker_server_responses_sent_total"), + &MetricName::new(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), Some(Unit::Count), Some(MetricDescription::new("Total number of UDP responses sent")), ); metrics.metric_collection.describe_counter( - &MetricName::new("udp_tracker_server_errors_total"), + &MetricName::new(UDP_TRACKER_SERVER_ERRORS_TOTAL), Some(Unit::Count), Some(MetricDescription::new("Total number of errors processing UDP requests")), ); metrics.metric_collection.describe_gauge( - &MetricName::new("udp_tracker_server_performance_avg_connect_processing_time_ns"), + &MetricName::new(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), Some(Unit::Nanoseconds), Some(MetricDescription::new( "Average time to process a UDP connect request in nanoseconds", )), ); - metrics.metric_collection.describe_gauge( - &MetricName::new("udp_tracker_server_performance_avg_announce_processing_time_ns"), - Some(Unit::Nanoseconds), - Some(MetricDescription::new( - "Average time to process a UDP announce request in nanoseconds", - )), - ); - - metrics.metric_collection.describe_gauge( - &MetricName::new("udp_tracker_server_performance_avg_scrape_processing_time_ns"), - Some(Unit::Nanoseconds), - Some(MetricDescription::new( - "Average time to process a UDP scrape request in nanoseconds", - )), - ); - metrics } From ed8acac48e2ac538dc6e959d35c22badaae8d683 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 14 Apr 2025 11:12:54 +0100 
Subject: [PATCH 451/802] fix: [#1449] increase broadcast channel capacity for events channel to avoid lagged listeners. This does not fix the problem. Stats listener might lose events and keep imprecise metrics. --- packages/http-tracker-core/src/event/sender.rs | 2 +- packages/udp-tracker-core/src/event/sender.rs | 2 +- packages/udp-tracker-server/src/event/sender.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/http-tracker-core/src/event/sender.rs b/packages/http-tracker-core/src/event/sender.rs index e9431abf2..511a381d0 100644 --- a/packages/http-tracker-core/src/event/sender.rs +++ b/packages/http-tracker-core/src/event/sender.rs @@ -7,7 +7,7 @@ use tokio::sync::broadcast::error::SendError; use super::Event; -const CHANNEL_CAPACITY: usize = 1024; +const CHANNEL_CAPACITY: usize = 32768; /// A trait for sending sending. #[cfg_attr(test, automock)] diff --git a/packages/udp-tracker-core/src/event/sender.rs b/packages/udp-tracker-core/src/event/sender.rs index e9431abf2..511a381d0 100644 --- a/packages/udp-tracker-core/src/event/sender.rs +++ b/packages/udp-tracker-core/src/event/sender.rs @@ -7,7 +7,7 @@ use tokio::sync::broadcast::error::SendError; use super::Event; -const CHANNEL_CAPACITY: usize = 1024; +const CHANNEL_CAPACITY: usize = 32768; /// A trait for sending sending. #[cfg_attr(test, automock)] diff --git a/packages/udp-tracker-server/src/event/sender.rs b/packages/udp-tracker-server/src/event/sender.rs index e9431abf2..511a381d0 100644 --- a/packages/udp-tracker-server/src/event/sender.rs +++ b/packages/udp-tracker-server/src/event/sender.rs @@ -7,7 +7,7 @@ use tokio::sync::broadcast::error::SendError; use super::Event; -const CHANNEL_CAPACITY: usize = 1024; +const CHANNEL_CAPACITY: usize = 32768; /// A trait for sending sending.
#[cfg_attr(test, automock)] From 6fdbc47d23fb4d246284a27721235134ac34038f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 14 Apr 2025 11:32:50 +0100 Subject: [PATCH 452/802] fix: [#1449] don't stop stats listeners when lagged If the stats listener is lagged we continue processing new events even if metrics become imprecise after it. This is a temporary solution. See https://github.com/torrust/torrust-tracker/issues/1449. --- packages/http-tracker-core/src/lib.rs | 2 ++ .../src/statistics/event/listener.rs | 14 +++++++++++--- .../src/statistics/event/listener.rs | 14 +++++++++++--- .../src/statistics/event/listener.rs | 13 +++++++++++-- src/bootstrap/jobs/torrent_cleanup.rs | 2 +- 5 files changed, 36 insertions(+), 9 deletions(-) diff --git a/packages/http-tracker-core/src/lib.rs b/packages/http-tracker-core/src/lib.rs index 2260242e0..c4f131bcb 100644 --- a/packages/http-tracker-core/src/lib.rs +++ b/packages/http-tracker-core/src/lib.rs @@ -16,6 +16,8 @@ pub(crate) type CurrentClock = clock::Working; #[allow(dead_code)] pub(crate) type CurrentClock = clock::Stopped; +pub const HTTP_TRACKER_LOG_TARGET: &str = "HTTP TRACKER"; + #[cfg(test)] pub(crate) mod tests { use bittorrent_primitives::info_hash::InfoHash; diff --git a/packages/http-tracker-core/src/statistics/event/listener.rs b/packages/http-tracker-core/src/statistics/event/listener.rs index ca53a20bb..5e87b47df 100644 --- a/packages/http-tracker-core/src/statistics/event/listener.rs +++ b/packages/http-tracker-core/src/statistics/event/listener.rs @@ -4,15 +4,23 @@ use torrust_tracker_clock::clock::Time; use super::handler::handle_event; use crate::event::Event; use crate::statistics::repository::Repository; -use crate::CurrentClock; +use crate::{CurrentClock, HTTP_TRACKER_LOG_TARGET}; pub async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Repository) { loop { match receiver.recv().await { Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, Err(e) 
=> { - tracing::error!("Error receiving http tracker core event: {:?}", e); - break; + match e { + broadcast::error::RecvError::Closed => { + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Http core statistics receiver closed."); + break; + } + broadcast::error::RecvError::Lagged(n) => { + // From now on, metrics will be imprecise + tracing::warn!(target: HTTP_TRACKER_LOG_TARGET, "Http core statistics receiver lagged by {} events.", n); + } + } } } } diff --git a/packages/udp-tracker-core/src/statistics/event/listener.rs b/packages/udp-tracker-core/src/statistics/event/listener.rs index 8fc82fbcb..888fb8204 100644 --- a/packages/udp-tracker-core/src/statistics/event/listener.rs +++ b/packages/udp-tracker-core/src/statistics/event/listener.rs @@ -4,15 +4,23 @@ use torrust_tracker_clock::clock::Time; use super::handler::handle_event; use crate::event::Event; use crate::statistics::repository::Repository; -use crate::CurrentClock; +use crate::{CurrentClock, UDP_TRACKER_LOG_TARGET}; pub async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Repository) { loop { match receiver.recv().await { Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, Err(e) => { - tracing::error!("Error receiving udp tracker core event: {:?}", e); - break; + match e { + broadcast::error::RecvError::Closed => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp core statistics receiver closed."); + break; + } + broadcast::error::RecvError::Lagged(n) => { + // From now on, metrics will be imprecise + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp core statistics receiver lagged by {} events.", n); + } + } } } } diff --git a/packages/udp-tracker-server/src/statistics/event/listener.rs b/packages/udp-tracker-server/src/statistics/event/listener.rs index c50ce70c9..cf348ea17 100644 --- a/packages/udp-tracker-server/src/statistics/event/listener.rs +++ b/packages/udp-tracker-server/src/statistics/event/listener.rs @@ -1,3 +1,4 @@ +use 
bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use tokio::sync::broadcast; use torrust_tracker_clock::clock::Time; @@ -11,8 +12,16 @@ pub async fn dispatch_events(mut receiver: broadcast::Receiver, stats_rep match receiver.recv().await { Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, Err(e) => { - tracing::error!("Error receiving udp tracker server event: {:?}", e); - break; + match e { + broadcast::error::RecvError::Closed => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp server statistics receiver closed."); + break; + } + broadcast::error::RecvError::Lagged(n) => { + // From now on, metrics will be imprecise + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp server statistics receiver lagged by {} events.", n); + } + } } } } diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index 7085aa7e2..54b1eeef7 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -37,7 +37,7 @@ pub fn start_job(config: &Core, torrents_manager: &Arc) -> Join loop { tokio::select! { _ = tokio::signal::ctrl_c() => { - tracing::info!("Stopping torrent cleanup job.."); + tracing::info!("Stopping torrent cleanup job ..."); break; } _ = interval.tick() => { From bc9942f1823d16cc88c2aa43e3d0b39f0e0f2ee1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 14 Apr 2025 16:23:02 +0100 Subject: [PATCH 453/802] ci: [#1454] remove contract workflow Since we have moved most of the tests to workspace crates, the report only contains a few tests. Therefore, I think it's not useful anymore. It's consuming resources and making the execution of PR checks slower. 
--- .github/workflows/contract.yaml | 58 --------------------------------- 1 file changed, 58 deletions(-) delete mode 100644 .github/workflows/contract.yaml diff --git a/.github/workflows/contract.yaml b/.github/workflows/contract.yaml deleted file mode 100644 index 2777417e3..000000000 --- a/.github/workflows/contract.yaml +++ /dev/null @@ -1,58 +0,0 @@ -name: Contract - -on: - push: - pull_request: - -env: - CARGO_TERM_COLOR: always - -jobs: - contract: - name: Contract - runs-on: ubuntu-latest - - strategy: - matrix: - toolchain: [nightly, stable] - - steps: - - id: checkout - name: Checkout Repository - uses: actions/checkout@v4 - - - id: setup - name: Setup Toolchain - uses: dtolnay/rust-toolchain@stable - with: - toolchain: ${{ matrix.toolchain }} - components: llvm-tools-preview - - - id: cache - name: Enable Job Cache - uses: Swatinem/rust-cache@v2 - - - id: tools - name: Install Tools - uses: taiki-e/install-action@v2 - with: - tool: cargo-llvm-cov, cargo-nextest - - - id: pretty-test - name: Install pretty-test - run: cargo install cargo-pretty-test - - - id: contract - name: Run contract - run: | - cargo test --lib --bins - cargo pretty-test --lib --bins - - - id: summary - name: Generate contract Summary - run: | - echo "### Tracker Living Contract! 
:rocket:" >> $GITHUB_STEP_SUMMARY - cargo pretty-test --lib --bins --color=never >> $GITHUB_STEP_SUMMARY - echo '```console' >> $GITHUB_STEP_SUMMARY - echo "$OUTPUT" >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY From c4239ebd3a121e4760c6ae43dcdd6dc6c5efdf87 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 14 Apr 2025 18:06:33 +0100 Subject: [PATCH 454/802] fix: [#1452] increase IP bans reset interval to 24 hours --- packages/udp-tracker-server/src/server/launcher.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/udp-tracker-server/src/server/launcher.rs b/packages/udp-tracker-server/src/server/launcher.rs index 5de41066f..d62a4d04e 100644 --- a/packages/udp-tracker-server/src/server/launcher.rs +++ b/packages/udp-tracker-server/src/server/launcher.rs @@ -23,7 +23,7 @@ use crate::server::bound_socket::BoundSocket; use crate::server::processor::Processor; use crate::server::receiver::Receiver; -const IP_BANS_RESET_INTERVAL_IN_SECS: u64 = 3600; +const IP_BANS_RESET_INTERVAL_IN_SECS: u64 = 3600 * 24; const TYPE_STRING: &str = "udp_tracker"; /// A UDP server instance launcher. 
From 8f2def13925e1497c0cdf493fcf584cfab38315c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 15 Apr 2025 09:32:57 +0100 Subject: [PATCH 455/802] chore(deps): udpate dependencies ``` cargo update Updating crates.io index Locking 5 packages to latest compatible versions Updating anyhow v1.0.97 -> v1.0.98 Updating clap v4.5.35 -> v4.5.36 Updating clap_builder v4.5.35 -> v4.5.36 Updating h2 v0.4.8 -> v0.4.9 Updating libc v0.2.171 -> v0.2.172 ``` --- Cargo.lock | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c3fb651ef..e05894e3c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -131,9 +131,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.97" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" [[package]] name = "approx" @@ -1045,9 +1045,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.35" +version = "4.5.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8aa86934b44c19c50f87cc2790e19f54f7a67aedb64101c2e1a2e5ecfb73944" +checksum = "2df961d8c8a0d08aa9945718ccf584145eee3f3aa06cddbeac12933781102e04" dependencies = [ "clap_builder", "clap_derive", @@ -1055,9 +1055,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.35" +version = "4.5.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2414dbb2dd0695280da6ea9261e327479e9d37b0630f6b53ba2a11c60c679fd9" +checksum = "132dbda40fb6753878316a489d5a1242a8ef2f0d9e47ba01c951ea8aa7d013a5" dependencies = [ "anstream", "anstyle", @@ -1856,9 +1856,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" +checksum = "75249d144030531f8dee69fe9cea04d3edf809a017ae445e2abdff6629e86633" dependencies = [ "atomic-waker", "bytes", @@ -2412,9 +2412,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.171" +version = "0.2.172" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" +checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" [[package]] name = "libloading" From fc14a818c820bb4249a0b83ec0b5128e946a2686 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 15 Apr 2025 10:59:45 +0100 Subject: [PATCH 456/802] refactor: [#1445] new macro to create metric names --- packages/metrics/src/metric/name.rs | 59 +++++++++++++---------------- 1 file changed, 27 insertions(+), 32 deletions(-) diff --git a/packages/metrics/src/metric/name.rs b/packages/metrics/src/metric/name.rs index c904f34d3..453d5c777 100644 --- a/packages/metrics/src/metric/name.rs +++ b/packages/metrics/src/metric/name.rs @@ -14,11 +14,7 @@ impl MetricName { /// Panics if the provided name is empty. #[must_use] pub fn new(name: &str) -> Self { - assert!( - !name.is_empty(), - "Metric name cannot be empty. It must have at least one character." - ); - + assert!(!name.is_empty(), "Metric name cannot be empty."); Self(name.to_owned()) } } @@ -50,43 +46,42 @@ impl PrometheusSerializable for MetricName { } } +#[macro_export] +macro_rules! 
metric_name { + ("") => { + compile_error!("Metric name cannot be empty"); + }; + ($name:literal) => { + $crate::metric::name::MetricName::new($name) + }; +} + #[cfg(test)] mod tests { mod serialization_of_metric_name_to_prometheus { - use rstest::rstest; - - use crate::metric::MetricName; use crate::prometheus::PrometheusSerializable; - #[rstest] - #[case("valid name", "valid_name", "valid_name")] - #[case("leading underscore", "_leading_underscore", "_leading_underscore")] - #[case("leading colon", ":leading_colon", ":leading_colon")] - #[case("leading lowercase", "v123", "v123")] - #[case("leading uppercase", "V123", "V123")] - fn valid_names_in_prometheus(#[case] case: &str, #[case] input: &str, #[case] output: &str) { - assert_eq!(MetricName::new(input).to_prometheus(), output, "{case} failed: {input:?}"); - } - - #[rstest] - #[case("invalid start 1", "9invalid_start", "_invalid_start")] - #[case("invalid start 2", "@test", "_test")] - #[case("invalid dash", "invalid-char", "invalid_char")] - #[case("invalid spaces", "spaces are bad", "spaces_are_bad")] - #[case("invalid special chars", "a!b@c#d$e%f^g&h*i(j)", "a_b_c_d_e_f_g_h_i_j_")] - #[case("invalid slash", "my:metric/version", "my:metric_version")] - #[case("all invalid characters", "!@#$%^&*()", "__________")] - #[case("non_ascii_characters", "ñaca©", "_aca_")] - fn names_that_need_changes_in_prometheus(#[case] case: &str, #[case] input: &str, #[case] output: &str) { - assert_eq!(MetricName::new(input).to_prometheus(), output, "{case} failed: {input:?}"); + #[test] + fn valid_names_in_prometheus() { + assert_eq!(metric_name!("valid_name").to_prometheus(), "valid_name"); + assert_eq!(metric_name!("_leading_underscore").to_prometheus(), "_leading_underscore"); + assert_eq!(metric_name!(":leading_colon").to_prometheus(), ":leading_colon"); + assert_eq!(metric_name!("v123").to_prometheus(), "v123"); // leading lowercase + assert_eq!(metric_name!("V123").to_prometheus(), "V123"); // leading lowercase } #[test] 
- #[should_panic(expected = "Metric name cannot be empty. It must have at least one character.")] - fn empty_name() { - let _name = MetricName::new(""); + fn names_that_need_changes_in_prometheus() { + assert_eq!(metric_name!("9invalid_start").to_prometheus(), "_invalid_start"); + assert_eq!(metric_name!("@test").to_prometheus(), "_test"); + assert_eq!(metric_name!("invalid-char").to_prometheus(), "invalid_char"); + assert_eq!(metric_name!("spaces are bad").to_prometheus(), "spaces_are_bad"); + assert_eq!(metric_name!("a!b@c#d$e%f^g&h*i(j)").to_prometheus(), "a_b_c_d_e_f_g_h_i_j_"); + assert_eq!(metric_name!("my:metric/version").to_prometheus(), "my:metric_version"); + assert_eq!(metric_name!("!@#$%^&*()").to_prometheus(), "__________"); + assert_eq!(metric_name!("ñaca©").to_prometheus(), "_aca_"); } } } From 5497970f86edbb6da3ae327d29e26eabd4c6b7bc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 15 Apr 2025 11:25:01 +0100 Subject: [PATCH 457/802] refactor: [#1445] replace metric name constructor by macro --- .../src/statistics/event/handler.rs | 6 +- .../http-tracker-core/src/statistics/mod.rs | 4 +- packages/metrics/src/metric/mod.rs | 17 ++-- packages/metrics/src/metric/name.rs | 3 + packages/metrics/src/metric_collection.rs | 79 +++++++++---------- .../src/statistics/event/handler.rs | 8 +- .../udp-tracker-core/src/statistics/mod.rs | 4 +- .../src/statistics/event/handler.rs | 24 +++--- .../udp-tracker-server/src/statistics/mod.rs | 16 ++-- 9 files changed, 79 insertions(+), 82 deletions(-) diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index 0baec1cd9..6e37b0209 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -1,7 +1,7 @@ use std::net::IpAddr; use torrust_tracker_metrics::label::{LabelName, LabelSet, LabelValue}; -use torrust_tracker_metrics::metric::MetricName; +use 
torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; @@ -32,7 +32,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura label_set.upsert(LabelName::new("request_kind"), LabelValue::new("announce")); stats_repository - .increase_counter(&MetricName::new(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) + .increase_counter(&metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) .await; } Event::TcpScrape { connection } => { @@ -53,7 +53,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura label_set.upsert(LabelName::new("request_kind"), LabelValue::new("scrape")); stats_repository - .increase_counter(&MetricName::new(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) + .increase_counter(&metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) .await; } } diff --git a/packages/http-tracker-core/src/statistics/mod.rs b/packages/http-tracker-core/src/statistics/mod.rs index 026c435af..a5d6d37a5 100644 --- a/packages/http-tracker-core/src/statistics/mod.rs +++ b/packages/http-tracker-core/src/statistics/mod.rs @@ -7,7 +7,7 @@ pub mod setup; use metrics::Metrics; use torrust_tracker_metrics::metric::description::MetricDescription; -use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_name; use torrust_tracker_metrics::unit::Unit; const HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL: &str = "http_tracker_core_requests_received_total"; @@ -17,7 +17,7 @@ pub fn describe_metrics() -> Metrics { let mut metrics = Metrics::default(); metrics.metric_collection.describe_counter( - &MetricName::new(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), Some(Unit::Count), Some(MetricDescription::new("Total number of HTTP requests received")), ); diff --git a/packages/metrics/src/metric/mod.rs 
b/packages/metrics/src/metric/mod.rs index edea035bb..95e35b520 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -87,11 +87,12 @@ mod tests { use super::super::*; use crate::gauge::Gauge; use crate::label::{LabelName, LabelValue}; + use crate::metric_name; use crate::sample::Sample; #[test] fn it_should_be_empty_when_it_does_not_have_any_sample() { - let name = MetricName::new("test_metric"); + let name = metric_name!("test_metric"); let samples = SampleCollection::::default(); @@ -103,7 +104,7 @@ mod tests { fn counter_metric_with_one_sample() -> Metric { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); - let name = MetricName::new("test_metric"); + let name = metric_name!("test_metric"); let label_set: LabelSet = [(LabelName::new("server_binding_protocol"), LabelValue::new("http"))].into(); @@ -119,7 +120,7 @@ mod tests { #[test] fn it_should_return_zero_number_of_samples_for_an_empty_metric() { - let name = MetricName::new("test_metric"); + let name = metric_name!("test_metric"); let samples = SampleCollection::::default(); @@ -133,11 +134,12 @@ mod tests { use super::super::*; use crate::counter::Counter; use crate::label::{LabelName, LabelValue}; + use crate::metric_name; use crate::sample::Sample; #[test] fn it_should_be_created_from_its_name_and_a_collection_of_samples() { - let name = MetricName::new("test_metric"); + let name = metric_name!("test_metric"); let samples = SampleCollection::::default(); @@ -148,7 +150,7 @@ mod tests { fn it_should_allow_incrementing_a_sample() { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); - let name = MetricName::new("test_metric"); + let name = metric_name!("test_metric"); let label_set: LabelSet = [(LabelName::new("server_binding_protocol"), LabelValue::new("http"))].into(); @@ -166,11 +168,12 @@ mod tests { use super::super::*; use crate::gauge::Gauge; use crate::label::{LabelName, LabelValue}; + use crate::metric_name; use crate::sample::Sample; 
#[test] fn it_should_be_created_from_its_name_and_a_collection_of_samples() { - let name = MetricName::new("test_metric"); + let name = metric_name!("test_metric"); let samples = SampleCollection::::default(); @@ -181,7 +184,7 @@ mod tests { fn it_should_allow_setting_a_sample() { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); - let name = MetricName::new("test_metric"); + let name = metric_name!("test_metric"); let label_set: LabelSet = [(LabelName::new("server_binding_protocol"), LabelValue::new("http"))].into(); diff --git a/packages/metrics/src/metric/name.rs b/packages/metrics/src/metric/name.rs index 453d5c777..41f5e7058 100644 --- a/packages/metrics/src/metric/name.rs +++ b/packages/metrics/src/metric/name.rs @@ -54,6 +54,9 @@ macro_rules! metric_name { ($name:literal) => { $crate::metric::name::MetricName::new($name) }; + ($name:ident) => { + $crate::metric::name::MetricName::new($name) + }; } #[cfg(test)] diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index d0ed96554..eb75e5c77 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -315,6 +315,7 @@ mod tests { use super::*; use crate::label::{LabelName, LabelValue}; + use crate::metric_name; use crate::sample::Sample; use crate::tests::{format_prometheus_output, sort_lines}; @@ -355,11 +356,11 @@ mod tests { MetricCollection::new( MetricKindCollection::new(vec![Metric::new( - MetricName::new("http_tracker_core_announce_requests_received_total"), + metric_name!("http_tracker_core_announce_requests_received_total"), SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set_1.clone())]), )]), MetricKindCollection::new(vec![Metric::new( - MetricName::new("udp_tracker_server_performance_avg_announce_processing_time_ns"), + metric_name!("udp_tracker_server_performance_avg_announce_processing_time_ns"), SampleCollection::new(vec![Sample::new(Gauge::new(1.0), time, 
label_set_1.clone())]), )]), ) @@ -434,15 +435,9 @@ mod tests { #[test] #[should_panic(expected = "Metric names must be unique across counters and gauges")] fn it_should_not_allow_duplicate_names_across_types() { - let counter = MetricKindCollection::new(vec![Metric::new( - MetricName::new("test_metric"), - SampleCollection::new(vec![]), - )]); + let counter = MetricKindCollection::new(vec![Metric::new(metric_name!("test_metric"), SampleCollection::new(vec![]))]); - let gauge = MetricKindCollection::new(vec![Metric::new( - MetricName::new("test_metric"), - SampleCollection::new(vec![]), - )]); + let gauge = MetricKindCollection::new(vec![Metric::new(metric_name!("test_metric"), SampleCollection::new(vec![]))]); let _unused = MetricCollection::new(counter, gauge); } @@ -455,10 +450,10 @@ mod tests { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); // First create a counter - collection.increase_counter(&MetricName::new("test_metric"), &label_set, time); + collection.increase_counter(&metric_name!("test_metric"), &label_set, time); // Then try to create a gauge with the same name - this should panic - collection.set_gauge(&MetricName::new("test_metric"), &label_set, 1.0, time); + collection.set_gauge(&metric_name!("test_metric"), &label_set, 1.0, time); } #[test] @@ -469,15 +464,15 @@ mod tests { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); // First set the gauge - collection.set_gauge(&MetricName::new("test_metric"), &label_set, 1.0, time); + collection.set_gauge(&metric_name!("test_metric"), &label_set, 1.0, time); // Then try to create a counter with the same name - this should panic - collection.increase_counter(&MetricName::new("test_metric"), &label_set, time); + collection.increase_counter(&metric_name!("test_metric"), &label_set, time); } #[test] fn it_should_allow_serializing_to_json() { - // todo: this test does work with metric with multiple samples becuase + // todo: this test does work with metric with multiple samples because 
// samples are not serialized in the same order as they are created. let (metric_collection, expected_json, _expected_prometheus) = MetricCollectionFixture::default().deconstruct(); @@ -528,7 +523,7 @@ mod tests { let metric_collection = MetricCollection::new( MetricKindCollection::new(vec![Metric::new( - MetricName::new("http_tracker_core_announce_requests_received_total"), + metric_name!("http_tracker_core_announce_requests_received_total"), SampleCollection::new(vec![ Sample::new(Counter::new(1), time, label_set_1.clone()), Sample::new(Counter::new(2), time, label_set_2.clone()), @@ -557,8 +552,8 @@ mod tests { let mut counters = MetricKindCollection::new(vec![]); let mut gauges = MetricKindCollection::new(vec![]); - counters.ensure_metric_exists(&MetricName::new("test_counter")); - gauges.ensure_metric_exists(&MetricName::new("test_gauge")); + counters.ensure_metric_exists(&metric_name!("test_counter")); + gauges.ensure_metric_exists(&metric_name!("test_gauge")); let metric_collection = MetricCollection::new(counters, gauges); @@ -582,17 +577,17 @@ mod tests { let mut metric_collection = MetricCollection::new( MetricKindCollection::new(vec![Metric::new( - MetricName::new("test_counter"), + metric_name!("test_counter"), SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]), )]), MetricKindCollection::new(vec![]), ); - metric_collection.increase_counter(&MetricName::new("test_counter"), &label_set, time); - metric_collection.increase_counter(&MetricName::new("test_counter"), &label_set, time); + metric_collection.increase_counter(&metric_name!("test_counter"), &label_set, time); + metric_collection.increase_counter(&metric_name!("test_counter"), &label_set, time); assert_eq!( - metric_collection.get_counter_value(&MetricName::new("test_counter"), &label_set), + metric_collection.get_counter_value(&metric_name!("test_counter"), &label_set), Counter::new(2) ); } @@ -605,11 +600,11 @@ mod tests { let mut metric_collection = 
MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); - metric_collection.increase_counter(&MetricName::new("test_counter"), &label_set, time); - metric_collection.increase_counter(&MetricName::new("test_counter"), &label_set, time); + metric_collection.increase_counter(&metric_name!("test_counter"), &label_set, time); + metric_collection.increase_counter(&metric_name!("test_counter"), &label_set, time); assert_eq!( - metric_collection.get_counter_value(&MetricName::new("test_counter"), &label_set), + metric_collection.get_counter_value(&metric_name!("test_counter"), &label_set), Counter::new(2) ); } @@ -621,10 +616,10 @@ mod tests { let mut metric_collection = MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); - metric_collection.ensure_counter_exists(&MetricName::new("test_counter")); + metric_collection.ensure_counter_exists(&metric_name!("test_counter")); assert_eq!( - metric_collection.get_counter_value(&MetricName::new("test_counter"), &label_set), + metric_collection.get_counter_value(&metric_name!("test_counter"), &label_set), Counter::default() ); } @@ -636,10 +631,10 @@ mod tests { let mut metric_collection = MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); - metric_collection.describe_counter(&MetricName::new("test_counter"), None, None); + metric_collection.describe_counter(&metric_name!("test_counter"), None, None); assert_eq!( - metric_collection.get_counter_value(&MetricName::new("test_counter"), &label_set), + metric_collection.get_counter_value(&metric_name!("test_counter"), &label_set), Counter::default() ); } @@ -652,11 +647,11 @@ mod tests { let _unused = MetricKindCollection::new(vec![ Metric::new( - MetricName::new("test_counter"), + metric_name!("test_counter"), SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]), ), Metric::new( - MetricName::new("test_counter"), + 
metric_name!("test_counter"), SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]), ), ]); @@ -679,15 +674,15 @@ mod tests { let mut metric_collection = MetricCollection::new( MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![Metric::new( - MetricName::new("test_gauge"), + metric_name!("test_gauge"), SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]), )]), ); - metric_collection.set_gauge(&MetricName::new("test_gauge"), &label_set, 1.0, time); + metric_collection.set_gauge(&metric_name!("test_gauge"), &label_set, 1.0, time); assert_eq!( - metric_collection.get_gauge_value(&MetricName::new("test_gauge"), &label_set), + metric_collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), Gauge::new(1.0) ); } @@ -700,10 +695,10 @@ mod tests { let mut metric_collection = MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); - metric_collection.set_gauge(&MetricName::new("test_gauge"), &label_set, 1.0, time); + metric_collection.set_gauge(&metric_name!("test_gauge"), &label_set, 1.0, time); assert_eq!( - metric_collection.get_gauge_value(&MetricName::new("test_gauge"), &label_set), + metric_collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), Gauge::new(1.0) ); } @@ -715,10 +710,10 @@ mod tests { let mut metric_collection = MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); - metric_collection.ensure_gauge_exists(&MetricName::new("test_gauge")); + metric_collection.ensure_gauge_exists(&metric_name!("test_gauge")); assert_eq!( - metric_collection.get_gauge_value(&MetricName::new("test_gauge"), &label_set), + metric_collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), Gauge::default() ); } @@ -730,10 +725,10 @@ mod tests { let mut metric_collection = MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); - 
metric_collection.describe_gauge(&MetricName::new("test_gauge"), None, None); + metric_collection.describe_gauge(&metric_name!("test_gauge"), None, None); assert_eq!( - metric_collection.get_gauge_value(&MetricName::new("test_gauge"), &label_set), + metric_collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), Gauge::default() ); } @@ -746,11 +741,11 @@ mod tests { let _unused = MetricKindCollection::new(vec![ Metric::new( - MetricName::new("test_gauge"), + metric_name!("test_gauge"), SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]), ), Metric::new( - MetricName::new("test_gauge"), + metric_name!("test_gauge"), SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]), ), ]); diff --git a/packages/udp-tracker-core/src/statistics/event/handler.rs b/packages/udp-tracker-core/src/statistics/event/handler.rs index 59c382755..dcb512783 100644 --- a/packages/udp-tracker-core/src/statistics/event/handler.rs +++ b/packages/udp-tracker-core/src/statistics/event/handler.rs @@ -1,5 +1,5 @@ use torrust_tracker_metrics::label::{LabelName, LabelSet, LabelValue}; -use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; @@ -29,7 +29,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura label_set.upsert(LabelName::new("request_kind"), LabelValue::new("connect")); stats_repository - .increase_counter(&MetricName::new(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) + .increase_counter(&metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) .await; } Event::UdpAnnounce { context } => { @@ -50,7 +50,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura label_set.upsert(LabelName::new("request_kind"), LabelValue::new("announce")); stats_repository - 
.increase_counter(&MetricName::new(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) + .increase_counter(&metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) .await; } Event::UdpScrape { context } => { @@ -71,7 +71,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura label_set.upsert(LabelName::new("request_kind"), LabelValue::new("scrape")); stats_repository - .increase_counter(&MetricName::new(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) + .increase_counter(&metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) .await; } } diff --git a/packages/udp-tracker-core/src/statistics/mod.rs b/packages/udp-tracker-core/src/statistics/mod.rs index bc4d8d836..40a30f51b 100644 --- a/packages/udp-tracker-core/src/statistics/mod.rs +++ b/packages/udp-tracker-core/src/statistics/mod.rs @@ -7,7 +7,7 @@ pub mod setup; use metrics::Metrics; use torrust_tracker_metrics::metric::description::MetricDescription; -use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_name; use torrust_tracker_metrics::unit::Unit; const UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL: &str = "udp_tracker_core_requests_received_total"; @@ -17,7 +17,7 @@ pub fn describe_metrics() -> Metrics { let mut metrics = Metrics::default(); metrics.metric_collection.describe_counter( - &MetricName::new(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), Some(Unit::Count), Some(MetricDescription::new("Total number of UDP requests received")), ); diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index 4c10576c0..721d415ea 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -1,5 +1,5 @@ use torrust_tracker_metrics::label::{LabelName, LabelSet, LabelValue}; -use 
torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::{Event, UdpRequestKind, UdpResponseKind}; @@ -25,7 +25,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics stats_repository .increase_counter( - &MetricName::new(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &LabelSet::from(context), now, ) @@ -38,7 +38,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics stats_repository .increase_counter( - &MetricName::new(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), &LabelSet::from(context), now, ) @@ -58,7 +58,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics stats_repository .increase_counter( - &MetricName::new(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &LabelSet::from(context), now, ) @@ -100,7 +100,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura label_set.upsert(LabelName::new("kind"), LabelValue::new(&kind.to_string())); stats_repository - .increase_counter(&MetricName::new(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &label_set, now) + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &label_set, now) .await; } Event::UdpResponseSent { @@ -132,7 +132,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura stats_repository .set_gauge( - &MetricName::new(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), &label_set, new_avg, now, @@ -153,7 +153,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura stats_repository 
.set_gauge( - &MetricName::new(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), &label_set, new_avg, now, @@ -174,7 +174,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura stats_repository .set_gauge( - &MetricName::new(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), &label_set, new_avg, now, @@ -197,7 +197,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura label_set.upsert(LabelName::new("result"), result_label_value); stats_repository - .increase_counter(&MetricName::new(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &label_set, now) + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &label_set, now) .await; } Event::UdpError { context } => { @@ -213,11 +213,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics stats_repository - .increase_counter( - &MetricName::new(UDP_TRACKER_SERVER_ERRORS_TOTAL), - &LabelSet::from(context), - now, - ) + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &LabelSet::from(context), now) .await; } } diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index 523cd4bac..4eea13224 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -7,7 +7,7 @@ pub mod setup; use metrics::Metrics; use torrust_tracker_metrics::metric::description::MetricDescription; -use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_name; use torrust_tracker_metrics::unit::Unit; const UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL: &str = "udp_tracker_server_requests_aborted_total"; @@ -23,43 +23,43 @@ pub fn describe_metrics() -> Metrics { let mut metrics = Metrics::default(); 
metrics.metric_collection.describe_counter( - &MetricName::new(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), Some(Unit::Count), Some(MetricDescription::new("Total number of UDP requests aborted")), ); metrics.metric_collection.describe_counter( - &MetricName::new(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), Some(Unit::Count), Some(MetricDescription::new("Total number of UDP requests banned")), ); metrics.metric_collection.describe_counter( - &MetricName::new(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), Some(Unit::Count), Some(MetricDescription::new("Total number of UDP requests received")), ); metrics.metric_collection.describe_counter( - &MetricName::new(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), Some(Unit::Count), Some(MetricDescription::new("Total number of UDP requests accepted")), ); metrics.metric_collection.describe_counter( - &MetricName::new(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), + &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), Some(Unit::Count), Some(MetricDescription::new("Total number of UDP responses sent")), ); metrics.metric_collection.describe_counter( - &MetricName::new(UDP_TRACKER_SERVER_ERRORS_TOTAL), + &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), Some(Unit::Count), Some(MetricDescription::new("Total number of errors processing UDP requests")), ); metrics.metric_collection.describe_gauge( - &MetricName::new(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), Some(Unit::Nanoseconds), Some(MetricDescription::new( "Average time to process a UDP connect request in nanoseconds", From 7a24f855c1ac129bba890a72a9ccf9355bba2b85 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 15 Apr 2025 11:38:53 +0100 Subject: [PATCH 458/802] 
refactor: [#1445] new macro to create metric label names --- packages/metrics/src/label/name.rs | 27 ++++++++++++++++++--------- packages/metrics/src/metric/name.rs | 7 +++++++ 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/packages/metrics/src/label/name.rs b/packages/metrics/src/label/name.rs index 22e75572f..194aeb2b3 100644 --- a/packages/metrics/src/label/name.rs +++ b/packages/metrics/src/label/name.rs @@ -14,11 +14,7 @@ impl LabelName { /// Panics if the provided name is empty. #[must_use] pub fn new(name: &str) -> Self { - assert!( - !name.is_empty(), - "Label name cannot be empty. It must have at least one character." - ); - + assert!(!name.is_empty(), "Label name cannot be empty."); Self(name.to_owned()) } } @@ -69,6 +65,19 @@ impl PrometheusSerializable for LabelName { } } } + +#[macro_export] +macro_rules! label_name { + ("") => { + compile_error!("Label name cannot be empty"); + }; + ($name:literal) => { + $crate::label::name::LabelName::new($name) + }; + ($name:ident) => { + $crate::label::name::LabelName::new($name) + }; +} #[cfg(test)] mod tests { mod serialization_of_label_name_to_prometheus { @@ -83,7 +92,7 @@ mod tests { #[case("3 leading lowercase", "v123", "v123")] #[case("4 leading uppercase", "V123", "V123")] fn valid_names_in_prometheus(#[case] case: &str, #[case] input: &str, #[case] output: &str) { - assert_eq!(LabelName::new(input).to_prometheus(), output, "{case} failed: {input:?}"); + assert_eq!(label_name!(input).to_prometheus(), output, "{case} failed: {input:?}"); } #[rstest] @@ -96,7 +105,7 @@ mod tests { #[case("7 all invalid characters", "!@#$%^&*()", "__________")] #[case("8 non_ascii_characters", "ñaca©", "_aca_")] fn names_that_need_changes_in_prometheus(#[case] case: &str, #[case] input: &str, #[case] output: &str) { - assert_eq!(LabelName::new(input).to_prometheus(), output, "{case} failed: {input:?}"); + assert_eq!(label_name!(input).to_prometheus(), output, "{case} failed: {input:?}"); } #[rstest] @@ -105,11 
+114,11 @@ mod tests { #[case("3 processed to double underscore", "^^name", "___name")] #[case("4 processed to double underscore after first char", "0__name", "___name")] fn names_starting_with_double_underscore(#[case] case: &str, #[case] input: &str, #[case] output: &str) { - assert_eq!(LabelName::new(input).to_prometheus(), output, "{case} failed: {input:?}"); + assert_eq!(label_name!(input).to_prometheus(), output, "{case} failed: {input:?}"); } #[test] - #[should_panic(expected = "Label name cannot be empty. It must have at least one character.")] + #[should_panic(expected = "Label name cannot be empty.")] fn empty_name() { let _name = LabelName::new(""); } diff --git a/packages/metrics/src/metric/name.rs b/packages/metrics/src/metric/name.rs index 41f5e7058..09c8c9e6d 100644 --- a/packages/metrics/src/metric/name.rs +++ b/packages/metrics/src/metric/name.rs @@ -64,6 +64,7 @@ mod tests { mod serialization_of_metric_name_to_prometheus { + use crate::metric::name::MetricName; use crate::prometheus::PrometheusSerializable; #[test] @@ -86,5 +87,11 @@ mod tests { assert_eq!(metric_name!("!@#$%^&*()").to_prometheus(), "__________"); assert_eq!(metric_name!("ñaca©").to_prometheus(), "_aca_"); } + + #[test] + #[should_panic(expected = "Metric name cannot be empty.")] + fn empty_name() { + let _name = MetricName::new(""); + } } } From 4d68267fff03505c9de1aca279a45a07b0ba32cc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 15 Apr 2025 11:51:21 +0100 Subject: [PATCH 459/802] refactor: [#1445] replace metric label name constructor by macro --- packages/http-tracker-core/src/event/mod.rs | 9 ++-- .../src/statistics/event/handler.rs | 8 ++-- packages/metrics/src/label/mod.rs | 4 +- packages/metrics/src/label/pair.rs | 6 +-- packages/metrics/src/label/set.rs | 33 ++++++------- packages/metrics/src/metric/mod.rs | 18 ++++---- packages/metrics/src/metric_collection.rs | 46 +++++++++---------- packages/udp-tracker-core/src/event/mod.rs | 9 ++-- 
.../src/statistics/event/handler.rs | 10 ++-- packages/udp-tracker-server/src/event/mod.rs | 9 ++-- .../src/statistics/event/handler.rs | 16 +++---- 11 files changed, 86 insertions(+), 82 deletions(-) diff --git a/packages/http-tracker-core/src/event/mod.rs b/packages/http-tracker-core/src/event/mod.rs index d235c179f..ae997156a 100644 --- a/packages/http-tracker-core/src/event/mod.rs +++ b/packages/http-tracker-core/src/event/mod.rs @@ -1,6 +1,7 @@ use std::net::{IpAddr, SocketAddr}; -use torrust_tracker_metrics::label::{LabelName, LabelSet, LabelValue}; +use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use torrust_tracker_metrics::label_name; use torrust_tracker_primitives::service_binding::ServiceBinding; pub mod sender; @@ -65,15 +66,15 @@ impl From for LabelSet { fn from(connection_context: ConnectionContext) -> Self { LabelSet::from([ ( - LabelName::new("server_binding_protocol"), + label_name!("server_binding_protocol"), LabelValue::new(&connection_context.server.service_binding.protocol().to_string()), ), ( - LabelName::new("server_binding_ip"), + label_name!("server_binding_ip"), LabelValue::new(&connection_context.server.service_binding.bind_address().ip().to_string()), ), ( - LabelName::new("server_binding_port"), + label_name!("server_binding_port"), LabelValue::new(&connection_context.server.service_binding.bind_address().port().to_string()), ), ]) diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index 6e37b0209..cea224d04 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -1,7 +1,7 @@ use std::net::IpAddr; -use torrust_tracker_metrics::label::{LabelName, LabelSet, LabelValue}; -use torrust_tracker_metrics::metric_name; +use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use torrust_tracker_metrics::{label_name, metric_name}; use 
torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; @@ -29,7 +29,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics let mut label_set = LabelSet::from(connection); - label_set.upsert(LabelName::new("request_kind"), LabelValue::new("announce")); + label_set.upsert(label_name!("request_kind"), LabelValue::new("announce")); stats_repository .increase_counter(&metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) @@ -50,7 +50,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics let mut label_set = LabelSet::from(connection); - label_set.upsert(LabelName::new("request_kind"), LabelValue::new("scrape")); + label_set.upsert(label_name!("request_kind"), LabelValue::new("scrape")); stats_repository .increase_counter(&metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) diff --git a/packages/metrics/src/label/mod.rs b/packages/metrics/src/label/mod.rs index b5fd3b745..880fdbbb1 100644 --- a/packages/metrics/src/label/mod.rs +++ b/packages/metrics/src/label/mod.rs @@ -1,7 +1,7 @@ -mod name; +pub mod name; mod pair; mod set; -mod value; +pub mod value; pub type LabelName = name::LabelName; pub type LabelValue = value::LabelValue; diff --git a/packages/metrics/src/label/pair.rs b/packages/metrics/src/label/pair.rs index c89c726bd..858902451 100644 --- a/packages/metrics/src/label/pair.rs +++ b/packages/metrics/src/label/pair.rs @@ -13,16 +13,16 @@ impl PrometheusSerializabl #[cfg(test)] mod tests { mod serialization_of_label_pair_to_prometheus { - use super::super::LabelName; use crate::label::LabelValue; + use crate::label_name; use crate::prometheus::PrometheusSerializable; #[test] fn test_label_pair_serialization_to_prometheus() { - let label_pair = (LabelName::new("label_name"), LabelValue::new("value")); + let label_pair = (label_name!("label_name"), LabelValue::new("value")); 
assert_eq!(label_pair.to_prometheus(), r#"label_name="value""#); - let label_pair = (&LabelName::new("label_name"), &LabelValue::new("value")); + let label_pair = (&label_name!("label_name"), &LabelValue::new("value")); assert_eq!(label_pair.to_prometheus(), r#"label_name="value""#); } } diff --git a/packages/metrics/src/label/set.rs b/packages/metrics/src/label/set.rs index f46b01095..2b6334fc7 100644 --- a/packages/metrics/src/label/set.rs +++ b/packages/metrics/src/label/set.rs @@ -180,6 +180,7 @@ mod tests { use super::{LabelName, LabelValue}; use crate::label::LabelSet; + use crate::label_name; use crate::prometheus::PrometheusSerializable; fn sample_vec_of_label_pairs() -> Vec<(LabelName, LabelValue)> { @@ -188,9 +189,9 @@ mod tests { fn sample_array_of_label_pairs() -> [(LabelName, LabelValue); 3] { [ - (LabelName::new("server_service_binding_protocol"), LabelValue::new("http")), - (LabelName::new("server_service_binding_ip"), LabelValue::new("0.0.0.0")), - (LabelName::new("server_service_binding_port"), LabelValue::new("7070")), + (label_name!("server_service_binding_protocol"), LabelValue::new("http")), + (label_name!("server_service_binding_ip"), LabelValue::new("0.0.0.0")), + (label_name!("server_service_binding_port"), LabelValue::new("7070")), ] } @@ -232,12 +233,12 @@ mod tests { #[test] fn it_should_allow_instantiation_from_a_label_pair() { - let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); assert_eq!( label_set, LabelSet { - items: BTreeMap::from([(LabelName::new("label_name"), LabelValue::new("value"))]) + items: BTreeMap::from([(label_name!("label_name"), LabelValue::new("value"))]) } ); } @@ -246,10 +247,10 @@ mod tests { fn it_should_allow_inserting_a_new_label_pair() { let mut label_set = LabelSet::default(); - label_set.upsert(LabelName::new("label_name"), LabelValue::new("value")); + 
label_set.upsert(label_name!("label_name"), LabelValue::new("value")); assert_eq!( - label_set.items.get(&LabelName::new("label_name")).unwrap(), + label_set.items.get(&label_name!("label_name")).unwrap(), &LabelValue::new("value") ); } @@ -258,18 +259,18 @@ mod tests { fn it_should_allow_updating_a_label_value() { let mut label_set = LabelSet::default(); - label_set.upsert(LabelName::new("label_name"), LabelValue::new("old value")); - label_set.upsert(LabelName::new("label_name"), LabelValue::new("new value")); + label_set.upsert(label_name!("label_name"), LabelValue::new("old value")); + label_set.upsert(label_name!("label_name"), LabelValue::new("new value")); assert_eq!( - label_set.items.get(&LabelName::new("label_name")).unwrap(), + label_set.items.get(&label_name!("label_name")).unwrap(), &LabelValue::new("new value") ); } #[test] fn it_should_allow_serializing_to_json_as_an_array_of_label_objects() { - let label_set = LabelSet::from((LabelName::new("label_name"), LabelValue::new("label value"))); + let label_set = LabelSet::from((label_name!("label_name"), LabelValue::new("label value"))); let json = serde_json::to_string(&label_set).unwrap(); @@ -307,13 +308,13 @@ mod tests { assert_eq!( label_set, - LabelSet::from((LabelName::new("label_name"), LabelValue::new("label value"))) + LabelSet::from((label_name!("label_name"), LabelValue::new("label value"))) ); } #[test] fn it_should_allow_serializing_to_prometheus_format() { - let label_set = LabelSet::from((LabelName::new("label_name"), LabelValue::new("label value"))); + let label_set = LabelSet::from((label_name!("label_name"), LabelValue::new("label value"))); assert_eq!(label_set.to_prometheus(), r#"{label_name="label value"}"#); } @@ -321,8 +322,8 @@ mod tests { #[test] fn it_should_alphabetically_order_labels_in_prometheus_format() { let label_set = LabelSet::from([ - (LabelName::new("b_label_name"), LabelValue::new("b label value")), - (LabelName::new("a_label_name"), LabelValue::new("a label 
value")), + (label_name!("b_label_name"), LabelValue::new("b label value")), + (label_name!("a_label_name"), LabelValue::new("a label value")), ]); assert_eq!( @@ -333,7 +334,7 @@ mod tests { #[test] fn it_should_allow_displaying() { - let label_set = LabelSet::from((LabelName::new("label_name"), LabelValue::new("label value"))); + let label_set = LabelSet::from((label_name!("label_name"), LabelValue::new("label value"))); assert_eq!(label_set.to_string(), r#"{label_name="label value"}"#); } diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index 95e35b520..777981fd8 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -86,9 +86,9 @@ mod tests { mod for_generic_metrics { use super::super::*; use crate::gauge::Gauge; - use crate::label::{LabelName, LabelValue}; - use crate::metric_name; + use crate::label::LabelValue; use crate::sample::Sample; + use crate::{label_name, metric_name}; #[test] fn it_should_be_empty_when_it_does_not_have_any_sample() { @@ -106,7 +106,7 @@ mod tests { let name = metric_name!("test_metric"); - let label_set: LabelSet = [(LabelName::new("server_binding_protocol"), LabelValue::new("http"))].into(); + let label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); let samples = SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set.clone())]); @@ -133,9 +133,9 @@ mod tests { mod for_counter_metrics { use super::super::*; use crate::counter::Counter; - use crate::label::{LabelName, LabelValue}; - use crate::metric_name; + use crate::label::LabelValue; use crate::sample::Sample; + use crate::{label_name, metric_name}; #[test] fn it_should_be_created_from_its_name_and_a_collection_of_samples() { @@ -152,7 +152,7 @@ mod tests { let name = metric_name!("test_metric"); - let label_set: LabelSet = [(LabelName::new("server_binding_protocol"), LabelValue::new("http"))].into(); + let label_set: LabelSet = 
[(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); let samples = SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set.clone())]); @@ -167,9 +167,9 @@ mod tests { use super::super::*; use crate::gauge::Gauge; - use crate::label::{LabelName, LabelValue}; - use crate::metric_name; + use crate::label::LabelValue; use crate::sample::Sample; + use crate::{label_name, metric_name}; #[test] fn it_should_be_created_from_its_name_and_a_collection_of_samples() { @@ -186,7 +186,7 @@ mod tests { let name = metric_name!("test_metric"); - let label_set: LabelSet = [(LabelName::new("server_binding_protocol"), LabelValue::new("http"))].into(); + let label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); let samples = SampleCollection::new(vec![Sample::new(Gauge::new(1.0), time, label_set.clone())]); diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index eb75e5c77..5b3f92a19 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -314,10 +314,10 @@ mod tests { use pretty_assertions::assert_eq; use super::*; - use crate::label::{LabelName, LabelValue}; - use crate::metric_name; + use crate::label::LabelValue; use crate::sample::Sample; use crate::tests::{format_prometheus_output, sort_lines}; + use crate::{label_name, metric_name}; /// Fixture for testing serialization and deserialization of `MetricCollection`. 
/// @@ -348,9 +348,9 @@ mod tests { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); let label_set_1: LabelSet = [ - (LabelName::new("server_binding_protocol"), LabelValue::new("http")), - (LabelName::new("server_binding_ip"), LabelValue::new("0.0.0.0")), - (LabelName::new("server_binding_port"), LabelValue::new("7070")), + (label_name!("server_binding_protocol"), LabelValue::new("http")), + (label_name!("server_binding_ip"), LabelValue::new("0.0.0.0")), + (label_name!("server_binding_port"), LabelValue::new("7070")), ] .into(); @@ -508,16 +508,16 @@ mod tests { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); let label_set_1: LabelSet = [ - (LabelName::new("server_binding_protocol"), LabelValue::new("http")), - (LabelName::new("server_binding_ip"), LabelValue::new("0.0.0.0")), - (LabelName::new("server_binding_port"), LabelValue::new("7070")), + (label_name!("server_binding_protocol"), LabelValue::new("http")), + (label_name!("server_binding_ip"), LabelValue::new("0.0.0.0")), + (label_name!("server_binding_port"), LabelValue::new("7070")), ] .into(); let label_set_2: LabelSet = [ - (LabelName::new("server_binding_protocol"), LabelValue::new("http")), - (LabelName::new("server_binding_ip"), LabelValue::new("0.0.0.0")), - (LabelName::new("server_binding_port"), LabelValue::new("7171")), + (label_name!("server_binding_protocol"), LabelValue::new("http")), + (label_name!("server_binding_ip"), LabelValue::new("0.0.0.0")), + (label_name!("server_binding_port"), LabelValue::new("7171")), ] .into(); @@ -567,13 +567,13 @@ mod tests { use pretty_assertions::assert_eq; use super::*; - use crate::label::{LabelName, LabelValue}; + use crate::label::LabelValue; use crate::sample::Sample; #[test] fn it_should_increase_a_preexistent_counter() { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); - let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + let label_set: LabelSet = (label_name!("label_name"), 
LabelValue::new("value")).into(); let mut metric_collection = MetricCollection::new( MetricKindCollection::new(vec![Metric::new( @@ -595,7 +595,7 @@ mod tests { #[test] fn it_should_automatically_create_a_counter_when_increasing_if_it_does_not_exist() { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); - let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); let mut metric_collection = MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); @@ -611,7 +611,7 @@ mod tests { #[test] fn it_should_allow_making_sure_a_counter_exists_without_increasing_it() { - let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); let mut metric_collection = MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); @@ -626,7 +626,7 @@ mod tests { #[test] fn it_should_allow_describing_a_counter_before_using_it() { - let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); let mut metric_collection = MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); @@ -643,7 +643,7 @@ mod tests { #[should_panic(expected = "Duplicate MetricName found in MetricKindCollection")] fn it_should_not_allow_duplicate_metric_names_when_instantiating() { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); - let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); let _unused = MetricKindCollection::new(vec![ Metric::new( @@ -663,13 +663,13 @@ mod tests { use pretty_assertions::assert_eq; use 
super::*; - use crate::label::{LabelName, LabelValue}; + use crate::label::LabelValue; use crate::sample::Sample; #[test] fn it_should_set_a_preexistent_gauge() { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); - let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); let mut metric_collection = MetricCollection::new( MetricKindCollection::new(vec![]), @@ -690,7 +690,7 @@ mod tests { #[test] fn it_should_automatically_create_a_gauge_when_setting_if_it_does_not_exist() { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); - let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); let mut metric_collection = MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); @@ -705,7 +705,7 @@ mod tests { #[test] fn it_should_allow_making_sure_a_gauge_exists_without_increasing_it() { - let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); let mut metric_collection = MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); @@ -720,7 +720,7 @@ mod tests { #[test] fn it_should_allow_describing_a_gauge_before_using_it() { - let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); let mut metric_collection = MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); @@ -737,7 +737,7 @@ mod tests { #[should_panic(expected = "Duplicate MetricName found in MetricKindCollection")] fn it_should_not_allow_duplicate_metric_names_when_instantiating() { let time = 
DurationSinceUnixEpoch::from_secs(1_743_552_000); - let label_set: LabelSet = (LabelName::new("label_name"), LabelValue::new("value")).into(); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); let _unused = MetricKindCollection::new(vec![ Metric::new( diff --git a/packages/udp-tracker-core/src/event/mod.rs b/packages/udp-tracker-core/src/event/mod.rs index 6cb43e5a1..ddcba7792 100644 --- a/packages/udp-tracker-core/src/event/mod.rs +++ b/packages/udp-tracker-core/src/event/mod.rs @@ -1,6 +1,7 @@ use std::net::SocketAddr; -use torrust_tracker_metrics::label::{LabelName, LabelSet, LabelValue}; +use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use torrust_tracker_metrics::label_name; use torrust_tracker_primitives::service_binding::ServiceBinding; pub mod sender; @@ -43,15 +44,15 @@ impl From for LabelSet { fn from(connection_context: ConnectionContext) -> Self { LabelSet::from([ ( - LabelName::new("server_binding_protocol"), + label_name!("server_binding_protocol"), LabelValue::new(&connection_context.server_service_binding.protocol().to_string()), ), ( - LabelName::new("server_binding_ip"), + label_name!("server_binding_ip"), LabelValue::new(&connection_context.server_service_binding.bind_address().ip().to_string()), ), ( - LabelName::new("server_binding_port"), + label_name!("server_binding_port"), LabelValue::new(&connection_context.server_service_binding.bind_address().port().to_string()), ), ]) diff --git a/packages/udp-tracker-core/src/statistics/event/handler.rs b/packages/udp-tracker-core/src/statistics/event/handler.rs index dcb512783..13a4840d5 100644 --- a/packages/udp-tracker-core/src/statistics/event/handler.rs +++ b/packages/udp-tracker-core/src/statistics/event/handler.rs @@ -1,5 +1,5 @@ -use torrust_tracker_metrics::label::{LabelName, LabelSet, LabelValue}; -use torrust_tracker_metrics::metric_name; +use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use 
torrust_tracker_metrics::{label_name, metric_name}; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; @@ -26,7 +26,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics let mut label_set = LabelSet::from(context); - label_set.upsert(LabelName::new("request_kind"), LabelValue::new("connect")); + label_set.upsert(label_name!("request_kind"), LabelValue::new("connect")); stats_repository .increase_counter(&metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) @@ -47,7 +47,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics let mut label_set = LabelSet::from(context); - label_set.upsert(LabelName::new("request_kind"), LabelValue::new("announce")); + label_set.upsert(label_name!("request_kind"), LabelValue::new("announce")); stats_repository .increase_counter(&metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) @@ -68,7 +68,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics let mut label_set = LabelSet::from(context); - label_set.upsert(LabelName::new("request_kind"), LabelValue::new("scrape")); + label_set.upsert(label_name!("request_kind"), LabelValue::new("scrape")); stats_repository .increase_counter(&metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) diff --git a/packages/udp-tracker-server/src/event/mod.rs b/packages/udp-tracker-server/src/event/mod.rs index 316e1a414..0236b26a9 100644 --- a/packages/udp-tracker-server/src/event/mod.rs +++ b/packages/udp-tracker-server/src/event/mod.rs @@ -2,7 +2,8 @@ use std::fmt; use std::net::SocketAddr; use std::time::Duration; -use torrust_tracker_metrics::label::{LabelName, LabelSet, LabelValue}; +use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use torrust_tracker_metrics::label_name; use torrust_tracker_primitives::service_binding::ServiceBinding; pub mod 
sender; @@ -94,15 +95,15 @@ impl From for LabelSet { fn from(connection_context: ConnectionContext) -> Self { LabelSet::from([ ( - LabelName::new("server_binding_protocol"), + label_name!("server_binding_protocol"), LabelValue::new(&connection_context.server_service_binding.protocol().to_string()), ), ( - LabelName::new("server_binding_ip"), + label_name!("server_binding_ip"), LabelValue::new(&connection_context.server_service_binding.bind_address().ip().to_string()), ), ( - LabelName::new("server_binding_port"), + label_name!("server_binding_port"), LabelValue::new(&connection_context.server_service_binding.bind_address().port().to_string()), ), ]) diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index 721d415ea..430bbc34c 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -1,5 +1,5 @@ -use torrust_tracker_metrics::label::{LabelName, LabelSet, LabelValue}; -use torrust_tracker_metrics::metric_name; +use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use torrust_tracker_metrics::{label_name, metric_name}; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::{Event, UdpRequestKind, UdpResponseKind}; @@ -97,7 +97,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura let mut label_set = LabelSet::from(context); - label_set.upsert(LabelName::new("kind"), LabelValue::new(&kind.to_string())); + label_set.upsert(label_name!("kind"), LabelValue::new(&kind.to_string())); stats_repository .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &label_set, now) @@ -128,7 +128,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics let mut label_set = LabelSet::from(context.clone()); - label_set.upsert(LabelName::new("request_kind"), 
LabelValue::new(&req_kind.to_string())); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); stats_repository .set_gauge( @@ -149,7 +149,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics let mut label_set = LabelSet::from(context.clone()); - label_set.upsert(LabelName::new("request_kind"), LabelValue::new(&req_kind.to_string())); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); stats_repository .set_gauge( @@ -170,7 +170,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura // Extendable metrics let mut label_set = LabelSet::from(context.clone()); - label_set.upsert(LabelName::new("request_kind"), LabelValue::new(&req_kind.to_string())); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); stats_repository .set_gauge( @@ -192,9 +192,9 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura let mut label_set = LabelSet::from(context); if result_label_value == LabelValue::new("ok") { - label_set.upsert(LabelName::new("request_kind"), kind_label_value); + label_set.upsert(label_name!("request_kind"), kind_label_value); } - label_set.upsert(LabelName::new("result"), result_label_value); + label_set.upsert(label_name!("result"), result_label_value); stats_repository .increase_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &label_set, now) From d263be70a5ed5c5f64c4ed296f0a930c1c48dee7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 15 Apr 2025 12:20:23 +0100 Subject: [PATCH 460/802] refactor: [#1445] return optionals for metric values in metric collection --- packages/metrics/src/metric_collection.rs | 60 ++++++++++------------- 1 file changed, 25 insertions(+), 35 deletions(-) diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 5b3f92a19..c4db0706f 100644 --- 
a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -62,7 +62,12 @@ impl MetricCollection { } #[must_use] - pub fn get_counter_value(&self, name: &MetricName, label_set: &LabelSet) -> Counter { + pub fn contains_counter(&self, name: &MetricName) -> bool { + self.counters.metrics.contains_key(name) + } + + #[must_use] + pub fn get_counter_value(&self, name: &MetricName, label_set: &LabelSet) -> Option { self.counters.get_value(name, label_set) } @@ -89,7 +94,12 @@ impl MetricCollection { } #[must_use] - pub fn get_gauge_value(&self, name: &MetricName, label_set: &LabelSet) -> Gauge { + pub fn contains_gauge(&self, name: &MetricName) -> bool { + self.gauges.metrics.contains_key(name) + } + + #[must_use] + pub fn get_gauge_value(&self, name: &MetricName, label_set: &LabelSet) -> Option { self.gauges.get_value(name, label_set) } @@ -275,11 +285,11 @@ impl MetricKindCollection { } #[must_use] - pub fn get_value(&self, name: &MetricName, label_set: &LabelSet) -> Counter { + pub fn get_value(&self, name: &MetricName, label_set: &LabelSet) -> Option { self.metrics .get(name) .and_then(|metric| metric.get_sample_data(label_set)) - .map_or(Counter::default(), |sample| sample.value().clone()) + .map(|sample| sample.value().clone()) } } @@ -300,11 +310,11 @@ impl MetricKindCollection { } #[must_use] - pub fn get_value(&self, name: &MetricName, label_set: &LabelSet) -> Gauge { + pub fn get_value(&self, name: &MetricName, label_set: &LabelSet) -> Option { self.metrics .get(name) .and_then(|metric| metric.get_sample_data(label_set)) - .map_or(Gauge::default(), |sample| sample.value().clone()) + .map(|sample| sample.value().clone()) } } @@ -588,7 +598,7 @@ mod tests { assert_eq!( metric_collection.get_counter_value(&metric_name!("test_counter"), &label_set), - Counter::new(2) + Some(Counter::new(2)) ); } @@ -605,38 +615,28 @@ mod tests { assert_eq!( metric_collection.get_counter_value(&metric_name!("test_counter"), &label_set), - 
Counter::new(2) + Some(Counter::new(2)) ); } #[test] fn it_should_allow_making_sure_a_counter_exists_without_increasing_it() { - let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); - let mut metric_collection = MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); metric_collection.ensure_counter_exists(&metric_name!("test_counter")); - assert_eq!( - metric_collection.get_counter_value(&metric_name!("test_counter"), &label_set), - Counter::default() - ); + assert!(metric_collection.contains_counter(&metric_name!("test_counter"))); } #[test] fn it_should_allow_describing_a_counter_before_using_it() { - let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); - let mut metric_collection = MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); metric_collection.describe_counter(&metric_name!("test_counter"), None, None); - assert_eq!( - metric_collection.get_counter_value(&metric_name!("test_counter"), &label_set), - Counter::default() - ); + assert!(metric_collection.contains_counter(&metric_name!("test_counter"))); } #[test] @@ -683,7 +683,7 @@ mod tests { assert_eq!( metric_collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), - Gauge::new(1.0) + Some(Gauge::new(1.0)) ); } @@ -699,38 +699,28 @@ mod tests { assert_eq!( metric_collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), - Gauge::new(1.0) + Some(Gauge::new(1.0)) ); } #[test] - fn it_should_allow_making_sure_a_gauge_exists_without_increasing_it() { - let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); - + fn it_should_allow_making_sure_a_gauge_exists_without_setting_it() { let mut metric_collection = MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); metric_collection.ensure_gauge_exists(&metric_name!("test_gauge")); - assert_eq!( - 
metric_collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), - Gauge::default() - ); + assert!(metric_collection.contains_gauge(&metric_name!("test_gauge"))); } #[test] fn it_should_allow_describing_a_gauge_before_using_it() { - let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); - let mut metric_collection = MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); metric_collection.describe_gauge(&metric_name!("test_gauge"), None, None); - assert_eq!( - metric_collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), - Gauge::default() - ); + assert!(metric_collection.contains_gauge(&metric_name!("test_gauge"))); } #[test] From 785a978e0d2ac45d70dc6f5c2a4109ced175d4d9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 15 Apr 2025 13:03:15 +0100 Subject: [PATCH 461/802] refactor: [#1445] remove panic from MetricColelction::new method --- packages/metrics/src/metric_collection.rs | 64 +++++++++++++---------- 1 file changed, 35 insertions(+), 29 deletions(-) diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index c4db0706f..29da4e509 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -27,21 +27,20 @@ pub struct MetricCollection { } impl MetricCollection { - /// # Panics + /// # Errors /// - /// Panics if there are duplicate metric names across counters and gauges. - #[must_use] - pub fn new(counters: MetricKindCollection, gauges: MetricKindCollection) -> Self { + /// Returns an error if there are duplicate metric names across counters and + /// gauges. 
+ pub fn new(counters: MetricKindCollection, gauges: MetricKindCollection) -> Result { // Check for name collisions across metric types let counter_names: HashSet<_> = counters.names().collect(); let gauge_names: HashSet<_> = gauges.names().collect(); - assert!( - counter_names.is_disjoint(&gauge_names), - "Metric names must be unique across counters and gauges" - ); + if !counter_names.is_disjoint(&gauge_names) { + return Err(Error::DuplicateMetricNames); + } - Self { counters, gauges } + Ok(Self { counters, gauges }) } /// Merges another `MetricCollection` into this one. @@ -49,7 +48,7 @@ impl MetricCollection { /// # Errors /// /// Returns an error if a metric name already exists in the current collection. - pub fn merge(&mut self, other: &Self) -> Result<(), MergeError> { + pub fn merge(&mut self, other: &Self) -> Result<(), Error> { self.counters.merge(&other.counters)?; self.gauges.merge(&other.gauges)?; Ok(()) @@ -121,9 +120,12 @@ impl MetricCollection { } #[derive(thiserror::Error, Debug, Clone)] -pub enum MergeError { +pub enum Error { + #[error("Metric names must be unique across counters and gauges.")] + DuplicateMetricNames, + #[error("Cannot merge metric '{metric_name}': it already exists in the current collection")] - MetricNameAlreadyExists { metric_name: MetricName }, + MetricNameCollisionInMerge { metric_name: MetricName }, } /// Implements serialization for `MetricCollection`. @@ -177,10 +179,10 @@ impl<'de> Deserialize<'de> for MetricCollection { } } - Ok(MetricCollection::new( - MetricKindCollection::new(counters), - MetricKindCollection::new(gauges), - )) + let metric_collection = MetricCollection::new(MetricKindCollection::new(counters), MetricKindCollection::new(gauges)) + .map_err(serde::de::Error::custom)?; + + Ok(metric_collection) } } @@ -246,11 +248,11 @@ impl MetricKindCollection { /// # Errors /// /// Returns an error if a metric name already exists in the current collection. 
- pub fn merge(&mut self, other: &Self) -> Result<(), MergeError> { + pub fn merge(&mut self, other: &Self) -> Result<(), Error> { // Check for name collisions for metric_name in other.metrics.keys() { if self.metrics.contains_key(metric_name) { - return Err(MergeError::MetricNameAlreadyExists { + return Err(Error::MetricNameCollisionInMerge { metric_name: metric_name.clone(), }); } @@ -258,7 +260,7 @@ impl MetricKindCollection { for (metric_name, metric) in &other.metrics { if self.metrics.insert(metric_name.clone(), metric.clone()).is_some() { - return Err(MergeError::MetricNameAlreadyExists { + return Err(Error::MetricNameCollisionInMerge { metric_name: metric_name.clone(), }); } @@ -374,6 +376,7 @@ mod tests { SampleCollection::new(vec![Sample::new(Gauge::new(1.0), time, label_set_1.clone())]), )]), ) + .unwrap() } fn json() -> String { @@ -540,7 +543,8 @@ mod tests { ]), )]), MetricKindCollection::new(vec![]), - ); + ) + .unwrap(); let prometheus_output = metric_collection.to_prometheus(); @@ -565,7 +569,7 @@ mod tests { counters.ensure_metric_exists(&metric_name!("test_counter")); gauges.ensure_metric_exists(&metric_name!("test_gauge")); - let metric_collection = MetricCollection::new(counters, gauges); + let metric_collection = MetricCollection::new(counters, gauges).unwrap(); let prometheus_output = metric_collection.to_prometheus(); @@ -591,7 +595,8 @@ mod tests { SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]), )]), MetricKindCollection::new(vec![]), - ); + ) + .unwrap(); metric_collection.increase_counter(&metric_name!("test_counter"), &label_set, time); metric_collection.increase_counter(&metric_name!("test_counter"), &label_set, time); @@ -608,7 +613,7 @@ mod tests { let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); let mut metric_collection = - MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); + 
MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])).unwrap(); metric_collection.increase_counter(&metric_name!("test_counter"), &label_set, time); metric_collection.increase_counter(&metric_name!("test_counter"), &label_set, time); @@ -622,7 +627,7 @@ mod tests { #[test] fn it_should_allow_making_sure_a_counter_exists_without_increasing_it() { let mut metric_collection = - MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); + MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])).unwrap(); metric_collection.ensure_counter_exists(&metric_name!("test_counter")); @@ -632,7 +637,7 @@ mod tests { #[test] fn it_should_allow_describing_a_counter_before_using_it() { let mut metric_collection = - MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); + MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])).unwrap(); metric_collection.describe_counter(&metric_name!("test_counter"), None, None); @@ -677,7 +682,8 @@ mod tests { metric_name!("test_gauge"), SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]), )]), - ); + ) + .unwrap(); metric_collection.set_gauge(&metric_name!("test_gauge"), &label_set, 1.0, time); @@ -693,7 +699,7 @@ mod tests { let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); let mut metric_collection = - MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); + MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])).unwrap(); metric_collection.set_gauge(&metric_name!("test_gauge"), &label_set, 1.0, time); @@ -706,7 +712,7 @@ mod tests { #[test] fn it_should_allow_making_sure_a_gauge_exists_without_setting_it() { let mut metric_collection = - MetricCollection::new(MetricKindCollection::new(vec![]), 
MetricKindCollection::new(vec![])); + MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])).unwrap(); metric_collection.ensure_gauge_exists(&metric_name!("test_gauge")); @@ -716,7 +722,7 @@ mod tests { #[test] fn it_should_allow_describing_a_gauge_before_using_it() { let mut metric_collection = - MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])); + MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])).unwrap(); metric_collection.describe_gauge(&metric_name!("test_gauge"), None, None); From 42e1524c560f546d0b57c61b50bf83c928112990 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 15 Apr 2025 16:21:49 +0100 Subject: [PATCH 462/802] refactor: [#1445] return errors instead of panicking in the MetricCollection struct --- .../src/statistics/event/handler.rs | 16 +- .../src/statistics/metrics.rs | 27 ++- .../src/statistics/repository.rs | 18 +- packages/metrics/src/metric_collection.rs | 229 +++++++++++------- .../src/statistics/event/handler.rs | 24 +- .../src/statistics/metrics.rs | 29 ++- .../src/statistics/repository.rs | 18 +- .../src/statistics/event/handler.rs | 72 ++++-- .../src/statistics/metrics.rs | 27 ++- .../src/statistics/repository.rs | 36 ++- 10 files changed, 363 insertions(+), 133 deletions(-) diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index cea224d04..182c86b01 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -31,9 +31,13 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura let mut label_set = LabelSet::from(connection); label_set.upsert(label_name!("request_kind"), LabelValue::new("announce")); - stats_repository + match stats_repository .increase_counter(&metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, 
now) - .await; + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; } Event::TcpScrape { connection } => { // Global fixed metrics @@ -52,9 +56,13 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura let mut label_set = LabelSet::from(connection); label_set.upsert(label_name!("request_kind"), LabelValue::new("scrape")); - stats_repository + match stats_repository .increase_counter(&metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) - .await; + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; } } diff --git a/packages/http-tracker-core/src/statistics/metrics.rs b/packages/http-tracker-core/src/statistics/metrics.rs index 0b442c1cb..bf053b04e 100644 --- a/packages/http-tracker-core/src/statistics/metrics.rs +++ b/packages/http-tracker-core/src/statistics/metrics.rs @@ -1,7 +1,7 @@ use serde::Serialize; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; -use torrust_tracker_metrics::metric_collection::MetricCollection; +use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; use torrust_tracker_primitives::DurationSinceUnixEpoch; /// Metrics collected by the tracker. @@ -24,11 +24,28 @@ pub struct Metrics { } impl Metrics { - pub fn increase_counter(&mut self, metric_name: &MetricName, labels: &LabelSet, now: DurationSinceUnixEpoch) { - self.metric_collection.increase_counter(metric_name, labels, now); + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. 
+ pub fn increase_counter( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.increase_counter(metric_name, labels, now) } - pub fn set_gauge(&mut self, metric_name: &MetricName, labels: &LabelSet, value: f64, now: DurationSinceUnixEpoch) { - self.metric_collection.set_gauge(metric_name, labels, value, now); + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn set_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.set_gauge(metric_name, labels, value, now) } } diff --git a/packages/http-tracker-core/src/statistics/repository.rs b/packages/http-tracker-core/src/statistics/repository.rs index 88345722b..d5e718821 100644 --- a/packages/http-tracker-core/src/statistics/repository.rs +++ b/packages/http-tracker-core/src/statistics/repository.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use tokio::sync::{RwLock, RwLockReadGuard}; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::Error; use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::describe_metrics; @@ -56,9 +57,22 @@ impl Repository { drop(stats_lock); } - pub async fn increase_counter(&self, metric_name: &MetricName, labels: &LabelSet, now: DurationSinceUnixEpoch) { + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increase the counter. 
+ pub async fn increase_counter( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { let mut stats_lock = self.stats.write().await; - stats_lock.increase_counter(metric_name, labels, now); + + let result = stats_lock.increase_counter(metric_name, labels, now); + drop(stats_lock); + + result } } diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 29da4e509..c719e6054 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -13,13 +13,10 @@ use crate::metric::description::MetricDescription; use crate::sample_collection::SampleCollection; use crate::unit::Unit; -// todo: serialize in a deterministic order. For example: +// code-review: serialize in a deterministic order? For example: // - First the counter metrics ordered by name. // - Then the gauge metrics ordered by name. -/// Use this type only when behind a lock that guarantees thread-safety. -/// Otherwise, there could be race conditions that lead to duplicate metric -/// names in different metric types. #[derive(Debug, Clone, Default, PartialEq)] pub struct MetricCollection { counters: MetricKindCollection, @@ -37,7 +34,10 @@ impl MetricCollection { let gauge_names: HashSet<_> = gauges.names().collect(); if !counter_names.is_disjoint(&gauge_names) { - return Err(Error::DuplicateMetricNames); + return Err(Error::MetricNameCollisionInConstructor { + counter_names: counter_names.iter().map(std::string::ToString::to_string).collect(), + gauge_names: gauge_names.iter().map(std::string::ToString::to_string).collect(), + }); } Ok(Self { counters, gauges }) @@ -70,16 +70,25 @@ impl MetricCollection { self.counters.get_value(name, label_set) } - /// # Panics + /// # Errors /// - /// Panics if a gauge with the same name already exists. 
- pub fn increase_counter(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { - assert!( - !self.gauges.metrics.contains_key(name), - "Cannot create counter with name '{name}': a gauge with this name already exists", - ); + /// Return an error if a metrics of a different type with the same name + /// already exists. + pub fn increase_counter( + &mut self, + name: &MetricName, + label_set: &LabelSet, + time: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + if self.gauges.metrics.contains_key(name) { + return Err(Error::MetricNameCollisionAdding { + metric_name: name.clone(), + }); + } self.counters.increment(name, label_set, time); + + Ok(()) } pub fn ensure_counter_exists(&mut self, name: &MetricName) { @@ -102,16 +111,26 @@ impl MetricCollection { self.gauges.get_value(name, label_set) } - /// # Panics + /// # Errors /// - /// Panics if a counter with the same name already exists. - pub fn set_gauge(&mut self, name: &MetricName, label_set: &LabelSet, value: f64, time: DurationSinceUnixEpoch) { - assert!( - !self.counters.metrics.contains_key(name), - "Cannot create gauge with name '{name}': a counter with this name already exists" - ); + /// Return an error if a metrics of a different type with the same name + /// already exists. 
+ pub fn set_gauge( + &mut self, + name: &MetricName, + label_set: &LabelSet, + value: f64, + time: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + if self.counters.metrics.contains_key(name) { + return Err(Error::MetricNameCollisionAdding { + metric_name: name.clone(), + }); + } self.gauges.set(name, label_set, value, time); + + Ok(()) } pub fn ensure_gauge_exists(&mut self, name: &MetricName) { @@ -121,11 +140,20 @@ impl MetricCollection { #[derive(thiserror::Error, Debug, Clone)] pub enum Error { - #[error("Metric names must be unique across counters and gauges.")] - DuplicateMetricNames, + #[error("Metric names must be unique across all metrics types.")] + MetricNameCollisionInConstructor { + counter_names: Vec, + gauge_names: Vec, + }, + + #[error("Found duplicate metric name in list. Metric names must be unique across all metrics types.")] + DuplicateMetricNameInList { metric_name: MetricName }, #[error("Cannot merge metric '{metric_name}': it already exists in the current collection")] MetricNameCollisionInMerge { metric_name: MetricName }, + + #[error("Cannot create metric with name '{metric_name}': another metric with this name already exists")] + MetricNameCollisionAdding { metric_name: MetricName }, } /// Implements serialization for `MetricCollection`. 
@@ -179,8 +207,10 @@ impl<'de> Deserialize<'de> for MetricCollection { } } - let metric_collection = MetricCollection::new(MetricKindCollection::new(counters), MetricKindCollection::new(gauges)) - .map_err(serde::de::Error::custom)?; + let counters = MetricKindCollection::new(counters).map_err(serde::de::Error::custom)?; + let gauges = MetricKindCollection::new(gauges).map_err(serde::de::Error::custom)?; + + let metric_collection = MetricCollection::new(counters, gauges).map_err(serde::de::Error::custom)?; Ok(metric_collection) } @@ -213,20 +243,21 @@ pub struct MetricKindCollection { impl MetricKindCollection { /// Creates a new `MetricKindCollection` from a vector of metrics /// - /// # Panics + /// # Errors /// - /// Panics if duplicate metric names are found - #[must_use] - pub fn new(metrics: Vec>) -> Self { + /// Returns an error if duplicate metric names are passed. + pub fn new(metrics: Vec>) -> Result { let mut map = HashMap::with_capacity(metrics.len()); for metric in metrics { - assert!( - map.insert(metric.name().clone(), metric).is_none(), - "Duplicate MetricName found in MetricKindCollection" - ); + let metric_name = metric.name().clone(); + + if let Some(_old_metric) = map.insert(metric.name().clone(), metric) { + return Err(Error::DuplicateMetricNameInList { metric_name }); + } } - Self { metrics: map } + + Ok(Self { metrics: map }) } /// Returns an iterator over all metric names in this collection. @@ -234,10 +265,18 @@ impl MetricKindCollection { self.metrics.keys() } + /// # Panics + /// + /// It should not panic as long as empty sample collections are allowed. 
pub fn ensure_metric_exists(&mut self, name: &MetricName) { if !self.metrics.contains_key(name) { - self.metrics - .insert(name.clone(), Metric::new(name.clone(), SampleCollection::new(vec![]))); + self.metrics.insert( + name.clone(), + Metric::new( + name.clone(), + SampleCollection::new(vec![]).expect("Empty sample collection creation should not fail"), + ), + ); } } } @@ -369,12 +408,14 @@ mod tests { MetricCollection::new( MetricKindCollection::new(vec![Metric::new( metric_name!("http_tracker_core_announce_requests_received_total"), - SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set_1.clone())]), - )]), + SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set_1.clone())]).unwrap(), + )]) + .unwrap(), MetricKindCollection::new(vec![Metric::new( metric_name!("udp_tracker_server_performance_avg_announce_processing_time_ns"), - SampleCollection::new(vec![Sample::new(Gauge::new(1.0), time, label_set_1.clone())]), - )]), + SampleCollection::new(vec![Sample::new(Gauge::new(1.0), time, label_set_1.clone())]).unwrap(), + )]) + .unwrap(), ) .unwrap() } @@ -446,41 +487,47 @@ mod tests { } #[test] - #[should_panic(expected = "Metric names must be unique across counters and gauges")] fn it_should_not_allow_duplicate_names_across_types() { - let counter = MetricKindCollection::new(vec![Metric::new(metric_name!("test_metric"), SampleCollection::new(vec![]))]); - - let gauge = MetricKindCollection::new(vec![Metric::new(metric_name!("test_metric"), SampleCollection::new(vec![]))]); + let counters = + MetricKindCollection::new(vec![Metric::new(metric_name!("test_metric"), SampleCollection::default())]).unwrap(); + let gauges = + MetricKindCollection::new(vec![Metric::new(metric_name!("test_metric"), SampleCollection::default())]).unwrap(); - let _unused = MetricCollection::new(counter, gauge); + assert!(MetricCollection::new(counters, gauges).is_err()); } #[test] - #[should_panic(expected = "Cannot create gauge with name 'test_metric': a 
counter with this name already exists")] fn it_should_not_allow_creating_a_gauge_with_the_same_name_as_a_counter() { let mut collection = MetricCollection::default(); let label_set = LabelSet::default(); let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); // First create a counter - collection.increase_counter(&metric_name!("test_metric"), &label_set, time); + collection + .increase_counter(&metric_name!("test_metric"), &label_set, time) + .unwrap(); + + // Then try to create a gauge with the same name + let result = collection.set_gauge(&metric_name!("test_metric"), &label_set, 1.0, time); - // Then try to create a gauge with the same name - this should panic - collection.set_gauge(&metric_name!("test_metric"), &label_set, 1.0, time); + assert!(result.is_err()); } #[test] - #[should_panic(expected = "Cannot create counter with name 'test_metric': a gauge with this name already exists")] fn it_should_not_allow_creating_a_counter_with_the_same_name_as_a_gauge() { let mut collection = MetricCollection::default(); let label_set = LabelSet::default(); let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); // First set the gauge - collection.set_gauge(&metric_name!("test_metric"), &label_set, 1.0, time); + collection + .set_gauge(&metric_name!("test_metric"), &label_set, 1.0, time) + .unwrap(); + + // Then try to create a counter with the same name + let result = collection.increase_counter(&metric_name!("test_metric"), &label_set, time); - // Then try to create a counter with the same name - this should panic - collection.increase_counter(&metric_name!("test_metric"), &label_set, time); + assert!(result.is_err()); } #[test] @@ -540,9 +587,11 @@ mod tests { SampleCollection::new(vec![ Sample::new(Counter::new(1), time, label_set_1.clone()), Sample::new(Counter::new(2), time, label_set_2.clone()), - ]), - )]), - MetricKindCollection::new(vec![]), + ]) + .unwrap(), + )]) + .unwrap(), + MetricKindCollection::default(), ) .unwrap(); @@ -563,8 +612,8 @@ mod 
tests { #[test] fn it_should_exclude_metrics_without_samples_from_prometheus_format() { - let mut counters = MetricKindCollection::new(vec![]); - let mut gauges = MetricKindCollection::new(vec![]); + let mut counters = MetricKindCollection::default(); + let mut gauges = MetricKindCollection::default(); counters.ensure_metric_exists(&metric_name!("test_counter")); gauges.ensure_metric_exists(&metric_name!("test_gauge")); @@ -592,14 +641,19 @@ mod tests { let mut metric_collection = MetricCollection::new( MetricKindCollection::new(vec![Metric::new( metric_name!("test_counter"), - SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]), - )]), - MetricKindCollection::new(vec![]), + SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]).unwrap(), + )]) + .unwrap(), + MetricKindCollection::default(), ) .unwrap(); - metric_collection.increase_counter(&metric_name!("test_counter"), &label_set, time); - metric_collection.increase_counter(&metric_name!("test_counter"), &label_set, time); + metric_collection + .increase_counter(&metric_name!("test_counter"), &label_set, time) + .unwrap(); + metric_collection + .increase_counter(&metric_name!("test_counter"), &label_set, time) + .unwrap(); assert_eq!( metric_collection.get_counter_value(&metric_name!("test_counter"), &label_set), @@ -613,10 +667,14 @@ mod tests { let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); let mut metric_collection = - MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])).unwrap(); + MetricCollection::new(MetricKindCollection::default(), MetricKindCollection::default()).unwrap(); - metric_collection.increase_counter(&metric_name!("test_counter"), &label_set, time); - metric_collection.increase_counter(&metric_name!("test_counter"), &label_set, time); + metric_collection + .increase_counter(&metric_name!("test_counter"), &label_set, time) + .unwrap(); + metric_collection + 
.increase_counter(&metric_name!("test_counter"), &label_set, time) + .unwrap(); assert_eq!( metric_collection.get_counter_value(&metric_name!("test_counter"), &label_set), @@ -627,7 +685,7 @@ mod tests { #[test] fn it_should_allow_making_sure_a_counter_exists_without_increasing_it() { let mut metric_collection = - MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])).unwrap(); + MetricCollection::new(MetricKindCollection::default(), MetricKindCollection::default()).unwrap(); metric_collection.ensure_counter_exists(&metric_name!("test_counter")); @@ -637,7 +695,7 @@ mod tests { #[test] fn it_should_allow_describing_a_counter_before_using_it() { let mut metric_collection = - MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])).unwrap(); + MetricCollection::new(MetricKindCollection::default(), MetricKindCollection::default()).unwrap(); metric_collection.describe_counter(&metric_name!("test_counter"), None, None); @@ -645,21 +703,22 @@ mod tests { } #[test] - #[should_panic(expected = "Duplicate MetricName found in MetricKindCollection")] fn it_should_not_allow_duplicate_metric_names_when_instantiating() { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); - let _unused = MetricKindCollection::new(vec![ + let result = MetricKindCollection::new(vec![ Metric::new( metric_name!("test_counter"), - SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]), + SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]).unwrap(), ), Metric::new( metric_name!("test_counter"), - SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]), + SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]).unwrap(), ), ]); + + assert!(result.is_err()); } } @@ -677,15 +736,18 @@ mod tests { let label_set: LabelSet = 
(label_name!("label_name"), LabelValue::new("value")).into(); let mut metric_collection = MetricCollection::new( - MetricKindCollection::new(vec![]), + MetricKindCollection::default(), MetricKindCollection::new(vec![Metric::new( metric_name!("test_gauge"), - SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]), - )]), + SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]).unwrap(), + )]) + .unwrap(), ) .unwrap(); - metric_collection.set_gauge(&metric_name!("test_gauge"), &label_set, 1.0, time); + metric_collection + .set_gauge(&metric_name!("test_gauge"), &label_set, 1.0, time) + .unwrap(); assert_eq!( metric_collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), @@ -699,9 +761,11 @@ mod tests { let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); let mut metric_collection = - MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])).unwrap(); + MetricCollection::new(MetricKindCollection::default(), MetricKindCollection::default()).unwrap(); - metric_collection.set_gauge(&metric_name!("test_gauge"), &label_set, 1.0, time); + metric_collection + .set_gauge(&metric_name!("test_gauge"), &label_set, 1.0, time) + .unwrap(); assert_eq!( metric_collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), @@ -712,7 +776,7 @@ mod tests { #[test] fn it_should_allow_making_sure_a_gauge_exists_without_setting_it() { let mut metric_collection = - MetricCollection::new(MetricKindCollection::new(vec![]), MetricKindCollection::new(vec![])).unwrap(); + MetricCollection::new(MetricKindCollection::default(), MetricKindCollection::default()).unwrap(); metric_collection.ensure_gauge_exists(&metric_name!("test_gauge")); @@ -722,7 +786,7 @@ mod tests { #[test] fn it_should_allow_describing_a_gauge_before_using_it() { let mut metric_collection = - MetricCollection::new(MetricKindCollection::new(vec![]), 
MetricKindCollection::new(vec![])).unwrap(); + MetricCollection::new(MetricKindCollection::default(), MetricKindCollection::default()).unwrap(); metric_collection.describe_gauge(&metric_name!("test_gauge"), None, None); @@ -730,21 +794,22 @@ mod tests { } #[test] - #[should_panic(expected = "Duplicate MetricName found in MetricKindCollection")] fn it_should_not_allow_duplicate_metric_names_when_instantiating() { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); - let _unused = MetricKindCollection::new(vec![ + let result = MetricKindCollection::new(vec![ Metric::new( metric_name!("test_gauge"), - SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]), + SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]).unwrap(), ), Metric::new( metric_name!("test_gauge"), - SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]), + SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]).unwrap(), ), ]); + + assert!(result.is_err()); } } } diff --git a/packages/udp-tracker-core/src/statistics/event/handler.rs b/packages/udp-tracker-core/src/statistics/event/handler.rs index 13a4840d5..2680c442f 100644 --- a/packages/udp-tracker-core/src/statistics/event/handler.rs +++ b/packages/udp-tracker-core/src/statistics/event/handler.rs @@ -28,9 +28,13 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura let mut label_set = LabelSet::from(context); label_set.upsert(label_name!("request_kind"), LabelValue::new("connect")); - stats_repository + match stats_repository .increase_counter(&metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) - .await; + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; } Event::UdpAnnounce { context } => { // Global fixed metrics @@ -49,9 +53,13 @@ 
pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura let mut label_set = LabelSet::from(context); label_set.upsert(label_name!("request_kind"), LabelValue::new("announce")); - stats_repository + match stats_repository .increase_counter(&metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) - .await; + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; } Event::UdpScrape { context } => { // Global fixed metrics @@ -70,9 +78,13 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura let mut label_set = LabelSet::from(context); label_set.upsert(label_name!("request_kind"), LabelValue::new("scrape")); - stats_repository + match stats_repository .increase_counter(&metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) - .await; + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; } } diff --git a/packages/udp-tracker-core/src/statistics/metrics.rs b/packages/udp-tracker-core/src/statistics/metrics.rs index 23cec8036..94aa7d08f 100644 --- a/packages/udp-tracker-core/src/statistics/metrics.rs +++ b/packages/udp-tracker-core/src/statistics/metrics.rs @@ -1,7 +1,7 @@ use serde::Serialize; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; -use torrust_tracker_metrics::metric_collection::MetricCollection; +use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; use torrust_tracker_primitives::DurationSinceUnixEpoch; /// Metrics collected by the tracker. @@ -37,11 +37,30 @@ pub struct Metrics { } impl Metrics { - pub fn increase_counter(&mut self, metric_name: &MetricName, labels: &LabelSet, now: DurationSinceUnixEpoch) { - self.metric_collection.increase_counter(metric_name, labels, now); + /// # Errors + /// + /// This function returns an error if the metric does not exist and it + /// cannot be created. 
+ pub fn increase_counter( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.increase_counter(metric_name, labels, now) } - pub fn set_gauge(&mut self, metric_name: &MetricName, labels: &LabelSet, value: f64, now: DurationSinceUnixEpoch) { - self.metric_collection.set_gauge(metric_name, labels, value, now); + /// # Errors + /// + /// This function returns an error if the metric does not exist and it + /// cannot be created. + pub fn set_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.set_gauge(metric_name, labels, value, now) } } diff --git a/packages/udp-tracker-core/src/statistics/repository.rs b/packages/udp-tracker-core/src/statistics/repository.rs index 49c91c751..c68fa14f7 100644 --- a/packages/udp-tracker-core/src/statistics/repository.rs +++ b/packages/udp-tracker-core/src/statistics/repository.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use tokio::sync::{RwLock, RwLockReadGuard}; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::Error; use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::describe_metrics; @@ -68,9 +69,22 @@ impl Repository { drop(stats_lock); } - pub async fn increase_counter(&self, metric_name: &MetricName, labels: &LabelSet, now: DurationSinceUnixEpoch) { + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increase the counter. 
+ pub async fn increase_counter( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { let mut stats_lock = self.stats.write().await; - stats_lock.increase_counter(metric_name, labels, now); + + let result = stats_lock.increase_counter(metric_name, labels, now); + drop(stats_lock); + + result } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index 430bbc34c..092ce93f2 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -23,26 +23,34 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura stats_repository.increase_udp_requests_aborted().await; // Extendable metrics - stats_repository + match stats_repository .increase_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &LabelSet::from(context), now, ) - .await; + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; } Event::UdpRequestBanned { context } => { // Global fixed metrics stats_repository.increase_udp_requests_banned().await; // Extendable metrics - stats_repository + match stats_repository .increase_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), &LabelSet::from(context), now, ) - .await; + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; } Event::UdpRequestReceived { context } => { // Global fixed metrics @@ -56,13 +64,17 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura } // Extendable metrics - stats_repository + match stats_repository .increase_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &LabelSet::from(context), now, ) - .await; + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; } 
Event::UdpRequestAccepted { context, kind } => { // Global fixed metrics @@ -99,9 +111,13 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura label_set.upsert(label_name!("kind"), LabelValue::new(&kind.to_string())); - stats_repository + match stats_repository .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &label_set, now) - .await; + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; } Event::UdpResponseSent { context, @@ -130,14 +146,18 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura let mut label_set = LabelSet::from(context.clone()); label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - stats_repository + match stats_repository .set_gauge( &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), &label_set, new_avg, now, ) - .await; + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Connect.to_string())) } @@ -151,14 +171,18 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura let mut label_set = LabelSet::from(context.clone()); label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - stats_repository + match stats_repository .set_gauge( &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), &label_set, new_avg, now, ) - .await; + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Announce.to_string())) } @@ -172,14 +196,18 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura let mut label_set = LabelSet::from(context.clone()); label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - stats_repository + match stats_repository 
.set_gauge( &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), &label_set, new_avg, now, ) - .await; + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Scrape.to_string())) } @@ -196,9 +224,13 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura } label_set.upsert(label_name!("result"), result_label_value); - stats_repository + match stats_repository .increase_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &label_set, now) - .await; + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; } Event::UdpError { context } => { // Global fixed metrics @@ -212,9 +244,13 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura } // Extendable metrics - stats_repository + match stats_repository .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &LabelSet::from(context), now) - .await; + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; } } diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index 4fe07e7da..7b18f6418 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -1,7 +1,7 @@ use serde::Serialize; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; -use torrust_tracker_metrics::metric_collection::MetricCollection; +use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; use torrust_tracker_primitives::DurationSinceUnixEpoch; /// Metrics collected by the UDP tracker server. 
@@ -69,11 +69,28 @@ pub struct Metrics { } impl Metrics { - pub fn increase_counter(&mut self, metric_name: &MetricName, labels: &LabelSet, now: DurationSinceUnixEpoch) { - self.metric_collection.increase_counter(metric_name, labels, now); + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn increase_counter( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.increase_counter(metric_name, labels, now) } - pub fn set_gauge(&mut self, metric_name: &MetricName, labels: &LabelSet, value: f64, now: DurationSinceUnixEpoch) { - self.metric_collection.set_gauge(metric_name, labels, value, now); + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn set_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.set_gauge(metric_name, labels, value, now) } } diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index c33c1231c..1a1db89c7 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -4,6 +4,7 @@ use std::time::Duration; use tokio::sync::{RwLock, RwLockReadGuard}; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::Error; use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::describe_metrics; @@ -181,15 +182,42 @@ impl Repository { drop(stats_lock); } - pub async fn increase_counter(&self, metric_name: &MetricName, labels: &LabelSet, now: DurationSinceUnixEpoch) { + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increase the counter. 
+ pub async fn increase_counter( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { let mut stats_lock = self.stats.write().await; - stats_lock.increase_counter(metric_name, labels, now); + + let result = stats_lock.increase_counter(metric_name, labels, now); + drop(stats_lock); + + result } - pub async fn set_gauge(&self, metric_name: &MetricName, labels: &LabelSet, value: f64, now: DurationSinceUnixEpoch) { + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increase the counter. + pub async fn set_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { let mut stats_lock = self.stats.write().await; - stats_lock.set_gauge(metric_name, labels, value, now); + + let result = stats_lock.set_gauge(metric_name, labels, value, now); + drop(stats_lock); + + result } } From 2ccb247ff5e890708fdfd3d89ba3bac8c659b6d7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 15 Apr 2025 16:44:52 +0100 Subject: [PATCH 463/802] refactor: [#1445] return errors instead of panicking in the SampleCollection struct --- packages/metrics/src/metric/mod.rs | 6 +-- packages/metrics/src/sample_collection.rs | 65 ++++++++++++----------- 2 files changed, 37 insertions(+), 34 deletions(-) diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index 777981fd8..ecce90f18 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -108,7 +108,7 @@ mod tests { let label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); - let samples = SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set.clone())]); + let samples = SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set.clone())]).unwrap(); Metric::::new(name.clone(), samples) } @@ -154,7 +154,7 @@ mod tests { let 
label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); - let samples = SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set.clone())]); + let samples = SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set.clone())]).unwrap(); let metric = Metric::::new(name.clone(), samples); @@ -188,7 +188,7 @@ mod tests { let label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); - let samples = SampleCollection::new(vec![Sample::new(Gauge::new(1.0), time, label_set.clone())]); + let samples = SampleCollection::new(vec![Sample::new(Gauge::new(1.0), time, label_set.clone())]).unwrap(); let metric = Metric::::new(name.clone(), samples); diff --git a/packages/metrics/src/sample_collection.rs b/packages/metrics/src/sample_collection.rs index 436a4bc7d..49c839673 100644 --- a/packages/metrics/src/sample_collection.rs +++ b/packages/metrics/src/sample_collection.rs @@ -1,8 +1,8 @@ use std::collections::hash_map::Iter; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::fmt::Write as _; -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::counter::Counter; @@ -18,23 +18,28 @@ pub struct SampleCollection { } impl SampleCollection { - /// # Panics + /// Creates a new `MetricKindCollection` from a vector of metrics /// - /// Panics if there are duplicate `LabelSets` in the provided samples. - #[must_use] - pub fn new(samples: Vec>) -> Self { + /// # Errors + /// + /// Returns an error if there are duplicate `LabelSets` in the provided + /// samples. 
+ pub fn new(samples: Vec>) -> Result { let mut map: HashMap> = HashMap::with_capacity(samples.len()); for sample in samples { let (label_set, sample_data): (LabelSet, Measurement) = sample.into(); - assert!( - map.insert(label_set, sample_data).is_none(), - "Duplicate LabelSet found in SampleCollection" - ); + let label_set_clone = label_set.clone(); + + if let Some(_old_measurement) = map.insert(label_set, sample_data) { + return Err(Error::DuplicateLabelSetInList { + label_set: label_set_clone, + }); + } } - Self { samples: map } + Ok(Self { samples: map }) } #[must_use] @@ -59,6 +64,12 @@ impl SampleCollection { } } +#[derive(thiserror::Error, Debug, Clone)] +pub enum Error { + #[error("Found duplicate label set in list. Label set must be unique in a SampleCollection.")] + DuplicateLabelSetInList { label_set: LabelSet }, +} + impl SampleCollection { pub fn increment(&mut self, label_set: &LabelSet, time: DurationSinceUnixEpoch) { let sample = self @@ -104,20 +115,11 @@ where where D: Deserializer<'de>, { - // First deserialize into a temporary Vec let samples = Vec::>::deserialize(deserializer)?; - // Check for duplicate label sets - let mut seen_labels = HashSet::new(); + let sample_collection = SampleCollection::new(samples).map_err(serde::de::Error::custom)?; - for sample in &samples { - if !seen_labels.insert(sample.labels()) { - return Err(de::Error::custom(format!("Duplicate label set found: {}", sample.labels()))); - } - } - - // Convert to HashMap-based storage - Ok(SampleCollection::new(samples)) + Ok(sample_collection) } } @@ -149,14 +151,15 @@ mod tests { } #[test] - #[should_panic(expected = "Duplicate LabelSet found in SampleCollection")] fn it_should_fail_trying_to_create_a_sample_collection_with_duplicate_label_sets() { let samples = vec![ Sample::new(Counter::default(), sample_update_time(), LabelSet::default()), Sample::new(Counter::default(), sample_update_time(), LabelSet::default()), ]; - let _unused = SampleCollection::new(samples); + let 
result = SampleCollection::new(samples); + + assert!(result.is_err()); } #[test] @@ -165,7 +168,7 @@ mod tests { let sample = Sample::new(Counter::default(), sample_update_time(), label_set.clone()); - let collection = SampleCollection::new(vec![sample.clone()]); + let collection = SampleCollection::new(vec![sample.clone()]).unwrap(); let retrieved = collection.get(&label_set); @@ -180,7 +183,7 @@ mod tests { let sample_1 = Sample::new(Counter::new(1), sample_update_time(), label_set_1.clone()); let sample_2 = Sample::new(Counter::new(2), sample_update_time(), label_set_2.clone()); - let collection = SampleCollection::new(vec![sample_1.clone(), sample_2.clone()]); + let collection = SampleCollection::new(vec![sample_1.clone(), sample_2.clone()]).unwrap(); let retrieved = collection.get(&label_set_1); assert_eq!(retrieved.unwrap(), sample_1.measurement()); @@ -192,7 +195,7 @@ mod tests { #[test] fn it_should_return_the_number_of_samples_in_the_collection() { let samples = vec![Sample::new(Counter::default(), sample_update_time(), LabelSet::default())]; - let collection = SampleCollection::new(samples); + let collection = SampleCollection::new(samples).unwrap(); assert_eq!(collection.len(), 1); } @@ -208,14 +211,14 @@ mod tests { assert!(empty.is_empty()); let samples = vec![Sample::new(Counter::default(), sample_update_time(), LabelSet::default())]; - let collection = SampleCollection::new(samples); + let collection = SampleCollection::new(samples).unwrap(); assert!(!collection.is_empty()); } #[test] fn it_should_be_serializable_and_deserializable_for_json_format() { let sample = Sample::new(Counter::default(), sample_update_time(), LabelSet::default()); - let collection = SampleCollection::new(vec![sample]); + let collection = SampleCollection::new(vec![sample]).unwrap(); let serialized = serde_json::to_string(&collection).unwrap(); let deserialized: SampleCollection = serde_json::from_str(&serialized).unwrap(); @@ -240,7 +243,7 @@ mod tests { #[test] fn 
it_should_be_exportable_to_prometheus_format_when_empty() { let sample = Sample::new(Counter::default(), sample_update_time(), LabelSet::default()); - let collection = SampleCollection::new(vec![sample]); + let collection = SampleCollection::new(vec![sample]).unwrap(); let prometheus_output = collection.to_prometheus(); @@ -255,7 +258,7 @@ mod tests { LabelSet::from(vec![("labe_name_1", "label value value 1")]), ); - let collection = SampleCollection::new(vec![sample]); + let collection = SampleCollection::new(vec![sample]).unwrap(); let prometheus_output = collection.to_prometheus(); From 13ea09183a8b8eb6e632b63aa6a831efe5509917 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 21 Apr 2025 09:57:11 +0100 Subject: [PATCH 464/802] feat: [#1445] add logs to the event listener's initialization --- packages/http-tracker-core/src/statistics/keeper.rs | 9 ++++++++- packages/udp-tracker-core/src/statistics/keeper.rs | 9 ++++++++- packages/udp-tracker-server/src/statistics/keeper.rs | 9 ++++++++- 3 files changed, 24 insertions(+), 3 deletions(-) diff --git a/packages/http-tracker-core/src/statistics/keeper.rs b/packages/http-tracker-core/src/statistics/keeper.rs index 01a7a1569..1b69f032d 100644 --- a/packages/http-tracker-core/src/statistics/keeper.rs +++ b/packages/http-tracker-core/src/statistics/keeper.rs @@ -3,6 +3,7 @@ use tokio::sync::broadcast::Receiver; use super::event::listener::dispatch_events; use super::repository::Repository; use crate::event::Event; +use crate::HTTP_TRACKER_LOG_TARGET; /// The service responsible for keeping tracker metrics (listening to statistics events and handle them). 
/// @@ -29,7 +30,13 @@ impl Keeper { pub fn run_event_listener(&mut self, receiver: Receiver) { let stats_repository = self.repository.clone(); - tokio::spawn(async move { dispatch_events(receiver, stats_repository).await }); + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting HTTP tracker core event listener"); + + tokio::spawn(async move { + dispatch_events(receiver, stats_repository).await; + + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "HTTP tracker core event listener finished"); + }); } } diff --git a/packages/udp-tracker-core/src/statistics/keeper.rs b/packages/udp-tracker-core/src/statistics/keeper.rs index 16ea51aac..d72dcb260 100644 --- a/packages/udp-tracker-core/src/statistics/keeper.rs +++ b/packages/udp-tracker-core/src/statistics/keeper.rs @@ -3,6 +3,7 @@ use tokio::sync::broadcast::Receiver; use super::event::listener::dispatch_events; use super::repository::Repository; use crate::event::Event; +use crate::UDP_TRACKER_LOG_TARGET; /// The service responsible for keeping tracker metrics (listening to statistics events and handle them). 
/// @@ -29,7 +30,13 @@ impl Keeper { pub fn run_event_listener(&mut self, receiver: Receiver) { let stats_repository = self.repository.clone(); - tokio::spawn(async move { dispatch_events(receiver, stats_repository).await }); + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker core event listener"); + + tokio::spawn(async move { + dispatch_events(receiver, stats_repository).await; + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker core event listener finished"); + }); } } diff --git a/packages/udp-tracker-server/src/statistics/keeper.rs b/packages/udp-tracker-server/src/statistics/keeper.rs index 62216ce88..c200b4cdf 100644 --- a/packages/udp-tracker-server/src/statistics/keeper.rs +++ b/packages/udp-tracker-server/src/statistics/keeper.rs @@ -1,3 +1,4 @@ +use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use tokio::sync::broadcast::Receiver; use super::event::listener::dispatch_events; @@ -29,7 +30,13 @@ impl Keeper { pub fn run_event_listener(&mut self, receiver: Receiver) { let stats_repository = self.repository.clone(); - tokio::spawn(async move { dispatch_events(receiver, stats_repository).await }); + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker server event listener"); + + tokio::spawn(async move { + dispatch_events(receiver, stats_repository).await; + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker core server listener finished"); + }); } } From 482a1be9f46234db628b260b148cf5102d83f53d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 21 Apr 2025 10:24:50 +0100 Subject: [PATCH 465/802] feat: [#1445] add request kind label to total errors metric --- packages/metrics/src/label/value.rs | 6 ++++++ packages/udp-tracker-server/src/event/mod.rs | 1 + packages/udp-tracker-server/src/handlers/error.rs | 1 + .../src/statistics/event/handler.rs | 13 +++++++++++-- 4 files changed, 19 insertions(+), 2 deletions(-) diff --git a/packages/metrics/src/label/value.rs 
b/packages/metrics/src/label/value.rs index 528a0e2ab..ffdbce333 100644 --- a/packages/metrics/src/label/value.rs +++ b/packages/metrics/src/label/value.rs @@ -25,6 +25,12 @@ impl PrometheusSerializable for LabelValue { } } +impl From for LabelValue { + fn from(value: String) -> Self { + Self(value) + } +} + #[cfg(test)] mod tests { use crate::label::value::LabelValue; diff --git a/packages/udp-tracker-server/src/event/mod.rs b/packages/udp-tracker-server/src/event/mod.rs index 0236b26a9..a1770acc0 100644 --- a/packages/udp-tracker-server/src/event/mod.rs +++ b/packages/udp-tracker-server/src/event/mod.rs @@ -31,6 +31,7 @@ pub enum Event { }, UdpError { context: ConnectionContext, + kind: Option, }, } diff --git a/packages/udp-tracker-server/src/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs index 6a1bce51c..9d9ee8b1d 100644 --- a/packages/udp-tracker-server/src/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -64,6 +64,7 @@ pub async fn handle_error( udp_server_stats_event_sender .send_event(Event::UdpError { context: ConnectionContext::new(client_socket_addr, server_service_binding), + kind: req_kind, }) .await; } diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index 092ce93f2..22253852c 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -232,7 +232,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura Err(err) => tracing::error!("Failed to increase the counter: {}", err), }; } - Event::UdpError { context } => { + Event::UdpError { context, kind } => { // Global fixed metrics match context.client_socket_addr().ip() { std::net::IpAddr::V4(_) => { @@ -244,8 +244,15 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura } // Extendable metrics + + let mut label_set = 
LabelSet::from(context); + + if let Some(kind) = kind { + label_set.upsert(label_name!("request_kind"), kind.to_string().into()); + } + match stats_repository - .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &LabelSet::from(context), now) + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &label_set, now) .await { Ok(()) => {} @@ -510,6 +517,7 @@ mod tests { ) .unwrap(), ), + kind: None, }, &stats_repository, CurrentClock::now(), @@ -641,6 +649,7 @@ mod tests { ) .unwrap(), ), + kind: None, }, &stats_repository, CurrentClock::now(), From ad782eb3dd66dbd2f125aed4bf99b61ec464ee8a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 21 Apr 2025 10:25:26 +0100 Subject: [PATCH 466/802] refactor: [#1445] rename metric label --- packages/udp-tracker-server/src/statistics/event/handler.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index 22253852c..1e1502339 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -109,7 +109,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura let mut label_set = LabelSet::from(context); - label_set.upsert(label_name!("kind"), LabelValue::new(&kind.to_string())); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&kind.to_string())); match stats_repository .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &label_set, now) From f9ad729f2899ddd351506adb240757b73bbaa309 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 21 Apr 2025 10:49:13 +0100 Subject: [PATCH 467/802] feat: [#1445] add logs for metrics initialization --- Cargo.lock | 1 + packages/metrics/Cargo.toml | 1 + packages/metrics/src/lib.rs | 2 ++ packages/metrics/src/metric_collection.rs | 7 +++++-- packages/metrics/src/unit.rs | 1 + 5 files 
changed, 10 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e05894e3c..b72047f37 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4791,6 +4791,7 @@ dependencies = [ "serde_json", "thiserror 2.0.12", "torrust-tracker-primitives", + "tracing", ] [[package]] diff --git a/packages/metrics/Cargo.toml b/packages/metrics/Cargo.toml index 6520cf244..0597785f4 100644 --- a/packages/metrics/Cargo.toml +++ b/packages/metrics/Cargo.toml @@ -21,6 +21,7 @@ serde = { version = "1", features = ["derive"] } serde_json = "1.0.140" thiserror = "2" torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +tracing = "0.1.41" [dev-dependencies] approx = "0.5.1" diff --git a/packages/metrics/src/lib.rs b/packages/metrics/src/lib.rs index fd677b891..95d70bf6c 100644 --- a/packages/metrics/src/lib.rs +++ b/packages/metrics/src/lib.rs @@ -8,6 +8,8 @@ pub mod sample; pub mod sample_collection; pub mod unit; +pub const METRICS_TARGET: &str = "METRICS"; + #[cfg(test)] mod tests { /// It removes leading and trailing whitespace from each line, and empty lines. diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index c719e6054..6a2a7735d 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -12,6 +12,7 @@ use super::prometheus::PrometheusSerializable; use crate::metric::description::MetricDescription; use crate::sample_collection::SampleCollection; use crate::unit::Unit; +use crate::METRICS_TARGET; // code-review: serialize in a deterministic order? For example: // - First the counter metrics ordered by name. 
@@ -56,7 +57,8 @@ impl MetricCollection { // Counter-specific methods - pub fn describe_counter(&mut self, name: &MetricName, _opt_unit: Option, _opt_description: Option) { + pub fn describe_counter(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option) { + tracing::info!(target: METRICS_TARGET, type = "counter", name = name.to_string(), unit = ?opt_unit, description = ?opt_description); self.counters.ensure_metric_exists(name); } @@ -97,7 +99,8 @@ impl MetricCollection { // Gauge-specific methods - pub fn describe_gauge(&mut self, name: &MetricName, _opt_unit: Option, _opt_description: Option) { + pub fn describe_gauge(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option) { + tracing::info!(target: METRICS_TARGET, type = "gauge", name = name.to_string(), unit = ?opt_unit, description = ?opt_description); self.gauges.ensure_metric_exists(name); } diff --git a/packages/metrics/src/unit.rs b/packages/metrics/src/unit.rs index b98e6836d..f7a528bed 100644 --- a/packages/metrics/src/unit.rs +++ b/packages/metrics/src/unit.rs @@ -4,6 +4,7 @@ //! The `Unit` enum is used to specify the unit of measurement for metrics. //! //! They were copied from the `metrics` crate, to allow future compatibility. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum Unit { Count, Percent, From 4be6c974d08ca4fcd0ed0549dcdd1e8e60fa79aa Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 22 Apr 2025 08:20:28 +0100 Subject: [PATCH 468/802] chore(deps): udpate dependencies ``` cargo update Updating crates.io index Locking 7 packages to latest compatible versions Updating brotli-decompressor v4.0.2 -> v4.0.3 Updating clap v4.5.36 -> v4.5.37 Updating clap_builder v4.5.36 -> v4.5.37 Updating libm v0.2.11 -> v0.2.12 Updating proc-macro2 v1.0.94 -> v1.0.95 Updating rand v0.9.0 -> v0.9.1 Updating signal-hook-registry v1.4.2 -> v1.4.5 ``` --- Cargo.lock | 43 +++++++++++++++++++++---------------------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b72047f37..370562982 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -664,7 +664,7 @@ dependencies = [ "r2d2", "r2d2_mysql", "r2d2_sqlite", - "rand 0.9.0", + "rand 0.9.1", "serde", "serde_json", "testcontainers", @@ -696,7 +696,7 @@ dependencies = [ "futures", "lazy_static", "mockall", - "rand 0.9.0", + "rand 0.9.1", "serde", "thiserror 2.0.12", "tokio", @@ -857,9 +857,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "4.0.2" +version = "4.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74fa05ad7d803d413eb8380983b092cbbaf9a85f151b871360e7b00cd7060b37" +checksum = "a334ef7c9e23abf0ce748e8cd309037da93e606ad52eb372e4ce327a0dcfbdfd" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1045,9 +1045,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.36" +version = "4.5.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2df961d8c8a0d08aa9945718ccf584145eee3f3aa06cddbeac12933781102e04" +checksum = "eccb054f56cbd38340b380d4a8e69ef1f02f1af43db2f0cc817a4774d80ae071" dependencies = [ "clap_builder", "clap_derive", @@ -1055,9 +1055,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = 
"4.5.36" +version = "4.5.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "132dbda40fb6753878316a489d5a1242a8ef2f0d9e47ba01c951ea8aa7d013a5" +checksum = "efd9466fac8543255d3b1fcad4762c5e116ffe808c8a3043d4263cd4fd4862a2" dependencies = [ "anstream", "anstyle", @@ -2428,9 +2428,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" +checksum = "e6d154aedcb0b7a1e91a3fddbe2a8350d3da76ac9d0220ae20da5c7aa8269612" [[package]] name = "libredox" @@ -3205,9 +3205,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" dependencies = [ "unicode-ident", ] @@ -3322,13 +3322,12 @@ dependencies = [ [[package]] name = "rand" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", - "zerocopy 0.8.24", ] [[package]] @@ -3983,9 +3982,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.2" +version = "1.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" dependencies = [ "libc", ] @@ -4550,7 +4549,7 @@ dependencies = [ "hyper", "local-ip-address", "percent-encoding", - "rand 0.9.0", + "rand 0.9.1", 
"reqwest", "serde", "serde_bencode", @@ -4685,7 +4684,7 @@ dependencies = [ "futures", "local-ip-address", "mockall", - "rand 0.9.0", + "rand 0.9.1", "regex", "reqwest", "serde", @@ -4816,7 +4815,7 @@ dependencies = [ name = "torrust-tracker-test-helpers" version = "3.0.0-develop" dependencies = [ - "rand 0.9.0", + "rand 0.9.1", "torrust-tracker-configuration", "tracing", "tracing-subscriber", @@ -4856,7 +4855,7 @@ dependencies = [ "futures-util", "local-ip-address", "mockall", - "rand 0.9.0", + "rand 0.9.1", "ringbuf", "serde", "thiserror 2.0.12", @@ -5099,7 +5098,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" dependencies = [ "getrandom 0.3.2", - "rand 0.9.0", + "rand 0.9.1", ] [[package]] From 53c869476813d9d729abc1b4df2ec842e6a99633 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 22 Apr 2025 10:14:24 +0100 Subject: [PATCH 469/802] fix: clippy errors --- packages/http-tracker-core/src/statistics/mod.rs | 2 +- packages/metrics/src/metric_collection.rs | 4 ++-- packages/udp-tracker-core/src/statistics/mod.rs | 2 +- packages/udp-tracker-server/src/statistics/mod.rs | 14 +++++++------- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/packages/http-tracker-core/src/statistics/mod.rs b/packages/http-tracker-core/src/statistics/mod.rs index a5d6d37a5..d7a8da402 100644 --- a/packages/http-tracker-core/src/statistics/mod.rs +++ b/packages/http-tracker-core/src/statistics/mod.rs @@ -19,7 +19,7 @@ pub fn describe_metrics() -> Metrics { metrics.metric_collection.describe_counter( &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), Some(Unit::Count), - Some(MetricDescription::new("Total number of HTTP requests received")), + Some(&MetricDescription::new("Total number of HTTP requests received")), ); metrics diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 6a2a7735d..9e89c3c4b 100644 --- 
a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -57,7 +57,7 @@ impl MetricCollection { // Counter-specific methods - pub fn describe_counter(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option) { + pub fn describe_counter(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option<&MetricDescription>) { tracing::info!(target: METRICS_TARGET, type = "counter", name = name.to_string(), unit = ?opt_unit, description = ?opt_description); self.counters.ensure_metric_exists(name); } @@ -99,7 +99,7 @@ impl MetricCollection { // Gauge-specific methods - pub fn describe_gauge(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option) { + pub fn describe_gauge(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option<&MetricDescription>) { tracing::info!(target: METRICS_TARGET, type = "gauge", name = name.to_string(), unit = ?opt_unit, description = ?opt_description); self.gauges.ensure_metric_exists(name); } diff --git a/packages/udp-tracker-core/src/statistics/mod.rs b/packages/udp-tracker-core/src/statistics/mod.rs index 40a30f51b..ec37deae7 100644 --- a/packages/udp-tracker-core/src/statistics/mod.rs +++ b/packages/udp-tracker-core/src/statistics/mod.rs @@ -19,7 +19,7 @@ pub fn describe_metrics() -> Metrics { metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), Some(Unit::Count), - Some(MetricDescription::new("Total number of UDP requests received")), + Some(&MetricDescription::new("Total number of UDP requests received")), ); metrics diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index 4eea13224..45c696fdb 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -25,43 +25,43 @@ pub fn describe_metrics() -> Metrics { metrics.metric_collection.describe_counter( 
&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), Some(Unit::Count), - Some(MetricDescription::new("Total number of UDP requests aborted")), + Some(&MetricDescription::new("Total number of UDP requests aborted")), ); metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), Some(Unit::Count), - Some(MetricDescription::new("Total number of UDP requests banned")), + Some(&MetricDescription::new("Total number of UDP requests banned")), ); metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), Some(Unit::Count), - Some(MetricDescription::new("Total number of UDP requests received")), + Some(&MetricDescription::new("Total number of UDP requests received")), ); metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), Some(Unit::Count), - Some(MetricDescription::new("Total number of UDP requests accepted")), + Some(&MetricDescription::new("Total number of UDP requests accepted")), ); metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), Some(Unit::Count), - Some(MetricDescription::new("Total number of UDP responses sent")), + Some(&MetricDescription::new("Total number of UDP responses sent")), ); metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), Some(Unit::Count), - Some(MetricDescription::new("Total number of errors processing UDP requests")), + Some(&MetricDescription::new("Total number of errors processing UDP requests")), ); metrics.metric_collection.describe_gauge( &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), Some(Unit::Nanoseconds), - Some(MetricDescription::new( + Some(&MetricDescription::new( "Average time to process a UDP connect request in nanoseconds", )), ); From a67e137f73e3c2b7b9c6eef32a6001a3a6017c3e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 22 Apr 2025 12:00:15 +0100 Subject: [PATCH 470/802] 
feat: [#1376] add peer info to TcpAnnounce event --- packages/http-tracker-core/src/event/mod.rs | 18 +- packages/http-tracker-core/src/lib.rs | 29 +++ .../src/services/announce.rs | 213 ++++++++++++------ .../src/statistics/event/handler.rs | 9 +- 4 files changed, 200 insertions(+), 69 deletions(-) diff --git a/packages/http-tracker-core/src/event/mod.rs b/packages/http-tracker-core/src/event/mod.rs index ae997156a..0490fe2be 100644 --- a/packages/http-tracker-core/src/event/mod.rs +++ b/packages/http-tracker-core/src/event/mod.rs @@ -2,6 +2,7 @@ use std::net::{IpAddr, SocketAddr}; use torrust_tracker_metrics::label::{LabelSet, LabelValue}; use torrust_tracker_metrics::label_name; +use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::service_binding::ServiceBinding; pub mod sender; @@ -9,8 +10,21 @@ pub mod sender; /// A HTTP core event. #[derive(Debug, PartialEq, Eq, Clone)] pub enum Event { - TcpAnnounce { connection: ConnectionContext }, - TcpScrape { connection: ConnectionContext }, + TcpAnnounce { + connection: ConnectionContext, + + /// The peer that is announcing itself to the tracker. + announced_peer: Peer, + + /// The peer that is added to the tracker. + /// + /// It might not be the same as the `announced_peer` because the tracker + /// can change the peer's IP address. 
+ added_peer: Peer, + }, + TcpScrape { + connection: ConnectionContext, + }, } #[derive(Debug, PartialEq, Eq, Clone)] diff --git a/packages/http-tracker-core/src/lib.rs b/packages/http-tracker-core/src/lib.rs index c4f131bcb..1692a68fa 100644 --- a/packages/http-tracker-core/src/lib.rs +++ b/packages/http-tracker-core/src/lib.rs @@ -20,7 +20,11 @@ pub const HTTP_TRACKER_LOG_TARGET: &str = "HTTP TRACKER"; #[cfg(test)] pub(crate) mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; /// # Panics /// @@ -31,4 +35,29 @@ pub(crate) mod tests { .parse::() .expect("String should be a valid info hash") } + + pub fn sample_peer_using_ipv4() -> peer::Peer { + sample_peer() + } + + pub fn sample_peer_using_ipv6() -> peer::Peer { + let mut peer = sample_peer(); + peer.peer_addr = SocketAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + 8080, + ); + peer + } + + pub fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), + event: AnnounceEvent::Started, + } + } } diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index c249cb4db..3ac4de1b9 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -21,6 +21,7 @@ use bittorrent_tracker_core::error::{AnnounceError, TrackerCoreError, WhitelistE use bittorrent_tracker_core::whitelist; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; 
+use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::service_binding::ServiceBinding; use crate::event; @@ -81,6 +82,8 @@ impl AnnounceService { let mut peer = peer_from_request(announce_request, &remote_client_ip); + let announced_peer = peer; + let peers_wanted = Self::peers_wanted(announce_request); let announce_data = self @@ -88,8 +91,16 @@ impl AnnounceService { .announce(&announce_request.info_hash, &mut peer, &remote_client_ip, &peers_wanted) .await?; - self.send_event(remote_client_ip, opt_remote_client_port, server_service_binding.clone()) - .await; + let added_peer = peer; + + self.send_event( + remote_client_ip, + opt_remote_client_port, + server_service_binding.clone(), + announced_peer, + added_peer, + ) + .await; Ok(announce_data) } @@ -139,13 +150,24 @@ impl AnnounceService { } } - async fn send_event(&self, peer_ip: IpAddr, opt_peer_ip_port: Option, server_service_binding: ServiceBinding) { + async fn send_event( + &self, + peer_ip: IpAddr, + opt_peer_ip_port: Option, + server_service_binding: ServiceBinding, + announced_peer: Peer, + added_peer: Peer, + ) { if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { - http_stats_event_sender - .send_event(Event::TcpAnnounce { - connection: event::ConnectionContext::new(peer_ip, opt_peer_ip_port, server_service_binding), - }) - .await; + let event = Event::TcpAnnounce { + connection: event::ConnectionContext::new(peer_ip, opt_peer_ip_port, server_service_binding), + announced_peer, + added_peer, + }; + + tracing::debug!("Sending TcpAnnounce event: {:?}", event); + + http_stats_event_sender.send_event(event).await; } } } @@ -202,10 +224,9 @@ impl From for HttpAnnounceError { #[cfg(test)] mod tests { - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::net::SocketAddr; use std::sync::Arc; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_http_tracker_protocol::v1::requests::announce::Announce; use 
bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; use bittorrent_tracker_core::announce_handler::AnnounceHandler; @@ -218,7 +239,6 @@ mod tests { use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_primitives::peer::Peer; - use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; struct CoreTrackerServices { @@ -269,31 +289,6 @@ mod tests { ) } - fn sample_peer_using_ipv4() -> peer::Peer { - sample_peer() - } - - fn sample_peer_using_ipv6() -> peer::Peer { - let mut peer = sample_peer(); - peer.peer_addr = SocketAddr::new( - IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), - 8080, - ); - peer - } - - fn sample_peer() -> peer::Peer { - peer::Peer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), - event: AnnounceEvent::Started, - } - } - fn sample_announce_request_for_peer(peer: Peer) -> (Announce, ClientIpSources) { let announce_request = Announce { info_hash: sample_info_hash(), @@ -335,7 +330,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; - use mockall::predicate::eq; + use mockall::predicate::{self}; use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; @@ -343,14 +338,14 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; - use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; use crate::event; use crate::event::{ConnectionContext, Event}; use crate::services::announce::tests::{ 
initialize_core_tracker_services, initialize_core_tracker_services_with_config, sample_announce_request_for_peer, - sample_peer, MockHttpStatsEventSender, + MockHttpStatsEventSender, }; use crate::services::announce::AnnounceService; + use crate::tests::{sample_peer, sample_peer_using_ipv4, sample_peer_using_ipv6}; #[tokio::test] async fn it_should_return_the_announce_data() { @@ -393,16 +388,46 @@ mod tests { async fn it_should_send_the_tcp_4_announce_event_when_the_peer_uses_ipv4() { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let peer = sample_peer_using_ipv4(); + + let server_service_binding_clone = server_service_binding.clone(); + let peer_copy = peer; let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(Event::TcpAnnounce { - connection: ConnectionContext::new( - IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), - Some(8080), - server_service_binding.clone(), - ), + .with(predicate::function(move |event| { + let mut announced_peer = peer_copy; + announced_peer.peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + + let mut added_peer = peer; + added_peer.peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + + let expected_event = Event::TcpAnnounce { + connection: ConnectionContext::new( + IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), + Some(8080), + server_service_binding.clone(), + ), + announced_peer, + added_peer, + }; + + match (event, expected_event) { + ( + Event::TcpAnnounce { + connection: a_conn, + announced_peer: a1, + added_peer: a2, + }, + Event::TcpAnnounce { + connection: b_conn, + announced_peer: b1, + added_peer: b2, + }, + ) => *a_conn == b_conn && a1.peer_addr == b1.peer_addr && a2.peer_addr == b2.peer_addr, + _ => false, + } })) .times(1) .returning(|_| 
Box::pin(future::ready(Some(Ok(1))))); @@ -413,8 +438,6 @@ mod tests { core_http_tracker_services.http_stats_event_sender = http_stats_event_sender; - let peer = sample_peer_using_ipv4(); - let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); let announce_service = AnnounceService::new( @@ -426,7 +449,7 @@ mod tests { ); let _announce_data = announce_service - .handle_announce(&announce_request, &client_ip_sources, &server_service_binding, None) + .handle_announce(&announce_request, &client_ip_sources, &server_service_binding_clone, None) .await .unwrap(); } @@ -453,20 +476,53 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let peer = peer_with_the_ipv4_loopback_ip(); + + let server_service_binding_clone = server_service_binding.clone(); + let peer_copy = peer; - // Assert that the event sent is a TCP4 event let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(Event::TcpAnnounce { - connection: ConnectionContext::new( - IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), - Some(8080), - server_service_binding.clone(), - ), + .with(predicate::function(move |event| { + let mut announced_peer = peer_copy; + announced_peer.peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + + let mut added_peer = peer; + added_peer.peer_addr = SocketAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + 8080, + ); + + let expected_event = Event::TcpAnnounce { + connection: ConnectionContext::new( + IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), + Some(8080), + server_service_binding.clone(), + ), + announced_peer, + added_peer, + }; + + match (event, expected_event) { + ( + Event::TcpAnnounce { + connection: a_conn, + announced_peer: a1, + added_peer: a2, + }, + 
Event::TcpAnnounce { + connection: b_conn, + announced_peer: b1, + added_peer: b2, + }, + ) => *a_conn == b_conn && a1.peer_addr == b1.peer_addr && a2.peer_addr == b2.peer_addr, + _ => false, + } })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + let http_stats_event_sender: Arc>> = Arc::new(Some(Box::new(http_stats_event_sender_mock))); @@ -475,8 +531,6 @@ mod tests { core_http_tracker_services.http_stats_event_sender = http_stats_event_sender; - let peer = peer_with_the_ipv4_loopback_ip(); - let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); let announce_service = AnnounceService::new( @@ -488,7 +542,7 @@ mod tests { ); let _announce_data = announce_service - .handle_announce(&announce_request, &client_ip_sources, &server_service_binding, None) + .handle_announce(&announce_request, &client_ip_sources, &server_service_binding_clone, None) .await .unwrap(); } @@ -498,16 +552,45 @@ mod tests { { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); + let peer = sample_peer_using_ipv6(); + + let peer_copy = peer; let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() - .with(eq(Event::TcpAnnounce { - connection: ConnectionContext::new( - IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), - Some(8080), - server_service_binding, - ), + .with(predicate::function(move |event| { + let announced_peer = peer_copy; + //announced_peer.peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + + let added_peer = peer; + //added_peer.peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + + let expected_event = Event::TcpAnnounce { + connection: ConnectionContext::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 
0x6969)), + Some(8080), + server_service_binding.clone(), + ), + announced_peer, + added_peer, + }; + + match (event, expected_event) { + ( + Event::TcpAnnounce { + connection: a_conn, + announced_peer: a1, + added_peer: a2, + }, + Event::TcpAnnounce { + connection: b_conn, + announced_peer: b1, + added_peer: b2, + }, + ) => *a_conn == b_conn && a1.peer_addr == b1.peer_addr && a2.peer_addr == b2.peer_addr, + _ => false, + } })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -517,8 +600,6 @@ mod tests { let (core_tracker_services, mut core_http_tracker_services) = initialize_core_tracker_services(); core_http_tracker_services.http_stats_event_sender = http_stats_event_sender; - let peer = sample_peer_using_ipv6(); - let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); let announce_service = AnnounceService::new( diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index 182c86b01..b24ddfde6 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -14,7 +14,7 @@ use crate::statistics::HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; /// version of the event. pub async fn handle_event(event: Event, stats_repository: &Repository, now: DurationSinceUnixEpoch) { match event { - Event::TcpAnnounce { connection } => { + Event::TcpAnnounce { connection, .. 
} => { // Global fixed metrics match connection.client_ip_addr() { @@ -79,11 +79,13 @@ mod tests { use crate::event::{ConnectionContext, Event}; use crate::statistics::event::handler::handle_event; use crate::statistics::repository::Repository; + use crate::tests::{sample_peer_using_ipv4, sample_peer_using_ipv6}; use crate::CurrentClock; #[tokio::test] async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { let stats_repository = Repository::new(); + let peer = sample_peer_using_ipv4(); handle_event( Event::TcpAnnounce { @@ -92,6 +94,8 @@ mod tests { Some(8080), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), + announced_peer: peer, + added_peer: peer, }, &stats_repository, CurrentClock::now(), @@ -128,6 +132,7 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp6_announces_counter_when_it_receives_a_tcp6_announce_event() { let stats_repository = Repository::new(); + let peer = sample_peer_using_ipv6(); handle_event( Event::TcpAnnounce { @@ -136,6 +141,8 @@ mod tests { Some(8080), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), + announced_peer: peer, + added_peer: peer, }, &stats_repository, CurrentClock::now(), From 5d479b79030050aad0692af80be2212197c8f195 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 22 Apr 2025 12:05:25 +0100 Subject: [PATCH 471/802] chore: add logs for sending TcpScrape event --- packages/http-tracker-core/src/services/scrape.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index baa406e63..072c76bb7 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -126,11 +126,13 @@ impl ScrapeService { server_service_binding: ServiceBinding, ) { if let 
Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { - http_stats_event_sender - .send_event(Event::TcpScrape { - connection: ConnectionContext::new(original_peer_ip, opt_original_peer_port, server_service_binding), - }) - .await; + let event = Event::TcpScrape { + connection: ConnectionContext::new(original_peer_ip, opt_original_peer_port, server_service_binding), + }; + + tracing::debug!("Sending TcpScrape event: {:?}", event); + + http_stats_event_sender.send_event(event).await; } } } From 4566ad58ee42ce0d6d56657a335ee98d4ef86b8b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 22 Apr 2025 12:13:31 +0100 Subject: [PATCH 472/802] test: add test for Peer comparison --- packages/primitives/src/peer.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index c8ff1791d..20fc4bcb4 100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -513,6 +513,22 @@ pub mod fixture { #[cfg(test)] pub mod test { + + mod peer { + use crate::peer::fixture::PeerBuilder; + + #[test] + fn should_be_comparable() { + let seeder1 = PeerBuilder::seeder().build(); + let seeder2 = PeerBuilder::seeder().build(); + + let leecher1 = PeerBuilder::leecher().build(); + + assert!(seeder1 == seeder2); + assert!(seeder1 != leecher1); + } + } + mod torrent_peer_id { use aquatic_udp_protocol::PeerId; From 92f049e786dac3999aa2d15a18ee9d1fae4ba1ba Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 22 Apr 2025 12:24:42 +0100 Subject: [PATCH 473/802] tests: add tests for HTTP tracker core events comparison This test has been added becuase this code was not working: ```rust let mut announced_peer = peer_copy; announced_peer.peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); let mut added_peer = peer; added_peer.peer_addr = SocketAddr::new( IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), 8080, ); 
let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() .with(eq(Event::TcpAnnounce { connection: ConnectionContext::new( IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), Some(8080), server_service_binding, ), announced_peer: peer, added_peer: peer, })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); ``` using the same events: Event sent: TcpAnnounce { connection: ConnectionContext { client: ClientConnectionContext { ip_addr: 127.0.0.1, port: Some(8080) }, server: ServerConnectionContext { service_binding: ServiceBinding { protocol: HTTP, bind_address: 127.0.0.1:7070 } } }, announced_peer: Peer { peer_id: PeerId([45, 113, 66, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48]), peer_addr: 127.0.0.1:8080, updated: 1745316858.487824645s, uploaded: NumberOfBytes(I64(0)), downloaded: NumberOfBytes(I64(0)), left: NumberOfBytes(I64(0)), event: Started }, added_peer: Peer { peer_id: PeerId([45, 113, 66, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48]), peer_addr: [6969:6969:6969:6969:6969:6969:6969:6969]:8080, updated: 1745316858.487824645s, uploaded: NumberOfBytes(I64(0)), downloaded: NumberOfBytes(I64(0)), left: NumberOfBytes(I64(0)), event: Started } } Event expected in the mock: TcpAnnounce { connection: ConnectionContext { client: ClientConnectionContext { ip_addr: 127.0.0.1, port: Some(8080) }, server: ServerConnectionContext { service_binding: ServiceBinding { protocol: HTTP, bind_address: 127.0.0.1:7070 } } }, announced_peer: Peer { peer_id: PeerId([45, 113, 66, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48]), peer_addr: 127.0.0.1:8080, updated: 1745316858.487824645s, uploaded: NumberOfBytes(I64(0)), downloaded: NumberOfBytes(I64(0)), left: NumberOfBytes(I64(0)), event: Started }, added_peer: Peer { peer_id: PeerId([45, 113, 66, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48]), peer_addr: 
[6969:6969:6969:6969:6969:6969:6969:6969]:8080, updated: 1745316858.487824645s, uploaded: NumberOfBytes(I64(0)), downloaded: NumberOfBytes(I64(0)), left: NumberOfBytes(I64(0)), event: Started } } That's one of the reasons why the expectation was changed. The other reason is the only relevant part for the peer in the test is the updated peer address. --- packages/http-tracker-core/src/event/mod.rs | 41 +++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/packages/http-tracker-core/src/event/mod.rs b/packages/http-tracker-core/src/event/mod.rs index 0490fe2be..2eac2b9d6 100644 --- a/packages/http-tracker-core/src/event/mod.rs +++ b/packages/http-tracker-core/src/event/mod.rs @@ -94,3 +94,44 @@ impl From for LabelSet { ]) } } + +#[cfg(test)] +pub mod test { + + use torrust_tracker_primitives::peer::Peer; + use torrust_tracker_primitives::service_binding::Protocol; + + #[test] + fn events_should_be_comparable() { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use torrust_tracker_primitives::service_binding::ServiceBinding; + + use crate::event::{ConnectionContext, Event}; + + let event1 = Event::TcpAnnounce { + connection: ConnectionContext::new( + IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), + Some(8080), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), + ), + announced_peer: Peer::default(), + added_peer: Peer::default(), + }; + + let event2 = Event::TcpAnnounce { + connection: ConnectionContext::new( + IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), + Some(8080), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), + ), + announced_peer: Peer::default(), + added_peer: Peer::default(), + }; + + let event1_clone = event1.clone(); + + assert!(event1 == event1_clone); + assert!(event1 != event2); + } +} From 657a5d0c0ac1c7094d59e0c8de6783b38b9d0eb8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 22 Apr 2025 16:36:21 +0100 Subject: 
[PATCH 474/802] refactor: [#1376] remove initial peer state from event. Only the peer IP may change before adding the peer to the swarm, and the original remote client ip is already included in the `ConnectionCotext`. --- packages/http-tracker-core/src/event/mod.rs | 22 ++-- .../src/services/announce.rs | 100 ++++++------------ .../src/statistics/event/handler.rs | 12 +-- packages/primitives/src/peer.rs | 2 + 4 files changed, 46 insertions(+), 90 deletions(-) diff --git a/packages/http-tracker-core/src/event/mod.rs b/packages/http-tracker-core/src/event/mod.rs index 2eac2b9d6..5e2a0f384 100644 --- a/packages/http-tracker-core/src/event/mod.rs +++ b/packages/http-tracker-core/src/event/mod.rs @@ -2,7 +2,7 @@ use std::net::{IpAddr, SocketAddr}; use torrust_tracker_metrics::label::{LabelSet, LabelValue}; use torrust_tracker_metrics::label_name; -use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::peer::PeerAnnouncement; use torrust_tracker_primitives::service_binding::ServiceBinding; pub mod sender; @@ -12,15 +12,7 @@ pub mod sender; pub enum Event { TcpAnnounce { connection: ConnectionContext, - - /// The peer that is announcing itself to the tracker. - announced_peer: Peer, - - /// The peer that is added to the tracker. - /// - /// It might not be the same as the `announced_peer` because the tracker - /// can change the peer's IP address. 
- added_peer: Peer, + announcement: PeerAnnouncement, }, TcpScrape { connection: ConnectionContext, @@ -109,14 +101,15 @@ pub mod test { use crate::event::{ConnectionContext, Event}; + let remote_client_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + let event1 = Event::TcpAnnounce { connection: ConnectionContext::new( - IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), + remote_client_ip, Some(8080), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), - announced_peer: Peer::default(), - added_peer: Peer::default(), + announcement: Peer::default(), }; let event2 = Event::TcpAnnounce { @@ -125,8 +118,7 @@ pub mod test { Some(8080), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), - announced_peer: Peer::default(), - added_peer: Peer::default(), + announcement: Peer::default(), }; let event1_clone = event1.clone(); diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 3ac4de1b9..c27d3dbee 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -21,7 +21,7 @@ use bittorrent_tracker_core::error::{AnnounceError, TrackerCoreError, WhitelistE use bittorrent_tracker_core::whitelist; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::AnnounceData; -use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::peer::PeerAnnouncement; use torrust_tracker_primitives::service_binding::ServiceBinding; use crate::event; @@ -78,12 +78,10 @@ impl AnnounceService { self.authorize(announce_request.info_hash).await?; - let (remote_client_ip, opt_remote_client_port) = self.resolve_remote_client_address(client_ip_sources)?; + let (remote_client_ip, opt_remote_client_port) = self.resolve_remote_client_ip(client_ip_sources)?; let mut peer = peer_from_request(announce_request, 
&remote_client_ip); - let announced_peer = peer; - let peers_wanted = Self::peers_wanted(announce_request); let announce_data = self @@ -91,16 +89,8 @@ impl AnnounceService { .announce(&announce_request.info_hash, &mut peer, &remote_client_ip, &peers_wanted) .await?; - let added_peer = peer; - - self.send_event( - remote_client_ip, - opt_remote_client_port, - server_service_binding.clone(), - announced_peer, - added_peer, - ) - .await; + self.send_event(remote_client_ip, opt_remote_client_port, server_service_binding.clone(), peer) + .await; Ok(announce_data) } @@ -122,7 +112,7 @@ impl AnnounceService { } /// Resolves the client's real IP address considering proxy headers - fn resolve_remote_client_address( + fn resolve_remote_client_ip( &self, client_ip_sources: &ClientIpSources, ) -> Result<(IpAddr, Option), PeerIpResolutionError> { @@ -152,17 +142,15 @@ impl AnnounceService { async fn send_event( &self, - peer_ip: IpAddr, + remote_client_ip: IpAddr, opt_peer_ip_port: Option, server_service_binding: ServiceBinding, - announced_peer: Peer, - added_peer: Peer, + announcement: PeerAnnouncement, ) { if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { let event = Event::TcpAnnounce { - connection: event::ConnectionContext::new(peer_ip, opt_peer_ip_port, server_service_binding), - announced_peer, - added_peer, + connection: event::ConnectionContext::new(remote_client_ip, opt_peer_ip_port, server_service_binding), + announcement, }; tracing::debug!("Sending TcpAnnounce event: {:?}", event); @@ -389,6 +377,7 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let peer = sample_peer_using_ipv4(); + let remote_client_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); let server_service_binding_clone = server_service_binding.clone(); let peer_copy = peer; @@ -400,32 +389,25 @@ mod tests { let mut 
announced_peer = peer_copy; announced_peer.peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); - let mut added_peer = peer; - added_peer.peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); + let mut announcement = peer; + announcement.peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let expected_event = Event::TcpAnnounce { - connection: ConnectionContext::new( - IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), - Some(8080), - server_service_binding.clone(), - ), - announced_peer, - added_peer, + connection: ConnectionContext::new(remote_client_ip, Some(8080), server_service_binding.clone()), + announcement, }; match (event, expected_event) { ( Event::TcpAnnounce { connection: a_conn, - announced_peer: a1, - added_peer: a2, + announcement: a2, }, Event::TcpAnnounce { connection: b_conn, - announced_peer: b1, - added_peer: b2, + announcement: b2, }, - ) => *a_conn == b_conn && a1.peer_addr == b1.peer_addr && a2.peer_addr == b2.peer_addr, + ) => *a_conn == b_conn && a2.peer_addr == b2.peer_addr, _ => false, } })) @@ -477,6 +459,7 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let peer = peer_with_the_ipv4_loopback_ip(); + let remote_client_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); let server_service_binding_clone = server_service_binding.clone(); let peer_copy = peer; @@ -488,35 +471,28 @@ mod tests { let mut announced_peer = peer_copy; announced_peer.peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); - let mut added_peer = peer; - added_peer.peer_addr = SocketAddr::new( + let mut peer_announcement = peer; + peer_announcement.peer_addr = SocketAddr::new( IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), 8080, ); let expected_event = Event::TcpAnnounce { - connection: 
ConnectionContext::new( - IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), - Some(8080), - server_service_binding.clone(), - ), - announced_peer, - added_peer, + connection: ConnectionContext::new(remote_client_ip, Some(8080), server_service_binding.clone()), + announcement: peer_announcement, }; match (event, expected_event) { ( Event::TcpAnnounce { connection: a_conn, - announced_peer: a1, - added_peer: a2, + announcement: a2, }, Event::TcpAnnounce { connection: b_conn, - announced_peer: b1, - added_peer: b2, + announcement: b2, }, - ) => *a_conn == b_conn && a1.peer_addr == b1.peer_addr && a2.peer_addr == b2.peer_addr, + ) => *a_conn == b_conn && a2.peer_addr == b2.peer_addr, _ => false, } })) @@ -553,42 +529,28 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let peer = sample_peer_using_ipv6(); - - let peer_copy = peer; + let remote_client_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock .expect_send_event() .with(predicate::function(move |event| { - let announced_peer = peer_copy; - //announced_peer.peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); - - let added_peer = peer; - //added_peer.peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); - let expected_event = Event::TcpAnnounce { - connection: ConnectionContext::new( - IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), - Some(8080), - server_service_binding.clone(), - ), - announced_peer, - added_peer, + connection: ConnectionContext::new(remote_client_ip, Some(8080), server_service_binding.clone()), + announcement: peer, }; match (event, expected_event) { ( Event::TcpAnnounce { connection: a_conn, - announced_peer: a1, - 
added_peer: a2, + announcement: a2, }, Event::TcpAnnounce { connection: b_conn, - announced_peer: b1, - added_peer: b2, + announcement: b2, }, - ) => *a_conn == b_conn && a1.peer_addr == b1.peer_addr && a2.peer_addr == b2.peer_addr, + ) => *a_conn == b_conn && a2.peer_addr == b2.peer_addr, _ => false, } })) diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index b24ddfde6..df8f29175 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -86,16 +86,16 @@ mod tests { async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { let stats_repository = Repository::new(); let peer = sample_peer_using_ipv4(); + let remote_client_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)); handle_event( Event::TcpAnnounce { connection: ConnectionContext::new( - IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), + remote_client_ip, Some(8080), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), - announced_peer: peer, - added_peer: peer, + announcement: peer, }, &stats_repository, CurrentClock::now(), @@ -133,16 +133,16 @@ mod tests { async fn should_increase_the_tcp6_announces_counter_when_it_receives_a_tcp6_announce_event() { let stats_repository = Repository::new(); let peer = sample_peer_using_ipv6(); + let remote_client_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); handle_event( Event::TcpAnnounce { connection: ConnectionContext::new( - IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + remote_client_ip, Some(8080), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), - announced_peer: peer, - added_peer: peer, + announcement: peer, }, &stats_repository, CurrentClock::now(), diff --git 
a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index 20fc4bcb4..bd753b220 100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -32,6 +32,8 @@ use zerocopy::FromBytes as _; use crate::DurationSinceUnixEpoch; +pub type PeerAnnouncement = Peer; + /// Peer struct used by the core `Tracker`. /// /// A sample peer: From bc02e9bd0690f46b19e9bdd06b53fb4795512e98 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 22 Apr 2025 16:54:17 +0100 Subject: [PATCH 475/802] feat: [#1376] add info-hash to bittorrent_http_tracker_core::event::TcpAnnounce --- packages/http-tracker-core/src/event/mod.rs | 31 ++++++++++ .../src/services/announce.rs | 61 ++++++------------- .../src/statistics/event/handler.rs | 4 +- 3 files changed, 52 insertions(+), 44 deletions(-) diff --git a/packages/http-tracker-core/src/event/mod.rs b/packages/http-tracker-core/src/event/mod.rs index 5e2a0f384..9f7635ffe 100644 --- a/packages/http-tracker-core/src/event/mod.rs +++ b/packages/http-tracker-core/src/event/mod.rs @@ -1,5 +1,6 @@ use std::net::{IpAddr, SocketAddr}; +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_metrics::label::{LabelSet, LabelValue}; use torrust_tracker_metrics::label_name; use torrust_tracker_primitives::peer::PeerAnnouncement; @@ -12,6 +13,7 @@ pub mod sender; pub enum Event { TcpAnnounce { connection: ConnectionContext, + info_hash: InfoHash, announcement: PeerAnnouncement, }, TcpScrape { @@ -93,6 +95,32 @@ pub mod test { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::service_binding::Protocol; + use super::Event; + use crate::tests::sample_info_hash; + + #[must_use] + pub fn events_match(event: &Event, expected_event: &Event) -> bool { + match (event, expected_event) { + ( + Event::TcpAnnounce { + connection, + info_hash, + announcement, + }, + Event::TcpAnnounce { + connection: expected_connection, + info_hash: expected_info_hash, + announcement: expected_announcement, + }, + ) 
=> { + *connection == *expected_connection + && *info_hash == *expected_info_hash + && announcement.peer_addr == expected_announcement.peer_addr + } + _ => false, + } + } + #[test] fn events_should_be_comparable() { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; @@ -102,6 +130,7 @@ pub mod test { use crate::event::{ConnectionContext, Event}; let remote_client_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + let info_hash = sample_info_hash(); let event1 = Event::TcpAnnounce { connection: ConnectionContext::new( @@ -109,6 +138,7 @@ pub mod test { Some(8080), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), + info_hash, announcement: Peer::default(), }; @@ -118,6 +148,7 @@ pub mod test { Some(8080), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), + info_hash, announcement: Peer::default(), }; diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index c27d3dbee..8dedeade7 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -89,8 +89,14 @@ impl AnnounceService { .announce(&announce_request.info_hash, &mut peer, &remote_client_ip, &peers_wanted) .await?; - self.send_event(remote_client_ip, opt_remote_client_port, server_service_binding.clone(), peer) - .await; + self.send_event( + announce_request.info_hash, + remote_client_ip, + opt_remote_client_port, + server_service_binding.clone(), + peer, + ) + .await; Ok(announce_data) } @@ -142,6 +148,7 @@ impl AnnounceService { async fn send_event( &self, + info_hash: InfoHash, remote_client_ip: IpAddr, opt_peer_ip_port: Option, server_service_binding: ServiceBinding, @@ -150,6 +157,7 @@ impl AnnounceService { if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { let event = Event::TcpAnnounce { connection: 
event::ConnectionContext::new(remote_client_ip, opt_peer_ip_port, server_service_binding), + info_hash, announcement, }; @@ -327,13 +335,14 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::event; + use crate::event::test::events_match; use crate::event::{ConnectionContext, Event}; use crate::services::announce::tests::{ initialize_core_tracker_services, initialize_core_tracker_services_with_config, sample_announce_request_for_peer, MockHttpStatsEventSender, }; use crate::services::announce::AnnounceService; - use crate::tests::{sample_peer, sample_peer_using_ipv4, sample_peer_using_ipv6}; + use crate::tests::{sample_info_hash, sample_peer, sample_peer_using_ipv4, sample_peer_using_ipv6}; #[tokio::test] async fn it_should_return_the_announce_data() { @@ -394,22 +403,11 @@ mod tests { let expected_event = Event::TcpAnnounce { connection: ConnectionContext::new(remote_client_ip, Some(8080), server_service_binding.clone()), + info_hash: sample_info_hash(), announcement, }; - match (event, expected_event) { - ( - Event::TcpAnnounce { - connection: a_conn, - announcement: a2, - }, - Event::TcpAnnounce { - connection: b_conn, - announcement: b2, - }, - ) => *a_conn == b_conn && a2.peer_addr == b2.peer_addr, - _ => false, - } + events_match(event, &expected_event) })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -479,22 +477,11 @@ mod tests { let expected_event = Event::TcpAnnounce { connection: ConnectionContext::new(remote_client_ip, Some(8080), server_service_binding.clone()), + info_hash: sample_info_hash(), announcement: peer_announcement, }; - match (event, expected_event) { - ( - Event::TcpAnnounce { - connection: a_conn, - announcement: a2, - }, - Event::TcpAnnounce { - connection: b_conn, - announcement: b2, - }, - ) => *a_conn == b_conn && a2.peer_addr == b2.peer_addr, - _ => false, - } + events_match(event, &expected_event) })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -537,22 +524,10 @@ 
mod tests { .with(predicate::function(move |event| { let expected_event = Event::TcpAnnounce { connection: ConnectionContext::new(remote_client_ip, Some(8080), server_service_binding.clone()), + info_hash: sample_info_hash(), announcement: peer, }; - - match (event, expected_event) { - ( - Event::TcpAnnounce { - connection: a_conn, - announcement: a2, - }, - Event::TcpAnnounce { - connection: b_conn, - announcement: b2, - }, - ) => *a_conn == b_conn && a2.peer_addr == b2.peer_addr, - _ => false, - } + events_match(event, &expected_event) })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index df8f29175..6dce6c4f4 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -79,7 +79,7 @@ mod tests { use crate::event::{ConnectionContext, Event}; use crate::statistics::event::handler::handle_event; use crate::statistics::repository::Repository; - use crate::tests::{sample_peer_using_ipv4, sample_peer_using_ipv6}; + use crate::tests::{sample_info_hash, sample_peer_using_ipv4, sample_peer_using_ipv6}; use crate::CurrentClock; #[tokio::test] @@ -95,6 +95,7 @@ mod tests { Some(8080), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), + info_hash: sample_info_hash(), announcement: peer, }, &stats_repository, @@ -142,6 +143,7 @@ mod tests { Some(8080), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), + info_hash: sample_info_hash(), announcement: peer, }, &stats_repository, From 7ed750020af1a2b5ba075714c765db2e51d410b4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 22 Apr 2025 17:05:14 +0100 Subject: [PATCH 476/802] refactor: extract fn for duplicate code --- .../src/services/announce.rs | 27 +++-------------- 
.../http-tracker-core/src/services/mod.rs | 29 +++++++++++++++++++ .../http-tracker-core/src/services/scrape.rs | 24 +++------------ 3 files changed, 37 insertions(+), 43 deletions(-) diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 8dedeade7..8d52302de 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -12,7 +12,7 @@ use std::panic::Location; use std::sync::Arc; use bittorrent_http_tracker_protocol::v1::requests::announce::{peer_from_request, Announce}; -use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources, PeerIpResolutionError}; +use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{ClientIpSources, PeerIpResolutionError}; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; use bittorrent_tracker_core::authentication::service::AuthenticationService; @@ -24,6 +24,7 @@ use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer::PeerAnnouncement; use torrust_tracker_primitives::service_binding::ServiceBinding; +use super::resolve_remote_client_ip; use crate::event; use crate::event::Event; @@ -78,7 +79,8 @@ impl AnnounceService { self.authorize(announce_request.info_hash).await?; - let (remote_client_ip, opt_remote_client_port) = self.resolve_remote_client_ip(client_ip_sources)?; + let (remote_client_ip, opt_remote_client_port) = + resolve_remote_client_ip(self.core_config.net.on_reverse_proxy, client_ip_sources)?; let mut peer = peer_from_request(announce_request, &remote_client_ip); @@ -117,27 +119,6 @@ impl AnnounceService { self.whitelist_authorization.authorize(&info_hash).await } - /// Resolves the client's real IP address considering proxy headers - fn resolve_remote_client_ip( - &self, - client_ip_sources: &ClientIpSources, - ) -> Result<(IpAddr, 
Option), PeerIpResolutionError> { - let ip = match peer_ip_resolver::invoke(self.core_config.net.on_reverse_proxy, client_ip_sources) { - Ok(peer_ip) => Ok(peer_ip), - Err(error) => Err(error), - }?; - - let port = if client_ip_sources.connection_info_socket_address.is_some() { - client_ip_sources - .connection_info_socket_address - .map(|socket_addr| socket_addr.port()) - } else { - None - }; - - Ok((ip, port)) - } - /// Determines how many peers the client wants in the response fn peers_wanted(announce_request: &Announce) -> PeersWanted { match announce_request.numwant { diff --git a/packages/http-tracker-core/src/services/mod.rs b/packages/http-tracker-core/src/services/mod.rs index ce99c6856..ad127324a 100644 --- a/packages/http-tracker-core/src/services/mod.rs +++ b/packages/http-tracker-core/src/services/mod.rs @@ -5,5 +5,34 @@ //! servers. //! //! Refer to [`torrust_tracker`](crate) documentation. + +use std::net::IpAddr; + +use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources, PeerIpResolutionError}; pub mod announce; pub mod scrape; + +/// Resolves the client's real IP address considering proxy headers +/// +/// # Errors +/// +/// This function returns an error if the IP address cannot be resolved. 
+pub fn resolve_remote_client_ip( + on_reverse_proxy: bool, + client_ip_sources: &ClientIpSources, +) -> Result<(IpAddr, Option), PeerIpResolutionError> { + let ip = match peer_ip_resolver::invoke(on_reverse_proxy, client_ip_sources) { + Ok(peer_ip) => Ok(peer_ip), + Err(error) => Err(error), + }?; + + let port = if client_ip_sources.connection_info_socket_address.is_some() { + client_ip_sources + .connection_info_socket_address + .map(|socket_addr| socket_addr.port()) + } else { + None + }; + + Ok((ip, port)) +} diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 072c76bb7..b7fc5a813 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -11,7 +11,7 @@ use std::net::IpAddr; use std::sync::Arc; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; -use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources, PeerIpResolutionError}; +use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{ClientIpSources, PeerIpResolutionError}; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::authentication::{self, Key}; use bittorrent_tracker_core::error::{ScrapeError, TrackerCoreError, WhitelistError}; @@ -20,6 +20,7 @@ use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::service_binding::ServiceBinding; +use super::resolve_remote_client_ip; use crate::event; use crate::event::{ConnectionContext, Event}; @@ -81,7 +82,8 @@ impl ScrapeService { self.scrape_handler.scrape(&scrape_request.info_hashes).await? 
}; - let (remote_client_ip, opt_client_port) = self.resolve_remote_client_ip(client_ip_sources)?; + let (remote_client_ip, opt_client_port) = + resolve_remote_client_ip(self.core_config.net.on_reverse_proxy, client_ip_sources)?; self.send_event(remote_client_ip, opt_client_port, server_service_binding.clone()) .await; @@ -101,24 +103,6 @@ impl ScrapeService { false } - /// Resolves the client's real IP address considering proxy headers. - fn resolve_remote_client_ip( - &self, - client_ip_sources: &ClientIpSources, - ) -> Result<(IpAddr, Option), PeerIpResolutionError> { - let ip = peer_ip_resolver::invoke(self.core_config.net.on_reverse_proxy, client_ip_sources)?; - - let port = if client_ip_sources.connection_info_socket_address.is_some() { - client_ip_sources - .connection_info_socket_address - .map(|socket_addr| socket_addr.port()) - } else { - None - }; - - Ok((ip, port)) - } - async fn send_event( &self, original_peer_ip: IpAddr, From a422e4ee737aec0ea78aa361b2fc36bbe45733a1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 22 Apr 2025 17:20:14 +0100 Subject: [PATCH 477/802] refactor: extract type RemoteClientAddr --- packages/http-tracker-core/src/event/mod.rs | 35 ++++++++++------- .../src/services/announce.rs | 34 ++++++++++------- .../http-tracker-core/src/services/mod.rs | 19 ++++++++-- .../http-tracker-core/src/services/scrape.rs | 38 +++++++++---------- .../src/statistics/event/handler.rs | 16 ++++---- 5 files changed, 82 insertions(+), 60 deletions(-) diff --git a/packages/http-tracker-core/src/event/mod.rs b/packages/http-tracker-core/src/event/mod.rs index 9f7635ffe..07d27127a 100644 --- a/packages/http-tracker-core/src/event/mod.rs +++ b/packages/http-tracker-core/src/event/mod.rs @@ -6,6 +6,8 @@ use torrust_tracker_metrics::label_name; use torrust_tracker_primitives::peer::PeerAnnouncement; use torrust_tracker_primitives::service_binding::ServiceBinding; +use crate::services::RemoteClientAddr; + pub mod sender; /// A HTTP core event. 
@@ -29,12 +31,9 @@ pub struct ConnectionContext { impl ConnectionContext { #[must_use] - pub fn new(client_ip_addr: IpAddr, opt_client_port: Option, server_service_binding: ServiceBinding) -> Self { + pub fn new(remote_client_addr: RemoteClientAddr, server_service_binding: ServiceBinding) -> Self { Self { - client: ClientConnectionContext { - ip_addr: client_ip_addr, - port: opt_client_port, - }, + client: ClientConnectionContext { remote_client_addr }, server: ServerConnectionContext { service_binding: server_service_binding, }, @@ -43,12 +42,12 @@ impl ConnectionContext { #[must_use] pub fn client_ip_addr(&self) -> IpAddr { - self.client.ip_addr + self.client.ip_addr() } #[must_use] pub fn client_port(&self) -> Option { - self.client.port + self.client.port() } #[must_use] @@ -59,10 +58,19 @@ impl ConnectionContext { #[derive(Debug, PartialEq, Eq, Clone)] pub struct ClientConnectionContext { - ip_addr: IpAddr, + remote_client_addr: RemoteClientAddr, +} + +impl ClientConnectionContext { + #[must_use] + pub fn ip_addr(&self) -> IpAddr { + self.remote_client_addr.ip + } - /// It's provided if you use the `torrust-axum-http-tracker-server` crate. 
- port: Option, + #[must_use] + pub fn port(&self) -> Option { + self.remote_client_addr.port + } } #[derive(Debug, PartialEq, Eq, Clone)] @@ -96,6 +104,7 @@ pub mod test { use torrust_tracker_primitives::service_binding::Protocol; use super::Event; + use crate::services::RemoteClientAddr; use crate::tests::sample_info_hash; #[must_use] @@ -134,8 +143,7 @@ pub mod test { let event1 = Event::TcpAnnounce { connection: ConnectionContext::new( - remote_client_ip, - Some(8080), + RemoteClientAddr::new(remote_client_ip, Some(8080)), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), info_hash, @@ -144,8 +152,7 @@ pub mod test { let event2 = Event::TcpAnnounce { connection: ConnectionContext::new( - IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), - Some(8080), + RemoteClientAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), Some(8080)), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), info_hash, diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 8d52302de..a0c31585e 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -7,7 +7,6 @@ //! //! It also sends an [`http_tracker_core::event::Event`] //! because events are specific for the HTTP tracker. 
-use std::net::IpAddr; use std::panic::Location; use std::sync::Arc; @@ -24,7 +23,7 @@ use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer::PeerAnnouncement; use torrust_tracker_primitives::service_binding::ServiceBinding; -use super::resolve_remote_client_ip; +use super::{resolve_remote_client_addr, RemoteClientAddr}; use crate::event; use crate::event::Event; @@ -79,22 +78,20 @@ impl AnnounceService { self.authorize(announce_request.info_hash).await?; - let (remote_client_ip, opt_remote_client_port) = - resolve_remote_client_ip(self.core_config.net.on_reverse_proxy, client_ip_sources)?; + let remote_client_addr = resolve_remote_client_addr(self.core_config.net.on_reverse_proxy, client_ip_sources)?; - let mut peer = peer_from_request(announce_request, &remote_client_ip); + let mut peer = peer_from_request(announce_request, &remote_client_addr.ip); let peers_wanted = Self::peers_wanted(announce_request); let announce_data = self .announce_handler - .announce(&announce_request.info_hash, &mut peer, &remote_client_ip, &peers_wanted) + .announce(&announce_request.info_hash, &mut peer, &remote_client_addr.ip, &peers_wanted) .await?; self.send_event( announce_request.info_hash, - remote_client_ip, - opt_remote_client_port, + remote_client_addr, server_service_binding.clone(), peer, ) @@ -130,14 +127,13 @@ impl AnnounceService { async fn send_event( &self, info_hash: InfoHash, - remote_client_ip: IpAddr, - opt_peer_ip_port: Option, + remote_client_addr: RemoteClientAddr, server_service_binding: ServiceBinding, announcement: PeerAnnouncement, ) { if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { let event = Event::TcpAnnounce { - connection: event::ConnectionContext::new(remote_client_ip, opt_peer_ip_port, server_service_binding), + connection: event::ConnectionContext::new(remote_client_addr, server_service_binding), info_hash, announcement, }; @@ -323,6 +319,7 @@ mod tests { 
MockHttpStatsEventSender, }; use crate::services::announce::AnnounceService; + use crate::services::RemoteClientAddr; use crate::tests::{sample_info_hash, sample_peer, sample_peer_using_ipv4, sample_peer_using_ipv6}; #[tokio::test] @@ -383,7 +380,10 @@ mod tests { announcement.peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let expected_event = Event::TcpAnnounce { - connection: ConnectionContext::new(remote_client_ip, Some(8080), server_service_binding.clone()), + connection: ConnectionContext::new( + RemoteClientAddr::new(remote_client_ip, Some(8080)), + server_service_binding.clone(), + ), info_hash: sample_info_hash(), announcement, }; @@ -457,7 +457,10 @@ mod tests { ); let expected_event = Event::TcpAnnounce { - connection: ConnectionContext::new(remote_client_ip, Some(8080), server_service_binding.clone()), + connection: ConnectionContext::new( + RemoteClientAddr::new(remote_client_ip, Some(8080)), + server_service_binding.clone(), + ), info_hash: sample_info_hash(), announcement: peer_announcement, }; @@ -504,7 +507,10 @@ mod tests { .expect_send_event() .with(predicate::function(move |event| { let expected_event = Event::TcpAnnounce { - connection: ConnectionContext::new(remote_client_ip, Some(8080), server_service_binding.clone()), + connection: ConnectionContext::new( + RemoteClientAddr::new(remote_client_ip, Some(8080)), + server_service_binding.clone(), + ), info_hash: sample_info_hash(), announcement: peer, }; diff --git a/packages/http-tracker-core/src/services/mod.rs b/packages/http-tracker-core/src/services/mod.rs index ad127324a..5ec6dd22d 100644 --- a/packages/http-tracker-core/src/services/mod.rs +++ b/packages/http-tracker-core/src/services/mod.rs @@ -12,15 +12,28 @@ use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{self, Cli pub mod announce; pub mod scrape; +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct RemoteClientAddr { + pub ip: IpAddr, + pub port: Option, +} + +impl RemoteClientAddr { 
+ #[must_use] + pub fn new(ip: IpAddr, port: Option) -> Self { + Self { ip, port } + } +} + /// Resolves the client's real IP address considering proxy headers /// /// # Errors /// /// This function returns an error if the IP address cannot be resolved. -pub fn resolve_remote_client_ip( +pub fn resolve_remote_client_addr( on_reverse_proxy: bool, client_ip_sources: &ClientIpSources, -) -> Result<(IpAddr, Option), PeerIpResolutionError> { +) -> Result { let ip = match peer_ip_resolver::invoke(on_reverse_proxy, client_ip_sources) { Ok(peer_ip) => Ok(peer_ip), Err(error) => Err(error), @@ -34,5 +47,5 @@ pub fn resolve_remote_client_ip( None }; - Ok((ip, port)) + Ok(RemoteClientAddr { ip, port }) } diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index b7fc5a813..e206b909c 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -7,7 +7,6 @@ //! //! It also sends an [`http_tracker_core::statistics::event::Event`] //! because events are specific for the HTTP tracker. -use std::net::IpAddr; use std::sync::Arc; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; @@ -20,7 +19,7 @@ use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::service_binding::ServiceBinding; -use super::resolve_remote_client_ip; +use super::{resolve_remote_client_addr, RemoteClientAddr}; use crate::event; use crate::event::{ConnectionContext, Event}; @@ -82,11 +81,9 @@ impl ScrapeService { self.scrape_handler.scrape(&scrape_request.info_hashes).await? 
}; - let (remote_client_ip, opt_client_port) = - resolve_remote_client_ip(self.core_config.net.on_reverse_proxy, client_ip_sources)?; + let remote_client_addr = resolve_remote_client_addr(self.core_config.net.on_reverse_proxy, client_ip_sources)?; - self.send_event(remote_client_ip, opt_client_port, server_service_binding.clone()) - .await; + self.send_event(remote_client_addr, server_service_binding.clone()).await; Ok(scrape_data) } @@ -103,15 +100,10 @@ impl ScrapeService { false } - async fn send_event( - &self, - original_peer_ip: IpAddr, - opt_original_peer_port: Option, - server_service_binding: ServiceBinding, - ) { + async fn send_event(&self, remote_client_addr: RemoteClientAddr, server_service_binding: ServiceBinding) { if let Some(http_stats_event_sender) = self.opt_http_stats_event_sender.as_deref() { let event = Event::TcpScrape { - connection: ConnectionContext::new(original_peer_ip, opt_original_peer_port, server_service_binding), + connection: ConnectionContext::new(remote_client_addr, server_service_binding), }; tracing::debug!("Sending TcpScrape event: {:?}", event); @@ -271,6 +263,7 @@ mod tests { initialize_services_with_configuration, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; use crate::services::scrape::ScrapeService; + use crate::services::RemoteClientAddr; use crate::tests::sample_info_hash; use crate::{event, statistics}; @@ -342,8 +335,7 @@ mod tests { .expect_send_event() .with(eq(Event::TcpScrape { connection: ConnectionContext::new( - IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), - Some(8080), + RemoteClientAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), Some(8080)), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)) .unwrap(), ), @@ -394,8 +386,10 @@ mod tests { .expect_send_event() .with(eq(Event::TcpScrape { connection: ConnectionContext::new( - IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), - Some(8080), + 
RemoteClientAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + Some(8080), + ), server_service_binding, ), })) @@ -453,6 +447,7 @@ mod tests { initialize_services_with_configuration, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; use crate::services::scrape::ScrapeService; + use crate::services::RemoteClientAddr; use crate::tests::sample_info_hash; use crate::{event, statistics}; @@ -518,8 +513,7 @@ mod tests { .expect_send_event() .with(eq(Event::TcpScrape { connection: ConnectionContext::new( - IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), - Some(8080), + RemoteClientAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), Some(8080)), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)) .unwrap(), ), @@ -570,8 +564,10 @@ mod tests { .expect_send_event() .with(eq(Event::TcpScrape { connection: ConnectionContext::new( - IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), - Some(8080), + RemoteClientAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + Some(8080), + ), server_service_binding, ), })) diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index 6dce6c4f4..d59c640c1 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -77,6 +77,7 @@ mod tests { use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event}; + use crate::services::RemoteClientAddr; use crate::statistics::event::handler::handle_event; use crate::statistics::repository::Repository; use crate::tests::{sample_info_hash, sample_peer_using_ipv4, sample_peer_using_ipv6}; @@ -91,8 +92,7 @@ mod tests { handle_event( Event::TcpAnnounce { connection: ConnectionContext::new( - 
remote_client_ip, - Some(8080), + RemoteClientAddr::new(remote_client_ip, Some(8080)), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), info_hash: sample_info_hash(), @@ -115,8 +115,7 @@ mod tests { handle_event( Event::TcpScrape { connection: ConnectionContext::new( - IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), - Some(8080), + RemoteClientAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), Some(8080)), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), }, @@ -139,8 +138,7 @@ mod tests { handle_event( Event::TcpAnnounce { connection: ConnectionContext::new( - remote_client_ip, - Some(8080), + RemoteClientAddr::new(remote_client_ip, Some(8080)), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), info_hash: sample_info_hash(), @@ -163,8 +161,10 @@ mod tests { handle_event( Event::TcpScrape { connection: ConnectionContext::new( - IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), - Some(8080), + RemoteClientAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + Some(8080), + ), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), }, From 2670a0a8d3fa57224c0b2343c1ab4eca685e62ac Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 22 Apr 2025 18:11:38 +0100 Subject: [PATCH 478/802] refactor: [#1384] rename fields --- packages/udp-tracker-core/src/event/mod.rs | 6 +++--- .../udp-tracker-core/src/services/announce.rs | 2 +- .../udp-tracker-core/src/services/connect.rs | 6 +++--- .../udp-tracker-core/src/services/scrape.rs | 2 +- .../src/statistics/event/handler.rs | 18 +++++++++--------- .../src/handlers/announce.rs | 2 +- .../udp-tracker-server/src/handlers/connect.rs | 4 ++-- 7 files changed, 20 insertions(+), 20 deletions(-) diff --git 
a/packages/udp-tracker-core/src/event/mod.rs b/packages/udp-tracker-core/src/event/mod.rs index ddcba7792..6785fd34d 100644 --- a/packages/udp-tracker-core/src/event/mod.rs +++ b/packages/udp-tracker-core/src/event/mod.rs @@ -9,9 +9,9 @@ pub mod sender; /// A UDP core event. #[derive(Debug, PartialEq, Eq, Clone)] pub enum Event { - UdpConnect { context: ConnectionContext }, - UdpAnnounce { context: ConnectionContext }, - UdpScrape { context: ConnectionContext }, + UdpConnect { connection: ConnectionContext }, + UdpAnnounce { connection: ConnectionContext }, + UdpScrape { connection: ConnectionContext }, } #[derive(Debug, PartialEq, Eq, Clone)] diff --git a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index 0a9bf6b82..2f2c3e093 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -105,7 +105,7 @@ impl AnnounceService { if let Some(udp_stats_event_sender) = self.opt_udp_core_stats_event_sender.as_deref() { udp_stats_event_sender .send_event(Event::UdpAnnounce { - context: ConnectionContext::new(client_socket_addr, server_service_binding), + connection: ConnectionContext::new(client_socket_addr, server_service_binding), }) .await; } diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index 92bcd299f..df3db6c4b 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -43,7 +43,7 @@ impl ConnectService { if let Some(udp_stats_event_sender) = self.opt_udp_core_stats_event_sender.as_deref() { udp_stats_event_sender .send_event(Event::UdpConnect { - context: ConnectionContext::new(client_socket_addr, server_service_binding), + connection: ConnectionContext::new(client_socket_addr, server_service_binding), }) .await; } @@ -144,7 +144,7 @@ mod tests { udp_stats_event_sender_mock .expect_send_event() 
.with(eq(Event::UdpConnect { - context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + connection: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -168,7 +168,7 @@ mod tests { udp_stats_event_sender_mock .expect_send_event() .with(eq(Event::UdpConnect { - context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + connection: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); diff --git a/packages/udp-tracker-core/src/services/scrape.rs b/packages/udp-tracker-core/src/services/scrape.rs index 6ee64111c..c20e9b16c 100644 --- a/packages/udp-tracker-core/src/services/scrape.rs +++ b/packages/udp-tracker-core/src/services/scrape.rs @@ -87,7 +87,7 @@ impl ScrapeService { if let Some(udp_stats_event_sender) = self.opt_udp_stats_event_sender.as_deref() { udp_stats_event_sender .send_event(Event::UdpScrape { - context: ConnectionContext::new(client_socket_addr, server_service_binding), + connection: ConnectionContext::new(client_socket_addr, server_service_binding), }) .await; } diff --git a/packages/udp-tracker-core/src/statistics/event/handler.rs b/packages/udp-tracker-core/src/statistics/event/handler.rs index 2680c442f..18a331581 100644 --- a/packages/udp-tracker-core/src/statistics/event/handler.rs +++ b/packages/udp-tracker-core/src/statistics/event/handler.rs @@ -11,7 +11,7 @@ use crate::statistics::UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; /// This function panics if the IP version does not match the event type. 
pub async fn handle_event(event: Event, stats_repository: &Repository, now: DurationSinceUnixEpoch) { match event { - Event::UdpConnect { context } => { + Event::UdpConnect { connection: context } => { // Global fixed metrics match context.client_socket_addr.ip() { @@ -36,7 +36,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura Err(err) => tracing::error!("Failed to increase the counter: {}", err), }; } - Event::UdpAnnounce { context } => { + Event::UdpAnnounce { connection: context } => { // Global fixed metrics match context.client_socket_addr.ip() { @@ -61,7 +61,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura Err(err) => tracing::error!("Failed to increase the counter: {}", err), }; } - Event::UdpScrape { context } => { + Event::UdpScrape { connection: context } => { // Global fixed metrics match context.client_socket_addr.ip() { @@ -109,7 +109,7 @@ mod tests { handle_event( Event::UdpConnect { - context: ConnectionContext::new( + connection: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), ServiceBinding::new( Protocol::UDP, @@ -134,7 +134,7 @@ mod tests { handle_event( Event::UdpAnnounce { - context: ConnectionContext::new( + connection: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), ServiceBinding::new( Protocol::UDP, @@ -159,7 +159,7 @@ mod tests { handle_event( Event::UdpScrape { - context: ConnectionContext::new( + connection: ConnectionContext::new( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), ServiceBinding::new( Protocol::UDP, @@ -184,7 +184,7 @@ mod tests { handle_event( Event::UdpConnect { - context: ConnectionContext::new( + connection: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), ServiceBinding::new( Protocol::UDP, @@ -209,7 +209,7 @@ mod tests { handle_event( Event::UdpAnnounce { - context: 
ConnectionContext::new( + connection: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), ServiceBinding::new( Protocol::UDP, @@ -234,7 +234,7 @@ mod tests { handle_event( Event::UdpScrape { - context: ConnectionContext::new( + connection: ConnectionContext::new( SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), ServiceBinding::new( Protocol::UDP, diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 1cf0f0b7d..0020a5f3a 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -868,7 +868,7 @@ mod tests { udp_core_stats_event_sender_mock .expect_send_event() .with(eq(core_event::Event::UdpAnnounce { - context: core_event::ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + connection: core_event::ConnectionContext::new(client_socket_addr, server_service_binding.clone()), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index 88f0b7f3a..aef8833b9 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -197,7 +197,7 @@ mod tests { udp_core_stats_event_sender_mock .expect_send_event() .with(eq(core_event::Event::UdpConnect { - context: core_event::ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + connection: core_event::ConnectionContext::new(client_socket_addr, server_service_binding.clone()), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -239,7 +239,7 @@ mod tests { udp_core_stats_event_sender_mock .expect_send_event() .with(eq(core_event::Event::UdpConnect { - context: core_event::ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + 
connection: core_event::ConnectionContext::new(client_socket_addr, server_service_binding.clone()), })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); From 1477975a10ec233b888d1bd4997738bea95f00ec Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 22 Apr 2025 19:30:07 +0100 Subject: [PATCH 479/802] feat: [#1384] enrich bittorrent_udp_tracker_core::event::Event::UdpAnnounce Added: - info-hash - peer announcement info Following the same change as in the HTTP tracker core. --- packages/udp-tracker-core/src/event/mod.rs | 16 +++++-- packages/udp-tracker-core/src/lib.rs | 15 +++++++ .../udp-tracker-core/src/services/announce.rs | 28 +++++++++--- .../udp-tracker-core/src/services/scrape.rs | 12 ++--- .../src/statistics/event/handler.rs | 8 +++- .../src/handlers/announce.rs | 28 +++++++++--- packages/udp-tracker-server/src/lib.rs | 44 +++++++++++++++++++ 7 files changed, 130 insertions(+), 21 deletions(-) diff --git a/packages/udp-tracker-core/src/event/mod.rs b/packages/udp-tracker-core/src/event/mod.rs index 6785fd34d..1ec502572 100644 --- a/packages/udp-tracker-core/src/event/mod.rs +++ b/packages/udp-tracker-core/src/event/mod.rs @@ -1,7 +1,9 @@ use std::net::SocketAddr; +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_metrics::label::{LabelSet, LabelValue}; use torrust_tracker_metrics::label_name; +use torrust_tracker_primitives::peer::PeerAnnouncement; use torrust_tracker_primitives::service_binding::ServiceBinding; pub mod sender; @@ -9,9 +11,17 @@ pub mod sender; /// A UDP core event. 
#[derive(Debug, PartialEq, Eq, Clone)] pub enum Event { - UdpConnect { connection: ConnectionContext }, - UdpAnnounce { connection: ConnectionContext }, - UdpScrape { connection: ConnectionContext }, + UdpConnect { + connection: ConnectionContext, + }, + UdpAnnounce { + connection: ConnectionContext, + info_hash: InfoHash, + announcement: PeerAnnouncement, + }, + UdpScrape { + connection: ConnectionContext, + }, } #[derive(Debug, PartialEq, Eq, Clone)] diff --git a/packages/udp-tracker-core/src/lib.rs b/packages/udp-tracker-core/src/lib.rs index 8e937e79c..2c1943853 100644 --- a/packages/udp-tracker-core/src/lib.rs +++ b/packages/udp-tracker-core/src/lib.rs @@ -42,3 +42,18 @@ pub fn initialize_static() { // Initialize the Zeroed Cipher lazy_static::initialize(&ephemeral_instance_keys::ZEROED_TEST_CIPHER_BLOWFISH); } + +#[cfg(test)] +pub(crate) mod tests { + use bittorrent_primitives::info_hash::InfoHash; + + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. 
+ #[must_use] + pub fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") + } +} diff --git a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index 2f2c3e093..def24ffd7 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -18,6 +18,7 @@ use bittorrent_tracker_core::error::{AnnounceError, WhitelistError}; use bittorrent_tracker_core::whitelist; use bittorrent_udp_tracker_protocol::peer_builder; use torrust_tracker_primitives::core::AnnounceData; +use torrust_tracker_primitives::peer::PeerAnnouncement; use torrust_tracker_primitives::service_binding::ServiceBinding; use crate::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieError}; @@ -80,7 +81,8 @@ impl AnnounceService { .announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted) .await?; - self.send_event(client_socket_addr, server_service_binding).await; + self.send_event(info_hash, peer, client_socket_addr, server_service_binding) + .await; Ok(announce_data) } @@ -101,13 +103,25 @@ impl AnnounceService { self.whitelist_authorization.authorize(info_hash).await } - async fn send_event(&self, client_socket_addr: SocketAddr, server_service_binding: ServiceBinding) { + async fn send_event( + &self, + info_hash: InfoHash, + announcement: PeerAnnouncement, + client_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, + ) { if let Some(udp_stats_event_sender) = self.opt_udp_core_stats_event_sender.as_deref() { - udp_stats_event_sender - .send_event(Event::UdpAnnounce { - connection: ConnectionContext::new(client_socket_addr, server_service_binding), - }) - .await; + let event = Event::UdpAnnounce { + connection: ConnectionContext::new(client_socket_addr, server_service_binding), + info_hash, + announcement, + }; + + tracing::debug!(target = 
crate::UDP_TRACKER_LOG_TARGET, "Sending UdpAnnounce event: {event:?}"); + + println!("Sending UdpAnnounce event: {event:?}"); + + udp_stats_event_sender.send_event(event).await; } } } diff --git a/packages/udp-tracker-core/src/services/scrape.rs b/packages/udp-tracker-core/src/services/scrape.rs index c20e9b16c..5b2cf7d46 100644 --- a/packages/udp-tracker-core/src/services/scrape.rs +++ b/packages/udp-tracker-core/src/services/scrape.rs @@ -85,11 +85,13 @@ impl ScrapeService { async fn send_event(&self, client_socket_addr: SocketAddr, server_service_binding: ServiceBinding) { if let Some(udp_stats_event_sender) = self.opt_udp_stats_event_sender.as_deref() { - udp_stats_event_sender - .send_event(Event::UdpScrape { - connection: ConnectionContext::new(client_socket_addr, server_service_binding), - }) - .await; + let event = Event::UdpScrape { + connection: ConnectionContext::new(client_socket_addr, server_service_binding), + }; + + tracing::debug!(target = crate::UDP_TRACKER_LOG_TARGET, "Sending UdpScrape event: {event:?}"); + + udp_stats_event_sender.send_event(event).await; } } } diff --git a/packages/udp-tracker-core/src/statistics/event/handler.rs b/packages/udp-tracker-core/src/statistics/event/handler.rs index 18a331581..039b6b0d5 100644 --- a/packages/udp-tracker-core/src/statistics/event/handler.rs +++ b/packages/udp-tracker-core/src/statistics/event/handler.rs @@ -36,7 +36,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura Err(err) => tracing::error!("Failed to increase the counter: {}", err), }; } - Event::UdpAnnounce { connection: context } => { + Event::UdpAnnounce { connection: context, .. 
} => { // Global fixed metrics match context.client_socket_addr.ip() { @@ -96,11 +96,13 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::peer::PeerAnnouncement; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event}; use crate::statistics::event::handler::handle_event; use crate::statistics::repository::Repository; + use crate::tests::sample_info_hash; use crate::CurrentClock; #[tokio::test] @@ -142,6 +144,8 @@ mod tests { ) .unwrap(), ), + info_hash: sample_info_hash(), + announcement: PeerAnnouncement::default(), }, &stats_repository, CurrentClock::now(), @@ -217,6 +221,8 @@ mod tests { ) .unwrap(), ), + info_hash: sample_info_hash(), + announcement: PeerAnnouncement::default(), }, &stats_repository, CurrentClock::now(), diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 0020a5f3a..7e6d42834 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -824,7 +824,7 @@ mod tests { use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use bittorrent_udp_tracker_core::services::announce::AnnounceService; use bittorrent_udp_tracker_core::{self, event as core_event}; - use mockall::predicate::eq; + use mockall::predicate::{self, eq}; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; @@ -834,6 +834,7 @@ mod tests { sample_cookie_valid_range, sample_issue_time, MockUdpCoreStatsEventSender, MockUdpServerStatsEventSender, TrackerConfigurationBuilder, }; + use crate::tests::{announce_events_match, sample_peer}; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { @@ -848,6 +849,11 @@ mod tests { 
let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); + let mut announcement = sample_peer(); + announcement.peer_id = peer_id; + announcement.peer_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x7e00, 1)), client_port); + + println!("announcement.peer_addr: {}", announcement.peer_addr); let client_socket_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let mut server_socket_addr = config.udp_trackers.clone().unwrap()[0].bind_address; @@ -856,6 +862,7 @@ mod tests { server_socket_addr.set_port(6969); } let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + let server_service_binding_clone = server_service_binding.clone(); let database = initialize_database(&config.core); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); @@ -867,8 +874,17 @@ mod tests { let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock .expect_send_event() - .with(eq(core_event::Event::UdpAnnounce { - connection: core_event::ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + .with(predicate::function(move |event| { + let expected_event = core_event::Event::UdpAnnounce { + connection: core_event::ConnectionContext::new( + client_socket_addr, + server_service_binding.clone(), + ), + info_hash: info_hash.into(), + announcement, + }; + + announce_events_match(event, &expected_event) })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -879,7 +895,7 @@ mod tests { udp_server_stats_event_sender_mock .expect_send_event() .with(eq(Event::UdpRequestAccepted { - context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), + context: ConnectionContext::new(client_socket_addr, server_service_binding_clone.clone()), kind: UdpRequestKind::Announce, })) .times(1) @@ -913,7 +929,7 @@ mod tests { handle_announce( &announce_service, client_socket_addr, - 
server_service_binding, + server_service_binding_clone, &request, &core_config, &udp_server_stats_event_sender, @@ -928,6 +944,8 @@ mod tests { assert!(external_ip_in_tracker_configuration.is_ipv6()); + println!("Peer addr: {}", peers[0].peer_addr.ip()); + // There's a special type of IPv6 addresses that provide compatibility with IPv4. // The last 32 bits of these addresses represent an IPv4, and are represented like this: // 1111:2222:3333:4444:5555:6666:1.2.3.4 diff --git a/packages/udp-tracker-server/src/lib.rs b/packages/udp-tracker-server/src/lib.rs index ff53adcfb..741c81b07 100644 --- a/packages/udp-tracker-server/src/lib.rs +++ b/packages/udp-tracker-server/src/lib.rs @@ -673,3 +673,47 @@ pub struct RawRequest { payload: Vec, from: SocketAddr, } + +#[cfg(test)] +pub(crate) mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; + use bittorrent_udp_tracker_core::event::Event; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + + pub fn sample_peer() -> peer::Peer { + peer::Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), + event: AnnounceEvent::Started, + } + } + + #[must_use] + pub fn announce_events_match(event: &Event, expected_event: &Event) -> bool { + match (event, expected_event) { + ( + Event::UdpAnnounce { + connection, + info_hash, + announcement, + }, + Event::UdpAnnounce { + connection: expected_connection, + info_hash: expected_info_hash, + announcement: expected_announcement, + }, + ) => { + *connection == *expected_connection + && *info_hash == *expected_info_hash + && announcement.peer_addr == expected_announcement.peer_addr + } + _ => false, + } + } +} From f5bdec51ed46b8c14a6a0e140555d7034e73199f Mon Sep 17 
00:00:00 2001 From: Jose Celano Date: Tue, 22 Apr 2025 19:33:24 +0100 Subject: [PATCH 480/802] refactor: [#1376] rename function --- packages/http-tracker-core/src/event/mod.rs | 2 +- packages/http-tracker-core/src/services/announce.rs | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/http-tracker-core/src/event/mod.rs b/packages/http-tracker-core/src/event/mod.rs index 07d27127a..2e856c694 100644 --- a/packages/http-tracker-core/src/event/mod.rs +++ b/packages/http-tracker-core/src/event/mod.rs @@ -108,7 +108,7 @@ pub mod test { use crate::tests::sample_info_hash; #[must_use] - pub fn events_match(event: &Event, expected_event: &Event) -> bool { + pub fn announce_events_match(event: &Event, expected_event: &Event) -> bool { match (event, expected_event) { ( Event::TcpAnnounce { diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index a0c31585e..eafe63ea1 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -312,7 +312,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::event; - use crate::event::test::events_match; + use crate::event::test::announce_events_match; use crate::event::{ConnectionContext, Event}; use crate::services::announce::tests::{ initialize_core_tracker_services, initialize_core_tracker_services_with_config, sample_announce_request_for_peer, @@ -388,7 +388,7 @@ mod tests { announcement, }; - events_match(event, &expected_event) + announce_events_match(event, &expected_event) })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -465,7 +465,7 @@ mod tests { announcement: peer_announcement, }; - events_match(event, &expected_event) + announce_events_match(event, &expected_event) })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -514,7 +514,7 @@ mod tests { info_hash: sample_info_hash(), announcement: peer, }; - 
events_match(event, &expected_event) + announce_events_match(event, &expected_event) })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); From 925fc938f58e5e74a08b03ead9ff776b5dfefe28 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 22 Apr 2025 19:47:41 +0100 Subject: [PATCH 481/802] test: [#1384,#1376] add comment to mock time In order to be able to compare full events. --- packages/http-tracker-core/src/event/mod.rs | 10 ++++++++++ packages/udp-tracker-server/src/lib.rs | 10 ++++++++++ 2 files changed, 20 insertions(+) diff --git a/packages/http-tracker-core/src/event/mod.rs b/packages/http-tracker-core/src/event/mod.rs index 2e856c694..921c4e32c 100644 --- a/packages/http-tracker-core/src/event/mod.rs +++ b/packages/http-tracker-core/src/event/mod.rs @@ -124,7 +124,17 @@ pub mod test { ) => { *connection == *expected_connection && *info_hash == *expected_info_hash + && announcement.peer_id == expected_announcement.peer_id && announcement.peer_addr == expected_announcement.peer_addr + // Events can't be compared due to the `updated` field. + // The `announcement.uploaded` contains the current time + // when the test is executed. 
+ // todo: mock time //&& announcement.updated == expected_announcement.updated + && announcement.uploaded == expected_announcement.uploaded + && announcement.downloaded == expected_announcement.downloaded + && announcement.left == expected_announcement.left + && announcement.event == expected_announcement.event } _ => false, } diff --git a/packages/udp-tracker-server/src/lib.rs b/packages/udp-tracker-server/src/lib.rs index 741c81b07..996c41917 100644 --- a/packages/udp-tracker-server/src/lib.rs +++ b/packages/udp-tracker-server/src/lib.rs @@ -711,7 +711,17 @@ pub(crate) mod tests { ) => { *connection == *expected_connection && *info_hash == *expected_info_hash + && announcement.peer_id == expected_announcement.peer_id && announcement.peer_addr == expected_announcement.peer_addr + // Events can't be compared due to the `updated` field. + // The `announcement.uploaded` contains the current time + // when the test is executed. + // todo: mock time + //&& announcement.updated == expected_announcement.updated + && announcement.uploaded == expected_announcement.uploaded + && announcement.downloaded == expected_announcement.downloaded + && announcement.left == expected_announcement.left + && announcement.event == expected_announcement.event } _ => false, } From e9ec15a45d19daeb1fca4d84b982f24f0de51417 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 22 Apr 2025 19:48:42 +0100 Subject: [PATCH 482/802] chore: remove print statement from tests --- packages/udp-tracker-server/src/handlers/announce.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 7e6d42834..0167553f2 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -853,8 +853,6 @@ mod tests { announcement.peer_id = peer_id; announcement.peer_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0x7e00, 1)), 
client_port); - println!("announcement.peer_addr: {}", announcement.peer_addr); - let client_socket_addr = SocketAddr::new(IpAddr::V6(client_ip_v6), client_port); let mut server_socket_addr = config.udp_trackers.clone().unwrap()[0].bind_address; if server_socket_addr.port() == 0 { @@ -944,8 +942,6 @@ mod tests { assert!(external_ip_in_tracker_configuration.is_ipv6()); - println!("Peer addr: {}", peers[0].peer_addr.ip()); - // There's a special type of IPv6 addresses that provide compatibility with IPv4. // The last 32 bits of these addresses represent an IPv4, and are represented like this: // 1111:2222:3333:4444:5555:6666:1.2.3.4 From e0162d1bcd0cfaae93d14394b3cfba3f9bed89f6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 23 Apr 2025 13:16:13 +0100 Subject: [PATCH 483/802] refactor: the remote client IP resolution code - [x] Extract `ReverseProxyMode` flag. - [x] Extract `RemoteClientAddr` type. - [x] Add IP wrapper `ResolvedIp` to track IP source. - [x] Move all code to the `http-protocol` package. - [x] Other improvements. --- .../src/v1/services/peer_ip_resolver.rs | 240 ++++++++++-------- packages/http-tracker-core/src/event/mod.rs | 16 +- .../src/services/announce.rs | 24 +- .../http-tracker-core/src/services/mod.rs | 42 --- .../http-tracker-core/src/services/scrape.rs | 31 ++- .../src/statistics/event/handler.rs | 15 +- 6 files changed, 195 insertions(+), 173 deletions(-) diff --git a/packages/http-protocol/src/v1/services/peer_ip_resolver.rs b/packages/http-protocol/src/v1/services/peer_ip_resolver.rs index b375694b9..ceaa7e11c 100644 --- a/packages/http-protocol/src/v1/services/peer_ip_resolver.rs +++ b/packages/http-protocol/src/v1/services/peer_ip_resolver.rs @@ -1,4 +1,4 @@ -//! This service resolves the peer IP from the request. +//! This service resolves the remote client address. //! //! The peer IP is used to identify the peer in the tracker. It's the peer IP //! that is used in the `announce` responses (peer list). 
And it's also used to @@ -12,20 +12,65 @@ //! X-Forwarded-For: 126.0.0.1 X-Forwarded-For: 126.0.0.1,126.0.0.2 //! ``` //! -//! This service returns two options for the peer IP: +//! This `ClientIpSources` contains two options for the peer IP: //! //! ```text //! right_most_x_forwarded_for = 126.0.0.2 //! connection_info_ip = 126.0.0.3 //! ``` //! -//! Depending on the tracker configuration. +//! Which one to use depends on the `ReverseProxyMode`. use std::net::{IpAddr, SocketAddr}; use std::panic::Location; use serde::{Deserialize, Serialize}; use thiserror::Error; +/// Resolves the client's real address considering proxy headers. Port is also +/// included when available. +/// +/// # Errors +/// +/// This function returns an error if the IP address cannot be resolved. +pub fn resolve_remote_client_addr( + reverse_proxy_mode: &ReverseProxyMode, + client_ip_sources: &ClientIpSources, +) -> Result { + let ip = match reverse_proxy_mode { + ReverseProxyMode::Enabled => ResolvedIp::FromXForwardedFor(client_ip_sources.try_client_ip_from_proxy_header()?), + ReverseProxyMode::Disabled => ResolvedIp::FromSocketAddr(client_ip_sources.try_client_ip_from_connection_info()?), + }; + + let port = client_ip_sources.client_port_from_connection_info(); + + Ok(RemoteClientAddr::new(ip, port)) +} + +/// This struct indicates whether the tracker is running on reverse proxy mode. +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Copy)] +pub enum ReverseProxyMode { + Enabled, + Disabled, +} + +impl From for bool { + fn from(reverse_proxy_mode: ReverseProxyMode) -> Self { + match reverse_proxy_mode { + ReverseProxyMode::Enabled => true, + ReverseProxyMode::Disabled => false, + } + } +} + +impl From for ReverseProxyMode { + fn from(reverse_proxy_mode: bool) -> Self { + if reverse_proxy_mode { + ReverseProxyMode::Enabled + } else { + ReverseProxyMode::Disabled + } + } +} /// This struct contains the sources from which the peer IP can be obtained. 
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] pub struct ClientIpSources { @@ -36,6 +81,36 @@ pub struct ClientIpSources { pub connection_info_socket_address: Option, } +impl ClientIpSources { + fn try_client_ip_from_connection_info(&self) -> Result { + if let Some(socket_addr) = self.connection_info_socket_address { + Ok(socket_addr.ip()) + } else { + Err(PeerIpResolutionError::MissingClientIp { + location: Location::caller(), + }) + } + } + + fn try_client_ip_from_proxy_header(&self) -> Result { + if let Some(ip) = self.right_most_x_forwarded_for { + Ok(ip) + } else { + Err(PeerIpResolutionError::MissingRightMostXForwardedForIp { + location: Location::caller(), + }) + } + } + + fn client_port_from_connection_info(&self) -> Option { + if self.connection_info_socket_address.is_some() { + self.connection_info_socket_address.map(|socket_addr| socket_addr.port()) + } else { + None + } + } +} + /// The error that can occur when resolving the peer IP. #[derive(Error, Debug, Clone)] pub enum PeerIpResolutionError { @@ -54,104 +129,57 @@ pub enum PeerIpResolutionError { MissingClientIp { location: &'static Location<'static> }, } -/// Resolves the peer IP from the request. -/// -/// Given the sources from which the peer IP can be obtained, this function -/// resolves the peer IP according to the tracker configuration. 
-/// -/// With the tracker running on reverse proxy mode: -/// -/// ```rust -/// use std::net::IpAddr; -/// use std::str::FromStr; -/// -/// use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; -/// -/// let on_reverse_proxy = true; -/// -/// let ip = invoke( -/// on_reverse_proxy, -/// &ClientIpSources { -/// right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), -/// connection_info_socket_address: None, -/// }, -/// ) -/// .unwrap(); -/// -/// assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); -/// ``` -/// -/// With the tracker non running on reverse proxy mode: -/// -/// ```rust -/// use std::net::{IpAddr,Ipv4Addr,SocketAddr}; -/// use std::str::FromStr; -/// -/// use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; -/// -/// let on_reverse_proxy = false; -/// -/// let ip = invoke( -/// on_reverse_proxy, -/// &ClientIpSources { -/// right_most_x_forwarded_for: None, -/// connection_info_socket_address: Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080)) -/// }, -/// ) -/// .unwrap(); -/// -/// assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); -/// ``` -/// -/// # Errors -/// -/// Will return an error if the peer IP cannot be obtained according to the configuration. -/// For example, if the IP is extracted from an HTTP header which is missing in the request. 
-pub fn invoke(on_reverse_proxy: bool, client_ip_sources: &ClientIpSources) -> Result { - if on_reverse_proxy { - resolve_peer_ip_on_reverse_proxy(client_ip_sources) - } else { - resolve_peer_ip_without_reverse_proxy(client_ip_sources) - } +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Copy)] +pub struct RemoteClientAddr { + ip: ResolvedIp, + port: Option, } -fn resolve_peer_ip_without_reverse_proxy(remote_client_ip: &ClientIpSources) -> Result { - if let Some(socket_addr) = remote_client_ip.connection_info_socket_address { - Ok(socket_addr.ip()) - } else { - Err(PeerIpResolutionError::MissingClientIp { - location: Location::caller(), - }) +impl RemoteClientAddr { + #[must_use] + pub fn new(ip: ResolvedIp, port: Option) -> Self { + Self { ip, port } + } + + #[must_use] + pub fn ip(&self) -> IpAddr { + match self.ip { + ResolvedIp::FromSocketAddr(ip) | ResolvedIp::FromXForwardedFor(ip) => ip, + } } -} -fn resolve_peer_ip_on_reverse_proxy(remote_client_ip: &ClientIpSources) -> Result { - if let Some(ip) = remote_client_ip.right_most_x_forwarded_for { - Ok(ip) - } else { - Err(PeerIpResolutionError::MissingRightMostXForwardedForIp { - location: Location::caller(), - }) + #[must_use] + pub fn port(&self) -> Option { + self.port } } +/// This enum indicates the source of the resolved IP address. 
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Copy)] +pub enum ResolvedIp { + FromXForwardedFor(IpAddr), + FromSocketAddr(IpAddr), +} + #[cfg(test)] mod tests { - use super::invoke; + use super::resolve_remote_client_addr; mod working_without_reverse_proxy { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; - use super::invoke; - use crate::v1::services::peer_ip_resolver::{ClientIpSources, PeerIpResolutionError}; + use super::resolve_remote_client_addr; + use crate::v1::services::peer_ip_resolver::{ + ClientIpSources, PeerIpResolutionError, RemoteClientAddr, ResolvedIp, ReverseProxyMode, + }; #[test] - fn it_should_get_the_peer_ip_from_the_connection_info() { - let on_reverse_proxy = false; + fn it_should_get_the_remote_client_address_from_the_connection_info() { + let reverse_proxy_mode = ReverseProxyMode::Disabled; - let ip = invoke( - on_reverse_proxy, + let ip = resolve_remote_client_addr( + &reverse_proxy_mode, &ClientIpSources { right_most_x_forwarded_for: None, connection_info_socket_address: Some(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080)), @@ -159,15 +187,21 @@ mod tests { ) .unwrap(); - assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); + assert_eq!( + ip, + RemoteClientAddr::new( + ResolvedIp::FromSocketAddr(IpAddr::from_str("203.0.113.195").unwrap()), + Some(8080) + ) + ); } #[test] - fn it_should_return_an_error_if_it_cannot_get_the_peer_ip_from_the_connection_info() { - let on_reverse_proxy = false; + fn it_should_return_an_error_if_it_cannot_get_the_remote_client_ip_from_the_connection_info() { + let reverse_proxy_mode = ReverseProxyMode::Disabled; - let error = invoke( - on_reverse_proxy, + let error = resolve_remote_client_addr( + &reverse_proxy_mode, &ClientIpSources { right_most_x_forwarded_for: None, connection_info_socket_address: None, @@ -179,18 +213,20 @@ mod tests { } } - mod working_on_reverse_proxy { + mod working_on_reverse_proxy_mode { use std::net::IpAddr; use 
std::str::FromStr; - use crate::v1::services::peer_ip_resolver::{invoke, ClientIpSources, PeerIpResolutionError}; + use crate::v1::services::peer_ip_resolver::{ + resolve_remote_client_addr, ClientIpSources, PeerIpResolutionError, RemoteClientAddr, ResolvedIp, ReverseProxyMode, + }; #[test] - fn it_should_get_the_peer_ip_from_the_right_most_ip_in_the_x_forwarded_for_header() { - let on_reverse_proxy = true; + fn it_should_get_the_remote_client_ip_from_the_right_most_ip_in_the_x_forwarded_for_header() { + let reverse_proxy_mode = ReverseProxyMode::Enabled; - let ip = invoke( - on_reverse_proxy, + let ip = resolve_remote_client_addr( + &reverse_proxy_mode, &ClientIpSources { right_most_x_forwarded_for: Some(IpAddr::from_str("203.0.113.195").unwrap()), connection_info_socket_address: None, @@ -198,15 +234,21 @@ mod tests { ) .unwrap(); - assert_eq!(ip, IpAddr::from_str("203.0.113.195").unwrap()); + assert_eq!( + ip, + RemoteClientAddr::new( + ResolvedIp::FromXForwardedFor(IpAddr::from_str("203.0.113.195").unwrap()), + None + ) + ); } #[test] fn it_should_return_an_error_if_it_cannot_get_the_right_most_ip_from_the_x_forwarded_for_header() { - let on_reverse_proxy = true; + let reverse_proxy_mode = ReverseProxyMode::Enabled; - let error = invoke( - on_reverse_proxy, + let error = resolve_remote_client_addr( + &reverse_proxy_mode, &ClientIpSources { right_most_x_forwarded_for: None, connection_info_socket_address: None, diff --git a/packages/http-tracker-core/src/event/mod.rs b/packages/http-tracker-core/src/event/mod.rs index 921c4e32c..4f0b84e48 100644 --- a/packages/http-tracker-core/src/event/mod.rs +++ b/packages/http-tracker-core/src/event/mod.rs @@ -1,13 +1,12 @@ use std::net::{IpAddr, SocketAddr}; +use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::RemoteClientAddr; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_metrics::label::{LabelSet, LabelValue}; use torrust_tracker_metrics::label_name; use 
torrust_tracker_primitives::peer::PeerAnnouncement; use torrust_tracker_primitives::service_binding::ServiceBinding; -use crate::services::RemoteClientAddr; - pub mod sender; /// A HTTP core event. @@ -64,12 +63,12 @@ pub struct ClientConnectionContext { impl ClientConnectionContext { #[must_use] pub fn ip_addr(&self) -> IpAddr { - self.remote_client_addr.ip + self.remote_client_addr.ip() } #[must_use] pub fn port(&self) -> Option { - self.remote_client_addr.port + self.remote_client_addr.port() } } @@ -100,11 +99,11 @@ impl From for LabelSet { #[cfg(test)] pub mod test { + use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{RemoteClientAddr, ResolvedIp}; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::service_binding::Protocol; use super::Event; - use crate::services::RemoteClientAddr; use crate::tests::sample_info_hash; #[must_use] @@ -153,7 +152,7 @@ pub mod test { let event1 = Event::TcpAnnounce { connection: ConnectionContext::new( - RemoteClientAddr::new(remote_client_ip, Some(8080)), + RemoteClientAddr::new(ResolvedIp::FromSocketAddr(remote_client_ip), Some(8080)), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), info_hash, @@ -162,7 +161,10 @@ pub mod test { let event2 = Event::TcpAnnounce { connection: ConnectionContext::new( - RemoteClientAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), Some(8080)), + RemoteClientAddr::new( + ResolvedIp::FromSocketAddr(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2))), + Some(8080), + ), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), info_hash, diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index eafe63ea1..fa0c0c38c 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -11,7 +11,9 @@ use std::panic::Location; use 
std::sync::Arc; use bittorrent_http_tracker_protocol::v1::requests::announce::{peer_from_request, Announce}; -use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{ClientIpSources, PeerIpResolutionError}; +use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{ + resolve_remote_client_addr, ClientIpSources, PeerIpResolutionError, RemoteClientAddr, +}; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; use bittorrent_tracker_core::authentication::service::AuthenticationService; @@ -23,7 +25,6 @@ use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer::PeerAnnouncement; use torrust_tracker_primitives::service_binding::ServiceBinding; -use super::{resolve_remote_client_addr, RemoteClientAddr}; use crate::event; use crate::event::Event; @@ -78,15 +79,20 @@ impl AnnounceService { self.authorize(announce_request.info_hash).await?; - let remote_client_addr = resolve_remote_client_addr(self.core_config.net.on_reverse_proxy, client_ip_sources)?; + let remote_client_addr = resolve_remote_client_addr(&self.core_config.net.on_reverse_proxy.into(), client_ip_sources)?; - let mut peer = peer_from_request(announce_request, &remote_client_addr.ip); + let mut peer = peer_from_request(announce_request, &remote_client_addr.ip()); let peers_wanted = Self::peers_wanted(announce_request); let announce_data = self .announce_handler - .announce(&announce_request.info_hash, &mut peer, &remote_client_addr.ip, &peers_wanted) + .announce( + &announce_request.info_hash, + &mut peer, + &remote_client_addr.ip(), + &peers_wanted, + ) .await?; self.send_event( @@ -303,6 +309,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; + use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{RemoteClientAddr, ResolvedIp}; use mockall::predicate::{self}; use torrust_tracker_configuration::Configuration; use 
torrust_tracker_primitives::core::AnnounceData; @@ -319,7 +326,6 @@ mod tests { MockHttpStatsEventSender, }; use crate::services::announce::AnnounceService; - use crate::services::RemoteClientAddr; use crate::tests::{sample_info_hash, sample_peer, sample_peer_using_ipv4, sample_peer_using_ipv6}; #[tokio::test] @@ -381,7 +387,7 @@ mod tests { let expected_event = Event::TcpAnnounce { connection: ConnectionContext::new( - RemoteClientAddr::new(remote_client_ip, Some(8080)), + RemoteClientAddr::new(ResolvedIp::FromSocketAddr(remote_client_ip), Some(8080)), server_service_binding.clone(), ), info_hash: sample_info_hash(), @@ -458,7 +464,7 @@ mod tests { let expected_event = Event::TcpAnnounce { connection: ConnectionContext::new( - RemoteClientAddr::new(remote_client_ip, Some(8080)), + RemoteClientAddr::new(ResolvedIp::FromSocketAddr(remote_client_ip), Some(8080)), server_service_binding.clone(), ), info_hash: sample_info_hash(), @@ -508,7 +514,7 @@ mod tests { .with(predicate::function(move |event| { let expected_event = Event::TcpAnnounce { connection: ConnectionContext::new( - RemoteClientAddr::new(remote_client_ip, Some(8080)), + RemoteClientAddr::new(ResolvedIp::FromSocketAddr(remote_client_ip), Some(8080)), server_service_binding.clone(), ), info_hash: sample_info_hash(), diff --git a/packages/http-tracker-core/src/services/mod.rs b/packages/http-tracker-core/src/services/mod.rs index 5ec6dd22d..ce99c6856 100644 --- a/packages/http-tracker-core/src/services/mod.rs +++ b/packages/http-tracker-core/src/services/mod.rs @@ -5,47 +5,5 @@ //! servers. //! //! Refer to [`torrust_tracker`](crate) documentation. 
- -use std::net::IpAddr; - -use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{self, ClientIpSources, PeerIpResolutionError}; pub mod announce; pub mod scrape; - -#[derive(Debug, PartialEq, Eq, Clone)] -pub struct RemoteClientAddr { - pub ip: IpAddr, - pub port: Option, -} - -impl RemoteClientAddr { - #[must_use] - pub fn new(ip: IpAddr, port: Option) -> Self { - Self { ip, port } - } -} - -/// Resolves the client's real IP address considering proxy headers -/// -/// # Errors -/// -/// This function returns an error if the IP address cannot be resolved. -pub fn resolve_remote_client_addr( - on_reverse_proxy: bool, - client_ip_sources: &ClientIpSources, -) -> Result { - let ip = match peer_ip_resolver::invoke(on_reverse_proxy, client_ip_sources) { - Ok(peer_ip) => Ok(peer_ip), - Err(error) => Err(error), - }?; - - let port = if client_ip_sources.connection_info_socket_address.is_some() { - client_ip_sources - .connection_info_socket_address - .map(|socket_addr| socket_addr.port()) - } else { - None - }; - - Ok(RemoteClientAddr { ip, port }) -} diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index e206b909c..5e8f54cc1 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -10,7 +10,9 @@ use std::sync::Arc; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; -use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{ClientIpSources, PeerIpResolutionError}; +use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{ + resolve_remote_client_addr, ClientIpSources, PeerIpResolutionError, RemoteClientAddr, +}; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::authentication::{self, Key}; use bittorrent_tracker_core::error::{ScrapeError, TrackerCoreError, WhitelistError}; @@ -19,7 +21,6 @@ use 
torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::service_binding::ServiceBinding; -use super::{resolve_remote_client_addr, RemoteClientAddr}; use crate::event; use crate::event::{ConnectionContext, Event}; @@ -81,7 +82,7 @@ impl ScrapeService { self.scrape_handler.scrape(&scrape_request.info_hashes).await? }; - let remote_client_addr = resolve_remote_client_addr(self.core_config.net.on_reverse_proxy, client_ip_sources)?; + let remote_client_addr = resolve_remote_client_addr(&self.core_config.net.on_reverse_proxy.into(), client_ip_sources)?; self.send_event(remote_client_addr, server_service_binding.clone()).await; @@ -250,7 +251,7 @@ mod tests { use std::sync::Arc; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; - use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; + use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{ClientIpSources, RemoteClientAddr, ResolvedIp}; use bittorrent_tracker_core::announce_handler::PeersWanted; use mockall::predicate::eq; use torrust_tracker_primitives::core::ScrapeData; @@ -263,7 +264,6 @@ mod tests { initialize_services_with_configuration, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; use crate::services::scrape::ScrapeService; - use crate::services::RemoteClientAddr; use crate::tests::sample_info_hash; use crate::{event, statistics}; @@ -335,7 +335,10 @@ mod tests { .expect_send_event() .with(eq(Event::TcpScrape { connection: ConnectionContext::new( - RemoteClientAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), Some(8080)), + RemoteClientAddr::new( + ResolvedIp::FromSocketAddr(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1))), + Some(8080), + ), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)) .unwrap(), ), @@ -387,7 +390,9 @@ mod tests { .with(eq(Event::TcpScrape { connection: ConnectionContext::new( RemoteClientAddr::new( - 
IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + ResolvedIp::FromSocketAddr(IpAddr::V6(Ipv6Addr::new( + 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, + ))), Some(8080), ), server_service_binding, @@ -435,7 +440,7 @@ mod tests { use std::sync::Arc; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; - use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; + use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{ClientIpSources, RemoteClientAddr, ResolvedIp}; use bittorrent_tracker_core::announce_handler::PeersWanted; use mockall::predicate::eq; use torrust_tracker_primitives::core::ScrapeData; @@ -447,7 +452,6 @@ mod tests { initialize_services_with_configuration, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; use crate::services::scrape::ScrapeService; - use crate::services::RemoteClientAddr; use crate::tests::sample_info_hash; use crate::{event, statistics}; @@ -513,7 +517,10 @@ mod tests { .expect_send_event() .with(eq(Event::TcpScrape { connection: ConnectionContext::new( - RemoteClientAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), Some(8080)), + RemoteClientAddr::new( + ResolvedIp::FromSocketAddr(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1))), + Some(8080), + ), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)) .unwrap(), ), @@ -565,7 +572,9 @@ mod tests { .with(eq(Event::TcpScrape { connection: ConnectionContext::new( RemoteClientAddr::new( - IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + ResolvedIp::FromSocketAddr(IpAddr::V6(Ipv6Addr::new( + 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, + ))), Some(8080), ), server_service_binding, diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index d59c640c1..7e8338edf 100644 --- 
a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -73,11 +73,11 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{RemoteClientAddr, ResolvedIp}; use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event}; - use crate::services::RemoteClientAddr; use crate::statistics::event::handler::handle_event; use crate::statistics::repository::Repository; use crate::tests::{sample_info_hash, sample_peer_using_ipv4, sample_peer_using_ipv6}; @@ -92,7 +92,7 @@ mod tests { handle_event( Event::TcpAnnounce { connection: ConnectionContext::new( - RemoteClientAddr::new(remote_client_ip, Some(8080)), + RemoteClientAddr::new(ResolvedIp::FromSocketAddr(remote_client_ip), Some(8080)), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), info_hash: sample_info_hash(), @@ -115,7 +115,10 @@ mod tests { handle_event( Event::TcpScrape { connection: ConnectionContext::new( - RemoteClientAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), Some(8080)), + RemoteClientAddr::new( + ResolvedIp::FromSocketAddr(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2))), + Some(8080), + ), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), }, @@ -138,7 +141,7 @@ mod tests { handle_event( Event::TcpAnnounce { connection: ConnectionContext::new( - RemoteClientAddr::new(remote_client_ip, Some(8080)), + RemoteClientAddr::new(ResolvedIp::FromSocketAddr(remote_client_ip), Some(8080)), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), ), info_hash: sample_info_hash(), @@ -162,7 +165,9 @@ mod tests { 
Event::TcpScrape { connection: ConnectionContext::new( RemoteClientAddr::new( - IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + ResolvedIp::FromSocketAddr(IpAddr::V6(Ipv6Addr::new( + 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, + ))), Some(8080), ), ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), From 00e43ca396cef6c24eef96ee7395e95baa6346e5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 23 Apr 2025 17:23:09 +0100 Subject: [PATCH 484/802] refactor: move Registar to AppContainer --- src/app.rs | 26 +++++++++++++------------ src/console/profiling.rs | 2 +- src/container.rs | 11 +++++++++++ src/main.rs | 2 +- tests/servers/api/contract/stats/mod.rs | 2 +- 5 files changed, 28 insertions(+), 15 deletions(-) diff --git a/src/app.rs b/src/app.rs index 5eb162e18..365aae392 100644 --- a/src/app.rs +++ b/src/app.rs @@ -24,7 +24,6 @@ use std::sync::Arc; use tokio::task::JoinHandle; -use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::Configuration; use tracing::instrument; @@ -32,14 +31,14 @@ use crate::bootstrap; use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; use crate::container::AppContainer; -pub async fn run() -> (Arc, Vec>, Registar) { +pub async fn run() -> (Arc, Vec>) { let (config, app_container) = bootstrap::app::setup(); let app_container = Arc::new(app_container); - let (jobs, registar) = start(&config, &app_container).await; + let jobs = start(&config, &app_container).await; - (app_container, jobs, registar) + (app_container, jobs) } /// # Panics @@ -49,7 +48,7 @@ pub async fn run() -> (Arc, Vec>, Registar) { /// - Can't retrieve tracker keys from database. /// - Can't load whitelist from database. 
#[instrument(skip(config, app_container))] -pub async fn start(config: &Configuration, app_container: &Arc) -> (Vec>, Registar) { +pub async fn start(config: &Configuration, app_container: &Arc) -> Vec> { if config.http_api.is_none() && (config.udp_trackers.is_none() || config.udp_trackers.as_ref().map_or(true, std::vec::Vec::is_empty)) && (config.http_trackers.is_none() || config.http_trackers.as_ref().map_or(true, std::vec::Vec::is_empty)) @@ -59,8 +58,6 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> let mut jobs: Vec> = Vec::new(); - let registar = Registar::default(); - // Load peer keys if config.core.private { app_container @@ -96,7 +93,12 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> let udp_tracker_server_container = app_container.udp_tracker_server_container(); jobs.push( - udp_tracker::start_job(udp_tracker_container, udp_tracker_server_container, registar.give_form()).await, + udp_tracker::start_job( + udp_tracker_container, + udp_tracker_server_container, + app_container.registar.give_form(), + ) + .await, ); } } @@ -113,7 +115,7 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> if let Some(job) = http_tracker::start_job( http_tracker_container, - registar.give_form(), + app_container.registar.give_form(), torrust_axum_http_tracker_server::Version::V1, ) .await @@ -132,7 +134,7 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> if let Some(job) = tracker_apis::start_job( http_api_container, - registar.give_form(), + app_container.registar.give_form(), torrust_axum_rest_tracker_api_server::Version::V1, ) .await @@ -152,7 +154,7 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> } // Start Health Check API - jobs.push(health_check_api::start_job(&config.health_check_api, registar.entries()).await); + jobs.push(health_check_api::start_job(&config.health_check_api, app_container.registar.entries()).await); - (jobs, registar) + jobs } diff --git 
a/src/console/profiling.rs b/src/console/profiling.rs index 426712c34..3ed9c6389 100644 --- a/src/console/profiling.rs +++ b/src/console/profiling.rs @@ -179,7 +179,7 @@ pub async fn run() { return; }; - let (_app_container, jobs, _registar) = app::run().await; + let (_app_container, jobs) = app::run().await; // Run the tracker for a fixed duration let run_duration = sleep(Duration::from_secs(duration_secs)); diff --git a/src/container.rs b/src/container.rs index 9df9c9611..537be2605 100644 --- a/src/container.rs +++ b/src/container.rs @@ -7,6 +7,7 @@ use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_udp_tracker_core::container::{UdpTrackerCoreContainer, UdpTrackerCoreServices}; use bittorrent_udp_tracker_core::{self}; use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; +use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{Configuration, HttpApi}; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; use tracing::instrument; @@ -24,6 +25,9 @@ pub struct AppContainer { // Configuration pub http_api_config: Arc>, + // Registar + pub registar: Arc, + // Core pub tracker_core_container: Arc, @@ -46,6 +50,10 @@ impl AppContainer { let http_api_config = Arc::new(configuration.http_api.clone()); + // Registar + + let registar = Arc::new(Registar::default()); + // Core let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); @@ -73,6 +81,9 @@ impl AppContainer { // Configuration http_api_config, + // Registar + registar, + // Core tracker_core_container, diff --git a/src/main.rs b/src/main.rs index cc7c202c4..de73d0a15 100644 --- a/src/main.rs +++ b/src/main.rs @@ -2,7 +2,7 @@ use torrust_tracker_lib::app; #[tokio::main] async fn main() { - let (_app_container, jobs, _registar) = app::run().await; + let (_app_container, jobs) = app::run().await; // handle the signals tokio::select! 
{ diff --git a/tests/servers/api/contract/stats/mod.rs b/tests/servers/api/contract/stats/mod.rs index 016a372dd..d50bc58a5 100644 --- a/tests/servers/api/contract/stats/mod.rs +++ b/tests/servers/api/contract/stats/mod.rs @@ -51,7 +51,7 @@ async fn the_stats_api_endpoint_should_return_the_global_stats() { env::set_var("TORRUST_TRACKER_CONFIG_TOML", config_with_two_http_trackers); - let (_app_container, _jobs, _registar) = app::run().await; + let (_app_container, _jobs) = app::run().await; announce_to_tracker("http://127.0.0.1:7272").await; announce_to_tracker("http://127.0.0.1:7373").await; From 56c3bd1b3c50b74fb03a5c73e6a10c541a96016a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 23 Apr 2025 17:35:46 +0100 Subject: [PATCH 485/802] refactor: update logs messages --- src/console/profiling.rs | 3 ++- src/main.rs | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/console/profiling.rs b/src/console/profiling.rs index 3ed9c6389..873dbb574 100644 --- a/src/console/profiling.rs +++ b/src/console/profiling.rs @@ -189,7 +189,8 @@ pub async fn run() { tracing::info!("Torrust timed shutdown.."); }, _ = tokio::signal::ctrl_c() => { - tracing::info!("Torrust shutting down via Ctrl+C ..."); + tracing::info!("Torrust tracker shutting down via Ctrl+C ..."); + // Await for all jobs to shutdown futures::future::join_all(jobs).await; } diff --git a/src/main.rs b/src/main.rs index de73d0a15..8ba4311f7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,11 +7,12 @@ async fn main() { // handle the signals tokio::select! 
{ _ = tokio::signal::ctrl_c() => { - tracing::info!("Torrust shutting down ..."); + tracing::info!("Torrust tracker shutting down ..."); // Await for all jobs to shutdown futures::future::join_all(jobs).await; - tracing::info!("Torrust successfully shutdown."); + + tracing::info!("Torrust tracker successfully shutdown."); } } } From d80bfc0772ffee2bc40883540bee33bcce58d83b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 23 Apr 2025 18:08:51 +0100 Subject: [PATCH 486/802] refactor: extract functions in app start --- src/app.rs | 120 ++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 81 insertions(+), 39 deletions(-) diff --git a/src/app.rs b/src/app.rs index 365aae392..d394fe644 100644 --- a/src/app.rs +++ b/src/app.rs @@ -24,11 +24,11 @@ use std::sync::Arc; use tokio::task::JoinHandle; -use torrust_tracker_configuration::Configuration; +use torrust_tracker_configuration::{Configuration, HttpTracker, UdpTracker}; use tracing::instrument; -use crate::bootstrap; use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; +use crate::bootstrap::{self}; use crate::container::AppContainer; pub async fn run() -> (Arc, Vec>) { @@ -41,6 +41,8 @@ pub async fn run() -> (Arc, Vec>) { (app_container, jobs) } +/// Starts the tracker application. +/// /// # Panics /// /// Will panic if: @@ -49,16 +51,40 @@ pub async fn run() -> (Arc, Vec>) { /// - Can't load whitelist from database. 
#[instrument(skip(config, app_container))] pub async fn start(config: &Configuration, app_container: &Arc) -> Vec> { + warn_if_no_services_enabled(config); + + load_data_from_database(config, app_container).await; + + start_jobs(config, app_container).await +} + +async fn load_data_from_database(config: &Configuration, app_container: &Arc) { + load_peer_keys(config, app_container).await; + load_whitelisted_torrents(config, app_container).await; +} + +async fn start_jobs(config: &Configuration, app_container: &Arc) -> Vec> { + let mut jobs: Vec> = Vec::new(); + + start_the_udp_instances(config, app_container, &mut jobs).await; + start_the_http_instances(config, app_container, &mut jobs).await; + start_the_http_api(config, app_container, &mut jobs).await; + start_torrent_cleanup(config, app_container, &mut jobs); + start_health_check_api(config, app_container, &mut jobs).await; + + jobs +} + +fn warn_if_no_services_enabled(config: &Configuration) { if config.http_api.is_none() && (config.udp_trackers.is_none() || config.udp_trackers.as_ref().map_or(true, std::vec::Vec::is_empty)) && (config.http_trackers.is_none() || config.http_trackers.as_ref().map_or(true, std::vec::Vec::is_empty)) { tracing::warn!("No services enabled in configuration"); } +} - let mut jobs: Vec> = Vec::new(); - - // Load peer keys +async fn load_peer_keys(config: &Configuration, app_container: &Arc) { if config.core.private { app_container .tracker_core_container @@ -67,8 +93,9 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> .await .expect("Could not retrieve keys from database."); } +} - // Load whitelisted torrents +async fn load_whitelisted_torrents(config: &Configuration, app_container: &Arc) { if config.core.listed { app_container .tracker_core_container @@ -77,8 +104,9 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> .await .expect("Could not load whitelist from database."); } +} - // Start the UDP blocks +async fn start_the_udp_instances(config: 
&Configuration, app_container: &Arc, jobs: &mut Vec>) { if let Some(udp_trackers) = &config.udp_trackers { for udp_tracker_config in udp_trackers { if config.core.private { @@ -87,47 +115,61 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> udp_tracker_config.bind_address ); } else { - let udp_tracker_container = app_container - .udp_tracker_container(udp_tracker_config.bind_address) - .expect("Could not create UDP tracker container"); - let udp_tracker_server_container = app_container.udp_tracker_server_container(); - - jobs.push( - udp_tracker::start_job( - udp_tracker_container, - udp_tracker_server_container, - app_container.registar.give_form(), - ) - .await, - ); + start_udp_instance(udp_tracker_config, app_container, jobs).await; } } } else { tracing::info!("No UDP blocks in configuration"); } +} + +async fn start_udp_instance(udp_tracker_config: &UdpTracker, app_container: &Arc, jobs: &mut Vec>) { + let udp_tracker_container = app_container + .udp_tracker_container(udp_tracker_config.bind_address) + .expect("Could not create UDP tracker container"); + let udp_tracker_server_container = app_container.udp_tracker_server_container(); - // Start the HTTP blocks + jobs.push( + udp_tracker::start_job( + udp_tracker_container, + udp_tracker_server_container, + app_container.registar.give_form(), + ) + .await, + ); +} + +async fn start_the_http_instances(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { if let Some(http_trackers) = &config.http_trackers { for http_tracker_config in http_trackers { - let http_tracker_container = app_container - .http_tracker_container(http_tracker_config.bind_address) - .expect("Could not create HTTP tracker container"); - - if let Some(job) = http_tracker::start_job( - http_tracker_container, - app_container.registar.give_form(), - torrust_axum_http_tracker_server::Version::V1, - ) - .await - { - jobs.push(job); - } + start_http_instance(http_tracker_config, app_container, jobs).await; } } else { 
tracing::info!("No HTTP blocks in configuration"); } +} + +async fn start_http_instance( + http_tracker_config: &HttpTracker, + app_container: &Arc, + jobs: &mut Vec>, +) { + let http_tracker_container = app_container + .http_tracker_container(http_tracker_config.bind_address) + .expect("Could not create HTTP tracker container"); + + if let Some(job) = http_tracker::start_job( + http_tracker_container, + app_container.registar.give_form(), + torrust_axum_http_tracker_server::Version::V1, + ) + .await + { + jobs.push(job); + } +} - // Start HTTP API +async fn start_the_http_api(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { if let Some(http_api_config) = &config.http_api { let http_api_config = Arc::new(http_api_config.clone()); let http_api_container = app_container.tracker_http_api_container(&http_api_config); @@ -144,17 +186,17 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> } else { tracing::info!("No API block in configuration"); } +} - // Start runners to remove torrents without peers, every interval +fn start_torrent_cleanup(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { if config.core.inactive_peer_cleanup_interval > 0 { jobs.push(torrent_cleanup::start_job( &config.core, &app_container.tracker_core_container.torrents_manager, )); } +} - // Start Health Check API +async fn start_health_check_api(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { jobs.push(health_check_api::start_job(&config.health_check_api, app_container.registar.entries()).await); - - jobs } From 17fb90943bdb95ab7ed1da72589f34e9a0a9d356 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 23 Apr 2025 19:56:49 +0100 Subject: [PATCH 487/802] refactor: [#1444] http core event listener start in app start. Step 1 This is the first step in a bigger refactor to move the start of event listeners from app container instantiation to app start (jobs creation). 
--- .../axum-http-tracker-server/src/server.rs | 1 - packages/http-tracker-core/src/container.rs | 1 - .../http-tracker-core/src/event/sender.rs | 1 + .../src/statistics/event/handler.rs | 12 ++--- .../src/statistics/event/listener.rs | 4 +- .../src/statistics/keeper.rs | 45 +++++++++++++++---- .../src/statistics/services.rs | 3 +- .../http-tracker-core/src/statistics/setup.rs | 31 ++++++++----- .../src/statistics/services.rs | 1 - 9 files changed, 67 insertions(+), 32 deletions(-) diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index eea00c142..896922751 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -274,7 +274,6 @@ mod tests { let (http_stats_event_sender, http_stats_repository) = bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); let http_stats_event_sender = Arc::new(http_stats_event_sender); - let http_stats_repository = Arc::new(http_stats_repository); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 7fc2f48a6..913236483 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -65,7 +65,6 @@ impl HttpTrackerCoreServices { let (http_stats_event_sender, http_stats_repository) = statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); let http_stats_event_sender = Arc::new(http_stats_event_sender); - let http_stats_repository = Arc::new(http_stats_repository); let http_announce_service = Arc::new(AnnounceService::new( tracker_core_container.core_config.clone(), tracker_core_container.announce_handler.clone(), diff --git a/packages/http-tracker-core/src/event/sender.rs b/packages/http-tracker-core/src/event/sender.rs index 511a381d0..b720926bb 100644 
--- a/packages/http-tracker-core/src/event/sender.rs +++ b/packages/http-tracker-core/src/event/sender.rs @@ -16,6 +16,7 @@ pub trait Sender: Sync + Send { } /// An event sender implementation using a broadcast channel. +#[derive(Clone)] pub struct Broadcaster { pub(crate) sender: broadcast::Sender, } diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index 7e8338edf..8d2ad1aa2 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -1,4 +1,5 @@ use std::net::IpAddr; +use std::sync::Arc; use torrust_tracker_metrics::label::{LabelSet, LabelValue}; use torrust_tracker_metrics::{label_name, metric_name}; @@ -12,7 +13,7 @@ use crate::statistics::HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; /// /// This function panics if the client IP address is not the same as the IP /// version of the event. -pub async fn handle_event(event: Event, stats_repository: &Repository, now: DurationSinceUnixEpoch) { +pub async fn handle_event(event: Event, stats_repository: &Arc, now: DurationSinceUnixEpoch) { match event { Event::TcpAnnounce { connection, .. 
} => { // Global fixed metrics @@ -72,6 +73,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{RemoteClientAddr, ResolvedIp}; use torrust_tracker_clock::clock::Time; @@ -85,7 +87,7 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { - let stats_repository = Repository::new(); + let stats_repository = Arc::new(Repository::new()); let peer = sample_peer_using_ipv4(); let remote_client_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)); @@ -110,7 +112,7 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp4_scrapes_counter_when_it_receives_a_tcp4_scrape_event() { - let stats_repository = Repository::new(); + let stats_repository = Arc::new(Repository::new()); handle_event( Event::TcpScrape { @@ -134,7 +136,7 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp6_announces_counter_when_it_receives_a_tcp6_announce_event() { - let stats_repository = Repository::new(); + let stats_repository = Arc::new(Repository::new()); let peer = sample_peer_using_ipv6(); let remote_client_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); @@ -159,7 +161,7 @@ mod tests { #[tokio::test] async fn should_increase_the_tcp6_scrapes_counter_when_it_receives_a_tcp6_scrape_event() { - let stats_repository = Repository::new(); + let stats_repository = Arc::new(Repository::new()); handle_event( Event::TcpScrape { diff --git a/packages/http-tracker-core/src/statistics/event/listener.rs b/packages/http-tracker-core/src/statistics/event/listener.rs index 5e87b47df..00fce6b77 100644 --- a/packages/http-tracker-core/src/statistics/event/listener.rs +++ b/packages/http-tracker-core/src/statistics/event/listener.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use 
tokio::sync::broadcast; use torrust_tracker_clock::clock::Time; @@ -6,7 +8,7 @@ use crate::event::Event; use crate::statistics::repository::Repository; use crate::{CurrentClock, HTTP_TRACKER_LOG_TARGET}; -pub async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Repository) { +pub async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Arc) { loop { match receiver.recv().await { Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, diff --git a/packages/http-tracker-core/src/statistics/keeper.rs b/packages/http-tracker-core/src/statistics/keeper.rs index 1b69f032d..fad9382d7 100644 --- a/packages/http-tracker-core/src/statistics/keeper.rs +++ b/packages/http-tracker-core/src/statistics/keeper.rs @@ -1,8 +1,10 @@ -use tokio::sync::broadcast::Receiver; +use std::sync::Arc; + +use tokio::task::JoinHandle; use super::event::listener::dispatch_events; use super::repository::Repository; -use crate::event::Event; +use crate::event::sender::{self, Broadcaster}; use crate::HTTP_TRACKER_LOG_TARGET; /// The service responsible for keeping tracker metrics (listening to statistics events and handle them). @@ -10,25 +12,50 @@ use crate::HTTP_TRACKER_LOG_TARGET; /// It actively listen to new statistics events. When it receives a new event /// it accordingly increases the counters. pub struct Keeper { - pub repository: Repository, + pub enable_sender: bool, + pub broadcaster: Broadcaster, + pub repository: Arc, } impl Default for Keeper { fn default() -> Self { - Self::new() + let enable_sender = true; + let broadcaster = Broadcaster::default(); + let repository = Arc::new(Repository::new()); + + Self::new(enable_sender, broadcaster, repository) } } impl Keeper { + /// Creates a new instance of [`Keeper`]. 
#[must_use] - pub fn new() -> Self { + pub fn new(enable_sender: bool, broadcaster: Broadcaster, repository: Arc) -> Self { Self { - repository: Repository::new(), + enable_sender, + broadcaster, + repository, + } + } + + #[must_use] + pub fn sender(&self) -> Option> { + if self.enable_sender { + Some(Box::new(self.broadcaster.clone())) + } else { + None } } - pub fn run_event_listener(&mut self, receiver: Receiver) { + #[must_use] + pub fn repository(&self) -> Arc { + self.repository.clone() + } + + #[must_use] + pub fn run_event_listener(&self) -> JoinHandle<()> { let stats_repository = self.repository.clone(); + let receiver = self.broadcaster.subscribe(); tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting HTTP tracker core event listener"); @@ -36,7 +63,7 @@ impl Keeper { dispatch_events(receiver, stats_repository).await; tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "HTTP tracker core event listener finished"); - }); + }) } } @@ -48,7 +75,7 @@ mod tests { #[tokio::test] async fn should_contain_the_tracker_statistics() { - let stats_tracker = Keeper::new(); + let stats_tracker = Keeper::default(); let stats = stats_tracker.repository.get_stats().await; diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index 418b0d082..2895d1b6d 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -89,9 +89,8 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let (_http_stats_event_sender, http_stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - let http_stats_repository = Arc::new(http_stats_repository); - let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), http_stats_repository.clone()).await; + let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), http_stats_repository).await; assert_eq!( 
tracker_metrics, diff --git a/packages/http-tracker-core/src/statistics/setup.rs b/packages/http-tracker-core/src/statistics/setup.rs index e2974e4c0..e2b252c23 100644 --- a/packages/http-tracker-core/src/statistics/setup.rs +++ b/packages/http-tracker-core/src/statistics/setup.rs @@ -1,8 +1,12 @@ //! Setup for the tracker statistics. //! //! The [`factory`] function builds the structs needed for handling the tracker metrics. +use std::sync::Arc; + +use super::keeper::Keeper; +use super::repository::Repository; +use crate::event; use crate::event::sender::Broadcaster; -use crate::{event, statistics}; /// It builds the structs needed for handling the tracker metrics. /// @@ -17,20 +21,23 @@ use crate::{event, statistics}; /// not run the event listeners, consequently the statistics events are sent are /// received but not dispatched to the handler. #[must_use] -pub fn factory(tracker_usage_statistics: bool) -> (Option>, statistics::repository::Repository) { - let mut keeper = statistics::keeper::Keeper::new(); - - let opt_event_sender: Option> = if tracker_usage_statistics { - let broadcaster = Broadcaster::default(); +pub fn factory(tracker_usage_statistics: bool) -> (Option>, Arc) { + let keeper = keeper_factory(tracker_usage_statistics); - keeper.run_event_listener(broadcaster.subscribe()); + if tracker_usage_statistics { + // todo: this should be started like the other jobs during `app::start` + // and keep the join handle in a list of jobs. 
+ let _unused = keeper.run_event_listener(); + } - Some(Box::new(broadcaster)) - } else { - None - }; + (keeper.sender(), keeper.repository()) +} - (opt_event_sender, keeper.repository) +#[must_use] +pub fn keeper_factory(tracker_usage_statistics: bool) -> Arc { + let broadcaster = Broadcaster::default(); + let repository = Arc::new(Repository::new()); + Arc::new(Keeper::new(tracker_usage_statistics, broadcaster.clone(), repository.clone())) } #[cfg(test)] diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 9277df92b..744c8fd7c 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -149,7 +149,6 @@ mod tests { // HTTP core stats let (_http_stats_event_sender, http_stats_repository) = bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); - let http_stats_repository = Arc::new(http_stats_repository); // UDP core stats let (_udp_stats_event_sender, _udp_stats_repository) = From 07d13146f5cd13055a37323e12fbf9f970f41379 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 24 Apr 2025 15:54:47 +0100 Subject: [PATCH 488/802] refactor: [#1444] http core event listener start in app start. 
Step 2 --- .../axum-http-tracker-server/src/server.rs | 14 +++++--- .../src/v1/handlers/announce.rs | 15 ++++++--- .../src/v1/handlers/scrape.rs | 14 +++++--- .../http-tracker-core/benches/helpers/util.rs | 25 ++++++++------ packages/http-tracker-core/src/container.rs | 15 +++++++-- .../src/services/announce.rs | 14 +++++--- .../http-tracker-core/src/services/scrape.rs | 12 ++++--- .../src/statistics/keeper.rs | 6 ++-- .../src/statistics/services.rs | 11 ++++++- .../http-tracker-core/src/statistics/setup.rs | 33 ++++++++++--------- .../src/statistics/services.rs | 11 +++++-- 11 files changed, 115 insertions(+), 55 deletions(-) diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 896922751..40620674f 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -270,10 +270,16 @@ mod tests { let http_tracker_config = Arc::new(http_tracker_config.clone()); - // HTTP stats - let (http_stats_event_sender, http_stats_repository) = - bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); - let http_stats_event_sender = Arc::new(http_stats_event_sender); + // HTTP core stats + let keeper = bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); + let http_stats_event_sender = keeper.sender(); + let http_stats_repository = keeper.repository(); + + if configuration.core.tracker_usage_statistics { + // todo: this should be started like the other jobs during `app::start` + // and keep the join handle in a list of jobs. 
+ let _unused = keeper.run_event_listener(); + } let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 296cefcd5..3729f5bdc 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -160,11 +160,16 @@ mod tests { &db_torrent_repository, )); - // HTTP stats - let (http_stats_event_sender, http_stats_repository) = - bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); - let http_stats_event_sender = Arc::new(http_stats_event_sender); - let _http_stats_repository = Arc::new(http_stats_repository); + // HTTP core stats + let keeper = bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let http_stats_event_sender = keeper.sender(); + let _http_stats_repository = keeper.repository(); + + if config.core.tracker_usage_statistics { + // todo: this should be started like the other jobs during `app::start` + // and keep the join handle in a list of jobs. 
+ let _unused = keeper.run_event_listener(); + } let announce_service = Arc::new(AnnounceService::new( core_config.clone(), diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index e5d94a072..9e5fafd46 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -131,10 +131,16 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - // HTTP stats - let (http_stats_event_sender, _http_stats_repository) = - bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); - let http_stats_event_sender = Arc::new(http_stats_event_sender); + // HTTP core stats + let keeper = bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let http_stats_event_sender = keeper.sender(); + let _http_stats_repository = keeper.repository(); + + if config.core.tracker_usage_statistics { + // todo: this should be started like the other jobs during `app::start` + // and keep the join handle in a list of jobs. 
+ let _unused = keeper.run_event_listener(); + } ( CoreTrackerServices { diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index dff516063..957f70444 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -2,6 +2,8 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +use bittorrent_http_tracker_core::event::Event; +use bittorrent_http_tracker_core::{event, statistics}; use bittorrent_http_tracker_protocol::v1::requests::announce::Announce; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; use bittorrent_primitives::info_hash::InfoHash; @@ -13,6 +15,9 @@ use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepo use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; +use futures::future::BoxFuture; +use mockall::mock; +use tokio::sync::broadcast::error::SendError; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; @@ -50,10 +55,16 @@ pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> ( &db_torrent_repository, )); - // HTTP stats - let (http_stats_event_sender, http_stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - let http_stats_event_sender = Arc::new(http_stats_event_sender); - let _http_stats_repository = Arc::new(http_stats_repository); + // HTTP core stats + let keeper = statistics::setup::factory(config.core.tracker_usage_statistics); + let http_stats_event_sender = keeper.sender(); + let 
_http_stats_repository = keeper.repository(); + + if config.core.tracker_usage_statistics { + // todo: this should be started like the other jobs during `app::start` + // and keep the join handle in a list of jobs. + let _unused = keeper.run_event_listener(); + } ( CoreTrackerServices { @@ -105,12 +116,6 @@ pub fn sample_info_hash() -> InfoHash { .expect("String should be a valid info hash") } -use bittorrent_http_tracker_core::event::Event; -use bittorrent_http_tracker_core::{event, statistics}; -use futures::future::BoxFuture; -use mockall::mock; -use tokio::sync::broadcast::error::SendError; - mock! { HttpStatsEventSender {} impl event::sender::Sender for HttpStatsEventSender { diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 913236483..302a4fbbe 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -62,9 +62,17 @@ pub struct HttpTrackerCoreServices { impl HttpTrackerCoreServices { #[must_use] pub fn initialize_from(tracker_core_container: &Arc) -> Arc { - let (http_stats_event_sender, http_stats_repository) = - statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); - let http_stats_event_sender = Arc::new(http_stats_event_sender); + // HTTP core stats + let keeper = statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); + let http_stats_event_sender = keeper.sender(); + let http_stats_repository = keeper.repository(); + + if tracker_core_container.core_config.tracker_usage_statistics { + // todo: this should be started like the other jobs during `app::start` + // and keep the join handle in a list of jobs. 
+ let _unused = keeper.run_event_listener(); + } + let http_announce_service = Arc::new(AnnounceService::new( tracker_core_container.core_config.clone(), tracker_core_container.announce_handler.clone(), @@ -72,6 +80,7 @@ impl HttpTrackerCoreServices { tracker_core_container.whitelist_authorization.clone(), http_stats_event_sender.clone(), )); + let http_scrape_service = Arc::new(ScrapeService::new( tracker_core_container.core_config.clone(), tracker_core_container.scrape_handler.clone(), diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index fa0c0c38c..a3014873e 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -252,10 +252,16 @@ mod tests { &db_torrent_repository, )); - // HTTP stats - let (http_stats_event_sender, http_stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); - let http_stats_event_sender = Arc::new(http_stats_event_sender); - let _http_stats_repository = Arc::new(http_stats_repository); + // HTTP core stats + let keeper = statistics::setup::factory(config.core.tracker_usage_statistics); + let http_stats_event_sender = keeper.sender(); + let _http_stats_repository = keeper.repository(); + + if config.core.tracker_usage_statistics { + // todo: this should be started like the other jobs during `app::start` + // and keep the join handle in a list of jobs. 
+ let _unused = keeper.run_event_listener(); + } ( CoreTrackerServices { diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 5e8f54cc1..21308a6aa 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -272,8 +272,10 @@ mod tests { let configuration = configuration::ephemeral_public(); let core_config = Arc::new(configuration.core.clone()); - let (http_stats_event_sender, _http_stats_repository) = statistics::setup::factory(false); - let http_stats_event_sender = Arc::new(http_stats_event_sender); + // HTTP core stats + let keeper = statistics::setup::factory(false); + let http_stats_event_sender = keeper.sender(); + let _http_stats_repository = keeper.repository(); let container = initialize_services_with_configuration(&configuration); @@ -462,8 +464,10 @@ mod tests { let container = initialize_services_with_configuration(&config); - let (http_stats_event_sender, _http_stats_repository) = statistics::setup::factory(false); - let http_stats_event_sender = Arc::new(http_stats_event_sender); + // HTTP core stats + let keeper = statistics::setup::factory(false); + let http_stats_event_sender = keeper.sender(); + let _http_stats_repository = keeper.repository(); let info_hash = sample_info_hash(); let info_hashes = vec![info_hash]; diff --git a/packages/http-tracker-core/src/statistics/keeper.rs b/packages/http-tracker-core/src/statistics/keeper.rs index fad9382d7..4c0f7c916 100644 --- a/packages/http-tracker-core/src/statistics/keeper.rs +++ b/packages/http-tracker-core/src/statistics/keeper.rs @@ -39,11 +39,11 @@ impl Keeper { } #[must_use] - pub fn sender(&self) -> Option> { + pub fn sender(&self) -> Arc>> { if self.enable_sender { - Some(Box::new(self.broadcaster.clone())) + Arc::new(Some(Box::new(self.broadcaster.clone()))) } else { - None + Arc::new(None) } } diff --git a/packages/http-tracker-core/src/statistics/services.rs 
b/packages/http-tracker-core/src/statistics/services.rs index 2895d1b6d..172e7b9ab 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -88,7 +88,16 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let (_http_stats_event_sender, http_stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + // HTTP core stats + let keeper = statistics::setup::factory(config.core.tracker_usage_statistics); + let _http_stats_event_sender = keeper.sender(); + let http_stats_repository = keeper.repository(); + + if config.core.tracker_usage_statistics { + // todo: this should be started like the other jobs during `app::start` + // and keep the join handle in a list of jobs. + let _unused = keeper.run_event_listener(); + } let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), http_stats_repository).await; diff --git a/packages/http-tracker-core/src/statistics/setup.rs b/packages/http-tracker-core/src/statistics/setup.rs index e2b252c23..565e86fd4 100644 --- a/packages/http-tracker-core/src/statistics/setup.rs +++ b/packages/http-tracker-core/src/statistics/setup.rs @@ -5,7 +5,6 @@ use std::sync::Arc; use super::keeper::Keeper; use super::repository::Repository; -use crate::event; use crate::event::sender::Broadcaster; /// It builds the structs needed for handling the tracker metrics. @@ -21,16 +20,8 @@ use crate::event::sender::Broadcaster; /// not run the event listeners, consequently the statistics events are sent are /// received but not dispatched to the handler. #[must_use] -pub fn factory(tracker_usage_statistics: bool) -> (Option>, Arc) { - let keeper = keeper_factory(tracker_usage_statistics); - - if tracker_usage_statistics { - // todo: this should be started like the other jobs during `app::start` - // and keep the join handle in a list of jobs. 
- let _unused = keeper.run_event_listener(); - } - - (keeper.sender(), keeper.repository()) +pub fn factory(tracker_usage_statistics: bool) -> Arc { + keeper_factory(tracker_usage_statistics) } #[must_use] @@ -48,17 +39,29 @@ mod test { async fn should_not_send_any_event_when_statistics_are_disabled() { let tracker_usage_statistics = false; - let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); + // HTTP core stats + let keeper = factory(tracker_usage_statistics); + let http_stats_event_sender = keeper.sender(); + let _http_stats_repository = keeper.repository(); + + if tracker_usage_statistics { + // todo: this should be started like the other jobs during `app::start` + // and keep the join handle in a list of jobs. + let _unused = keeper.run_event_listener(); + } - assert!(stats_event_sender.is_none()); + assert!(http_stats_event_sender.is_none()); } #[tokio::test] async fn should_send_events_when_statistics_are_enabled() { let tracker_usage_statistics = true; - let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); + // HTTP core stats + let keeper = factory(tracker_usage_statistics); + let http_stats_event_sender = keeper.sender(); + let _http_stats_repository = keeper.repository(); - assert!(stats_event_sender.is_some()); + assert!(http_stats_event_sender.is_some()); } } diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 744c8fd7c..ac8948e42 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -147,8 +147,15 @@ mod tests { let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); // HTTP core stats - let (_http_stats_event_sender, http_stats_repository) = - bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let keeper = 
bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let _http_stats_event_sender = keeper.sender(); + let http_stats_repository = keeper.repository(); + + if config.core.tracker_usage_statistics { + // todo: this should be started like the other jobs during `app::start` + // and keep the join handle in a list of jobs. + let _unused = keeper.run_event_listener(); + } // UDP core stats let (_udp_stats_event_sender, _udp_stats_repository) = From b2cf5d9e921326e6d5f163023fa563ee49a583de Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 24 Apr 2025 15:59:26 +0100 Subject: [PATCH 489/802] refactor: [#1444] rename variable --- packages/axum-http-tracker-server/src/server.rs | 9 +++++---- .../src/v1/handlers/announce.rs | 9 +++++---- .../src/v1/handlers/scrape.rs | 9 +++++---- packages/http-tracker-core/benches/helpers/util.rs | 8 ++++---- packages/http-tracker-core/src/container.rs | 8 ++++---- .../http-tracker-core/src/services/announce.rs | 8 ++++---- packages/http-tracker-core/src/services/scrape.rs | 12 ++++++------ .../http-tracker-core/src/statistics/services.rs | 8 ++++---- packages/http-tracker-core/src/statistics/setup.rs | 14 +++++++------- .../src/statistics/services.rs | 9 +++++---- 10 files changed, 49 insertions(+), 45 deletions(-) diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 40620674f..52085f822 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -271,14 +271,15 @@ mod tests { let http_tracker_config = Arc::new(http_tracker_config.clone()); // HTTP core stats - let keeper = bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); - let http_stats_event_sender = keeper.sender(); - let http_stats_repository = keeper.repository(); + let http_core_stats_keeper = + 
bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); + let http_stats_event_sender = http_core_stats_keeper.sender(); + let http_stats_repository = http_core_stats_keeper.repository(); if configuration.core.tracker_usage_statistics { // todo: this should be started like the other jobs during `app::start` // and keep the join handle in a list of jobs. - let _unused = keeper.run_event_listener(); + let _unused = http_core_stats_keeper.run_event_listener(); } let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 3729f5bdc..5c08e97eb 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -161,14 +161,15 @@ mod tests { )); // HTTP core stats - let keeper = bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); - let http_stats_event_sender = keeper.sender(); - let _http_stats_repository = keeper.repository(); + let http_core_stats_keeper = + bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let http_stats_event_sender = http_core_stats_keeper.sender(); + let _http_stats_repository = http_core_stats_keeper.repository(); if config.core.tracker_usage_statistics { // todo: this should be started like the other jobs during `app::start` // and keep the join handle in a list of jobs. 
- let _unused = keeper.run_event_listener(); + let _unused = http_core_stats_keeper.run_event_listener(); } let announce_service = Arc::new(AnnounceService::new( diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index 9e5fafd46..76390ea0d 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -132,14 +132,15 @@ mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); // HTTP core stats - let keeper = bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); - let http_stats_event_sender = keeper.sender(); - let _http_stats_repository = keeper.repository(); + let http_core_stats_keeper = + bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let http_stats_event_sender = http_core_stats_keeper.sender(); + let _http_stats_repository = http_core_stats_keeper.repository(); if config.core.tracker_usage_statistics { // todo: this should be started like the other jobs during `app::start` // and keep the join handle in a list of jobs. 
- let _unused = keeper.run_event_listener(); + let _unused = http_core_stats_keeper.run_event_listener(); } ( diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index 957f70444..3ef1ccf46 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -56,14 +56,14 @@ pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> ( )); // HTTP core stats - let keeper = statistics::setup::factory(config.core.tracker_usage_statistics); - let http_stats_event_sender = keeper.sender(); - let _http_stats_repository = keeper.repository(); + let http_core_stats_keeper = statistics::setup::factory(config.core.tracker_usage_statistics); + let http_stats_event_sender = http_core_stats_keeper.sender(); + let _http_stats_repository = http_core_stats_keeper.repository(); if config.core.tracker_usage_statistics { // todo: this should be started like the other jobs during `app::start` // and keep the join handle in a list of jobs. 
- let _unused = keeper.run_event_listener(); + let _unused = http_core_stats_keeper.run_event_listener(); } ( diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 302a4fbbe..0b8bd9337 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -63,14 +63,14 @@ impl HttpTrackerCoreServices { #[must_use] pub fn initialize_from(tracker_core_container: &Arc) -> Arc { // HTTP core stats - let keeper = statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); - let http_stats_event_sender = keeper.sender(); - let http_stats_repository = keeper.repository(); + let http_core_stats_keeper = statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); + let http_stats_event_sender = http_core_stats_keeper.sender(); + let http_stats_repository = http_core_stats_keeper.repository(); if tracker_core_container.core_config.tracker_usage_statistics { // todo: this should be started like the other jobs during `app::start` // and keep the join handle in a list of jobs. 
- let _unused = keeper.run_event_listener(); + let _unused = http_core_stats_keeper.run_event_listener(); } let http_announce_service = Arc::new(AnnounceService::new( diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index a3014873e..5e50ebd8f 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -253,14 +253,14 @@ mod tests { )); // HTTP core stats - let keeper = statistics::setup::factory(config.core.tracker_usage_statistics); - let http_stats_event_sender = keeper.sender(); - let _http_stats_repository = keeper.repository(); + let http_core_stats_keeper = statistics::setup::factory(config.core.tracker_usage_statistics); + let http_stats_event_sender = http_core_stats_keeper.sender(); + let _http_stats_repository = http_core_stats_keeper.repository(); if config.core.tracker_usage_statistics { // todo: this should be started like the other jobs during `app::start` // and keep the join handle in a list of jobs. 
- let _unused = keeper.run_event_listener(); + let _unused = http_core_stats_keeper.run_event_listener(); } ( diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 21308a6aa..7cd1a5991 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -273,9 +273,9 @@ mod tests { let core_config = Arc::new(configuration.core.clone()); // HTTP core stats - let keeper = statistics::setup::factory(false); - let http_stats_event_sender = keeper.sender(); - let _http_stats_repository = keeper.repository(); + let http_core_stats_keeper = statistics::setup::factory(false); + let http_stats_event_sender = http_core_stats_keeper.sender(); + let _http_stats_repository = http_core_stats_keeper.repository(); let container = initialize_services_with_configuration(&configuration); @@ -465,9 +465,9 @@ mod tests { let container = initialize_services_with_configuration(&config); // HTTP core stats - let keeper = statistics::setup::factory(false); - let http_stats_event_sender = keeper.sender(); - let _http_stats_repository = keeper.repository(); + let http_core_stats_keeper = statistics::setup::factory(false); + let http_stats_event_sender = http_core_stats_keeper.sender(); + let _http_stats_repository = http_core_stats_keeper.repository(); let info_hash = sample_info_hash(); let info_hashes = vec![info_hash]; diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index 172e7b9ab..94ade2e45 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -89,14 +89,14 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); // HTTP core stats - let keeper = statistics::setup::factory(config.core.tracker_usage_statistics); - let _http_stats_event_sender = keeper.sender(); - let 
http_stats_repository = keeper.repository(); + let http_core_stats_keeper = statistics::setup::factory(config.core.tracker_usage_statistics); + let _http_stats_event_sender = http_core_stats_keeper.sender(); + let http_stats_repository = http_core_stats_keeper.repository(); if config.core.tracker_usage_statistics { // todo: this should be started like the other jobs during `app::start` // and keep the join handle in a list of jobs. - let _unused = keeper.run_event_listener(); + let _unused = http_core_stats_keeper.run_event_listener(); } let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), http_stats_repository).await; diff --git a/packages/http-tracker-core/src/statistics/setup.rs b/packages/http-tracker-core/src/statistics/setup.rs index 565e86fd4..a78c53f6d 100644 --- a/packages/http-tracker-core/src/statistics/setup.rs +++ b/packages/http-tracker-core/src/statistics/setup.rs @@ -40,14 +40,14 @@ mod test { let tracker_usage_statistics = false; // HTTP core stats - let keeper = factory(tracker_usage_statistics); - let http_stats_event_sender = keeper.sender(); - let _http_stats_repository = keeper.repository(); + let http_core_stats_keeper = factory(tracker_usage_statistics); + let http_stats_event_sender = http_core_stats_keeper.sender(); + let _http_stats_repository = http_core_stats_keeper.repository(); if tracker_usage_statistics { // todo: this should be started like the other jobs during `app::start` // and keep the join handle in a list of jobs. 
- let _unused = keeper.run_event_listener(); + let _unused = http_core_stats_keeper.run_event_listener(); } assert!(http_stats_event_sender.is_none()); @@ -58,9 +58,9 @@ mod test { let tracker_usage_statistics = true; // HTTP core stats - let keeper = factory(tracker_usage_statistics); - let http_stats_event_sender = keeper.sender(); - let _http_stats_repository = keeper.repository(); + let http_core_stats_keeper = factory(tracker_usage_statistics); + let http_stats_event_sender = http_core_stats_keeper.sender(); + let _http_stats_repository = http_core_stats_keeper.repository(); assert!(http_stats_event_sender.is_some()); } diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index ac8948e42..93c8951f7 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -147,14 +147,15 @@ mod tests { let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); // HTTP core stats - let keeper = bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); - let _http_stats_event_sender = keeper.sender(); - let http_stats_repository = keeper.repository(); + let http_core_stats_keeper = + bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let _http_stats_event_sender = http_core_stats_keeper.sender(); + let http_stats_repository = http_core_stats_keeper.repository(); if config.core.tracker_usage_statistics { // todo: this should be started like the other jobs during `app::start` // and keep the join handle in a list of jobs. 
- let _unused = keeper.run_event_listener(); + let _unused = http_core_stats_keeper.run_event_listener(); } // UDP core stats From 5906037113a0d5a83b5451db790eb0fdcbc84ae6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 24 Apr 2025 16:09:31 +0100 Subject: [PATCH 490/802] refactor: [#1444] http core event listener start in app start. Step 3 --- packages/axum-http-tracker-server/src/server.rs | 1 + packages/http-tracker-core/src/container.rs | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 52085f822..a169e2565 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -302,6 +302,7 @@ mod tests { HttpTrackerCoreContainer { tracker_core_container, http_tracker_config, + http_core_stats_keeper, http_stats_event_sender, http_stats_repository, announce_service, diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 0b8bd9337..c41fac6dc 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -13,6 +13,7 @@ pub struct HttpTrackerCoreContainer { pub tracker_core_container: Arc, // `HttpTrackerCoreServices` + pub http_core_stats_keeper: Arc, pub http_stats_event_sender: Arc>>, pub http_stats_repository: Arc, pub announce_service: Arc, @@ -44,6 +45,7 @@ impl HttpTrackerCoreContainer { Arc::new(Self { tracker_core_container: tracker_core_container.clone(), http_tracker_config: http_tracker_config.clone(), + http_core_stats_keeper: http_tracker_core_services.http_core_stats_keeper.clone(), http_stats_event_sender: http_tracker_core_services.http_stats_event_sender.clone(), http_stats_repository: http_tracker_core_services.http_stats_repository.clone(), announce_service: http_tracker_core_services.http_announce_service.clone(), @@ -53,6 +55,7 @@ impl HttpTrackerCoreContainer { } pub struct 
HttpTrackerCoreServices { + pub http_core_stats_keeper: Arc, pub http_stats_event_sender: Arc>>, pub http_stats_repository: Arc, pub http_announce_service: Arc, @@ -89,6 +92,7 @@ impl HttpTrackerCoreServices { )); Arc::new(Self { + http_core_stats_keeper, http_stats_event_sender, http_stats_repository, http_announce_service, From 6d49a1308384812563cd165879237e89ee3e5528 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 24 Apr 2025 16:11:34 +0100 Subject: [PATCH 491/802] refactor: [#1444] rename fields --- .../axum-http-tracker-server/src/server.rs | 6 ++-- .../tests/server/v1/contract.rs | 35 +++---------------- packages/http-tracker-core/src/container.rs | 12 +++---- .../rest-tracker-api-core/src/container.rs | 2 +- 4 files changed, 15 insertions(+), 40 deletions(-) diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index a169e2565..bf694de79 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -302,9 +302,9 @@ mod tests { HttpTrackerCoreContainer { tracker_core_container, http_tracker_config, - http_core_stats_keeper, - http_stats_event_sender, - http_stats_repository, + stats_keeper: http_core_stats_keeper, + stats_event_sender: http_stats_event_sender, + stats_repository: http_stats_repository, announce_service, scrape_service, } diff --git a/packages/axum-http-tracker-server/tests/server/v1/contract.rs b/packages/axum-http-tracker-server/tests/server/v1/contract.rs index ad5b5a482..37d96052f 100644 --- a/packages/axum-http-tracker-server/tests/server/v1/contract.rs +++ b/packages/axum-http-tracker-server/tests/server/v1/contract.rs @@ -676,12 +676,7 @@ mod for_all_config_modes { .announce(&QueryBuilder::default().query()) .await; - let stats = env - .container - .http_tracker_core_container - .http_stats_repository - .get_stats() - .await; + let stats = env.container.http_tracker_core_container.stats_repository.get_stats().await; 
assert_eq!(stats.tcp4_announces_handled, 1); @@ -707,12 +702,7 @@ mod for_all_config_modes { .announce(&QueryBuilder::default().query()) .await; - let stats = env - .container - .http_tracker_core_container - .http_stats_repository - .get_stats() - .await; + let stats = env.container.http_tracker_core_container.stats_repository.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 1); @@ -737,12 +727,7 @@ mod for_all_config_modes { ) .await; - let stats = env - .container - .http_tracker_core_container - .http_stats_repository - .get_stats() - .await; + let stats = env.container.http_tracker_core_container.stats_repository.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 0); @@ -1130,12 +1115,7 @@ mod for_all_config_modes { ) .await; - let stats = env - .container - .http_tracker_core_container - .http_stats_repository - .get_stats() - .await; + let stats = env.container.http_tracker_core_container.stats_repository.get_stats().await; assert_eq!(stats.tcp4_scrapes_handled, 1); @@ -1167,12 +1147,7 @@ mod for_all_config_modes { ) .await; - let stats = env - .container - .http_tracker_core_container - .http_stats_repository - .get_stats() - .await; + let stats = env.container.http_tracker_core_container.stats_repository.get_stats().await; assert_eq!(stats.tcp6_scrapes_handled, 1); diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index c41fac6dc..0fcf6338c 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -13,9 +13,9 @@ pub struct HttpTrackerCoreContainer { pub tracker_core_container: Arc, // `HttpTrackerCoreServices` - pub http_core_stats_keeper: Arc, - pub http_stats_event_sender: Arc>>, - pub http_stats_repository: Arc, + pub stats_keeper: Arc, + pub stats_event_sender: Arc>>, + pub stats_repository: Arc, pub announce_service: Arc, pub scrape_service: Arc, } @@ -45,9 +45,9 @@ impl HttpTrackerCoreContainer { Arc::new(Self { 
tracker_core_container: tracker_core_container.clone(), http_tracker_config: http_tracker_config.clone(), - http_core_stats_keeper: http_tracker_core_services.http_core_stats_keeper.clone(), - http_stats_event_sender: http_tracker_core_services.http_stats_event_sender.clone(), - http_stats_repository: http_tracker_core_services.http_stats_repository.clone(), + stats_keeper: http_tracker_core_services.http_core_stats_keeper.clone(), + stats_event_sender: http_tracker_core_services.http_stats_event_sender.clone(), + stats_repository: http_tracker_core_services.http_stats_repository.clone(), announce_service: http_tracker_core_services.http_announce_service.clone(), scrape_service: http_tracker_core_services.http_scrape_service.clone(), }) diff --git a/packages/rest-tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs index 329c77eed..4451eb2c4 100644 --- a/packages/rest-tracker-api-core/src/container.rs +++ b/packages/rest-tracker-api-core/src/container.rs @@ -53,7 +53,7 @@ impl TrackerHttpApiCoreContainer { Arc::new(TrackerHttpApiCoreContainer { tracker_core_container: tracker_core_container.clone(), - http_stats_repository: http_tracker_core_container.http_stats_repository.clone(), + http_stats_repository: http_tracker_core_container.stats_repository.clone(), ban_service: udp_tracker_core_container.ban_service.clone(), udp_core_stats_repository: udp_tracker_core_container.udp_core_stats_repository.clone(), From 19bb37d980601a2b7ca1e135734cd2f764a880c0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 24 Apr 2025 16:14:19 +0100 Subject: [PATCH 492/802] refactor: [#1444] renaem variables --- packages/axum-http-tracker-server/src/server.rs | 10 +++++----- .../src/v1/handlers/announce.rs | 9 ++++----- .../src/v1/handlers/scrape.rs | 9 ++++----- packages/http-tracker-core/benches/helpers/util.rs | 8 ++++---- packages/http-tracker-core/src/container.rs | 14 +++++++------- .../http-tracker-core/src/services/announce.rs | 8 ++++---- 
packages/http-tracker-core/src/services/scrape.rs | 12 ++++++------ .../http-tracker-core/src/statistics/services.rs | 8 ++++---- packages/http-tracker-core/src/statistics/setup.rs | 14 +++++++------- .../src/statistics/services.rs | 9 ++++----- 10 files changed, 49 insertions(+), 52 deletions(-) diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index bf694de79..95a13ab1c 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -271,15 +271,15 @@ mod tests { let http_tracker_config = Arc::new(http_tracker_config.clone()); // HTTP core stats - let http_core_stats_keeper = + let http_stats_keeper = bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); - let http_stats_event_sender = http_core_stats_keeper.sender(); - let http_stats_repository = http_core_stats_keeper.repository(); + let http_stats_event_sender = http_stats_keeper.sender(); + let http_stats_repository = http_stats_keeper.repository(); if configuration.core.tracker_usage_statistics { // todo: this should be started like the other jobs during `app::start` // and keep the join handle in a list of jobs. 
- let _unused = http_core_stats_keeper.run_event_listener(); + let _unused = http_stats_keeper.run_event_listener(); } let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); @@ -302,7 +302,7 @@ mod tests { HttpTrackerCoreContainer { tracker_core_container, http_tracker_config, - stats_keeper: http_core_stats_keeper, + stats_keeper: http_stats_keeper, stats_event_sender: http_stats_event_sender, stats_repository: http_stats_repository, announce_service, diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 5c08e97eb..b4c54ce09 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -161,15 +161,14 @@ mod tests { )); // HTTP core stats - let http_core_stats_keeper = - bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); - let http_stats_event_sender = http_core_stats_keeper.sender(); - let _http_stats_repository = http_core_stats_keeper.repository(); + let http_stats_keeper = bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let http_stats_event_sender = http_stats_keeper.sender(); + let _http_stats_repository = http_stats_keeper.repository(); if config.core.tracker_usage_statistics { // todo: this should be started like the other jobs during `app::start` // and keep the join handle in a list of jobs. 
- let _unused = http_core_stats_keeper.run_event_listener(); + let _unused = http_stats_keeper.run_event_listener(); } let announce_service = Arc::new(AnnounceService::new( diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index 76390ea0d..e4ba6ed51 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -132,15 +132,14 @@ mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); // HTTP core stats - let http_core_stats_keeper = - bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); - let http_stats_event_sender = http_core_stats_keeper.sender(); - let _http_stats_repository = http_core_stats_keeper.repository(); + let http_stats_keeper = bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let http_stats_event_sender = http_stats_keeper.sender(); + let _http_stats_repository = http_stats_keeper.repository(); if config.core.tracker_usage_statistics { // todo: this should be started like the other jobs during `app::start` // and keep the join handle in a list of jobs. 
- let _unused = http_core_stats_keeper.run_event_listener(); + let _unused = http_stats_keeper.run_event_listener(); } ( diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index 3ef1ccf46..fc8969c10 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -56,14 +56,14 @@ pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> ( )); // HTTP core stats - let http_core_stats_keeper = statistics::setup::factory(config.core.tracker_usage_statistics); - let http_stats_event_sender = http_core_stats_keeper.sender(); - let _http_stats_repository = http_core_stats_keeper.repository(); + let http_stats_keeper = statistics::setup::factory(config.core.tracker_usage_statistics); + let http_stats_event_sender = http_stats_keeper.sender(); + let _http_stats_repository = http_stats_keeper.repository(); if config.core.tracker_usage_statistics { // todo: this should be started like the other jobs during `app::start` // and keep the join handle in a list of jobs. 
- let _unused = http_core_stats_keeper.run_event_listener(); + let _unused = http_stats_keeper.run_event_listener(); } ( diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 0fcf6338c..381d1f770 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -45,7 +45,7 @@ impl HttpTrackerCoreContainer { Arc::new(Self { tracker_core_container: tracker_core_container.clone(), http_tracker_config: http_tracker_config.clone(), - stats_keeper: http_tracker_core_services.http_core_stats_keeper.clone(), + stats_keeper: http_tracker_core_services.http_stats_keeper.clone(), stats_event_sender: http_tracker_core_services.http_stats_event_sender.clone(), stats_repository: http_tracker_core_services.http_stats_repository.clone(), announce_service: http_tracker_core_services.http_announce_service.clone(), @@ -55,7 +55,7 @@ impl HttpTrackerCoreContainer { } pub struct HttpTrackerCoreServices { - pub http_core_stats_keeper: Arc, + pub http_stats_keeper: Arc, pub http_stats_event_sender: Arc>>, pub http_stats_repository: Arc, pub http_announce_service: Arc, @@ -66,14 +66,14 @@ impl HttpTrackerCoreServices { #[must_use] pub fn initialize_from(tracker_core_container: &Arc) -> Arc { // HTTP core stats - let http_core_stats_keeper = statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); - let http_stats_event_sender = http_core_stats_keeper.sender(); - let http_stats_repository = http_core_stats_keeper.repository(); + let http_stats_keeper = statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); + let http_stats_event_sender = http_stats_keeper.sender(); + let http_stats_repository = http_stats_keeper.repository(); if tracker_core_container.core_config.tracker_usage_statistics { // todo: this should be started like the other jobs during `app::start` // and keep the join handle in a list of jobs. 
- let _unused = http_core_stats_keeper.run_event_listener(); + let _unused = http_stats_keeper.run_event_listener(); } let http_announce_service = Arc::new(AnnounceService::new( @@ -92,7 +92,7 @@ impl HttpTrackerCoreServices { )); Arc::new(Self { - http_core_stats_keeper, + http_stats_keeper, http_stats_event_sender, http_stats_repository, http_announce_service, diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 5e50ebd8f..07d576aca 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -253,14 +253,14 @@ mod tests { )); // HTTP core stats - let http_core_stats_keeper = statistics::setup::factory(config.core.tracker_usage_statistics); - let http_stats_event_sender = http_core_stats_keeper.sender(); - let _http_stats_repository = http_core_stats_keeper.repository(); + let http_stats_keeper = statistics::setup::factory(config.core.tracker_usage_statistics); + let http_stats_event_sender = http_stats_keeper.sender(); + let _http_stats_repository = http_stats_keeper.repository(); if config.core.tracker_usage_statistics { // todo: this should be started like the other jobs during `app::start` // and keep the join handle in a list of jobs. 
- let _unused = http_core_stats_keeper.run_event_listener(); + let _unused = http_stats_keeper.run_event_listener(); } ( diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 7cd1a5991..23f1566b3 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -273,9 +273,9 @@ mod tests { let core_config = Arc::new(configuration.core.clone()); // HTTP core stats - let http_core_stats_keeper = statistics::setup::factory(false); - let http_stats_event_sender = http_core_stats_keeper.sender(); - let _http_stats_repository = http_core_stats_keeper.repository(); + let http_stats_keeper = statistics::setup::factory(false); + let http_stats_event_sender = http_stats_keeper.sender(); + let _http_stats_repository = http_stats_keeper.repository(); let container = initialize_services_with_configuration(&configuration); @@ -465,9 +465,9 @@ mod tests { let container = initialize_services_with_configuration(&config); // HTTP core stats - let http_core_stats_keeper = statistics::setup::factory(false); - let http_stats_event_sender = http_core_stats_keeper.sender(); - let _http_stats_repository = http_core_stats_keeper.repository(); + let http_stats_keeper = statistics::setup::factory(false); + let http_stats_event_sender = http_stats_keeper.sender(); + let _http_stats_repository = http_stats_keeper.repository(); let info_hash = sample_info_hash(); let info_hashes = vec![info_hash]; diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index 94ade2e45..4a27b3267 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -89,14 +89,14 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); // HTTP core stats - let http_core_stats_keeper = 
statistics::setup::factory(config.core.tracker_usage_statistics); - let _http_stats_event_sender = http_core_stats_keeper.sender(); - let http_stats_repository = http_core_stats_keeper.repository(); + let http_stats_keeper = statistics::setup::factory(config.core.tracker_usage_statistics); + let _http_stats_event_sender = http_stats_keeper.sender(); + let http_stats_repository = http_stats_keeper.repository(); if config.core.tracker_usage_statistics { // todo: this should be started like the other jobs during `app::start` // and keep the join handle in a list of jobs. - let _unused = http_core_stats_keeper.run_event_listener(); + let _unused = http_stats_keeper.run_event_listener(); } let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), http_stats_repository).await; diff --git a/packages/http-tracker-core/src/statistics/setup.rs b/packages/http-tracker-core/src/statistics/setup.rs index a78c53f6d..bac9303a6 100644 --- a/packages/http-tracker-core/src/statistics/setup.rs +++ b/packages/http-tracker-core/src/statistics/setup.rs @@ -40,14 +40,14 @@ mod test { let tracker_usage_statistics = false; // HTTP core stats - let http_core_stats_keeper = factory(tracker_usage_statistics); - let http_stats_event_sender = http_core_stats_keeper.sender(); - let _http_stats_repository = http_core_stats_keeper.repository(); + let http_stats_keeper = factory(tracker_usage_statistics); + let http_stats_event_sender = http_stats_keeper.sender(); + let _http_stats_repository = http_stats_keeper.repository(); if tracker_usage_statistics { // todo: this should be started like the other jobs during `app::start` // and keep the join handle in a list of jobs. 
- let _unused = http_core_stats_keeper.run_event_listener(); + let _unused = http_stats_keeper.run_event_listener(); } assert!(http_stats_event_sender.is_none()); @@ -58,9 +58,9 @@ mod test { let tracker_usage_statistics = true; // HTTP core stats - let http_core_stats_keeper = factory(tracker_usage_statistics); - let http_stats_event_sender = http_core_stats_keeper.sender(); - let _http_stats_repository = http_core_stats_keeper.repository(); + let http_stats_keeper = factory(tracker_usage_statistics); + let http_stats_event_sender = http_stats_keeper.sender(); + let _http_stats_repository = http_stats_keeper.repository(); assert!(http_stats_event_sender.is_some()); } diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 93c8951f7..93bbc7e1c 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -147,15 +147,14 @@ mod tests { let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); // HTTP core stats - let http_core_stats_keeper = - bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); - let _http_stats_event_sender = http_core_stats_keeper.sender(); - let http_stats_repository = http_core_stats_keeper.repository(); + let http_stats_keeper = bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let _http_stats_event_sender = http_stats_keeper.sender(); + let http_stats_repository = http_stats_keeper.repository(); if config.core.tracker_usage_statistics { // todo: this should be started like the other jobs during `app::start` // and keep the join handle in a list of jobs. 
- let _unused = http_core_stats_keeper.run_event_listener(); + let _unused = http_stats_keeper.run_event_listener(); } // UDP core stats From 2d9af45fca91845af1446c8719f8f38626784d21 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 24 Apr 2025 16:16:49 +0100 Subject: [PATCH 493/802] chore: [#1444] event listener has to be run manually on tests We are moving the execution of the event listener from AppContainer initialization to jobs start. However, in tests we still have to run it manually if we need it. --- packages/axum-http-tracker-server/src/server.rs | 2 -- packages/axum-http-tracker-server/src/v1/handlers/announce.rs | 2 -- packages/axum-http-tracker-server/src/v1/handlers/scrape.rs | 2 -- packages/http-tracker-core/benches/helpers/util.rs | 2 -- packages/http-tracker-core/src/services/announce.rs | 2 -- packages/http-tracker-core/src/statistics/services.rs | 2 -- packages/http-tracker-core/src/statistics/setup.rs | 2 -- packages/rest-tracker-api-core/src/statistics/services.rs | 2 -- 8 files changed, 16 deletions(-) diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 95a13ab1c..3d7adfaf2 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -277,8 +277,6 @@ mod tests { let http_stats_repository = http_stats_keeper.repository(); if configuration.core.tracker_usage_statistics { - // todo: this should be started like the other jobs during `app::start` - // and keep the join handle in a list of jobs. 
let _unused = http_stats_keeper.run_event_listener(); } diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index b4c54ce09..ddeff3ea4 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -166,8 +166,6 @@ mod tests { let _http_stats_repository = http_stats_keeper.repository(); if config.core.tracker_usage_statistics { - // todo: this should be started like the other jobs during `app::start` - // and keep the join handle in a list of jobs. let _unused = http_stats_keeper.run_event_listener(); } diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index e4ba6ed51..67c75d6ed 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -137,8 +137,6 @@ mod tests { let _http_stats_repository = http_stats_keeper.repository(); if config.core.tracker_usage_statistics { - // todo: this should be started like the other jobs during `app::start` - // and keep the join handle in a list of jobs. let _unused = http_stats_keeper.run_event_listener(); } diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index fc8969c10..6bfbcffd6 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -61,8 +61,6 @@ pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> ( let _http_stats_repository = http_stats_keeper.repository(); if config.core.tracker_usage_statistics { - // todo: this should be started like the other jobs during `app::start` - // and keep the join handle in a list of jobs. 
let _unused = http_stats_keeper.run_event_listener(); } diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 07d576aca..c4c94474f 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -258,8 +258,6 @@ mod tests { let _http_stats_repository = http_stats_keeper.repository(); if config.core.tracker_usage_statistics { - // todo: this should be started like the other jobs during `app::start` - // and keep the join handle in a list of jobs. let _unused = http_stats_keeper.run_event_listener(); } diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index 4a27b3267..7e4f03492 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -94,8 +94,6 @@ mod tests { let http_stats_repository = http_stats_keeper.repository(); if config.core.tracker_usage_statistics { - // todo: this should be started like the other jobs during `app::start` - // and keep the join handle in a list of jobs. let _unused = http_stats_keeper.run_event_listener(); } diff --git a/packages/http-tracker-core/src/statistics/setup.rs b/packages/http-tracker-core/src/statistics/setup.rs index bac9303a6..f1f907b2e 100644 --- a/packages/http-tracker-core/src/statistics/setup.rs +++ b/packages/http-tracker-core/src/statistics/setup.rs @@ -45,8 +45,6 @@ mod test { let _http_stats_repository = http_stats_keeper.repository(); if tracker_usage_statistics { - // todo: this should be started like the other jobs during `app::start` - // and keep the join handle in a list of jobs. 
let _unused = http_stats_keeper.run_event_listener(); } diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 93bbc7e1c..a299ccbaa 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -152,8 +152,6 @@ mod tests { let http_stats_repository = http_stats_keeper.repository(); if config.core.tracker_usage_statistics { - // todo: this should be started like the other jobs during `app::start` - // and keep the join handle in a list of jobs. let _unused = http_stats_keeper.run_event_listener(); } From 07c58580e421ae3f32b32462d197db6732eb4ed6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 24 Apr 2025 16:51:06 +0100 Subject: [PATCH 494/802] refactor: [#1444] http core event listener start in app start. Step 4 --- .../src/environment.rs | 37 ++++++++++++++++--- packages/http-tracker-core/src/container.rs | 6 --- .../http-tracker-core/src/statistics/setup.rs | 12 ------ src/app.rs | 21 +++++++++++ 4 files changed, 52 insertions(+), 24 deletions(-) diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index a89d9af08..30755b452 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -4,6 +4,7 @@ use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::container::TrackerCoreContainer; use futures::executor::block_on; +use tokio::task::JoinHandle; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; @@ -17,6 +18,7 @@ pub struct Environment { pub container: Arc, pub registar: Registar, pub server: HttpServer, + pub event_listener_job: Option>, } impl Environment { @@ -54,22 +56,32 @@ impl 
Environment { container, registar: Registar::default(), server, + event_listener_job: None, } } + /// Starts the test environment and return a running environment. + /// /// # Panics /// /// Will panic if the server fails to start. #[allow(dead_code)] pub async fn start(self) -> Environment { + // Start the event listener + let event_listener_job = self.container.http_tracker_core_container.stats_keeper.run_event_listener(); + + // Start the server + let server = self + .server + .start(self.container.http_tracker_core_container.clone(), self.registar.give_form()) + .await + .unwrap(); + Environment { container: self.container.clone(), registar: self.registar.clone(), - server: self - .server - .start(self.container.http_tracker_core_container.clone(), self.registar.give_form()) - .await - .unwrap(), + server, + event_listener_job: Some(event_listener_job), } } } @@ -79,14 +91,27 @@ impl Environment { Environment::::new(configuration).start().await } + /// Stops the test environment and return a stopped environment. + /// /// # Panics /// /// Will panic if the server fails to stop. 
pub async fn stop(self) -> Environment { + // Stop the event listener + if let Some(event_listener_job) = self.event_listener_job { + // todo: send a message to the event listener to stop and wait for + // it to finish + event_listener_job.abort(); + } + + // Stop the server + let server = self.server.stop().await.expect("Failed to stop the http tracker server"); + Environment { container: self.container, registar: Registar::default(), - server: self.server.stop().await.unwrap(), + server, + event_listener_job: None, } } diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 381d1f770..e685dd521 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -70,12 +70,6 @@ impl HttpTrackerCoreServices { let http_stats_event_sender = http_stats_keeper.sender(); let http_stats_repository = http_stats_keeper.repository(); - if tracker_core_container.core_config.tracker_usage_statistics { - // todo: this should be started like the other jobs during `app::start` - // and keep the join handle in a list of jobs. - let _unused = http_stats_keeper.run_event_listener(); - } - let http_announce_service = Arc::new(AnnounceService::new( tracker_core_container.core_config.clone(), tracker_core_container.announce_handler.clone(), diff --git a/packages/http-tracker-core/src/statistics/setup.rs b/packages/http-tracker-core/src/statistics/setup.rs index f1f907b2e..09f077507 100644 --- a/packages/http-tracker-core/src/statistics/setup.rs +++ b/packages/http-tracker-core/src/statistics/setup.rs @@ -7,18 +7,6 @@ use super::keeper::Keeper; use super::repository::Repository; use crate::event::sender::Broadcaster; -/// It builds the structs needed for handling the tracker metrics. -/// -/// It returns: -/// -/// - An event [`Sender`](crate::event::sender::Sender) that allows you to send -/// events related to statistics. 
-/// - An statistics [`Repository`](crate::statistics::repository::Repository) -/// which is an in-memory repository for the tracker metrics. -/// -/// When the input argument `tracker_usage_statistics`is false the setup does -/// not run the event listeners, consequently the statistics events are sent are -/// received but not dispatched to the handler. #[must_use] pub fn factory(tracker_usage_statistics: bool) -> Arc { keeper_factory(tracker_usage_statistics) diff --git a/src/app.rs b/src/app.rs index d394fe644..555900315 100644 --- a/src/app.rs +++ b/src/app.rs @@ -66,6 +66,7 @@ async fn load_data_from_database(config: &Configuration, app_container: &Arc) -> Vec> { let mut jobs: Vec> = Vec::new(); + start_http_core_event_listener(config, app_container); start_the_udp_instances(config, app_container, &mut jobs).await; start_the_http_instances(config, app_container, &mut jobs).await; start_the_http_api(config, app_container, &mut jobs).await; @@ -106,6 +107,26 @@ async fn load_whitelisted_torrents(config: &Configuration, app_container: &Arc) { + if config.core.tracker_usage_statistics { + let _job = app_container + .http_tracker_core_services + .http_stats_keeper + .run_event_listener(); + + // todo: this cannot be enabled otherwise the application never ends + // because the event listener never stops. You see this console message + // forever: + // + // !! shuting down in 90 seconds !! 
+ // 2025-04-24T15:27:45.454101Z INFO graceful_shutdown: torrust_axum_server::signals: remaining alive connections: 0 + // + // Depends on: https://github.com/torrust/torrust-tracker/issues/1405 + + //jobs.push(job); + } +} + async fn start_the_udp_instances(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { if let Some(udp_trackers) = &config.udp_trackers { for udp_tracker_config in udp_trackers { From 2fa4e15d7e44859e9da51ab77305ca6d73f1ddd8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Apr 2025 11:24:53 +0100 Subject: [PATCH 495/802] refactor: [#1444] udp core event listener start in app start --- .../src/environment.rs | 4 +- .../src/statistics/services.rs | 4 +- .../udp-tracker-core/benches/helpers/sync.rs | 4 +- packages/udp-tracker-core/src/container.rs | 11 ++-- packages/udp-tracker-core/src/event/sender.rs | 1 + .../udp-tracker-core/src/services/connect.rs | 12 ++-- .../src/statistics/event/listener.rs | 4 +- .../udp-tracker-core/src/statistics/keeper.rs | 50 ++++++++++++---- .../src/statistics/services.rs | 5 +- .../udp-tracker-core/src/statistics/setup.rs | 59 +++++++++---------- .../udp-tracker-server/src/environment.rs | 51 +++++++++++----- .../src/handlers/announce.rs | 9 +-- .../src/handlers/connect.rs | 15 ++--- .../udp-tracker-server/src/handlers/mod.rs | 5 +- src/app.rs | 18 ++++++ 15 files changed, 155 insertions(+), 97 deletions(-) diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index 30755b452..f278ad29f 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -75,7 +75,7 @@ impl Environment { .server .start(self.container.http_tracker_core_container.clone(), self.registar.give_form()) .await - .unwrap(); + .expect("Failed to start the HTTP tracker server"); Environment { container: self.container.clone(), @@ -105,7 +105,7 @@ impl Environment { } // Stop the server - let server = 
self.server.stop().await.expect("Failed to stop the http tracker server"); + let server = self.server.stop().await.expect("Failed to stop the HTTP tracker server"); Environment { container: self.container, diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index a299ccbaa..093971b34 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -155,9 +155,7 @@ mod tests { let _unused = http_stats_keeper.run_event_listener(); } - // UDP core stats - let (_udp_stats_event_sender, _udp_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + // UDP core stats (not used in this test) // UDP server stats let (_udp_server_stats_event_sender, udp_server_stats_repository) = diff --git a/packages/udp-tracker-core/benches/helpers/sync.rs b/packages/udp-tracker-core/benches/helpers/sync.rs index b61204586..926916d61 100644 --- a/packages/udp-tracker-core/benches/helpers/sync.rs +++ b/packages/udp-tracker-core/benches/helpers/sync.rs @@ -14,8 +14,8 @@ pub async fn connect_once(samples: u64) -> Duration { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let (udp_core_stats_event_sender, _udp_core_stats_repository) = statistics::setup::factory(false); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let keeper = statistics::setup::factory(false); + let udp_core_stats_event_sender = keeper.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); let start = Instant::now(); diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index 79ce15d01..0a1bf54d4 100644 --- a/packages/udp-tracker-core/src/container.rs +++ 
b/packages/udp-tracker-core/src/container.rs @@ -16,6 +16,7 @@ pub struct UdpTrackerCoreContainer { pub tracker_core_container: Arc, // `UdpTrackerCoreServices` + pub stats_keeper: Arc, pub udp_core_stats_event_sender: Arc>>, pub udp_core_stats_repository: Arc, pub ban_service: Arc>, @@ -52,6 +53,7 @@ impl UdpTrackerCoreContainer { tracker_core_container: tracker_core_container.clone(), // `UdpTrackerCoreServices` + stats_keeper: udp_tracker_core_services.stats_keeper.clone(), udp_core_stats_event_sender: udp_tracker_core_services.udp_core_stats_event_sender.clone(), udp_core_stats_repository: udp_tracker_core_services.udp_core_stats_repository.clone(), ban_service: udp_tracker_core_services.udp_ban_service.clone(), @@ -63,6 +65,7 @@ impl UdpTrackerCoreContainer { } pub struct UdpTrackerCoreServices { + pub stats_keeper: Arc, pub udp_core_stats_event_sender: Arc>>, pub udp_core_stats_repository: Arc, pub udp_ban_service: Arc>, @@ -74,10 +77,9 @@ pub struct UdpTrackerCoreServices { impl UdpTrackerCoreServices { #[must_use] pub fn initialize_from(tracker_core_container: &Arc) -> Arc { - let (udp_core_stats_event_sender, udp_core_stats_repository) = - statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); - let udp_core_stats_repository = Arc::new(udp_core_stats_repository); + let keeper = statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); + let udp_core_stats_event_sender = keeper.sender(); + let udp_core_stats_repository = keeper.repository(); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender.clone())); let announce_service = Arc::new(AnnounceService::new( @@ -91,6 +93,7 @@ impl UdpTrackerCoreServices { )); Arc::new(Self { + stats_keeper: keeper, udp_core_stats_event_sender, udp_core_stats_repository, 
udp_ban_service: ban_service, diff --git a/packages/udp-tracker-core/src/event/sender.rs b/packages/udp-tracker-core/src/event/sender.rs index 511a381d0..b720926bb 100644 --- a/packages/udp-tracker-core/src/event/sender.rs +++ b/packages/udp-tracker-core/src/event/sender.rs @@ -16,6 +16,7 @@ pub trait Sender: Sync + Send { } /// An event sender implementation using a broadcast channel. +#[derive(Clone)] pub struct Broadcaster { pub(crate) sender: broadcast::Sender, } diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index df3db6c4b..c6c1c098f 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -78,8 +78,8 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let (udp_core_stats_event_sender, _udp_core_stats_repository) = statistics::setup::factory(false); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let keeper = statistics::setup::factory(false); + let udp_core_stats_event_sender = keeper.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); @@ -98,8 +98,8 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let (udp_core_stats_event_sender, _udp_core_stats_repository) = statistics::setup::factory(false); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let keeper = statistics::setup::factory(false); + let udp_core_stats_event_sender = keeper.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); @@ -119,8 +119,8 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 
196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let (udp_core_stats_event_sender, _udp_core_stats_repository) = statistics::setup::factory(false); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let keeper = statistics::setup::factory(false); + let udp_core_stats_event_sender = keeper.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); diff --git a/packages/udp-tracker-core/src/statistics/event/listener.rs b/packages/udp-tracker-core/src/statistics/event/listener.rs index 888fb8204..835283d1e 100644 --- a/packages/udp-tracker-core/src/statistics/event/listener.rs +++ b/packages/udp-tracker-core/src/statistics/event/listener.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use tokio::sync::broadcast; use torrust_tracker_clock::clock::Time; @@ -6,7 +8,7 @@ use crate::event::Event; use crate::statistics::repository::Repository; use crate::{CurrentClock, UDP_TRACKER_LOG_TARGET}; -pub async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Repository) { +pub async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Arc) { loop { match receiver.recv().await { Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, diff --git a/packages/udp-tracker-core/src/statistics/keeper.rs b/packages/udp-tracker-core/src/statistics/keeper.rs index d72dcb260..8acecc585 100644 --- a/packages/udp-tracker-core/src/statistics/keeper.rs +++ b/packages/udp-tracker-core/src/statistics/keeper.rs @@ -1,8 +1,10 @@ -use tokio::sync::broadcast::Receiver; +use std::sync::Arc; + +use tokio::task::JoinHandle; use super::event::listener::dispatch_events; use super::repository::Repository; -use crate::event::Event; +use crate::event::sender::{self, Broadcaster}; use crate::UDP_TRACKER_LOG_TARGET; /// The service responsible for keeping tracker metrics (listening to statistics events and handle them). 
@@ -10,44 +12,70 @@ use crate::UDP_TRACKER_LOG_TARGET; /// It actively listen to new statistics events. When it receives a new event /// it accordingly increases the counters. pub struct Keeper { - pub repository: Repository, + pub enable_sender: bool, + pub broadcaster: Broadcaster, + pub repository: Arc, } impl Default for Keeper { fn default() -> Self { - Self::new() + let enable_sender = true; + let broadcaster = Broadcaster::default(); + let repository = Arc::new(Repository::new()); + + Self::new(enable_sender, broadcaster, repository) } } impl Keeper { + /// Creates a new instance of [`Keeper`]. #[must_use] - pub fn new() -> Self { + pub fn new(enable_sender: bool, broadcaster: Broadcaster, repository: Arc) -> Self { Self { - repository: Repository::new(), + enable_sender, + broadcaster, + repository, + } + } + + #[must_use] + pub fn sender(&self) -> Arc>> { + if self.enable_sender { + Arc::new(Some(Box::new(self.broadcaster.clone()))) + } else { + Arc::new(None) } } - pub fn run_event_listener(&mut self, receiver: Receiver) { + #[must_use] + pub fn repository(&self) -> Arc { + self.repository.clone() + } + + #[must_use] + pub fn run_event_listener(&self) -> JoinHandle<()> { let stats_repository = self.repository.clone(); + let receiver = self.broadcaster.subscribe(); - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker core event listener"); + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting HTTP tracker core event listener"); tokio::spawn(async move { dispatch_events(receiver, stats_repository).await; - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker core event listener finished"); - }); + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "HTTP tracker core event listener finished"); + }) } } #[cfg(test)] mod tests { + use crate::statistics::keeper::Keeper; use crate::statistics::metrics::Metrics; #[tokio::test] async fn should_contain_the_tracker_statistics() { - let stats_tracker = Keeper::new(); + let stats_tracker = 
Keeper::default(); let stats = stats_tracker.repository.get_stats().await; diff --git a/packages/udp-tracker-core/src/statistics/services.rs b/packages/udp-tracker-core/src/statistics/services.rs index d9b016b0d..e1aa66f67 100644 --- a/packages/udp-tracker-core/src/statistics/services.rs +++ b/packages/udp-tracker-core/src/statistics/services.rs @@ -106,9 +106,8 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let (_udp_core_stats_event_sender, udp_core_stats_repository) = - crate::statistics::setup::factory(config.core.tracker_usage_statistics); - let udp_core_stats_repository = Arc::new(udp_core_stats_repository); + let keeper = crate::statistics::setup::factory(config.core.tracker_usage_statistics); + let udp_core_stats_repository = keeper.repository(); let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), udp_core_stats_repository.clone()).await; diff --git a/packages/udp-tracker-core/src/statistics/setup.rs b/packages/udp-tracker-core/src/statistics/setup.rs index e2974e4c0..6466ac58b 100644 --- a/packages/udp-tracker-core/src/statistics/setup.rs +++ b/packages/udp-tracker-core/src/statistics/setup.rs @@ -1,38 +1,23 @@ //! Setup for the tracker statistics. //! //! The [`factory`] function builds the structs needed for handling the tracker metrics. -use crate::event::sender::Broadcaster; -use crate::{event, statistics}; - -/// It builds the structs needed for handling the tracker metrics. -/// -/// It returns: -/// -/// - An event [`Sender`](crate::event::sender::Sender) that allows you to send -/// events related to statistics. -/// - An statistics [`Repository`](crate::statistics::repository::Repository) -/// which is an in-memory repository for the tracker metrics. -/// -/// When the input argument `tracker_usage_statistics`is false the setup does -/// not run the event listeners, consequently the statistics events are sent are -/// received but not dispatched to the handler. 
-#[must_use] -pub fn factory(tracker_usage_statistics: bool) -> (Option>, statistics::repository::Repository) { - let mut keeper = statistics::keeper::Keeper::new(); - - let opt_event_sender: Option> = if tracker_usage_statistics { - let broadcaster = Broadcaster::default(); +use std::sync::Arc; - keeper.run_event_listener(broadcaster.subscribe()); - - Some(Box::new(broadcaster)) - } else { - None - }; +use super::keeper::Keeper; +use super::repository::Repository; +use crate::event::sender::Broadcaster; - (opt_event_sender, keeper.repository) +#[must_use] +pub fn factory(tracker_usage_statistics: bool) -> Arc { + keeper_factory(tracker_usage_statistics) } +#[must_use] +pub fn keeper_factory(tracker_usage_statistics: bool) -> Arc { + let broadcaster = Broadcaster::default(); + let repository = Arc::new(Repository::new()); + Arc::new(Keeper::new(tracker_usage_statistics, broadcaster.clone(), repository.clone())) +} #[cfg(test)] mod test { use super::factory; @@ -41,17 +26,27 @@ mod test { async fn should_not_send_any_event_when_statistics_are_disabled() { let tracker_usage_statistics = false; - let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); + // UDP core stats + let http_stats_keeper = factory(tracker_usage_statistics); + let http_stats_event_sender = http_stats_keeper.sender(); + let _http_stats_repository = http_stats_keeper.repository(); + + if tracker_usage_statistics { + let _unused = http_stats_keeper.run_event_listener(); + } - assert!(stats_event_sender.is_none()); + assert!(http_stats_event_sender.is_none()); } #[tokio::test] async fn should_send_events_when_statistics_are_enabled() { let tracker_usage_statistics = true; - let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); + // UDP core stats + let http_stats_keeper = factory(tracker_usage_statistics); + let http_stats_event_sender = http_stats_keeper.sender(); + let _http_stats_repository = http_stats_keeper.repository(); - 
assert!(stats_event_sender.is_some()); + assert!(http_stats_event_sender.is_some()); } } diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index b97da90ad..3115d3b0b 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -4,6 +4,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; +use tokio::task::JoinHandle; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration, DEFAULT_TIMEOUT}; use torrust_tracker_primitives::peer; @@ -22,6 +23,7 @@ where pub container: Arc, pub registar: Registar, pub server: Server, + pub udp_core_event_listener_job: Option>, } impl Environment @@ -55,29 +57,38 @@ impl Environment { container, registar: Registar::default(), server, + udp_core_event_listener_job: None, } } + /// Starts the test environment and return a running environment. + /// /// # Panics /// /// Will panic if it cannot start the server. 
#[allow(dead_code)] pub async fn start(self) -> Environment { let cookie_lifetime = self.container.udp_tracker_core_container.udp_tracker_config.cookie_lifetime; + // Start the UDP tracker core event listener + let udp_core_event_listener_job = Some(self.container.udp_tracker_core_container.stats_keeper.run_event_listener()); + + // Start the UDP tracker server + let server = self + .server + .start( + self.container.udp_tracker_core_container.clone(), + self.container.udp_tracker_server_container.clone(), + self.registar.give_form(), + cookie_lifetime, + ) + .await + .expect("Failed to start the UDP tracker server"); Environment { container: self.container.clone(), registar: self.registar.clone(), - server: self - .server - .start( - self.container.udp_tracker_core_container.clone(), - self.container.udp_tracker_server_container.clone(), - self.registar.give_form(), - cookie_lifetime, - ) - .await - .unwrap(), + server, + udp_core_event_listener_job, } } } @@ -89,22 +100,34 @@ impl Environment { pub async fn new(configuration: &Arc) -> Self { tokio::time::timeout(DEFAULT_TIMEOUT, Environment::::new(configuration).start()) .await - .expect("it should create an environment within the timeout") + .expect("Failed to create a UDP tracker server running environment within the timeout") } + /// Stops the test environment and return a stopped environment. + /// /// # Panics /// /// Will panic if it cannot stop the service within the timeout. 
#[allow(dead_code)] pub async fn stop(self) -> Environment { - let stopped = tokio::time::timeout(DEFAULT_TIMEOUT, self.server.stop()) + // Stop the event listener + if let Some(udp_core_event_listener_job) = self.udp_core_event_listener_job { + // todo: send a message to the event listener to stop and wait for + // it to finish + udp_core_event_listener_job.abort(); + } + + // Stop the server + let server = tokio::time::timeout(DEFAULT_TIMEOUT, self.server.stop()) .await - .expect("it should stop the environment within the timeout"); + .expect("Failed to stop the UDP tracker server within the timeout") + .expect("Failed to stop the UDP tracker server"); Environment { container: self.container, registar: Registar::default(), - server: stopped.expect("it should stop the udp tracker service"), + server, + udp_core_event_listener_job: None, } } diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 0167553f2..38b42a0b6 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -374,10 +374,6 @@ mod tests { core_tracker_services: Arc, core_udp_tracker_services: Arc, ) -> Response { - let (udp_core_stats_event_sender, _udp_core_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(false); - let _udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); - let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); @@ -710,9 +706,8 @@ mod tests { announce_handler: Arc, whitelist_authorization: Arc, ) -> Response { - let (udp_core_stats_event_sender, _udp_core_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let keeper = 
bittorrent_udp_tracker_core::statistics::setup::factory(false); + let udp_core_stats_event_sender = keeper.sender(); let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index aef8833b9..9ea36903c 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -81,9 +81,8 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let (udp_core_stats_event_sender, _udp_core_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let keeper = bittorrent_udp_tracker_core::statistics::setup::factory(false); + let udp_core_stats_event_sender = keeper.sender(); let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); @@ -118,9 +117,8 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let (udp_core_stats_event_sender, _udp_core_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let keeper = bittorrent_udp_tracker_core::statistics::setup::factory(false); + let udp_core_stats_event_sender = keeper.sender(); let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); let 
udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); @@ -155,9 +153,8 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let (udp_core_stats_event_sender, _udp_core_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let keeper = bittorrent_udp_tracker_core::statistics::setup::factory(false); + let udp_core_stats_event_sender = keeper.sender(); let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index f8ca9d8ea..bc39f63ae 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -284,9 +284,8 @@ pub(crate) mod tests { )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - let (udp_core_stats_event_sender, _udp_core_stats_repository) = - bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_core_stats_event_sender = Arc::new(udp_core_stats_event_sender); + let keeper = bittorrent_udp_tracker_core::statistics::setup::factory(false); + let udp_core_stats_event_sender = keeper.sender(); let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); diff --git a/src/app.rs b/src/app.rs index 555900315..a0f63094b 100644 --- a/src/app.rs +++ b/src/app.rs @@ -67,6 +67,7 @@ async fn start_jobs(config: &Configuration, app_container: &Arc) - let mut jobs: Vec> = Vec::new(); 
start_http_core_event_listener(config, app_container); + start_udp_core_event_listener(config, app_container); start_the_udp_instances(config, app_container, &mut jobs).await; start_the_http_instances(config, app_container, &mut jobs).await; start_the_http_api(config, app_container, &mut jobs).await; @@ -127,6 +128,23 @@ fn start_http_core_event_listener(config: &Configuration, app_container: &Arc) { + if config.core.tracker_usage_statistics { + let _job = app_container.udp_tracker_core_services.stats_keeper.run_event_listener(); + + // todo: this cannot be enabled otherwise the application never ends + // because the event listener never stops. You see this console message + // forever: + // + // !! shuting down in 90 seconds !! + // 2025-04-24T15:27:45.454101Z INFO graceful_shutdown: torrust_axum_server::signals: remaining alive connections: 0 + // + // Depends on: https://github.com/torrust/torrust-tracker/issues/1405 + + //jobs.push(job); + } +} + async fn start_the_udp_instances(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { if let Some(udp_trackers) = &config.udp_trackers { for udp_tracker_config in udp_trackers { From 74e174d377264bee10b89c258f3917b9074d1215 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Apr 2025 13:10:30 +0100 Subject: [PATCH 496/802] refactor: [#1444] udp server event listener start in app start --- .../src/statistics/services.rs | 4 +- packages/udp-tracker-server/src/container.rs | 11 ++-- .../udp-tracker-server/src/environment.rs | 23 ++++++- .../udp-tracker-server/src/event/sender.rs | 1 + .../src/handlers/announce.rs | 12 ++-- .../src/handlers/connect.rs | 24 ++++---- .../udp-tracker-server/src/handlers/mod.rs | 8 +-- .../udp-tracker-server/src/handlers/scrape.rs | 4 +- .../src/statistics/event/listener.rs | 4 +- .../src/statistics/keeper.rs | 52 ++++++++++++---- .../src/statistics/services.rs | 5 +- .../src/statistics/setup.rs | 61 +++++++++---------- src/app.rs | 21 +++++++ 13 files changed, 149 
insertions(+), 81 deletions(-) diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 093971b34..95e21633a 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -158,9 +158,9 @@ mod tests { // UDP core stats (not used in this test) // UDP server stats - let (_udp_server_stats_event_sender, udp_server_stats_repository) = + let udp_server_stats_keeper = torrust_udp_tracker_server::statistics::setup::factory(config.core.tracker_usage_statistics); - let udp_server_stats_repository = Arc::new(udp_server_stats_repository); + let udp_server_stats_repository = udp_server_stats_keeper.repository(); let tracker_metrics = get_metrics( in_memory_torrent_repository.clone(), diff --git a/packages/udp-tracker-server/src/container.rs b/packages/udp-tracker-server/src/container.rs index 2b1ce8c99..89740cf77 100644 --- a/packages/udp-tracker-server/src/container.rs +++ b/packages/udp-tracker-server/src/container.rs @@ -5,6 +5,7 @@ use torrust_tracker_configuration::Core; use crate::{event, statistics}; pub struct UdpTrackerServerContainer { + pub udp_server_stats_keeper: Arc, pub udp_server_stats_event_sender: Arc>>, pub udp_server_stats_repository: Arc, } @@ -15,6 +16,7 @@ impl UdpTrackerServerContainer { let udp_tracker_server_services = UdpTrackerServerServices::initialize(core_config); Arc::new(Self { + udp_server_stats_keeper: udp_tracker_server_services.udp_server_stats_keeper.clone(), udp_server_stats_event_sender: udp_tracker_server_services.udp_server_stats_event_sender.clone(), udp_server_stats_repository: udp_tracker_server_services.udp_server_stats_repository.clone(), }) @@ -22,6 +24,7 @@ impl UdpTrackerServerContainer { } pub struct UdpTrackerServerServices { + pub udp_server_stats_keeper: Arc, pub udp_server_stats_event_sender: Arc>>, pub udp_server_stats_repository: Arc, } @@ -29,12 +32,12 @@ pub struct 
UdpTrackerServerServices { impl UdpTrackerServerServices { #[must_use] pub fn initialize(core_config: &Arc) -> Arc { - let (udp_server_stats_event_sender, udp_server_stats_repository) = - statistics::setup::factory(core_config.tracker_usage_statistics); - let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); - let udp_server_stats_repository = Arc::new(udp_server_stats_repository); + let udp_server_stats_keeper = statistics::setup::factory(core_config.tracker_usage_statistics); + let udp_server_stats_event_sender = udp_server_stats_keeper.sender(); + let udp_server_stats_repository = udp_server_stats_keeper.repository(); Arc::new(Self { + udp_server_stats_keeper: udp_server_stats_keeper.clone(), udp_server_stats_event_sender: udp_server_stats_event_sender.clone(), udp_server_stats_repository: udp_server_stats_repository.clone(), }) diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 3115d3b0b..2b31e78bd 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -24,6 +24,7 @@ where pub registar: Registar, pub server: Server, pub udp_core_event_listener_job: Option>, + pub udp_server_event_listener_job: Option>, } impl Environment @@ -58,6 +59,7 @@ impl Environment { registar: Registar::default(), server, udp_core_event_listener_job: None, + udp_server_event_listener_job: None, } } @@ -72,6 +74,14 @@ impl Environment { // Start the UDP tracker core event listener let udp_core_event_listener_job = Some(self.container.udp_tracker_core_container.stats_keeper.run_event_listener()); + // Start the UDP tracker server event listener + let udp_server_event_listener_job = Some( + self.container + .udp_tracker_server_container + .udp_server_stats_keeper + .run_event_listener(), + ); + // Start the UDP tracker server let server = self .server @@ -89,6 +99,7 @@ impl Environment { registar: self.registar.clone(), server, 
udp_core_event_listener_job, + udp_server_event_listener_job, } } } @@ -110,14 +121,21 @@ impl Environment { /// Will panic if it cannot stop the service within the timeout. #[allow(dead_code)] pub async fn stop(self) -> Environment { - // Stop the event listener + // Stop the UDP tracker core event listener if let Some(udp_core_event_listener_job) = self.udp_core_event_listener_job { // todo: send a message to the event listener to stop and wait for // it to finish udp_core_event_listener_job.abort(); } - // Stop the server + // Stop the UDP tracker server event listener + if let Some(udp_server_event_listener_job) = self.udp_server_event_listener_job { + // todo: send a message to the event listener to stop and wait for + // it to finish + udp_server_event_listener_job.abort(); + } + + // Stop the UDP tracker server let server = tokio::time::timeout(DEFAULT_TIMEOUT, self.server.stop()) .await .expect("Failed to stop the UDP tracker server within the timeout") @@ -128,6 +146,7 @@ impl Environment { registar: Registar::default(), server, udp_core_event_listener_job: None, + udp_server_event_listener_job: None, } } diff --git a/packages/udp-tracker-server/src/event/sender.rs b/packages/udp-tracker-server/src/event/sender.rs index 511a381d0..b720926bb 100644 --- a/packages/udp-tracker-server/src/event/sender.rs +++ b/packages/udp-tracker-server/src/event/sender.rs @@ -16,6 +16,7 @@ pub trait Sender: Sync + Send { } /// An event sender implementation using a broadcast channel. 
+#[derive(Clone)] pub struct Broadcaster { pub(crate) sender: broadcast::Sender, } diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 38b42a0b6..9dd7156b1 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -374,8 +374,8 @@ mod tests { core_tracker_services: Arc, core_udp_tracker_services: Arc, ) -> Response { - let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); - let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); + let keeper = crate::statistics::setup::factory(false); + let udp_server_stats_event_sender = keeper.sender(); let client_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); @@ -706,11 +706,11 @@ mod tests { announce_handler: Arc, whitelist_authorization: Arc, ) -> Response { - let keeper = bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_core_stats_event_sender = keeper.sender(); + let core_keeper = bittorrent_udp_tracker_core::statistics::setup::factory(false); + let udp_core_stats_event_sender = core_keeper.sender(); - let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); - let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); + let server_keeper = crate::statistics::setup::factory(false); + let udp_server_stats_event_sender = server_keeper.sender(); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index 9ea36903c..fb05b3693 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ 
b/packages/udp-tracker-server/src/handlers/connect.rs @@ -81,11 +81,11 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let keeper = bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_core_stats_event_sender = keeper.sender(); + let core_keeper = bittorrent_udp_tracker_core::statistics::setup::factory(false); + let udp_core_stats_event_sender = core_keeper.sender(); - let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); - let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); + let server_keeper = crate::statistics::setup::factory(false); + let udp_server_stats_event_sender = server_keeper.sender(); let request = ConnectRequest { transaction_id: TransactionId(0i32.into()), @@ -117,11 +117,11 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let keeper = bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_core_stats_event_sender = keeper.sender(); + let core_keeper = bittorrent_udp_tracker_core::statistics::setup::factory(false); + let udp_core_stats_event_sender = core_keeper.sender(); - let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); - let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); + let server_keeper = crate::statistics::setup::factory(false); + let udp_server_stats_event_sender = server_keeper.sender(); let request = ConnectRequest { transaction_id: TransactionId(0i32.into()), @@ -153,11 +153,11 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = 
ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let keeper = bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_core_stats_event_sender = keeper.sender(); + let core_keeper = bittorrent_udp_tracker_core::statistics::setup::factory(false); + let udp_core_stats_event_sender = core_keeper.sender(); - let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); - let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); + let server_keeper = crate::statistics::setup::factory(false); + let udp_server_stats_event_sender = server_keeper.sender(); let request = ConnectRequest { transaction_id: TransactionId(0i32.into()), diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index bc39f63ae..0ad593bb2 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -284,11 +284,11 @@ pub(crate) mod tests { )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - let keeper = bittorrent_udp_tracker_core::statistics::setup::factory(false); - let udp_core_stats_event_sender = keeper.sender(); + let core_keeper = bittorrent_udp_tracker_core::statistics::setup::factory(false); + let udp_core_stats_event_sender = core_keeper.sender(); - let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); - let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); + let server_keeper = crate::statistics::setup::factory(false); + let udp_server_stats_event_sender = server_keeper.sender(); let announce_service = Arc::new(AnnounceService::new( announce_handler.clone(), diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index 35b5ee65c..cef896d73 100644 --- 
a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -178,8 +178,8 @@ mod tests { core_tracker_services: Arc, core_udp_tracker_services: Arc, ) -> Response { - let (udp_server_stats_event_sender, _udp_server_stats_repository) = crate::statistics::setup::factory(false); - let udp_server_stats_event_sender = Arc::new(udp_server_stats_event_sender); + let keeper = crate::statistics::setup::factory(false); + let udp_server_stats_event_sender = keeper.sender(); let client_socket_addr = sample_ipv4_remote_addr(); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); diff --git a/packages/udp-tracker-server/src/statistics/event/listener.rs b/packages/udp-tracker-server/src/statistics/event/listener.rs index cf348ea17..80c9f8d21 100644 --- a/packages/udp-tracker-server/src/statistics/event/listener.rs +++ b/packages/udp-tracker-server/src/statistics/event/listener.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use tokio::sync::broadcast; use torrust_tracker_clock::clock::Time; @@ -7,7 +9,7 @@ use crate::event::Event; use crate::statistics::repository::Repository; use crate::CurrentClock; -pub async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Repository) { +pub async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Arc) { loop { match receiver.recv().await { Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, diff --git a/packages/udp-tracker-server/src/statistics/keeper.rs b/packages/udp-tracker-server/src/statistics/keeper.rs index c200b4cdf..1d525e7b3 100644 --- a/packages/udp-tracker-server/src/statistics/keeper.rs +++ b/packages/udp-tracker-server/src/statistics/keeper.rs @@ -1,56 +1,84 @@ +use std::sync::Arc; + use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; -use tokio::sync::broadcast::Receiver; +use tokio::task::JoinHandle; use 
super::event::listener::dispatch_events; use super::repository::Repository; -use crate::event::Event; +use crate::event::sender::{self, Broadcaster}; /// The service responsible for keeping tracker metrics (listening to statistics events and handle them). /// /// It actively listen to new statistics events. When it receives a new event /// it accordingly increases the counters. pub struct Keeper { - pub repository: Repository, + pub enable_sender: bool, + pub broadcaster: Broadcaster, + pub repository: Arc, } impl Default for Keeper { fn default() -> Self { - Self::new() + let enable_sender = true; + let broadcaster = Broadcaster::default(); + let repository = Arc::new(Repository::new()); + + Self::new(enable_sender, broadcaster, repository) } } impl Keeper { + /// Creates a new instance of [`Keeper`]. #[must_use] - pub fn new() -> Self { + pub fn new(enable_sender: bool, broadcaster: Broadcaster, repository: Arc) -> Self { Self { - repository: Repository::new(), + enable_sender, + broadcaster, + repository, + } + } + + #[must_use] + pub fn sender(&self) -> Arc>> { + if self.enable_sender { + Arc::new(Some(Box::new(self.broadcaster.clone()))) + } else { + Arc::new(None) } } - pub fn run_event_listener(&mut self, receiver: Receiver) { + #[must_use] + pub fn repository(&self) -> Arc { + self.repository.clone() + } + + #[must_use] + pub fn run_event_listener(&self) -> JoinHandle<()> { let stats_repository = self.repository.clone(); + let receiver = self.broadcaster.subscribe(); - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker server event listener"); + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting HTTP tracker core event listener"); tokio::spawn(async move { dispatch_events(receiver, stats_repository).await; - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker core server listener finished"); - }); + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "HTTP tracker core event listener finished"); + }) } } #[cfg(test)] mod tests { + 
use crate::statistics::keeper::Keeper; use crate::statistics::metrics::Metrics; #[tokio::test] async fn should_contain_the_tracker_statistics() { - let stats_tracker = Keeper::new(); + let stats_tracker = Keeper::default(); let stats = stats_tracker.repository.get_stats().await; - assert_eq!(stats.udp4_requests, Metrics::default().udp4_requests); + assert_eq!(stats.udp4_announces_handled, Metrics::default().udp4_announces_handled); } } diff --git a/packages/udp-tracker-server/src/statistics/services.rs b/packages/udp-tracker-server/src/statistics/services.rs index b84bf4cd0..22f3f4754 100644 --- a/packages/udp-tracker-server/src/statistics/services.rs +++ b/packages/udp-tracker-server/src/statistics/services.rs @@ -127,9 +127,8 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let (_udp_server_stats_event_sender, udp_server_stats_repository) = - statistics::setup::factory(config.core.tracker_usage_statistics); - let udp_server_stats_repository = Arc::new(udp_server_stats_repository); + let keeper = statistics::setup::factory(config.core.tracker_usage_statistics); + let udp_server_stats_repository = keeper.repository(); let tracker_metrics = get_metrics( in_memory_torrent_repository.clone(), diff --git a/packages/udp-tracker-server/src/statistics/setup.rs b/packages/udp-tracker-server/src/statistics/setup.rs index d8cc7bca9..09f077507 100644 --- a/packages/udp-tracker-server/src/statistics/setup.rs +++ b/packages/udp-tracker-server/src/statistics/setup.rs @@ -1,37 +1,22 @@ //! Setup for the tracker statistics. //! -//! The [`factory`] function builds the structs needed for handling the tracker -//! metrics. -use crate::event::sender::Broadcaster; -use crate::{event, statistics}; - -/// It builds the structs needed for handling the tracker metrics. 
-/// -/// It returns: -/// -/// - An event [`Sender`](crate::event::sender::Sender) that allows you to send -/// events related to statistics. -/// - An statistics [`Repository`](crate::statistics::repository::Repository) -/// which is an in-memory repository for the tracker metrics. -/// -/// When the input argument `tracker_usage_statistics`is false the setup does -/// not run the event listeners, consequently the statistics events are sent are -/// received but not dispatched to the handler. -#[must_use] -pub fn factory(tracker_usage_statistics: bool) -> (Option>, statistics::repository::Repository) { - let mut keeper = statistics::keeper::Keeper::new(); +//! The [`factory`] function builds the structs needed for handling the tracker metrics. +use std::sync::Arc; - let opt_event_sender: Option> = if tracker_usage_statistics { - let broadcaster = Broadcaster::default(); - - keeper.run_event_listener(broadcaster.subscribe()); +use super::keeper::Keeper; +use super::repository::Repository; +use crate::event::sender::Broadcaster; - Some(Box::new(broadcaster)) - } else { - None - }; +#[must_use] +pub fn factory(tracker_usage_statistics: bool) -> Arc { + keeper_factory(tracker_usage_statistics) +} - (opt_event_sender, keeper.repository) +#[must_use] +pub fn keeper_factory(tracker_usage_statistics: bool) -> Arc { + let broadcaster = Broadcaster::default(); + let repository = Arc::new(Repository::new()); + Arc::new(Keeper::new(tracker_usage_statistics, broadcaster.clone(), repository.clone())) } #[cfg(test)] @@ -42,17 +27,27 @@ mod test { async fn should_not_send_any_event_when_statistics_are_disabled() { let tracker_usage_statistics = false; - let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); + // HTTP core stats + let http_stats_keeper = factory(tracker_usage_statistics); + let http_stats_event_sender = http_stats_keeper.sender(); + let _http_stats_repository = http_stats_keeper.repository(); + + if tracker_usage_statistics { + let 
_unused = http_stats_keeper.run_event_listener(); + } - assert!(stats_event_sender.is_none()); + assert!(http_stats_event_sender.is_none()); } #[tokio::test] async fn should_send_events_when_statistics_are_enabled() { let tracker_usage_statistics = true; - let (stats_event_sender, _stats_repository) = factory(tracker_usage_statistics); + // HTTP core stats + let http_stats_keeper = factory(tracker_usage_statistics); + let http_stats_event_sender = http_stats_keeper.sender(); + let _http_stats_repository = http_stats_keeper.repository(); - assert!(stats_event_sender.is_some()); + assert!(http_stats_event_sender.is_some()); } } diff --git a/src/app.rs b/src/app.rs index a0f63094b..67380d30d 100644 --- a/src/app.rs +++ b/src/app.rs @@ -68,6 +68,7 @@ async fn start_jobs(config: &Configuration, app_container: &Arc) - start_http_core_event_listener(config, app_container); start_udp_core_event_listener(config, app_container); + start_udp_server_event_listener(config, app_container); start_the_udp_instances(config, app_container, &mut jobs).await; start_the_http_instances(config, app_container, &mut jobs).await; start_the_http_api(config, app_container, &mut jobs).await; @@ -145,6 +146,26 @@ fn start_udp_core_event_listener(config: &Configuration, app_container: &Arc) { + if config.core.tracker_usage_statistics { + let _job = app_container + .udp_tracker_server_container + .udp_server_stats_keeper + .run_event_listener(); + + // todo: this cannot be enabled otherwise the application never ends + // because the event listener never stops. You see this console message + // forever: + // + // !! shuting down in 90 seconds !! 
+ // 2025-04-24T15:27:45.454101Z INFO graceful_shutdown: torrust_axum_server::signals: remaining alive connections: 0 + // + // Depends on: https://github.com/torrust/torrust-tracker/issues/1405 + + //jobs.push(job); + } +} + async fn start_the_udp_instances(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { if let Some(udp_trackers) = &config.udp_trackers { for udp_tracker_config in udp_trackers { From 6d50a784239945299c63116fe5434da92fd0dd6e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Apr 2025 13:52:55 +0100 Subject: [PATCH 497/802] refactor: normalize container field names --- packages/http-tracker-core/src/container.rs | 30 +++++++------- .../rest-tracker-api-core/src/container.rs | 4 +- packages/udp-tracker-core/src/container.rs | 40 +++++++++---------- packages/udp-tracker-server/src/container.rs | 24 +++++------ .../udp-tracker-server/src/environment.rs | 7 +--- .../udp-tracker-server/src/handlers/mod.rs | 10 ++--- .../udp-tracker-server/src/server/launcher.rs | 11 ++--- .../src/server/processor.rs | 2 +- .../tests/server/contract.rs | 4 +- src/app.rs | 10 +---- src/container.rs | 8 ++-- 11 files changed, 67 insertions(+), 83 deletions(-) diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index e685dd521..496856494 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -45,21 +45,21 @@ impl HttpTrackerCoreContainer { Arc::new(Self { tracker_core_container: tracker_core_container.clone(), http_tracker_config: http_tracker_config.clone(), - stats_keeper: http_tracker_core_services.http_stats_keeper.clone(), - stats_event_sender: http_tracker_core_services.http_stats_event_sender.clone(), - stats_repository: http_tracker_core_services.http_stats_repository.clone(), - announce_service: http_tracker_core_services.http_announce_service.clone(), - scrape_service: http_tracker_core_services.http_scrape_service.clone(), + stats_keeper: 
http_tracker_core_services.stats_keeper.clone(), + stats_event_sender: http_tracker_core_services.stats_event_sender.clone(), + stats_repository: http_tracker_core_services.stats_repository.clone(), + announce_service: http_tracker_core_services.announce_service.clone(), + scrape_service: http_tracker_core_services.scrape_service.clone(), }) } } pub struct HttpTrackerCoreServices { - pub http_stats_keeper: Arc, - pub http_stats_event_sender: Arc>>, - pub http_stats_repository: Arc, - pub http_announce_service: Arc, - pub http_scrape_service: Arc, + pub stats_keeper: Arc, + pub stats_event_sender: Arc>>, + pub stats_repository: Arc, + pub announce_service: Arc, + pub scrape_service: Arc, } impl HttpTrackerCoreServices { @@ -86,11 +86,11 @@ impl HttpTrackerCoreServices { )); Arc::new(Self { - http_stats_keeper, - http_stats_event_sender, - http_stats_repository, - http_announce_service, - http_scrape_service, + stats_keeper: http_stats_keeper, + stats_event_sender: http_stats_event_sender, + stats_repository: http_stats_repository, + announce_service: http_announce_service, + scrape_service: http_scrape_service, }) } } diff --git a/packages/rest-tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs index 4451eb2c4..ec3786dfb 100644 --- a/packages/rest-tracker-api-core/src/container.rs +++ b/packages/rest-tracker-api-core/src/container.rs @@ -56,9 +56,9 @@ impl TrackerHttpApiCoreContainer { http_stats_repository: http_tracker_core_container.stats_repository.clone(), ban_service: udp_tracker_core_container.ban_service.clone(), - udp_core_stats_repository: udp_tracker_core_container.udp_core_stats_repository.clone(), + udp_core_stats_repository: udp_tracker_core_container.stats_repository.clone(), - udp_server_stats_repository: udp_tracker_server_container.udp_server_stats_repository.clone(), + udp_server_stats_repository: udp_tracker_server_container.stats_repository.clone(), http_api_config: http_api_config.clone(), }) diff --git 
a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index 0a1bf54d4..ef66e9b7e 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -17,8 +17,8 @@ pub struct UdpTrackerCoreContainer { // `UdpTrackerCoreServices` pub stats_keeper: Arc, - pub udp_core_stats_event_sender: Arc>>, - pub udp_core_stats_repository: Arc, + pub stats_event_sender: Arc>>, + pub stats_repository: Arc, pub ban_service: Arc>, pub connect_service: Arc, pub announce_service: Arc, @@ -54,24 +54,24 @@ impl UdpTrackerCoreContainer { // `UdpTrackerCoreServices` stats_keeper: udp_tracker_core_services.stats_keeper.clone(), - udp_core_stats_event_sender: udp_tracker_core_services.udp_core_stats_event_sender.clone(), - udp_core_stats_repository: udp_tracker_core_services.udp_core_stats_repository.clone(), - ban_service: udp_tracker_core_services.udp_ban_service.clone(), - connect_service: udp_tracker_core_services.udp_connect_service.clone(), - announce_service: udp_tracker_core_services.udp_announce_service.clone(), - scrape_service: udp_tracker_core_services.udp_scrape_service.clone(), + stats_event_sender: udp_tracker_core_services.stats_event_sender.clone(), + stats_repository: udp_tracker_core_services.stats_repository.clone(), + ban_service: udp_tracker_core_services.ban_service.clone(), + connect_service: udp_tracker_core_services.connect_service.clone(), + announce_service: udp_tracker_core_services.announce_service.clone(), + scrape_service: udp_tracker_core_services.scrape_service.clone(), }) } } pub struct UdpTrackerCoreServices { pub stats_keeper: Arc, - pub udp_core_stats_event_sender: Arc>>, - pub udp_core_stats_repository: Arc, - pub udp_ban_service: Arc>, - pub udp_connect_service: Arc, - pub udp_announce_service: Arc, - pub udp_scrape_service: Arc, + pub stats_event_sender: Arc>>, + pub stats_repository: Arc, + pub ban_service: Arc>, + pub connect_service: Arc, + pub announce_service: Arc, + pub 
scrape_service: Arc, } impl UdpTrackerCoreServices { @@ -94,12 +94,12 @@ impl UdpTrackerCoreServices { Arc::new(Self { stats_keeper: keeper, - udp_core_stats_event_sender, - udp_core_stats_repository, - udp_ban_service: ban_service, - udp_connect_service: connect_service, - udp_announce_service: announce_service, - udp_scrape_service: scrape_service, + stats_event_sender: udp_core_stats_event_sender, + stats_repository: udp_core_stats_repository, + ban_service, + connect_service, + announce_service, + scrape_service, }) } } diff --git a/packages/udp-tracker-server/src/container.rs b/packages/udp-tracker-server/src/container.rs index 89740cf77..64d01e754 100644 --- a/packages/udp-tracker-server/src/container.rs +++ b/packages/udp-tracker-server/src/container.rs @@ -5,9 +5,9 @@ use torrust_tracker_configuration::Core; use crate::{event, statistics}; pub struct UdpTrackerServerContainer { - pub udp_server_stats_keeper: Arc, - pub udp_server_stats_event_sender: Arc>>, - pub udp_server_stats_repository: Arc, + pub stats_keeper: Arc, + pub stats_event_sender: Arc>>, + pub stats_repository: Arc, } impl UdpTrackerServerContainer { @@ -16,17 +16,17 @@ impl UdpTrackerServerContainer { let udp_tracker_server_services = UdpTrackerServerServices::initialize(core_config); Arc::new(Self { - udp_server_stats_keeper: udp_tracker_server_services.udp_server_stats_keeper.clone(), - udp_server_stats_event_sender: udp_tracker_server_services.udp_server_stats_event_sender.clone(), - udp_server_stats_repository: udp_tracker_server_services.udp_server_stats_repository.clone(), + stats_keeper: udp_tracker_server_services.stats_keeper.clone(), + stats_event_sender: udp_tracker_server_services.stats_event_sender.clone(), + stats_repository: udp_tracker_server_services.stats_repository.clone(), }) } } pub struct UdpTrackerServerServices { - pub udp_server_stats_keeper: Arc, - pub udp_server_stats_event_sender: Arc>>, - pub udp_server_stats_repository: Arc, + pub stats_keeper: Arc, + pub 
stats_event_sender: Arc>>, + pub stats_repository: Arc, } impl UdpTrackerServerServices { @@ -37,9 +37,9 @@ impl UdpTrackerServerServices { let udp_server_stats_repository = udp_server_stats_keeper.repository(); Arc::new(Self { - udp_server_stats_keeper: udp_server_stats_keeper.clone(), - udp_server_stats_event_sender: udp_server_stats_event_sender.clone(), - udp_server_stats_repository: udp_server_stats_repository.clone(), + stats_keeper: udp_server_stats_keeper.clone(), + stats_event_sender: udp_server_stats_event_sender.clone(), + stats_repository: udp_server_stats_repository.clone(), }) } } diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 2b31e78bd..cda8cd678 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -75,12 +75,7 @@ impl Environment { let udp_core_event_listener_job = Some(self.container.udp_tracker_core_container.stats_keeper.run_event_listener()); // Start the UDP tracker server event listener - let udp_server_event_listener_job = Some( - self.container - .udp_tracker_server_container - .udp_server_stats_keeper - .run_event_listener(), - ); + let udp_server_event_listener_job = Some(self.container.udp_tracker_server_container.stats_keeper.run_event_listener()); // Start the UDP tracker server let server = self diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 0ad593bb2..8ef053684 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -98,7 +98,7 @@ pub(crate) async fn handle_packet( udp_request.from, server_service_binding, request_id, - &udp_tracker_server_container.udp_server_stats_event_sender, + &udp_tracker_server_container.stats_event_sender, cookie_time_values.valid_range.clone(), &error, Some(transaction_id), @@ -114,7 +114,7 @@ pub(crate) async fn handle_packet( udp_request.from, 
server_service_binding, request_id, - &udp_tracker_server_container.udp_server_stats_event_sender, + &udp_tracker_server_container.stats_event_sender, cookie_time_values.valid_range.clone(), &e, None, @@ -161,7 +161,7 @@ pub async fn handle_request( server_service_binding, &connect_request, &udp_tracker_core_container.connect_service, - &udp_tracker_server_container.udp_server_stats_event_sender, + &udp_tracker_server_container.stats_event_sender, cookie_time_values.issue_time, ) .await, @@ -174,7 +174,7 @@ pub async fn handle_request( server_service_binding, &announce_request, &udp_tracker_core_container.tracker_core_container.core_config, - &udp_tracker_server_container.udp_server_stats_event_sender, + &udp_tracker_server_container.stats_event_sender, cookie_time_values.valid_range, ) .await @@ -189,7 +189,7 @@ pub async fn handle_request( client_socket_addr, server_service_binding, &scrape_request, - &udp_tracker_server_container.udp_server_stats_event_sender, + &udp_tracker_server_container.stats_event_sender, cookie_time_values.valid_range, ) .await diff --git a/packages/udp-tracker-server/src/server/launcher.rs b/packages/udp-tracker-server/src/server/launcher.rs index d62a4d04e..02b9c8d74 100644 --- a/packages/udp-tracker-server/src/server/launcher.rs +++ b/packages/udp-tracker-server/src/server/launcher.rs @@ -182,8 +182,7 @@ impl Launcher { let client_socket_addr = req.from; - if let Some(udp_server_stats_event_sender) = udp_tracker_server_container.udp_server_stats_event_sender.as_deref() - { + if let Some(udp_server_stats_event_sender) = udp_tracker_server_container.stats_event_sender.as_deref() { udp_server_stats_event_sender .send_event(Event::UdpRequestReceived { context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), @@ -194,9 +193,7 @@ impl Launcher { if udp_tracker_core_container.ban_service.read().await.is_banned(&req.from.ip()) { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop 
continue: (banned ip)"); - if let Some(udp_server_stats_event_sender) = - udp_tracker_server_container.udp_server_stats_event_sender.as_deref() - { + if let Some(udp_server_stats_event_sender) = udp_tracker_server_container.stats_event_sender.as_deref() { udp_server_stats_event_sender .send_event(Event::UdpRequestBanned { context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), @@ -236,9 +233,7 @@ impl Launcher { if old_request_aborted { // Evicted task from active requests buffer was aborted. - if let Some(udp_server_stats_event_sender) = - udp_tracker_server_container.udp_server_stats_event_sender.as_deref() - { + if let Some(udp_server_stats_event_sender) = udp_tracker_server_container.stats_event_sender.as_deref() { udp_server_stats_event_sender .send_event(Event::UdpRequestAborted { context: ConnectionContext::new(client_socket_addr, server_service_binding), diff --git a/packages/udp-tracker-server/src/server/processor.rs b/packages/udp-tracker-server/src/server/processor.rs index 5e98b0361..297919bc3 100644 --- a/packages/udp-tracker-server/src/server/processor.rs +++ b/packages/udp-tracker-server/src/server/processor.rs @@ -115,7 +115,7 @@ impl Processor { } if let Some(udp_server_stats_event_sender) = - self.udp_tracker_server_container.udp_server_stats_event_sender.as_deref() + self.udp_tracker_server_container.stats_event_sender.as_deref() { udp_server_stats_event_sender .send_event(Event::UdpResponseSent { diff --git a/packages/udp-tracker-server/tests/server/contract.rs b/packages/udp-tracker-server/tests/server/contract.rs index 4cb23621d..860fd1f0b 100644 --- a/packages/udp-tracker-server/tests/server/contract.rs +++ b/packages/udp-tracker-server/tests/server/contract.rs @@ -268,7 +268,7 @@ mod receiving_an_announce_request { let udp_requests_banned_before = env .container .udp_tracker_server_container - .udp_server_stats_repository + .stats_repository .get_stats() .await .udp_requests_banned; @@ -284,7 +284,7 @@ mod 
receiving_an_announce_request { let udp_requests_banned_after = env .container .udp_tracker_server_container - .udp_server_stats_repository + .stats_repository .get_stats() .await .udp_requests_banned; diff --git a/src/app.rs b/src/app.rs index 67380d30d..41d8b67d1 100644 --- a/src/app.rs +++ b/src/app.rs @@ -111,10 +111,7 @@ async fn load_whitelisted_torrents(config: &Configuration, app_container: &Arc) { if config.core.tracker_usage_statistics { - let _job = app_container - .http_tracker_core_services - .http_stats_keeper - .run_event_listener(); + let _job = app_container.http_tracker_core_services.stats_keeper.run_event_listener(); // todo: this cannot be enabled otherwise the application never ends // because the event listener never stops. You see this console message @@ -148,10 +145,7 @@ fn start_udp_core_event_listener(config: &Configuration, app_container: &Arc) { if config.core.tracker_usage_statistics { - let _job = app_container - .udp_tracker_server_container - .udp_server_stats_keeper - .run_event_listener(); + let _job = app_container.udp_tracker_server_container.stats_keeper.run_event_listener(); // todo: this cannot be enabled otherwise the application never ends // because the event listener never stops. 
You see this console message diff --git a/src/container.rs b/src/container.rs index 537be2605..93f1fb4d7 100644 --- a/src/container.rs +++ b/src/container.rs @@ -130,10 +130,10 @@ impl AppContainer { TrackerHttpApiCoreContainer { tracker_core_container: self.tracker_core_container.clone(), http_api_config: http_api_config.clone(), - ban_service: self.udp_tracker_core_services.udp_ban_service.clone(), - http_stats_repository: self.http_tracker_core_services.http_stats_repository.clone(), - udp_core_stats_repository: self.udp_tracker_core_services.udp_core_stats_repository.clone(), - udp_server_stats_repository: self.udp_tracker_server_container.udp_server_stats_repository.clone(), + ban_service: self.udp_tracker_core_services.ban_service.clone(), + http_stats_repository: self.http_tracker_core_services.stats_repository.clone(), + udp_core_stats_repository: self.udp_tracker_core_services.stats_repository.clone(), + udp_server_stats_repository: self.udp_tracker_server_container.stats_repository.clone(), } .into() } From f25438af244a7601c482e9a78897548927de1bf3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Apr 2025 16:42:16 +0100 Subject: [PATCH 498/802] refactor: [#1478] decouple events from stats in http core keeper --- .../src/environment.rs | 6 ++- .../axum-http-tracker-server/src/server.rs | 6 +-- .../src/v1/handlers/announce.rs | 7 +-- .../src/v1/handlers/scrape.rs | 7 +-- .../http-tracker-core/benches/helpers/util.rs | 6 +-- packages/http-tracker-core/src/container.rs | 4 +- .../src/services/announce.rs | 6 +-- .../http-tracker-core/src/services/scrape.rs | 6 +-- .../src/statistics/event/listener.rs | 18 ++++++- .../src/statistics/keeper.rs | 52 +++---------------- .../src/statistics/services.rs | 7 ++- .../http-tracker-core/src/statistics/setup.rs | 17 +++--- .../src/statistics/services.rs | 8 +-- src/app.rs | 6 ++- 14 files changed, 69 insertions(+), 87 deletions(-) diff --git a/packages/axum-http-tracker-server/src/environment.rs 
b/packages/axum-http-tracker-server/src/environment.rs index f278ad29f..ffba790c2 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; +use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::container::TrackerCoreContainer; use futures::executor::block_on; @@ -68,7 +69,10 @@ impl Environment { #[allow(dead_code)] pub async fn start(self) -> Environment { // Start the event listener - let event_listener_job = self.container.http_tracker_core_container.stats_keeper.run_event_listener(); + let event_listener_job = run_event_listener( + self.container.http_tracker_core_container.stats_keeper.receiver(), + &self.container.http_tracker_core_container.stats_repository, + ); // Start the server let server = self diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 3d7adfaf2..f15dc4258 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -250,6 +250,7 @@ mod tests { use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; use bittorrent_http_tracker_core::services::announce::AnnounceService; use bittorrent_http_tracker_core::services::scrape::ScrapeService; + use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; use bittorrent_tracker_core::container::TrackerCoreContainer; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; @@ -271,13 +272,12 @@ mod tests { let http_tracker_config = Arc::new(http_tracker_config.clone()); // HTTP core stats - let http_stats_keeper = + let (http_stats_keeper, http_stats_repository) = 
bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); let http_stats_event_sender = http_stats_keeper.sender(); - let http_stats_repository = http_stats_keeper.repository(); if configuration.core.tracker_usage_statistics { - let _unused = http_stats_keeper.run_event_listener(); + let _unused = run_event_listener(http_stats_keeper.receiver(), &http_stats_repository); } let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index ddeff3ea4..eb3e21b7e 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -108,6 +108,7 @@ mod tests { use aquatic_udp_protocol::PeerId; use bittorrent_http_tracker_core::services::announce::AnnounceService; + use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; use bittorrent_http_tracker_protocol::v1::requests::announce::Announce; use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; @@ -161,12 +162,12 @@ mod tests { )); // HTTP core stats - let http_stats_keeper = bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let (http_stats_keeper, http_stats_repository) = + bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); let http_stats_event_sender = http_stats_keeper.sender(); - let _http_stats_repository = http_stats_keeper.repository(); if config.core.tracker_usage_statistics { - let _unused = http_stats_keeper.run_event_listener(); + let _unused = run_event_listener(http_stats_keeper.receiver(), &http_stats_repository); } let announce_service = Arc::new(AnnounceService::new( diff --git 
a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index 67c75d6ed..0cd36c7ab 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -83,6 +83,7 @@ mod tests { use std::str::FromStr; use std::sync::Arc; + use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; @@ -132,12 +133,12 @@ mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); // HTTP core stats - let http_stats_keeper = bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let (http_stats_keeper, http_stats_repository) = + bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); let http_stats_event_sender = http_stats_keeper.sender(); - let _http_stats_repository = http_stats_keeper.repository(); if config.core.tracker_usage_statistics { - let _unused = http_stats_keeper.run_event_listener(); + let _unused = run_event_listener(http_stats_keeper.receiver(), &http_stats_repository); } ( diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index 6bfbcffd6..590d55a15 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_http_tracker_core::event::Event; +use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; use bittorrent_http_tracker_core::{event, statistics}; use 
bittorrent_http_tracker_protocol::v1::requests::announce::Announce; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; @@ -56,12 +57,11 @@ pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> ( )); // HTTP core stats - let http_stats_keeper = statistics::setup::factory(config.core.tracker_usage_statistics); + let (http_stats_keeper, http_stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let http_stats_event_sender = http_stats_keeper.sender(); - let _http_stats_repository = http_stats_keeper.repository(); if config.core.tracker_usage_statistics { - let _unused = http_stats_keeper.run_event_listener(); + let _unused = run_event_listener(http_stats_keeper.receiver(), &http_stats_repository); } ( diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 496856494..707d2d148 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -66,9 +66,9 @@ impl HttpTrackerCoreServices { #[must_use] pub fn initialize_from(tracker_core_container: &Arc) -> Arc { // HTTP core stats - let http_stats_keeper = statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); + let (http_stats_keeper, http_stats_repository) = + statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); let http_stats_event_sender = http_stats_keeper.sender(); - let http_stats_repository = http_stats_keeper.repository(); let http_announce_service = Arc::new(AnnounceService::new( tracker_core_container.core_config.clone(), diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index c4c94474f..17a1e5417 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -253,12 +253,11 @@ mod tests { )); // HTTP core stats - let 
http_stats_keeper = statistics::setup::factory(config.core.tracker_usage_statistics); + let (http_stats_keeper, http_stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let http_stats_event_sender = http_stats_keeper.sender(); - let _http_stats_repository = http_stats_keeper.repository(); if config.core.tracker_usage_statistics { - let _unused = http_stats_keeper.run_event_listener(); + let _unused = run_event_listener(http_stats_keeper.receiver(), &http_stats_repository); } ( @@ -298,6 +297,7 @@ mod tests { use tokio::sync::broadcast::error::SendError; use crate::event::Event; + use crate::statistics::event::listener::run_event_listener; use crate::tests::sample_info_hash; use crate::{event, statistics}; diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 23f1566b3..13cf68070 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -273,9 +273,8 @@ mod tests { let core_config = Arc::new(configuration.core.clone()); // HTTP core stats - let http_stats_keeper = statistics::setup::factory(false); + let (http_stats_keeper, _http_stats_repository) = statistics::setup::factory(false); let http_stats_event_sender = http_stats_keeper.sender(); - let _http_stats_repository = http_stats_keeper.repository(); let container = initialize_services_with_configuration(&configuration); @@ -465,9 +464,8 @@ mod tests { let container = initialize_services_with_configuration(&config); // HTTP core stats - let http_stats_keeper = statistics::setup::factory(false); + let (http_stats_keeper, _http_stats_repository) = statistics::setup::factory(false); let http_stats_event_sender = http_stats_keeper.sender(); - let _http_stats_repository = http_stats_keeper.repository(); let info_hash = sample_info_hash(); let info_hashes = vec![info_hash]; diff --git a/packages/http-tracker-core/src/statistics/event/listener.rs 
b/packages/http-tracker-core/src/statistics/event/listener.rs index 00fce6b77..98711f2f5 100644 --- a/packages/http-tracker-core/src/statistics/event/listener.rs +++ b/packages/http-tracker-core/src/statistics/event/listener.rs @@ -1,6 +1,7 @@ use std::sync::Arc; -use tokio::sync::broadcast; +use tokio::sync::broadcast::{self, Receiver}; +use tokio::task::JoinHandle; use torrust_tracker_clock::clock::Time; use super::handler::handle_event; @@ -8,7 +9,20 @@ use crate::event::Event; use crate::statistics::repository::Repository; use crate::{CurrentClock, HTTP_TRACKER_LOG_TARGET}; -pub async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Arc) { +#[must_use] +pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { + let stats_repository = repository.clone(); + + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting HTTP tracker core event listener"); + + tokio::spawn(async move { + dispatch_events(receiver, stats_repository).await; + + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "HTTP tracker core event listener finished"); + }) +} + +async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Arc) { loop { match receiver.recv().await { Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, diff --git a/packages/http-tracker-core/src/statistics/keeper.rs b/packages/http-tracker-core/src/statistics/keeper.rs index 4c0f7c916..9ae0564ce 100644 --- a/packages/http-tracker-core/src/statistics/keeper.rs +++ b/packages/http-tracker-core/src/statistics/keeper.rs @@ -1,40 +1,30 @@ use std::sync::Arc; -use tokio::task::JoinHandle; +use tokio::sync::broadcast::Receiver; -use super::event::listener::dispatch_events; -use super::repository::Repository; use crate::event::sender::{self, Broadcaster}; -use crate::HTTP_TRACKER_LOG_TARGET; +use crate::event::Event; -/// The service responsible for keeping tracker metrics (listening to statistics events and handle them). 
-/// -/// It actively listen to new statistics events. When it receives a new event -/// it accordingly increases the counters. pub struct Keeper { pub enable_sender: bool, pub broadcaster: Broadcaster, - pub repository: Arc, } impl Default for Keeper { fn default() -> Self { let enable_sender = true; let broadcaster = Broadcaster::default(); - let repository = Arc::new(Repository::new()); - Self::new(enable_sender, broadcaster, repository) + Self::new(enable_sender, broadcaster) } } impl Keeper { - /// Creates a new instance of [`Keeper`]. #[must_use] - pub fn new(enable_sender: bool, broadcaster: Broadcaster, repository: Arc) -> Self { + pub fn new(enable_sender: bool, broadcaster: Broadcaster) -> Self { Self { enable_sender, broadcaster, - repository, } } @@ -48,37 +38,7 @@ impl Keeper { } #[must_use] - pub fn repository(&self) -> Arc { - self.repository.clone() - } - - #[must_use] - pub fn run_event_listener(&self) -> JoinHandle<()> { - let stats_repository = self.repository.clone(); - let receiver = self.broadcaster.subscribe(); - - tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting HTTP tracker core event listener"); - - tokio::spawn(async move { - dispatch_events(receiver, stats_repository).await; - - tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "HTTP tracker core event listener finished"); - }) - } -} - -#[cfg(test)] -mod tests { - - use crate::statistics::keeper::Keeper; - use crate::statistics::metrics::Metrics; - - #[tokio::test] - async fn should_contain_the_tracker_statistics() { - let stats_tracker = Keeper::default(); - - let stats = stats_tracker.repository.get_stats().await; - - assert_eq!(stats.tcp4_announces_handled, Metrics::default().tcp4_announces_handled); + pub fn receiver(&self) -> Receiver { + self.broadcaster.subscribe() } } diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index 7e4f03492..58cb57c53 100644 --- 
a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -75,6 +75,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; use torrust_tracker_test_helpers::configuration; + use crate::statistics::event::listener::run_event_listener; use crate::statistics::services::{get_metrics, TrackerMetrics}; use crate::statistics::{self, describe_metrics}; @@ -89,12 +90,10 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); // HTTP core stats - let http_stats_keeper = statistics::setup::factory(config.core.tracker_usage_statistics); - let _http_stats_event_sender = http_stats_keeper.sender(); - let http_stats_repository = http_stats_keeper.repository(); + let (http_stats_keeper, http_stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); if config.core.tracker_usage_statistics { - let _unused = http_stats_keeper.run_event_listener(); + let _unused = run_event_listener(http_stats_keeper.receiver(), &http_stats_repository); } let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), http_stats_repository).await; diff --git a/packages/http-tracker-core/src/statistics/setup.rs b/packages/http-tracker-core/src/statistics/setup.rs index 09f077507..f4d7a2827 100644 --- a/packages/http-tracker-core/src/statistics/setup.rs +++ b/packages/http-tracker-core/src/statistics/setup.rs @@ -8,32 +8,34 @@ use super::repository::Repository; use crate::event::sender::Broadcaster; #[must_use] -pub fn factory(tracker_usage_statistics: bool) -> Arc { +pub fn factory(tracker_usage_statistics: bool) -> (Arc, Arc) { keeper_factory(tracker_usage_statistics) } #[must_use] -pub fn keeper_factory(tracker_usage_statistics: bool) -> Arc { +pub fn keeper_factory(tracker_usage_statistics: bool) -> (Arc, Arc) { let broadcaster = Broadcaster::default(); let repository = Arc::new(Repository::new()); - 
Arc::new(Keeper::new(tracker_usage_statistics, broadcaster.clone(), repository.clone())) + let keeper = Arc::new(Keeper::new(tracker_usage_statistics, broadcaster.clone())); + + (keeper, repository) } #[cfg(test)] mod test { use super::factory; + use crate::statistics::event::listener::run_event_listener; #[tokio::test] async fn should_not_send_any_event_when_statistics_are_disabled() { let tracker_usage_statistics = false; // HTTP core stats - let http_stats_keeper = factory(tracker_usage_statistics); + let (http_stats_keeper, http_stats_repository) = factory(tracker_usage_statistics); let http_stats_event_sender = http_stats_keeper.sender(); - let _http_stats_repository = http_stats_keeper.repository(); if tracker_usage_statistics { - let _unused = http_stats_keeper.run_event_listener(); + let _unused = run_event_listener(http_stats_keeper.receiver(), &http_stats_repository); } assert!(http_stats_event_sender.is_none()); @@ -44,9 +46,8 @@ mod test { let tracker_usage_statistics = true; // HTTP core stats - let http_stats_keeper = factory(tracker_usage_statistics); + let (http_stats_keeper, _http_stats_repository) = factory(tracker_usage_statistics); let http_stats_event_sender = http_stats_keeper.sender(); - let _http_stats_repository = http_stats_keeper.repository(); assert!(http_stats_event_sender.is_some()); } diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 95e21633a..087807557 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -123,6 +123,7 @@ pub async fn get_labeled_metrics( mod tests { use std::sync::Arc; + use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::{self}; use bittorrent_udp_tracker_core::services::banning::BanService; @@ 
-147,12 +148,11 @@ mod tests { let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); // HTTP core stats - let http_stats_keeper = bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); - let _http_stats_event_sender = http_stats_keeper.sender(); - let http_stats_repository = http_stats_keeper.repository(); + let (http_stats_keeper, http_stats_repository) = + bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); if config.core.tracker_usage_statistics { - let _unused = http_stats_keeper.run_event_listener(); + let _unused = run_event_listener(http_stats_keeper.receiver(), &http_stats_repository); } // UDP core stats (not used in this test) diff --git a/src/app.rs b/src/app.rs index 41d8b67d1..ddb60425c 100644 --- a/src/app.rs +++ b/src/app.rs @@ -23,6 +23,7 @@ //! - Tracker REST API: the tracker API can be enabled/disabled. use std::sync::Arc; +use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; use tokio::task::JoinHandle; use torrust_tracker_configuration::{Configuration, HttpTracker, UdpTracker}; use tracing::instrument; @@ -111,7 +112,10 @@ async fn load_whitelisted_torrents(config: &Configuration, app_container: &Arc) { if config.core.tracker_usage_statistics { - let _job = app_container.http_tracker_core_services.stats_keeper.run_event_listener(); + let _job = run_event_listener( + app_container.http_tracker_core_services.stats_keeper.receiver(), + &app_container.http_tracker_core_services.stats_repository, + ); // todo: this cannot be enabled otherwise the application never ends // because the event listener never stops. 
You see this console message From f9f13a454edf4767a8bcd6d03f6e374910929509 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Apr 2025 17:14:48 +0100 Subject: [PATCH 499/802] refactor: [#1478] decouple events from stats in udp core keeper --- .../udp-tracker-core/benches/helpers/sync.rs | 2 +- packages/udp-tracker-core/src/container.rs | 4 +- .../udp-tracker-core/src/services/connect.rs | 6 +-- .../src/statistics/event/listener.rs | 18 ++++++- .../udp-tracker-core/src/statistics/keeper.rs | 52 +++---------------- .../src/statistics/services.rs | 5 +- .../udp-tracker-core/src/statistics/setup.rs | 21 ++++---- .../udp-tracker-server/src/environment.rs | 6 ++- .../src/handlers/announce.rs | 2 +- .../src/handlers/connect.rs | 6 +-- .../udp-tracker-server/src/handlers/mod.rs | 2 +- src/app.rs | 8 +-- 12 files changed, 56 insertions(+), 76 deletions(-) diff --git a/packages/udp-tracker-core/benches/helpers/sync.rs b/packages/udp-tracker-core/benches/helpers/sync.rs index 926916d61..25d2b55b8 100644 --- a/packages/udp-tracker-core/benches/helpers/sync.rs +++ b/packages/udp-tracker-core/benches/helpers/sync.rs @@ -14,7 +14,7 @@ pub async fn connect_once(samples: u64) -> Duration { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let keeper = statistics::setup::factory(false); + let (keeper, _repository) = statistics::setup::factory(false); let udp_core_stats_event_sender = keeper.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); let start = Instant::now(); diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index ef66e9b7e..6fe6d2bdf 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -77,9 +77,9 @@ pub struct UdpTrackerCoreServices { impl UdpTrackerCoreServices { #[must_use] pub fn 
initialize_from(tracker_core_container: &Arc) -> Arc { - let keeper = statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); + let (keeper, udp_core_stats_repository) = + statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); let udp_core_stats_event_sender = keeper.sender(); - let udp_core_stats_repository = keeper.repository(); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender.clone())); let announce_service = Arc::new(AnnounceService::new( diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index c6c1c098f..1626aa8d4 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -78,7 +78,7 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let keeper = statistics::setup::factory(false); + let (keeper, _repository) = statistics::setup::factory(false); let udp_core_stats_event_sender = keeper.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); @@ -98,7 +98,7 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let keeper = statistics::setup::factory(false); + let (keeper, _repository) = statistics::setup::factory(false); let udp_core_stats_event_sender = keeper.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); @@ -119,7 +119,7 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = 
ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let keeper = statistics::setup::factory(false); + let (keeper, _repository) = statistics::setup::factory(false); let udp_core_stats_event_sender = keeper.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); diff --git a/packages/udp-tracker-core/src/statistics/event/listener.rs b/packages/udp-tracker-core/src/statistics/event/listener.rs index 835283d1e..5aa510d04 100644 --- a/packages/udp-tracker-core/src/statistics/event/listener.rs +++ b/packages/udp-tracker-core/src/statistics/event/listener.rs @@ -1,6 +1,7 @@ use std::sync::Arc; -use tokio::sync::broadcast; +use tokio::sync::broadcast::{self, Receiver}; +use tokio::task::JoinHandle; use torrust_tracker_clock::clock::Time; use super::handler::handle_event; @@ -8,7 +9,20 @@ use crate::event::Event; use crate::statistics::repository::Repository; use crate::{CurrentClock, UDP_TRACKER_LOG_TARGET}; -pub async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Arc) { +#[must_use] +pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { + let stats_repository = repository.clone(); + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker core event listener"); + + tokio::spawn(async move { + dispatch_events(receiver, stats_repository).await; + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker core event listener finished"); + }) +} + +async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Arc) { loop { match receiver.recv().await { Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, diff --git a/packages/udp-tracker-core/src/statistics/keeper.rs b/packages/udp-tracker-core/src/statistics/keeper.rs index 8acecc585..9ae0564ce 100644 --- a/packages/udp-tracker-core/src/statistics/keeper.rs +++ b/packages/udp-tracker-core/src/statistics/keeper.rs @@ -1,40 +1,30 @@ use std::sync::Arc; -use 
tokio::task::JoinHandle; +use tokio::sync::broadcast::Receiver; -use super::event::listener::dispatch_events; -use super::repository::Repository; use crate::event::sender::{self, Broadcaster}; -use crate::UDP_TRACKER_LOG_TARGET; +use crate::event::Event; -/// The service responsible for keeping tracker metrics (listening to statistics events and handle them). -/// -/// It actively listen to new statistics events. When it receives a new event -/// it accordingly increases the counters. pub struct Keeper { pub enable_sender: bool, pub broadcaster: Broadcaster, - pub repository: Arc, } impl Default for Keeper { fn default() -> Self { let enable_sender = true; let broadcaster = Broadcaster::default(); - let repository = Arc::new(Repository::new()); - Self::new(enable_sender, broadcaster, repository) + Self::new(enable_sender, broadcaster) } } impl Keeper { - /// Creates a new instance of [`Keeper`]. #[must_use] - pub fn new(enable_sender: bool, broadcaster: Broadcaster, repository: Arc) -> Self { + pub fn new(enable_sender: bool, broadcaster: Broadcaster) -> Self { Self { enable_sender, broadcaster, - repository, } } @@ -48,37 +38,7 @@ impl Keeper { } #[must_use] - pub fn repository(&self) -> Arc { - self.repository.clone() - } - - #[must_use] - pub fn run_event_listener(&self) -> JoinHandle<()> { - let stats_repository = self.repository.clone(); - let receiver = self.broadcaster.subscribe(); - - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting HTTP tracker core event listener"); - - tokio::spawn(async move { - dispatch_events(receiver, stats_repository).await; - - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "HTTP tracker core event listener finished"); - }) - } -} - -#[cfg(test)] -mod tests { - - use crate::statistics::keeper::Keeper; - use crate::statistics::metrics::Metrics; - - #[tokio::test] - async fn should_contain_the_tracker_statistics() { - let stats_tracker = Keeper::default(); - - let stats = stats_tracker.repository.get_stats().await; - - 
assert_eq!(stats.udp4_announces_handled, Metrics::default().udp4_announces_handled); + pub fn receiver(&self) -> Receiver { + self.broadcaster.subscribe() } } diff --git a/packages/udp-tracker-core/src/statistics/services.rs b/packages/udp-tracker-core/src/statistics/services.rs index e1aa66f67..aedd78ecd 100644 --- a/packages/udp-tracker-core/src/statistics/services.rs +++ b/packages/udp-tracker-core/src/statistics/services.rs @@ -106,10 +106,9 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let keeper = crate::statistics::setup::factory(config.core.tracker_usage_statistics); - let udp_core_stats_repository = keeper.repository(); + let (_keeper, repository) = crate::statistics::setup::factory(config.core.tracker_usage_statistics); - let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), udp_core_stats_repository.clone()).await; + let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), repository.clone()).await; assert_eq!( tracker_metrics, diff --git a/packages/udp-tracker-core/src/statistics/setup.rs b/packages/udp-tracker-core/src/statistics/setup.rs index 6466ac58b..8e07719ed 100644 --- a/packages/udp-tracker-core/src/statistics/setup.rs +++ b/packages/udp-tracker-core/src/statistics/setup.rs @@ -8,31 +8,33 @@ use super::repository::Repository; use crate::event::sender::Broadcaster; #[must_use] -pub fn factory(tracker_usage_statistics: bool) -> Arc { +pub fn factory(tracker_usage_statistics: bool) -> (Arc, Arc) { keeper_factory(tracker_usage_statistics) } #[must_use] -pub fn keeper_factory(tracker_usage_statistics: bool) -> Arc { +pub fn keeper_factory(tracker_usage_statistics: bool) -> (Arc, Arc) { let broadcaster = Broadcaster::default(); let repository = Arc::new(Repository::new()); - Arc::new(Keeper::new(tracker_usage_statistics, broadcaster.clone(), repository.clone())) + let keeper = Arc::new(Keeper::new(tracker_usage_statistics, broadcaster.clone())); + + (keeper, 
repository) } #[cfg(test)] mod test { use super::factory; + use crate::statistics::event::listener::run_event_listener; #[tokio::test] async fn should_not_send_any_event_when_statistics_are_disabled() { let tracker_usage_statistics = false; // UDP core stats - let http_stats_keeper = factory(tracker_usage_statistics); - let http_stats_event_sender = http_stats_keeper.sender(); - let _http_stats_repository = http_stats_keeper.repository(); + let (stats_keeper, stats_repository) = factory(tracker_usage_statistics); + let http_stats_event_sender = stats_keeper.sender(); if tracker_usage_statistics { - let _unused = http_stats_keeper.run_event_listener(); + let _unused = run_event_listener(stats_keeper.receiver(), &stats_repository); } assert!(http_stats_event_sender.is_none()); @@ -43,9 +45,8 @@ mod test { let tracker_usage_statistics = true; // UDP core stats - let http_stats_keeper = factory(tracker_usage_statistics); - let http_stats_event_sender = http_stats_keeper.sender(); - let _http_stats_repository = http_stats_keeper.repository(); + let (stats_keeper, _stats_repository) = factory(tracker_usage_statistics); + let http_stats_event_sender = stats_keeper.sender(); assert!(http_stats_event_sender.is_some()); } diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index cda8cd678..2d3347bf9 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -4,6 +4,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; +use bittorrent_udp_tracker_core::statistics::event::listener::run_event_listener; use tokio::task::JoinHandle; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration, DEFAULT_TIMEOUT}; @@ -72,7 +73,10 @@ impl Environment { pub async fn start(self) -> 
Environment { let cookie_lifetime = self.container.udp_tracker_core_container.udp_tracker_config.cookie_lifetime; // Start the UDP tracker core event listener - let udp_core_event_listener_job = Some(self.container.udp_tracker_core_container.stats_keeper.run_event_listener()); + let udp_core_event_listener_job = Some(run_event_listener( + self.container.udp_tracker_core_container.stats_keeper.receiver(), + &self.container.udp_tracker_core_container.stats_repository, + )); // Start the UDP tracker server event listener let udp_server_event_listener_job = Some(self.container.udp_tracker_server_container.stats_keeper.run_event_listener()); diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 9dd7156b1..d4dc66492 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -706,7 +706,7 @@ mod tests { announce_handler: Arc, whitelist_authorization: Arc, ) -> Response { - let core_keeper = bittorrent_udp_tracker_core::statistics::setup::factory(false); + let (core_keeper, _core_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); let udp_core_stats_event_sender = core_keeper.sender(); let server_keeper = crate::statistics::setup::factory(false); diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index fb05b3693..263d58e17 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -81,7 +81,7 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let core_keeper = bittorrent_udp_tracker_core::statistics::setup::factory(false); + let (core_keeper, _core_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); 
let udp_core_stats_event_sender = core_keeper.sender(); let server_keeper = crate::statistics::setup::factory(false); @@ -117,7 +117,7 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let core_keeper = bittorrent_udp_tracker_core::statistics::setup::factory(false); + let (core_keeper, _core_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); let udp_core_stats_event_sender = core_keeper.sender(); let server_keeper = crate::statistics::setup::factory(false); @@ -153,7 +153,7 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let core_keeper = bittorrent_udp_tracker_core::statistics::setup::factory(false); + let (core_keeper, _core_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); let udp_core_stats_event_sender = core_keeper.sender(); let server_keeper = crate::statistics::setup::factory(false); diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 8ef053684..fdc014825 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -284,7 +284,7 @@ pub(crate) mod tests { )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - let core_keeper = bittorrent_udp_tracker_core::statistics::setup::factory(false); + let (core_keeper, _core_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); let udp_core_stats_event_sender = core_keeper.sender(); let server_keeper = crate::statistics::setup::factory(false); diff --git a/src/app.rs b/src/app.rs index ddb60425c..ba1f28a1c 100644 --- a/src/app.rs +++ b/src/app.rs @@ -23,7 
+23,6 @@ //! - Tracker REST API: the tracker API can be enabled/disabled. use std::sync::Arc; -use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; use tokio::task::JoinHandle; use torrust_tracker_configuration::{Configuration, HttpTracker, UdpTracker}; use tracing::instrument; @@ -112,7 +111,7 @@ async fn load_whitelisted_torrents(config: &Configuration, app_container: &Arc) { if config.core.tracker_usage_statistics { - let _job = run_event_listener( + let _job = bittorrent_http_tracker_core::statistics::event::listener::run_event_listener( app_container.http_tracker_core_services.stats_keeper.receiver(), &app_container.http_tracker_core_services.stats_repository, ); @@ -132,7 +131,10 @@ fn start_http_core_event_listener(config: &Configuration, app_container: &Arc) { if config.core.tracker_usage_statistics { - let _job = app_container.udp_tracker_core_services.stats_keeper.run_event_listener(); + let _job = bittorrent_udp_tracker_core::statistics::event::listener::run_event_listener( + app_container.udp_tracker_core_services.stats_keeper.receiver(), + &app_container.udp_tracker_core_services.stats_repository, + ); // todo: this cannot be enabled otherwise the application never ends // because the event listener never stops. 
You see this console message From 5f383572c735d36cef11dfe21474de6f35717eb8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Apr 2025 17:36:15 +0100 Subject: [PATCH 500/802] refactor: [#1478] decouple events from stats in udp server keeper --- .../src/statistics/services.rs | 3 +- packages/udp-tracker-server/src/container.rs | 4 +- .../udp-tracker-server/src/environment.rs | 8 +-- .../src/handlers/announce.rs | 4 +- .../src/handlers/connect.rs | 6 +-- .../udp-tracker-server/src/handlers/mod.rs | 2 +- .../udp-tracker-server/src/handlers/scrape.rs | 2 +- .../src/statistics/event/listener.rs | 18 ++++++- .../src/statistics/keeper.rs | 52 +++---------------- .../src/statistics/services.rs | 5 +- .../src/statistics/setup.rs | 25 ++++----- src/app.rs | 5 +- 12 files changed, 56 insertions(+), 78 deletions(-) diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 087807557..176c045d6 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -158,9 +158,8 @@ mod tests { // UDP core stats (not used in this test) // UDP server stats - let udp_server_stats_keeper = + let (_udp_server_stats_keeper, udp_server_stats_repository) = torrust_udp_tracker_server::statistics::setup::factory(config.core.tracker_usage_statistics); - let udp_server_stats_repository = udp_server_stats_keeper.repository(); let tracker_metrics = get_metrics( in_memory_torrent_repository.clone(), diff --git a/packages/udp-tracker-server/src/container.rs b/packages/udp-tracker-server/src/container.rs index 64d01e754..4898cb57d 100644 --- a/packages/udp-tracker-server/src/container.rs +++ b/packages/udp-tracker-server/src/container.rs @@ -32,9 +32,9 @@ pub struct UdpTrackerServerServices { impl UdpTrackerServerServices { #[must_use] pub fn initialize(core_config: &Arc) -> Arc { - let udp_server_stats_keeper = 
statistics::setup::factory(core_config.tracker_usage_statistics); + let (udp_server_stats_keeper, udp_server_stats_repository) = + statistics::setup::factory(core_config.tracker_usage_statistics); let udp_server_stats_event_sender = udp_server_stats_keeper.sender(); - let udp_server_stats_repository = udp_server_stats_keeper.repository(); Arc::new(Self { stats_keeper: udp_server_stats_keeper.clone(), diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 2d3347bf9..7d70317aa 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -4,7 +4,6 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; -use bittorrent_udp_tracker_core::statistics::event::listener::run_event_listener; use tokio::task::JoinHandle; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration, DEFAULT_TIMEOUT}; @@ -73,13 +72,16 @@ impl Environment { pub async fn start(self) -> Environment { let cookie_lifetime = self.container.udp_tracker_core_container.udp_tracker_config.cookie_lifetime; // Start the UDP tracker core event listener - let udp_core_event_listener_job = Some(run_event_listener( + let udp_core_event_listener_job = Some(bittorrent_udp_tracker_core::statistics::event::listener::run_event_listener( self.container.udp_tracker_core_container.stats_keeper.receiver(), &self.container.udp_tracker_core_container.stats_repository, )); // Start the UDP tracker server event listener - let udp_server_event_listener_job = Some(self.container.udp_tracker_server_container.stats_keeper.run_event_listener()); + let udp_server_event_listener_job = Some(crate::statistics::event::listener::run_event_listener( + self.container.udp_tracker_server_container.stats_keeper.receiver(), + 
&self.container.udp_tracker_server_container.stats_repository, + )); // Start the UDP tracker server let server = self diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index d4dc66492..f12bf3d13 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -374,7 +374,7 @@ mod tests { core_tracker_services: Arc, core_udp_tracker_services: Arc, ) -> Response { - let keeper = crate::statistics::setup::factory(false); + let (keeper, _repository) = crate::statistics::setup::factory(false); let udp_server_stats_event_sender = keeper.sender(); let client_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); @@ -709,7 +709,7 @@ mod tests { let (core_keeper, _core_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); let udp_core_stats_event_sender = core_keeper.sender(); - let server_keeper = crate::statistics::setup::factory(false); + let (server_keeper, _server_repository) = crate::statistics::setup::factory(false); let udp_server_stats_event_sender = server_keeper.sender(); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index 263d58e17..264cd426d 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -84,7 +84,7 @@ mod tests { let (core_keeper, _core_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); let udp_core_stats_event_sender = core_keeper.sender(); - let server_keeper = crate::statistics::setup::factory(false); + let (server_keeper, _server_repository) = crate::statistics::setup::factory(false); let udp_server_stats_event_sender = server_keeper.sender(); let request = ConnectRequest { @@ -120,7 +120,7 @@ mod tests { let (core_keeper, _core_repository) = 
bittorrent_udp_tracker_core::statistics::setup::factory(false); let udp_core_stats_event_sender = core_keeper.sender(); - let server_keeper = crate::statistics::setup::factory(false); + let (server_keeper, _server_repository) = crate::statistics::setup::factory(false); let udp_server_stats_event_sender = server_keeper.sender(); let request = ConnectRequest { @@ -156,7 +156,7 @@ mod tests { let (core_keeper, _core_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); let udp_core_stats_event_sender = core_keeper.sender(); - let server_keeper = crate::statistics::setup::factory(false); + let (server_keeper, _server_repository) = crate::statistics::setup::factory(false); let udp_server_stats_event_sender = server_keeper.sender(); let request = ConnectRequest { diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index fdc014825..2905b20a9 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -287,7 +287,7 @@ pub(crate) mod tests { let (core_keeper, _core_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); let udp_core_stats_event_sender = core_keeper.sender(); - let server_keeper = crate::statistics::setup::factory(false); + let (server_keeper, _server_repository) = crate::statistics::setup::factory(false); let udp_server_stats_event_sender = server_keeper.sender(); let announce_service = Arc::new(AnnounceService::new( diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index cef896d73..78a01fe6d 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -178,7 +178,7 @@ mod tests { core_tracker_services: Arc, core_udp_tracker_services: Arc, ) -> Response { - let keeper = crate::statistics::setup::factory(false); + let (keeper, _repository) = 
crate::statistics::setup::factory(false); let udp_server_stats_event_sender = keeper.sender(); let client_socket_addr = sample_ipv4_remote_addr(); diff --git a/packages/udp-tracker-server/src/statistics/event/listener.rs b/packages/udp-tracker-server/src/statistics/event/listener.rs index 80c9f8d21..8e8cc5195 100644 --- a/packages/udp-tracker-server/src/statistics/event/listener.rs +++ b/packages/udp-tracker-server/src/statistics/event/listener.rs @@ -1,7 +1,8 @@ use std::sync::Arc; use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; -use tokio::sync::broadcast; +use tokio::sync::broadcast::{self, Receiver}; +use tokio::task::JoinHandle; use torrust_tracker_clock::clock::Time; use super::handler::handle_event; @@ -9,7 +10,20 @@ use crate::event::Event; use crate::statistics::repository::Repository; use crate::CurrentClock; -pub async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Arc) { +#[must_use] +pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { + let stats_repository = repository.clone(); + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker server event listener"); + + tokio::spawn(async move { + dispatch_events(receiver, stats_repository).await; + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker server event listener finished"); + }) +} + +async fn dispatch_events(mut receiver: broadcast::Receiver, stats_repository: Arc) { loop { match receiver.recv().await { Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, diff --git a/packages/udp-tracker-server/src/statistics/keeper.rs b/packages/udp-tracker-server/src/statistics/keeper.rs index 1d525e7b3..9ae0564ce 100644 --- a/packages/udp-tracker-server/src/statistics/keeper.rs +++ b/packages/udp-tracker-server/src/statistics/keeper.rs @@ -1,40 +1,30 @@ use std::sync::Arc; -use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; -use tokio::task::JoinHandle; +use 
tokio::sync::broadcast::Receiver; -use super::event::listener::dispatch_events; -use super::repository::Repository; use crate::event::sender::{self, Broadcaster}; +use crate::event::Event; -/// The service responsible for keeping tracker metrics (listening to statistics events and handle them). -/// -/// It actively listen to new statistics events. When it receives a new event -/// it accordingly increases the counters. pub struct Keeper { pub enable_sender: bool, pub broadcaster: Broadcaster, - pub repository: Arc, } impl Default for Keeper { fn default() -> Self { let enable_sender = true; let broadcaster = Broadcaster::default(); - let repository = Arc::new(Repository::new()); - Self::new(enable_sender, broadcaster, repository) + Self::new(enable_sender, broadcaster) } } impl Keeper { - /// Creates a new instance of [`Keeper`]. #[must_use] - pub fn new(enable_sender: bool, broadcaster: Broadcaster, repository: Arc) -> Self { + pub fn new(enable_sender: bool, broadcaster: Broadcaster) -> Self { Self { enable_sender, broadcaster, - repository, } } @@ -48,37 +38,7 @@ impl Keeper { } #[must_use] - pub fn repository(&self) -> Arc { - self.repository.clone() - } - - #[must_use] - pub fn run_event_listener(&self) -> JoinHandle<()> { - let stats_repository = self.repository.clone(); - let receiver = self.broadcaster.subscribe(); - - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting HTTP tracker core event listener"); - - tokio::spawn(async move { - dispatch_events(receiver, stats_repository).await; - - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "HTTP tracker core event listener finished"); - }) - } -} - -#[cfg(test)] -mod tests { - - use crate::statistics::keeper::Keeper; - use crate::statistics::metrics::Metrics; - - #[tokio::test] - async fn should_contain_the_tracker_statistics() { - let stats_tracker = Keeper::default(); - - let stats = stats_tracker.repository.get_stats().await; - - assert_eq!(stats.udp4_announces_handled, 
Metrics::default().udp4_announces_handled); + pub fn receiver(&self) -> Receiver { + self.broadcaster.subscribe() } } diff --git a/packages/udp-tracker-server/src/statistics/services.rs b/packages/udp-tracker-server/src/statistics/services.rs index 22f3f4754..f8c385535 100644 --- a/packages/udp-tracker-server/src/statistics/services.rs +++ b/packages/udp-tracker-server/src/statistics/services.rs @@ -127,13 +127,12 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let keeper = statistics::setup::factory(config.core.tracker_usage_statistics); - let udp_server_stats_repository = keeper.repository(); + let (_keeper, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); let tracker_metrics = get_metrics( in_memory_torrent_repository.clone(), ban_service.clone(), - udp_server_stats_repository.clone(), + stats_repository.clone(), ) .await; diff --git a/packages/udp-tracker-server/src/statistics/setup.rs b/packages/udp-tracker-server/src/statistics/setup.rs index 09f077507..b504ae41d 100644 --- a/packages/udp-tracker-server/src/statistics/setup.rs +++ b/packages/udp-tracker-server/src/statistics/setup.rs @@ -8,35 +8,37 @@ use super::repository::Repository; use crate::event::sender::Broadcaster; #[must_use] -pub fn factory(tracker_usage_statistics: bool) -> Arc { +pub fn factory(tracker_usage_statistics: bool) -> (Arc, Arc) { keeper_factory(tracker_usage_statistics) } #[must_use] -pub fn keeper_factory(tracker_usage_statistics: bool) -> Arc { +pub fn keeper_factory(tracker_usage_statistics: bool) -> (Arc, Arc) { let broadcaster = Broadcaster::default(); let repository = Arc::new(Repository::new()); - Arc::new(Keeper::new(tracker_usage_statistics, broadcaster.clone(), repository.clone())) + let keeper = Arc::new(Keeper::new(tracker_usage_statistics, broadcaster.clone())); + + (keeper, repository) } 
#[cfg(test)] mod test { use super::factory; + use crate::statistics::event::listener::run_event_listener; #[tokio::test] async fn should_not_send_any_event_when_statistics_are_disabled() { let tracker_usage_statistics = false; // HTTP core stats - let http_stats_keeper = factory(tracker_usage_statistics); - let http_stats_event_sender = http_stats_keeper.sender(); - let _http_stats_repository = http_stats_keeper.repository(); + let (stats_keeper, stats_repository) = factory(tracker_usage_statistics); + let stats_event_sender = stats_keeper.sender(); if tracker_usage_statistics { - let _unused = http_stats_keeper.run_event_listener(); + let _unused = run_event_listener(stats_keeper.receiver(), &stats_repository); } - assert!(http_stats_event_sender.is_none()); + assert!(stats_event_sender.is_none()); } #[tokio::test] @@ -44,10 +46,9 @@ mod test { let tracker_usage_statistics = true; // HTTP core stats - let http_stats_keeper = factory(tracker_usage_statistics); - let http_stats_event_sender = http_stats_keeper.sender(); - let _http_stats_repository = http_stats_keeper.repository(); + let (stats_keeper, _stats_repository) = factory(tracker_usage_statistics); + let stats_event_sender = stats_keeper.sender(); - assert!(http_stats_event_sender.is_some()); + assert!(stats_event_sender.is_some()); } } diff --git a/src/app.rs b/src/app.rs index ba1f28a1c..b01dc9c36 100644 --- a/src/app.rs +++ b/src/app.rs @@ -151,7 +151,10 @@ fn start_udp_core_event_listener(config: &Configuration, app_container: &Arc) { if config.core.tracker_usage_statistics { - let _job = app_container.udp_tracker_server_container.stats_keeper.run_event_listener(); + let _job = torrust_udp_tracker_server::statistics::event::listener::run_event_listener( + app_container.udp_tracker_server_container.stats_keeper.receiver(), + &app_container.udp_tracker_server_container.stats_repository, + ); // todo: this cannot be enabled otherwise the application never ends // because the event listener never stops. 
You see this console message From a055ab9787a5d9b7cf876ff7004c4cf6e3155d33 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Apr 2025 17:41:26 +0100 Subject: [PATCH 501/802] refactor: [#1478] inline keeper_factory fn --- packages/http-tracker-core/src/statistics/setup.rs | 5 ----- packages/udp-tracker-core/src/statistics/setup.rs | 6 +----- packages/udp-tracker-server/src/statistics/setup.rs | 5 ----- 3 files changed, 1 insertion(+), 15 deletions(-) diff --git a/packages/http-tracker-core/src/statistics/setup.rs b/packages/http-tracker-core/src/statistics/setup.rs index f4d7a2827..84f48e09b 100644 --- a/packages/http-tracker-core/src/statistics/setup.rs +++ b/packages/http-tracker-core/src/statistics/setup.rs @@ -9,11 +9,6 @@ use crate::event::sender::Broadcaster; #[must_use] pub fn factory(tracker_usage_statistics: bool) -> (Arc, Arc) { - keeper_factory(tracker_usage_statistics) -} - -#[must_use] -pub fn keeper_factory(tracker_usage_statistics: bool) -> (Arc, Arc) { let broadcaster = Broadcaster::default(); let repository = Arc::new(Repository::new()); let keeper = Arc::new(Keeper::new(tracker_usage_statistics, broadcaster.clone())); diff --git a/packages/udp-tracker-core/src/statistics/setup.rs b/packages/udp-tracker-core/src/statistics/setup.rs index 8e07719ed..66d7522dc 100644 --- a/packages/udp-tracker-core/src/statistics/setup.rs +++ b/packages/udp-tracker-core/src/statistics/setup.rs @@ -9,17 +9,13 @@ use crate::event::sender::Broadcaster; #[must_use] pub fn factory(tracker_usage_statistics: bool) -> (Arc, Arc) { - keeper_factory(tracker_usage_statistics) -} - -#[must_use] -pub fn keeper_factory(tracker_usage_statistics: bool) -> (Arc, Arc) { let broadcaster = Broadcaster::default(); let repository = Arc::new(Repository::new()); let keeper = Arc::new(Keeper::new(tracker_usage_statistics, broadcaster.clone())); (keeper, repository) } + #[cfg(test)] mod test { use super::factory; diff --git a/packages/udp-tracker-server/src/statistics/setup.rs 
b/packages/udp-tracker-server/src/statistics/setup.rs index b504ae41d..2e9881c3d 100644 --- a/packages/udp-tracker-server/src/statistics/setup.rs +++ b/packages/udp-tracker-server/src/statistics/setup.rs @@ -9,11 +9,6 @@ use crate::event::sender::Broadcaster; #[must_use] pub fn factory(tracker_usage_statistics: bool) -> (Arc, Arc) { - keeper_factory(tracker_usage_statistics) -} - -#[must_use] -pub fn keeper_factory(tracker_usage_statistics: bool) -> (Arc, Arc) { let broadcaster = Broadcaster::default(); let repository = Arc::new(Repository::new()); let keeper = Arc::new(Keeper::new(tracker_usage_statistics, broadcaster.clone())); From cc7ead8863d40bdcf52726ff05752b3ba976b1e6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Apr 2025 18:07:10 +0100 Subject: [PATCH 502/802] refactor: [#1478] inline factory fn in http core stats --- .../axum-http-tracker-server/src/server.rs | 12 ++++- .../src/v1/handlers/announce.rs | 12 ++++- .../src/v1/handlers/scrape.rs | 12 ++++- .../http-tracker-core/benches/helpers/util.rs | 13 ++++- packages/http-tracker-core/src/container.rs | 12 ++++- .../src/services/announce.rs | 13 ++++- .../http-tracker-core/src/services/scrape.rs | 16 ++++-- .../http-tracker-core/src/statistics/mod.rs | 1 - .../src/statistics/services.rs | 12 ++++- .../http-tracker-core/src/statistics/setup.rs | 49 ------------------- .../src/statistics/services.rs | 11 ++++- 11 files changed, 93 insertions(+), 70 deletions(-) delete mode 100644 packages/http-tracker-core/src/statistics/setup.rs diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index f15dc4258..8a3b5325a 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -248,9 +248,12 @@ mod tests { use std::sync::Arc; use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; + use bittorrent_http_tracker_core::event::sender::Broadcaster; use 
bittorrent_http_tracker_core::services::announce::AnnounceService; use bittorrent_http_tracker_core::services::scrape::ScrapeService; use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; + use bittorrent_http_tracker_core::statistics::keeper::Keeper; + use bittorrent_http_tracker_core::statistics::repository::Repository; use bittorrent_tracker_core::container::TrackerCoreContainer; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; @@ -272,8 +275,13 @@ mod tests { let http_tracker_config = Arc::new(http_tracker_config.clone()); // HTTP core stats - let (http_stats_keeper, http_stats_repository) = - bittorrent_http_tracker_core::statistics::setup::factory(configuration.core.tracker_usage_statistics); + let http_core_broadcaster = Broadcaster::default(); + let http_stats_repository = Arc::new(Repository::new()); + let http_stats_keeper = Arc::new(Keeper::new( + configuration.core.tracker_usage_statistics, + http_core_broadcaster.clone(), + )); + let http_stats_event_sender = http_stats_keeper.sender(); if configuration.core.tracker_usage_statistics { diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index eb3e21b7e..98cf34259 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -107,8 +107,11 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::PeerId; + use bittorrent_http_tracker_core::event::sender::Broadcaster; use bittorrent_http_tracker_core::services::announce::AnnounceService; use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; + use bittorrent_http_tracker_core::statistics::keeper::Keeper; + use bittorrent_http_tracker_core::statistics::repository::Repository; use bittorrent_http_tracker_protocol::v1::requests::announce::Announce; use 
bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; @@ -162,8 +165,13 @@ mod tests { )); // HTTP core stats - let (http_stats_keeper, http_stats_repository) = - bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let http_core_broadcaster = Broadcaster::default(); + let http_stats_repository = Arc::new(Repository::new()); + let http_stats_keeper = Arc::new(Keeper::new( + config.core.tracker_usage_statistics, + http_core_broadcaster.clone(), + )); + let http_stats_event_sender = http_stats_keeper.sender(); if config.core.tracker_usage_statistics { diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index 0cd36c7ab..fc88bbde9 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -83,7 +83,10 @@ mod tests { use std::str::FromStr; use std::sync::Arc; + use bittorrent_http_tracker_core::event::sender::Broadcaster; use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; + use bittorrent_http_tracker_core::statistics::keeper::Keeper; + use bittorrent_http_tracker_core::statistics::repository::Repository; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; use bittorrent_http_tracker_protocol::v1::responses; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; @@ -133,8 +136,13 @@ mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); // HTTP core stats - let (http_stats_keeper, http_stats_repository) = - bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let http_core_broadcaster = Broadcaster::default(); + let http_stats_repository = Arc::new(Repository::new()); + let http_stats_keeper = 
Arc::new(Keeper::new( + config.core.tracker_usage_statistics, + http_core_broadcaster.clone(), + )); + let http_stats_event_sender = http_stats_keeper.sender(); if config.core.tracker_usage_statistics { diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index 590d55a15..9c45417d4 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -2,9 +2,12 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +use bittorrent_http_tracker_core::event; +use bittorrent_http_tracker_core::event::sender::Broadcaster; use bittorrent_http_tracker_core::event::Event; use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; -use bittorrent_http_tracker_core::{event, statistics}; +use bittorrent_http_tracker_core::statistics::keeper::Keeper; +use bittorrent_http_tracker_core::statistics::repository::Repository; use bittorrent_http_tracker_protocol::v1::requests::announce::Announce; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; use bittorrent_primitives::info_hash::InfoHash; @@ -57,7 +60,13 @@ pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> ( )); // HTTP core stats - let (http_stats_keeper, http_stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + let http_core_broadcaster = Broadcaster::default(); + let http_stats_repository = Arc::new(Repository::new()); + let http_stats_keeper = Arc::new(Keeper::new( + config.core.tracker_usage_statistics, + http_core_broadcaster.clone(), + )); + let http_stats_event_sender = http_stats_keeper.sender(); if config.core.tracker_usage_statistics { diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 707d2d148..060e6289a 100644 --- 
a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -3,8 +3,11 @@ use std::sync::Arc; use bittorrent_tracker_core::container::TrackerCoreContainer; use torrust_tracker_configuration::{Core, HttpTracker}; +use crate::event::sender::Broadcaster; use crate::services::announce::AnnounceService; use crate::services::scrape::ScrapeService; +use crate::statistics::keeper::Keeper; +use crate::statistics::repository::Repository; use crate::{event, services, statistics}; pub struct HttpTrackerCoreContainer { @@ -66,8 +69,13 @@ impl HttpTrackerCoreServices { #[must_use] pub fn initialize_from(tracker_core_container: &Arc) -> Arc { // HTTP core stats - let (http_stats_keeper, http_stats_repository) = - statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); + let http_core_broadcaster = Broadcaster::default(); + let http_stats_repository = Arc::new(Repository::new()); + let http_stats_keeper = Arc::new(Keeper::new( + tracker_core_container.core_config.tracker_usage_statistics, + http_core_broadcaster.clone(), + )); + let http_stats_event_sender = http_stats_keeper.sender(); let http_announce_service = Arc::new(AnnounceService::new( diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 17a1e5417..9dc5cc42a 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -253,7 +253,13 @@ mod tests { )); // HTTP core stats - let (http_stats_keeper, http_stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + let http_core_broadcaster = Broadcaster::default(); + let http_stats_repository = Arc::new(Repository::new()); + let http_stats_keeper = Arc::new(Keeper::new( + config.core.tracker_usage_statistics, + http_core_broadcaster.clone(), + )); + let http_stats_event_sender = http_stats_keeper.sender(); if config.core.tracker_usage_statistics 
{ @@ -296,10 +302,13 @@ mod tests { use mockall::mock; use tokio::sync::broadcast::error::SendError; + use crate::event; + use crate::event::sender::Broadcaster; use crate::event::Event; use crate::statistics::event::listener::run_event_listener; + use crate::statistics::keeper::Keeper; + use crate::statistics::repository::Repository; use crate::tests::sample_info_hash; - use crate::{event, statistics}; mock! { HttpStatsEventSender {} diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 13cf68070..c018f2f0b 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -259,13 +259,15 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; + use crate::event; + use crate::event::sender::Broadcaster; use crate::event::{ConnectionContext, Event}; use crate::services::scrape::tests::{ initialize_services_with_configuration, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; use crate::services::scrape::ScrapeService; + use crate::statistics::keeper::Keeper; use crate::tests::sample_info_hash; - use crate::{event, statistics}; #[tokio::test] async fn it_should_return_the_scrape_data_for_a_torrent() { @@ -273,7 +275,9 @@ mod tests { let core_config = Arc::new(configuration.core.clone()); // HTTP core stats - let (http_stats_keeper, _http_stats_repository) = statistics::setup::factory(false); + let http_core_broadcaster = Broadcaster::default(); + let http_stats_keeper = Arc::new(Keeper::new(false, http_core_broadcaster.clone())); + let http_stats_event_sender = http_stats_keeper.sender(); let container = initialize_services_with_configuration(&configuration); @@ -448,13 +452,15 @@ mod tests { use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use torrust_tracker_test_helpers::configuration; + use crate::event; + use 
crate::event::sender::Broadcaster; use crate::event::{ConnectionContext, Event}; use crate::services::scrape::tests::{ initialize_services_with_configuration, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; use crate::services::scrape::ScrapeService; + use crate::statistics::keeper::Keeper; use crate::tests::sample_info_hash; - use crate::{event, statistics}; #[tokio::test] async fn it_should_return_the_zeroed_scrape_data_when_the_tracker_is_running_in_private_mode_and_the_peer_is_not_authenticated( @@ -464,7 +470,9 @@ mod tests { let container = initialize_services_with_configuration(&config); // HTTP core stats - let (http_stats_keeper, _http_stats_repository) = statistics::setup::factory(false); + let http_core_broadcaster = Broadcaster::default(); + let http_stats_keeper = Arc::new(Keeper::new(false, http_core_broadcaster.clone())); + let http_stats_event_sender = http_stats_keeper.sender(); let info_hash = sample_info_hash(); diff --git a/packages/http-tracker-core/src/statistics/mod.rs b/packages/http-tracker-core/src/statistics/mod.rs index d7a8da402..e91181953 100644 --- a/packages/http-tracker-core/src/statistics/mod.rs +++ b/packages/http-tracker-core/src/statistics/mod.rs @@ -3,7 +3,6 @@ pub mod keeper; pub mod metrics; pub mod repository; pub mod services; -pub mod setup; use metrics::Metrics; use torrust_tracker_metrics::metric::description::MetricDescription; diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index 58cb57c53..19132e713 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -75,9 +75,12 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; use torrust_tracker_test_helpers::configuration; + use crate::event::sender::Broadcaster; + use crate::statistics::describe_metrics; use crate::statistics::event::listener::run_event_listener; + use 
crate::statistics::keeper::Keeper; + use crate::statistics::repository::Repository; use crate::statistics::services::{get_metrics, TrackerMetrics}; - use crate::statistics::{self, describe_metrics}; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() @@ -90,7 +93,12 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); // HTTP core stats - let (http_stats_keeper, http_stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + let http_core_broadcaster = Broadcaster::default(); + let http_stats_repository = Arc::new(Repository::new()); + let http_stats_keeper = Arc::new(Keeper::new( + config.core.tracker_usage_statistics, + http_core_broadcaster.clone(), + )); if config.core.tracker_usage_statistics { let _unused = run_event_listener(http_stats_keeper.receiver(), &http_stats_repository); diff --git a/packages/http-tracker-core/src/statistics/setup.rs b/packages/http-tracker-core/src/statistics/setup.rs deleted file mode 100644 index 84f48e09b..000000000 --- a/packages/http-tracker-core/src/statistics/setup.rs +++ /dev/null @@ -1,49 +0,0 @@ -//! Setup for the tracker statistics. -//! -//! The [`factory`] function builds the structs needed for handling the tracker metrics. 
-use std::sync::Arc; - -use super::keeper::Keeper; -use super::repository::Repository; -use crate::event::sender::Broadcaster; - -#[must_use] -pub fn factory(tracker_usage_statistics: bool) -> (Arc, Arc) { - let broadcaster = Broadcaster::default(); - let repository = Arc::new(Repository::new()); - let keeper = Arc::new(Keeper::new(tracker_usage_statistics, broadcaster.clone())); - - (keeper, repository) -} - -#[cfg(test)] -mod test { - use super::factory; - use crate::statistics::event::listener::run_event_listener; - - #[tokio::test] - async fn should_not_send_any_event_when_statistics_are_disabled() { - let tracker_usage_statistics = false; - - // HTTP core stats - let (http_stats_keeper, http_stats_repository) = factory(tracker_usage_statistics); - let http_stats_event_sender = http_stats_keeper.sender(); - - if tracker_usage_statistics { - let _unused = run_event_listener(http_stats_keeper.receiver(), &http_stats_repository); - } - - assert!(http_stats_event_sender.is_none()); - } - - #[tokio::test] - async fn should_send_events_when_statistics_are_enabled() { - let tracker_usage_statistics = true; - - // HTTP core stats - let (http_stats_keeper, _http_stats_repository) = factory(tracker_usage_statistics); - let http_stats_event_sender = http_stats_keeper.sender(); - - assert!(http_stats_event_sender.is_some()); - } -} diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 176c045d6..552bda627 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -123,7 +123,10 @@ pub async fn get_labeled_metrics( mod tests { use std::sync::Arc; + use bittorrent_http_tracker_core::event::sender::Broadcaster; use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; + use bittorrent_http_tracker_core::statistics::keeper::Keeper; + use 
bittorrent_http_tracker_core::statistics::repository::Repository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::{self}; use bittorrent_udp_tracker_core::services::banning::BanService; @@ -148,8 +151,12 @@ mod tests { let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); // HTTP core stats - let (http_stats_keeper, http_stats_repository) = - bittorrent_http_tracker_core::statistics::setup::factory(config.core.tracker_usage_statistics); + let http_core_broadcaster = Broadcaster::default(); + let http_stats_repository = Arc::new(Repository::new()); + let http_stats_keeper = Arc::new(Keeper::new( + config.core.tracker_usage_statistics, + http_core_broadcaster.clone(), + )); if config.core.tracker_usage_statistics { let _unused = run_event_listener(http_stats_keeper.receiver(), &http_stats_repository); From 8e8b1dd6189937ad153c7419cf5481a45ab6cb07 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Apr 2025 18:20:59 +0100 Subject: [PATCH 503/802] refactor: [#1478] inline factory fn in udp core stats --- .../udp-tracker-core/benches/helpers/sync.rs | 7 ++- packages/udp-tracker-core/src/container.rs | 12 ++++- .../udp-tracker-core/src/services/connect.rs | 13 +++-- .../udp-tracker-core/src/statistics/mod.rs | 1 - .../src/statistics/services.rs | 10 +++- .../udp-tracker-core/src/statistics/setup.rs | 49 ------------------- .../src/handlers/announce.rs | 5 +- .../src/handlers/connect.rs | 12 +++-- .../udp-tracker-server/src/handlers/mod.rs | 7 ++- 9 files changed, 52 insertions(+), 64 deletions(-) delete mode 100644 packages/udp-tracker-core/src/statistics/setup.rs diff --git a/packages/udp-tracker-core/benches/helpers/sync.rs b/packages/udp-tracker-core/benches/helpers/sync.rs index 25d2b55b8..a64bb0bdf 100644 --- a/packages/udp-tracker-core/benches/helpers/sync.rs +++ b/packages/udp-tracker-core/benches/helpers/sync.rs @@ -2,8 +2,9 @@ use std::net::{IpAddr, 
Ipv4Addr, SocketAddr}; use std::sync::Arc; use std::time::{Duration, Instant}; +use bittorrent_udp_tracker_core::event::sender::Broadcaster; use bittorrent_udp_tracker_core::services::connect::ConnectService; -use bittorrent_udp_tracker_core::statistics; +use bittorrent_udp_tracker_core::statistics::keeper::Keeper; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::helpers::utils::{sample_ipv4_remote_addr, sample_issue_time}; @@ -14,7 +15,9 @@ pub async fn connect_once(samples: u64) -> Duration { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let (keeper, _repository) = statistics::setup::factory(false); + let udp_core_broadcaster = Broadcaster::default(); + let keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); + let udp_core_stats_event_sender = keeper.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); let start = Instant::now(); diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index 6fe6d2bdf..34d48a7eb 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -4,10 +4,13 @@ use bittorrent_tracker_core::container::TrackerCoreContainer; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, UdpTracker}; +use crate::event::sender::Broadcaster; use crate::services::announce::AnnounceService; use crate::services::banning::BanService; use crate::services::connect::ConnectService; use crate::services::scrape::ScrapeService; +use crate::statistics::keeper::Keeper; +use crate::statistics::repository::Repository; use crate::{event, services, statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; pub struct UdpTrackerCoreContainer { @@ -77,8 +80,13 @@ pub struct UdpTrackerCoreServices { impl UdpTrackerCoreServices { #[must_use] pub fn 
initialize_from(tracker_core_container: &Arc) -> Arc { - let (keeper, udp_core_stats_repository) = - statistics::setup::factory(tracker_core_container.core_config.tracker_usage_statistics); + let udp_core_broadcaster = Broadcaster::default(); + let udp_core_stats_repository = Arc::new(Repository::new()); + let keeper = Arc::new(Keeper::new( + tracker_core_container.core_config.tracker_usage_statistics, + udp_core_broadcaster.clone(), + )); + let udp_core_stats_event_sender = keeper.sender(); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender.clone())); diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index 1626aa8d4..2073ad943 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -65,20 +65,23 @@ mod tests { use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::connection_cookie::make; + use crate::event; + use crate::event::sender::Broadcaster; use crate::event::{ConnectionContext, Event}; use crate::services::connect::ConnectService; use crate::services::tests::{ sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv4_socket_address, sample_ipv6_remote_addr, sample_ipv6_remote_addr_fingerprint, sample_issue_time, MockUdpCoreStatsEventSender, }; - use crate::{event, statistics}; + use crate::statistics::keeper::Keeper; #[tokio::test] async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let (keeper, _repository) = statistics::setup::factory(false); + let udp_core_broadcaster = Broadcaster::default(); + let keeper = 
Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = keeper.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); @@ -98,7 +101,8 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let (keeper, _repository) = statistics::setup::factory(false); + let udp_core_broadcaster = Broadcaster::default(); + let keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = keeper.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); @@ -119,7 +123,8 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let (keeper, _repository) = statistics::setup::factory(false); + let udp_core_broadcaster = Broadcaster::default(); + let keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = keeper.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); diff --git a/packages/udp-tracker-core/src/statistics/mod.rs b/packages/udp-tracker-core/src/statistics/mod.rs index ec37deae7..ba0b24530 100644 --- a/packages/udp-tracker-core/src/statistics/mod.rs +++ b/packages/udp-tracker-core/src/statistics/mod.rs @@ -3,7 +3,6 @@ pub mod keeper; pub mod metrics; pub mod repository; pub mod services; -pub mod setup; use metrics::Metrics; use torrust_tracker_metrics::metric::description::MetricDescription; diff --git a/packages/udp-tracker-core/src/statistics/services.rs b/packages/udp-tracker-core/src/statistics/services.rs index aedd78ecd..cace8d8ba 100644 --- a/packages/udp-tracker-core/src/statistics/services.rs +++ 
b/packages/udp-tracker-core/src/statistics/services.rs @@ -93,7 +93,10 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; use torrust_tracker_test_helpers::configuration; + use crate::event::sender::Broadcaster; use crate::statistics::describe_metrics; + use crate::statistics::keeper::Keeper; + use crate::statistics::repository::Repository; use crate::statistics::services::{get_metrics, TrackerMetrics}; pub fn tracker_configuration() -> Configuration { @@ -106,7 +109,12 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let (_keeper, repository) = crate::statistics::setup::factory(config.core.tracker_usage_statistics); + let udp_core_broadcaster = Broadcaster::default(); + let repository = Arc::new(Repository::new()); + let _keeper = Arc::new(Keeper::new( + config.core.tracker_usage_statistics, + udp_core_broadcaster.clone(), + )); let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), repository.clone()).await; diff --git a/packages/udp-tracker-core/src/statistics/setup.rs b/packages/udp-tracker-core/src/statistics/setup.rs deleted file mode 100644 index 66d7522dc..000000000 --- a/packages/udp-tracker-core/src/statistics/setup.rs +++ /dev/null @@ -1,49 +0,0 @@ -//! Setup for the tracker statistics. -//! -//! The [`factory`] function builds the structs needed for handling the tracker metrics. 
-use std::sync::Arc; - -use super::keeper::Keeper; -use super::repository::Repository; -use crate::event::sender::Broadcaster; - -#[must_use] -pub fn factory(tracker_usage_statistics: bool) -> (Arc, Arc) { - let broadcaster = Broadcaster::default(); - let repository = Arc::new(Repository::new()); - let keeper = Arc::new(Keeper::new(tracker_usage_statistics, broadcaster.clone())); - - (keeper, repository) -} - -#[cfg(test)] -mod test { - use super::factory; - use crate::statistics::event::listener::run_event_listener; - - #[tokio::test] - async fn should_not_send_any_event_when_statistics_are_disabled() { - let tracker_usage_statistics = false; - - // UDP core stats - let (stats_keeper, stats_repository) = factory(tracker_usage_statistics); - let http_stats_event_sender = stats_keeper.sender(); - - if tracker_usage_statistics { - let _unused = run_event_listener(stats_keeper.receiver(), &stats_repository); - } - - assert!(http_stats_event_sender.is_none()); - } - - #[tokio::test] - async fn should_send_events_when_statistics_are_enabled() { - let tracker_usage_statistics = true; - - // UDP core stats - let (stats_keeper, _stats_repository) = factory(tracker_usage_statistics); - let http_stats_event_sender = stats_keeper.sender(); - - assert!(http_stats_event_sender.is_some()); - } -} diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index f12bf3d13..6345d39ab 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -531,7 +531,9 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; + use bittorrent_udp_tracker_core::event::sender::Broadcaster; use bittorrent_udp_tracker_core::services::announce::AnnounceService; + use 
bittorrent_udp_tracker_core::statistics::keeper::Keeper; use mockall::predicate::eq; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; @@ -706,7 +708,8 @@ mod tests { announce_handler: Arc, whitelist_authorization: Arc, ) -> Response { - let (core_keeper, _core_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); + let udp_core_broadcaster = Broadcaster::default(); + let core_keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_keeper.sender(); let (server_keeper, _server_repository) = crate::statistics::setup::factory(false); diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index 264cd426d..c31e89cff 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -59,7 +59,9 @@ mod tests { use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; use bittorrent_udp_tracker_core::connection_cookie::make; use bittorrent_udp_tracker_core::event as core_event; + use bittorrent_udp_tracker_core::event::sender::Broadcaster; use bittorrent_udp_tracker_core::services::connect::ConnectService; + use bittorrent_udp_tracker_core::statistics::keeper::Keeper; use mockall::predicate::eq; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; @@ -81,7 +83,8 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let (core_keeper, _core_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); + let udp_core_broadcaster = Broadcaster::default(); + let core_keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_keeper.sender(); let 
(server_keeper, _server_repository) = crate::statistics::setup::factory(false); @@ -117,7 +120,8 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let (core_keeper, _core_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); + let udp_core_broadcaster = Broadcaster::default(); + let core_keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_keeper.sender(); let (server_keeper, _server_repository) = crate::statistics::setup::factory(false); @@ -153,7 +157,9 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); - let (core_keeper, _core_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); + let udp_core_broadcaster = Broadcaster::default(); + let core_keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); + let udp_core_stats_event_sender = core_keeper.sender(); let (server_keeper, _server_repository) = crate::statistics::setup::factory(false); diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 2905b20a9..18078f987 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -218,8 +218,10 @@ pub(crate) mod tests { use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::connection_cookie::gen_remote_fingerprint; + use bittorrent_udp_tracker_core::event::sender::Broadcaster; use bittorrent_udp_tracker_core::services::announce::AnnounceService; use 
bittorrent_udp_tracker_core::services::scrape::ScrapeService; + use bittorrent_udp_tracker_core::statistics::keeper::Keeper; use bittorrent_udp_tracker_core::{self, event as core_event}; use futures::future::BoxFuture; use mockall::mock; @@ -229,6 +231,7 @@ pub(crate) mod tests { use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; + use crate::statistics::repository::Repository; use crate::{event as server_event, CurrentClock}; pub(crate) struct CoreTrackerServices { @@ -284,7 +287,9 @@ pub(crate) mod tests { )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - let (core_keeper, _core_repository) = bittorrent_udp_tracker_core::statistics::setup::factory(false); + let udp_core_broadcaster = Broadcaster::default(); + let _core_repository = Arc::new(Repository::new()); + let core_keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_keeper.sender(); let (server_keeper, _server_repository) = crate::statistics::setup::factory(false); From a660be8b0f231b4a0e22897caaf2b3d82ddefd4f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Apr 2025 18:35:18 +0100 Subject: [PATCH 504/802] refactor: [#1478] inline factory fn in udp server stats --- .../src/statistics/services.rs | 5 +- packages/udp-tracker-server/src/container.rs | 15 ++++-- .../src/handlers/announce.rs | 8 ++- .../src/handlers/connect.rs | 12 +++-- .../udp-tracker-server/src/handlers/mod.rs | 6 +-- .../udp-tracker-server/src/handlers/scrape.rs | 6 ++- .../udp-tracker-server/src/statistics/mod.rs | 1 - .../src/statistics/services.rs | 12 ++++- .../src/statistics/setup.rs | 49 ------------------- 9 files changed, 46 insertions(+), 68 deletions(-) delete mode 100644 packages/udp-tracker-server/src/statistics/setup.rs diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs 
b/packages/rest-tracker-api-core/src/statistics/services.rs index 552bda627..08997fc63 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -162,11 +162,8 @@ mod tests { let _unused = run_event_listener(http_stats_keeper.receiver(), &http_stats_repository); } - // UDP core stats (not used in this test) - // UDP server stats - let (_udp_server_stats_keeper, udp_server_stats_repository) = - torrust_udp_tracker_server::statistics::setup::factory(config.core.tracker_usage_statistics); + let udp_server_stats_repository = Arc::new(torrust_udp_tracker_server::statistics::repository::Repository::new()); let tracker_metrics = get_metrics( in_memory_torrent_repository.clone(), diff --git a/packages/udp-tracker-server/src/container.rs b/packages/udp-tracker-server/src/container.rs index 4898cb57d..3cba43d0e 100644 --- a/packages/udp-tracker-server/src/container.rs +++ b/packages/udp-tracker-server/src/container.rs @@ -2,7 +2,11 @@ use std::sync::Arc; use torrust_tracker_configuration::Core; -use crate::{event, statistics}; +use crate::event::sender::Broadcaster; +use crate::event::{self}; +use crate::statistics; +use crate::statistics::keeper::Keeper; +use crate::statistics::repository::Repository; pub struct UdpTrackerServerContainer { pub stats_keeper: Arc, @@ -32,8 +36,13 @@ pub struct UdpTrackerServerServices { impl UdpTrackerServerServices { #[must_use] pub fn initialize(core_config: &Arc) -> Arc { - let (udp_server_stats_keeper, udp_server_stats_repository) = - statistics::setup::factory(core_config.tracker_usage_statistics); + let udp_server_broadcaster = Broadcaster::default(); + let udp_server_stats_repository = Arc::new(Repository::new()); + let udp_server_stats_keeper = Arc::new(Keeper::new( + core_config.tracker_usage_statistics, + udp_server_broadcaster.clone(), + )); + let udp_server_stats_event_sender = udp_server_stats_keeper.sender(); Arc::new(Self { diff --git 
a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 6345d39ab..3879816c5 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -374,7 +374,9 @@ mod tests { core_tracker_services: Arc, core_udp_tracker_services: Arc, ) -> Response { - let (keeper, _repository) = crate::statistics::setup::factory(false); + let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); + let keeper = Arc::new(crate::statistics::keeper::Keeper::new(false, udp_server_broadcaster.clone())); + let udp_server_stats_event_sender = keeper.sender(); let client_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); @@ -712,7 +714,9 @@ mod tests { let core_keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_keeper.sender(); - let (server_keeper, _server_repository) = crate::statistics::setup::factory(false); + let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); + let server_keeper = Arc::new(crate::statistics::keeper::Keeper::new(false, udp_server_broadcaster.clone())); + let udp_server_stats_event_sender = server_keeper.sender(); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index c31e89cff..6e9c75612 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -87,7 +87,9 @@ mod tests { let core_keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_keeper.sender(); - let (server_keeper, _server_repository) = crate::statistics::setup::factory(false); + let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); + let server_keeper = Arc::new(crate::statistics::keeper::Keeper::new(false, 
udp_server_broadcaster.clone())); + let udp_server_stats_event_sender = server_keeper.sender(); let request = ConnectRequest { @@ -124,7 +126,9 @@ mod tests { let core_keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_keeper.sender(); - let (server_keeper, _server_repository) = crate::statistics::setup::factory(false); + let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); + let server_keeper = Arc::new(crate::statistics::keeper::Keeper::new(false, udp_server_broadcaster.clone())); + let udp_server_stats_event_sender = server_keeper.sender(); let request = ConnectRequest { @@ -162,7 +166,9 @@ mod tests { let udp_core_stats_event_sender = core_keeper.sender(); - let (server_keeper, _server_repository) = crate::statistics::setup::factory(false); + let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); + let server_keeper = Arc::new(crate::statistics::keeper::Keeper::new(false, udp_server_broadcaster.clone())); + let udp_server_stats_event_sender = server_keeper.sender(); let request = ConnectRequest { diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 18078f987..b4845c043 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -231,7 +231,6 @@ pub(crate) mod tests { use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; - use crate::statistics::repository::Repository; use crate::{event as server_event, CurrentClock}; pub(crate) struct CoreTrackerServices { @@ -288,11 +287,12 @@ pub(crate) mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); let udp_core_broadcaster = Broadcaster::default(); - let _core_repository = Arc::new(Repository::new()); let core_keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); 
let udp_core_stats_event_sender = core_keeper.sender(); - let (server_keeper, _server_repository) = crate::statistics::setup::factory(false); + let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); + let server_keeper = Arc::new(crate::statistics::keeper::Keeper::new(false, udp_server_broadcaster.clone())); + let udp_server_stats_event_sender = server_keeper.sender(); let announce_service = Arc::new(AnnounceService::new( diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index 78a01fe6d..5f85e2dfa 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -94,11 +94,13 @@ mod tests { use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + use crate::event::sender::Broadcaster; use crate::handlers::handle_scrape; use crate::handlers::tests::{ initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, sample_issue_time, CoreTrackerServices, CoreUdpTrackerServices, TorrentPeerBuilder, }; + use crate::statistics::keeper::Keeper; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { TorrentScrapeStatistics { @@ -178,7 +180,9 @@ mod tests { core_tracker_services: Arc, core_udp_tracker_services: Arc, ) -> Response { - let (keeper, _repository) = crate::statistics::setup::factory(false); + let udp_server_broadcaster = Broadcaster::default(); + let keeper = Arc::new(Keeper::new(false, udp_server_broadcaster.clone())); + let udp_server_stats_event_sender = keeper.sender(); let client_socket_addr = sample_ipv4_remote_addr(); diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index 45c696fdb..16b7adbd8 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ 
b/packages/udp-tracker-server/src/statistics/mod.rs @@ -3,7 +3,6 @@ pub mod keeper; pub mod metrics; pub mod repository; pub mod services; -pub mod setup; use metrics::Metrics; use torrust_tracker_metrics::metric::description::MetricDescription; diff --git a/packages/udp-tracker-server/src/statistics/services.rs b/packages/udp-tracker-server/src/statistics/services.rs index f8c385535..1593031c1 100644 --- a/packages/udp-tracker-server/src/statistics/services.rs +++ b/packages/udp-tracker-server/src/statistics/services.rs @@ -113,8 +113,11 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; use torrust_tracker_test_helpers::configuration; + use crate::event::sender::Broadcaster; + use crate::statistics::describe_metrics; + use crate::statistics::keeper::Keeper; + use crate::statistics::repository::Repository; use crate::statistics::services::{get_metrics, TrackerMetrics}; - use crate::statistics::{self, describe_metrics}; pub fn tracker_configuration() -> Configuration { configuration::ephemeral() @@ -127,7 +130,12 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let (_keeper, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); + let udp_server_broadcaster = Broadcaster::default(); + let stats_repository = Arc::new(Repository::new()); + let _keeper = Arc::new(Keeper::new( + config.core.tracker_usage_statistics, + udp_server_broadcaster.clone(), + )); let tracker_metrics = get_metrics( in_memory_torrent_repository.clone(), diff --git a/packages/udp-tracker-server/src/statistics/setup.rs b/packages/udp-tracker-server/src/statistics/setup.rs deleted file mode 100644 index 2e9881c3d..000000000 --- a/packages/udp-tracker-server/src/statistics/setup.rs +++ /dev/null @@ -1,49 +0,0 @@ -//! Setup for the tracker statistics. -//! -//! 
The [`factory`] function builds the structs needed for handling the tracker metrics. -use std::sync::Arc; - -use super::keeper::Keeper; -use super::repository::Repository; -use crate::event::sender::Broadcaster; - -#[must_use] -pub fn factory(tracker_usage_statistics: bool) -> (Arc, Arc) { - let broadcaster = Broadcaster::default(); - let repository = Arc::new(Repository::new()); - let keeper = Arc::new(Keeper::new(tracker_usage_statistics, broadcaster.clone())); - - (keeper, repository) -} - -#[cfg(test)] -mod test { - use super::factory; - use crate::statistics::event::listener::run_event_listener; - - #[tokio::test] - async fn should_not_send_any_event_when_statistics_are_disabled() { - let tracker_usage_statistics = false; - - // HTTP core stats - let (stats_keeper, stats_repository) = factory(tracker_usage_statistics); - let stats_event_sender = stats_keeper.sender(); - - if tracker_usage_statistics { - let _unused = run_event_listener(stats_keeper.receiver(), &stats_repository); - } - - assert!(stats_event_sender.is_none()); - } - - #[tokio::test] - async fn should_send_events_when_statistics_are_enabled() { - let tracker_usage_statistics = true; - - // HTTP core stats - let (stats_keeper, _stats_repository) = factory(tracker_usage_statistics); - let stats_event_sender = stats_keeper.sender(); - - assert!(stats_event_sender.is_some()); - } -} From 8a9314beff884f7b2dd1c2be90dc7464df542b62 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Apr 2025 18:41:24 +0100 Subject: [PATCH 505/802] refactor: [#1478] rename Keeper to EventBus --- packages/axum-http-tracker-server/src/server.rs | 4 ++-- .../src/v1/handlers/announce.rs | 4 ++-- .../src/v1/handlers/scrape.rs | 4 ++-- packages/http-tracker-core/benches/helpers/util.rs | 4 ++-- packages/http-tracker-core/src/container.rs | 8 ++++---- .../http-tracker-core/src/services/announce.rs | 4 ++-- packages/http-tracker-core/src/services/scrape.rs | 8 ++++---- .../src/statistics/{keeper.rs => event_bus.rs} | 6 
+++--- packages/http-tracker-core/src/statistics/mod.rs | 2 +- .../http-tracker-core/src/statistics/services.rs | 4 ++-- .../src/statistics/services.rs | 4 ++-- packages/udp-tracker-core/benches/helpers/sync.rs | 4 ++-- packages/udp-tracker-core/src/container.rs | 8 ++++---- packages/udp-tracker-core/src/services/connect.rs | 8 ++++---- .../src/statistics/{keeper.rs => event_bus.rs} | 6 +++--- packages/udp-tracker-core/src/statistics/mod.rs | 2 +- .../udp-tracker-core/src/statistics/services.rs | 4 ++-- packages/udp-tracker-server/src/container.rs | 8 ++++---- .../udp-tracker-server/src/handlers/announce.rs | 8 ++++---- .../udp-tracker-server/src/handlers/connect.rs | 14 +++++++------- packages/udp-tracker-server/src/handlers/mod.rs | 9 ++++++--- packages/udp-tracker-server/src/handlers/scrape.rs | 4 ++-- .../src/statistics/{keeper.rs => event_bus.rs} | 6 +++--- packages/udp-tracker-server/src/statistics/mod.rs | 2 +- .../udp-tracker-server/src/statistics/services.rs | 4 ++-- 25 files changed, 71 insertions(+), 68 deletions(-) rename packages/http-tracker-core/src/statistics/{keeper.rs => event_bus.rs} (93%) rename packages/udp-tracker-core/src/statistics/{keeper.rs => event_bus.rs} (93%) rename packages/udp-tracker-server/src/statistics/{keeper.rs => event_bus.rs} (93%) diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 8a3b5325a..a2d0bc52c 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -252,7 +252,7 @@ mod tests { use bittorrent_http_tracker_core::services::announce::AnnounceService; use bittorrent_http_tracker_core::services::scrape::ScrapeService; use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; - use bittorrent_http_tracker_core::statistics::keeper::Keeper; + use bittorrent_http_tracker_core::statistics::event_bus::EventBus; use 
bittorrent_http_tracker_core::statistics::repository::Repository; use bittorrent_tracker_core::container::TrackerCoreContainer; use torrust_axum_server::tsl::make_rust_tls; @@ -277,7 +277,7 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); - let http_stats_keeper = Arc::new(Keeper::new( + let http_stats_keeper = Arc::new(EventBus::new( configuration.core.tracker_usage_statistics, http_core_broadcaster.clone(), )); diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 98cf34259..31c1df471 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -110,7 +110,7 @@ mod tests { use bittorrent_http_tracker_core::event::sender::Broadcaster; use bittorrent_http_tracker_core::services::announce::AnnounceService; use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; - use bittorrent_http_tracker_core::statistics::keeper::Keeper; + use bittorrent_http_tracker_core::statistics::event_bus::EventBus; use bittorrent_http_tracker_core::statistics::repository::Repository; use bittorrent_http_tracker_protocol::v1::requests::announce::Announce; use bittorrent_http_tracker_protocol::v1::responses; @@ -167,7 +167,7 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); - let http_stats_keeper = Arc::new(Keeper::new( + let http_stats_keeper = Arc::new(EventBus::new( config.core.tracker_usage_statistics, http_core_broadcaster.clone(), )); diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index fc88bbde9..3a026a77c 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ 
b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -85,7 +85,7 @@ mod tests { use bittorrent_http_tracker_core::event::sender::Broadcaster; use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; - use bittorrent_http_tracker_core::statistics::keeper::Keeper; + use bittorrent_http_tracker_core::statistics::event_bus::EventBus; use bittorrent_http_tracker_core::statistics::repository::Repository; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; use bittorrent_http_tracker_protocol::v1::responses; @@ -138,7 +138,7 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); - let http_stats_keeper = Arc::new(Keeper::new( + let http_stats_keeper = Arc::new(EventBus::new( config.core.tracker_usage_statistics, http_core_broadcaster.clone(), )); diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index 9c45417d4..82a00d5b9 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -6,7 +6,7 @@ use bittorrent_http_tracker_core::event; use bittorrent_http_tracker_core::event::sender::Broadcaster; use bittorrent_http_tracker_core::event::Event; use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; -use bittorrent_http_tracker_core::statistics::keeper::Keeper; +use bittorrent_http_tracker_core::statistics::event_bus::EventBus; use bittorrent_http_tracker_core::statistics::repository::Repository; use bittorrent_http_tracker_protocol::v1::requests::announce::Announce; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; @@ -62,7 +62,7 @@ pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> ( // HTTP core stats let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); - let 
http_stats_keeper = Arc::new(Keeper::new( + let http_stats_keeper = Arc::new(EventBus::new( config.core.tracker_usage_statistics, http_core_broadcaster.clone(), )); diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 060e6289a..3cf344755 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -6,7 +6,7 @@ use torrust_tracker_configuration::{Core, HttpTracker}; use crate::event::sender::Broadcaster; use crate::services::announce::AnnounceService; use crate::services::scrape::ScrapeService; -use crate::statistics::keeper::Keeper; +use crate::statistics::event_bus::EventBus; use crate::statistics::repository::Repository; use crate::{event, services, statistics}; @@ -16,7 +16,7 @@ pub struct HttpTrackerCoreContainer { pub tracker_core_container: Arc, // `HttpTrackerCoreServices` - pub stats_keeper: Arc, + pub stats_keeper: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, pub announce_service: Arc, @@ -58,7 +58,7 @@ impl HttpTrackerCoreContainer { } pub struct HttpTrackerCoreServices { - pub stats_keeper: Arc, + pub stats_keeper: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, pub announce_service: Arc, @@ -71,7 +71,7 @@ impl HttpTrackerCoreServices { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); - let http_stats_keeper = Arc::new(Keeper::new( + let http_stats_keeper = Arc::new(EventBus::new( tracker_core_container.core_config.tracker_usage_statistics, http_core_broadcaster.clone(), )); diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 9dc5cc42a..a1e69d2cc 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -255,7 +255,7 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); 
let http_stats_repository = Arc::new(Repository::new()); - let http_stats_keeper = Arc::new(Keeper::new( + let http_stats_keeper = Arc::new(EventBus::new( config.core.tracker_usage_statistics, http_core_broadcaster.clone(), )); @@ -306,7 +306,7 @@ mod tests { use crate::event::sender::Broadcaster; use crate::event::Event; use crate::statistics::event::listener::run_event_listener; - use crate::statistics::keeper::Keeper; + use crate::statistics::event_bus::EventBus; use crate::statistics::repository::Repository; use crate::tests::sample_info_hash; diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index c018f2f0b..2b5f74c83 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -266,7 +266,7 @@ mod tests { initialize_services_with_configuration, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; use crate::services::scrape::ScrapeService; - use crate::statistics::keeper::Keeper; + use crate::statistics::event_bus::EventBus; use crate::tests::sample_info_hash; #[tokio::test] @@ -276,7 +276,7 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); - let http_stats_keeper = Arc::new(Keeper::new(false, http_core_broadcaster.clone())); + let http_stats_keeper = Arc::new(EventBus::new(false, http_core_broadcaster.clone())); let http_stats_event_sender = http_stats_keeper.sender(); @@ -459,7 +459,7 @@ mod tests { initialize_services_with_configuration, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; use crate::services::scrape::ScrapeService; - use crate::statistics::keeper::Keeper; + use crate::statistics::event_bus::EventBus; use crate::tests::sample_info_hash; #[tokio::test] @@ -471,7 +471,7 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); - let http_stats_keeper = Arc::new(Keeper::new(false, http_core_broadcaster.clone())); + let http_stats_keeper = 
Arc::new(EventBus::new(false, http_core_broadcaster.clone())); let http_stats_event_sender = http_stats_keeper.sender(); diff --git a/packages/http-tracker-core/src/statistics/keeper.rs b/packages/http-tracker-core/src/statistics/event_bus.rs similarity index 93% rename from packages/http-tracker-core/src/statistics/keeper.rs rename to packages/http-tracker-core/src/statistics/event_bus.rs index 9ae0564ce..2d22c0a90 100644 --- a/packages/http-tracker-core/src/statistics/keeper.rs +++ b/packages/http-tracker-core/src/statistics/event_bus.rs @@ -5,12 +5,12 @@ use tokio::sync::broadcast::Receiver; use crate::event::sender::{self, Broadcaster}; use crate::event::Event; -pub struct Keeper { +pub struct EventBus { pub enable_sender: bool, pub broadcaster: Broadcaster, } -impl Default for Keeper { +impl Default for EventBus { fn default() -> Self { let enable_sender = true; let broadcaster = Broadcaster::default(); @@ -19,7 +19,7 @@ impl Default for Keeper { } } -impl Keeper { +impl EventBus { #[must_use] pub fn new(enable_sender: bool, broadcaster: Broadcaster) -> Self { Self { diff --git a/packages/http-tracker-core/src/statistics/mod.rs b/packages/http-tracker-core/src/statistics/mod.rs index e91181953..da2f0acd4 100644 --- a/packages/http-tracker-core/src/statistics/mod.rs +++ b/packages/http-tracker-core/src/statistics/mod.rs @@ -1,5 +1,5 @@ pub mod event; -pub mod keeper; +pub mod event_bus; pub mod metrics; pub mod repository; pub mod services; diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index 19132e713..c695f6d4f 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -78,7 +78,7 @@ mod tests { use crate::event::sender::Broadcaster; use crate::statistics::describe_metrics; use crate::statistics::event::listener::run_event_listener; - use crate::statistics::keeper::Keeper; + use 
crate::statistics::event_bus::EventBus; use crate::statistics::repository::Repository; use crate::statistics::services::{get_metrics, TrackerMetrics}; @@ -95,7 +95,7 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); - let http_stats_keeper = Arc::new(Keeper::new( + let http_stats_keeper = Arc::new(EventBus::new( config.core.tracker_usage_statistics, http_core_broadcaster.clone(), )); diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 08997fc63..cb09a3907 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -125,7 +125,7 @@ mod tests { use bittorrent_http_tracker_core::event::sender::Broadcaster; use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; - use bittorrent_http_tracker_core::statistics::keeper::Keeper; + use bittorrent_http_tracker_core::statistics::event_bus::EventBus; use bittorrent_http_tracker_core::statistics::repository::Repository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::{self}; @@ -153,7 +153,7 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); - let http_stats_keeper = Arc::new(Keeper::new( + let http_stats_keeper = Arc::new(EventBus::new( config.core.tracker_usage_statistics, http_core_broadcaster.clone(), )); diff --git a/packages/udp-tracker-core/benches/helpers/sync.rs b/packages/udp-tracker-core/benches/helpers/sync.rs index a64bb0bdf..c465ae996 100644 --- a/packages/udp-tracker-core/benches/helpers/sync.rs +++ b/packages/udp-tracker-core/benches/helpers/sync.rs @@ -4,7 +4,7 @@ use std::time::{Duration, Instant}; use bittorrent_udp_tracker_core::event::sender::Broadcaster; use 
bittorrent_udp_tracker_core::services::connect::ConnectService; -use bittorrent_udp_tracker_core::statistics::keeper::Keeper; +use bittorrent_udp_tracker_core::statistics::event_bus::EventBus; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::helpers::utils::{sample_ipv4_remote_addr, sample_issue_time}; @@ -16,7 +16,7 @@ pub async fn connect_once(samples: u64) -> Duration { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); + let keeper = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = keeper.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index 34d48a7eb..bc0d8ba4b 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -9,7 +9,7 @@ use crate::services::announce::AnnounceService; use crate::services::banning::BanService; use crate::services::connect::ConnectService; use crate::services::scrape::ScrapeService; -use crate::statistics::keeper::Keeper; +use crate::statistics::event_bus::EventBus; use crate::statistics::repository::Repository; use crate::{event, services, statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; @@ -19,7 +19,7 @@ pub struct UdpTrackerCoreContainer { pub tracker_core_container: Arc, // `UdpTrackerCoreServices` - pub stats_keeper: Arc, + pub stats_keeper: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, pub ban_service: Arc>, @@ -68,7 +68,7 @@ impl UdpTrackerCoreContainer { } pub struct UdpTrackerCoreServices { - pub stats_keeper: Arc, + pub stats_keeper: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, pub ban_service: Arc>, @@ -82,7 +82,7 @@ impl UdpTrackerCoreServices { pub 
fn initialize_from(tracker_core_container: &Arc) -> Arc { let udp_core_broadcaster = Broadcaster::default(); let udp_core_stats_repository = Arc::new(Repository::new()); - let keeper = Arc::new(Keeper::new( + let keeper = Arc::new(EventBus::new( tracker_core_container.core_config.tracker_usage_statistics, udp_core_broadcaster.clone(), )); diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index 2073ad943..7ea8b0882 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -73,7 +73,7 @@ mod tests { sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv4_socket_address, sample_ipv6_remote_addr, sample_ipv6_remote_addr_fingerprint, sample_issue_time, MockUdpCoreStatsEventSender, }; - use crate::statistics::keeper::Keeper; + use crate::statistics::event_bus::EventBus; #[tokio::test] async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { @@ -81,7 +81,7 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); + let keeper = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = keeper.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); @@ -102,7 +102,7 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); + let keeper = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = keeper.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); @@ -124,7 +124,7 @@ mod tests { let 
server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); + let keeper = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = keeper.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); diff --git a/packages/udp-tracker-core/src/statistics/keeper.rs b/packages/udp-tracker-core/src/statistics/event_bus.rs similarity index 93% rename from packages/udp-tracker-core/src/statistics/keeper.rs rename to packages/udp-tracker-core/src/statistics/event_bus.rs index 9ae0564ce..2d22c0a90 100644 --- a/packages/udp-tracker-core/src/statistics/keeper.rs +++ b/packages/udp-tracker-core/src/statistics/event_bus.rs @@ -5,12 +5,12 @@ use tokio::sync::broadcast::Receiver; use crate::event::sender::{self, Broadcaster}; use crate::event::Event; -pub struct Keeper { +pub struct EventBus { pub enable_sender: bool, pub broadcaster: Broadcaster, } -impl Default for Keeper { +impl Default for EventBus { fn default() -> Self { let enable_sender = true; let broadcaster = Broadcaster::default(); @@ -19,7 +19,7 @@ impl Default for Keeper { } } -impl Keeper { +impl EventBus { #[must_use] pub fn new(enable_sender: bool, broadcaster: Broadcaster) -> Self { Self { diff --git a/packages/udp-tracker-core/src/statistics/mod.rs b/packages/udp-tracker-core/src/statistics/mod.rs index ba0b24530..f4e6f06a6 100644 --- a/packages/udp-tracker-core/src/statistics/mod.rs +++ b/packages/udp-tracker-core/src/statistics/mod.rs @@ -1,5 +1,5 @@ pub mod event; -pub mod keeper; +pub mod event_bus; pub mod metrics; pub mod repository; pub mod services; diff --git a/packages/udp-tracker-core/src/statistics/services.rs b/packages/udp-tracker-core/src/statistics/services.rs index cace8d8ba..22d84c931 100644 --- a/packages/udp-tracker-core/src/statistics/services.rs +++ 
b/packages/udp-tracker-core/src/statistics/services.rs @@ -95,7 +95,7 @@ mod tests { use crate::event::sender::Broadcaster; use crate::statistics::describe_metrics; - use crate::statistics::keeper::Keeper; + use crate::statistics::event_bus::EventBus; use crate::statistics::repository::Repository; use crate::statistics::services::{get_metrics, TrackerMetrics}; @@ -111,7 +111,7 @@ mod tests { let udp_core_broadcaster = Broadcaster::default(); let repository = Arc::new(Repository::new()); - let _keeper = Arc::new(Keeper::new( + let _keeper = Arc::new(EventBus::new( config.core.tracker_usage_statistics, udp_core_broadcaster.clone(), )); diff --git a/packages/udp-tracker-server/src/container.rs b/packages/udp-tracker-server/src/container.rs index 3cba43d0e..0ad611070 100644 --- a/packages/udp-tracker-server/src/container.rs +++ b/packages/udp-tracker-server/src/container.rs @@ -5,11 +5,11 @@ use torrust_tracker_configuration::Core; use crate::event::sender::Broadcaster; use crate::event::{self}; use crate::statistics; -use crate::statistics::keeper::Keeper; +use crate::statistics::event_bus::EventBus; use crate::statistics::repository::Repository; pub struct UdpTrackerServerContainer { - pub stats_keeper: Arc, + pub stats_keeper: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, } @@ -28,7 +28,7 @@ impl UdpTrackerServerContainer { } pub struct UdpTrackerServerServices { - pub stats_keeper: Arc, + pub stats_keeper: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, } @@ -38,7 +38,7 @@ impl UdpTrackerServerServices { pub fn initialize(core_config: &Arc) -> Arc { let udp_server_broadcaster = Broadcaster::default(); let udp_server_stats_repository = Arc::new(Repository::new()); - let udp_server_stats_keeper = Arc::new(Keeper::new( + let udp_server_stats_keeper = Arc::new(EventBus::new( core_config.tracker_usage_statistics, udp_server_broadcaster.clone(), )); diff --git a/packages/udp-tracker-server/src/handlers/announce.rs 
b/packages/udp-tracker-server/src/handlers/announce.rs index 3879816c5..d060269f5 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -375,7 +375,7 @@ mod tests { core_udp_tracker_services: Arc, ) -> Response { let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let keeper = Arc::new(crate::statistics::keeper::Keeper::new(false, udp_server_broadcaster.clone())); + let keeper = Arc::new(crate::statistics::event_bus::EventBus::new(false, udp_server_broadcaster.clone())); let udp_server_stats_event_sender = keeper.sender(); @@ -535,7 +535,7 @@ mod tests { use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use bittorrent_udp_tracker_core::event::sender::Broadcaster; use bittorrent_udp_tracker_core::services::announce::AnnounceService; - use bittorrent_udp_tracker_core::statistics::keeper::Keeper; + use bittorrent_udp_tracker_core::statistics::event_bus::EventBus; use mockall::predicate::eq; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; @@ -711,11 +711,11 @@ mod tests { whitelist_authorization: Arc, ) -> Response { let udp_core_broadcaster = Broadcaster::default(); - let core_keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); + let core_keeper = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_keeper.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_keeper = Arc::new(crate::statistics::keeper::Keeper::new(false, udp_server_broadcaster.clone())); + let server_keeper = Arc::new(crate::statistics::event_bus::EventBus::new(false, udp_server_broadcaster.clone())); let udp_server_stats_event_sender = server_keeper.sender(); diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index 
6e9c75612..9bfc8eaa6 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -61,7 +61,7 @@ mod tests { use bittorrent_udp_tracker_core::event as core_event; use bittorrent_udp_tracker_core::event::sender::Broadcaster; use bittorrent_udp_tracker_core::services::connect::ConnectService; - use bittorrent_udp_tracker_core::statistics::keeper::Keeper; + use bittorrent_udp_tracker_core::statistics::event_bus::EventBus; use mockall::predicate::eq; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; @@ -84,11 +84,11 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let core_keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); + let core_keeper = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_keeper.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_keeper = Arc::new(crate::statistics::keeper::Keeper::new(false, udp_server_broadcaster.clone())); + let server_keeper = Arc::new(crate::statistics::event_bus::EventBus::new(false, udp_server_broadcaster.clone())); let udp_server_stats_event_sender = server_keeper.sender(); @@ -123,11 +123,11 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let core_keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); + let core_keeper = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_keeper.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_keeper = Arc::new(crate::statistics::keeper::Keeper::new(false, udp_server_broadcaster.clone())); + let server_keeper = 
Arc::new(crate::statistics::event_bus::EventBus::new(false, udp_server_broadcaster.clone())); let udp_server_stats_event_sender = server_keeper.sender(); @@ -162,12 +162,12 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let core_keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); + let core_keeper = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_keeper.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_keeper = Arc::new(crate::statistics::keeper::Keeper::new(false, udp_server_broadcaster.clone())); + let server_keeper = Arc::new(crate::statistics::event_bus::EventBus::new(false, udp_server_broadcaster.clone())); let udp_server_stats_event_sender = server_keeper.sender(); diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index b4845c043..b53f1e2ee 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -221,7 +221,7 @@ pub(crate) mod tests { use bittorrent_udp_tracker_core::event::sender::Broadcaster; use bittorrent_udp_tracker_core::services::announce::AnnounceService; use bittorrent_udp_tracker_core::services::scrape::ScrapeService; - use bittorrent_udp_tracker_core::statistics::keeper::Keeper; + use bittorrent_udp_tracker_core::statistics::event_bus::EventBus; use bittorrent_udp_tracker_core::{self, event as core_event}; use futures::future::BoxFuture; use mockall::mock; @@ -287,11 +287,14 @@ pub(crate) mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); let udp_core_broadcaster = Broadcaster::default(); - let core_keeper = Arc::new(Keeper::new(false, udp_core_broadcaster.clone())); + let core_keeper = Arc::new(EventBus::new(false, 
udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_keeper.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_keeper = Arc::new(crate::statistics::keeper::Keeper::new(false, udp_server_broadcaster.clone())); + let server_keeper = Arc::new(crate::statistics::event_bus::EventBus::new( + false, + udp_server_broadcaster.clone(), + )); let udp_server_stats_event_sender = server_keeper.sender(); diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index 5f85e2dfa..1533a2146 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -100,7 +100,7 @@ mod tests { initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, sample_issue_time, CoreTrackerServices, CoreUdpTrackerServices, TorrentPeerBuilder, }; - use crate::statistics::keeper::Keeper; + use crate::statistics::event_bus::EventBus; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { TorrentScrapeStatistics { @@ -181,7 +181,7 @@ mod tests { core_udp_tracker_services: Arc, ) -> Response { let udp_server_broadcaster = Broadcaster::default(); - let keeper = Arc::new(Keeper::new(false, udp_server_broadcaster.clone())); + let keeper = Arc::new(EventBus::new(false, udp_server_broadcaster.clone())); let udp_server_stats_event_sender = keeper.sender(); diff --git a/packages/udp-tracker-server/src/statistics/keeper.rs b/packages/udp-tracker-server/src/statistics/event_bus.rs similarity index 93% rename from packages/udp-tracker-server/src/statistics/keeper.rs rename to packages/udp-tracker-server/src/statistics/event_bus.rs index 9ae0564ce..2d22c0a90 100644 --- a/packages/udp-tracker-server/src/statistics/keeper.rs +++ b/packages/udp-tracker-server/src/statistics/event_bus.rs @@ -5,12 +5,12 @@ use tokio::sync::broadcast::Receiver; use crate::event::sender::{self, Broadcaster}; 
use crate::event::Event; -pub struct Keeper { +pub struct EventBus { pub enable_sender: bool, pub broadcaster: Broadcaster, } -impl Default for Keeper { +impl Default for EventBus { fn default() -> Self { let enable_sender = true; let broadcaster = Broadcaster::default(); @@ -19,7 +19,7 @@ impl Default for Keeper { } } -impl Keeper { +impl EventBus { #[must_use] pub fn new(enable_sender: bool, broadcaster: Broadcaster) -> Self { Self { diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index 16b7adbd8..9b6afc889 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -1,5 +1,5 @@ pub mod event; -pub mod keeper; +pub mod event_bus; pub mod metrics; pub mod repository; pub mod services; diff --git a/packages/udp-tracker-server/src/statistics/services.rs b/packages/udp-tracker-server/src/statistics/services.rs index 1593031c1..ca2cff7e8 100644 --- a/packages/udp-tracker-server/src/statistics/services.rs +++ b/packages/udp-tracker-server/src/statistics/services.rs @@ -115,7 +115,7 @@ mod tests { use crate::event::sender::Broadcaster; use crate::statistics::describe_metrics; - use crate::statistics::keeper::Keeper; + use crate::statistics::event_bus::EventBus; use crate::statistics::repository::Repository; use crate::statistics::services::{get_metrics, TrackerMetrics}; @@ -132,7 +132,7 @@ mod tests { let udp_server_broadcaster = Broadcaster::default(); let stats_repository = Arc::new(Repository::new()); - let _keeper = Arc::new(Keeper::new( + let _keeper = Arc::new(EventBus::new( config.core.tracker_usage_statistics, udp_server_broadcaster.clone(), )); From c103b60f889dc45d808c699b9e8366934de8d3df Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Apr 2025 18:47:59 +0100 Subject: [PATCH 506/802] refactor: [#1478] rename Keeper variables --- .../axum-http-tracker-server/src/server.rs | 8 ++--- .../src/v1/handlers/announce.rs | 6 ++-- 
.../src/v1/handlers/scrape.rs | 6 ++-- .../http-tracker-core/benches/helpers/util.rs | 6 ++-- packages/http-tracker-core/src/container.rs | 6 ++-- .../src/services/announce.rs | 6 ++-- .../http-tracker-core/src/services/scrape.rs | 8 ++--- .../src/statistics/services.rs | 4 +-- .../src/statistics/services.rs | 4 +-- .../udp-tracker-core/benches/helpers/sync.rs | 4 +-- packages/udp-tracker-core/src/container.rs | 6 ++-- .../udp-tracker-core/src/services/connect.rs | 12 +++---- .../src/statistics/services.rs | 15 --------- packages/udp-tracker-server/src/container.rs | 6 ++-- .../src/handlers/announce.rs | 18 ++++++---- .../src/handlers/connect.rs | 33 ++++++++++++------- .../udp-tracker-server/src/handlers/mod.rs | 8 ++--- .../udp-tracker-server/src/handlers/scrape.rs | 4 +-- .../src/statistics/services.rs | 15 --------- 19 files changed, 80 insertions(+), 95 deletions(-) diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index a2d0bc52c..9367e6a77 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -277,15 +277,15 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); - let http_stats_keeper = Arc::new(EventBus::new( + let http_stats_event_bus = Arc::new(EventBus::new( configuration.core.tracker_usage_statistics, http_core_broadcaster.clone(), )); - let http_stats_event_sender = http_stats_keeper.sender(); + let http_stats_event_sender = http_stats_event_bus.sender(); if configuration.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_keeper.receiver(), &http_stats_repository); + let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); } let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); @@ -308,7 +308,7 @@ mod tests { HttpTrackerCoreContainer { 
tracker_core_container, http_tracker_config, - stats_keeper: http_stats_keeper, + stats_keeper: http_stats_event_bus, stats_event_sender: http_stats_event_sender, stats_repository: http_stats_repository, announce_service, diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 31c1df471..64d01dde6 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -167,15 +167,15 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); - let http_stats_keeper = Arc::new(EventBus::new( + let http_stats_event_bus = Arc::new(EventBus::new( config.core.tracker_usage_statistics, http_core_broadcaster.clone(), )); - let http_stats_event_sender = http_stats_keeper.sender(); + let http_stats_event_sender = http_stats_event_bus.sender(); if config.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_keeper.receiver(), &http_stats_repository); + let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); } let announce_service = Arc::new(AnnounceService::new( diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index 3a026a77c..ae99ea89f 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -138,15 +138,15 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); - let http_stats_keeper = Arc::new(EventBus::new( + let http_stats_event_bus = Arc::new(EventBus::new( config.core.tracker_usage_statistics, http_core_broadcaster.clone(), )); - let http_stats_event_sender = http_stats_keeper.sender(); + let 
http_stats_event_sender = http_stats_event_bus.sender(); if config.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_keeper.receiver(), &http_stats_repository); + let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); } ( diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index 82a00d5b9..532ee21bf 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -62,15 +62,15 @@ pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> ( // HTTP core stats let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); - let http_stats_keeper = Arc::new(EventBus::new( + let http_stats_event_bus = Arc::new(EventBus::new( config.core.tracker_usage_statistics, http_core_broadcaster.clone(), )); - let http_stats_event_sender = http_stats_keeper.sender(); + let http_stats_event_sender = http_stats_event_bus.sender(); if config.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_keeper.receiver(), &http_stats_repository); + let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); } ( diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 3cf344755..7486efe5b 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -71,12 +71,12 @@ impl HttpTrackerCoreServices { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); - let http_stats_keeper = Arc::new(EventBus::new( + let http_stats_event_bus = Arc::new(EventBus::new( tracker_core_container.core_config.tracker_usage_statistics, http_core_broadcaster.clone(), )); - let http_stats_event_sender = http_stats_keeper.sender(); + let 
http_stats_event_sender = http_stats_event_bus.sender(); let http_announce_service = Arc::new(AnnounceService::new( tracker_core_container.core_config.clone(), @@ -94,7 +94,7 @@ impl HttpTrackerCoreServices { )); Arc::new(Self { - stats_keeper: http_stats_keeper, + stats_keeper: http_stats_event_bus, stats_event_sender: http_stats_event_sender, stats_repository: http_stats_repository, announce_service: http_announce_service, diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index a1e69d2cc..2c1e14b19 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -255,15 +255,15 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); - let http_stats_keeper = Arc::new(EventBus::new( + let http_stats_event_bus = Arc::new(EventBus::new( config.core.tracker_usage_statistics, http_core_broadcaster.clone(), )); - let http_stats_event_sender = http_stats_keeper.sender(); + let http_stats_event_sender = http_stats_event_bus.sender(); if config.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_keeper.receiver(), &http_stats_repository); + let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); } ( diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 2b5f74c83..f86615b9d 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -276,9 +276,9 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); - let http_stats_keeper = Arc::new(EventBus::new(false, http_core_broadcaster.clone())); + let http_stats_event_bus = Arc::new(EventBus::new(false, http_core_broadcaster.clone())); - let http_stats_event_sender = http_stats_keeper.sender(); + let 
http_stats_event_sender = http_stats_event_bus.sender(); let container = initialize_services_with_configuration(&configuration); @@ -471,9 +471,9 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); - let http_stats_keeper = Arc::new(EventBus::new(false, http_core_broadcaster.clone())); + let http_stats_event_bus = Arc::new(EventBus::new(false, http_core_broadcaster.clone())); - let http_stats_event_sender = http_stats_keeper.sender(); + let http_stats_event_sender = http_stats_event_bus.sender(); let info_hash = sample_info_hash(); let info_hashes = vec![info_hash]; diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index c695f6d4f..2cc96c15b 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -95,13 +95,13 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); - let http_stats_keeper = Arc::new(EventBus::new( + let http_stats_event_bus = Arc::new(EventBus::new( config.core.tracker_usage_statistics, http_core_broadcaster.clone(), )); if config.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_keeper.receiver(), &http_stats_repository); + let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); } let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), http_stats_repository).await; diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index cb09a3907..85af56801 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -153,13 +153,13 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = 
Arc::new(Repository::new()); - let http_stats_keeper = Arc::new(EventBus::new( + let http_stats_event_bus = Arc::new(EventBus::new( config.core.tracker_usage_statistics, http_core_broadcaster.clone(), )); if config.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_keeper.receiver(), &http_stats_repository); + let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); } // UDP server stats diff --git a/packages/udp-tracker-core/benches/helpers/sync.rs b/packages/udp-tracker-core/benches/helpers/sync.rs index c465ae996..64eff2b48 100644 --- a/packages/udp-tracker-core/benches/helpers/sync.rs +++ b/packages/udp-tracker-core/benches/helpers/sync.rs @@ -16,9 +16,9 @@ pub async fn connect_once(samples: u64) -> Duration { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let keeper = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); - let udp_core_stats_event_sender = keeper.sender(); + let udp_core_stats_event_sender = event_bus.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); let start = Instant::now(); diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index bc0d8ba4b..3244b57b4 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -82,12 +82,12 @@ impl UdpTrackerCoreServices { pub fn initialize_from(tracker_core_container: &Arc) -> Arc { let udp_core_broadcaster = Broadcaster::default(); let udp_core_stats_repository = Arc::new(Repository::new()); - let keeper = Arc::new(EventBus::new( + let event_bus = Arc::new(EventBus::new( tracker_core_container.core_config.tracker_usage_statistics, udp_core_broadcaster.clone(), )); - let udp_core_stats_event_sender = keeper.sender(); + let 
udp_core_stats_event_sender = event_bus.sender(); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender.clone())); let announce_service = Arc::new(AnnounceService::new( @@ -101,7 +101,7 @@ impl UdpTrackerCoreServices { )); Arc::new(Self { - stats_keeper: keeper, + stats_keeper: event_bus, stats_event_sender: udp_core_stats_event_sender, stats_repository: udp_core_stats_repository, ban_service, diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index 7ea8b0882..81d9219e2 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -81,8 +81,8 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let keeper = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); - let udp_core_stats_event_sender = keeper.sender(); + let event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let udp_core_stats_event_sender = event_bus.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); @@ -102,8 +102,8 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let keeper = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); - let udp_core_stats_event_sender = keeper.sender(); + let event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let udp_core_stats_event_sender = event_bus.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); @@ -124,8 +124,8 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); 
- let keeper = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); - let udp_core_stats_event_sender = keeper.sender(); + let event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let udp_core_stats_event_sender = event_bus.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); diff --git a/packages/udp-tracker-core/src/statistics/services.rs b/packages/udp-tracker-core/src/statistics/services.rs index 22d84c931..aa10e4acd 100644 --- a/packages/udp-tracker-core/src/statistics/services.rs +++ b/packages/udp-tracker-core/src/statistics/services.rs @@ -89,32 +89,17 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::{self}; - use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; - use torrust_tracker_test_helpers::configuration; - use crate::event::sender::Broadcaster; use crate::statistics::describe_metrics; - use crate::statistics::event_bus::EventBus; use crate::statistics::repository::Repository; use crate::statistics::services::{get_metrics, TrackerMetrics}; - pub fn tracker_configuration() -> Configuration { - configuration::ephemeral() - } - #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { - let config = tracker_configuration(); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let udp_core_broadcaster = Broadcaster::default(); let repository = Arc::new(Repository::new()); - let _keeper = Arc::new(EventBus::new( - config.core.tracker_usage_statistics, - udp_core_broadcaster.clone(), - )); let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), repository.clone()).await; diff --git a/packages/udp-tracker-server/src/container.rs b/packages/udp-tracker-server/src/container.rs index 0ad611070..e1dbdfece 100644 --- a/packages/udp-tracker-server/src/container.rs +++ 
b/packages/udp-tracker-server/src/container.rs @@ -38,15 +38,15 @@ impl UdpTrackerServerServices { pub fn initialize(core_config: &Arc) -> Arc { let udp_server_broadcaster = Broadcaster::default(); let udp_server_stats_repository = Arc::new(Repository::new()); - let udp_server_stats_keeper = Arc::new(EventBus::new( + let udp_server_stats_event_bus = Arc::new(EventBus::new( core_config.tracker_usage_statistics, udp_server_broadcaster.clone(), )); - let udp_server_stats_event_sender = udp_server_stats_keeper.sender(); + let udp_server_stats_event_sender = udp_server_stats_event_bus.sender(); Arc::new(Self { - stats_keeper: udp_server_stats_keeper.clone(), + stats_keeper: udp_server_stats_event_bus.clone(), stats_event_sender: udp_server_stats_event_sender.clone(), stats_repository: udp_server_stats_repository.clone(), }) diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index d060269f5..452a12d65 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -375,9 +375,12 @@ mod tests { core_udp_tracker_services: Arc, ) -> Response { let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let keeper = Arc::new(crate::statistics::event_bus::EventBus::new(false, udp_server_broadcaster.clone())); + let event_bus = Arc::new(crate::statistics::event_bus::EventBus::new( + false, + udp_server_broadcaster.clone(), + )); - let udp_server_stats_event_sender = keeper.sender(); + let udp_server_stats_event_sender = event_bus.sender(); let client_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); @@ -711,13 +714,16 @@ mod tests { whitelist_authorization: Arc, ) -> Response { let udp_core_broadcaster = Broadcaster::default(); - let core_keeper = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); - 
let udp_core_stats_event_sender = core_keeper.sender(); + let core_event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_keeper = Arc::new(crate::statistics::event_bus::EventBus::new(false, udp_server_broadcaster.clone())); + let server_event_bus = Arc::new(crate::statistics::event_bus::EventBus::new( + false, + udp_server_broadcaster.clone(), + )); - let udp_server_stats_event_sender = server_keeper.sender(); + let udp_server_stats_event_sender = server_event_bus.sender(); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index 9bfc8eaa6..f08084a20 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -84,13 +84,16 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let core_keeper = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); - let udp_core_stats_event_sender = core_keeper.sender(); + let core_event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_keeper = Arc::new(crate::statistics::event_bus::EventBus::new(false, udp_server_broadcaster.clone())); + let server_event_bus = Arc::new(crate::statistics::event_bus::EventBus::new( + false, + udp_server_broadcaster.clone(), + )); - let udp_server_stats_event_sender = server_keeper.sender(); + let udp_server_stats_event_sender = server_event_bus.sender(); let request = ConnectRequest { transaction_id: 
TransactionId(0i32.into()), @@ -123,13 +126,16 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let core_keeper = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); - let udp_core_stats_event_sender = core_keeper.sender(); + let core_event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_keeper = Arc::new(crate::statistics::event_bus::EventBus::new(false, udp_server_broadcaster.clone())); + let server_event_bus = Arc::new(crate::statistics::event_bus::EventBus::new( + false, + udp_server_broadcaster.clone(), + )); - let udp_server_stats_event_sender = server_keeper.sender(); + let udp_server_stats_event_sender = server_event_bus.sender(); let request = ConnectRequest { transaction_id: TransactionId(0i32.into()), @@ -162,14 +168,17 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let core_keeper = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let core_event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); - let udp_core_stats_event_sender = core_keeper.sender(); + let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_keeper = Arc::new(crate::statistics::event_bus::EventBus::new(false, udp_server_broadcaster.clone())); + let server_event_bus = Arc::new(crate::statistics::event_bus::EventBus::new( + false, + udp_server_broadcaster.clone(), + )); - let udp_server_stats_event_sender = server_keeper.sender(); + let udp_server_stats_event_sender = server_event_bus.sender(); let request = ConnectRequest { transaction_id: TransactionId(0i32.into()), 
diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index b53f1e2ee..43c6c63b7 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -287,16 +287,16 @@ pub(crate) mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); let udp_core_broadcaster = Broadcaster::default(); - let core_keeper = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); - let udp_core_stats_event_sender = core_keeper.sender(); + let core_event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_keeper = Arc::new(crate::statistics::event_bus::EventBus::new( + let server_event_bus = Arc::new(crate::statistics::event_bus::EventBus::new( false, udp_server_broadcaster.clone(), )); - let udp_server_stats_event_sender = server_keeper.sender(); + let udp_server_stats_event_sender = server_event_bus.sender(); let announce_service = Arc::new(AnnounceService::new( announce_handler.clone(), diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index 1533a2146..b945913ad 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -181,9 +181,9 @@ mod tests { core_udp_tracker_services: Arc, ) -> Response { let udp_server_broadcaster = Broadcaster::default(); - let keeper = Arc::new(EventBus::new(false, udp_server_broadcaster.clone())); + let event_bus = Arc::new(EventBus::new(false, udp_server_broadcaster.clone())); - let udp_server_stats_event_sender = keeper.sender(); + let udp_server_stats_event_sender = event_bus.sender(); let client_socket_addr = sample_ipv4_remote_addr(); let server_socket_addr = 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); diff --git a/packages/udp-tracker-server/src/statistics/services.rs b/packages/udp-tracker-server/src/statistics/services.rs index ca2cff7e8..4db80c465 100644 --- a/packages/udp-tracker-server/src/statistics/services.rs +++ b/packages/udp-tracker-server/src/statistics/services.rs @@ -109,33 +109,18 @@ mod tests { use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; use tokio::sync::RwLock; - use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; - use torrust_tracker_test_helpers::configuration; - use crate::event::sender::Broadcaster; use crate::statistics::describe_metrics; - use crate::statistics::event_bus::EventBus; use crate::statistics::repository::Repository; use crate::statistics::services::{get_metrics, TrackerMetrics}; - pub fn tracker_configuration() -> Configuration { - configuration::ephemeral() - } - #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { - let config = tracker_configuration(); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); - let udp_server_broadcaster = Broadcaster::default(); let stats_repository = Arc::new(Repository::new()); - let _keeper = Arc::new(EventBus::new( - config.core.tracker_usage_statistics, - udp_server_broadcaster.clone(), - )); let tracker_metrics = get_metrics( in_memory_torrent_repository.clone(), From b32072c5982708486012ef86d88735b985baf6ff Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Apr 2025 18:51:00 +0100 Subject: [PATCH 507/802] refactor: [#1478] rename struct fields for Keeper type --- packages/axum-http-tracker-server/src/environment.rs | 2 +- packages/axum-http-tracker-server/src/server.rs | 2 +- 
packages/http-tracker-core/src/container.rs | 8 ++++---- packages/udp-tracker-core/src/container.rs | 8 ++++---- packages/udp-tracker-server/src/container.rs | 8 ++++---- packages/udp-tracker-server/src/environment.rs | 4 ++-- src/app.rs | 6 +++--- 7 files changed, 19 insertions(+), 19 deletions(-) diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index ffba790c2..aeb53a710 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -70,7 +70,7 @@ impl Environment { pub async fn start(self) -> Environment { // Start the event listener let event_listener_job = run_event_listener( - self.container.http_tracker_core_container.stats_keeper.receiver(), + self.container.http_tracker_core_container.event_bus.receiver(), &self.container.http_tracker_core_container.stats_repository, ); diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 9367e6a77..209925c04 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -308,7 +308,7 @@ mod tests { HttpTrackerCoreContainer { tracker_core_container, http_tracker_config, - stats_keeper: http_stats_event_bus, + event_bus: http_stats_event_bus, stats_event_sender: http_stats_event_sender, stats_repository: http_stats_repository, announce_service, diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 7486efe5b..647c065df 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -16,7 +16,7 @@ pub struct HttpTrackerCoreContainer { pub tracker_core_container: Arc, // `HttpTrackerCoreServices` - pub stats_keeper: Arc, + pub event_bus: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, pub announce_service: Arc, @@ -48,7 +48,7 @@ impl HttpTrackerCoreContainer { 
Arc::new(Self { tracker_core_container: tracker_core_container.clone(), http_tracker_config: http_tracker_config.clone(), - stats_keeper: http_tracker_core_services.stats_keeper.clone(), + event_bus: http_tracker_core_services.event_bus.clone(), stats_event_sender: http_tracker_core_services.stats_event_sender.clone(), stats_repository: http_tracker_core_services.stats_repository.clone(), announce_service: http_tracker_core_services.announce_service.clone(), @@ -58,7 +58,7 @@ impl HttpTrackerCoreContainer { } pub struct HttpTrackerCoreServices { - pub stats_keeper: Arc, + pub event_bus: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, pub announce_service: Arc, @@ -94,7 +94,7 @@ impl HttpTrackerCoreServices { )); Arc::new(Self { - stats_keeper: http_stats_event_bus, + event_bus: http_stats_event_bus, stats_event_sender: http_stats_event_sender, stats_repository: http_stats_repository, announce_service: http_announce_service, diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index 3244b57b4..e229fe1a4 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -19,7 +19,7 @@ pub struct UdpTrackerCoreContainer { pub tracker_core_container: Arc, // `UdpTrackerCoreServices` - pub stats_keeper: Arc, + pub event_bus: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, pub ban_service: Arc>, @@ -56,7 +56,7 @@ impl UdpTrackerCoreContainer { tracker_core_container: tracker_core_container.clone(), // `UdpTrackerCoreServices` - stats_keeper: udp_tracker_core_services.stats_keeper.clone(), + event_bus: udp_tracker_core_services.event_bus.clone(), stats_event_sender: udp_tracker_core_services.stats_event_sender.clone(), stats_repository: udp_tracker_core_services.stats_repository.clone(), ban_service: udp_tracker_core_services.ban_service.clone(), @@ -68,7 +68,7 @@ impl UdpTrackerCoreContainer { } pub struct UdpTrackerCoreServices { - pub stats_keeper: 
Arc, + pub event_bus: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, pub ban_service: Arc>, @@ -101,7 +101,7 @@ impl UdpTrackerCoreServices { )); Arc::new(Self { - stats_keeper: event_bus, + event_bus, stats_event_sender: udp_core_stats_event_sender, stats_repository: udp_core_stats_repository, ban_service, diff --git a/packages/udp-tracker-server/src/container.rs b/packages/udp-tracker-server/src/container.rs index e1dbdfece..debeb0ecf 100644 --- a/packages/udp-tracker-server/src/container.rs +++ b/packages/udp-tracker-server/src/container.rs @@ -9,7 +9,7 @@ use crate::statistics::event_bus::EventBus; use crate::statistics::repository::Repository; pub struct UdpTrackerServerContainer { - pub stats_keeper: Arc, + pub event_bus: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, } @@ -20,7 +20,7 @@ impl UdpTrackerServerContainer { let udp_tracker_server_services = UdpTrackerServerServices::initialize(core_config); Arc::new(Self { - stats_keeper: udp_tracker_server_services.stats_keeper.clone(), + event_bus: udp_tracker_server_services.event_bus.clone(), stats_event_sender: udp_tracker_server_services.stats_event_sender.clone(), stats_repository: udp_tracker_server_services.stats_repository.clone(), }) @@ -28,7 +28,7 @@ impl UdpTrackerServerContainer { } pub struct UdpTrackerServerServices { - pub stats_keeper: Arc, + pub event_bus: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, } @@ -46,7 +46,7 @@ impl UdpTrackerServerServices { let udp_server_stats_event_sender = udp_server_stats_event_bus.sender(); Arc::new(Self { - stats_keeper: udp_server_stats_event_bus.clone(), + event_bus: udp_server_stats_event_bus.clone(), stats_event_sender: udp_server_stats_event_sender.clone(), stats_repository: udp_server_stats_repository.clone(), }) diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 7d70317aa..962442fde 100644 --- a/packages/udp-tracker-server/src/environment.rs 
+++ b/packages/udp-tracker-server/src/environment.rs @@ -73,13 +73,13 @@ impl Environment { let cookie_lifetime = self.container.udp_tracker_core_container.udp_tracker_config.cookie_lifetime; // Start the UDP tracker core event listener let udp_core_event_listener_job = Some(bittorrent_udp_tracker_core::statistics::event::listener::run_event_listener( - self.container.udp_tracker_core_container.stats_keeper.receiver(), + self.container.udp_tracker_core_container.event_bus.receiver(), &self.container.udp_tracker_core_container.stats_repository, )); // Start the UDP tracker server event listener let udp_server_event_listener_job = Some(crate::statistics::event::listener::run_event_listener( - self.container.udp_tracker_server_container.stats_keeper.receiver(), + self.container.udp_tracker_server_container.event_bus.receiver(), &self.container.udp_tracker_server_container.stats_repository, )); diff --git a/src/app.rs b/src/app.rs index b01dc9c36..5d07eb8b3 100644 --- a/src/app.rs +++ b/src/app.rs @@ -112,7 +112,7 @@ async fn load_whitelisted_torrents(config: &Configuration, app_container: &Arc) { if config.core.tracker_usage_statistics { let _job = bittorrent_http_tracker_core::statistics::event::listener::run_event_listener( - app_container.http_tracker_core_services.stats_keeper.receiver(), + app_container.http_tracker_core_services.event_bus.receiver(), &app_container.http_tracker_core_services.stats_repository, ); @@ -132,7 +132,7 @@ fn start_http_core_event_listener(config: &Configuration, app_container: &Arc) { if config.core.tracker_usage_statistics { let _job = bittorrent_udp_tracker_core::statistics::event::listener::run_event_listener( - app_container.udp_tracker_core_services.stats_keeper.receiver(), + app_container.udp_tracker_core_services.event_bus.receiver(), &app_container.udp_tracker_core_services.stats_repository, ); @@ -152,7 +152,7 @@ fn start_udp_core_event_listener(config: &Configuration, app_container: &Arc) { if 
config.core.tracker_usage_statistics { let _job = torrust_udp_tracker_server::statistics::event::listener::run_event_listener( - app_container.udp_tracker_server_container.stats_keeper.receiver(), + app_container.udp_tracker_server_container.event_bus.receiver(), &app_container.udp_tracker_server_container.stats_repository, ); From b0951aad2a1027a5640a93f1870b83e4e7a651c9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Apr 2025 18:58:33 +0100 Subject: [PATCH 508/802] refactor: [#1478] move EventBus to event mod --- packages/axum-http-tracker-server/src/server.rs | 2 +- .../src/v1/handlers/announce.rs | 2 +- .../src/v1/handlers/scrape.rs | 2 +- .../http-tracker-core/benches/helpers/util.rs | 2 +- packages/http-tracker-core/src/container.rs | 6 +++--- .../{statistics/event_bus.rs => event/bus.rs} | 0 packages/http-tracker-core/src/event/mod.rs | 5 +++-- .../http-tracker-core/src/services/announce.rs | 2 +- .../http-tracker-core/src/services/scrape.rs | 4 ++-- .../http-tracker-core/src/statistics/mod.rs | 1 - .../src/statistics/services.rs | 2 +- .../src/statistics/services.rs | 2 +- .../udp-tracker-core/benches/helpers/sync.rs | 2 +- packages/udp-tracker-core/src/container.rs | 6 +++--- .../{statistics/event_bus.rs => event/bus.rs} | 0 packages/udp-tracker-core/src/event/mod.rs | 5 +++-- .../udp-tracker-core/src/services/connect.rs | 2 +- packages/udp-tracker-core/src/statistics/mod.rs | 1 - packages/udp-tracker-server/src/container.rs | 6 +++--- .../{statistics/event_bus.rs => event/bus.rs} | 0 packages/udp-tracker-server/src/event/mod.rs | 5 +++-- .../udp-tracker-server/src/handlers/announce.rs | 12 +++--------- .../udp-tracker-server/src/handlers/connect.rs | 17 ++++------------- packages/udp-tracker-server/src/handlers/mod.rs | 7 ++----- .../udp-tracker-server/src/handlers/scrape.rs | 2 +- .../udp-tracker-server/src/statistics/mod.rs | 1 - 26 files changed, 39 insertions(+), 57 deletions(-) rename packages/http-tracker-core/src/{statistics/event_bus.rs 
=> event/bus.rs} (100%) rename packages/udp-tracker-core/src/{statistics/event_bus.rs => event/bus.rs} (100%) rename packages/udp-tracker-server/src/{statistics/event_bus.rs => event/bus.rs} (100%) diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 209925c04..41a9dec6d 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -248,11 +248,11 @@ mod tests { use std::sync::Arc; use bittorrent_http_tracker_core::container::HttpTrackerCoreContainer; + use bittorrent_http_tracker_core::event::bus::EventBus; use bittorrent_http_tracker_core::event::sender::Broadcaster; use bittorrent_http_tracker_core::services::announce::AnnounceService; use bittorrent_http_tracker_core::services::scrape::ScrapeService; use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; - use bittorrent_http_tracker_core::statistics::event_bus::EventBus; use bittorrent_http_tracker_core::statistics::repository::Repository; use bittorrent_tracker_core::container::TrackerCoreContainer; use torrust_axum_server::tsl::make_rust_tls; diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 64d01dde6..7489211a9 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -107,10 +107,10 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::PeerId; + use bittorrent_http_tracker_core::event::bus::EventBus; use bittorrent_http_tracker_core::event::sender::Broadcaster; use bittorrent_http_tracker_core::services::announce::AnnounceService; use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; - use bittorrent_http_tracker_core::statistics::event_bus::EventBus; use bittorrent_http_tracker_core::statistics::repository::Repository; use 
bittorrent_http_tracker_protocol::v1::requests::announce::Announce; use bittorrent_http_tracker_protocol::v1::responses; diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index ae99ea89f..7f1247173 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -83,9 +83,9 @@ mod tests { use std::str::FromStr; use std::sync::Arc; + use bittorrent_http_tracker_core::event::bus::EventBus; use bittorrent_http_tracker_core::event::sender::Broadcaster; use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; - use bittorrent_http_tracker_core::statistics::event_bus::EventBus; use bittorrent_http_tracker_core::statistics::repository::Repository; use bittorrent_http_tracker_protocol::v1::requests::scrape::Scrape; use bittorrent_http_tracker_protocol::v1::responses; diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index 532ee21bf..9d2d80da3 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -3,10 +3,10 @@ use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_http_tracker_core::event; +use bittorrent_http_tracker_core::event::bus::EventBus; use bittorrent_http_tracker_core::event::sender::Broadcaster; use bittorrent_http_tracker_core::event::Event; use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; -use bittorrent_http_tracker_core::statistics::event_bus::EventBus; use bittorrent_http_tracker_core::statistics::repository::Repository; use bittorrent_http_tracker_protocol::v1::requests::announce::Announce; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::ClientIpSources; diff --git a/packages/http-tracker-core/src/container.rs 
b/packages/http-tracker-core/src/container.rs index 647c065df..c3ed8a0c7 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -3,10 +3,10 @@ use std::sync::Arc; use bittorrent_tracker_core::container::TrackerCoreContainer; use torrust_tracker_configuration::{Core, HttpTracker}; +use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; use crate::services::announce::AnnounceService; use crate::services::scrape::ScrapeService; -use crate::statistics::event_bus::EventBus; use crate::statistics::repository::Repository; use crate::{event, services, statistics}; @@ -16,7 +16,7 @@ pub struct HttpTrackerCoreContainer { pub tracker_core_container: Arc, // `HttpTrackerCoreServices` - pub event_bus: Arc, + pub event_bus: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, pub announce_service: Arc, @@ -58,7 +58,7 @@ impl HttpTrackerCoreContainer { } pub struct HttpTrackerCoreServices { - pub event_bus: Arc, + pub event_bus: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, pub announce_service: Arc, diff --git a/packages/http-tracker-core/src/statistics/event_bus.rs b/packages/http-tracker-core/src/event/bus.rs similarity index 100% rename from packages/http-tracker-core/src/statistics/event_bus.rs rename to packages/http-tracker-core/src/event/bus.rs diff --git a/packages/http-tracker-core/src/event/mod.rs b/packages/http-tracker-core/src/event/mod.rs index 4f0b84e48..5b1c64dca 100644 --- a/packages/http-tracker-core/src/event/mod.rs +++ b/packages/http-tracker-core/src/event/mod.rs @@ -1,3 +1,6 @@ +pub mod bus; +pub mod sender; + use std::net::{IpAddr, SocketAddr}; use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::RemoteClientAddr; @@ -7,8 +10,6 @@ use torrust_tracker_metrics::label_name; use torrust_tracker_primitives::peer::PeerAnnouncement; use torrust_tracker_primitives::service_binding::ServiceBinding; -pub mod sender; - /// A HTTP core event. 
#[derive(Debug, PartialEq, Eq, Clone)] pub enum Event { diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 2c1e14b19..bef7449b7 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -303,10 +303,10 @@ mod tests { use tokio::sync::broadcast::error::SendError; use crate::event; + use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; use crate::event::Event; use crate::statistics::event::listener::run_event_listener; - use crate::statistics::event_bus::EventBus; use crate::statistics::repository::Repository; use crate::tests::sample_info_hash; diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index f86615b9d..a0ae73d97 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -260,13 +260,13 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::event; + use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; use crate::event::{ConnectionContext, Event}; use crate::services::scrape::tests::{ initialize_services_with_configuration, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; use crate::services::scrape::ScrapeService; - use crate::statistics::event_bus::EventBus; use crate::tests::sample_info_hash; #[tokio::test] @@ -453,13 +453,13 @@ mod tests { use torrust_tracker_test_helpers::configuration; use crate::event; + use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; use crate::event::{ConnectionContext, Event}; use crate::services::scrape::tests::{ initialize_services_with_configuration, sample_info_hashes, sample_peer, MockHttpStatsEventSender, }; use crate::services::scrape::ScrapeService; - use crate::statistics::event_bus::EventBus; use crate::tests::sample_info_hash; #[tokio::test] diff --git 
a/packages/http-tracker-core/src/statistics/mod.rs b/packages/http-tracker-core/src/statistics/mod.rs index da2f0acd4..f949babbd 100644 --- a/packages/http-tracker-core/src/statistics/mod.rs +++ b/packages/http-tracker-core/src/statistics/mod.rs @@ -1,5 +1,4 @@ pub mod event; -pub mod event_bus; pub mod metrics; pub mod repository; pub mod services; diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index 2cc96c15b..7f3c365d4 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -75,10 +75,10 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; use torrust_tracker_test_helpers::configuration; + use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; use crate::statistics::describe_metrics; use crate::statistics::event::listener::run_event_listener; - use crate::statistics::event_bus::EventBus; use crate::statistics::repository::Repository; use crate::statistics::services::{get_metrics, TrackerMetrics}; diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 85af56801..9489a5e3e 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -123,9 +123,9 @@ pub async fn get_labeled_metrics( mod tests { use std::sync::Arc; + use bittorrent_http_tracker_core::event::bus::EventBus; use bittorrent_http_tracker_core::event::sender::Broadcaster; use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; - use bittorrent_http_tracker_core::statistics::event_bus::EventBus; use bittorrent_http_tracker_core::statistics::repository::Repository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::{self}; diff --git 
a/packages/udp-tracker-core/benches/helpers/sync.rs b/packages/udp-tracker-core/benches/helpers/sync.rs index 64eff2b48..1814a865e 100644 --- a/packages/udp-tracker-core/benches/helpers/sync.rs +++ b/packages/udp-tracker-core/benches/helpers/sync.rs @@ -2,9 +2,9 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use std::time::{Duration, Instant}; +use bittorrent_udp_tracker_core::event::bus::EventBus; use bittorrent_udp_tracker_core::event::sender::Broadcaster; use bittorrent_udp_tracker_core::services::connect::ConnectService; -use bittorrent_udp_tracker_core::statistics::event_bus::EventBus; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::helpers::utils::{sample_ipv4_remote_addr, sample_issue_time}; diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index e229fe1a4..c1dd0461f 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -4,12 +4,12 @@ use bittorrent_tracker_core::container::TrackerCoreContainer; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, UdpTracker}; +use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; use crate::services::announce::AnnounceService; use crate::services::banning::BanService; use crate::services::connect::ConnectService; use crate::services::scrape::ScrapeService; -use crate::statistics::event_bus::EventBus; use crate::statistics::repository::Repository; use crate::{event, services, statistics, MAX_CONNECTION_ID_ERRORS_PER_IP}; @@ -19,7 +19,7 @@ pub struct UdpTrackerCoreContainer { pub tracker_core_container: Arc, // `UdpTrackerCoreServices` - pub event_bus: Arc, + pub event_bus: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, pub ban_service: Arc>, @@ -68,7 +68,7 @@ impl UdpTrackerCoreContainer { } pub struct UdpTrackerCoreServices { - pub event_bus: Arc, + pub event_bus: Arc, pub stats_event_sender: Arc>>, pub 
stats_repository: Arc, pub ban_service: Arc>, diff --git a/packages/udp-tracker-core/src/statistics/event_bus.rs b/packages/udp-tracker-core/src/event/bus.rs similarity index 100% rename from packages/udp-tracker-core/src/statistics/event_bus.rs rename to packages/udp-tracker-core/src/event/bus.rs diff --git a/packages/udp-tracker-core/src/event/mod.rs b/packages/udp-tracker-core/src/event/mod.rs index 1ec502572..babc05fcc 100644 --- a/packages/udp-tracker-core/src/event/mod.rs +++ b/packages/udp-tracker-core/src/event/mod.rs @@ -1,3 +1,6 @@ +pub mod bus; +pub mod sender; + use std::net::SocketAddr; use bittorrent_primitives::info_hash::InfoHash; @@ -6,8 +9,6 @@ use torrust_tracker_metrics::label_name; use torrust_tracker_primitives::peer::PeerAnnouncement; use torrust_tracker_primitives::service_binding::ServiceBinding; -pub mod sender; - /// A UDP core event. #[derive(Debug, PartialEq, Eq, Clone)] pub enum Event { diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index 81d9219e2..b40af901c 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -66,6 +66,7 @@ mod tests { use crate::connection_cookie::make; use crate::event; + use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; use crate::event::{ConnectionContext, Event}; use crate::services::connect::ConnectService; @@ -73,7 +74,6 @@ mod tests { sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv4_socket_address, sample_ipv6_remote_addr, sample_ipv6_remote_addr_fingerprint, sample_issue_time, MockUdpCoreStatsEventSender, }; - use crate::statistics::event_bus::EventBus; #[tokio::test] async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { diff --git a/packages/udp-tracker-core/src/statistics/mod.rs b/packages/udp-tracker-core/src/statistics/mod.rs index f4e6f06a6..9eb85d7f1 100644 --- 
a/packages/udp-tracker-core/src/statistics/mod.rs +++ b/packages/udp-tracker-core/src/statistics/mod.rs @@ -1,5 +1,4 @@ pub mod event; -pub mod event_bus; pub mod metrics; pub mod repository; pub mod services; diff --git a/packages/udp-tracker-server/src/container.rs b/packages/udp-tracker-server/src/container.rs index debeb0ecf..121737d92 100644 --- a/packages/udp-tracker-server/src/container.rs +++ b/packages/udp-tracker-server/src/container.rs @@ -2,14 +2,14 @@ use std::sync::Arc; use torrust_tracker_configuration::Core; +use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; use crate::event::{self}; use crate::statistics; -use crate::statistics::event_bus::EventBus; use crate::statistics::repository::Repository; pub struct UdpTrackerServerContainer { - pub event_bus: Arc, + pub event_bus: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, } @@ -28,7 +28,7 @@ impl UdpTrackerServerContainer { } pub struct UdpTrackerServerServices { - pub event_bus: Arc, + pub event_bus: Arc, pub stats_event_sender: Arc>>, pub stats_repository: Arc, } diff --git a/packages/udp-tracker-server/src/statistics/event_bus.rs b/packages/udp-tracker-server/src/event/bus.rs similarity index 100% rename from packages/udp-tracker-server/src/statistics/event_bus.rs rename to packages/udp-tracker-server/src/event/bus.rs diff --git a/packages/udp-tracker-server/src/event/mod.rs b/packages/udp-tracker-server/src/event/mod.rs index a1770acc0..a2140a11c 100644 --- a/packages/udp-tracker-server/src/event/mod.rs +++ b/packages/udp-tracker-server/src/event/mod.rs @@ -1,3 +1,6 @@ +pub mod bus; +pub mod sender; + use std::fmt; use std::net::SocketAddr; use std::time::Duration; @@ -6,8 +9,6 @@ use torrust_tracker_metrics::label::{LabelSet, LabelValue}; use torrust_tracker_metrics::label_name; use torrust_tracker_primitives::service_binding::ServiceBinding; -pub mod sender; - /// A UDP server event. 
#[derive(Debug, PartialEq, Eq, Clone)] pub enum Event { diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 452a12d65..f8b2092b5 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -375,10 +375,7 @@ mod tests { core_udp_tracker_services: Arc, ) -> Response { let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let event_bus = Arc::new(crate::statistics::event_bus::EventBus::new( - false, - udp_server_broadcaster.clone(), - )); + let event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); let udp_server_stats_event_sender = event_bus.sender(); @@ -536,9 +533,9 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; + use bittorrent_udp_tracker_core::event::bus::EventBus; use bittorrent_udp_tracker_core::event::sender::Broadcaster; use bittorrent_udp_tracker_core::services::announce::AnnounceService; - use bittorrent_udp_tracker_core::statistics::event_bus::EventBus; use mockall::predicate::eq; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; @@ -718,10 +715,7 @@ mod tests { let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_event_bus = Arc::new(crate::statistics::event_bus::EventBus::new( - false, - udp_server_broadcaster.clone(), - )); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); let udp_server_stats_event_sender = server_event_bus.sender(); diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index 
f08084a20..85bfda680 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -59,9 +59,9 @@ mod tests { use aquatic_udp_protocol::{ConnectRequest, ConnectResponse, Response, TransactionId}; use bittorrent_udp_tracker_core::connection_cookie::make; use bittorrent_udp_tracker_core::event as core_event; + use bittorrent_udp_tracker_core::event::bus::EventBus; use bittorrent_udp_tracker_core::event::sender::Broadcaster; use bittorrent_udp_tracker_core::services::connect::ConnectService; - use bittorrent_udp_tracker_core::statistics::event_bus::EventBus; use mockall::predicate::eq; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; @@ -88,10 +88,7 @@ mod tests { let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_event_bus = Arc::new(crate::statistics::event_bus::EventBus::new( - false, - udp_server_broadcaster.clone(), - )); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); let udp_server_stats_event_sender = server_event_bus.sender(); @@ -130,10 +127,7 @@ mod tests { let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_event_bus = Arc::new(crate::statistics::event_bus::EventBus::new( - false, - udp_server_broadcaster.clone(), - )); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); let udp_server_stats_event_sender = server_event_bus.sender(); @@ -173,10 +167,7 @@ mod tests { let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_event_bus = Arc::new(crate::statistics::event_bus::EventBus::new( - false, - udp_server_broadcaster.clone(), - )); + let server_event_bus = 
Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); let udp_server_stats_event_sender = server_event_bus.sender(); diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 43c6c63b7..dde8f0cc8 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -218,10 +218,10 @@ pub(crate) mod tests { use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::connection_cookie::gen_remote_fingerprint; + use bittorrent_udp_tracker_core::event::bus::EventBus; use bittorrent_udp_tracker_core::event::sender::Broadcaster; use bittorrent_udp_tracker_core::services::announce::AnnounceService; use bittorrent_udp_tracker_core::services::scrape::ScrapeService; - use bittorrent_udp_tracker_core::statistics::event_bus::EventBus; use bittorrent_udp_tracker_core::{self, event as core_event}; use futures::future::BoxFuture; use mockall::mock; @@ -291,10 +291,7 @@ pub(crate) mod tests { let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_event_bus = Arc::new(crate::statistics::event_bus::EventBus::new( - false, - udp_server_broadcaster.clone(), - )); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); let udp_server_stats_event_sender = server_event_bus.sender(); diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index b945913ad..5774bc8e6 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -94,13 +94,13 @@ mod tests { use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use 
torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; use crate::handlers::handle_scrape; use crate::handlers::tests::{ initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, sample_issue_time, CoreTrackerServices, CoreUdpTrackerServices, TorrentPeerBuilder, }; - use crate::statistics::event_bus::EventBus; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { TorrentScrapeStatistics { diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index 9b6afc889..8f6e9becf 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -1,5 +1,4 @@ pub mod event; -pub mod event_bus; pub mod metrics; pub mod repository; pub mod services; From 36f94df982110c22eb8f8096bc04c92f96c06762 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 25 Apr 2025 20:58:03 +0100 Subject: [PATCH 509/802] chore(deps): udpate dependencies ``` cargo update Updating crates.io index Locking 14 packages to latest compatible versions Updating async-compression v0.4.22 -> v0.4.23 Updating brotli v7.0.0 -> v8.0.0 Updating brotli-decompressor v4.0.3 -> v5.0.0 Updating cc v1.2.19 -> v1.2.20 Updating getrandom v0.2.15 -> v0.2.16 Updating libm v0.2.12 -> v0.2.13 Updating libsqlite3-sys v0.32.0 -> v0.33.0 Updating local-ip-address v0.6.3 -> v0.6.4 Updating r2d2_sqlite v0.27.0 -> v0.28.0 Updating rusqlite v0.34.0 -> v0.35.0 Updating tokio-util v0.7.14 -> v0.7.15 Updating winnow v0.7.6 -> v0.7.7 Updating zerocopy v0.8.24 -> v0.8.25 Updating zerocopy-derive v0.8.24 -> v0.8.25 ``` --- Cargo.lock | 68 +++++++++++++++++++++++++++--------------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 370562982..8f7b1ef6f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -23,7 +23,7 @@ version = "0.7.8" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", "once_cell", "version_check", ] @@ -217,9 +217,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.22" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59a194f9d963d8099596278594b3107448656ba73831c9d8c783e613ce86da64" +checksum = "b37fc50485c4f3f736a4fb14199f6d5f5ba008d7f28fe710306c92780f004c07" dependencies = [ "brotli", "flate2", @@ -846,9 +846,9 @@ dependencies = [ [[package]] name = "brotli" -version = "7.0.0" +version = "8.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc97b8f16f944bba54f0433f07e30be199b6dc2bd25937444bbad560bcea29bd" +checksum = "cf19e729cdbd51af9a397fb9ef8ac8378007b797f8273cfbfdf45dcaa316167b" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -857,9 +857,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "4.0.3" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a334ef7c9e23abf0ce748e8cd309037da93e606ad52eb372e4ce327a0dcfbdfd" +checksum = "874bb8112abecc98cbd6d81ea4fa7e94fb9449648c93cc89aa40c81c24d7de03" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -952,9 +952,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.19" +version = "1.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e3a13707ac958681c13b39b458c073d0d9bc8a22cb1b2f4c8e55eb72c13f362" +checksum = "04da6a0d40b948dfc4fa8f5bbf402b0fc1a64a28dbf7d12ffd683550f2c1b63a" dependencies = [ "jobserver", "libc", @@ -1809,9 +1809,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" 
+checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "libc", @@ -2428,9 +2428,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.12" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d154aedcb0b7a1e91a3fddbe2a8350d3da76ac9d0220ae20da5c7aa8269612" +checksum = "c9627da5196e5d8ed0b0495e61e518847578da83483c37288316d9b2e03a7f72" [[package]] name = "libredox" @@ -2445,9 +2445,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbb8270bb4060bd76c6e96f20c52d80620f1d82a3470885694e41e0f81ef6fe7" +checksum = "947e6816f7825b2b45027c2c32e7085da9934defa535de4a6a46b10a4d5257fa" dependencies = [ "cc", "pkg-config", @@ -2485,13 +2485,13 @@ checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" [[package]] name = "local-ip-address" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3669cf5561f8d27e8fc84cc15e58350e70f557d4d65f70e3154e54cd2f8e1782" +checksum = "c986b1747bbd3666abe4d57c64e60e6a82c2216140d8b12d5ceb33feb9de44b3" dependencies = [ "libc", "neli", - "thiserror 1.0.69", + "thiserror 2.0.12", "windows-sys 0.59.0", ] @@ -3133,7 +3133,7 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.8.24", + "zerocopy 0.8.25", ] [[package]] @@ -3294,9 +3294,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.27.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180da684f0a188977d3968f139eb44260192ef8d9a5b7b7cbd01d881e0353179" +checksum = "8998443b32daee2ad6f528afb19ad77c4a8acc4d8d55b3e5072ed42862fe261a" dependencies = [ "r2d2", "rusqlite", @@ -3356,7 +3356,7 @@ 
version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", ] [[package]] @@ -3502,7 +3502,7 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.15", + "getrandom 0.2.16", "libc", "untrusted", "windows-sys 0.52.0", @@ -3580,9 +3580,9 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37e34486da88d8e051c7c0e23c3f15fd806ea8546260aa2fec247e97242ec143" +checksum = "a22715a5d6deef63c637207afbe68d0c72c3f8d0022d7cf9714c442d6157606b" dependencies = [ "bitflags 2.9.0", "fallible-iterator", @@ -4459,9 +4459,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.14" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b9590b93e6fcc1739458317cccd391ad3955e2bde8913edf6f95f9e65a8f034" +checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" dependencies = [ "bytes", "futures-core", @@ -5564,9 +5564,9 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63d3fcd9bba44b03821e7d699eeee959f3126dcc4aa8e4ae18ec617c2a5cea10" +checksum = "6cb8234a863ea0e8cd7284fcdd4f145233eb00fee02bbdd9861aec44e6477bc5" dependencies = [ "memchr", ] @@ -5653,11 +5653,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.24" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879" +checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" dependencies = [ - 
"zerocopy-derive 0.8.24", + "zerocopy-derive 0.8.25", ] [[package]] @@ -5673,9 +5673,9 @@ dependencies = [ [[package]] name = "zerocopy-derive" -version = "0.8.24" +version = "0.8.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" +checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" dependencies = [ "proc-macro2", "quote", From 05bfd65b99396fd2bc1cb3c7b7cf538e72d1b2f2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 28 Apr 2025 10:30:37 +0100 Subject: [PATCH 510/802] fet: [#1480] new events pacakge It will contain shared logic for handling events in other packages. --- .github/workflows/deployment.yaml | 1 + Cargo.lock | 9 + Cargo.toml | 2 +- packages/events/.gitignore | 1 + packages/events/Cargo.toml | 22 + packages/events/LICENSE | 661 ++++++++++++++++++++++++++++++ packages/events/README.md | 11 + packages/events/src/lib.rs | 2 + 8 files changed, 708 insertions(+), 1 deletion(-) create mode 100644 packages/events/.gitignore create mode 100644 packages/events/Cargo.toml create mode 100644 packages/events/LICENSE create mode 100644 packages/events/README.md create mode 100644 packages/events/src/lib.rs diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 983817273..2ef298eab 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -73,6 +73,7 @@ jobs: cargo publish -p torrust-tracker-clock cargo publish -p torrust-tracker-configuration cargo publish -p torrust-tracker-contrib-bencode + cargo publish -p torrust-tracker-events cargo publish -p torrust-tracker-located-error cargo publish -p torrust-tracker-metrics cargo publish -p torrust-tracker-primitives diff --git a/Cargo.lock b/Cargo.lock index 8f7b1ef6f..e00040f18 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4768,6 +4768,15 @@ dependencies = [ "thiserror 2.0.12", ] +[[package]] +name = "torrust-tracker-events" 
+version = "3.0.0-develop" +dependencies = [ + "futures", + "mockall", + "tokio", +] + [[package]] name = "torrust-tracker-located-error" version = "3.0.0-develop" diff --git a/Cargo.toml b/Cargo.toml index 9243ed483..9b348bfdc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -69,7 +69,7 @@ torrust-rest-tracker-api-client = { version = "3.0.0-develop", path = "packages/ torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "packages/test-helpers" } [workspace] -members = ["console/tracker-client"] +members = ["console/tracker-client", "packages/events"] [profile.dev] debug = 1 diff --git a/packages/events/.gitignore b/packages/events/.gitignore new file mode 100644 index 000000000..0b1372e5c --- /dev/null +++ b/packages/events/.gitignore @@ -0,0 +1 @@ +./.coverage diff --git a/packages/events/Cargo.toml b/packages/events/Cargo.toml new file mode 100644 index 000000000..86d6e38f4 --- /dev/null +++ b/packages/events/Cargo.toml @@ -0,0 +1,22 @@ +[package] +description = "A library with functionality to handle events in Torrust tracker packages." +keywords = ["events", "library", "rust", "torrust", "tracker"] +name = "torrust-tracker-events" +readme = "README.md" + +authors.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +futures = "0" +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } + +[dev-dependencies] +mockall = "0" diff --git a/packages/events/LICENSE b/packages/events/LICENSE new file mode 100644 index 000000000..0ad25db4b --- /dev/null +++ b/packages/events/LICENSE @@ -0,0 +1,661 @@ + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. 
+ Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. 
+ + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. 
Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. 
A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. 
+ + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. 
This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. 
Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. 
+ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. 
Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/packages/events/README.md b/packages/events/README.md new file mode 100644 index 000000000..42a5a2f61 --- /dev/null +++ b/packages/events/README.md @@ -0,0 +1,11 @@ +# Torrust Tracker Events + +A library with functionality to handle events in [Torrust Tracker](https://github.com/torrust/torrust-tracker) packages. + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-events). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/events/src/lib.rs b/packages/events/src/lib.rs new file mode 100644 index 000000000..7d59598c3 --- /dev/null +++ b/packages/events/src/lib.rs @@ -0,0 +1,2 @@ +/// Target for tracing crate logs. +pub const EVENTS_TARGET: &str = "EVENTS"; From ff9d1f0b19610af58d2b7f6447873b1c60038687 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 28 Apr 2025 11:06:37 +0100 Subject: [PATCH 511/802] feat: [#1480] add event sender trait --- packages/events/src/lib.rs | 2 ++ packages/events/src/sender.rs | 15 +++++++++++++++ 2 files changed, 17 insertions(+) create mode 100644 packages/events/src/sender.rs diff --git a/packages/events/src/lib.rs b/packages/events/src/lib.rs index 7d59598c3..8d7ba2f39 100644 --- a/packages/events/src/lib.rs +++ b/packages/events/src/lib.rs @@ -1,2 +1,4 @@ +pub mod sender; + /// Target for tracing crate logs. 
pub const EVENTS_TARGET: &str = "EVENTS"; diff --git a/packages/events/src/sender.rs b/packages/events/src/sender.rs new file mode 100644 index 000000000..fe8c6575e --- /dev/null +++ b/packages/events/src/sender.rs @@ -0,0 +1,15 @@ +use futures::future::BoxFuture; +#[cfg(test)] +use mockall::{automock, predicate::str}; +use tokio::sync::broadcast::error::SendError; + +/// Target for tracing crate logs. +pub const EVENTS_TARGET: &str = "EVENTS"; + +/// A trait for sending events. +#[cfg_attr(test, automock(type Event=();))] +pub trait Sender: Sync + Send { + type Event: Send + Clone; + + fn send_event(&self, event: Self::Event) -> BoxFuture<'_, Option>>>; +} From 5ae485d368576db2a29cfd5396160b9e3354bdb0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 28 Apr 2025 11:08:12 +0100 Subject: [PATCH 512/802] feat: [#1480] add event sender based on tokio broadcast channel --- packages/events/src/broadcaster.rs | 36 ++++++++++++++++++++++++++++++ packages/events/src/lib.rs | 1 + 2 files changed, 37 insertions(+) create mode 100644 packages/events/src/broadcaster.rs diff --git a/packages/events/src/broadcaster.rs b/packages/events/src/broadcaster.rs new file mode 100644 index 000000000..8f947cc67 --- /dev/null +++ b/packages/events/src/broadcaster.rs @@ -0,0 +1,36 @@ +use futures::future::BoxFuture; +use futures::FutureExt; +use tokio::sync::broadcast::error::SendError; +use tokio::sync::broadcast::{self}; + +use crate::sender::Sender; + +const CHANNEL_CAPACITY: usize = 32768; + +/// An event sender implementation using a broadcast channel. 
+#[derive(Clone)] +pub struct Broadcaster { + pub(crate) sender: broadcast::Sender, +} + +impl Sender for Broadcaster { + type Event = E; + + fn send_event(&self, event: E) -> BoxFuture<'_, Option>>> { + async move { Some(self.sender.send(event)) }.boxed() + } +} + +impl Default for Broadcaster { + fn default() -> Self { + let (sender, _) = broadcast::channel(CHANNEL_CAPACITY); + Self { sender } + } +} + +impl Broadcaster { + #[must_use] + pub fn subscribe(&self) -> broadcast::Receiver { + self.sender.subscribe() + } +} diff --git a/packages/events/src/lib.rs b/packages/events/src/lib.rs index 8d7ba2f39..154d78f63 100644 --- a/packages/events/src/lib.rs +++ b/packages/events/src/lib.rs @@ -1,3 +1,4 @@ +pub mod broadcaster; pub mod sender; /// Target for tracing crate logs. From 934d45e40a71dfc5e3df038a89e242fbf758abd3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 28 Apr 2025 11:37:44 +0100 Subject: [PATCH 513/802] feat: [#1480] add generic EventBus to events package --- packages/events/src/bus.rs | 44 ++++++++++++++++++++++++++++++++++++++ packages/events/src/lib.rs | 1 + 2 files changed, 45 insertions(+) create mode 100644 packages/events/src/bus.rs diff --git a/packages/events/src/bus.rs b/packages/events/src/bus.rs new file mode 100644 index 000000000..d58c8f76d --- /dev/null +++ b/packages/events/src/bus.rs @@ -0,0 +1,44 @@ +use std::sync::Arc; + +use tokio::sync::broadcast::Receiver; + +use crate::broadcaster::Broadcaster; +use crate::sender; + +pub struct EventBus { + pub enable_sender: bool, + pub broadcaster: Broadcaster, +} + +impl Default for EventBus { + fn default() -> Self { + let enable_sender = true; + let broadcaster = Broadcaster::::default(); + + Self::new(enable_sender, broadcaster) + } +} + +impl EventBus { + #[must_use] + pub fn new(enable_sender: bool, broadcaster: Broadcaster) -> Self { + Self { + enable_sender, + broadcaster, + } + } + + #[must_use] + pub fn sender(&self) -> Arc>>> { + if self.enable_sender { + 
Arc::new(Some(Box::new(self.broadcaster.clone()))) + } else { + Arc::new(None) + } + } + + #[must_use] + pub fn receiver(&self) -> Receiver { + self.broadcaster.subscribe() + } +} diff --git a/packages/events/src/lib.rs b/packages/events/src/lib.rs index 154d78f63..3b02d5d49 100644 --- a/packages/events/src/lib.rs +++ b/packages/events/src/lib.rs @@ -1,4 +1,5 @@ pub mod broadcaster; +pub mod bus; pub mod sender; /// Target for tracing crate logs. From 29b00c800283b55ae936e926b83700d8eea57160 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 28 Apr 2025 12:11:06 +0100 Subject: [PATCH 514/802] refactor: [#1480] use the new events crate in http-tracker-core pkg --- Cargo.lock | 2 + packages/axum-http-tracker-server/Cargo.toml | 1 + .../src/v1/handlers/scrape.rs | 2 +- packages/http-tracker-core/Cargo.toml | 1 + .../http-tracker-core/benches/helpers/util.rs | 9 ++-- packages/http-tracker-core/src/container.rs | 4 +- packages/http-tracker-core/src/event/bus.rs | 43 +------------------ .../http-tracker-core/src/event/sender.rs | 42 ++---------------- .../src/services/announce.rs | 23 +++++----- .../http-tracker-core/src/services/scrape.rs | 25 +++++------ .../src/statistics/services.rs | 2 +- 11 files changed, 37 insertions(+), 117 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e00040f18..2b76781bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -588,6 +588,7 @@ dependencies = [ "tokio", "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-events", "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", @@ -4560,6 +4561,7 @@ dependencies = [ "torrust-server-lib", "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-events", "torrust-tracker-primitives", "torrust-tracker-test-helpers", "tower", diff --git a/packages/axum-http-tracker-server/Cargo.toml b/packages/axum-http-tracker-server/Cargo.toml index 0c64ee986..1b4627d41 100644 --- a/packages/axum-http-tracker-server/Cargo.toml 
+++ b/packages/axum-http-tracker-server/Cargo.toml @@ -45,6 +45,7 @@ serde_bencode = "0" serde_bytes = "0" serde_repr = "0" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } +torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } uuid = { version = "1", features = ["v4"] } zerocopy = "0.7" diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index 7f1247173..330e7c13e 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -107,7 +107,7 @@ mod tests { } struct CoreHttpTrackerServices { - pub http_stats_event_sender: Arc>>, + pub http_stats_event_sender: bittorrent_http_tracker_core::event::sender::Sender, } fn initialize_private_tracker() -> (CoreTrackerServices, CoreHttpTrackerServices) { diff --git a/packages/http-tracker-core/Cargo.toml b/packages/http-tracker-core/Cargo.toml index 8bd54a483..5473c5a25 100644 --- a/packages/http-tracker-core/Cargo.toml +++ b/packages/http-tracker-core/Cargo.toml @@ -25,6 +25,7 @@ thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } tracing = "0" diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index 9d2d80da3..26c59a9d5 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ 
b/packages/http-tracker-core/benches/helpers/util.rs @@ -2,7 +2,6 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; -use bittorrent_http_tracker_core::event; use bittorrent_http_tracker_core::event::bus::EventBus; use bittorrent_http_tracker_core::event::sender::Broadcaster; use bittorrent_http_tracker_core::event::Event; @@ -35,7 +34,7 @@ pub struct CoreTrackerServices { } pub struct CoreHttpTrackerServices { - pub http_stats_event_sender: Arc>>, + pub http_stats_event_sender: bittorrent_http_tracker_core::event::sender::Sender, } pub fn initialize_core_tracker_services() -> (CoreTrackerServices, CoreHttpTrackerServices) { @@ -125,7 +124,9 @@ pub fn sample_info_hash() -> InfoHash { mock! { HttpStatsEventSender {} - impl event::sender::Sender for HttpStatsEventSender { - fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; + impl torrust_tracker_events::sender::Sender for HttpStatsEventSender { + type Event = Event; + + fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; } } diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index c3ed8a0c7..681d4a4f4 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -17,7 +17,7 @@ pub struct HttpTrackerCoreContainer { // `HttpTrackerCoreServices` pub event_bus: Arc, - pub stats_event_sender: Arc>>, + pub stats_event_sender: event::sender::Sender, pub stats_repository: Arc, pub announce_service: Arc, pub scrape_service: Arc, @@ -59,7 +59,7 @@ impl HttpTrackerCoreContainer { pub struct HttpTrackerCoreServices { pub event_bus: Arc, - pub stats_event_sender: Arc>>, + pub stats_event_sender: event::sender::Sender, pub stats_repository: Arc, pub announce_service: Arc, pub scrape_service: Arc, diff --git a/packages/http-tracker-core/src/event/bus.rs b/packages/http-tracker-core/src/event/bus.rs index 
2d22c0a90..02bf71d2f 100644 --- a/packages/http-tracker-core/src/event/bus.rs +++ b/packages/http-tracker-core/src/event/bus.rs @@ -1,44 +1,3 @@ -use std::sync::Arc; - -use tokio::sync::broadcast::Receiver; - -use crate::event::sender::{self, Broadcaster}; use crate::event::Event; -pub struct EventBus { - pub enable_sender: bool, - pub broadcaster: Broadcaster, -} - -impl Default for EventBus { - fn default() -> Self { - let enable_sender = true; - let broadcaster = Broadcaster::default(); - - Self::new(enable_sender, broadcaster) - } -} - -impl EventBus { - #[must_use] - pub fn new(enable_sender: bool, broadcaster: Broadcaster) -> Self { - Self { - enable_sender, - broadcaster, - } - } - - #[must_use] - pub fn sender(&self) -> Arc>> { - if self.enable_sender { - Arc::new(Some(Box::new(self.broadcaster.clone()))) - } else { - Arc::new(None) - } - } - - #[must_use] - pub fn receiver(&self) -> Receiver { - self.broadcaster.subscribe() - } -} +pub type EventBus = torrust_tracker_events::bus::EventBus; diff --git a/packages/http-tracker-core/src/event/sender.rs b/packages/http-tracker-core/src/event/sender.rs index b720926bb..37f64573d 100644 --- a/packages/http-tracker-core/src/event/sender.rs +++ b/packages/http-tracker-core/src/event/sender.rs @@ -1,42 +1,6 @@ -use futures::future::BoxFuture; -use futures::FutureExt; -#[cfg(test)] -use mockall::{automock, predicate::str}; -use tokio::sync::broadcast; -use tokio::sync::broadcast::error::SendError; +use std::sync::Arc; use super::Event; -const CHANNEL_CAPACITY: usize = 32768; - -/// A trait for sending sending. -#[cfg_attr(test, automock)] -pub trait Sender: Sync + Send { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; -} - -/// An event sender implementation using a broadcast channel. 
-#[derive(Clone)] -pub struct Broadcaster { - pub(crate) sender: broadcast::Sender, -} - -impl Sender for Broadcaster { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { - async move { Some(self.sender.send(event)) }.boxed() - } -} - -impl Default for Broadcaster { - fn default() -> Self { - let (sender, _) = broadcast::channel(CHANNEL_CAPACITY); - Self { sender } - } -} - -impl Broadcaster { - #[must_use] - pub fn subscribe(&self) -> broadcast::Receiver { - self.sender.subscribe() - } -} +pub type Sender = Arc>>>; +pub type Broadcaster = torrust_tracker_events::broadcaster::Broadcaster; diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index bef7449b7..feecb03b1 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -39,7 +39,7 @@ pub struct AnnounceService { announce_handler: Arc, authentication_service: Arc, whitelist_authorization: Arc, - opt_http_stats_event_sender: Arc>>, + opt_http_stats_event_sender: event::sender::Sender, } impl AnnounceService { @@ -49,7 +49,7 @@ impl AnnounceService { announce_handler: Arc, authentication_service: Arc, whitelist_authorization: Arc, - opt_http_stats_event_sender: Arc>>, + opt_http_stats_event_sender: event::sender::Sender, ) -> Self { Self { core_config, @@ -228,7 +228,7 @@ mod tests { } struct CoreHttpTrackerServices { - pub http_stats_event_sender: Arc>>, + pub http_stats_event_sender: crate::event::sender::Sender, } fn initialize_core_tracker_services() -> (CoreTrackerServices, CoreHttpTrackerServices) { @@ -302,7 +302,6 @@ mod tests { use mockall::mock; use tokio::sync::broadcast::error::SendError; - use crate::event; use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; use crate::event::Event; @@ -312,8 +311,10 @@ mod tests { mock! 
{ HttpStatsEventSender {} - impl event::sender::Sender for HttpStatsEventSender { - fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; + impl torrust_tracker_events::sender::Sender for HttpStatsEventSender { + type Event = Event; + + fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; } } @@ -331,7 +332,6 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; - use crate::event; use crate::event::test::announce_events_match; use crate::event::{ConnectionContext, Event}; use crate::services::announce::tests::{ @@ -411,8 +411,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let http_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(http_stats_event_sender_mock))); + let http_stats_event_sender: crate::event::sender::Sender = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let (core_tracker_services, mut core_http_tracker_services) = initialize_core_tracker_services(); @@ -489,8 +488,7 @@ mod tests { .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let http_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(http_stats_event_sender_mock))); + let http_stats_event_sender: crate::event::sender::Sender = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let (core_tracker_services, mut core_http_tracker_services) = initialize_core_tracker_services_with_config(&tracker_with_an_ipv6_external_ip()); @@ -537,8 +535,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let http_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(http_stats_event_sender_mock))); + let http_stats_event_sender: crate::event::sender::Sender = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let (core_tracker_services, mut core_http_tracker_services) = initialize_core_tracker_services(); core_http_tracker_services.http_stats_event_sender = http_stats_event_sender; diff --git 
a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index a0ae73d97..6ffc4a5f6 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -21,7 +21,6 @@ use torrust_tracker_configuration::Core; use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::service_binding::ServiceBinding; -use crate::event; use crate::event::{ConnectionContext, Event}; /// The HTTP tracker `scrape` service. @@ -40,7 +39,7 @@ pub struct ScrapeService { core_config: Arc, scrape_handler: Arc, authentication_service: Arc, - opt_http_stats_event_sender: Arc>>, + opt_http_stats_event_sender: crate::event::sender::Sender, } impl ScrapeService { @@ -49,7 +48,7 @@ impl ScrapeService { core_config: Arc, scrape_handler: Arc, authentication_service: Arc, - opt_http_stats_event_sender: Arc>>, + opt_http_stats_event_sender: crate::event::sender::Sender, ) -> Self { Self { core_config, @@ -187,7 +186,7 @@ mod tests { use torrust_tracker_configuration::Configuration; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; - use crate::event::{self, Event}; + use crate::event::Event; use crate::tests::sample_info_hash; struct Container { @@ -239,7 +238,9 @@ mod tests { mock! 
{ HttpStatsEventSender {} - impl event::sender::Sender for HttpStatsEventSender { + impl torrust_tracker_events::sender::Sender for HttpStatsEventSender { + type Event = Event; + fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; } } @@ -259,7 +260,6 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; - use crate::event; use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; use crate::event::{ConnectionContext, Event}; @@ -350,8 +350,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let http_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(http_stats_event_sender_mock))); + let http_stats_event_sender: crate::event::sender::Sender = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let container = initialize_services_with_configuration(&config); @@ -405,8 +404,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let http_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(http_stats_event_sender_mock))); + let http_stats_event_sender: crate::event::sender::Sender = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let container = initialize_services_with_configuration(&config); @@ -452,7 +450,6 @@ mod tests { use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use torrust_tracker_test_helpers::configuration; - use crate::event; use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; use crate::event::{ConnectionContext, Event}; @@ -537,8 +534,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let http_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(http_stats_event_sender_mock))); + let http_stats_event_sender: crate::event::sender::Sender = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let peer_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); @@ -592,8 +588,7 @@ mod tests { })) 
.times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let http_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(http_stats_event_sender_mock))); + let http_stats_event_sender: crate::event::sender::Sender = Arc::new(Some(Box::new(http_stats_event_sender_mock))); let peer_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index 7f3c365d4..e2fbfedd0 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -9,7 +9,7 @@ //! //! The factory function builds two structs: //! -//! - An statistics event [`Sender`](crate::statistics::event::sender::Sender) +//! - An statistics event [`Sender`](torrust_tracker_events::sender::Sender) //! - An statistics [`Repository`] //! //! ```text From efed46c6b53fe453e84095f7eaa724f5a8a72aeb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 28 Apr 2025 12:53:15 +0100 Subject: [PATCH 515/802] refactor: [#1480] use the new events crate in udp-tracker-core pkg --- Cargo.lock | 2 + packages/udp-tracker-core/Cargo.toml | 1 + .../udp-tracker-core/benches/helpers/utils.rs | 7 +-- packages/udp-tracker-core/src/container.rs | 4 +- packages/udp-tracker-core/src/event/bus.rs | 43 +------------------ packages/udp-tracker-core/src/event/sender.rs | 42 ++---------------- .../udp-tracker-core/src/services/announce.rs | 6 +-- .../udp-tracker-core/src/services/connect.rs | 14 +++--- packages/udp-tracker-core/src/services/mod.rs | 7 +-- .../udp-tracker-core/src/services/scrape.rs | 9 ++-- packages/udp-tracker-server/Cargo.toml | 1 + .../src/handlers/announce.rs | 2 +- .../src/handlers/connect.rs | 4 +- .../udp-tracker-server/src/handlers/mod.rs | 6 ++- 14 files changed, 36 insertions(+), 112 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2b76781bc..f5cba3708 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -703,6 +703,7 @@ dependencies = [ "tokio", "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-events", "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", @@ -4874,6 +4875,7 @@ dependencies = [ "torrust-server-lib", "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-events", "torrust-tracker-located-error", "torrust-tracker-metrics", "torrust-tracker-primitives", diff --git a/packages/udp-tracker-core/Cargo.toml b/packages/udp-tracker-core/Cargo.toml index 0354777db..6cf250074 100644 --- a/packages/udp-tracker-core/Cargo.toml +++ b/packages/udp-tracker-core/Cargo.toml @@ -30,6 +30,7 @@ thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync", "time"] } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } tracing = "0" diff --git a/packages/udp-tracker-core/benches/helpers/utils.rs b/packages/udp-tracker-core/benches/helpers/utils.rs index f6c2f6fad..e560e36fe 100644 --- a/packages/udp-tracker-core/benches/helpers/utils.rs +++ b/packages/udp-tracker-core/benches/helpers/utils.rs @@ -1,6 +1,5 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use bittorrent_udp_tracker_core::event; use bittorrent_udp_tracker_core::event::Event; use futures::future::BoxFuture; use mockall::mock; @@ -20,7 +19,9 @@ pub(crate) fn sample_issue_time() -> f64 { mock! 
{ pub(crate) UdpCoreStatsEventSender {} - impl event::sender::Sender for UdpCoreStatsEventSender { - fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; + impl torrust_tracker_events::sender::Sender for UdpCoreStatsEventSender { + type Event = Event; + + fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; } } diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index c1dd0461f..98c01a703 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -20,7 +20,7 @@ pub struct UdpTrackerCoreContainer { // `UdpTrackerCoreServices` pub event_bus: Arc, - pub stats_event_sender: Arc>>, + pub stats_event_sender: crate::event::sender::Sender, pub stats_repository: Arc, pub ban_service: Arc>, pub connect_service: Arc, @@ -69,7 +69,7 @@ impl UdpTrackerCoreContainer { pub struct UdpTrackerCoreServices { pub event_bus: Arc, - pub stats_event_sender: Arc>>, + pub stats_event_sender: crate::event::sender::Sender, pub stats_repository: Arc, pub ban_service: Arc>, pub connect_service: Arc, diff --git a/packages/udp-tracker-core/src/event/bus.rs b/packages/udp-tracker-core/src/event/bus.rs index 2d22c0a90..02bf71d2f 100644 --- a/packages/udp-tracker-core/src/event/bus.rs +++ b/packages/udp-tracker-core/src/event/bus.rs @@ -1,44 +1,3 @@ -use std::sync::Arc; - -use tokio::sync::broadcast::Receiver; - -use crate::event::sender::{self, Broadcaster}; use crate::event::Event; -pub struct EventBus { - pub enable_sender: bool, - pub broadcaster: Broadcaster, -} - -impl Default for EventBus { - fn default() -> Self { - let enable_sender = true; - let broadcaster = Broadcaster::default(); - - Self::new(enable_sender, broadcaster) - } -} - -impl EventBus { - #[must_use] - pub fn new(enable_sender: bool, broadcaster: Broadcaster) -> Self { - Self { - enable_sender, - broadcaster, - } - } - - #[must_use] - pub fn sender(&self) -> Arc>> { - if self.enable_sender { - 
Arc::new(Some(Box::new(self.broadcaster.clone()))) - } else { - Arc::new(None) - } - } - - #[must_use] - pub fn receiver(&self) -> Receiver { - self.broadcaster.subscribe() - } -} +pub type EventBus = torrust_tracker_events::bus::EventBus; diff --git a/packages/udp-tracker-core/src/event/sender.rs b/packages/udp-tracker-core/src/event/sender.rs index b720926bb..37f64573d 100644 --- a/packages/udp-tracker-core/src/event/sender.rs +++ b/packages/udp-tracker-core/src/event/sender.rs @@ -1,42 +1,6 @@ -use futures::future::BoxFuture; -use futures::FutureExt; -#[cfg(test)] -use mockall::{automock, predicate::str}; -use tokio::sync::broadcast; -use tokio::sync::broadcast::error::SendError; +use std::sync::Arc; use super::Event; -const CHANNEL_CAPACITY: usize = 32768; - -/// A trait for sending sending. -#[cfg_attr(test, automock)] -pub trait Sender: Sync + Send { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; -} - -/// An event sender implementation using a broadcast channel. 
-#[derive(Clone)] -pub struct Broadcaster { - pub(crate) sender: broadcast::Sender, -} - -impl Sender for Broadcaster { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { - async move { Some(self.sender.send(event)) }.boxed() - } -} - -impl Default for Broadcaster { - fn default() -> Self { - let (sender, _) = broadcast::channel(CHANNEL_CAPACITY); - Self { sender } - } -} - -impl Broadcaster { - #[must_use] - pub fn subscribe(&self) -> broadcast::Receiver { - self.sender.subscribe() - } -} +pub type Sender = Arc>>>; +pub type Broadcaster = torrust_tracker_events::broadcaster::Broadcaster; diff --git a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index def24ffd7..481c3d7ca 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -22,7 +22,7 @@ use torrust_tracker_primitives::peer::PeerAnnouncement; use torrust_tracker_primitives::service_binding::ServiceBinding; use crate::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieError}; -use crate::event::{self, ConnectionContext, Event}; +use crate::event::{ConnectionContext, Event}; /// The `AnnounceService` is responsible for handling the `announce` requests. 
/// @@ -32,7 +32,7 @@ use crate::event::{self, ConnectionContext, Event}; pub struct AnnounceService { announce_handler: Arc, whitelist_authorization: Arc, - opt_udp_core_stats_event_sender: Arc>>, + opt_udp_core_stats_event_sender: crate::event::sender::Sender, } impl AnnounceService { @@ -40,7 +40,7 @@ impl AnnounceService { pub fn new( announce_handler: Arc, whitelist_authorization: Arc, - opt_udp_core_stats_event_sender: Arc>>, + opt_udp_core_stats_event_sender: crate::event::sender::Sender, ) -> Self { Self { announce_handler, diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index b40af901c..a69c84686 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -2,25 +2,24 @@ //! //! The service is responsible for handling the `connect` requests. use std::net::SocketAddr; -use std::sync::Arc; use aquatic_udp_protocol::ConnectionId; use torrust_tracker_primitives::service_binding::ServiceBinding; use crate::connection_cookie::{gen_remote_fingerprint, make}; -use crate::event::{self, ConnectionContext, Event}; +use crate::event::{ConnectionContext, Event}; /// The `ConnectService` is responsible for handling the `connect` requests. /// /// It is responsible for generating the connection cookie and sending the /// appropriate statistics events. 
pub struct ConnectService { - pub opt_udp_core_stats_event_sender: Arc>>, + pub opt_udp_core_stats_event_sender: crate::event::sender::Sender, } impl ConnectService { #[must_use] - pub fn new(opt_udp_core_stats_event_sender: Arc>>) -> Self { + pub fn new(opt_udp_core_stats_event_sender: crate::event::sender::Sender) -> Self { Self { opt_udp_core_stats_event_sender, } @@ -65,7 +64,6 @@ mod tests { use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::connection_cookie::make; - use crate::event; use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; use crate::event::{ConnectionContext, Event}; @@ -153,8 +151,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let opt_udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + let opt_udp_stats_event_sender: crate::event::sender::Sender = Arc::new(Some(Box::new(udp_stats_event_sender_mock))); let connect_service = Arc::new(ConnectService::new(opt_udp_stats_event_sender)); @@ -177,8 +174,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let opt_udp_stats_event_sender: Arc>> = - Arc::new(Some(Box::new(udp_stats_event_sender_mock))); + let opt_udp_stats_event_sender: crate::event::sender::Sender = Arc::new(Some(Box::new(udp_stats_event_sender_mock))); let connect_service = Arc::new(ConnectService::new(opt_udp_stats_event_sender)); diff --git a/packages/udp-tracker-core/src/services/mod.rs b/packages/udp-tracker-core/src/services/mod.rs index ac82d71e8..8cbae4584 100644 --- a/packages/udp-tracker-core/src/services/mod.rs +++ b/packages/udp-tracker-core/src/services/mod.rs @@ -13,7 +13,6 @@ pub(crate) mod tests { use tokio::sync::broadcast::error::SendError; use crate::connection_cookie::gen_remote_fingerprint; - use crate::event; use crate::event::Event; pub(crate) fn sample_ipv4_remote_addr() -> SocketAddr { @@ -46,8 +45,10 @@ pub(crate) mod tests { mock! 
{ pub(crate) UdpCoreStatsEventSender {} - impl event::sender::Sender for UdpCoreStatsEventSender { - fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; + impl torrust_tracker_events::sender::Sender for UdpCoreStatsEventSender { + type Event = Event; + + fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; } } } diff --git a/packages/udp-tracker-core/src/services/scrape.rs b/packages/udp-tracker-core/src/services/scrape.rs index 5b2cf7d46..14ba95834 100644 --- a/packages/udp-tracker-core/src/services/scrape.rs +++ b/packages/udp-tracker-core/src/services/scrape.rs @@ -19,7 +19,7 @@ use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::service_binding::ServiceBinding; use crate::connection_cookie::{check, gen_remote_fingerprint, ConnectionCookieError}; -use crate::event::{self, ConnectionContext, Event}; +use crate::event::{ConnectionContext, Event}; /// The `ScrapeService` is responsible for handling the `scrape` requests. /// @@ -28,15 +28,12 @@ use crate::event::{self, ConnectionContext, Event}; /// - The number of UDP `scrape` requests handled by the UDP tracker. 
pub struct ScrapeService { scrape_handler: Arc, - opt_udp_stats_event_sender: Arc>>, + opt_udp_stats_event_sender: crate::event::sender::Sender, } impl ScrapeService { #[must_use] - pub fn new( - scrape_handler: Arc, - opt_udp_stats_event_sender: Arc>>, - ) -> Self { + pub fn new(scrape_handler: Arc, opt_udp_stats_event_sender: crate::event::sender::Sender) -> Self { Self { scrape_handler, opt_udp_stats_event_sender, diff --git a/packages/udp-tracker-server/Cargo.toml b/packages/udp-tracker-server/Cargo.toml index 23719d141..4d0296461 100644 --- a/packages/udp-tracker-server/Cargo.toml +++ b/packages/udp-tracker-server/Cargo.toml @@ -29,6 +29,7 @@ tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index f8b2092b5..2f6b71a6a 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -888,7 +888,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let udp_core_stats_event_sender: Arc>> = + let udp_core_stats_event_sender: bittorrent_udp_tracker_core::event::sender::Sender = Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); diff --git 
a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index 85bfda680..a950e2c69 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -210,7 +210,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let udp_core_stats_event_sender: Arc>> = + let udp_core_stats_event_sender: bittorrent_udp_tracker_core::event::sender::Sender = Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); @@ -252,7 +252,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let udp_core_stats_event_sender: Arc>> = + let udp_core_stats_event_sender: bittorrent_udp_tracker_core::event::sender::Sender = Arc::new(Some(Box::new(udp_core_stats_event_sender_mock))); let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index dde8f0cc8..6a78d881e 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -426,8 +426,10 @@ pub(crate) mod tests { mock! 
{ pub(crate) UdpCoreStatsEventSender {} - impl core_event::sender::Sender for UdpCoreStatsEventSender { - fn send_event(&self, event: core_event::Event) -> BoxFuture<'static,Option > > > ; + impl torrust_tracker_events::sender::Sender for UdpCoreStatsEventSender { + type Event = core_event::Event; + + fn send_event(&self, event: core_event::Event) -> BoxFuture<'static,Option > > > ; } } From e434e10bd8a355e657609e0da5f8845a811730f5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 28 Apr 2025 16:24:36 +0100 Subject: [PATCH 516/802] refactor: [#1480] use the new events crate in udp-tracker-server pkg --- packages/udp-tracker-server/src/container.rs | 4 +- packages/udp-tracker-server/src/event/bus.rs | 43 +------------------ .../udp-tracker-server/src/event/sender.rs | 42 ++---------------- .../src/handlers/announce.rs | 16 +++---- .../src/handlers/connect.rs | 10 ++--- .../udp-tracker-server/src/handlers/error.rs | 5 +-- .../udp-tracker-server/src/handlers/mod.rs | 8 ++-- .../udp-tracker-server/src/handlers/scrape.rs | 11 +++-- 8 files changed, 31 insertions(+), 108 deletions(-) diff --git a/packages/udp-tracker-server/src/container.rs b/packages/udp-tracker-server/src/container.rs index 121737d92..a0bc8f35b 100644 --- a/packages/udp-tracker-server/src/container.rs +++ b/packages/udp-tracker-server/src/container.rs @@ -10,7 +10,7 @@ use crate::statistics::repository::Repository; pub struct UdpTrackerServerContainer { pub event_bus: Arc, - pub stats_event_sender: Arc>>, + pub stats_event_sender: crate::event::sender::Sender, pub stats_repository: Arc, } @@ -29,7 +29,7 @@ impl UdpTrackerServerContainer { pub struct UdpTrackerServerServices { pub event_bus: Arc, - pub stats_event_sender: Arc>>, + pub stats_event_sender: crate::event::sender::Sender, pub stats_repository: Arc, } diff --git a/packages/udp-tracker-server/src/event/bus.rs b/packages/udp-tracker-server/src/event/bus.rs index 2d22c0a90..02bf71d2f 100644 --- a/packages/udp-tracker-server/src/event/bus.rs 
+++ b/packages/udp-tracker-server/src/event/bus.rs @@ -1,44 +1,3 @@ -use std::sync::Arc; - -use tokio::sync::broadcast::Receiver; - -use crate::event::sender::{self, Broadcaster}; use crate::event::Event; -pub struct EventBus { - pub enable_sender: bool, - pub broadcaster: Broadcaster, -} - -impl Default for EventBus { - fn default() -> Self { - let enable_sender = true; - let broadcaster = Broadcaster::default(); - - Self::new(enable_sender, broadcaster) - } -} - -impl EventBus { - #[must_use] - pub fn new(enable_sender: bool, broadcaster: Broadcaster) -> Self { - Self { - enable_sender, - broadcaster, - } - } - - #[must_use] - pub fn sender(&self) -> Arc>> { - if self.enable_sender { - Arc::new(Some(Box::new(self.broadcaster.clone()))) - } else { - Arc::new(None) - } - } - - #[must_use] - pub fn receiver(&self) -> Receiver { - self.broadcaster.subscribe() - } -} +pub type EventBus = torrust_tracker_events::bus::EventBus; diff --git a/packages/udp-tracker-server/src/event/sender.rs b/packages/udp-tracker-server/src/event/sender.rs index b720926bb..37f64573d 100644 --- a/packages/udp-tracker-server/src/event/sender.rs +++ b/packages/udp-tracker-server/src/event/sender.rs @@ -1,42 +1,6 @@ -use futures::future::BoxFuture; -use futures::FutureExt; -#[cfg(test)] -use mockall::{automock, predicate::str}; -use tokio::sync::broadcast; -use tokio::sync::broadcast::error::SendError; +use std::sync::Arc; use super::Event; -const CHANNEL_CAPACITY: usize = 32768; - -/// A trait for sending sending. -#[cfg_attr(test, automock)] -pub trait Sender: Sync + Send { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; -} - -/// An event sender implementation using a broadcast channel. 
-#[derive(Clone)] -pub struct Broadcaster { - pub(crate) sender: broadcast::Sender, -} - -impl Sender for Broadcaster { - fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { - async move { Some(self.sender.send(event)) }.boxed() - } -} - -impl Default for Broadcaster { - fn default() -> Self { - let (sender, _) = broadcast::channel(CHANNEL_CAPACITY); - Self { sender } - } -} - -impl Broadcaster { - #[must_use] - pub fn subscribe(&self) -> broadcast::Receiver { - self.sender.subscribe() - } -} +pub type Sender = Arc>>>; +pub type Broadcaster = torrust_tracker_events::broadcaster::Broadcaster; diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 2f6b71a6a..2fbddb544 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -16,7 +16,7 @@ use tracing::{instrument, Level}; use zerocopy::network_endian::I32; use crate::error::Error; -use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; +use crate::event::{ConnectionContext, Event, UdpRequestKind}; /// It handles the `Announce` request. 
/// @@ -30,7 +30,7 @@ pub async fn handle_announce( server_service_binding: ServiceBinding, request: &AnnounceRequest, core_config: &Arc, - opt_udp_server_stats_event_sender: &Arc>>, + opt_udp_server_stats_event_sender: &crate::event::sender::Sender, cookie_valid_range: Range, ) -> Result { tracing::Span::current() @@ -208,7 +208,7 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; - use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; + use crate::event::{ConnectionContext, Event, UdpRequestKind}; use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::handlers::handle_announce; use crate::handlers::tests::{ @@ -434,7 +434,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let udp_server_stats_event_sender: Arc>> = + let udp_server_stats_event_sender: crate::event::sender::Sender = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); let (core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = @@ -540,7 +540,7 @@ mod tests { use torrust_tracker_configuration::Core; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; - use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; + use crate::event::{ConnectionContext, Event, UdpRequestKind}; use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::handlers::handle_announce; use crate::handlers::tests::{ @@ -788,7 +788,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let udp_server_stats_event_sender: Arc>> = + let udp_server_stats_event_sender: crate::event::sender::Sender = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); let (core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = @@ -829,7 +829,7 @@ mod tests { use mockall::predicate::{self, eq}; use 
torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; - use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; + use crate::event::{ConnectionContext, Event, UdpRequestKind}; use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::handlers::handle_announce; use crate::handlers::tests::{ @@ -900,7 +900,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let udp_server_stats_event_sender: Arc>> = + let udp_server_stats_event_sender: crate::event::sender::Sender = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); let announce_handler = Arc::new(AnnounceHandler::new( diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index a950e2c69..9f00298eb 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -7,7 +7,7 @@ use bittorrent_udp_tracker_core::services::connect::ConnectService; use torrust_tracker_primitives::service_binding::ServiceBinding; use tracing::{instrument, Level}; -use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; +use crate::event::{ConnectionContext, Event, UdpRequestKind}; /// It handles the `Connect` request. 
#[instrument(fields(transaction_id), skip(connect_service, opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] @@ -16,7 +16,7 @@ pub async fn handle_connect( server_service_binding: ServiceBinding, request: &ConnectRequest, connect_service: &Arc, - opt_udp_server_stats_event_sender: &Arc>>, + opt_udp_server_stats_event_sender: &crate::event::sender::Sender, cookie_issue_time: f64, ) -> Response { tracing::Span::current().record("transaction_id", request.transaction_id.0.to_string()); @@ -65,7 +65,7 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; - use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; + use crate::event::{ConnectionContext, Event, UdpRequestKind}; use crate::handlers::handle_connect; use crate::handlers::tests::{ sample_ipv4_remote_addr, sample_ipv4_remote_addr_fingerprint, sample_ipv4_socket_address, sample_ipv6_remote_addr, @@ -222,7 +222,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let udp_server_stats_event_sender: Arc>> = + let udp_server_stats_event_sender: crate::event::sender::Sender = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); @@ -264,7 +264,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let udp_server_stats_event_sender: Arc>> = + let udp_server_stats_event_sender: crate::event::sender::Sender = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); diff --git a/packages/udp-tracker-server/src/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs index 9d9ee8b1d..04b8d073b 100644 --- a/packages/udp-tracker-server/src/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -1,7 +1,6 @@ //! UDP tracker error handling. 
use std::net::SocketAddr; use std::ops::Range; -use std::sync::Arc; use aquatic_udp_protocol::{ErrorResponse, RequestParseError, Response, TransactionId}; use bittorrent_udp_tracker_core::connection_cookie::{check, gen_remote_fingerprint}; @@ -12,7 +11,7 @@ use uuid::Uuid; use zerocopy::network_endian::I32; use crate::error::Error; -use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; +use crate::event::{ConnectionContext, Event, UdpRequestKind}; #[allow(clippy::too_many_arguments)] #[instrument(fields(transaction_id), skip(opt_udp_server_stats_event_sender), ret(level = Level::TRACE))] @@ -21,7 +20,7 @@ pub async fn handle_error( client_socket_addr: SocketAddr, server_service_binding: ServiceBinding, request_id: Uuid, - opt_udp_server_stats_event_sender: &Arc>>, + opt_udp_server_stats_event_sender: &crate::event::sender::Sender, cookie_valid_range: Range, e: &Error, transaction_id: Option, diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 6a78d881e..18e85e0ce 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -247,7 +247,7 @@ pub(crate) mod tests { } pub(crate) struct ServerUdpTrackerServices { - pub udp_server_stats_event_sender: Arc>>, + pub udp_server_stats_event_sender: crate::event::sender::Sender, } fn default_testing_tracker_configuration() -> Configuration { @@ -435,8 +435,10 @@ pub(crate) mod tests { mock! 
{ pub(crate) UdpServerStatsEventSender {} - impl server_event::sender::Sender for UdpServerStatsEventSender { - fn send_event(&self, event: server_event::Event) -> BoxFuture<'static,Option > > > ; + impl torrust_tracker_events::sender::Sender for UdpServerStatsEventSender { + type Event = server_event::Event; + + fn send_event(&self, event: server_event::Event) -> BoxFuture<'static,Option > > > ; } } } diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index 5774bc8e6..b7be10f29 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -14,7 +14,7 @@ use tracing::{instrument, Level}; use zerocopy::network_endian::I32; use crate::error::Error; -use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; +use crate::event::{ConnectionContext, Event, UdpRequestKind}; /// It handles the `Scrape` request. /// @@ -27,7 +27,7 @@ pub async fn handle_scrape( client_socket_addr: SocketAddr, server_service_binding: ServiceBinding, request: &ScrapeRequest, - opt_udp_server_stats_event_sender: &Arc>>, + opt_udp_server_stats_event_sender: &crate::event::sender::Sender, cookie_valid_range: Range, ) -> Result { tracing::Span::current() @@ -363,7 +363,6 @@ mod tests { use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::sample_scrape_request; - use crate::event; use crate::event::{ConnectionContext, Event, UdpRequestKind}; use crate::handlers::handle_scrape; use crate::handlers::tests::{ @@ -386,7 +385,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let udp_server_stats_event_sender: Arc>> = + let udp_server_stats_event_sender: crate::event::sender::Sender = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); let (_core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = @@ -414,7 +413,7 @@ mod tests { use 
torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use super::sample_scrape_request; - use crate::event::{self, ConnectionContext, Event, UdpRequestKind}; + use crate::event::{ConnectionContext, Event, UdpRequestKind}; use crate::handlers::handle_scrape; use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, sample_cookie_valid_range, @@ -436,7 +435,7 @@ mod tests { })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - let udp_server_stats_event_sender: Arc>> = + let udp_server_stats_event_sender: crate::event::sender::Sender = Arc::new(Some(Box::new(udp_server_stats_event_sender_mock))); let (_core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = From 540520c12dd1619dcf67131af8aab648c5760789 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 28 Apr 2025 16:53:12 +0100 Subject: [PATCH 517/802] chore: remove unneded explicit pkg inclusion in workspace --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 9b348bfdc..9243ed483 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -69,7 +69,7 @@ torrust-rest-tracker-api-client = { version = "3.0.0-develop", path = "packages/ torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "packages/test-helpers" } [workspace] -members = ["console/tracker-client", "packages/events"] +members = ["console/tracker-client"] [profile.dev] debug = 1 From d4343c02e9e0b0f95536164827dfbc016368d243 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 28 Apr 2025 17:59:39 +0100 Subject: [PATCH 518/802] fix: [#1485] remove duplicate const --- packages/events/src/sender.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/packages/events/src/sender.rs b/packages/events/src/sender.rs index fe8c6575e..0d901cb82 100644 --- a/packages/events/src/sender.rs +++ b/packages/events/src/sender.rs @@ -3,9 +3,6 @@ use futures::future::BoxFuture; use mockall::{automock, 
predicate::str}; use tokio::sync::broadcast::error::SendError; -/// Target for tracing crate logs. -pub const EVENTS_TARGET: &str = "EVENTS"; - /// A trait for sending events. #[cfg_attr(test, automock(type Event=();))] pub trait Sender: Sync + Send { From e3703c10660c88ee3b2399a69964b626186e8b27 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 28 Apr 2025 18:47:52 +0100 Subject: [PATCH 519/802] feat: [#1485] add Receiver trait to events package --- packages/events/src/broadcaster.rs | 27 ++++++++++++------- packages/events/src/bus.rs | 8 +++--- packages/events/src/lib.rs | 1 + packages/events/src/receiver.rs | 12 +++++++++ packages/http-tracker-core/src/event/mod.rs | 1 + .../http-tracker-core/src/event/receiver.rs | 3 +++ .../src/statistics/event/listener.rs | 8 +++--- packages/udp-tracker-core/src/event/mod.rs | 1 + .../udp-tracker-core/src/event/receiver.rs | 3 +++ .../src/statistics/event/listener.rs | 8 +++--- packages/udp-tracker-server/src/event/mod.rs | 1 + .../udp-tracker-server/src/event/receiver.rs | 3 +++ .../src/statistics/event/listener.rs | 8 +++--- 13 files changed, 58 insertions(+), 26 deletions(-) create mode 100644 packages/events/src/receiver.rs create mode 100644 packages/http-tracker-core/src/event/receiver.rs create mode 100644 packages/udp-tracker-core/src/event/receiver.rs create mode 100644 packages/udp-tracker-server/src/event/receiver.rs diff --git a/packages/events/src/broadcaster.rs b/packages/events/src/broadcaster.rs index 8f947cc67..137e9680c 100644 --- a/packages/events/src/broadcaster.rs +++ b/packages/events/src/broadcaster.rs @@ -1,8 +1,9 @@ use futures::future::BoxFuture; use futures::FutureExt; -use tokio::sync::broadcast::error::SendError; +use tokio::sync::broadcast::error::{RecvError, SendError}; use tokio::sync::broadcast::{self}; +use crate::receiver::Receiver; use crate::sender::Sender; const CHANNEL_CAPACITY: usize = 32768; @@ -13,14 +14,6 @@ pub struct Broadcaster { pub(crate) sender: broadcast::Sender, } -impl 
Sender for Broadcaster { - type Event = E; - - fn send_event(&self, event: E) -> BoxFuture<'_, Option>>> { - async move { Some(self.sender.send(event)) }.boxed() - } -} - impl Default for Broadcaster { fn default() -> Self { let (sender, _) = broadcast::channel(CHANNEL_CAPACITY); @@ -34,3 +27,19 @@ impl Broadcaster { self.sender.subscribe() } } + +impl Sender for Broadcaster { + type Event = E; + + fn send_event(&self, event: E) -> BoxFuture<'_, Option>>> { + async move { Some(self.sender.send(event)) }.boxed() + } +} + +impl Receiver for broadcast::Receiver { + type Event = E; + + fn recv(&mut self) -> BoxFuture<'_, Result> { + async move { self.recv().await }.boxed() + } +} diff --git a/packages/events/src/bus.rs b/packages/events/src/bus.rs index d58c8f76d..b714741b2 100644 --- a/packages/events/src/bus.rs +++ b/packages/events/src/bus.rs @@ -1,9 +1,7 @@ use std::sync::Arc; -use tokio::sync::broadcast::Receiver; - use crate::broadcaster::Broadcaster; -use crate::sender; +use crate::{receiver, sender}; pub struct EventBus { pub enable_sender: bool, @@ -38,7 +36,7 @@ impl EventBus { } #[must_use] - pub fn receiver(&self) -> Receiver { - self.broadcaster.subscribe() + pub fn receiver(&self) -> Box> { + Box::new(self.broadcaster.subscribe()) } } diff --git a/packages/events/src/lib.rs b/packages/events/src/lib.rs index 3b02d5d49..d933b304c 100644 --- a/packages/events/src/lib.rs +++ b/packages/events/src/lib.rs @@ -1,5 +1,6 @@ pub mod broadcaster; pub mod bus; +pub mod receiver; pub mod sender; /// Target for tracing crate logs. diff --git a/packages/events/src/receiver.rs b/packages/events/src/receiver.rs new file mode 100644 index 000000000..bdbd91616 --- /dev/null +++ b/packages/events/src/receiver.rs @@ -0,0 +1,12 @@ +use futures::future::BoxFuture; +#[cfg(test)] +use mockall::{automock, predicate::str}; +use tokio::sync::broadcast::error::RecvError; + +/// A trait for receiving events. 
+#[cfg_attr(test, automock(type Event=();))] +pub trait Receiver: Sync + Send { + type Event: Send + Clone; + + fn recv(&mut self) -> BoxFuture<'_, Result>; +} diff --git a/packages/http-tracker-core/src/event/mod.rs b/packages/http-tracker-core/src/event/mod.rs index 5b1c64dca..ad62b0fdc 100644 --- a/packages/http-tracker-core/src/event/mod.rs +++ b/packages/http-tracker-core/src/event/mod.rs @@ -1,4 +1,5 @@ pub mod bus; +pub mod receiver; pub mod sender; use std::net::{IpAddr, SocketAddr}; diff --git a/packages/http-tracker-core/src/event/receiver.rs b/packages/http-tracker-core/src/event/receiver.rs new file mode 100644 index 000000000..8d8e94b64 --- /dev/null +++ b/packages/http-tracker-core/src/event/receiver.rs @@ -0,0 +1,3 @@ +use super::Event; + +pub type Receiver = Box>; diff --git a/packages/http-tracker-core/src/statistics/event/listener.rs b/packages/http-tracker-core/src/statistics/event/listener.rs index 98711f2f5..333f4e588 100644 --- a/packages/http-tracker-core/src/statistics/event/listener.rs +++ b/packages/http-tracker-core/src/statistics/event/listener.rs @@ -1,16 +1,16 @@ use std::sync::Arc; -use tokio::sync::broadcast::{self, Receiver}; +use tokio::sync::broadcast::{self}; use tokio::task::JoinHandle; use torrust_tracker_clock::clock::Time; use super::handler::handle_event; -use crate::event::Event; +use crate::event::receiver::Receiver; use crate::statistics::repository::Repository; use crate::{CurrentClock, HTTP_TRACKER_LOG_TARGET}; #[must_use] -pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { +pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { let stats_repository = repository.clone(); tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting HTTP tracker core event listener"); @@ -22,7 +22,7 @@ pub fn run_event_listener(receiver: Receiver, repository: &Arc, stats_repository: Arc) { +async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc) { loop { match 
receiver.recv().await { Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, diff --git a/packages/udp-tracker-core/src/event/mod.rs b/packages/udp-tracker-core/src/event/mod.rs index babc05fcc..9bdbd7449 100644 --- a/packages/udp-tracker-core/src/event/mod.rs +++ b/packages/udp-tracker-core/src/event/mod.rs @@ -1,4 +1,5 @@ pub mod bus; +pub mod receiver; pub mod sender; use std::net::SocketAddr; diff --git a/packages/udp-tracker-core/src/event/receiver.rs b/packages/udp-tracker-core/src/event/receiver.rs new file mode 100644 index 000000000..8d8e94b64 --- /dev/null +++ b/packages/udp-tracker-core/src/event/receiver.rs @@ -0,0 +1,3 @@ +use super::Event; + +pub type Receiver = Box>; diff --git a/packages/udp-tracker-core/src/statistics/event/listener.rs b/packages/udp-tracker-core/src/statistics/event/listener.rs index 5aa510d04..a8a491e37 100644 --- a/packages/udp-tracker-core/src/statistics/event/listener.rs +++ b/packages/udp-tracker-core/src/statistics/event/listener.rs @@ -1,16 +1,16 @@ use std::sync::Arc; -use tokio::sync::broadcast::{self, Receiver}; +use tokio::sync::broadcast::{self}; use tokio::task::JoinHandle; use torrust_tracker_clock::clock::Time; use super::handler::handle_event; -use crate::event::Event; +use crate::event::receiver::Receiver; use crate::statistics::repository::Repository; use crate::{CurrentClock, UDP_TRACKER_LOG_TARGET}; #[must_use] -pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { +pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { let stats_repository = repository.clone(); tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker core event listener"); @@ -22,7 +22,7 @@ pub fn run_event_listener(receiver: Receiver, repository: &Arc, stats_repository: Arc) { +async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc) { loop { match receiver.recv().await { Ok(event) => handle_event(event, &stats_repository, 
CurrentClock::now()).await, diff --git a/packages/udp-tracker-server/src/event/mod.rs b/packages/udp-tracker-server/src/event/mod.rs index a2140a11c..9ebbc18ef 100644 --- a/packages/udp-tracker-server/src/event/mod.rs +++ b/packages/udp-tracker-server/src/event/mod.rs @@ -1,4 +1,5 @@ pub mod bus; +pub mod receiver; pub mod sender; use std::fmt; diff --git a/packages/udp-tracker-server/src/event/receiver.rs b/packages/udp-tracker-server/src/event/receiver.rs new file mode 100644 index 000000000..8d8e94b64 --- /dev/null +++ b/packages/udp-tracker-server/src/event/receiver.rs @@ -0,0 +1,3 @@ +use super::Event; + +pub type Receiver = Box>; diff --git a/packages/udp-tracker-server/src/statistics/event/listener.rs b/packages/udp-tracker-server/src/statistics/event/listener.rs index 8e8cc5195..386a4fc33 100644 --- a/packages/udp-tracker-server/src/statistics/event/listener.rs +++ b/packages/udp-tracker-server/src/statistics/event/listener.rs @@ -1,17 +1,17 @@ use std::sync::Arc; use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; -use tokio::sync::broadcast::{self, Receiver}; +use tokio::sync::broadcast::{self}; use tokio::task::JoinHandle; use torrust_tracker_clock::clock::Time; use super::handler::handle_event; -use crate::event::Event; +use crate::event::receiver::Receiver; use crate::statistics::repository::Repository; use crate::CurrentClock; #[must_use] -pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { +pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { let stats_repository = repository.clone(); tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker server event listener"); @@ -23,7 +23,7 @@ pub fn run_event_listener(receiver: Receiver, repository: &Arc, stats_repository: Arc) { +async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc) { loop { match receiver.recv().await { Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, From 
3057f486d3a58ed5e4150790433ac5568ec3d55f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 29 Apr 2025 08:07:31 +0100 Subject: [PATCH 520/802] refactor: [#1485] decouple events traits from tokio broadcast channel implementation --- packages/events/src/broadcaster.rs | 24 ++++++++++++---- packages/events/src/receiver.rs | 28 ++++++++++++++++++- packages/events/src/sender.rs | 15 +++++++++- .../http-tracker-core/benches/helpers/util.rs | 2 +- .../src/services/announce.rs | 2 +- .../http-tracker-core/src/services/scrape.rs | 2 +- .../src/statistics/event/listener.rs | 6 ++-- .../udp-tracker-core/benches/helpers/utils.rs | 2 +- packages/udp-tracker-core/src/services/mod.rs | 2 +- .../src/statistics/event/listener.rs | 6 ++-- .../udp-tracker-server/src/handlers/mod.rs | 2 +- .../src/statistics/event/listener.rs | 6 ++-- 12 files changed, 75 insertions(+), 22 deletions(-) diff --git a/packages/events/src/broadcaster.rs b/packages/events/src/broadcaster.rs index 137e9680c..6373a14d6 100644 --- a/packages/events/src/broadcaster.rs +++ b/packages/events/src/broadcaster.rs @@ -1,10 +1,9 @@ use futures::future::BoxFuture; use futures::FutureExt; -use tokio::sync::broadcast::error::{RecvError, SendError}; use tokio::sync::broadcast::{self}; -use crate::receiver::Receiver; -use crate::sender::Sender; +use crate::receiver::{Receiver, RecvError}; +use crate::sender::{SendError, Sender}; const CHANNEL_CAPACITY: usize = 32768; @@ -32,7 +31,7 @@ impl Sender for Broadcaster { type Event = E; fn send_event(&self, event: E) -> BoxFuture<'_, Option>>> { - async move { Some(self.sender.send(event)) }.boxed() + async move { Some(self.sender.send(event).map_err(std::convert::Into::into)) }.boxed() } } @@ -40,6 +39,21 @@ impl Receiver for broadcast::Receiver { type Event = E; fn recv(&mut self) -> BoxFuture<'_, Result> { - async move { self.recv().await }.boxed() + async move { self.recv().await.map_err(std::convert::Into::into) }.boxed() + } +} + +impl From> for SendError { + fn 
from(err: broadcast::error::SendError) -> Self { + SendError(err.0) + } +} + +impl From for RecvError { + fn from(err: broadcast::error::RecvError) -> Self { + match err { + broadcast::error::RecvError::Lagged(amt) => RecvError::Lagged(amt), + broadcast::error::RecvError::Closed => RecvError::Closed, + } } } diff --git a/packages/events/src/receiver.rs b/packages/events/src/receiver.rs index bdbd91616..15adb816a 100644 --- a/packages/events/src/receiver.rs +++ b/packages/events/src/receiver.rs @@ -1,7 +1,8 @@ +use std::fmt; + use futures::future::BoxFuture; #[cfg(test)] use mockall::{automock, predicate::str}; -use tokio::sync::broadcast::error::RecvError; /// A trait for receiving events. #[cfg_attr(test, automock(type Event=();))] @@ -10,3 +11,28 @@ pub trait Receiver: Sync + Send { fn recv(&mut self) -> BoxFuture<'_, Result>; } + +/// An error returned from the [`recv`] function on a [`Receiver`]. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum RecvError { + /// There are no more active senders implying no further messages will ever + /// be sent. + Closed, + + /// The receiver lagged too far behind. Attempting to receive again will + /// return the oldest message still retained by the channel. + /// + /// Includes the number of skipped messages. + Lagged(u64), +} + +impl fmt::Display for RecvError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + RecvError::Closed => write!(f, "channel closed"), + RecvError::Lagged(amt) => write!(f, "channel lagged by {amt}"), + } + } +} + +impl std::error::Error for RecvError {} diff --git a/packages/events/src/sender.rs b/packages/events/src/sender.rs index 0d901cb82..e9205a7dd 100644 --- a/packages/events/src/sender.rs +++ b/packages/events/src/sender.rs @@ -1,7 +1,8 @@ +use std::fmt; + use futures::future::BoxFuture; #[cfg(test)] use mockall::{automock, predicate::str}; -use tokio::sync::broadcast::error::SendError; /// A trait for sending events. 
#[cfg_attr(test, automock(type Event=();))] @@ -10,3 +11,15 @@ pub trait Sender: Sync + Send { fn send_event(&self, event: Self::Event) -> BoxFuture<'_, Option>>>; } + +/// Error returned by the [`send_event`] function on a [`Sender`]. +#[derive(Debug)] +pub struct SendError(pub Event); + +impl fmt::Display for SendError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "channel closed") + } +} + +impl std::error::Error for SendError {} diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index 26c59a9d5..b50c9538b 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -20,8 +20,8 @@ use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; use mockall::mock; -use tokio::sync::broadcast::error::SendError; use torrust_tracker_configuration::{Configuration, Core}; +use torrust_tracker_events::sender::SendError; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index feecb03b1..a9a75a786 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -300,7 +300,7 @@ mod tests { use futures::future::BoxFuture; use mockall::mock; - use tokio::sync::broadcast::error::SendError; + use torrust_tracker_events::sender::SendError; use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 6ffc4a5f6..2322e6850 100644 --- a/packages/http-tracker-core/src/services/scrape.rs 
+++ b/packages/http-tracker-core/src/services/scrape.rs @@ -182,8 +182,8 @@ mod tests { use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; use mockall::mock; - use tokio::sync::broadcast::error::SendError; use torrust_tracker_configuration::Configuration; + use torrust_tracker_events::sender::SendError; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use crate::event::Event; diff --git a/packages/http-tracker-core/src/statistics/event/listener.rs b/packages/http-tracker-core/src/statistics/event/listener.rs index 333f4e588..37710fb2d 100644 --- a/packages/http-tracker-core/src/statistics/event/listener.rs +++ b/packages/http-tracker-core/src/statistics/event/listener.rs @@ -1,8 +1,8 @@ use std::sync::Arc; -use tokio::sync::broadcast::{self}; use tokio::task::JoinHandle; use torrust_tracker_clock::clock::Time; +use torrust_tracker_events::receiver::RecvError; use super::handler::handle_event; use crate::event::receiver::Receiver; @@ -28,11 +28,11 @@ async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc handle_event(event, &stats_repository, CurrentClock::now()).await, Err(e) => { match e { - broadcast::error::RecvError::Closed => { + RecvError::Closed => { tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Http core statistics receiver closed."); break; } - broadcast::error::RecvError::Lagged(n) => { + RecvError::Lagged(n) => { // From now on, metrics will be imprecise tracing::warn!(target: HTTP_TRACKER_LOG_TARGET, "Http core statistics receiver lagged by {} events.", n); } diff --git a/packages/udp-tracker-core/benches/helpers/utils.rs b/packages/udp-tracker-core/benches/helpers/utils.rs index e560e36fe..06fa8e6c1 100644 --- a/packages/udp-tracker-core/benches/helpers/utils.rs +++ b/packages/udp-tracker-core/benches/helpers/utils.rs @@ -3,7 +3,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use bittorrent_udp_tracker_core::event::Event; use futures::future::BoxFuture; 
use mockall::mock; -use tokio::sync::broadcast::error::SendError; +use torrust_tracker_events::sender::SendError; pub(crate) fn sample_ipv4_remote_addr() -> SocketAddr { sample_ipv4_socket_address() diff --git a/packages/udp-tracker-core/src/services/mod.rs b/packages/udp-tracker-core/src/services/mod.rs index 8cbae4584..b471edbd1 100644 --- a/packages/udp-tracker-core/src/services/mod.rs +++ b/packages/udp-tracker-core/src/services/mod.rs @@ -10,7 +10,7 @@ pub(crate) mod tests { use futures::future::BoxFuture; use mockall::mock; - use tokio::sync::broadcast::error::SendError; + use torrust_tracker_events::sender::SendError; use crate::connection_cookie::gen_remote_fingerprint; use crate::event::Event; diff --git a/packages/udp-tracker-core/src/statistics/event/listener.rs b/packages/udp-tracker-core/src/statistics/event/listener.rs index a8a491e37..0344fc668 100644 --- a/packages/udp-tracker-core/src/statistics/event/listener.rs +++ b/packages/udp-tracker-core/src/statistics/event/listener.rs @@ -1,8 +1,8 @@ use std::sync::Arc; -use tokio::sync::broadcast::{self}; use tokio::task::JoinHandle; use torrust_tracker_clock::clock::Time; +use torrust_tracker_events::receiver::RecvError; use super::handler::handle_event; use crate::event::receiver::Receiver; @@ -28,11 +28,11 @@ async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc handle_event(event, &stats_repository, CurrentClock::now()).await, Err(e) => { match e { - broadcast::error::RecvError::Closed => { + RecvError::Closed => { tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp core statistics receiver closed."); break; } - broadcast::error::RecvError::Lagged(n) => { + RecvError::Lagged(n) => { // From now on, metrics will be imprecise tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp core statistics receiver lagged by {} events.", n); } diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 18e85e0ce..72ef6c536 100644 --- 
a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -225,9 +225,9 @@ pub(crate) mod tests { use bittorrent_udp_tracker_core::{self, event as core_event}; use futures::future::BoxFuture; use mockall::mock; - use tokio::sync::broadcast::error::SendError; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{Configuration, Core}; + use torrust_tracker_events::sender::SendError; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; diff --git a/packages/udp-tracker-server/src/statistics/event/listener.rs b/packages/udp-tracker-server/src/statistics/event/listener.rs index 386a4fc33..0167b34f6 100644 --- a/packages/udp-tracker-server/src/statistics/event/listener.rs +++ b/packages/udp-tracker-server/src/statistics/event/listener.rs @@ -1,9 +1,9 @@ use std::sync::Arc; use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; -use tokio::sync::broadcast::{self}; use tokio::task::JoinHandle; use torrust_tracker_clock::clock::Time; +use torrust_tracker_events::receiver::RecvError; use super::handler::handle_event; use crate::event::receiver::Receiver; @@ -29,11 +29,11 @@ async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc handle_event(event, &stats_repository, CurrentClock::now()).await, Err(e) => { match e { - broadcast::error::RecvError::Closed => { + RecvError::Closed => { tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp server statistics receiver closed."); break; } - broadcast::error::RecvError::Lagged(n) => { + RecvError::Lagged(n) => { // From now on, metrics will be imprecise tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp server statistics receiver lagged by {} events.", n); } From f546bc19b29dfa3a6a0cef990371f1fc0faef4be Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 29 Apr 2025 08:15:39 +0100 Subject: [PATCH 521/802] refactor: [#1485] normalize event type parameter name in events pkg --- 
packages/events/src/broadcaster.rs | 24 ++++++++++++------------ packages/events/src/bus.rs | 16 ++++++++-------- packages/events/src/sender.rs | 2 +- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/packages/events/src/broadcaster.rs b/packages/events/src/broadcaster.rs index 6373a14d6..8be6b0acc 100644 --- a/packages/events/src/broadcaster.rs +++ b/packages/events/src/broadcaster.rs @@ -9,42 +9,42 @@ const CHANNEL_CAPACITY: usize = 32768; /// An event sender implementation using a broadcast channel. #[derive(Clone)] -pub struct Broadcaster { - pub(crate) sender: broadcast::Sender, +pub struct Broadcaster { + pub(crate) sender: broadcast::Sender, } -impl Default for Broadcaster { +impl Default for Broadcaster { fn default() -> Self { let (sender, _) = broadcast::channel(CHANNEL_CAPACITY); Self { sender } } } -impl Broadcaster { +impl Broadcaster { #[must_use] - pub fn subscribe(&self) -> broadcast::Receiver { + pub fn subscribe(&self) -> broadcast::Receiver { self.sender.subscribe() } } -impl Sender for Broadcaster { - type Event = E; +impl Sender for Broadcaster { + type Event = Event; - fn send_event(&self, event: E) -> BoxFuture<'_, Option>>> { + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { async move { Some(self.sender.send(event).map_err(std::convert::Into::into)) }.boxed() } } -impl Receiver for broadcast::Receiver { - type Event = E; +impl Receiver for broadcast::Receiver { + type Event = Event; fn recv(&mut self) -> BoxFuture<'_, Result> { async move { self.recv().await.map_err(std::convert::Into::into) }.boxed() } } -impl From> for SendError { - fn from(err: broadcast::error::SendError) -> Self { +impl From> for SendError { + fn from(err: broadcast::error::SendError) -> Self { SendError(err.0) } } diff --git a/packages/events/src/bus.rs b/packages/events/src/bus.rs index b714741b2..7e4d3a859 100644 --- a/packages/events/src/bus.rs +++ b/packages/events/src/bus.rs @@ -3,23 +3,23 @@ use std::sync::Arc; use 
crate::broadcaster::Broadcaster; use crate::{receiver, sender}; -pub struct EventBus { +pub struct EventBus { pub enable_sender: bool, - pub broadcaster: Broadcaster, + pub broadcaster: Broadcaster, } -impl Default for EventBus { +impl Default for EventBus { fn default() -> Self { let enable_sender = true; - let broadcaster = Broadcaster::::default(); + let broadcaster = Broadcaster::::default(); Self::new(enable_sender, broadcaster) } } -impl EventBus { +impl EventBus { #[must_use] - pub fn new(enable_sender: bool, broadcaster: Broadcaster) -> Self { + pub fn new(enable_sender: bool, broadcaster: Broadcaster) -> Self { Self { enable_sender, broadcaster, @@ -27,7 +27,7 @@ impl EventBus { } #[must_use] - pub fn sender(&self) -> Arc>>> { + pub fn sender(&self) -> Arc>>> { if self.enable_sender { Arc::new(Some(Box::new(self.broadcaster.clone()))) } else { @@ -36,7 +36,7 @@ impl EventBus { } #[must_use] - pub fn receiver(&self) -> Box> { + pub fn receiver(&self) -> Box> { Box::new(self.broadcaster.subscribe()) } } diff --git a/packages/events/src/sender.rs b/packages/events/src/sender.rs index e9205a7dd..f5b715524 100644 --- a/packages/events/src/sender.rs +++ b/packages/events/src/sender.rs @@ -22,4 +22,4 @@ impl fmt::Display for SendError { } } -impl std::error::Error for SendError {} +impl std::error::Error for SendError {} From c2df95f24b4119c2a92771a720841ded632ea69a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 29 Apr 2025 08:20:29 +0100 Subject: [PATCH 522/802] refactor: [#1485] rename trait fn send_event to send --- packages/events/src/broadcaster.rs | 2 +- packages/events/src/sender.rs | 4 ++-- packages/http-tracker-core/benches/helpers/util.rs | 2 +- packages/http-tracker-core/src/services/announce.rs | 10 +++++----- packages/http-tracker-core/src/services/scrape.rs | 12 ++++++------ packages/udp-tracker-core/benches/helpers/utils.rs | 2 +- packages/udp-tracker-core/src/services/announce.rs | 2 +- packages/udp-tracker-core/src/services/connect.rs | 6 
+++--- packages/udp-tracker-core/src/services/mod.rs | 2 +- packages/udp-tracker-core/src/services/scrape.rs | 2 +- packages/udp-tracker-server/src/handlers/announce.rs | 10 +++++----- packages/udp-tracker-server/src/handlers/connect.rs | 10 +++++----- packages/udp-tracker-server/src/handlers/error.rs | 2 +- packages/udp-tracker-server/src/handlers/mod.rs | 4 ++-- packages/udp-tracker-server/src/handlers/scrape.rs | 6 +++--- packages/udp-tracker-server/src/server/launcher.rs | 6 +++--- packages/udp-tracker-server/src/server/processor.rs | 2 +- 17 files changed, 42 insertions(+), 42 deletions(-) diff --git a/packages/events/src/broadcaster.rs b/packages/events/src/broadcaster.rs index 8be6b0acc..caf0d3c85 100644 --- a/packages/events/src/broadcaster.rs +++ b/packages/events/src/broadcaster.rs @@ -30,7 +30,7 @@ impl Broadcaster { impl Sender for Broadcaster { type Event = Event; - fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { + fn send(&self, event: Event) -> BoxFuture<'_, Option>>> { async move { Some(self.sender.send(event).map_err(std::convert::Into::into)) }.boxed() } } diff --git a/packages/events/src/sender.rs b/packages/events/src/sender.rs index f5b715524..b979fa481 100644 --- a/packages/events/src/sender.rs +++ b/packages/events/src/sender.rs @@ -9,10 +9,10 @@ use mockall::{automock, predicate::str}; pub trait Sender: Sync + Send { type Event: Send + Clone; - fn send_event(&self, event: Self::Event) -> BoxFuture<'_, Option>>>; + fn send(&self, event: Self::Event) -> BoxFuture<'_, Option>>>; } -/// Error returned by the [`send_event`] function on a [`Sender`]. +/// Error returned by the [`send`] function on a [`Sender`]. 
#[derive(Debug)] pub struct SendError(pub Event); diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index b50c9538b..7ee91a2c4 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -127,6 +127,6 @@ mock! { impl torrust_tracker_events::sender::Sender for HttpStatsEventSender { type Event = Event; - fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; + fn send(&self, event: Event) -> BoxFuture<'static,Option > > > ; } } diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index a9a75a786..22e30e650 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -146,7 +146,7 @@ impl AnnounceService { tracing::debug!("Sending TcpAnnounce event: {:?}", event); - http_stats_event_sender.send_event(event).await; + http_stats_event_sender.send(event).await; } } } @@ -314,7 +314,7 @@ mod tests { impl torrust_tracker_events::sender::Sender for HttpStatsEventSender { type Event = Event; - fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; + fn send(&self, event: Event) -> BoxFuture<'static,Option > > > ; } } @@ -390,7 +390,7 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock - .expect_send_event() + .expect_send() .with(predicate::function(move |event| { let mut announced_peer = peer_copy; announced_peer.peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); @@ -463,7 +463,7 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock - .expect_send_event() + .expect_send() .with(predicate::function(move |event| { let mut announced_peer = peer_copy; announced_peer.peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); 
@@ -521,7 +521,7 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock - .expect_send_event() + .expect_send() .with(predicate::function(move |event| { let expected_event = Event::TcpAnnounce { connection: ConnectionContext::new( diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 2322e6850..5b58bff22 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -108,7 +108,7 @@ impl ScrapeService { tracing::debug!("Sending TcpScrape event: {:?}", event); - http_stats_event_sender.send_event(event).await; + http_stats_event_sender.send(event).await; } } } @@ -241,7 +241,7 @@ mod tests { impl torrust_tracker_events::sender::Sender for HttpStatsEventSender { type Event = Event; - fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; + fn send(&self, event: Event) -> BoxFuture<'static,Option > > > ; } } @@ -337,7 +337,7 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock - .expect_send_event() + .expect_send() .with(eq(Event::TcpScrape { connection: ConnectionContext::new( RemoteClientAddr::new( @@ -390,7 +390,7 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock - .expect_send_event() + .expect_send() .with(eq(Event::TcpScrape { connection: ConnectionContext::new( RemoteClientAddr::new( @@ -521,7 +521,7 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock - .expect_send_event() + .expect_send() .with(eq(Event::TcpScrape { connection: ConnectionContext::new( RemoteClientAddr::new( @@ -574,7 +574,7 @@ mod tests { let mut http_stats_event_sender_mock = MockHttpStatsEventSender::new(); http_stats_event_sender_mock - .expect_send_event() + .expect_send() .with(eq(Event::TcpScrape 
{ connection: ConnectionContext::new( RemoteClientAddr::new( diff --git a/packages/udp-tracker-core/benches/helpers/utils.rs b/packages/udp-tracker-core/benches/helpers/utils.rs index 06fa8e6c1..f04805001 100644 --- a/packages/udp-tracker-core/benches/helpers/utils.rs +++ b/packages/udp-tracker-core/benches/helpers/utils.rs @@ -22,6 +22,6 @@ mock! { impl torrust_tracker_events::sender::Sender for UdpCoreStatsEventSender { type Event = Event; - fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; + fn send(&self, event: Event) -> BoxFuture<'static,Option > > > ; } } diff --git a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index 481c3d7ca..499da2945 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -121,7 +121,7 @@ impl AnnounceService { println!("Sending UdpAnnounce event: {event:?}"); - udp_stats_event_sender.send_event(event).await; + udp_stats_event_sender.send(event).await; } } } diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index a69c84686..a5837dfcc 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -41,7 +41,7 @@ impl ConnectService { if let Some(udp_stats_event_sender) = self.opt_udp_core_stats_event_sender.as_deref() { udp_stats_event_sender - .send_event(Event::UdpConnect { + .send(Event::UdpConnect { connection: ConnectionContext::new(client_socket_addr, server_service_binding), }) .await; @@ -145,7 +145,7 @@ mod tests { let mut udp_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_stats_event_sender_mock - .expect_send_event() + .expect_send() .with(eq(Event::UdpConnect { connection: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), })) @@ -168,7 +168,7 @@ mod tests { let mut udp_stats_event_sender_mock = 
MockUdpCoreStatsEventSender::new(); udp_stats_event_sender_mock - .expect_send_event() + .expect_send() .with(eq(Event::UdpConnect { connection: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), })) diff --git a/packages/udp-tracker-core/src/services/mod.rs b/packages/udp-tracker-core/src/services/mod.rs index b471edbd1..64e357b1c 100644 --- a/packages/udp-tracker-core/src/services/mod.rs +++ b/packages/udp-tracker-core/src/services/mod.rs @@ -48,7 +48,7 @@ pub(crate) mod tests { impl torrust_tracker_events::sender::Sender for UdpCoreStatsEventSender { type Event = Event; - fn send_event(&self, event: Event) -> BoxFuture<'static,Option > > > ; + fn send(&self, event: Event) -> BoxFuture<'static,Option > > > ; } } } diff --git a/packages/udp-tracker-core/src/services/scrape.rs b/packages/udp-tracker-core/src/services/scrape.rs index 14ba95834..b42004f63 100644 --- a/packages/udp-tracker-core/src/services/scrape.rs +++ b/packages/udp-tracker-core/src/services/scrape.rs @@ -88,7 +88,7 @@ impl ScrapeService { tracing::debug!(target = crate::UDP_TRACKER_LOG_TARGET, "Sending UdpScrape event: {event:?}"); - udp_stats_event_sender.send_event(event).await; + udp_stats_event_sender.send(event).await; } } } diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 2fbddb544..3086ad14d 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -42,7 +42,7 @@ pub async fn handle_announce( if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(Event::UdpRequestAccepted { + .send(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), kind: UdpRequestKind::Announce, }) @@ -427,7 +427,7 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); 
udp_server_stats_event_sender_mock - .expect_send_event() + .expect_send() .with(eq(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), kind: UdpRequestKind::Announce, @@ -781,7 +781,7 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock - .expect_send_event() + .expect_send() .with(eq(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), kind: UdpRequestKind::Announce, @@ -873,7 +873,7 @@ mod tests { let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock - .expect_send_event() + .expect_send() .with(predicate::function(move |event| { let expected_event = core_event::Event::UdpAnnounce { connection: core_event::ConnectionContext::new( @@ -893,7 +893,7 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock - .expect_send_event() + .expect_send() .with(eq(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_service_binding_clone.clone()), kind: UdpRequestKind::Announce, diff --git a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index 9f00298eb..f56500af4 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -24,7 +24,7 @@ pub async fn handle_connect( if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(Event::UdpRequestAccepted { + .send(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), kind: UdpRequestKind::Connect, }) @@ -204,7 +204,7 @@ mod tests { let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); 
udp_core_stats_event_sender_mock - .expect_send_event() + .expect_send() .with(eq(core_event::Event::UdpConnect { connection: core_event::ConnectionContext::new(client_socket_addr, server_service_binding.clone()), })) @@ -215,7 +215,7 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock - .expect_send_event() + .expect_send() .with(eq(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), kind: UdpRequestKind::Connect, @@ -246,7 +246,7 @@ mod tests { let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock - .expect_send_event() + .expect_send() .with(eq(core_event::Event::UdpConnect { connection: core_event::ConnectionContext::new(client_socket_addr, server_service_binding.clone()), })) @@ -257,7 +257,7 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock - .expect_send_event() + .expect_send() .with(eq(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), kind: UdpRequestKind::Connect, diff --git a/packages/udp-tracker-server/src/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs index 04b8d073b..6259e26ca 100644 --- a/packages/udp-tracker-server/src/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -61,7 +61,7 @@ pub async fn handle_error( if e.1.is_some() { if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(Event::UdpError { + .send(Event::UdpError { context: ConnectionContext::new(client_socket_addr, server_service_binding), kind: req_kind, }) diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 72ef6c536..d39ad0972 100644 --- 
a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -429,7 +429,7 @@ pub(crate) mod tests { impl torrust_tracker_events::sender::Sender for UdpCoreStatsEventSender { type Event = core_event::Event; - fn send_event(&self, event: core_event::Event) -> BoxFuture<'static,Option > > > ; + fn send(&self, event: core_event::Event) -> BoxFuture<'static,Option > > > ; } } @@ -438,7 +438,7 @@ pub(crate) mod tests { impl torrust_tracker_events::sender::Sender for UdpServerStatsEventSender { type Event = server_event::Event; - fn send_event(&self, event: server_event::Event) -> BoxFuture<'static,Option > > > ; + fn send(&self, event: server_event::Event) -> BoxFuture<'static,Option > > > ; } } } diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index b7be10f29..6f3dd0e1a 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -38,7 +38,7 @@ pub async fn handle_scrape( if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(Event::UdpRequestAccepted { + .send(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), kind: UdpRequestKind::Scrape, }) @@ -378,7 +378,7 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock - .expect_send_event() + .expect_send() .with(eq(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), kind: UdpRequestKind::Scrape, @@ -428,7 +428,7 @@ mod tests { let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock - .expect_send_event() + .expect_send() .with(eq(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, 
server_service_binding.clone()), kind: UdpRequestKind::Scrape, diff --git a/packages/udp-tracker-server/src/server/launcher.rs b/packages/udp-tracker-server/src/server/launcher.rs index 02b9c8d74..a514921cc 100644 --- a/packages/udp-tracker-server/src/server/launcher.rs +++ b/packages/udp-tracker-server/src/server/launcher.rs @@ -184,7 +184,7 @@ impl Launcher { if let Some(udp_server_stats_event_sender) = udp_tracker_server_container.stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(Event::UdpRequestReceived { + .send(Event::UdpRequestReceived { context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), }) .await; @@ -195,7 +195,7 @@ impl Launcher { if let Some(udp_server_stats_event_sender) = udp_tracker_server_container.stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(Event::UdpRequestBanned { + .send(Event::UdpRequestBanned { context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), }) .await; @@ -235,7 +235,7 @@ impl Launcher { if let Some(udp_server_stats_event_sender) = udp_tracker_server_container.stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(Event::UdpRequestAborted { + .send(Event::UdpRequestAborted { context: ConnectionContext::new(client_socket_addr, server_service_binding), }) .await; diff --git a/packages/udp-tracker-server/src/server/processor.rs b/packages/udp-tracker-server/src/server/processor.rs index 297919bc3..6b877f85b 100644 --- a/packages/udp-tracker-server/src/server/processor.rs +++ b/packages/udp-tracker-server/src/server/processor.rs @@ -118,7 +118,7 @@ impl Processor { self.udp_tracker_server_container.stats_event_sender.as_deref() { udp_server_stats_event_sender - .send_event(Event::UdpResponseSent { + .send(Event::UdpResponseSent { context: ConnectionContext::new(client_socket_addr, self.server_service_binding), kind: udp_response_kind, req_processing_time, From 8f5b57e04471d56b1e6e7eade3249a954b1f8666 
Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 29 Apr 2025 08:42:22 +0100 Subject: [PATCH 523/802] refactor: [#1485] replace Arc) { - if config.core.tracker_usage_statistics { - let _job = bittorrent_http_tracker_core::statistics::event::listener::run_event_listener( - app_container.http_tracker_core_services.event_bus.receiver(), - &app_container.http_tracker_core_services.stats_repository, - ); - - // todo: this cannot be enabled otherwise the application never ends - // because the event listener never stops. You see this console message - // forever: - // - // !! shuting down in 90 seconds !! - // 2025-04-24T15:27:45.454101Z INFO graceful_shutdown: torrust_axum_server::signals: remaining alive connections: 0 - // - // Depends on: https://github.com/torrust/torrust-tracker/issues/1405 - - //jobs.push(job); - } + let _job = jobs::http_tracker_core::start_event_listener(config, app_container); + + // todo: this cannot be enabled otherwise the application never ends + // because the event listener never stops. You see this console message + // forever: + // + // !! shuting down in 90 seconds !! + // 2025-04-24T15:27:45.454101Z INFO graceful_shutdown: torrust_axum_server::signals: remaining alive connections: 0 + // + // Depends on: https://github.com/torrust/torrust-tracker/issues/1405 } fn start_udp_core_event_listener(config: &Configuration, app_container: &Arc) { - if config.core.tracker_usage_statistics { - let _job = bittorrent_udp_tracker_core::statistics::event::listener::run_event_listener( - app_container.udp_tracker_core_services.event_bus.receiver(), - &app_container.udp_tracker_core_services.stats_repository, - ); - - // todo: this cannot be enabled otherwise the application never ends - // because the event listener never stops. You see this console message - // forever: - // - // !! shuting down in 90 seconds !! 
- // 2025-04-24T15:27:45.454101Z INFO graceful_shutdown: torrust_axum_server::signals: remaining alive connections: 0 - // - // Depends on: https://github.com/torrust/torrust-tracker/issues/1405 - - //jobs.push(job); - } + let _job = jobs::udp_tracker_core::start_event_listener(config, app_container); + + // todo: the job cannot be added in the jobs vector otherwise the application never ends + // because the event listener never stops. You see this console message + // forever: + // + // !! shuting down in 90 seconds !! + // 2025-04-24T15:27:45.454101Z INFO graceful_shutdown: torrust_axum_server::signals: remaining alive connections: 0 + // + // Depends on: https://github.com/torrust/torrust-tracker/issues/1405 } fn start_udp_server_event_listener(config: &Configuration, app_container: &Arc) { - if config.core.tracker_usage_statistics { - let _job = torrust_udp_tracker_server::statistics::event::listener::run_event_listener( - app_container.udp_tracker_server_container.event_bus.receiver(), - &app_container.udp_tracker_server_container.stats_repository, - ); - - // todo: this cannot be enabled otherwise the application never ends - // because the event listener never stops. You see this console message - // forever: - // - // !! shuting down in 90 seconds !! - // 2025-04-24T15:27:45.454101Z INFO graceful_shutdown: torrust_axum_server::signals: remaining alive connections: 0 - // - // Depends on: https://github.com/torrust/torrust-tracker/issues/1405 - - //jobs.push(job); - } + let _job = jobs::udp_tracker_server::start_event_listener(config, app_container); + + // todo: the job cannot be added in the jobs vector otherwise the application never ends + // because the event listener never stops. You see this console message + // forever: + // + // !! shuting down in 90 seconds !! 
+ // 2025-04-24T15:27:45.454101Z INFO graceful_shutdown: torrust_axum_server::signals: remaining alive connections: 0 + // + // Depends on: https://github.com/torrust/torrust-tracker/issues/1405 } async fn start_the_udp_instances(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { diff --git a/src/bootstrap/jobs/http_tracker_core.rs b/src/bootstrap/jobs/http_tracker_core.rs new file mode 100644 index 000000000..952c80b40 --- /dev/null +++ b/src/bootstrap/jobs/http_tracker_core.rs @@ -0,0 +1,20 @@ +use std::sync::Arc; + +use tokio::task::JoinHandle; +use torrust_tracker_configuration::Configuration; + +use crate::container::AppContainer; + +pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { + if config.core.tracker_usage_statistics { + let job = bittorrent_http_tracker_core::statistics::event::listener::run_event_listener( + app_container.http_tracker_core_services.event_bus.receiver(), + &app_container.http_tracker_core_services.stats_repository, + ); + + Some(job) + } else { + tracing::info!("HTTP tracker core event listener job is disabled."); + None + } +} diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index 8c85ba45b..947b01565 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -7,7 +7,10 @@ //! //! This modules contains all the functions needed to start those jobs. 
pub mod health_check_api; +pub mod http_tracker_core; pub mod http_tracker; pub mod torrent_cleanup; pub mod tracker_apis; +pub mod udp_tracker_core; +pub mod udp_tracker_server; pub mod udp_tracker; diff --git a/src/bootstrap/jobs/udp_tracker_core.rs b/src/bootstrap/jobs/udp_tracker_core.rs new file mode 100644 index 000000000..689fa8301 --- /dev/null +++ b/src/bootstrap/jobs/udp_tracker_core.rs @@ -0,0 +1,19 @@ +use std::sync::Arc; + +use tokio::task::JoinHandle; +use torrust_tracker_configuration::Configuration; + +use crate::container::AppContainer; + +pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { + if config.core.tracker_usage_statistics { + let job = bittorrent_udp_tracker_core::statistics::event::listener::run_event_listener( + app_container.udp_tracker_core_services.event_bus.receiver(), + &app_container.udp_tracker_core_services.stats_repository, + ); + Some(job) + } else { + tracing::info!("UDP tracker core event listener job is disabled."); + None + } +} diff --git a/src/bootstrap/jobs/udp_tracker_server.rs b/src/bootstrap/jobs/udp_tracker_server.rs new file mode 100644 index 000000000..42ac2d03e --- /dev/null +++ b/src/bootstrap/jobs/udp_tracker_server.rs @@ -0,0 +1,19 @@ +use std::sync::Arc; + +use tokio::task::JoinHandle; +use torrust_tracker_configuration::Configuration; + +use crate::container::AppContainer; + +pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { + if config.core.tracker_usage_statistics { + let job = torrust_udp_tracker_server::statistics::event::listener::run_event_listener( + app_container.udp_tracker_server_container.event_bus.receiver(), + &app_container.udp_tracker_server_container.stats_repository, + ); + Some(job) + } else { + tracing::info!("UDP tracker server event listener job is disabled."); + None + } +} From faf8111f696e0b4ccb178aa4cfe50f05ce24cb50 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 29 Apr 2025 16:05:15 +0100 Subject: [PATCH 
532/802] fix: [#1477] shutdown event listeners on CRTL+c signal This fixes the problem of adding the jobs for event listeners in the main app JoinHandle vector. It was not possible to add the handles for those tokio tasks becuase in the main app we wait for all jobs and those jobs never end. ```rust async fn main() { let (_app_container, jobs) = app::run().await; // handle the signals tokio::select! { _ = tokio::signal::ctrl_c() => { tracing::info!("Torrust tracker shutting down ..."); // Await for all jobs to shutdown futures::future::join_all(jobs).await; tracing::info!("Torrust tracker successfully shutdown."); } } } ``` Now, we can wait for them becuase they listen for the halt signal. We will implement the shutdown in a different way in a new PR. See https://github.com/torrust/torrust-tracker/issues/1405 Instead of listen to the CRTL+c signal the main app will send a "stop" event to the listeners. The final goal it only the main app listen for this external signal and it propagates the shutdown in cascade via normal internal messages or channels. 
--- .../src/statistics/event/listener.rs | 36 +++++++++---- .../src/statistics/event/listener.rs | 35 +++++++++---- .../src/statistics/event/listener.rs | 37 +++++++++----- src/app.rs | 51 +++++++------------ src/bootstrap/jobs/mod.rs | 4 +- 5 files changed, 94 insertions(+), 69 deletions(-) diff --git a/packages/http-tracker-core/src/statistics/event/listener.rs b/packages/http-tracker-core/src/statistics/event/listener.rs index 37710fb2d..6730d4c70 100644 --- a/packages/http-tracker-core/src/statistics/event/listener.rs +++ b/packages/http-tracker-core/src/statistics/event/listener.rs @@ -23,18 +23,32 @@ pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> J } async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc) { + let shutdown_signal = tokio::signal::ctrl_c(); + + tokio::pin!(shutdown_signal); + loop { - match receiver.recv().await { - Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, - Err(e) => { - match e { - RecvError::Closed => { - tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Http core statistics receiver closed."); - break; - } - RecvError::Lagged(n) => { - // From now on, metrics will be imprecise - tracing::warn!(target: HTTP_TRACKER_LOG_TARGET, "Http core statistics receiver lagged by {} events.", n); + tokio::select! 
{ + biased; + + _ = &mut shutdown_signal => { + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Received Ctrl+C, shutting down HTTP tracker core event listener."); + break; + } + + result = receiver.recv() => { + match result { + Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, + Err(e) => { + match e { + RecvError::Closed => { + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Http core statistics receiver closed."); + break; + } + RecvError::Lagged(n) => { + tracing::warn!(target: HTTP_TRACKER_LOG_TARGET, "Http core statistics receiver lagged by {} events.", n); + } + } } } } diff --git a/packages/udp-tracker-core/src/statistics/event/listener.rs b/packages/udp-tracker-core/src/statistics/event/listener.rs index 0344fc668..9b6f2e574 100644 --- a/packages/udp-tracker-core/src/statistics/event/listener.rs +++ b/packages/udp-tracker-core/src/statistics/event/listener.rs @@ -23,18 +23,31 @@ pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> J } async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc) { + let shutdown_signal = tokio::signal::ctrl_c(); + tokio::pin!(shutdown_signal); + loop { - match receiver.recv().await { - Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, - Err(e) => { - match e { - RecvError::Closed => { - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp core statistics receiver closed."); - break; - } - RecvError::Lagged(n) => { - // From now on, metrics will be imprecise - tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp core statistics receiver lagged by {} events.", n); + tokio::select! 
{ + biased; + + _ = &mut shutdown_signal => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Received Ctrl+C, shutting down UDP tracker core event listener."); + break; + } + + result = receiver.recv() => { + match result { + Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, + Err(e) => { + match e { + RecvError::Closed => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp core statistics receiver closed."); + break; + } + RecvError::Lagged(n) => { + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp core statistics receiver lagged by {} events.", n); + } + } } } } diff --git a/packages/udp-tracker-server/src/statistics/event/listener.rs b/packages/udp-tracker-server/src/statistics/event/listener.rs index 0167b34f6..d805cc87f 100644 --- a/packages/udp-tracker-server/src/statistics/event/listener.rs +++ b/packages/udp-tracker-server/src/statistics/event/listener.rs @@ -19,23 +19,36 @@ pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> J tokio::spawn(async move { dispatch_events(receiver, stats_repository).await; - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "DP tracker server event listener finished"); + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker server event listener finished"); }) } async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc) { + let shutdown_signal = tokio::signal::ctrl_c(); + tokio::pin!(shutdown_signal); + loop { - match receiver.recv().await { - Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, - Err(e) => { - match e { - RecvError::Closed => { - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp server statistics receiver closed."); - break; - } - RecvError::Lagged(n) => { - // From now on, metrics will be imprecise - tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp server statistics receiver lagged by {} events.", n); + tokio::select! 
{ + biased; + + _ = &mut shutdown_signal => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Received Ctrl+C, shutting down UDP tracker server event listener."); + break; + } + + result = receiver.recv() => { + match result { + Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, + Err(e) => { + match e { + RecvError::Closed => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp server statistics receiver closed."); + break; + } + RecvError::Lagged(n) => { + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp server statistics receiver lagged by {} events.", n); + } + } } } } diff --git a/src/app.rs b/src/app.rs index cd41bcd85..fcce6336c 100644 --- a/src/app.rs +++ b/src/app.rs @@ -66,9 +66,9 @@ async fn load_data_from_database(config: &Configuration, app_container: &Arc) -> Vec> { let mut jobs: Vec> = Vec::new(); - start_http_core_event_listener(config, app_container); - start_udp_core_event_listener(config, app_container); - start_udp_server_event_listener(config, app_container); + start_http_core_event_listener(config, app_container, &mut jobs); + start_udp_core_event_listener(config, app_container, &mut jobs); + start_udp_server_event_listener(config, app_container, &mut jobs); start_the_udp_instances(config, app_container, &mut jobs).await; start_the_http_instances(config, app_container, &mut jobs).await; start_the_http_api(config, app_container, &mut jobs).await; @@ -109,43 +109,28 @@ async fn load_whitelisted_torrents(config: &Configuration, app_container: &Arc) { - let _job = jobs::http_tracker_core::start_event_listener(config, app_container); +fn start_http_core_event_listener(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { + let opt_job = jobs::http_tracker_core::start_event_listener(config, app_container); - // todo: this cannot be enabled otherwise the application never ends - // because the event listener never stops. You see this console message - // forever: - // - // !! 
shuting down in 90 seconds !! - // 2025-04-24T15:27:45.454101Z INFO graceful_shutdown: torrust_axum_server::signals: remaining alive connections: 0 - // - // Depends on: https://github.com/torrust/torrust-tracker/issues/1405 + if let Some(job) = opt_job { + jobs.push(job); + } } -fn start_udp_core_event_listener(config: &Configuration, app_container: &Arc) { - let _job = jobs::udp_tracker_core::start_event_listener(config, app_container); +fn start_udp_core_event_listener(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { + let opt_job = jobs::udp_tracker_core::start_event_listener(config, app_container); - // todo: the job cannot be added in the jobs vector otherwise the application never ends - // because the event listener never stops. You see this console message - // forever: - // - // !! shuting down in 90 seconds !! - // 2025-04-24T15:27:45.454101Z INFO graceful_shutdown: torrust_axum_server::signals: remaining alive connections: 0 - // - // Depends on: https://github.com/torrust/torrust-tracker/issues/1405 + if let Some(job) = opt_job { + jobs.push(job); + } } -fn start_udp_server_event_listener(config: &Configuration, app_container: &Arc) { - let _job = jobs::udp_tracker_server::start_event_listener(config, app_container); +fn start_udp_server_event_listener(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { + let opt_job = jobs::udp_tracker_server::start_event_listener(config, app_container); - // todo: the job cannot be added in the jobs vector otherwise the application never ends - // because the event listener never stops. You see this console message - // forever: - // - // !! shuting down in 90 seconds !! 
- // 2025-04-24T15:27:45.454101Z INFO graceful_shutdown: torrust_axum_server::signals: remaining alive connections: 0 - // - // Depends on: https://github.com/torrust/torrust-tracker/issues/1405 + if let Some(job) = opt_job { + jobs.push(job); + } } async fn start_the_udp_instances(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index 947b01565..579618d09 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -7,10 +7,10 @@ //! //! This modules contains all the functions needed to start those jobs. pub mod health_check_api; -pub mod http_tracker_core; pub mod http_tracker; +pub mod http_tracker_core; pub mod torrent_cleanup; pub mod tracker_apis; +pub mod udp_tracker; pub mod udp_tracker_core; pub mod udp_tracker_server; -pub mod udp_tracker; From 53fdafdaabec22001038831758611a28e8970b82 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 29 Apr 2025 17:43:13 +0100 Subject: [PATCH 533/802] feat: [#1477] extract JobManager to handle jobs It: - Give a name to all jobs so they can be identify later in logs. - Wait for all jobs to finish when the app receives teh halt signal (CRTL+c) - Only waits for a grace period per job. - Shows a message when a job don't complete in time. This could be improved in the future: - By showing a message every second while we are waiting for a job to finish. - Waiting for all of them in paralell. --- src/app.rs | 113 ++++++++++++++++++---------------- src/bootstrap/jobs/manager.rs | 89 ++++++++++++++++++++++++++ src/bootstrap/jobs/mod.rs | 1 + src/console/profiling.rs | 3 +- src/main.rs | 6 +- 5 files changed, 154 insertions(+), 58 deletions(-) create mode 100644 src/bootstrap/jobs/manager.rs diff --git a/src/app.rs b/src/app.rs index fcce6336c..8f5c6ca4c 100644 --- a/src/app.rs +++ b/src/app.rs @@ -23,15 +23,15 @@ //! - Tracker REST API: the tracker API can be enabled/disabled. 
use std::sync::Arc; -use tokio::task::JoinHandle; use torrust_tracker_configuration::{Configuration, HttpTracker, UdpTracker}; use tracing::instrument; +use crate::bootstrap::jobs::manager::JobManager; use crate::bootstrap::jobs::{self, health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; use crate::bootstrap::{self}; use crate::container::AppContainer; -pub async fn run() -> (Arc, Vec>) { +pub async fn run() -> (Arc, JobManager) { let (config, app_container) = bootstrap::app::setup(); let app_container = Arc::new(app_container); @@ -50,7 +50,7 @@ pub async fn run() -> (Arc, Vec>) { /// - Can't retrieve tracker keys from database. /// - Can't load whitelist from database. #[instrument(skip(config, app_container))] -pub async fn start(config: &Configuration, app_container: &Arc) -> Vec> { +pub async fn start(config: &Configuration, app_container: &Arc) -> JobManager { warn_if_no_services_enabled(config); load_data_from_database(config, app_container).await; @@ -63,19 +63,19 @@ async fn load_data_from_database(config: &Configuration, app_container: &Arc) -> Vec> { - let mut jobs: Vec> = Vec::new(); +async fn start_jobs(config: &Configuration, app_container: &Arc) -> JobManager { + let mut job_manager = JobManager::new(); - start_http_core_event_listener(config, app_container, &mut jobs); - start_udp_core_event_listener(config, app_container, &mut jobs); - start_udp_server_event_listener(config, app_container, &mut jobs); - start_the_udp_instances(config, app_container, &mut jobs).await; - start_the_http_instances(config, app_container, &mut jobs).await; - start_the_http_api(config, app_container, &mut jobs).await; - start_torrent_cleanup(config, app_container, &mut jobs); - start_health_check_api(config, app_container, &mut jobs).await; + start_http_core_event_listener(config, app_container, &mut job_manager); + start_udp_core_event_listener(config, app_container, &mut job_manager); + start_udp_server_event_listener(config, app_container, &mut 
job_manager); + start_the_udp_instances(config, app_container, &mut job_manager).await; + start_the_http_instances(config, app_container, &mut job_manager).await; + start_the_http_api(config, app_container, &mut job_manager).await; + start_torrent_cleanup(config, app_container, &mut job_manager); + start_health_check_api(config, app_container, &mut job_manager).await; - jobs + job_manager } fn warn_if_no_services_enabled(config: &Configuration) { @@ -109,40 +109,40 @@ async fn load_whitelisted_torrents(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { - let opt_job = jobs::http_tracker_core::start_event_listener(config, app_container); +fn start_http_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { + let opt_handle = jobs::http_tracker_core::start_event_listener(config, app_container); - if let Some(job) = opt_job { - jobs.push(job); + if let Some(handle) = opt_handle { + job_manager.push("http_core_event_listener", handle); } } -fn start_udp_core_event_listener(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { - let opt_job = jobs::udp_tracker_core::start_event_listener(config, app_container); +fn start_udp_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { + let opt_handle = jobs::udp_tracker_core::start_event_listener(config, app_container); - if let Some(job) = opt_job { - jobs.push(job); + if let Some(handle) = opt_handle { + job_manager.push("udp_core_event_listener", handle); } } -fn start_udp_server_event_listener(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { - let opt_job = jobs::udp_tracker_server::start_event_listener(config, app_container); +fn start_udp_server_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { + let opt_handle = jobs::udp_tracker_server::start_event_listener(config, app_container); - if let Some(job) = opt_job { - jobs.push(job); + if let Some(handle) = 
opt_handle { + job_manager.push("udp_server_event_listener", handle); } } -async fn start_the_udp_instances(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { +async fn start_the_udp_instances(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { if let Some(udp_trackers) = &config.udp_trackers { - for udp_tracker_config in udp_trackers { + for (idx, udp_tracker_config) in udp_trackers.iter().enumerate() { if config.core.private { tracing::warn!( "Could not start UDP tracker on: {} while in private mode. UDP is not safe for private trackers!", udp_tracker_config.bind_address ); } else { - start_udp_instance(udp_tracker_config, app_container, jobs).await; + start_udp_instance(idx, udp_tracker_config, app_container, job_manager).await; } } } else { @@ -150,26 +150,31 @@ async fn start_the_udp_instances(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { +async fn start_udp_instance( + idx: usize, + udp_tracker_config: &UdpTracker, + app_container: &Arc, + job_manager: &mut JobManager, +) { let udp_tracker_container = app_container .udp_tracker_container(udp_tracker_config.bind_address) .expect("Could not create UDP tracker container"); let udp_tracker_server_container = app_container.udp_tracker_server_container(); - jobs.push( - udp_tracker::start_job( - udp_tracker_container, - udp_tracker_server_container, - app_container.registar.give_form(), - ) - .await, - ); + let handle = udp_tracker::start_job( + udp_tracker_container, + udp_tracker_server_container, + app_container.registar.give_form(), + ) + .await; + + job_manager.push(format!("udp_instance_{}_{}", idx, udp_tracker_config.bind_address), handle); } -async fn start_the_http_instances(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { +async fn start_the_http_instances(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { if let Some(http_trackers) = &config.http_trackers { - for http_tracker_config in http_trackers { - 
start_http_instance(http_tracker_config, app_container, jobs).await; + for (idx, http_tracker_config) in http_trackers.iter().enumerate() { + start_http_instance(idx, http_tracker_config, app_container, job_manager).await; } } else { tracing::info!("No HTTP blocks in configuration"); @@ -177,26 +182,27 @@ async fn start_the_http_instances(config: &Configuration, app_container: &Arc, - jobs: &mut Vec>, + job_manager: &mut JobManager, ) { let http_tracker_container = app_container .http_tracker_container(http_tracker_config.bind_address) .expect("Could not create HTTP tracker container"); - if let Some(job) = http_tracker::start_job( + if let Some(handle) = http_tracker::start_job( http_tracker_container, app_container.registar.give_form(), torrust_axum_http_tracker_server::Version::V1, ) .await { - jobs.push(job); + job_manager.push(format!("http_instance_{}_{}", idx, http_tracker_config.bind_address), handle); } } -async fn start_the_http_api(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { +async fn start_the_http_api(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { if let Some(http_api_config) = &config.http_api { let http_api_config = Arc::new(http_api_config.clone()); let http_api_container = app_container.tracker_http_api_container(&http_api_config); @@ -208,22 +214,23 @@ async fn start_the_http_api(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { +fn start_torrent_cleanup(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { if config.core.inactive_peer_cleanup_interval > 0 { - jobs.push(torrent_cleanup::start_job( - &config.core, - &app_container.tracker_core_container.torrents_manager, - )); + let handle = torrent_cleanup::start_job(&config.core, &app_container.tracker_core_container.torrents_manager); + + job_manager.push("torrent_cleanup", handle); } } -async fn start_health_check_api(config: &Configuration, app_container: &Arc, jobs: &mut Vec>) { - 
jobs.push(health_check_api::start_job(&config.health_check_api, app_container.registar.entries()).await); +async fn start_health_check_api(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { + let handle = health_check_api::start_job(&config.health_check_api, app_container.registar.entries()).await; + + job_manager.push("health_check_api", handle); } diff --git a/src/bootstrap/jobs/manager.rs b/src/bootstrap/jobs/manager.rs new file mode 100644 index 000000000..5beab3224 --- /dev/null +++ b/src/bootstrap/jobs/manager.rs @@ -0,0 +1,89 @@ +use std::time::Duration; + +use tokio::task::JoinHandle; +use tokio::time::timeout; +use tracing::{info, warn}; + +/// Represents a named background job. +#[derive(Debug)] +pub struct Job { + pub name: String, + pub handle: JoinHandle<()>, +} + +impl Job { + pub fn new>(name: N, handle: JoinHandle<()>) -> Self { + Self { + name: name.into(), + handle, + } + } +} + +/// Manages multiple background jobs. +#[derive(Debug, Default)] +pub struct JobManager { + jobs: Vec, +} + +impl JobManager { + #[must_use] + pub fn new() -> Self { + Self { jobs: Vec::new() } + } + + pub fn push>(&mut self, name: N, handle: JoinHandle<()>) { + self.jobs.push(Job::new(name, handle)); + } + + /// Waits sequentially for all jobs to complete, with a graceful timeout per + /// job. + pub async fn wait_for_all(mut self, grace_period: Duration) { + for job in self.jobs.drain(..) 
{ + let name = job.name.clone(); + + info!(job = %name, "Waiting for job to finish (timeout of {} seconds) ...", grace_period.as_secs()); + + if let Ok(result) = timeout(grace_period, job.handle).await { + if let Err(e) = result { + warn!(job = %name, "Job return an error: {:?}", e); + } else { + info!(job = %name, "Job completed gracefully"); + } + } else { + warn!(job = %name, "Job did not complete in time"); + } + } + } +} + +#[cfg(test)] +mod tests { + use tokio::time::Duration; + + use super::*; + + #[tokio::test] + async fn it_should_wait_for_all_jobs_to_finish() { + let mut manager = JobManager::new(); + + manager.push("job1", tokio::spawn(async {})); + manager.push("job2", tokio::spawn(async {})); + + manager.wait_for_all(Duration::from_secs(1)).await; + } + + #[tokio::test] + async fn it_should_log_when_a_job_panics() { + let mut manager = JobManager::new(); + + manager.push( + "panic_job", + tokio::spawn(async { + panic!("expected panic"); + }), + ); + + manager.wait_for_all(Duration::from_secs(1)).await; + } +} diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index 579618d09..2e3d798ad 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -9,6 +9,7 @@ pub mod health_check_api; pub mod http_tracker; pub mod http_tracker_core; +pub mod manager; pub mod torrent_cleanup; pub mod tracker_apis; pub mod udp_tracker; diff --git a/src/console/profiling.rs b/src/console/profiling.rs index 873dbb574..df44f4009 100644 --- a/src/console/profiling.rs +++ b/src/console/profiling.rs @@ -191,8 +191,7 @@ pub async fn run() { _ = tokio::signal::ctrl_c() => { tracing::info!("Torrust tracker shutting down via Ctrl+C ..."); - // Await for all jobs to shutdown - futures::future::join_all(jobs).await; + jobs.wait_for_all(Duration::from_secs(10)).await; } } diff --git a/src/main.rs b/src/main.rs index 8ba4311f7..a49c3aeba 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,16 +1,16 @@ +use std::time::Duration; + use torrust_tracker_lib::app; 
#[tokio::main] async fn main() { let (_app_container, jobs) = app::run().await; - // handle the signals tokio::select! { _ = tokio::signal::ctrl_c() => { tracing::info!("Torrust tracker shutting down ..."); - // Await for all jobs to shutdown - futures::future::join_all(jobs).await; + jobs.wait_for_all(Duration::from_secs(10)).await; tracing::info!("Torrust tracker successfully shutdown."); } From 8f42271efad18c7fe3563440156fee8b0f719d7f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 29 Apr 2025 17:52:49 +0100 Subject: [PATCH 534/802] chore: [#1477] remove unused dependency --- Cargo.lock | 1 - Cargo.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f5cba3708..686af7854 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4684,7 +4684,6 @@ dependencies = [ "bittorrent-udp-tracker-core", "chrono", "clap", - "futures", "local-ip-address", "mockall", "rand 0.9.1", diff --git a/Cargo.toml b/Cargo.toml index 9243ed483..c5ce7c216 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,7 +40,6 @@ bittorrent-tracker-core = { version = "3.0.0-develop", path = "packages/tracker- bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "packages/udp-tracker-core" } chrono = { version = "0", default-features = false, features = ["clock"] } clap = { version = "4", features = ["derive", "env"] } -futures = "0" rand = "0" regex = "1" reqwest = { version = "0", features = ["json"] } From 3d53b236d914282937790e3ec4ab3bff1b2dcb27 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 30 Apr 2025 11:35:05 +0100 Subject: [PATCH 535/802] feat: [#1491] copy torrent repo benchmarking into a new pkg It will be removed from the `torrent-repository` package, keeping only data strcutures used in production. 
--- .github/workflows/deployment.yaml | 1 + Cargo.lock | 20 + Cargo.toml | 2 +- .../Cargo.toml | 38 ++ .../torrent-repository-benchmarking/README.md | 32 + .../benches/helpers/asyn.rs | 153 +++++ .../benches/helpers/mod.rs | 3 + .../benches/helpers/sync.rs | 155 +++++ .../benches/helpers/utils.rs | 41 ++ .../benches/repository_benchmark.rs | 270 ++++++++ .../src/entry/mod.rs | 92 +++ .../src/entry/mutex_parking_lot.rs | 49 ++ .../src/entry/mutex_std.rs | 51 ++ .../src/entry/mutex_tokio.rs | 49 ++ .../src/entry/peer_list.rs | 286 ++++++++ .../src/entry/rw_lock_parking_lot.rs | 49 ++ .../src/entry/single.rs | 81 +++ .../src/lib.rs | 44 ++ .../src/repository/dash_map_mutex_std.rs | 111 +++ .../src/repository/mod.rs | 46 ++ .../src/repository/rw_lock_std.rs | 132 ++++ .../src/repository/rw_lock_std_mutex_std.rs | 130 ++++ .../src/repository/rw_lock_std_mutex_tokio.rs | 167 +++++ .../src/repository/rw_lock_tokio.rs | 138 ++++ .../src/repository/rw_lock_tokio_mutex_std.rs | 135 ++++ .../repository/rw_lock_tokio_mutex_tokio.rs | 148 ++++ .../src/repository/skip_map_mutex_std.rs | 328 +++++++++ .../tests/common/mod.rs | 3 + .../tests/common/repo.rs | 242 +++++++ .../tests/common/torrent.rs | 101 +++ .../tests/common/torrent_peer_builder.rs | 90 +++ .../tests/entry/mod.rs | 443 ++++++++++++ .../tests/integration.rs | 22 + .../tests/repository/mod.rs | 639 ++++++++++++++++++ 34 files changed, 4290 insertions(+), 1 deletion(-) create mode 100644 packages/torrent-repository-benchmarking/Cargo.toml create mode 100644 packages/torrent-repository-benchmarking/README.md create mode 100644 packages/torrent-repository-benchmarking/benches/helpers/asyn.rs create mode 100644 packages/torrent-repository-benchmarking/benches/helpers/mod.rs create mode 100644 packages/torrent-repository-benchmarking/benches/helpers/sync.rs create mode 100644 packages/torrent-repository-benchmarking/benches/helpers/utils.rs create mode 100644 
packages/torrent-repository-benchmarking/benches/repository_benchmark.rs create mode 100644 packages/torrent-repository-benchmarking/src/entry/mod.rs create mode 100644 packages/torrent-repository-benchmarking/src/entry/mutex_parking_lot.rs create mode 100644 packages/torrent-repository-benchmarking/src/entry/mutex_std.rs create mode 100644 packages/torrent-repository-benchmarking/src/entry/mutex_tokio.rs create mode 100644 packages/torrent-repository-benchmarking/src/entry/peer_list.rs create mode 100644 packages/torrent-repository-benchmarking/src/entry/rw_lock_parking_lot.rs create mode 100644 packages/torrent-repository-benchmarking/src/entry/single.rs create mode 100644 packages/torrent-repository-benchmarking/src/lib.rs create mode 100644 packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs create mode 100644 packages/torrent-repository-benchmarking/src/repository/mod.rs create mode 100644 packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs create mode 100644 packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs create mode 100644 packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs create mode 100644 packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs create mode 100644 packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs create mode 100644 packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs create mode 100644 packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs create mode 100644 packages/torrent-repository-benchmarking/tests/common/mod.rs create mode 100644 packages/torrent-repository-benchmarking/tests/common/repo.rs create mode 100644 packages/torrent-repository-benchmarking/tests/common/torrent.rs create mode 100644 packages/torrent-repository-benchmarking/tests/common/torrent_peer_builder.rs create mode 100644 
packages/torrent-repository-benchmarking/tests/entry/mod.rs create mode 100644 packages/torrent-repository-benchmarking/tests/integration.rs create mode 100644 packages/torrent-repository-benchmarking/tests/repository/mod.rs diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 2ef298eab..d62b4bbcc 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -78,5 +78,6 @@ jobs: cargo publish -p torrust-tracker-metrics cargo publish -p torrust-tracker-primitives cargo publish -p torrust-tracker-test-helpers + cargo publish -p torrust-tracker-torrent-benchmarking cargo publish -p torrust-tracker-torrent-repository cargo publish -p torrust-udp-tracker-server diff --git a/Cargo.lock b/Cargo.lock index 686af7854..da46a5a8f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4852,6 +4852,26 @@ dependencies = [ "zerocopy 0.7.35", ] +[[package]] +name = "torrust-tracker-torrent-repository-benchmarking" +version = "3.0.0-develop" +dependencies = [ + "aquatic_udp_protocol", + "async-std", + "bittorrent-primitives", + "criterion", + "crossbeam-skiplist", + "dashmap", + "futures", + "parking_lot", + "rstest", + "tokio", + "torrust-tracker-clock", + "torrust-tracker-configuration", + "torrust-tracker-primitives", + "zerocopy 0.7.35", +] + [[package]] name = "torrust-udp-tracker-server" version = "3.0.0-develop" diff --git a/Cargo.toml b/Cargo.toml index c5ce7c216..a15ff78df 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -68,7 +68,7 @@ torrust-rest-tracker-api-client = { version = "3.0.0-develop", path = "packages/ torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "packages/test-helpers" } [workspace] -members = ["console/tracker-client"] +members = ["console/tracker-client", "packages/torrent-repository-benchmarking"] [profile.dev] debug = 1 diff --git a/packages/torrent-repository-benchmarking/Cargo.toml b/packages/torrent-repository-benchmarking/Cargo.toml new file mode 100644 index 000000000..1a93c513c 
--- /dev/null +++ b/packages/torrent-repository-benchmarking/Cargo.toml @@ -0,0 +1,38 @@ +[package] +description = "A library to runt benchmarking for different implementations of a repository of torrents files and their peers." +keywords = ["library", "repository", "torrents"] +name = "torrust-tracker-torrent-repository-benchmarking" +readme = "README.md" + +authors.workspace = true +categories.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +aquatic_udp_protocol = "0" +bittorrent-primitives = "0.1.0" +crossbeam-skiplist = "0" +dashmap = "6" +futures = "0" +parking_lot = "0" +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } +torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +zerocopy = "0.7" + +[dev-dependencies] +async-std = { version = "1", features = ["attributes", "tokio1"] } +criterion = { version = "0", features = ["async_tokio"] } +rstest = "0" + +[[bench]] +harness = false +name = "repository_benchmark" diff --git a/packages/torrent-repository-benchmarking/README.md b/packages/torrent-repository-benchmarking/README.md new file mode 100644 index 000000000..f248ca0da --- /dev/null +++ b/packages/torrent-repository-benchmarking/README.md @@ -0,0 +1,32 @@ +# Torrust Tracker Torrent Repository Benchmarking + +A library to runt benchmarking for different implementations of a repository of torrents files and their peers. Torrent repositories are used by the [Torrust Tracker](https://github.com/torrust/torrust-tracker). 
+ +## Benchmarking + +```console +cargo bench -p torrust-tracker-torrent-repository +``` + +Example partial output: + +```output + Running benches/repository_benchmark.rs (target/release/deps/repository_benchmark-a9b0013c8d09c3c3) +add_one_torrent/RwLockStd + time: [63.057 ns 63.242 ns 63.506 ns] +Found 12 outliers among 100 measurements (12.00%) + 2 (2.00%) low severe + 2 (2.00%) low mild + 2 (2.00%) high mild + 6 (6.00%) high severe +add_one_torrent/RwLockStdMutexStd + time: [62.505 ns 63.077 ns 63.817 ns] +``` + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-torrent-repository). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/torrent-repository-benchmarking/benches/helpers/asyn.rs b/packages/torrent-repository-benchmarking/benches/helpers/asyn.rs new file mode 100644 index 000000000..4deb1955a --- /dev/null +++ b/packages/torrent-repository-benchmarking/benches/helpers/asyn.rs @@ -0,0 +1,153 @@ +use std::sync::Arc; +use std::time::{Duration, Instant}; + +use bittorrent_primitives::info_hash::InfoHash; +use futures::stream::FuturesUnordered; +use torrust_tracker_torrent_repository_benchmarking::repository::RepositoryAsync; + +use super::utils::{generate_unique_info_hashes, DEFAULT_PEER}; + +pub async fn add_one_torrent(samples: u64) -> Duration +where + V: RepositoryAsync + Default, +{ + let start = Instant::now(); + + for _ in 0..samples { + let torrent_repository = V::default(); + + let info_hash = InfoHash::default(); + + torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER, None).await; + + torrent_repository.get_swarm_metadata(&info_hash).await; + } + + start.elapsed() +} + +// Add one torrent ten thousand times in parallel (depending on the set worker threads) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: u64, sleep: Option) -> Duration +where + V: RepositoryAsync + Default, + Arc: Clone + Send + 
Sync + 'static, +{ + let torrent_repository = Arc::::default(); + let info_hash = InfoHash::default(); + let handles = FuturesUnordered::new(); + + // Add the torrent/peer to the torrent repository + torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER, None).await; + + torrent_repository.get_swarm_metadata(&info_hash).await; + + let start = Instant::now(); + + for _ in 0..samples { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER, None).await; + + torrent_repository_clone.get_swarm_metadata(&info_hash).await; + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +} + +// Add ten thousand torrents in parallel (depending on the set worker threads) +pub async fn add_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option, +) -> Duration +where + V: RepositoryAsync + Default, + Arc: Clone + Send + Sync + 'static, +{ + let torrent_repository = Arc::::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in a usize")); + let handles = FuturesUnordered::new(); + + let start = Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER, None).await; + + torrent_repository_clone.get_swarm_metadata(&info_hash).await; + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + 
start.elapsed() +} + +// Async update ten thousand torrents in parallel (depending on the set worker threads) +pub async fn update_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option, +) -> Duration +where + V: RepositoryAsync + Default, + Arc: Clone + Send + Sync + 'static, +{ + let torrent_repository = Arc::::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in usize")); + let handles = FuturesUnordered::new(); + + // Add the torrents/peers to the torrent repository + for info_hash in &info_hashes { + torrent_repository.upsert_peer(info_hash, &DEFAULT_PEER, None).await; + torrent_repository.get_swarm_metadata(info_hash).await; + } + + let start = Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER, None).await; + torrent_repository_clone.get_swarm_metadata(&info_hash).await; + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +} diff --git a/packages/torrent-repository-benchmarking/benches/helpers/mod.rs b/packages/torrent-repository-benchmarking/benches/helpers/mod.rs new file mode 100644 index 000000000..1026aa4bf --- /dev/null +++ b/packages/torrent-repository-benchmarking/benches/helpers/mod.rs @@ -0,0 +1,3 @@ +pub mod asyn; +pub mod sync; +pub mod utils; diff --git a/packages/torrent-repository-benchmarking/benches/helpers/sync.rs b/packages/torrent-repository-benchmarking/benches/helpers/sync.rs new file mode 100644 index 000000000..2cefb5a4a --- /dev/null +++ b/packages/torrent-repository-benchmarking/benches/helpers/sync.rs @@ -0,0 +1,155 @@ +use std::sync::Arc; +use 
std::time::{Duration, Instant}; + +use bittorrent_primitives::info_hash::InfoHash; +use futures::stream::FuturesUnordered; +use torrust_tracker_torrent_repository_benchmarking::repository::Repository; + +use super::utils::{generate_unique_info_hashes, DEFAULT_PEER}; + +// Simply add one torrent +#[must_use] +pub fn add_one_torrent(samples: u64) -> Duration +where + V: Repository + Default, +{ + let start = Instant::now(); + + for _ in 0..samples { + let torrent_repository = V::default(); + + let info_hash = InfoHash::default(); + + torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER, None); + + torrent_repository.get_swarm_metadata(&info_hash); + } + + start.elapsed() +} + +// Add one torrent ten thousand times in parallel (depending on the set worker threads) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: u64, sleep: Option) -> Duration +where + V: Repository + Default, + Arc: Clone + Send + Sync + 'static, +{ + let torrent_repository = Arc::::default(); + let info_hash = InfoHash::default(); + let handles = FuturesUnordered::new(); + + // Add the torrent/peer to the torrent repository + torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER, None); + + torrent_repository.get_swarm_metadata(&info_hash); + + let start = Instant::now(); + + for _ in 0..samples { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER, None); + + torrent_repository_clone.get_swarm_metadata(&info_hash); + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +} + +// Add ten thousand torrents in parallel (depending on the set worker threads) +pub async fn add_multiple_torrents_in_parallel( + runtime: 
&tokio::runtime::Runtime, + samples: u64, + sleep: Option, +) -> Duration +where + V: Repository + Default, + Arc: Clone + Send + Sync + 'static, +{ + let torrent_repository = Arc::::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in a usize")); + let handles = FuturesUnordered::new(); + + let start = Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER, None); + + torrent_repository_clone.get_swarm_metadata(&info_hash); + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +} + +// Update ten thousand torrents in parallel (depending on the set worker threads) +pub async fn update_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option, +) -> Duration +where + V: Repository + Default, + Arc: Clone + Send + Sync + 'static, +{ + let torrent_repository = Arc::::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in usize")); + let handles = FuturesUnordered::new(); + + // Add the torrents/peers to the torrent repository + for info_hash in &info_hashes { + torrent_repository.upsert_peer(info_hash, &DEFAULT_PEER, None); + torrent_repository.get_swarm_metadata(info_hash); + } + + let start = Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER, None); + torrent_repository_clone.get_swarm_metadata(&info_hash); + + if let Some(sleep_time) = sleep { + let start_time = 
std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +} diff --git a/packages/torrent-repository-benchmarking/benches/helpers/utils.rs b/packages/torrent-repository-benchmarking/benches/helpers/utils.rs new file mode 100644 index 000000000..51b09ec0f --- /dev/null +++ b/packages/torrent-repository-benchmarking/benches/helpers/utils.rs @@ -0,0 +1,41 @@ +use std::collections::HashSet; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use zerocopy::I64; + +pub const DEFAULT_PEER: Peer = Peer { + peer_id: PeerId([0; 20]), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::from_secs(0), + uploaded: NumberOfBytes(I64::ZERO), + downloaded: NumberOfBytes(I64::ZERO), + left: NumberOfBytes(I64::ZERO), + event: AnnounceEvent::Started, +}; + +#[must_use] +#[allow(clippy::missing_panics_doc)] +pub fn generate_unique_info_hashes(size: usize) -> Vec { + let mut result = HashSet::new(); + + let mut bytes = [0u8; 20]; + + #[allow(clippy::cast_possible_truncation)] + for i in 0..size { + bytes[0] = (i & 0xFF) as u8; + bytes[1] = ((i >> 8) & 0xFF) as u8; + bytes[2] = ((i >> 16) & 0xFF) as u8; + bytes[3] = ((i >> 24) & 0xFF) as u8; + + let info_hash = InfoHash::from_bytes(&bytes); + result.insert(info_hash); + } + + assert_eq!(result.len(), size); + + result.into_iter().collect() +} diff --git a/packages/torrent-repository-benchmarking/benches/repository_benchmark.rs b/packages/torrent-repository-benchmarking/benches/repository_benchmark.rs new file mode 100644 index 000000000..a58207492 --- /dev/null +++ 
b/packages/torrent-repository-benchmarking/benches/repository_benchmark.rs @@ -0,0 +1,270 @@ +use std::time::Duration; + +mod helpers; + +use criterion::{criterion_group, criterion_main, Criterion}; +use torrust_tracker_torrent_repository_benchmarking::{ + TorrentsDashMapMutexStd, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, + TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexParkingLot, TorrentsSkipMapMutexStd, + TorrentsSkipMapRwLockParkingLot, +}; + +use crate::helpers::{asyn, sync}; + +fn add_one_torrent(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("add_one_torrent"); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(asyn::add_one_torrent::); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt).iter_custom(asyn::add_one_torrent::); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(asyn::add_one_torrent::); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(asyn::add_one_torrent::); + }); + + group.bench_function("SkipMapMutexStd", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.bench_function("SkipMapMutexParkingLot", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.bench_function("SkipMapRwLockParkingLot", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.bench_function("DashMapMutexStd", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.finish(); +} + +fn 
add_multiple_torrents_in_parallel(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("add_multiple_torrents_in_parallel"); + + //group.sampling_mode(criterion::SamplingMode::Flat); + //group.sample_size(10); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapMutexParkingLot", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapRwLockParkingLot", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("DashMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| 
sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.finish(); +} + +fn update_one_torrent_in_parallel(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("update_one_torrent_in_parallel"); + + //group.sampling_mode(criterion::SamplingMode::Flat); + //group.sample_size(10); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapMutexParkingLot", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapRwLockParkingLot", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("DashMapMutexStd", |b| { + b.to_async(&rt) + 
.iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.finish(); +} + +fn update_multiple_torrents_in_parallel(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("update_multiple_torrents_in_parallel"); + + //group.sampling_mode(criterion::SamplingMode::Flat); + //group.sample_size(10); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt).iter_custom(|iters| { + asyn::update_multiple_torrents_in_parallel::(&rt, iters, None) + }); + }); + + group.bench_function("SkipMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapMutexParkingLot", |b| { + b.to_async(&rt).iter_custom(|iters| { + sync::update_multiple_torrents_in_parallel::(&rt, iters, None) + }); + }); + + group.bench_function("SkipMapRwLockParkingLot", |b| { + b.to_async(&rt).iter_custom(|iters| { + 
sync::update_multiple_torrents_in_parallel::(&rt, iters, None) + }); + }); + + group.bench_function("DashMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.finish(); +} + +criterion_group!( + benches, + add_one_torrent, + add_multiple_torrents_in_parallel, + update_one_torrent_in_parallel, + update_multiple_torrents_in_parallel +); +criterion_main!(benches); diff --git a/packages/torrent-repository-benchmarking/src/entry/mod.rs b/packages/torrent-repository-benchmarking/src/entry/mod.rs new file mode 100644 index 000000000..b920839d9 --- /dev/null +++ b/packages/torrent-repository-benchmarking/src/entry/mod.rs @@ -0,0 +1,92 @@ +use std::fmt::Debug; +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use self::peer_list::PeerList; + +pub mod mutex_parking_lot; +pub mod mutex_std; +pub mod mutex_tokio; +pub mod peer_list; +pub mod rw_lock_parking_lot; +pub mod single; + +pub trait Entry { + /// It returns the swarm metadata (statistics) as a struct: + /// + /// `(seeders, completed, leechers)` + fn get_swarm_metadata(&self) -> SwarmMetadata; + + /// Returns True if Still a Valid Entry according to the Tracker Policy + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool; + + /// Returns True if the Peers is Empty + fn peers_is_empty(&self) -> bool; + + /// Returns the number of Peers + fn get_peers_len(&self) -> usize; + + /// Get all swarm peers, optionally limiting the result. + fn get_peers(&self, limit: Option) -> Vec>; + + /// It returns the list of peers for a given peer client, optionally limiting the + /// result. + /// + /// It filters out the input peer, typically because we want to return this + /// list of peers to that client peer. 
+ fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec>; + + /// It updates a peer and returns true if the number of complete downloads have increased. + /// + /// The number of peers that have complete downloading is synchronously updated when peers are updated. + /// That's the total torrent downloads counter. + fn upsert_peer(&mut self, peer: &peer::Peer) -> bool; + + /// It removes peer from the swarm that have not been updated for more than `current_cutoff` seconds + fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch); +} + +#[allow(clippy::module_name_repetitions)] +pub trait EntrySync { + fn get_swarm_metadata(&self) -> SwarmMetadata; + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool; + fn peers_is_empty(&self) -> bool; + fn get_peers_len(&self) -> usize; + fn get_peers(&self, limit: Option) -> Vec>; + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec>; + fn upsert_peer(&self, peer: &peer::Peer) -> bool; + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); +} + +#[allow(clippy::module_name_repetitions)] +pub trait EntryAsync { + fn get_swarm_metadata(&self) -> impl std::future::Future + Send; + fn meets_retaining_policy(self, policy: &TrackerPolicy) -> impl std::future::Future + Send; + fn peers_is_empty(&self) -> impl std::future::Future + Send; + fn get_peers_len(&self) -> impl std::future::Future + Send; + fn get_peers(&self, limit: Option) -> impl std::future::Future>> + Send; + fn get_peers_for_client( + &self, + client: &SocketAddr, + limit: Option, + ) -> impl std::future::Future>> + Send; + fn upsert_peer(self, peer: &peer::Peer) -> impl std::future::Future + Send; + fn remove_inactive_peers(self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future + Send; +} + +/// A data structure containing all the information about a torrent in the tracker. 
+/// +/// This is the tracker entry for a given torrent and contains the swarm data, +/// that's the list of all the peers trying to download the same torrent. +/// The tracker keeps one entry like this for every torrent. +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Torrent { + /// A network of peers that are all trying to download the torrent associated to this entry + pub(crate) swarm: PeerList, + /// The number of peers that have ever completed downloading the torrent associated to this entry + pub(crate) downloaded: u32, +} diff --git a/packages/torrent-repository-benchmarking/src/entry/mutex_parking_lot.rs b/packages/torrent-repository-benchmarking/src/entry/mutex_parking_lot.rs new file mode 100644 index 000000000..738c3ff9d --- /dev/null +++ b/packages/torrent-repository-benchmarking/src/entry/mutex_parking_lot.rs @@ -0,0 +1,49 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use super::{Entry, EntrySync}; +use crate::{EntryMutexParkingLot, EntrySingle}; + +impl EntrySync for EntryMutexParkingLot { + fn get_swarm_metadata(&self) -> SwarmMetadata { + self.lock().get_swarm_metadata() + } + + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + self.lock().meets_retaining_policy(policy) + } + + fn peers_is_empty(&self) -> bool { + self.lock().peers_is_empty() + } + + fn get_peers_len(&self) -> usize { + self.lock().get_peers_len() + } + + fn get_peers(&self, limit: Option) -> Vec> { + self.lock().get_peers(limit) + } + + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.lock().get_peers_for_client(client, limit) + } + + fn upsert_peer(&self, peer: &peer::Peer) -> bool { + self.lock().upsert_peer(peer) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + 
self.lock().remove_inactive_peers(current_cutoff); + } +} + +impl From for EntryMutexParkingLot { + fn from(entry: EntrySingle) -> Self { + Arc::new(parking_lot::Mutex::new(entry)) + } +} diff --git a/packages/torrent-repository-benchmarking/src/entry/mutex_std.rs b/packages/torrent-repository-benchmarking/src/entry/mutex_std.rs new file mode 100644 index 000000000..0ab70a96f --- /dev/null +++ b/packages/torrent-repository-benchmarking/src/entry/mutex_std.rs @@ -0,0 +1,51 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use super::{Entry, EntrySync}; +use crate::{EntryMutexStd, EntrySingle}; + +impl EntrySync for EntryMutexStd { + fn get_swarm_metadata(&self) -> SwarmMetadata { + self.lock().expect("it should get a lock").get_swarm_metadata() + } + + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + self.lock().expect("it should get a lock").meets_retaining_policy(policy) + } + + fn peers_is_empty(&self) -> bool { + self.lock().expect("it should get a lock").peers_is_empty() + } + + fn get_peers_len(&self) -> usize { + self.lock().expect("it should get a lock").get_peers_len() + } + + fn get_peers(&self, limit: Option) -> Vec> { + self.lock().expect("it should get lock").get_peers(limit) + } + + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.lock().expect("it should get lock").get_peers_for_client(client, limit) + } + + fn upsert_peer(&self, peer: &peer::Peer) -> bool { + self.lock().expect("it should lock the entry").upsert_peer(peer) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + self.lock() + .expect("it should lock the entry") + .remove_inactive_peers(current_cutoff); + } +} + +impl From for EntryMutexStd { + fn from(entry: EntrySingle) -> Self { + Arc::new(std::sync::Mutex::new(entry)) 
+ } +} diff --git a/packages/torrent-repository-benchmarking/src/entry/mutex_tokio.rs b/packages/torrent-repository-benchmarking/src/entry/mutex_tokio.rs new file mode 100644 index 000000000..6db789a72 --- /dev/null +++ b/packages/torrent-repository-benchmarking/src/entry/mutex_tokio.rs @@ -0,0 +1,49 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use super::{Entry, EntryAsync}; +use crate::{EntryMutexTokio, EntrySingle}; + +impl EntryAsync for EntryMutexTokio { + async fn get_swarm_metadata(&self) -> SwarmMetadata { + self.lock().await.get_swarm_metadata() + } + + async fn meets_retaining_policy(self, policy: &TrackerPolicy) -> bool { + self.lock().await.meets_retaining_policy(policy) + } + + async fn peers_is_empty(&self) -> bool { + self.lock().await.peers_is_empty() + } + + async fn get_peers_len(&self) -> usize { + self.lock().await.get_peers_len() + } + + async fn get_peers(&self, limit: Option) -> Vec> { + self.lock().await.get_peers(limit) + } + + async fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.lock().await.get_peers_for_client(client, limit) + } + + async fn upsert_peer(self, peer: &peer::Peer) -> bool { + self.lock().await.upsert_peer(peer) + } + + async fn remove_inactive_peers(self, current_cutoff: DurationSinceUnixEpoch) { + self.lock().await.remove_inactive_peers(current_cutoff); + } +} + +impl From for EntryMutexTokio { + fn from(entry: EntrySingle) -> Self { + Arc::new(tokio::sync::Mutex::new(entry)) + } +} diff --git a/packages/torrent-repository-benchmarking/src/entry/peer_list.rs b/packages/torrent-repository-benchmarking/src/entry/peer_list.rs new file mode 100644 index 000000000..33270cf27 --- /dev/null +++ b/packages/torrent-repository-benchmarking/src/entry/peer_list.rs @@ -0,0 +1,286 @@ +//! A peer list. 
+use std::net::SocketAddr; +use std::sync::Arc; + +use aquatic_udp_protocol::PeerId; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +// code-review: the current implementation uses the peer Id as the ``BTreeMap`` +// key. That would allow adding two identical peers except for the Id. +// For example, two peers with the same socket address but a different peer Id +// would be allowed. That would lead to duplicated peers in the tracker responses. + +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct PeerList { + peers: std::collections::BTreeMap>, +} + +impl PeerList { + #[must_use] + pub fn len(&self) -> usize { + self.peers.len() + } + + #[must_use] + pub fn is_empty(&self) -> bool { + self.peers.is_empty() + } + + pub fn upsert(&mut self, value: Arc) -> Option> { + self.peers.insert(value.peer_id, value) + } + + pub fn remove(&mut self, key: &PeerId) -> Option> { + self.peers.remove(key) + } + + pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { + self.peers + .retain(|_, peer| peer::ReadInfo::get_updated(peer) > current_cutoff); + } + + #[must_use] + pub fn get(&self, peer_id: &PeerId) -> Option<&Arc> { + self.peers.get(peer_id) + } + + #[must_use] + pub fn get_all(&self, limit: Option) -> Vec> { + match limit { + Some(limit) => self.peers.values().take(limit).cloned().collect(), + None => self.peers.values().cloned().collect(), + } + } + + #[must_use] + pub fn seeders_and_leechers(&self) -> (usize, usize) { + let seeders = self.peers.values().filter(|peer| peer.is_seeder()).count(); + let leechers = self.len() - seeders; + + (seeders, leechers) + } + + #[must_use] + pub fn get_peers_excluding_addr(&self, peer_addr: &SocketAddr, limit: Option) -> Vec> { + match limit { + Some(limit) => self + .peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *peer_addr) + // Limit the number of peers on the result + 
.take(limit) + .cloned() + .collect(), + None => self + .peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *peer_addr) + .cloned() + .collect(), + } + } +} + +#[cfg(test)] +mod tests { + + mod it_should { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::PeerId; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::entry::peer_list::PeerList; + + #[test] + fn be_empty_when_no_peers_have_been_inserted() { + let peer_list = PeerList::default(); + + assert!(peer_list.is_empty()); + } + + #[test] + fn have_zero_length_when_no_peers_have_been_inserted() { + let peer_list = PeerList::default(); + + assert_eq!(peer_list.len(), 0); + } + + #[test] + fn allow_inserting_a_new_peer() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + assert_eq!(peer_list.upsert(peer.into()), None); + } + + #[test] + fn allow_updating_a_preexisting_peer() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + assert_eq!(peer_list.upsert(peer.into()), Some(Arc::new(peer))); + } + + #[test] + fn allow_getting_all_peers() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + assert_eq!(peer_list.get_all(None), [Arc::new(peer)]); + } + + #[test] + fn allow_getting_one_peer_by_id() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + assert_eq!(peer_list.get(&peer.peer_id), Some(Arc::new(peer)).as_ref()); + } + + #[test] + fn increase_the_number_of_peers_after_inserting_a_new_one() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + 
assert_eq!(peer_list.len(), 1); + } + + #[test] + fn decrease_the_number_of_peers_after_removing_one() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + peer_list.remove(&peer.peer_id); + + assert!(peer_list.is_empty()); + } + + #[test] + fn allow_removing_an_existing_peer() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + peer_list.remove(&peer.peer_id); + + assert_eq!(peer_list.get(&peer.peer_id), None); + } + + #[test] + fn allow_getting_all_peers_excluding_peers_with_a_given_address() { + let mut peer_list = PeerList::default(); + + let peer1 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .build(); + peer_list.upsert(peer1.into()); + + let peer2 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) + .build(); + peer_list.upsert(peer2.into()); + + assert_eq!(peer_list.get_peers_excluding_addr(&peer2.peer_addr, None), [Arc::new(peer1)]); + } + + #[test] + fn return_the_number_of_seeders_in_the_list() { + let mut peer_list = PeerList::default(); + + let seeder = PeerBuilder::seeder().build(); + let leecher = PeerBuilder::leecher().build(); + + peer_list.upsert(seeder.into()); + peer_list.upsert(leecher.into()); + + let (seeders, _leechers) = peer_list.seeders_and_leechers(); + + assert_eq!(seeders, 1); + } + + #[test] + fn return_the_number_of_leechers_in_the_list() { + let mut peer_list = PeerList::default(); + + let seeder = PeerBuilder::seeder().build(); + let leecher = PeerBuilder::leecher().build(); + + peer_list.upsert(seeder.into()); + peer_list.upsert(leecher.into()); + + let (_seeders, leechers) = peer_list.seeders_and_leechers(); + + assert_eq!(leechers, 1); + } + + #[test] + fn 
remove_inactive_peers() { + let mut peer_list = PeerList::default(); + let one_second = DurationSinceUnixEpoch::new(1, 0); + + // Insert the peer + let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); + let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); + peer_list.upsert(peer.into()); + + // Remove peers not updated since one second after inserting the peer + peer_list.remove_inactive_peers(last_update_time + one_second); + + assert_eq!(peer_list.len(), 0); + } + + #[test] + fn not_remove_active_peers() { + let mut peer_list = PeerList::default(); + let one_second = DurationSinceUnixEpoch::new(1, 0); + + // Insert the peer + let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); + let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); + peer_list.upsert(peer.into()); + + // Remove peers not updated since one second before inserting the peer. + peer_list.remove_inactive_peers(last_update_time - one_second); + + assert_eq!(peer_list.len(), 1); + } + + #[test] + fn allow_inserting_two_identical_peers_except_for_the_id() { + let mut peer_list = PeerList::default(); + + let peer1 = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); + peer_list.upsert(peer1.into()); + + let peer2 = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000002")).build(); + peer_list.upsert(peer2.into()); + + assert_eq!(peer_list.len(), 2); + } + } +} diff --git a/packages/torrent-repository-benchmarking/src/entry/rw_lock_parking_lot.rs b/packages/torrent-repository-benchmarking/src/entry/rw_lock_parking_lot.rs new file mode 100644 index 000000000..ac0dc0b30 --- /dev/null +++ b/packages/torrent-repository-benchmarking/src/entry/rw_lock_parking_lot.rs @@ -0,0 +1,49 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use 
torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use super::{Entry, EntrySync}; +use crate::{EntryRwLockParkingLot, EntrySingle}; + +impl EntrySync for EntryRwLockParkingLot { + fn get_swarm_metadata(&self) -> SwarmMetadata { + self.read().get_swarm_metadata() + } + + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + self.read().meets_retaining_policy(policy) + } + + fn peers_is_empty(&self) -> bool { + self.read().peers_is_empty() + } + + fn get_peers_len(&self) -> usize { + self.read().get_peers_len() + } + + fn get_peers(&self, limit: Option) -> Vec> { + self.read().get_peers(limit) + } + + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.read().get_peers_for_client(client, limit) + } + + fn upsert_peer(&self, peer: &peer::Peer) -> bool { + self.write().upsert_peer(peer) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + self.write().remove_inactive_peers(current_cutoff); + } +} + +impl From for EntryRwLockParkingLot { + fn from(entry: EntrySingle) -> Self { + Arc::new(parking_lot::RwLock::new(entry)) + } +} diff --git a/packages/torrent-repository-benchmarking/src/entry/single.rs b/packages/torrent-repository-benchmarking/src/entry/single.rs new file mode 100644 index 000000000..0f922bd02 --- /dev/null +++ b/packages/torrent-repository-benchmarking/src/entry/single.rs @@ -0,0 +1,81 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use aquatic_udp_protocol::AnnounceEvent; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::peer::{self}; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::Entry; +use crate::EntrySingle; + +impl Entry for EntrySingle { + #[allow(clippy::cast_possible_truncation)] + fn get_swarm_metadata(&self) -> SwarmMetadata { + let (seeders, leechers) = self.swarm.seeders_and_leechers(); + + SwarmMetadata { + downloaded: 
self.downloaded, + complete: seeders as u32, + incomplete: leechers as u32, + } + } + + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + if policy.persistent_torrent_completed_stat && self.downloaded > 0 { + return true; + } + + if policy.remove_peerless_torrents && self.swarm.is_empty() { + return false; + } + + true + } + + fn peers_is_empty(&self) -> bool { + self.swarm.is_empty() + } + + fn get_peers_len(&self) -> usize { + self.swarm.len() + } + + fn get_peers(&self, limit: Option) -> Vec> { + self.swarm.get_all(limit) + } + + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.swarm.get_peers_excluding_addr(client, limit) + } + + fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { + let mut number_of_downloads_increased: bool = false; + + match peer::ReadInfo::get_event(peer) { + AnnounceEvent::Stopped => { + drop(self.swarm.remove(&peer::ReadInfo::get_id(peer))); + } + AnnounceEvent::Completed => { + let previous = self.swarm.upsert(Arc::new(*peer)); + // Don't count if peer was not previously known and not already completed. + if previous.is_some_and(|p| p.event != AnnounceEvent::Completed) { + self.downloaded += 1; + number_of_downloads_increased = true; + } + } + _ => { + // `Started` event (first announced event) or + // `None` event (announcements done at regular intervals). 
+ drop(self.swarm.upsert(Arc::new(*peer))); + } + } + + number_of_downloads_increased + } + + fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { + self.swarm.remove_inactive_peers(current_cutoff); + } +} diff --git a/packages/torrent-repository-benchmarking/src/lib.rs b/packages/torrent-repository-benchmarking/src/lib.rs new file mode 100644 index 000000000..a8955808e --- /dev/null +++ b/packages/torrent-repository-benchmarking/src/lib.rs @@ -0,0 +1,44 @@ +use std::sync::Arc; + +use repository::dash_map_mutex_std::XacrimonDashMap; +use repository::rw_lock_std::RwLockStd; +use repository::rw_lock_tokio::RwLockTokio; +use repository::skip_map_mutex_std::CrossbeamSkipList; +use torrust_tracker_clock::clock; + +pub mod entry; +pub mod repository; + +// Repo Entries + +pub type EntrySingle = entry::Torrent; +pub type EntryMutexStd = Arc>; +pub type EntryMutexTokio = Arc>; +pub type EntryMutexParkingLot = Arc>; +pub type EntryRwLockParkingLot = Arc>; + +// Repos + +pub type TorrentsRwLockStd = RwLockStd; +pub type TorrentsRwLockStdMutexStd = RwLockStd; +pub type TorrentsRwLockStdMutexTokio = RwLockStd; +pub type TorrentsRwLockTokio = RwLockTokio; +pub type TorrentsRwLockTokioMutexStd = RwLockTokio; +pub type TorrentsRwLockTokioMutexTokio = RwLockTokio; + +pub type TorrentsSkipMapMutexStd = CrossbeamSkipList; +pub type TorrentsSkipMapMutexParkingLot = CrossbeamSkipList; +pub type TorrentsSkipMapRwLockParkingLot = CrossbeamSkipList; + +pub type TorrentsDashMapMutexStd = XacrimonDashMap; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. 
+#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; diff --git a/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs new file mode 100644 index 000000000..d4a84caa0 --- /dev/null +++ b/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs @@ -0,0 +1,111 @@ +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; +use dashmap::DashMap; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; + +use super::Repository; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntrySync}; +use crate::{EntryMutexStd, EntrySingle}; + +#[derive(Default, Debug)] +pub struct XacrimonDashMap { + pub torrents: DashMap, +} + +impl Repository for XacrimonDashMap +where + EntryMutexStd: EntrySync, + EntrySingle: Entry, +{ + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { + // todo: load persistent torrent data if provided + + if let Some(entry) = self.torrents.get(info_hash) { + entry.upsert_peer(peer) + } else { + let _unused = self.torrents.insert(*info_hash, Arc::default()); + if let Some(entry) = self.torrents.get(info_hash) { + entry.upsert_peer(peer) + } else { + false + } + } + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) + } + + fn get(&self, key: &InfoHash) -> Option { + let maybe_entry = self.torrents.get(key); + maybe_entry.map(|entry| entry.clone()) + } + + fn get_metrics(&self) -> AggregateSwarmMetadata { + let mut metrics = AggregateSwarmMetadata::default(); + + for entry in 
&self.torrents { + let stats = entry.value().lock().expect("it should get a lock").get_swarm_metadata(); + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + match pagination { + Some(pagination) => self + .torrents + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + None => self + .torrents + .iter() + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + for (info_hash, completed) in persistent_torrents { + if self.torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexStd::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + self.torrents.insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + self.torrents.remove(key).map(|(_key, value)| value.clone()) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + for entry in &self.torrents { + entry.value().remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + self.torrents.retain(|_, entry| entry.meets_retaining_policy(policy)); + } +} diff --git a/packages/torrent-repository-benchmarking/src/repository/mod.rs b/packages/torrent-repository-benchmarking/src/repository/mod.rs new file mode 100644 index 000000000..9284ff6e6 --- /dev/null +++ b/packages/torrent-repository-benchmarking/src/repository/mod.rs @@ -0,0 +1,46 @@ +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::pagination::Pagination; 
+use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; + +pub mod dash_map_mutex_std; +pub mod rw_lock_std; +pub mod rw_lock_std_mutex_std; +pub mod rw_lock_std_mutex_tokio; +pub mod rw_lock_tokio; +pub mod rw_lock_tokio_mutex_std; +pub mod rw_lock_tokio_mutex_tokio; +pub mod skip_map_mutex_std; + +use std::fmt::Debug; + +pub trait Repository: Debug + Default + Sized + 'static { + fn get(&self, key: &InfoHash) -> Option; + fn get_metrics(&self) -> AggregateSwarmMetadata; + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, T)>; + fn import_persistent(&self, persistent_torrents: &PersistentTorrents); + fn remove(&self, key: &InfoHash) -> Option; + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); + fn remove_peerless_torrents(&self, policy: &TrackerPolicy); + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option) -> bool; + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option; +} + +#[allow(clippy::module_name_repetitions)] +pub trait RepositoryAsync: Debug + Default + Sized + 'static { + fn get(&self, key: &InfoHash) -> impl std::future::Future> + Send; + fn get_metrics(&self) -> impl std::future::Future + Send; + fn get_paginated(&self, pagination: Option<&Pagination>) -> impl std::future::Future> + Send; + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) -> impl std::future::Future + Send; + fn remove(&self, key: &InfoHash) -> impl std::future::Future> + Send; + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future + Send; + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> impl std::future::Future + Send; + fn upsert_peer( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + opt_persistent_torrent: Option, + ) -> impl std::future::Future + Send; + fn 
get_swarm_metadata(&self, info_hash: &InfoHash) -> impl std::future::Future> + Send; +} diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs new file mode 100644 index 000000000..d190718af --- /dev/null +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs @@ -0,0 +1,132 @@ +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; + +use super::Repository; +use crate::entry::peer_list::PeerList; +use crate::entry::Entry; +use crate::{EntrySingle, TorrentsRwLockStd}; + +#[derive(Default, Debug)] +pub struct RwLockStd { + pub(crate) torrents: std::sync::RwLock>, +} + +impl RwLockStd { + /// # Panics + /// + /// Panics if unable to get a lock. 
+ pub fn write( + &self, + ) -> std::sync::RwLockWriteGuard<'_, std::collections::BTreeMap> { + self.torrents.write().expect("it should get lock") + } +} + +impl TorrentsRwLockStd { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("it should get the read lock") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("it should get the write lock") + } +} + +impl Repository for TorrentsRwLockStd +where + EntrySingle: Entry, +{ + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { + // todo: load persistent torrent data if provided + + let mut db = self.get_torrents_mut(); + + let entry = db.entry(*info_hash).or_insert(EntrySingle::default()); + + entry.upsert_peer(peer) + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.get(info_hash).map(|entry| entry.get_swarm_metadata()) + } + + fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + fn get_metrics(&self) -> AggregateSwarmMetadata { + let mut metrics = AggregateSwarmMetadata::default(); + + for entry in self.get_torrents().values() { + let stats = entry.get_swarm_metadata(); + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, 
b.clone())).collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut(); + + for (info_hash, downloaded) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = EntrySingle { + swarm: PeerList::default(), + downloaded: *downloaded, + }; + + torrents.insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let mut db = self.get_torrents_mut(); + let entries = db.values_mut(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.meets_retaining_policy(policy)); + } +} diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs new file mode 100644 index 000000000..1764b94e8 --- /dev/null +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs @@ -0,0 +1,130 @@ +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; + +use super::Repository; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntrySync}; +use crate::{EntryMutexStd, EntrySingle, TorrentsRwLockStdMutexStd}; + +impl TorrentsRwLockStdMutexStd { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + 
std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl Repository for TorrentsRwLockStdMutexStd +where + EntryMutexStd: EntrySync, + EntrySingle: Entry, +{ + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { + // todo: load persistent torrent data if provided + + let maybe_entry = self.get_torrents().get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut(); + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.upsert_peer(peer) + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.get_torrents() + .get(info_hash) + .map(super::super::entry::EntrySync::get_swarm_metadata) + } + + fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + fn get_metrics(&self) -> AggregateSwarmMetadata { + let mut metrics = AggregateSwarmMetadata::default(); + + for entry in self.get_torrents().values() { + let stats = entry.lock().expect("it should get a lock").get_swarm_metadata(); + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + 
fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexStd::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + torrents.insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let db = self.get_torrents(); + let entries = db.values().cloned(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.lock().expect("it should lock entry").meets_retaining_policy(policy)); + } +} diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs new file mode 100644 index 000000000..116c1ff87 --- /dev/null +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs @@ -0,0 +1,167 @@ +use std::iter::zip; +use std::pin::Pin; +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; +use futures::future::join_all; +use futures::{Future, FutureExt}; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; + +use super::RepositoryAsync; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntryAsync}; +use crate::{EntryMutexTokio, EntrySingle, 
TorrentsRwLockStdMutexTokio}; + +impl TorrentsRwLockStdMutexTokio { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl RepositoryAsync for TorrentsRwLockStdMutexTokio +where + EntryMutexTokio: EntryAsync, + EntrySingle: Entry, +{ + async fn upsert_peer( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + _opt_persistent_torrent: Option, + ) -> bool { + // todo: load persistent torrent data if provided + + let maybe_entry = self.get_torrents().get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut(); + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.upsert_peer(peer).await + } + + async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + let maybe_entry = self.get_torrents().get(info_hash).cloned(); + + match maybe_entry { + Some(entry) => Some(entry.get_swarm_metadata().await), + None => None, + } + } + + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexTokio)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> AggregateSwarmMetadata { + let mut metrics = AggregateSwarmMetadata::default(); + + let entries: Vec<_> = 
self.get_torrents().values().cloned().collect(); + + for entry in entries { + let stats = entry.lock().await.get_swarm_metadata(); + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; + } + + metrics + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut db = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if db.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexTokio::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + db.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let handles: Vec + Send>>>; + { + let db = self.get_torrents(); + handles = db + .values() + .cloned() + .map(|e| e.remove_inactive_peers(current_cutoff).boxed()) + .collect(); + } + join_all(handles).await; + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let handles: Vec> + Send>>>; + + { + let db = self.get_torrents(); + + handles = zip(db.keys().copied(), db.values().cloned()) + .map(|(infohash, torrent)| { + torrent + .meets_retaining_policy(policy) + .map(move |should_be_retained| if should_be_retained { None } else { Some(infohash) }) + .boxed() + }) + .collect::>(); + } + + let not_good = join_all(handles).await; + + let mut db = self.get_torrents_mut(); + + for remove in not_good.into_iter().flatten() { + drop(db.remove(&remove)); + } + } +} diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs new file mode 100644 index 000000000..53838023d 
--- /dev/null +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs @@ -0,0 +1,138 @@ +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; + +use super::RepositoryAsync; +use crate::entry::peer_list::PeerList; +use crate::entry::Entry; +use crate::{EntrySingle, TorrentsRwLockTokio}; + +#[derive(Default, Debug)] +pub struct RwLockTokio { + pub(crate) torrents: tokio::sync::RwLock>, +} + +impl RwLockTokio { + pub fn write( + &self, + ) -> impl std::future::Future< + Output = tokio::sync::RwLockWriteGuard<'_, std::collections::BTreeMap>, + > { + self.torrents.write() + } +} + +impl TorrentsRwLockTokio { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl RepositoryAsync for TorrentsRwLockTokio +where + EntrySingle: Entry, +{ + async fn upsert_peer( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + _opt_persistent_torrent: Option, + ) -> bool { + // todo: load persistent torrent data if provided + + let mut db = self.get_torrents_mut().await; + + let entry = db.entry(*info_hash).or_insert(EntrySingle::default()); + + entry.upsert_peer(peer) + } + + async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.get(info_hash).await.map(|entry| entry.get_swarm_metadata()) + } + + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn 
get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> AggregateSwarmMetadata { + let mut metrics = AggregateSwarmMetadata::default(); + + for entry in self.get_torrents().await.values() { + let stats = entry.get_swarm_metadata(); + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; + } + + metrics + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + }; + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let mut db = self.get_torrents_mut().await; + let entries = db.values_mut(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.meets_retaining_policy(policy)); + } +} diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs new file mode 100644 index 
000000000..eb7e300fd --- /dev/null +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs @@ -0,0 +1,135 @@ +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; + +use super::RepositoryAsync; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntrySync}; +use crate::{EntryMutexStd, EntrySingle, TorrentsRwLockTokioMutexStd}; + +impl TorrentsRwLockTokioMutexStd { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl RepositoryAsync for TorrentsRwLockTokioMutexStd +where + EntryMutexStd: EntrySync, + EntrySingle: Entry, +{ + async fn upsert_peer( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + _opt_persistent_torrent: Option, + ) -> bool { + // todo: load persistent torrent data if provided + + let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut().await; + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.upsert_peer(peer) + } + + async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.get(info_hash).await.map(|entry| entry.get_swarm_metadata()) + } + + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn 
get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> AggregateSwarmMetadata { + let mut metrics = AggregateSwarmMetadata::default(); + + for entry in self.get_torrents().await.values() { + let stats = entry.get_swarm_metadata(); + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; + } + + metrics + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexStd::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let db = self.get_torrents().await; + let entries = db.values().cloned(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.lock().expect("it should lock entry").meets_retaining_policy(policy)); + } +} diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs 
b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs new file mode 100644 index 000000000..c8ebaf4d6 --- /dev/null +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -0,0 +1,148 @@ +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; + +use super::RepositoryAsync; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntryAsync}; +use crate::{EntryMutexTokio, EntrySingle, TorrentsRwLockTokioMutexTokio}; + +impl TorrentsRwLockTokioMutexTokio { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl RepositoryAsync for TorrentsRwLockTokioMutexTokio +where + EntryMutexTokio: EntryAsync, + EntrySingle: Entry, +{ + async fn upsert_peer( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + _opt_persistent_torrent: Option, + ) -> bool { + // todo: load persistent torrent data if provided + + let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut().await; + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.upsert_peer(peer).await + } + + async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + match self.get(info_hash).await { + Some(entry) => 
Some(entry.get_swarm_metadata().await), + None => None, + } + } + + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexTokio)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> AggregateSwarmMetadata { + let mut metrics = AggregateSwarmMetadata::default(); + + for entry in self.get_torrents().await.values() { + let stats = entry.get_swarm_metadata().await; + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; + } + + metrics + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut db = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if db.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexTokio::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + db.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let db = self.get_torrents().await; + let entries = db.values().cloned(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff).await; + } + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + let mut not_good = Vec::::default(); + + for 
(&infohash, torrent) in db.iter() { + if !torrent.clone().meets_retaining_policy(policy).await { + not_good.push(infohash); + } + } + + for remove in not_good { + drop(db.remove(&remove)); + } + } +} diff --git a/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs new file mode 100644 index 000000000..8a15a9442 --- /dev/null +++ b/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs @@ -0,0 +1,328 @@ +use std::sync::Arc; + +use bittorrent_primitives::info_hash::InfoHash; +use crossbeam_skiplist::SkipMap; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; + +use super::Repository; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntrySync}; +use crate::{EntryMutexParkingLot, EntryMutexStd, EntryRwLockParkingLot, EntrySingle}; + +#[derive(Default, Debug)] +pub struct CrossbeamSkipList { + pub torrents: SkipMap, +} + +impl Repository for CrossbeamSkipList +where + EntryMutexStd: EntrySync, + EntrySingle: Entry, +{ + /// Upsert a peer into the swarm of a torrent. + /// + /// Optionally, it can also preset the number of downloads of the torrent + /// only if it's the first time the torrent is being inserted. + /// + /// # Arguments + /// + /// * `info_hash` - The info hash of the torrent. + /// * `peer` - The peer to upsert. + /// * `opt_persistent_torrent` - The optional persisted data about a torrent + /// (number of downloads for the torrent). + /// + /// # Returns + /// + /// Returns `true` if the number of downloads was increased because the peer + /// completed the download. 
+ fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option) -> bool { + if let Some(existing_entry) = self.torrents.get(info_hash) { + existing_entry.value().upsert_peer(peer) + } else { + let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { + EntryMutexStd::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: number_of_downloads, + } + .into(), + ) + } else { + EntryMutexStd::default() + }; + + let inserted_entry = self.torrents.get_or_insert(*info_hash, new_entry); + + inserted_entry.value().upsert_peer(peer) + } + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) + } + + fn get(&self, key: &InfoHash) -> Option { + let maybe_entry = self.torrents.get(key); + maybe_entry.map(|entry| entry.value().clone()) + } + + fn get_metrics(&self) -> AggregateSwarmMetadata { + let mut metrics = AggregateSwarmMetadata::default(); + + for entry in &self.torrents { + let stats = entry.value().lock().expect("it should get a lock").get_swarm_metadata(); + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + match pagination { + Some(pagination) => self + .torrents + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + None => self + .torrents + .iter() + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + for (info_hash, completed) in persistent_torrents { + if self.torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexStd::new( + 
EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + // Since SkipMap is lock-free the torrent could have been inserted + // after checking if it exists. + self.torrents.get_or_insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + self.torrents.remove(key).map(|entry| entry.value().clone()) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + for entry in &self.torrents { + entry.value().remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + for entry in &self.torrents { + if entry.value().meets_retaining_policy(policy) { + continue; + } + + entry.remove(); + } + } +} + +impl Repository for CrossbeamSkipList +where + EntryRwLockParkingLot: EntrySync, + EntrySingle: Entry, +{ + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { + // todo: load persistent torrent data if provided + + let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); + entry.value().upsert_peer(peer) + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) + } + + fn get(&self, key: &InfoHash) -> Option { + let maybe_entry = self.torrents.get(key); + maybe_entry.map(|entry| entry.value().clone()) + } + + fn get_metrics(&self) -> AggregateSwarmMetadata { + let mut metrics = AggregateSwarmMetadata::default(); + + for entry in &self.torrents { + let stats = entry.value().read().get_swarm_metadata(); + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryRwLockParkingLot)> { + match pagination { + Some(pagination) => self + .torrents 
+ .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + None => self + .torrents + .iter() + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + for (info_hash, completed) in persistent_torrents { + if self.torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryRwLockParkingLot::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + // Since SkipMap is lock-free the torrent could have been inserted + // after checking if it exists. + self.torrents.get_or_insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + self.torrents.remove(key).map(|entry| entry.value().clone()) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + for entry in &self.torrents { + entry.value().remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + for entry in &self.torrents { + if entry.value().meets_retaining_policy(policy) { + continue; + } + + entry.remove(); + } + } +} + +impl Repository for CrossbeamSkipList +where + EntryMutexParkingLot: EntrySync, + EntrySingle: Entry, +{ + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { + // todo: load persistent torrent data if provided + + let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); + entry.value().upsert_peer(peer) + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) + } + + fn get(&self, key: &InfoHash) -> Option { + let maybe_entry = self.torrents.get(key); + maybe_entry.map(|entry| entry.value().clone()) + } + + fn get_metrics(&self) -> AggregateSwarmMetadata { + let mut metrics = 
AggregateSwarmMetadata::default(); + + for entry in &self.torrents { + let stats = entry.value().lock().get_swarm_metadata(); + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexParkingLot)> { + match pagination { + Some(pagination) => self + .torrents + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + None => self + .torrents + .iter() + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + for (info_hash, completed) in persistent_torrents { + if self.torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexParkingLot::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + // Since SkipMap is lock-free the torrent could have been inserted + // after checking if it exists. 
+ self.torrents.get_or_insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + self.torrents.remove(key).map(|entry| entry.value().clone()) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + for entry in &self.torrents { + entry.value().remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + for entry in &self.torrents { + if entry.value().meets_retaining_policy(policy) { + continue; + } + + entry.remove(); + } + } +} diff --git a/packages/torrent-repository-benchmarking/tests/common/mod.rs b/packages/torrent-repository-benchmarking/tests/common/mod.rs new file mode 100644 index 000000000..efdf7f742 --- /dev/null +++ b/packages/torrent-repository-benchmarking/tests/common/mod.rs @@ -0,0 +1,3 @@ +pub mod repo; +pub mod torrent; +pub mod torrent_peer_builder; diff --git a/packages/torrent-repository-benchmarking/tests/common/repo.rs b/packages/torrent-repository-benchmarking/tests/common/repo.rs new file mode 100644 index 000000000..6c5c6ff77 --- /dev/null +++ b/packages/torrent-repository-benchmarking/tests/common/repo.rs @@ -0,0 +1,242 @@ +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_torrent_repository_benchmarking::repository::{Repository as _, RepositoryAsync as _}; +use torrust_tracker_torrent_repository_benchmarking::{ + EntrySingle, TorrentsDashMapMutexStd, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, + TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexParkingLot, + TorrentsSkipMapMutexStd, TorrentsSkipMapRwLockParkingLot, +}; + 
+#[derive(Debug)] +pub(crate) enum Repo { + RwLockStd(TorrentsRwLockStd), + RwLockStdMutexStd(TorrentsRwLockStdMutexStd), + RwLockStdMutexTokio(TorrentsRwLockStdMutexTokio), + RwLockTokio(TorrentsRwLockTokio), + RwLockTokioMutexStd(TorrentsRwLockTokioMutexStd), + RwLockTokioMutexTokio(TorrentsRwLockTokioMutexTokio), + SkipMapMutexStd(TorrentsSkipMapMutexStd), + SkipMapMutexParkingLot(TorrentsSkipMapMutexParkingLot), + SkipMapRwLockParkingLot(TorrentsSkipMapRwLockParkingLot), + DashMapMutexStd(TorrentsDashMapMutexStd), +} + +impl Repo { + pub(crate) async fn upsert_peer( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + opt_persistent_torrent: Option, + ) -> bool { + match self { + Repo::RwLockStd(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), + Repo::RwLockStdMutexStd(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), + Repo::RwLockStdMutexTokio(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent).await, + Repo::RwLockTokio(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent).await, + Repo::RwLockTokioMutexStd(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent).await, + Repo::RwLockTokioMutexTokio(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent).await, + Repo::SkipMapMutexStd(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), + Repo::SkipMapMutexParkingLot(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), + Repo::SkipMapRwLockParkingLot(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), + Repo::DashMapMutexStd(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), + } + } + + pub(crate) async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + match self { + Repo::RwLockStd(repo) => repo.get_swarm_metadata(info_hash), + Repo::RwLockStdMutexStd(repo) => repo.get_swarm_metadata(info_hash), + Repo::RwLockStdMutexTokio(repo) => repo.get_swarm_metadata(info_hash).await, + 
Repo::RwLockTokio(repo) => repo.get_swarm_metadata(info_hash).await, + Repo::RwLockTokioMutexStd(repo) => repo.get_swarm_metadata(info_hash).await, + Repo::RwLockTokioMutexTokio(repo) => repo.get_swarm_metadata(info_hash).await, + Repo::SkipMapMutexStd(repo) => repo.get_swarm_metadata(info_hash), + Repo::SkipMapMutexParkingLot(repo) => repo.get_swarm_metadata(info_hash), + Repo::SkipMapRwLockParkingLot(repo) => repo.get_swarm_metadata(info_hash), + Repo::DashMapMutexStd(repo) => repo.get_swarm_metadata(info_hash), + } + } + + pub(crate) async fn get(&self, key: &InfoHash) -> Option { + match self { + Repo::RwLockStd(repo) => repo.get(key), + Repo::RwLockStdMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), + Repo::RwLockStdMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()), + Repo::RwLockTokio(repo) => repo.get(key).await, + Repo::RwLockTokioMutexStd(repo) => Some(repo.get(key).await?.lock().unwrap().clone()), + Repo::RwLockTokioMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()), + Repo::SkipMapMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), + Repo::SkipMapMutexParkingLot(repo) => Some(repo.get(key)?.lock().clone()), + Repo::SkipMapRwLockParkingLot(repo) => Some(repo.get(key)?.read().clone()), + Repo::DashMapMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), + } + } + + pub(crate) async fn get_metrics(&self) -> AggregateSwarmMetadata { + match self { + Repo::RwLockStd(repo) => repo.get_metrics(), + Repo::RwLockStdMutexStd(repo) => repo.get_metrics(), + Repo::RwLockStdMutexTokio(repo) => repo.get_metrics().await, + Repo::RwLockTokio(repo) => repo.get_metrics().await, + Repo::RwLockTokioMutexStd(repo) => repo.get_metrics().await, + Repo::RwLockTokioMutexTokio(repo) => repo.get_metrics().await, + Repo::SkipMapMutexStd(repo) => repo.get_metrics(), + Repo::SkipMapMutexParkingLot(repo) => repo.get_metrics(), + Repo::SkipMapRwLockParkingLot(repo) => repo.get_metrics(), + 
Repo::DashMapMutexStd(repo) => repo.get_metrics(), + } + } + + pub(crate) async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { + match self { + Repo::RwLockStd(repo) => repo.get_paginated(pagination), + Repo::RwLockStdMutexStd(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) + .collect(), + Repo::RwLockStdMutexTokio(repo) => { + let mut v: Vec<(InfoHash, EntrySingle)> = vec![]; + + for (i, t) in repo.get_paginated(pagination).await { + v.push((i, t.lock().await.clone())); + } + v + } + Repo::RwLockTokio(repo) => repo.get_paginated(pagination).await, + Repo::RwLockTokioMutexStd(repo) => repo + .get_paginated(pagination) + .await + .iter() + .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) + .collect(), + Repo::RwLockTokioMutexTokio(repo) => { + let mut v: Vec<(InfoHash, EntrySingle)> = vec![]; + + for (i, t) in repo.get_paginated(pagination).await { + v.push((i, t.lock().await.clone())); + } + v + } + Repo::SkipMapMutexStd(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) + .collect(), + Repo::SkipMapMutexParkingLot(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, t.lock().clone())) + .collect(), + Repo::SkipMapRwLockParkingLot(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, t.read().clone())) + .collect(), + Repo::DashMapMutexStd(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) + .collect(), + } + } + + pub(crate) async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + match self { + Repo::RwLockStd(repo) => repo.import_persistent(persistent_torrents), + Repo::RwLockStdMutexStd(repo) => repo.import_persistent(persistent_torrents), + Repo::RwLockStdMutexTokio(repo) => 
repo.import_persistent(persistent_torrents).await, + Repo::RwLockTokio(repo) => repo.import_persistent(persistent_torrents).await, + Repo::RwLockTokioMutexStd(repo) => repo.import_persistent(persistent_torrents).await, + Repo::RwLockTokioMutexTokio(repo) => repo.import_persistent(persistent_torrents).await, + Repo::SkipMapMutexStd(repo) => repo.import_persistent(persistent_torrents), + Repo::SkipMapMutexParkingLot(repo) => repo.import_persistent(persistent_torrents), + Repo::SkipMapRwLockParkingLot(repo) => repo.import_persistent(persistent_torrents), + Repo::DashMapMutexStd(repo) => repo.import_persistent(persistent_torrents), + } + } + + pub(crate) async fn remove(&self, key: &InfoHash) -> Option { + match self { + Repo::RwLockStd(repo) => repo.remove(key), + Repo::RwLockStdMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), + Repo::RwLockStdMutexTokio(repo) => Some(repo.remove(key).await?.lock().await.clone()), + Repo::RwLockTokio(repo) => repo.remove(key).await, + Repo::RwLockTokioMutexStd(repo) => Some(repo.remove(key).await?.lock().unwrap().clone()), + Repo::RwLockTokioMutexTokio(repo) => Some(repo.remove(key).await?.lock().await.clone()), + Repo::SkipMapMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), + Repo::SkipMapMutexParkingLot(repo) => Some(repo.remove(key)?.lock().clone()), + Repo::SkipMapRwLockParkingLot(repo) => Some(repo.remove(key)?.write().clone()), + Repo::DashMapMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), + } + } + + pub(crate) async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + match self { + Repo::RwLockStd(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::RwLockStdMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::RwLockStdMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::RwLockTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::RwLockTokioMutexStd(repo) => 
repo.remove_inactive_peers(current_cutoff).await, + Repo::RwLockTokioMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::SkipMapMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::SkipMapMutexParkingLot(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::SkipMapRwLockParkingLot(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::DashMapMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), + } + } + + pub(crate) async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + match self { + Repo::RwLockStd(repo) => repo.remove_peerless_torrents(policy), + Repo::RwLockStdMutexStd(repo) => repo.remove_peerless_torrents(policy), + Repo::RwLockStdMutexTokio(repo) => repo.remove_peerless_torrents(policy).await, + Repo::RwLockTokio(repo) => repo.remove_peerless_torrents(policy).await, + Repo::RwLockTokioMutexStd(repo) => repo.remove_peerless_torrents(policy).await, + Repo::RwLockTokioMutexTokio(repo) => repo.remove_peerless_torrents(policy).await, + Repo::SkipMapMutexStd(repo) => repo.remove_peerless_torrents(policy), + Repo::SkipMapMutexParkingLot(repo) => repo.remove_peerless_torrents(policy), + Repo::SkipMapRwLockParkingLot(repo) => repo.remove_peerless_torrents(policy), + Repo::DashMapMutexStd(repo) => repo.remove_peerless_torrents(policy), + } + } + + pub(crate) async fn insert(&self, info_hash: &InfoHash, torrent: EntrySingle) -> Option { + match self { + Repo::RwLockStd(repo) => { + repo.write().insert(*info_hash, torrent); + } + Repo::RwLockStdMutexStd(repo) => { + repo.write().insert(*info_hash, torrent.into()); + } + Repo::RwLockStdMutexTokio(repo) => { + repo.write().insert(*info_hash, torrent.into()); + } + Repo::RwLockTokio(repo) => { + repo.write().await.insert(*info_hash, torrent); + } + Repo::RwLockTokioMutexStd(repo) => { + repo.write().await.insert(*info_hash, torrent.into()); + } + Repo::RwLockTokioMutexTokio(repo) => { + repo.write().await.insert(*info_hash, torrent.into()); + } 
+ Repo::SkipMapMutexStd(repo) => { + repo.torrents.insert(*info_hash, torrent.into()); + } + Repo::SkipMapMutexParkingLot(repo) => { + repo.torrents.insert(*info_hash, torrent.into()); + } + Repo::SkipMapRwLockParkingLot(repo) => { + repo.torrents.insert(*info_hash, torrent.into()); + } + Repo::DashMapMutexStd(repo) => { + repo.torrents.insert(*info_hash, torrent.into()); + } + } + self.get(info_hash).await + } +} diff --git a/packages/torrent-repository-benchmarking/tests/common/torrent.rs b/packages/torrent-repository-benchmarking/tests/common/torrent.rs new file mode 100644 index 000000000..02874f9fc --- /dev/null +++ b/packages/torrent-repository-benchmarking/tests/common/torrent.rs @@ -0,0 +1,101 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; +use torrust_tracker_torrent_repository_benchmarking::entry::{Entry as _, EntryAsync as _, EntrySync as _}; +use torrust_tracker_torrent_repository_benchmarking::{ + EntryMutexParkingLot, EntryMutexStd, EntryMutexTokio, EntryRwLockParkingLot, EntrySingle, +}; + +#[derive(Debug, Clone)] +pub(crate) enum Torrent { + Single(EntrySingle), + MutexStd(EntryMutexStd), + MutexTokio(EntryMutexTokio), + MutexParkingLot(EntryMutexParkingLot), + RwLockParkingLot(EntryRwLockParkingLot), +} + +impl Torrent { + pub(crate) async fn get_stats(&self) -> SwarmMetadata { + match self { + Torrent::Single(entry) => entry.get_swarm_metadata(), + Torrent::MutexStd(entry) => entry.get_swarm_metadata(), + Torrent::MutexTokio(entry) => entry.clone().get_swarm_metadata().await, + Torrent::MutexParkingLot(entry) => entry.clone().get_swarm_metadata(), + Torrent::RwLockParkingLot(entry) => entry.clone().get_swarm_metadata(), + } + } + + pub(crate) async fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + match self { + Torrent::Single(entry) => 
entry.meets_retaining_policy(policy), + Torrent::MutexStd(entry) => entry.meets_retaining_policy(policy), + Torrent::MutexTokio(entry) => entry.clone().meets_retaining_policy(policy).await, + Torrent::MutexParkingLot(entry) => entry.meets_retaining_policy(policy), + Torrent::RwLockParkingLot(entry) => entry.meets_retaining_policy(policy), + } + } + + pub(crate) async fn peers_is_empty(&self) -> bool { + match self { + Torrent::Single(entry) => entry.peers_is_empty(), + Torrent::MutexStd(entry) => entry.peers_is_empty(), + Torrent::MutexTokio(entry) => entry.clone().peers_is_empty().await, + Torrent::MutexParkingLot(entry) => entry.peers_is_empty(), + Torrent::RwLockParkingLot(entry) => entry.peers_is_empty(), + } + } + + pub(crate) async fn get_peers_len(&self) -> usize { + match self { + Torrent::Single(entry) => entry.get_peers_len(), + Torrent::MutexStd(entry) => entry.get_peers_len(), + Torrent::MutexTokio(entry) => entry.clone().get_peers_len().await, + Torrent::MutexParkingLot(entry) => entry.get_peers_len(), + Torrent::RwLockParkingLot(entry) => entry.get_peers_len(), + } + } + + pub(crate) async fn get_peers(&self, limit: Option) -> Vec> { + match self { + Torrent::Single(entry) => entry.get_peers(limit), + Torrent::MutexStd(entry) => entry.get_peers(limit), + Torrent::MutexTokio(entry) => entry.clone().get_peers(limit).await, + Torrent::MutexParkingLot(entry) => entry.get_peers(limit), + Torrent::RwLockParkingLot(entry) => entry.get_peers(limit), + } + } + + pub(crate) async fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + match self { + Torrent::Single(entry) => entry.get_peers_for_client(client, limit), + Torrent::MutexStd(entry) => entry.get_peers_for_client(client, limit), + Torrent::MutexTokio(entry) => entry.clone().get_peers_for_client(client, limit).await, + Torrent::MutexParkingLot(entry) => entry.get_peers_for_client(client, limit), + Torrent::RwLockParkingLot(entry) => entry.get_peers_for_client(client, limit), + } 
+ } + + pub(crate) async fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { + match self { + Torrent::Single(entry) => entry.upsert_peer(peer), + Torrent::MutexStd(entry) => entry.upsert_peer(peer), + Torrent::MutexTokio(entry) => entry.clone().upsert_peer(peer).await, + Torrent::MutexParkingLot(entry) => entry.upsert_peer(peer), + Torrent::RwLockParkingLot(entry) => entry.upsert_peer(peer), + } + } + + pub(crate) async fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { + match self { + Torrent::Single(entry) => entry.remove_inactive_peers(current_cutoff), + Torrent::MutexStd(entry) => entry.remove_inactive_peers(current_cutoff), + Torrent::MutexTokio(entry) => entry.clone().remove_inactive_peers(current_cutoff).await, + Torrent::MutexParkingLot(entry) => entry.remove_inactive_peers(current_cutoff), + Torrent::RwLockParkingLot(entry) => entry.remove_inactive_peers(current_cutoff), + } + } +} diff --git a/packages/torrent-repository-benchmarking/tests/common/torrent_peer_builder.rs b/packages/torrent-repository-benchmarking/tests/common/torrent_peer_builder.rs new file mode 100644 index 000000000..33120180d --- /dev/null +++ b/packages/torrent-repository-benchmarking/tests/common/torrent_peer_builder.rs @@ -0,0 +1,90 @@ +use std::net::SocketAddr; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use crate::CurrentClock; + +#[derive(Debug, Default)] +struct TorrentPeerBuilder { + peer: peer::Peer, +} + +#[allow(dead_code)] +impl TorrentPeerBuilder { + #[must_use] + fn new() -> Self { + Self { + peer: peer::Peer { + updated: CurrentClock::now(), + ..Default::default() + }, + } + } + + #[must_use] + fn with_event_completed(mut self) -> Self { + self.peer.event = AnnounceEvent::Completed; + self + } + + #[must_use] + fn with_event_started(mut self) -> Self { + self.peer.event = AnnounceEvent::Started; + self + } + 
+ #[must_use] + fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; + self + } + + #[must_use] + fn with_peer_id(mut self, peer_id: PeerId) -> Self { + self.peer.peer_id = peer_id; + self + } + + #[must_use] + fn with_number_of_bytes_left(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes::new(left); + self + } + + #[must_use] + fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { + self.peer.updated = updated; + self + } + + #[must_use] + fn into(self) -> peer::Peer { + self.peer + } +} + +/// A torrent seeder is a peer with 0 bytes left to download which +/// has not announced it has stopped +#[must_use] +pub fn a_completed_peer(id: i32) -> peer::Peer { + let peer_id = peer::Id::new(id); + TorrentPeerBuilder::new() + .with_number_of_bytes_left(0) + .with_event_completed() + .with_peer_id(*peer_id) + .into() +} + +/// A torrent leecher is a peer that is not a seeder. +/// Leecher: left > 0 OR event = Stopped +#[must_use] +pub fn a_started_peer(id: i32) -> peer::Peer { + let peer_id = peer::Id::new(id); + TorrentPeerBuilder::new() + .with_number_of_bytes_left(1) + .with_event_started() + .with_peer_id(*peer_id) + .into() +} diff --git a/packages/torrent-repository-benchmarking/tests/entry/mod.rs b/packages/torrent-repository-benchmarking/tests/entry/mod.rs new file mode 100644 index 000000000..b46c05415 --- /dev/null +++ b/packages/torrent-repository-benchmarking/tests/entry/mod.rs @@ -0,0 +1,443 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::ops::Sub; +use std::time::Duration; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use rstest::{fixture, rstest}; +use torrust_tracker_clock::clock::stopped::Stopped as _; +use torrust_tracker_clock::clock::{self, Time as _}; +use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; +use torrust_tracker_primitives::peer; +use torrust_tracker_primitives::peer::Peer; +use 
torrust_tracker_torrent_repository_benchmarking::{ + EntryMutexParkingLot, EntryMutexStd, EntryMutexTokio, EntryRwLockParkingLot, EntrySingle, +}; + +use crate::common::torrent::Torrent; +use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; +use crate::CurrentClock; + +#[fixture] +fn single() -> Torrent { + Torrent::Single(EntrySingle::default()) +} +#[fixture] +fn mutex_std() -> Torrent { + Torrent::MutexStd(EntryMutexStd::default()) +} + +#[fixture] +fn mutex_tokio() -> Torrent { + Torrent::MutexTokio(EntryMutexTokio::default()) +} + +#[fixture] +fn mutex_parking_lot() -> Torrent { + Torrent::MutexParkingLot(EntryMutexParkingLot::default()) +} + +#[fixture] +fn rw_lock_parking_lot() -> Torrent { + Torrent::RwLockParkingLot(EntryRwLockParkingLot::default()) +} + +#[fixture] +fn policy_none() -> TrackerPolicy { + TrackerPolicy::new(0, false, false) +} + +#[fixture] +fn policy_persist() -> TrackerPolicy { + TrackerPolicy::new(0, true, false) +} + +#[fixture] +fn policy_remove() -> TrackerPolicy { + TrackerPolicy::new(0, false, true) +} + +#[fixture] +fn policy_remove_persist() -> TrackerPolicy { + TrackerPolicy::new(0, true, true) +} + +pub enum Makes { + Empty, + Started, + Completed, + Downloaded, + Three, +} + +async fn make(torrent: &mut Torrent, makes: &Makes) -> Vec { + match makes { + Makes::Empty => vec![], + Makes::Started => { + let peer = a_started_peer(1); + torrent.upsert_peer(&peer).await; + vec![peer] + } + Makes::Completed => { + let peer = a_completed_peer(2); + torrent.upsert_peer(&peer).await; + vec![peer] + } + Makes::Downloaded => { + let mut peer = a_started_peer(3); + torrent.upsert_peer(&peer).await; + peer.event = AnnounceEvent::Completed; + peer.left = NumberOfBytes::new(0); + torrent.upsert_peer(&peer).await; + vec![peer] + } + Makes::Three => { + let peer_1 = a_started_peer(1); + torrent.upsert_peer(&peer_1).await; + + let peer_2 = a_completed_peer(2); + torrent.upsert_peer(&peer_2).await; + + let mut peer_3 = 
a_started_peer(3); + torrent.upsert_peer(&peer_3).await; + peer_3.event = AnnounceEvent::Completed; + peer_3.left = NumberOfBytes::new(0); + torrent.upsert_peer(&peer_3).await; + vec![peer_1, peer_2, peer_3] + } + } +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[tokio::test] +async fn it_should_be_empty_by_default( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + + assert_eq!(torrent.get_peers_len().await, 0); +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, + #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, +) { + make(&mut torrent, makes).await; + + let has_peers = !torrent.peers_is_empty().await; + let has_downloads = torrent.get_stats().await.downloaded != 0; + + match (policy.remove_peerless_torrents, policy.persistent_torrent_completed_stat) { + // remove torrents without peers, and keep completed download stats + (true, true) => match (has_peers, has_downloads) { + // no peers, but has downloads + // peers, with or without downloads + (false, true) | (true, true | false) => assert!(torrent.meets_retaining_policy(&policy).await), + // no peers and no downloads + (false, false) => assert!(!torrent.meets_retaining_policy(&policy).await), + }, + // remove torrents without peers and drop completed download stats + (true, false) => match (has_peers, has_downloads) { + // peers, with or without downloads + (true, true | false) => assert!(torrent.meets_retaining_policy(&policy).await), + // no 
peers and with or without downloads + (false, true | false) => assert!(!torrent.meets_retaining_policy(&policy).await), + }, + // keep torrents without peers, but keep or drop completed download stats + (false, true | false) => assert!(torrent.meets_retaining_policy(&policy).await), + } +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_get_peers_for_torrent_entry( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + let peers = make(&mut torrent, makes).await; + + let torrent_peers = torrent.get_peers(None).await; + + assert_eq!(torrent_peers.len(), peers.len()); + + for peer in torrent_peers { + assert!(peers.contains(&peer)); + } +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_update_a_peer(#[values(single(), mutex_std(), mutex_tokio())] mut torrent: Torrent, #[case] makes: &Makes) { + make(&mut torrent, makes).await; + + // Make and insert a new peer. + let mut peer = a_started_peer(-1); + torrent.upsert_peer(&peer).await; + + // Get the Inserted Peer by Id. + let peers = torrent.get_peers(None).await; + let original = peers + .iter() + .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) + .expect("it should find peer by id"); + + assert_eq!(original.event, AnnounceEvent::Started, "it should be as created"); + + // Announce "Completed" torrent download event. + peer.event = AnnounceEvent::Completed; + torrent.upsert_peer(&peer).await; + + // Get the Updated Peer by Id. 
+ let peers = torrent.get_peers(None).await; + let updated = peers + .iter() + .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) + .expect("it should find peer by id"); + + assert_eq!(updated.event, AnnounceEvent::Completed, "it should be updated"); +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_remove_a_peer_upon_stopped_announcement( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + use torrust_tracker_primitives::peer::ReadInfo as _; + + make(&mut torrent, makes).await; + + let mut peer = a_started_peer(-1); + + torrent.upsert_peer(&peer).await; + + // The started peer should be inserted. + let peers = torrent.get_peers(None).await; + let original = peers + .iter() + .find(|p| p.get_id() == peer.get_id()) + .expect("it should find peer by id"); + + assert_eq!(original.event, AnnounceEvent::Started); + + // Change peer to "Stopped" and insert. + peer.event = AnnounceEvent::Stopped; + torrent.upsert_peer(&peer).await; + + // It should be removed now. 
+ let peers = torrent.get_peers(None).await; + + assert_eq!( + peers.iter().find(|p| p.get_id() == peer.get_id()), + None, + "it should be removed" + ); +} + +#[rstest] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloaded_statistic( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + let downloaded = torrent.get_stats().await.downloaded; + + let peers = torrent.get_peers(None).await; + let mut peer = **peers.first().expect("there should be a peer"); + + let is_already_completed = peer.event == AnnounceEvent::Completed; + + // Announce "Completed" torrent download event. + peer.event = AnnounceEvent::Completed; + + torrent.upsert_peer(&peer).await; + let stats = torrent.get_stats().await; + + if is_already_completed { + assert_eq!(stats.downloaded, downloaded); + } else { + assert_eq!(stats.downloaded, downloaded + 1); + } +} + +#[rstest] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_update_a_peer_as_a_seeder( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + let peers = make(&mut torrent, makes).await; + let completed = u32::try_from(peers.iter().filter(|p| p.is_seeder()).count()).expect("it_should_not_be_so_many"); + + let peers = torrent.get_peers(None).await; + let mut peer = **peers.first().expect("there should be a peer"); + + let is_already_non_left = peer.left == NumberOfBytes::new(0); + + // Set Bytes Left to Zero + peer.left = NumberOfBytes::new(0); + torrent.upsert_peer(&peer).await; + let stats = 
torrent.get_stats().await; + + if is_already_non_left { + // it was already complete + assert_eq!(stats.complete, completed); + } else { + // now it is complete + assert_eq!(stats.complete, completed + 1); + } +} + +#[rstest] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_update_a_peer_as_incomplete( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + let peers = make(&mut torrent, makes).await; + let incomplete = u32::try_from(peers.iter().filter(|p| !p.is_seeder()).count()).expect("it should not be so many"); + + let peers = torrent.get_peers(None).await; + let mut peer = **peers.first().expect("there should be a peer"); + + let completed_already = peer.left == NumberOfBytes::new(0); + + // Set Bytes Left to no Zero + peer.left = NumberOfBytes::new(1); + torrent.upsert_peer(&peer).await; + let stats = torrent.get_stats().await; + + if completed_already { + // now it is incomplete + assert_eq!(stats.incomplete, incomplete + 1); + } else { + // was already incomplete + assert_eq!(stats.incomplete, incomplete); + } +} + +#[rstest] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_get_peers_excluding_the_client_socket( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + + let peers = torrent.get_peers(None).await; + let mut peer = **peers.first().expect("there should be a peer"); + + let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081); + + // for this test, we should not already use this socket. 
+ assert_ne!(peer.peer_addr, socket); + + // it should get the peer as it dose not share the socket. + assert!(torrent.get_peers_for_client(&socket, None).await.contains(&peer.into())); + + // set the address to the socket. + peer.peer_addr = socket; + torrent.upsert_peer(&peer).await; // Add peer + + // It should not include the peer that has the same socket. + assert!(!torrent.get_peers_for_client(&socket, None).await.contains(&peer.into())); +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_limit_the_number_of_peers_returned( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + + // We add one more peer than the scrape limit + for peer_number in 1..=74 + 1 { + let mut peer = a_started_peer(1); + peer.peer_id = *peer::Id::new(peer_number); + torrent.upsert_peer(&peer).await; + } + + let peers = torrent.get_peers(Some(TORRENT_PEERS_LIMIT)).await; + + assert_eq!(peers.len(), 74); +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_remove_inactive_peers_beyond_cutoff( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + const TIMEOUT: Duration = Duration::from_secs(120); + const EXPIRE: Duration = Duration::from_secs(121); + + let peers = make(&mut torrent, makes).await; + + let mut peer = a_completed_peer(-1); + + let now = clock::Working::now(); + clock::Stopped::local_set(&now); + + peer.updated = now.sub(EXPIRE); + + torrent.upsert_peer(&peer).await; + + 
assert_eq!(torrent.get_peers_len().await, peers.len() + 1); + + let current_cutoff = CurrentClock::now_sub(&TIMEOUT).unwrap_or_default(); + torrent.remove_inactive_peers(current_cutoff).await; + + assert_eq!(torrent.get_peers_len().await, peers.len()); +} diff --git a/packages/torrent-repository-benchmarking/tests/integration.rs b/packages/torrent-repository-benchmarking/tests/integration.rs new file mode 100644 index 000000000..5aab67b03 --- /dev/null +++ b/packages/torrent-repository-benchmarking/tests/integration.rs @@ -0,0 +1,22 @@ +//! Integration tests. +//! +//! ```text +//! cargo test --test integration +//! ``` + +use torrust_tracker_clock::clock; + +pub mod common; +mod entry; +mod repository; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; diff --git a/packages/torrent-repository-benchmarking/tests/repository/mod.rs b/packages/torrent-repository-benchmarking/tests/repository/mod.rs new file mode 100644 index 000000000..6973f38bd --- /dev/null +++ b/packages/torrent-repository-benchmarking/tests/repository/mod.rs @@ -0,0 +1,639 @@ +use std::collections::{BTreeMap, HashSet}; +use std::hash::{DefaultHasher, Hash, Hasher}; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use bittorrent_primitives::info_hash::InfoHash; +use rstest::{fixture, rstest}; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::PersistentTorrents; +use torrust_tracker_torrent_repository_benchmarking::entry::Entry as _; +use torrust_tracker_torrent_repository_benchmarking::repository::dash_map_mutex_std::XacrimonDashMap; +use 
torrust_tracker_torrent_repository_benchmarking::repository::rw_lock_std::RwLockStd; +use torrust_tracker_torrent_repository_benchmarking::repository::rw_lock_tokio::RwLockTokio; +use torrust_tracker_torrent_repository_benchmarking::repository::skip_map_mutex_std::CrossbeamSkipList; +use torrust_tracker_torrent_repository_benchmarking::EntrySingle; + +use crate::common::repo::Repo; +use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; + +#[fixture] +fn standard() -> Repo { + Repo::RwLockStd(RwLockStd::default()) +} + +#[fixture] +fn standard_mutex() -> Repo { + Repo::RwLockStdMutexStd(RwLockStd::default()) +} + +#[fixture] +fn standard_tokio() -> Repo { + Repo::RwLockStdMutexTokio(RwLockStd::default()) +} + +#[fixture] +fn tokio_std() -> Repo { + Repo::RwLockTokio(RwLockTokio::default()) +} + +#[fixture] +fn tokio_mutex() -> Repo { + Repo::RwLockTokioMutexStd(RwLockTokio::default()) +} + +#[fixture] +fn tokio_tokio() -> Repo { + Repo::RwLockTokioMutexTokio(RwLockTokio::default()) +} + +#[fixture] +fn skip_list_mutex_std() -> Repo { + Repo::SkipMapMutexStd(CrossbeamSkipList::default()) +} + +#[fixture] +fn skip_list_mutex_parking_lot() -> Repo { + Repo::SkipMapMutexParkingLot(CrossbeamSkipList::default()) +} + +#[fixture] +fn skip_list_rw_lock_parking_lot() -> Repo { + Repo::SkipMapRwLockParkingLot(CrossbeamSkipList::default()) +} + +#[fixture] +fn dash_map_std() -> Repo { + Repo::DashMapMutexStd(XacrimonDashMap::default()) +} + +type Entries = Vec<(InfoHash, EntrySingle)>; + +#[fixture] +fn empty() -> Entries { + vec![] +} + +#[fixture] +fn default() -> Entries { + vec![(InfoHash::default(), EntrySingle::default())] +} + +#[fixture] +fn started() -> Entries { + let mut torrent = EntrySingle::default(); + torrent.upsert_peer(&a_started_peer(1)); + vec![(InfoHash::default(), torrent)] +} + +#[fixture] +fn completed() -> Entries { + let mut torrent = EntrySingle::default(); + torrent.upsert_peer(&a_completed_peer(2)); + 
vec![(InfoHash::default(), torrent)] +} + +#[fixture] +fn downloaded() -> Entries { + let mut torrent = EntrySingle::default(); + let mut peer = a_started_peer(3); + torrent.upsert_peer(&peer); + peer.event = AnnounceEvent::Completed; + peer.left = NumberOfBytes::new(0); + torrent.upsert_peer(&peer); + vec![(InfoHash::default(), torrent)] +} + +#[fixture] +fn three() -> Entries { + let mut started = EntrySingle::default(); + let started_h = &mut DefaultHasher::default(); + started.upsert_peer(&a_started_peer(1)); + started.hash(started_h); + + let mut completed = EntrySingle::default(); + let completed_h = &mut DefaultHasher::default(); + completed.upsert_peer(&a_completed_peer(2)); + completed.hash(completed_h); + + let mut downloaded = EntrySingle::default(); + let downloaded_h = &mut DefaultHasher::default(); + let mut downloaded_peer = a_started_peer(3); + downloaded.upsert_peer(&downloaded_peer); + downloaded_peer.event = AnnounceEvent::Completed; + downloaded_peer.left = NumberOfBytes::new(0); + downloaded.upsert_peer(&downloaded_peer); + downloaded.hash(downloaded_h); + + vec![ + (InfoHash::from(&started_h.clone()), started), + (InfoHash::from(&completed_h.clone()), completed), + (InfoHash::from(&downloaded_h.clone()), downloaded), + ] +} + +#[fixture] +fn many_out_of_order() -> Entries { + let mut entries: HashSet<(InfoHash, EntrySingle)> = HashSet::default(); + + for i in 0..408 { + let mut entry = EntrySingle::default(); + entry.upsert_peer(&a_started_peer(i)); + + entries.insert((InfoHash::from(&i), entry)); + } + + // we keep the random order from the hashed set for the vector. 
+ entries.iter().map(|(i, e)| (*i, e.clone())).collect() +} + +#[fixture] +fn many_hashed_in_order() -> Entries { + let mut entries: BTreeMap = BTreeMap::default(); + + for i in 0..408 { + let mut entry = EntrySingle::default(); + entry.upsert_peer(&a_started_peer(i)); + + let hash: &mut DefaultHasher = &mut DefaultHasher::default(); + hash.write_i32(i); + + entries.insert(InfoHash::from(&hash.clone()), entry); + } + + // We return the entries in-order from from the b-tree map. + entries.iter().map(|(i, e)| (*i, e.clone())).collect() +} + +#[fixture] +fn persistent_empty() -> PersistentTorrents { + PersistentTorrents::default() +} + +#[fixture] +fn persistent_single() -> PersistentTorrents { + let hash = &mut DefaultHasher::default(); + + hash.write_u8(1); + let t = [(InfoHash::from(&hash.clone()), 0_u32)]; + + t.iter().copied().collect() +} + +#[fixture] +fn persistent_three() -> PersistentTorrents { + let hash = &mut DefaultHasher::default(); + + hash.write_u8(1); + let info_1 = InfoHash::from(&hash.clone()); + hash.write_u8(2); + let info_2 = InfoHash::from(&hash.clone()); + hash.write_u8(3); + let info_3 = InfoHash::from(&hash.clone()); + + let t = [(info_1, 1_u32), (info_2, 2_u32), (info_3, 3_u32)]; + + t.iter().copied().collect() +} + +async fn make(repo: &Repo, entries: &Entries) { + for (info_hash, entry) in entries { + repo.insert(info_hash, entry.clone()).await; + } +} + +#[fixture] +fn paginated_limit_zero() -> Pagination { + Pagination::new(0, 0) +} + +#[fixture] +fn paginated_limit_one() -> Pagination { + Pagination::new(0, 1) +} + +#[fixture] +fn paginated_limit_one_offset_one() -> Pagination { + Pagination::new(1, 1) +} + +#[fixture] +fn policy_none() -> TrackerPolicy { + TrackerPolicy::new(0, false, false) +} + +#[fixture] +fn policy_persist() -> TrackerPolicy { + TrackerPolicy::new(0, true, false) +} + +#[fixture] +fn policy_remove() -> TrackerPolicy { + TrackerPolicy::new(0, false, true) +} + +#[fixture] +fn policy_remove_persist() -> 
TrackerPolicy { + TrackerPolicy::new(0, true, true) +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_a_torrent_entry( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot(), + dash_map_std() + )] + repo: Repo, + #[case] entries: Entries, +) { + make(&repo, &entries).await; + + if let Some((info_hash, torrent)) = entries.first() { + assert_eq!(repo.get(info_hash).await, Some(torrent.clone())); + } else { + assert_eq!(repo.get(&InfoHash::default()).await, None); + } +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot() + )] + repo: Repo, + #[case] entries: Entries, + many_out_of_order: Entries, +) { + make(&repo, &entries).await; + + let entries_a = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::>(); + + make(&repo, &many_out_of_order).await; + + let entries_b = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::>(); + + let is_equal = entries_b.iter().take(entries_a.len()).copied().collect::>() == entries_a; + + let is_sorted = entries_b.windows(2).all(|w| w[0] <= w[1]); + + assert!( + is_equal || is_sorted, + 
"The order is unstable: {is_equal}, or is sorted {is_sorted}." + ); +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_paginated( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot() + )] + repo: Repo, + #[case] entries: Entries, + #[values(paginated_limit_zero(), paginated_limit_one(), paginated_limit_one_offset_one())] paginated: Pagination, +) { + make(&repo, &entries).await; + + let mut info_hashes = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::>(); + info_hashes.sort(); + + match paginated { + // it should return empty if limit is zero. + Pagination { limit: 0, .. } => assert_eq!(repo.get_paginated(Some(&paginated)).await, vec![]), + + // it should return a single entry if the limit is one. + Pagination { limit: 1, offset: 0 } => { + if info_hashes.is_empty() { + assert_eq!(repo.get_paginated(Some(&paginated)).await.len(), 0); + } else { + let page = repo.get_paginated(Some(&paginated)).await; + assert_eq!(page.len(), 1); + assert_eq!(page.first().map(|(i, _)| i), info_hashes.first()); + } + } + + // it should return the only the second entry if both the limit and the offset are one. + Pagination { limit: 1, offset: 1 } => { + if info_hashes.len() > 1 { + let page = repo.get_paginated(Some(&paginated)).await; + assert_eq!(page.len(), 1); + assert_eq!(page[0].0, info_hashes[1]); + } + } + // the other cases are not yet tested. 
+ _ => {} + } +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_metrics( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot(), + dash_map_std() + )] + repo: Repo, + #[case] entries: Entries, +) { + use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; + + make(&repo, &entries).await; + + let mut metrics = AggregateSwarmMetadata::default(); + + for (_, torrent) in entries { + let stats = torrent.get_swarm_metadata(); + + metrics.total_torrents += 1; + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + } + + assert_eq!(repo.get_metrics().await, metrics); +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_import_persistent_torrents( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot(), + dash_map_std() + )] + repo: Repo, + #[case] entries: Entries, + #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: PersistentTorrents, +) { + make(&repo, &entries).await; + + let mut downloaded = repo.get_metrics().await.total_downloaded; + persistent_torrents.iter().for_each(|(_, 
d)| downloaded += u64::from(*d)); + + repo.import_persistent(&persistent_torrents).await; + + assert_eq!(repo.get_metrics().await.total_downloaded, downloaded); + + for (entry, _) in persistent_torrents { + assert!(repo.get(&entry).await.is_some()); + } +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_remove_an_entry( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot(), + dash_map_std() + )] + repo: Repo, + #[case] entries: Entries, +) { + make(&repo, &entries).await; + + for (info_hash, torrent) in entries { + assert_eq!(repo.get(&info_hash).await, Some(torrent.clone())); + assert_eq!(repo.remove(&info_hash).await, Some(torrent)); + + assert_eq!(repo.get(&info_hash).await, None); + assert_eq!(repo.remove(&info_hash).await, None); + } + + assert_eq!(repo.get_metrics().await.total_torrents, 0); +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_remove_inactive_peers( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot(), + dash_map_std() + )] + repo: Repo, + #[case] entries: Entries, +) { + use std::ops::Sub as _; + use std::time::Duration; + + use torrust_tracker_clock::clock::stopped::Stopped as _; + use torrust_tracker_clock::clock::{self, 
Time as _}; + use torrust_tracker_primitives::peer; + + use crate::CurrentClock; + + const TIMEOUT: Duration = Duration::from_secs(120); + const EXPIRE: Duration = Duration::from_secs(121); + + make(&repo, &entries).await; + + let info_hash: InfoHash; + let mut peer: peer::Peer; + + // Generate a new infohash and peer. + { + let hash = &mut DefaultHasher::default(); + hash.write_u8(255); + info_hash = InfoHash::from(&hash.clone()); + peer = a_completed_peer(-1); + } + + // Set the last updated time of the peer to be 121 seconds ago. + { + let now = clock::Working::now(); + clock::Stopped::local_set(&now); + + peer.updated = now.sub(EXPIRE); + } + + // Insert the infohash and peer into the repository + // and verify there is an extra torrent entry. + { + repo.upsert_peer(&info_hash, &peer, None).await; + assert_eq!(repo.get_metrics().await.total_torrents, entries.len() as u64 + 1); + } + + // Insert the infohash and peer into the repository + // and verify the swarm metadata was updated. + { + repo.upsert_peer(&info_hash, &peer, None).await; + let stats = repo.get_swarm_metadata(&info_hash).await; + assert_eq!( + stats, + Some(SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0 + }) + ); + } + + // Verify that this new peer was inserted into the repository. + { + let entry = repo.get(&info_hash).await.expect("it_should_get_some"); + assert!(entry.get_peers(None).contains(&peer.into())); + } + + // Remove peers that have not been updated since the timeout (120 seconds ago). + { + repo.remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed")) + .await; + } + + // Verify that the this peer was removed from the repository. 
+ { + let entry = repo.get(&info_hash).await.expect("it_should_get_some"); + assert!(!entry.get_peers(None).contains(&peer.into())); + } +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_remove_peerless_torrents( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot(), + dash_map_std() + )] + repo: Repo, + #[case] entries: Entries, + #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, +) { + make(&repo, &entries).await; + + repo.remove_peerless_torrents(&policy).await; + + let torrents = repo.get_paginated(None).await; + + for (_, entry) in torrents { + assert!(entry.meets_retaining_policy(&policy)); + } +} From 16a6d08bf49531b482c2d8f7f47f2de0a01352b4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 30 Apr 2025 11:54:37 +0100 Subject: [PATCH 536/802] feat!: [#1491] remove unused torrent repositories Repositories that are not used in production. THey have been moved to a new package `torrent-repository-benchmarking`. 
--- Cargo.lock | 3 - packages/torrent-repository/Cargo.toml | 7 - .../benches/helpers/asyn.rs | 153 ---------- .../torrent-repository/benches/helpers/mod.rs | 3 - .../benches/helpers/sync.rs | 155 ---------- .../benches/helpers/utils.rs | 41 --- .../benches/repository_benchmark.rs | 270 ------------------ packages/torrent-repository/src/lib.rs | 17 +- .../src/repository/dash_map_mutex_std.rs | 111 ------- .../torrent-repository/src/repository/mod.rs | 7 - .../src/repository/rw_lock_std.rs | 132 --------- .../src/repository/rw_lock_std_mutex_std.rs | 130 --------- .../src/repository/rw_lock_std_mutex_tokio.rs | 167 ----------- .../src/repository/rw_lock_tokio.rs | 138 --------- .../src/repository/rw_lock_tokio_mutex_std.rs | 135 --------- .../repository/rw_lock_tokio_mutex_tokio.rs | 148 ---------- .../torrent-repository/tests/common/repo.rs | 182 +----------- .../tests/repository/mod.rs | 249 +++------------- 18 files changed, 61 insertions(+), 1987 deletions(-) delete mode 100644 packages/torrent-repository/benches/helpers/asyn.rs delete mode 100644 packages/torrent-repository/benches/helpers/mod.rs delete mode 100644 packages/torrent-repository/benches/helpers/sync.rs delete mode 100644 packages/torrent-repository/benches/helpers/utils.rs delete mode 100644 packages/torrent-repository/benches/repository_benchmark.rs delete mode 100644 packages/torrent-repository/src/repository/dash_map_mutex_std.rs delete mode 100644 packages/torrent-repository/src/repository/rw_lock_std.rs delete mode 100644 packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs delete mode 100644 packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs delete mode 100644 packages/torrent-repository/src/repository/rw_lock_tokio.rs delete mode 100644 packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs delete mode 100644 packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs diff --git a/Cargo.lock b/Cargo.lock index da46a5a8f..5bce85e46 
100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4841,15 +4841,12 @@ dependencies = [ "bittorrent-primitives", "criterion", "crossbeam-skiplist", - "dashmap", - "futures", "parking_lot", "rstest", "tokio", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-primitives", - "zerocopy 0.7.35", ] [[package]] diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 2097d57d2..d12dcbf44 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -19,20 +19,13 @@ version.workspace = true aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" crossbeam-skiplist = "0" -dashmap = "6" -futures = "0" parking_lot = "0" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -zerocopy = "0.7" [dev-dependencies] async-std = { version = "1", features = ["attributes", "tokio1"] } criterion = { version = "0", features = ["async_tokio"] } rstest = "0" - -[[bench]] -harness = false -name = "repository_benchmark" diff --git a/packages/torrent-repository/benches/helpers/asyn.rs b/packages/torrent-repository/benches/helpers/asyn.rs deleted file mode 100644 index fc6b3ffb0..000000000 --- a/packages/torrent-repository/benches/helpers/asyn.rs +++ /dev/null @@ -1,153 +0,0 @@ -use std::sync::Arc; -use std::time::{Duration, Instant}; - -use bittorrent_primitives::info_hash::InfoHash; -use futures::stream::FuturesUnordered; -use torrust_tracker_torrent_repository::repository::RepositoryAsync; - -use super::utils::{generate_unique_info_hashes, DEFAULT_PEER}; - -pub async fn add_one_torrent(samples: u64) -> Duration -where - V: RepositoryAsync + Default, -{ - let start = Instant::now(); - - for _ in 0..samples { - 
let torrent_repository = V::default(); - - let info_hash = InfoHash::default(); - - torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER, None).await; - - torrent_repository.get_swarm_metadata(&info_hash).await; - } - - start.elapsed() -} - -// Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: u64, sleep: Option) -> Duration -where - V: RepositoryAsync + Default, - Arc: Clone + Send + Sync + 'static, -{ - let torrent_repository = Arc::::default(); - let info_hash = InfoHash::default(); - let handles = FuturesUnordered::new(); - - // Add the torrent/peer to the torrent repository - torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER, None).await; - - torrent_repository.get_swarm_metadata(&info_hash).await; - - let start = Instant::now(); - - for _ in 0..samples { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER, None).await; - - torrent_repository_clone.get_swarm_metadata(&info_hash).await; - - if let Some(sleep_time) = sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); - - handles.push(handle); - } - - // Await all tasks - futures::future::join_all(handles).await; - - start.elapsed() -} - -// Add ten thousand torrents in parallel (depending on the set worker threads) -pub async fn add_multiple_torrents_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: u64, - sleep: Option, -) -> Duration -where - V: RepositoryAsync + Default, - Arc: Clone + Send + Sync + 'static, -{ - let torrent_repository = Arc::::default(); - let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in a usize")); - let handles = FuturesUnordered::new(); - - let start = Instant::now(); - - for info_hash in info_hashes { - let 
torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER, None).await; - - torrent_repository_clone.get_swarm_metadata(&info_hash).await; - - if let Some(sleep_time) = sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); - - handles.push(handle); - } - - // Await all tasks - futures::future::join_all(handles).await; - - start.elapsed() -} - -// Async update ten thousand torrents in parallel (depending on the set worker threads) -pub async fn update_multiple_torrents_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: u64, - sleep: Option, -) -> Duration -where - V: RepositoryAsync + Default, - Arc: Clone + Send + Sync + 'static, -{ - let torrent_repository = Arc::::default(); - let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in usize")); - let handles = FuturesUnordered::new(); - - // Add the torrents/peers to the torrent repository - for info_hash in &info_hashes { - torrent_repository.upsert_peer(info_hash, &DEFAULT_PEER, None).await; - torrent_repository.get_swarm_metadata(info_hash).await; - } - - let start = Instant::now(); - - for info_hash in info_hashes { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER, None).await; - torrent_repository_clone.get_swarm_metadata(&info_hash).await; - - if let Some(sleep_time) = sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); - - handles.push(handle); - } - - // Await all tasks - futures::future::join_all(handles).await; - - start.elapsed() -} diff --git a/packages/torrent-repository/benches/helpers/mod.rs b/packages/torrent-repository/benches/helpers/mod.rs deleted file mode 100644 index 
1026aa4bf..000000000 --- a/packages/torrent-repository/benches/helpers/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod asyn; -pub mod sync; -pub mod utils; diff --git a/packages/torrent-repository/benches/helpers/sync.rs b/packages/torrent-repository/benches/helpers/sync.rs deleted file mode 100644 index e00401446..000000000 --- a/packages/torrent-repository/benches/helpers/sync.rs +++ /dev/null @@ -1,155 +0,0 @@ -use std::sync::Arc; -use std::time::{Duration, Instant}; - -use bittorrent_primitives::info_hash::InfoHash; -use futures::stream::FuturesUnordered; -use torrust_tracker_torrent_repository::repository::Repository; - -use super::utils::{generate_unique_info_hashes, DEFAULT_PEER}; - -// Simply add one torrent -#[must_use] -pub fn add_one_torrent(samples: u64) -> Duration -where - V: Repository + Default, -{ - let start = Instant::now(); - - for _ in 0..samples { - let torrent_repository = V::default(); - - let info_hash = InfoHash::default(); - - torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER, None); - - torrent_repository.get_swarm_metadata(&info_hash); - } - - start.elapsed() -} - -// Add one torrent ten thousand times in parallel (depending on the set worker threads) -pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: u64, sleep: Option) -> Duration -where - V: Repository + Default, - Arc: Clone + Send + Sync + 'static, -{ - let torrent_repository = Arc::::default(); - let info_hash = InfoHash::default(); - let handles = FuturesUnordered::new(); - - // Add the torrent/peer to the torrent repository - torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER, None); - - torrent_repository.get_swarm_metadata(&info_hash); - - let start = Instant::now(); - - for _ in 0..samples { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER, None); - - torrent_repository_clone.get_swarm_metadata(&info_hash); - 
- if let Some(sleep_time) = sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); - - handles.push(handle); - } - - // Await all tasks - futures::future::join_all(handles).await; - - start.elapsed() -} - -// Add ten thousand torrents in parallel (depending on the set worker threads) -pub async fn add_multiple_torrents_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: u64, - sleep: Option, -) -> Duration -where - V: Repository + Default, - Arc: Clone + Send + Sync + 'static, -{ - let torrent_repository = Arc::::default(); - let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in a usize")); - let handles = FuturesUnordered::new(); - - let start = Instant::now(); - - for info_hash in info_hashes { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER, None); - - torrent_repository_clone.get_swarm_metadata(&info_hash); - - if let Some(sleep_time) = sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); - - handles.push(handle); - } - - // Await all tasks - futures::future::join_all(handles).await; - - start.elapsed() -} - -// Update ten thousand torrents in parallel (depending on the set worker threads) -pub async fn update_multiple_torrents_in_parallel( - runtime: &tokio::runtime::Runtime, - samples: u64, - sleep: Option, -) -> Duration -where - V: Repository + Default, - Arc: Clone + Send + Sync + 'static, -{ - let torrent_repository = Arc::::default(); - let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in usize")); - let handles = FuturesUnordered::new(); - - // Add the torrents/peers to the torrent repository - for info_hash in &info_hashes { - torrent_repository.upsert_peer(info_hash, &DEFAULT_PEER, None); - 
torrent_repository.get_swarm_metadata(info_hash); - } - - let start = Instant::now(); - - for info_hash in info_hashes { - let torrent_repository_clone = torrent_repository.clone(); - - let handle = runtime.spawn(async move { - torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER, None); - torrent_repository_clone.get_swarm_metadata(&info_hash); - - if let Some(sleep_time) = sleep { - let start_time = std::time::Instant::now(); - - while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} - } - }); - - handles.push(handle); - } - - // Await all tasks - futures::future::join_all(handles).await; - - start.elapsed() -} diff --git a/packages/torrent-repository/benches/helpers/utils.rs b/packages/torrent-repository/benches/helpers/utils.rs deleted file mode 100644 index 51b09ec0f..000000000 --- a/packages/torrent-repository/benches/helpers/utils.rs +++ /dev/null @@ -1,41 +0,0 @@ -use std::collections::HashSet; -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; -use bittorrent_primitives::info_hash::InfoHash; -use torrust_tracker_primitives::peer::Peer; -use torrust_tracker_primitives::DurationSinceUnixEpoch; -use zerocopy::I64; - -pub const DEFAULT_PEER: Peer = Peer { - peer_id: PeerId([0; 20]), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::from_secs(0), - uploaded: NumberOfBytes(I64::ZERO), - downloaded: NumberOfBytes(I64::ZERO), - left: NumberOfBytes(I64::ZERO), - event: AnnounceEvent::Started, -}; - -#[must_use] -#[allow(clippy::missing_panics_doc)] -pub fn generate_unique_info_hashes(size: usize) -> Vec { - let mut result = HashSet::new(); - - let mut bytes = [0u8; 20]; - - #[allow(clippy::cast_possible_truncation)] - for i in 0..size { - bytes[0] = (i & 0xFF) as u8; - bytes[1] = ((i >> 8) & 0xFF) as u8; - bytes[2] = ((i >> 16) & 0xFF) as u8; - bytes[3] = ((i >> 24) & 0xFF) as u8; - - let info_hash = 
InfoHash::from_bytes(&bytes); - result.insert(info_hash); - } - - assert_eq!(result.len(), size); - - result.into_iter().collect() -} diff --git a/packages/torrent-repository/benches/repository_benchmark.rs b/packages/torrent-repository/benches/repository_benchmark.rs deleted file mode 100644 index 4e50f1454..000000000 --- a/packages/torrent-repository/benches/repository_benchmark.rs +++ /dev/null @@ -1,270 +0,0 @@ -use std::time::Duration; - -mod helpers; - -use criterion::{criterion_group, criterion_main, Criterion}; -use torrust_tracker_torrent_repository::{ - TorrentsDashMapMutexStd, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, - TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexParkingLot, TorrentsSkipMapMutexStd, - TorrentsSkipMapRwLockParkingLot, -}; - -use crate::helpers::{asyn, sync}; - -fn add_one_torrent(c: &mut Criterion) { - let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); - - let mut group = c.benchmark_group("add_one_torrent"); - - group.warm_up_time(Duration::from_millis(500)); - group.measurement_time(Duration::from_millis(1000)); - - group.bench_function("RwLockStd", |b| { - b.iter_custom(sync::add_one_torrent::); - }); - - group.bench_function("RwLockStdMutexStd", |b| { - b.iter_custom(sync::add_one_torrent::); - }); - - group.bench_function("RwLockStdMutexTokio", |b| { - b.to_async(&rt) - .iter_custom(asyn::add_one_torrent::); - }); - - group.bench_function("RwLockTokio", |b| { - b.to_async(&rt).iter_custom(asyn::add_one_torrent::); - }); - - group.bench_function("RwLockTokioMutexStd", |b| { - b.to_async(&rt) - .iter_custom(asyn::add_one_torrent::); - }); - - group.bench_function("RwLockTokioMutexTokio", |b| { - b.to_async(&rt) - .iter_custom(asyn::add_one_torrent::); - }); - - group.bench_function("SkipMapMutexStd", |b| { - b.iter_custom(sync::add_one_torrent::); - }); - - group.bench_function("SkipMapMutexParkingLot", |b| { 
- b.iter_custom(sync::add_one_torrent::); - }); - - group.bench_function("SkipMapRwLockParkingLot", |b| { - b.iter_custom(sync::add_one_torrent::); - }); - - group.bench_function("DashMapMutexStd", |b| { - b.iter_custom(sync::add_one_torrent::); - }); - - group.finish(); -} - -fn add_multiple_torrents_in_parallel(c: &mut Criterion) { - let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); - - let mut group = c.benchmark_group("add_multiple_torrents_in_parallel"); - - //group.sampling_mode(criterion::SamplingMode::Flat); - //group.sample_size(10); - - group.warm_up_time(Duration::from_millis(500)); - group.measurement_time(Duration::from_millis(1000)); - - group.bench_function("RwLockStd", |b| { - b.to_async(&rt) - .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("RwLockStdMutexStd", |b| { - b.to_async(&rt) - .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("RwLockStdMutexTokio", |b| { - b.to_async(&rt) - .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("RwLockTokio", |b| { - b.to_async(&rt) - .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("RwLockTokioMutexStd", |b| { - b.to_async(&rt) - .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("RwLockTokioMutexTokio", |b| { - b.to_async(&rt) - .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("SkipMapMutexStd", |b| { - b.to_async(&rt) - .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("SkipMapMutexParkingLot", |b| { - b.to_async(&rt) - .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); - }); - - 
group.bench_function("SkipMapRwLockParkingLot", |b| { - b.to_async(&rt) - .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("DashMapMutexStd", |b| { - b.to_async(&rt) - .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); - }); - - group.finish(); -} - -fn update_one_torrent_in_parallel(c: &mut Criterion) { - let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); - - let mut group = c.benchmark_group("update_one_torrent_in_parallel"); - - //group.sampling_mode(criterion::SamplingMode::Flat); - //group.sample_size(10); - - group.warm_up_time(Duration::from_millis(500)); - group.measurement_time(Duration::from_millis(1000)); - - group.bench_function("RwLockStd", |b| { - b.to_async(&rt) - .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("RwLockStdMutexStd", |b| { - b.to_async(&rt) - .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("RwLockStdMutexTokio", |b| { - b.to_async(&rt) - .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("RwLockTokio", |b| { - b.to_async(&rt) - .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("RwLockTokioMutexStd", |b| { - b.to_async(&rt) - .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("RwLockTokioMutexTokio", |b| { - b.to_async(&rt) - .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("SkipMapMutexStd", |b| { - b.to_async(&rt) - .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("SkipMapMutexParkingLot", |b| { - b.to_async(&rt) - .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, 
iters, None)); - }); - - group.bench_function("SkipMapRwLockParkingLot", |b| { - b.to_async(&rt) - .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("DashMapMutexStd", |b| { - b.to_async(&rt) - .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); - }); - - group.finish(); -} - -fn update_multiple_torrents_in_parallel(c: &mut Criterion) { - let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); - - let mut group = c.benchmark_group("update_multiple_torrents_in_parallel"); - - //group.sampling_mode(criterion::SamplingMode::Flat); - //group.sample_size(10); - - group.warm_up_time(Duration::from_millis(500)); - group.measurement_time(Duration::from_millis(1000)); - - group.bench_function("RwLockStd", |b| { - b.to_async(&rt) - .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("RwLockStdMutexStd", |b| { - b.to_async(&rt) - .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("RwLockStdMutexTokio", |b| { - b.to_async(&rt) - .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("RwLockTokio", |b| { - b.to_async(&rt) - .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("RwLockTokioMutexStd", |b| { - b.to_async(&rt) - .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("RwLockTokioMutexTokio", |b| { - b.to_async(&rt).iter_custom(|iters| { - asyn::update_multiple_torrents_in_parallel::(&rt, iters, None) - }); - }); - - group.bench_function("SkipMapMutexStd", |b| { - b.to_async(&rt) - .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); - }); - - group.bench_function("SkipMapMutexParkingLot", |b| { - 
b.to_async(&rt).iter_custom(|iters| { - sync::update_multiple_torrents_in_parallel::(&rt, iters, None) - }); - }); - - group.bench_function("SkipMapRwLockParkingLot", |b| { - b.to_async(&rt).iter_custom(|iters| { - sync::update_multiple_torrents_in_parallel::(&rt, iters, None) - }); - }); - - group.bench_function("DashMapMutexStd", |b| { - b.to_async(&rt) - .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); - }); - - group.finish(); -} - -criterion_group!( - benches, - add_one_torrent, - add_multiple_torrents_in_parallel, - update_one_torrent_in_parallel, - update_multiple_torrents_in_parallel -); -criterion_main!(benches); diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index a8955808e..b4ee5298e 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -1,8 +1,5 @@ use std::sync::Arc; -use repository::dash_map_mutex_std::XacrimonDashMap; -use repository::rw_lock_std::RwLockStd; -use repository::rw_lock_tokio::RwLockTokio; use repository::skip_map_mutex_std::CrossbeamSkipList; use torrust_tracker_clock::clock; @@ -17,20 +14,8 @@ pub type EntryMutexTokio = Arc>; pub type EntryMutexParkingLot = Arc>; pub type EntryRwLockParkingLot = Arc>; -// Repos - -pub type TorrentsRwLockStd = RwLockStd; -pub type TorrentsRwLockStdMutexStd = RwLockStd; -pub type TorrentsRwLockStdMutexTokio = RwLockStd; -pub type TorrentsRwLockTokio = RwLockTokio; -pub type TorrentsRwLockTokioMutexStd = RwLockTokio; -pub type TorrentsRwLockTokioMutexTokio = RwLockTokio; - +// Repository pub type TorrentsSkipMapMutexStd = CrossbeamSkipList; -pub type TorrentsSkipMapMutexParkingLot = CrossbeamSkipList; -pub type TorrentsSkipMapRwLockParkingLot = CrossbeamSkipList; - -pub type TorrentsDashMapMutexStd = XacrimonDashMap; /// This code needs to be copied into each crate. /// Working version, for production. 
diff --git a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs deleted file mode 100644 index d4a84caa0..000000000 --- a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs +++ /dev/null @@ -1,111 +0,0 @@ -use std::sync::Arc; - -use bittorrent_primitives::info_hash::InfoHash; -use dashmap::DashMap; -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; - -use super::Repository; -use crate::entry::peer_list::PeerList; -use crate::entry::{Entry, EntrySync}; -use crate::{EntryMutexStd, EntrySingle}; - -#[derive(Default, Debug)] -pub struct XacrimonDashMap { - pub torrents: DashMap, -} - -impl Repository for XacrimonDashMap -where - EntryMutexStd: EntrySync, - EntrySingle: Entry, -{ - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { - // todo: load persistent torrent data if provided - - if let Some(entry) = self.torrents.get(info_hash) { - entry.upsert_peer(peer) - } else { - let _unused = self.torrents.insert(*info_hash, Arc::default()); - if let Some(entry) = self.torrents.get(info_hash) { - entry.upsert_peer(peer) - } else { - false - } - } - } - - fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { - self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) - } - - fn get(&self, key: &InfoHash) -> Option { - let maybe_entry = self.torrents.get(key); - maybe_entry.map(|entry| entry.clone()) - } - - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); - - for entry in &self.torrents { - let stats = entry.value().lock().expect("it should get a lock").get_swarm_metadata(); - 
metrics.total_complete += u64::from(stats.complete); - metrics.total_downloaded += u64::from(stats.downloaded); - metrics.total_incomplete += u64::from(stats.incomplete); - metrics.total_torrents += 1; - } - - metrics - } - - fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { - match pagination { - Some(pagination) => self - .torrents - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|entry| (*entry.key(), entry.value().clone())) - .collect(), - None => self - .torrents - .iter() - .map(|entry| (*entry.key(), entry.value().clone())) - .collect(), - } - } - - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - for (info_hash, completed) in persistent_torrents { - if self.torrents.contains_key(info_hash) { - continue; - } - - let entry = EntryMutexStd::new( - EntrySingle { - swarm: PeerList::default(), - downloaded: *completed, - } - .into(), - ); - - self.torrents.insert(*info_hash, entry); - } - } - - fn remove(&self, key: &InfoHash) -> Option { - self.torrents.remove(key).map(|(_key, value)| value.clone()) - } - - fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - for entry in &self.torrents { - entry.value().remove_inactive_peers(current_cutoff); - } - } - - fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - self.torrents.retain(|_, entry| entry.meets_retaining_policy(policy)); - } -} diff --git a/packages/torrent-repository/src/repository/mod.rs b/packages/torrent-repository/src/repository/mod.rs index 9284ff6e6..96c71f3a0 100644 --- a/packages/torrent-repository/src/repository/mod.rs +++ b/packages/torrent-repository/src/repository/mod.rs @@ -4,13 +4,6 @@ use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -pub mod 
dash_map_mutex_std; -pub mod rw_lock_std; -pub mod rw_lock_std_mutex_std; -pub mod rw_lock_std_mutex_tokio; -pub mod rw_lock_tokio; -pub mod rw_lock_tokio_mutex_std; -pub mod rw_lock_tokio_mutex_tokio; pub mod skip_map_mutex_std; use std::fmt::Debug; diff --git a/packages/torrent-repository/src/repository/rw_lock_std.rs b/packages/torrent-repository/src/repository/rw_lock_std.rs deleted file mode 100644 index d190718af..000000000 --- a/packages/torrent-repository/src/repository/rw_lock_std.rs +++ /dev/null @@ -1,132 +0,0 @@ -use bittorrent_primitives::info_hash::InfoHash; -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; - -use super::Repository; -use crate::entry::peer_list::PeerList; -use crate::entry::Entry; -use crate::{EntrySingle, TorrentsRwLockStd}; - -#[derive(Default, Debug)] -pub struct RwLockStd { - pub(crate) torrents: std::sync::RwLock>, -} - -impl RwLockStd { - /// # Panics - /// - /// Panics if unable to get a lock. 
- pub fn write( - &self, - ) -> std::sync::RwLockWriteGuard<'_, std::collections::BTreeMap> { - self.torrents.write().expect("it should get lock") - } -} - -impl TorrentsRwLockStd { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("it should get the read lock") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("it should get the write lock") - } -} - -impl Repository for TorrentsRwLockStd -where - EntrySingle: Entry, -{ - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { - // todo: load persistent torrent data if provided - - let mut db = self.get_torrents_mut(); - - let entry = db.entry(*info_hash).or_insert(EntrySingle::default()); - - entry.upsert_peer(peer) - } - - fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { - self.get(info_hash).map(|entry| entry.get_swarm_metadata()) - } - - fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents(); - db.get(key).cloned() - } - - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); - - for entry in self.get_torrents().values() { - let stats = entry.get_swarm_metadata(); - metrics.total_complete += u64::from(stats.complete); - metrics.total_downloaded += u64::from(stats.downloaded); - metrics.total_incomplete += u64::from(stats.incomplete); - metrics.total_torrents += 1; - } - - metrics - } - - fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { - let db = self.get_torrents(); - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, 
b.clone())).collect(), - } - } - - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut torrents = self.get_torrents_mut(); - - for (info_hash, downloaded) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(info_hash) { - continue; - } - - let entry = EntrySingle { - swarm: PeerList::default(), - downloaded: *downloaded, - }; - - torrents.insert(*info_hash, entry); - } - } - - fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut(); - db.remove(key) - } - - fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - let mut db = self.get_torrents_mut(); - let entries = db.values_mut(); - - for entry in entries { - entry.remove_inactive_peers(current_cutoff); - } - } - - fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - let mut db = self.get_torrents_mut(); - - db.retain(|_, e| e.meets_retaining_policy(policy)); - } -} diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs deleted file mode 100644 index 1764b94e8..000000000 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs +++ /dev/null @@ -1,130 +0,0 @@ -use std::sync::Arc; - -use bittorrent_primitives::info_hash::InfoHash; -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; - -use super::Repository; -use crate::entry::peer_list::PeerList; -use crate::entry::{Entry, EntrySync}; -use crate::{EntryMutexStd, EntrySingle, TorrentsRwLockStdMutexStd}; - -impl TorrentsRwLockStdMutexStd { - fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - 
self.torrents.read().expect("unable to get torrent list") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl Repository for TorrentsRwLockStdMutexStd -where - EntryMutexStd: EntrySync, - EntrySingle: Entry, -{ - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { - // todo: load persistent torrent data if provided - - let maybe_entry = self.get_torrents().get(info_hash).cloned(); - - let entry = if let Some(entry) = maybe_entry { - entry - } else { - let mut db = self.get_torrents_mut(); - let entry = db.entry(*info_hash).or_insert(Arc::default()); - entry.clone() - }; - - entry.upsert_peer(peer) - } - - fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { - self.get_torrents() - .get(info_hash) - .map(super::super::entry::EntrySync::get_swarm_metadata) - } - - fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents(); - db.get(key).cloned() - } - - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); - - for entry in self.get_torrents().values() { - let stats = entry.lock().expect("it should get a lock").get_swarm_metadata(); - metrics.total_complete += u64::from(stats.complete); - metrics.total_downloaded += u64::from(stats.downloaded); - metrics.total_incomplete += u64::from(stats.incomplete); - metrics.total_torrents += 1; - } - - metrics - } - - fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { - let db = self.get_torrents(); - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - fn import_persistent(&self, 
persistent_torrents: &PersistentTorrents) { - let mut torrents = self.get_torrents_mut(); - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(info_hash) { - continue; - } - - let entry = EntryMutexStd::new( - EntrySingle { - swarm: PeerList::default(), - downloaded: *completed, - } - .into(), - ); - - torrents.insert(*info_hash, entry); - } - } - - fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut(); - db.remove(key) - } - - fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - let db = self.get_torrents(); - let entries = db.values().cloned(); - - for entry in entries { - entry.remove_inactive_peers(current_cutoff); - } - } - - fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - let mut db = self.get_torrents_mut(); - - db.retain(|_, e| e.lock().expect("it should lock entry").meets_retaining_policy(policy)); - } -} diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs deleted file mode 100644 index 116c1ff87..000000000 --- a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs +++ /dev/null @@ -1,167 +0,0 @@ -use std::iter::zip; -use std::pin::Pin; -use std::sync::Arc; - -use bittorrent_primitives::info_hash::InfoHash; -use futures::future::join_all; -use futures::{Future, FutureExt}; -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; - -use super::RepositoryAsync; -use crate::entry::peer_list::PeerList; -use crate::entry::{Entry, EntryAsync}; -use crate::{EntryMutexTokio, EntrySingle, TorrentsRwLockStdMutexTokio}; - -impl TorrentsRwLockStdMutexTokio { - fn 
get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().expect("unable to get torrent list") - } - - fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().expect("unable to get writable torrent list") - } -} - -impl RepositoryAsync for TorrentsRwLockStdMutexTokio -where - EntryMutexTokio: EntryAsync, - EntrySingle: Entry, -{ - async fn upsert_peer( - &self, - info_hash: &InfoHash, - peer: &peer::Peer, - _opt_persistent_torrent: Option, - ) -> bool { - // todo: load persistent torrent data if provided - - let maybe_entry = self.get_torrents().get(info_hash).cloned(); - - let entry = if let Some(entry) = maybe_entry { - entry - } else { - let mut db = self.get_torrents_mut(); - let entry = db.entry(*info_hash).or_insert(Arc::default()); - entry.clone() - }; - - entry.upsert_peer(peer).await - } - - async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { - let maybe_entry = self.get_torrents().get(info_hash).cloned(); - - match maybe_entry { - Some(entry) => Some(entry.get_swarm_metadata().await), - None => None, - } - } - - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents(); - db.get(key).cloned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexTokio)> { - let db = self.get_torrents(); - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); - - let entries: Vec<_> = self.get_torrents().values().cloned().collect(); - - for entry in entries { - let stats = 
entry.lock().await.get_swarm_metadata(); - metrics.total_complete += u64::from(stats.complete); - metrics.total_downloaded += u64::from(stats.downloaded); - metrics.total_incomplete += u64::from(stats.incomplete); - metrics.total_torrents += 1; - } - - metrics - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut db = self.get_torrents_mut(); - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if db.contains_key(info_hash) { - continue; - } - - let entry = EntryMutexTokio::new( - EntrySingle { - swarm: PeerList::default(), - downloaded: *completed, - } - .into(), - ); - - db.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut(); - db.remove(key) - } - - async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - let handles: Vec + Send>>>; - { - let db = self.get_torrents(); - handles = db - .values() - .cloned() - .map(|e| e.remove_inactive_peers(current_cutoff).boxed()) - .collect(); - } - join_all(handles).await; - } - - async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - let handles: Vec> + Send>>>; - - { - let db = self.get_torrents(); - - handles = zip(db.keys().copied(), db.values().cloned()) - .map(|(infohash, torrent)| { - torrent - .meets_retaining_policy(policy) - .map(move |should_be_retained| if should_be_retained { None } else { Some(infohash) }) - .boxed() - }) - .collect::>(); - } - - let not_good = join_all(handles).await; - - let mut db = self.get_torrents_mut(); - - for remove in not_good.into_iter().flatten() { - drop(db.remove(&remove)); - } - } -} diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio.rs deleted file mode 100644 index 53838023d..000000000 --- a/packages/torrent-repository/src/repository/rw_lock_tokio.rs +++ /dev/null @@ -1,138 +0,0 @@ -use 
bittorrent_primitives::info_hash::InfoHash; -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; - -use super::RepositoryAsync; -use crate::entry::peer_list::PeerList; -use crate::entry::Entry; -use crate::{EntrySingle, TorrentsRwLockTokio}; - -#[derive(Default, Debug)] -pub struct RwLockTokio { - pub(crate) torrents: tokio::sync::RwLock>, -} - -impl RwLockTokio { - pub fn write( - &self, - ) -> impl std::future::Future< - Output = tokio::sync::RwLockWriteGuard<'_, std::collections::BTreeMap>, - > { - self.torrents.write() - } -} - -impl TorrentsRwLockTokio { - async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>( - &'a self, - ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl RepositoryAsync for TorrentsRwLockTokio -where - EntrySingle: Entry, -{ - async fn upsert_peer( - &self, - info_hash: &InfoHash, - peer: &peer::Peer, - _opt_persistent_torrent: Option, - ) -> bool { - // todo: load persistent torrent data if provided - - let mut db = self.get_torrents_mut().await; - - let entry = db.entry(*info_hash).or_insert(EntrySingle::default()); - - entry.upsert_peer(peer) - } - - async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { - self.get(info_hash).await.map(|entry| entry.get_swarm_metadata()) - } - - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents().await; - db.get(key).cloned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { - let db = 
self.get_torrents().await; - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); - - for entry in self.get_torrents().await.values() { - let stats = entry.get_swarm_metadata(); - metrics.total_complete += u64::from(stats.complete); - metrics.total_downloaded += u64::from(stats.downloaded); - metrics.total_incomplete += u64::from(stats.incomplete); - metrics.total_torrents += 1; - } - - metrics - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut torrents = self.get_torrents_mut().await; - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(info_hash) { - continue; - } - - let entry = EntrySingle { - swarm: PeerList::default(), - downloaded: *completed, - }; - - torrents.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut().await; - db.remove(key) - } - - async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - let mut db = self.get_torrents_mut().await; - let entries = db.values_mut(); - - for entry in entries { - entry.remove_inactive_peers(current_cutoff); - } - } - - async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - let mut db = self.get_torrents_mut().await; - - db.retain(|_, e| e.meets_retaining_policy(policy)); - } -} diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs deleted file mode 100644 index eb7e300fd..000000000 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs +++ /dev/null @@ -1,135 +0,0 @@ 
-use std::sync::Arc; - -use bittorrent_primitives::info_hash::InfoHash; -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; - -use super::RepositoryAsync; -use crate::entry::peer_list::PeerList; -use crate::entry::{Entry, EntrySync}; -use crate::{EntryMutexStd, EntrySingle, TorrentsRwLockTokioMutexStd}; - -impl TorrentsRwLockTokioMutexStd { - async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>( - &'a self, - ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl RepositoryAsync for TorrentsRwLockTokioMutexStd -where - EntryMutexStd: EntrySync, - EntrySingle: Entry, -{ - async fn upsert_peer( - &self, - info_hash: &InfoHash, - peer: &peer::Peer, - _opt_persistent_torrent: Option, - ) -> bool { - // todo: load persistent torrent data if provided - - let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); - - let entry = if let Some(entry) = maybe_entry { - entry - } else { - let mut db = self.get_torrents_mut().await; - let entry = db.entry(*info_hash).or_insert(Arc::default()); - entry.clone() - }; - - entry.upsert_peer(peer) - } - - async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { - self.get(info_hash).await.map(|entry| entry.get_swarm_metadata()) - } - - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents().await; - db.get(key).cloned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { - let db = self.get_torrents().await; - - match pagination { - 
Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); - - for entry in self.get_torrents().await.values() { - let stats = entry.get_swarm_metadata(); - metrics.total_complete += u64::from(stats.complete); - metrics.total_downloaded += u64::from(stats.downloaded); - metrics.total_incomplete += u64::from(stats.incomplete); - metrics.total_torrents += 1; - } - - metrics - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut torrents = self.get_torrents_mut().await; - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(info_hash) { - continue; - } - - let entry = EntryMutexStd::new( - EntrySingle { - swarm: PeerList::default(), - downloaded: *completed, - } - .into(), - ); - - torrents.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut().await; - db.remove(key) - } - - async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - let db = self.get_torrents().await; - let entries = db.values().cloned(); - - for entry in entries { - entry.remove_inactive_peers(current_cutoff); - } - } - - async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - let mut db = self.get_torrents_mut().await; - - db.retain(|_, e| e.lock().expect("it should lock entry").meets_retaining_policy(policy)); - } -} diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs deleted file mode 100644 index c8ebaf4d6..000000000 --- a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs +++ 
/dev/null @@ -1,148 +0,0 @@ -use std::sync::Arc; - -use bittorrent_primitives::info_hash::InfoHash; -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; - -use super::RepositoryAsync; -use crate::entry::peer_list::PeerList; -use crate::entry::{Entry, EntryAsync}; -use crate::{EntryMutexTokio, EntrySingle, TorrentsRwLockTokioMutexTokio}; - -impl TorrentsRwLockTokioMutexTokio { - async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.read().await - } - - async fn get_torrents_mut<'a>( - &'a self, - ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> - where - std::collections::BTreeMap: 'a, - { - self.torrents.write().await - } -} - -impl RepositoryAsync for TorrentsRwLockTokioMutexTokio -where - EntryMutexTokio: EntryAsync, - EntrySingle: Entry, -{ - async fn upsert_peer( - &self, - info_hash: &InfoHash, - peer: &peer::Peer, - _opt_persistent_torrent: Option, - ) -> bool { - // todo: load persistent torrent data if provided - - let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); - - let entry = if let Some(entry) = maybe_entry { - entry - } else { - let mut db = self.get_torrents_mut().await; - let entry = db.entry(*info_hash).or_insert(Arc::default()); - entry.clone() - }; - - entry.upsert_peer(peer).await - } - - async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { - match self.get(info_hash).await { - Some(entry) => Some(entry.get_swarm_metadata().await), - None => None, - } - } - - async fn get(&self, key: &InfoHash) -> Option { - let db = self.get_torrents().await; - db.get(key).cloned() - } - - async fn get_paginated(&self, pagination: Option<&Pagination>) -> 
Vec<(InfoHash, EntryMutexTokio)> { - let db = self.get_torrents().await; - - match pagination { - Some(pagination) => db - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|(a, b)| (*a, b.clone())) - .collect(), - None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), - } - } - - async fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); - - for entry in self.get_torrents().await.values() { - let stats = entry.get_swarm_metadata().await; - metrics.total_complete += u64::from(stats.complete); - metrics.total_downloaded += u64::from(stats.downloaded); - metrics.total_incomplete += u64::from(stats.incomplete); - metrics.total_torrents += 1; - } - - metrics - } - - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - let mut db = self.get_torrents_mut().await; - - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if db.contains_key(info_hash) { - continue; - } - - let entry = EntryMutexTokio::new( - EntrySingle { - swarm: PeerList::default(), - downloaded: *completed, - } - .into(), - ); - - db.insert(*info_hash, entry); - } - } - - async fn remove(&self, key: &InfoHash) -> Option { - let mut db = self.get_torrents_mut().await; - db.remove(key) - } - - async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - let db = self.get_torrents().await; - let entries = db.values().cloned(); - - for entry in entries { - entry.remove_inactive_peers(current_cutoff).await; - } - } - - async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - let mut db = self.get_torrents_mut().await; - - let mut not_good = Vec::::default(); - - for (&infohash, torrent) in db.iter() { - if !torrent.clone().meets_retaining_policy(policy).await { - not_good.push(infohash); - } - } - - for remove in not_good { - drop(db.remove(&remove)); - } - } -} diff --git 
a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs index 224fc6aa3..95dd3f5ad 100644 --- a/packages/torrent-repository/tests/common/repo.rs +++ b/packages/torrent-repository/tests/common/repo.rs @@ -3,240 +3,84 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use torrust_tracker_torrent_repository::repository::{Repository as _, RepositoryAsync as _}; -use torrust_tracker_torrent_repository::{ - EntrySingle, TorrentsDashMapMutexStd, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, - TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexParkingLot, - TorrentsSkipMapMutexStd, TorrentsSkipMapRwLockParkingLot, -}; +use torrust_tracker_torrent_repository::repository::Repository as _; +use torrust_tracker_torrent_repository::{EntrySingle, TorrentsSkipMapMutexStd}; #[derive(Debug)] pub(crate) enum Repo { - RwLockStd(TorrentsRwLockStd), - RwLockStdMutexStd(TorrentsRwLockStdMutexStd), - RwLockStdMutexTokio(TorrentsRwLockStdMutexTokio), - RwLockTokio(TorrentsRwLockTokio), - RwLockTokioMutexStd(TorrentsRwLockTokioMutexStd), - RwLockTokioMutexTokio(TorrentsRwLockTokioMutexTokio), SkipMapMutexStd(TorrentsSkipMapMutexStd), - SkipMapMutexParkingLot(TorrentsSkipMapMutexParkingLot), - SkipMapRwLockParkingLot(TorrentsSkipMapRwLockParkingLot), - DashMapMutexStd(TorrentsDashMapMutexStd), } impl Repo { - pub(crate) async fn upsert_peer( + pub(crate) fn upsert_peer( &self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option, ) -> bool { match self { - Repo::RwLockStd(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), - Repo::RwLockStdMutexStd(repo) => repo.upsert_peer(info_hash, 
peer, opt_persistent_torrent), - Repo::RwLockStdMutexTokio(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent).await, - Repo::RwLockTokio(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent).await, - Repo::RwLockTokioMutexStd(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent).await, - Repo::RwLockTokioMutexTokio(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent).await, Repo::SkipMapMutexStd(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), - Repo::SkipMapMutexParkingLot(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), - Repo::SkipMapRwLockParkingLot(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), - Repo::DashMapMutexStd(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), } } - pub(crate) async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + pub(crate) fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { match self { - Repo::RwLockStd(repo) => repo.get_swarm_metadata(info_hash), - Repo::RwLockStdMutexStd(repo) => repo.get_swarm_metadata(info_hash), - Repo::RwLockStdMutexTokio(repo) => repo.get_swarm_metadata(info_hash).await, - Repo::RwLockTokio(repo) => repo.get_swarm_metadata(info_hash).await, - Repo::RwLockTokioMutexStd(repo) => repo.get_swarm_metadata(info_hash).await, - Repo::RwLockTokioMutexTokio(repo) => repo.get_swarm_metadata(info_hash).await, Repo::SkipMapMutexStd(repo) => repo.get_swarm_metadata(info_hash), - Repo::SkipMapMutexParkingLot(repo) => repo.get_swarm_metadata(info_hash), - Repo::SkipMapRwLockParkingLot(repo) => repo.get_swarm_metadata(info_hash), - Repo::DashMapMutexStd(repo) => repo.get_swarm_metadata(info_hash), } } - pub(crate) async fn get(&self, key: &InfoHash) -> Option { + pub(crate) fn get(&self, key: &InfoHash) -> Option { match self { - Repo::RwLockStd(repo) => repo.get(key), - Repo::RwLockStdMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), - 
Repo::RwLockStdMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()), - Repo::RwLockTokio(repo) => repo.get(key).await, - Repo::RwLockTokioMutexStd(repo) => Some(repo.get(key).await?.lock().unwrap().clone()), - Repo::RwLockTokioMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()), Repo::SkipMapMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), - Repo::SkipMapMutexParkingLot(repo) => Some(repo.get(key)?.lock().clone()), - Repo::SkipMapRwLockParkingLot(repo) => Some(repo.get(key)?.read().clone()), - Repo::DashMapMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), } } - pub(crate) async fn get_metrics(&self) -> AggregateSwarmMetadata { + pub(crate) fn get_metrics(&self) -> AggregateSwarmMetadata { match self { - Repo::RwLockStd(repo) => repo.get_metrics(), - Repo::RwLockStdMutexStd(repo) => repo.get_metrics(), - Repo::RwLockStdMutexTokio(repo) => repo.get_metrics().await, - Repo::RwLockTokio(repo) => repo.get_metrics().await, - Repo::RwLockTokioMutexStd(repo) => repo.get_metrics().await, - Repo::RwLockTokioMutexTokio(repo) => repo.get_metrics().await, Repo::SkipMapMutexStd(repo) => repo.get_metrics(), - Repo::SkipMapMutexParkingLot(repo) => repo.get_metrics(), - Repo::SkipMapRwLockParkingLot(repo) => repo.get_metrics(), - Repo::DashMapMutexStd(repo) => repo.get_metrics(), } } - pub(crate) async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { + pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { match self { - Repo::RwLockStd(repo) => repo.get_paginated(pagination), - Repo::RwLockStdMutexStd(repo) => repo - .get_paginated(pagination) - .iter() - .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) - .collect(), - Repo::RwLockStdMutexTokio(repo) => { - let mut v: Vec<(InfoHash, EntrySingle)> = vec![]; - - for (i, t) in repo.get_paginated(pagination).await { - v.push((i, t.lock().await.clone())); - } - v - } - 
Repo::RwLockTokio(repo) => repo.get_paginated(pagination).await, - Repo::RwLockTokioMutexStd(repo) => repo - .get_paginated(pagination) - .await - .iter() - .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) - .collect(), - Repo::RwLockTokioMutexTokio(repo) => { - let mut v: Vec<(InfoHash, EntrySingle)> = vec![]; - - for (i, t) in repo.get_paginated(pagination).await { - v.push((i, t.lock().await.clone())); - } - v - } Repo::SkipMapMutexStd(repo) => repo .get_paginated(pagination) .iter() .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) .collect(), - Repo::SkipMapMutexParkingLot(repo) => repo - .get_paginated(pagination) - .iter() - .map(|(i, t)| (*i, t.lock().clone())) - .collect(), - Repo::SkipMapRwLockParkingLot(repo) => repo - .get_paginated(pagination) - .iter() - .map(|(i, t)| (*i, t.read().clone())) - .collect(), - Repo::DashMapMutexStd(repo) => repo - .get_paginated(pagination) - .iter() - .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) - .collect(), } } - pub(crate) async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + pub(crate) fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { match self { - Repo::RwLockStd(repo) => repo.import_persistent(persistent_torrents), - Repo::RwLockStdMutexStd(repo) => repo.import_persistent(persistent_torrents), - Repo::RwLockStdMutexTokio(repo) => repo.import_persistent(persistent_torrents).await, - Repo::RwLockTokio(repo) => repo.import_persistent(persistent_torrents).await, - Repo::RwLockTokioMutexStd(repo) => repo.import_persistent(persistent_torrents).await, - Repo::RwLockTokioMutexTokio(repo) => repo.import_persistent(persistent_torrents).await, Repo::SkipMapMutexStd(repo) => repo.import_persistent(persistent_torrents), - Repo::SkipMapMutexParkingLot(repo) => repo.import_persistent(persistent_torrents), - Repo::SkipMapRwLockParkingLot(repo) => repo.import_persistent(persistent_torrents), - Repo::DashMapMutexStd(repo) 
=> repo.import_persistent(persistent_torrents), } } - pub(crate) async fn remove(&self, key: &InfoHash) -> Option { + pub(crate) fn remove(&self, key: &InfoHash) -> Option { match self { - Repo::RwLockStd(repo) => repo.remove(key), - Repo::RwLockStdMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), - Repo::RwLockStdMutexTokio(repo) => Some(repo.remove(key).await?.lock().await.clone()), - Repo::RwLockTokio(repo) => repo.remove(key).await, - Repo::RwLockTokioMutexStd(repo) => Some(repo.remove(key).await?.lock().unwrap().clone()), - Repo::RwLockTokioMutexTokio(repo) => Some(repo.remove(key).await?.lock().await.clone()), Repo::SkipMapMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), - Repo::SkipMapMutexParkingLot(repo) => Some(repo.remove(key)?.lock().clone()), - Repo::SkipMapRwLockParkingLot(repo) => Some(repo.remove(key)?.write().clone()), - Repo::DashMapMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), } } - pub(crate) async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + pub(crate) fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { match self { - Repo::RwLockStd(repo) => repo.remove_inactive_peers(current_cutoff), - Repo::RwLockStdMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), - Repo::RwLockStdMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, - Repo::RwLockTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, - Repo::RwLockTokioMutexStd(repo) => repo.remove_inactive_peers(current_cutoff).await, - Repo::RwLockTokioMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, Repo::SkipMapMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), - Repo::SkipMapMutexParkingLot(repo) => repo.remove_inactive_peers(current_cutoff), - Repo::SkipMapRwLockParkingLot(repo) => repo.remove_inactive_peers(current_cutoff), - Repo::DashMapMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), } } - pub(crate) async 
fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + pub(crate) fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { match self { - Repo::RwLockStd(repo) => repo.remove_peerless_torrents(policy), - Repo::RwLockStdMutexStd(repo) => repo.remove_peerless_torrents(policy), - Repo::RwLockStdMutexTokio(repo) => repo.remove_peerless_torrents(policy).await, - Repo::RwLockTokio(repo) => repo.remove_peerless_torrents(policy).await, - Repo::RwLockTokioMutexStd(repo) => repo.remove_peerless_torrents(policy).await, - Repo::RwLockTokioMutexTokio(repo) => repo.remove_peerless_torrents(policy).await, Repo::SkipMapMutexStd(repo) => repo.remove_peerless_torrents(policy), - Repo::SkipMapMutexParkingLot(repo) => repo.remove_peerless_torrents(policy), - Repo::SkipMapRwLockParkingLot(repo) => repo.remove_peerless_torrents(policy), - Repo::DashMapMutexStd(repo) => repo.remove_peerless_torrents(policy), } } - pub(crate) async fn insert(&self, info_hash: &InfoHash, torrent: EntrySingle) -> Option { + pub(crate) fn insert(&self, info_hash: &InfoHash, torrent: EntrySingle) -> Option { match self { - Repo::RwLockStd(repo) => { - repo.write().insert(*info_hash, torrent); - } - Repo::RwLockStdMutexStd(repo) => { - repo.write().insert(*info_hash, torrent.into()); - } - Repo::RwLockStdMutexTokio(repo) => { - repo.write().insert(*info_hash, torrent.into()); - } - Repo::RwLockTokio(repo) => { - repo.write().await.insert(*info_hash, torrent); - } - Repo::RwLockTokioMutexStd(repo) => { - repo.write().await.insert(*info_hash, torrent.into()); - } - Repo::RwLockTokioMutexTokio(repo) => { - repo.write().await.insert(*info_hash, torrent.into()); - } Repo::SkipMapMutexStd(repo) => { repo.torrents.insert(*info_hash, torrent.into()); } - Repo::SkipMapMutexParkingLot(repo) => { - repo.torrents.insert(*info_hash, torrent.into()); - } - Repo::SkipMapRwLockParkingLot(repo) => { - repo.torrents.insert(*info_hash, torrent.into()); - } - Repo::DashMapMutexStd(repo) => { - 
repo.torrents.insert(*info_hash, torrent.into()); - } } - self.get(info_hash).await + self.get(info_hash) } } diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 77977837f..d0ef61e81 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -9,65 +9,17 @@ use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::PersistentTorrents; use torrust_tracker_torrent_repository::entry::Entry as _; -use torrust_tracker_torrent_repository::repository::dash_map_mutex_std::XacrimonDashMap; -use torrust_tracker_torrent_repository::repository::rw_lock_std::RwLockStd; -use torrust_tracker_torrent_repository::repository::rw_lock_tokio::RwLockTokio; use torrust_tracker_torrent_repository::repository::skip_map_mutex_std::CrossbeamSkipList; use torrust_tracker_torrent_repository::EntrySingle; use crate::common::repo::Repo; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; -#[fixture] -fn standard() -> Repo { - Repo::RwLockStd(RwLockStd::default()) -} - -#[fixture] -fn standard_mutex() -> Repo { - Repo::RwLockStdMutexStd(RwLockStd::default()) -} - -#[fixture] -fn standard_tokio() -> Repo { - Repo::RwLockStdMutexTokio(RwLockStd::default()) -} - -#[fixture] -fn tokio_std() -> Repo { - Repo::RwLockTokio(RwLockTokio::default()) -} - -#[fixture] -fn tokio_mutex() -> Repo { - Repo::RwLockTokioMutexStd(RwLockTokio::default()) -} - -#[fixture] -fn tokio_tokio() -> Repo { - Repo::RwLockTokioMutexTokio(RwLockTokio::default()) -} - #[fixture] fn skip_list_mutex_std() -> Repo { Repo::SkipMapMutexStd(CrossbeamSkipList::default()) } -#[fixture] -fn skip_list_mutex_parking_lot() -> Repo { - Repo::SkipMapMutexParkingLot(CrossbeamSkipList::default()) -} - -#[fixture] -fn skip_list_rw_lock_parking_lot() -> Repo { - 
Repo::SkipMapRwLockParkingLot(CrossbeamSkipList::default()) -} - -#[fixture] -fn dash_map_std() -> Repo { - Repo::DashMapMutexStd(XacrimonDashMap::default()) -} - type Entries = Vec<(InfoHash, EntrySingle)>; #[fixture] @@ -197,9 +149,9 @@ fn persistent_three() -> PersistentTorrents { t.iter().copied().collect() } -async fn make(repo: &Repo, entries: &Entries) { +fn make(repo: &Repo, entries: &Entries) { for (info_hash, entry) in entries { - repo.insert(info_hash, entry.clone()).await; + repo.insert(info_hash, entry.clone()); } } @@ -248,28 +200,13 @@ fn policy_remove_persist() -> TrackerPolicy { #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_get_a_torrent_entry( - #[values( - standard(), - standard_mutex(), - standard_tokio(), - tokio_std(), - tokio_mutex(), - tokio_tokio(), - skip_list_mutex_std(), - skip_list_mutex_parking_lot(), - skip_list_rw_lock_parking_lot(), - dash_map_std() - )] - repo: Repo, - #[case] entries: Entries, -) { - make(&repo, &entries).await; +async fn it_should_get_a_torrent_entry(#[values(skip_list_mutex_std())] repo: Repo, #[case] entries: Entries) { + make(&repo, &entries); if let Some((info_hash, torrent)) = entries.first() { - assert_eq!(repo.get(info_hash).await, Some(torrent.clone())); + assert_eq!(repo.get(info_hash), Some(torrent.clone())); } else { - assert_eq!(repo.get(&InfoHash::default()).await, None); + assert_eq!(repo.get(&InfoHash::default()), None); } } @@ -284,28 +221,17 @@ async fn it_should_get_a_torrent_entry( #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( - #[values( - standard(), - standard_mutex(), - standard_tokio(), - tokio_std(), - tokio_mutex(), - tokio_tokio(), - skip_list_mutex_std(), - skip_list_mutex_parking_lot(), - skip_list_rw_lock_parking_lot() - )] - repo: Repo, + #[values(skip_list_mutex_std())] repo: Repo, #[case] entries: Entries, many_out_of_order: 
Entries, ) { - make(&repo, &entries).await; + make(&repo, &entries); - let entries_a = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::>(); + let entries_a = repo.get_paginated(None).iter().map(|(i, _)| *i).collect::>(); - make(&repo, &many_out_of_order).await; + make(&repo, &many_out_of_order); - let entries_b = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::>(); + let entries_b = repo.get_paginated(None).iter().map(|(i, _)| *i).collect::>(); let is_equal = entries_b.iter().take(entries_a.len()).copied().collect::>() == entries_a; @@ -328,36 +254,25 @@ async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_get_paginated( - #[values( - standard(), - standard_mutex(), - standard_tokio(), - tokio_std(), - tokio_mutex(), - tokio_tokio(), - skip_list_mutex_std(), - skip_list_mutex_parking_lot(), - skip_list_rw_lock_parking_lot() - )] - repo: Repo, + #[values(skip_list_mutex_std())] repo: Repo, #[case] entries: Entries, #[values(paginated_limit_zero(), paginated_limit_one(), paginated_limit_one_offset_one())] paginated: Pagination, ) { - make(&repo, &entries).await; + make(&repo, &entries); - let mut info_hashes = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::>(); + let mut info_hashes = repo.get_paginated(None).iter().map(|(i, _)| *i).collect::>(); info_hashes.sort(); match paginated { // it should return empty if limit is zero. - Pagination { limit: 0, .. } => assert_eq!(repo.get_paginated(Some(&paginated)).await, vec![]), + Pagination { limit: 0, .. } => assert_eq!(repo.get_paginated(Some(&paginated)), vec![]), // it should return a single entry if the limit is one. 
Pagination { limit: 1, offset: 0 } => { if info_hashes.is_empty() { - assert_eq!(repo.get_paginated(Some(&paginated)).await.len(), 0); + assert_eq!(repo.get_paginated(Some(&paginated)).len(), 0); } else { - let page = repo.get_paginated(Some(&paginated)).await; + let page = repo.get_paginated(Some(&paginated)); assert_eq!(page.len(), 1); assert_eq!(page.first().map(|(i, _)| i), info_hashes.first()); } @@ -366,7 +281,7 @@ async fn it_should_get_paginated( // it should return the only the second entry if both the limit and the offset are one. Pagination { limit: 1, offset: 1 } => { if info_hashes.len() > 1 { - let page = repo.get_paginated(Some(&paginated)).await; + let page = repo.get_paginated(Some(&paginated)); assert_eq!(page.len(), 1); assert_eq!(page[0].0, info_hashes[1]); } @@ -386,25 +301,10 @@ async fn it_should_get_paginated( #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_get_metrics( - #[values( - standard(), - standard_mutex(), - standard_tokio(), - tokio_std(), - tokio_mutex(), - tokio_tokio(), - skip_list_mutex_std(), - skip_list_mutex_parking_lot(), - skip_list_rw_lock_parking_lot(), - dash_map_std() - )] - repo: Repo, - #[case] entries: Entries, -) { +async fn it_should_get_metrics(#[values(skip_list_mutex_std())] repo: Repo, #[case] entries: Entries) { use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; - make(&repo, &entries).await; + make(&repo, &entries); let mut metrics = AggregateSwarmMetadata::default(); @@ -417,7 +317,7 @@ async fn it_should_get_metrics( metrics.total_downloaded += u64::from(stats.downloaded); } - assert_eq!(repo.get_metrics().await, metrics); + assert_eq!(repo.get_metrics(), metrics); } #[rstest] @@ -431,33 +331,21 @@ async fn it_should_get_metrics( #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_import_persistent_torrents( - #[values( - standard(), - standard_mutex(), - standard_tokio(), - tokio_std(), - 
tokio_mutex(), - tokio_tokio(), - skip_list_mutex_std(), - skip_list_mutex_parking_lot(), - skip_list_rw_lock_parking_lot(), - dash_map_std() - )] - repo: Repo, + #[values(skip_list_mutex_std())] repo: Repo, #[case] entries: Entries, #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: PersistentTorrents, ) { - make(&repo, &entries).await; + make(&repo, &entries); - let mut downloaded = repo.get_metrics().await.total_downloaded; + let mut downloaded = repo.get_metrics().total_downloaded; persistent_torrents.iter().for_each(|(_, d)| downloaded += u64::from(*d)); - repo.import_persistent(&persistent_torrents).await; + repo.import_persistent(&persistent_torrents); - assert_eq!(repo.get_metrics().await.total_downloaded, downloaded); + assert_eq!(repo.get_metrics().total_downloaded, downloaded); for (entry, _) in persistent_torrents { - assert!(repo.get(&entry).await.is_some()); + assert!(repo.get(&entry).is_some()); } } @@ -471,33 +359,18 @@ async fn it_should_import_persistent_torrents( #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_remove_an_entry( - #[values( - standard(), - standard_mutex(), - standard_tokio(), - tokio_std(), - tokio_mutex(), - tokio_tokio(), - skip_list_mutex_std(), - skip_list_mutex_parking_lot(), - skip_list_rw_lock_parking_lot(), - dash_map_std() - )] - repo: Repo, - #[case] entries: Entries, -) { - make(&repo, &entries).await; +async fn it_should_remove_an_entry(#[values(skip_list_mutex_std())] repo: Repo, #[case] entries: Entries) { + make(&repo, &entries); for (info_hash, torrent) in entries { - assert_eq!(repo.get(&info_hash).await, Some(torrent.clone())); - assert_eq!(repo.remove(&info_hash).await, Some(torrent)); + assert_eq!(repo.get(&info_hash), Some(torrent.clone())); + assert_eq!(repo.remove(&info_hash), Some(torrent)); - assert_eq!(repo.get(&info_hash).await, None); - assert_eq!(repo.remove(&info_hash).await, None); + 
assert_eq!(repo.get(&info_hash), None); + assert_eq!(repo.remove(&info_hash), None); } - assert_eq!(repo.get_metrics().await.total_torrents, 0); + assert_eq!(repo.get_metrics().total_torrents, 0); } #[rstest] @@ -510,22 +383,7 @@ async fn it_should_remove_an_entry( #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_remove_inactive_peers( - #[values( - standard(), - standard_mutex(), - standard_tokio(), - tokio_std(), - tokio_mutex(), - tokio_tokio(), - skip_list_mutex_std(), - skip_list_mutex_parking_lot(), - skip_list_rw_lock_parking_lot(), - dash_map_std() - )] - repo: Repo, - #[case] entries: Entries, -) { +async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: Repo, #[case] entries: Entries) { use std::ops::Sub as _; use std::time::Duration; @@ -538,7 +396,7 @@ async fn it_should_remove_inactive_peers( const TIMEOUT: Duration = Duration::from_secs(120); const EXPIRE: Duration = Duration::from_secs(121); - make(&repo, &entries).await; + make(&repo, &entries); let info_hash: InfoHash; let mut peer: peer::Peer; @@ -562,15 +420,15 @@ async fn it_should_remove_inactive_peers( // Insert the infohash and peer into the repository // and verify there is an extra torrent entry. { - repo.upsert_peer(&info_hash, &peer, None).await; - assert_eq!(repo.get_metrics().await.total_torrents, entries.len() as u64 + 1); + repo.upsert_peer(&info_hash, &peer, None); + assert_eq!(repo.get_metrics().total_torrents, entries.len() as u64 + 1); } // Insert the infohash and peer into the repository // and verify the swarm metadata was updated. 
{ - repo.upsert_peer(&info_hash, &peer, None).await; - let stats = repo.get_swarm_metadata(&info_hash).await; + repo.upsert_peer(&info_hash, &peer, None); + let stats = repo.get_swarm_metadata(&info_hash); assert_eq!( stats, Some(SwarmMetadata { @@ -583,19 +441,18 @@ async fn it_should_remove_inactive_peers( // Verify that this new peer was inserted into the repository. { - let entry = repo.get(&info_hash).await.expect("it_should_get_some"); + let entry = repo.get(&info_hash).expect("it_should_get_some"); assert!(entry.get_peers(None).contains(&peer.into())); } // Remove peers that have not been updated since the timeout (120 seconds ago). { - repo.remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed")) - .await; + repo.remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed")); } // Verify that the this peer was removed from the repository. { - let entry = repo.get(&info_hash).await.expect("it_should_get_some"); + let entry = repo.get(&info_hash).expect("it_should_get_some"); assert!(!entry.get_peers(None).contains(&peer.into())); } } @@ -611,27 +468,15 @@ async fn it_should_remove_inactive_peers( #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_remove_peerless_torrents( - #[values( - standard(), - standard_mutex(), - standard_tokio(), - tokio_std(), - tokio_mutex(), - tokio_tokio(), - skip_list_mutex_std(), - skip_list_mutex_parking_lot(), - skip_list_rw_lock_parking_lot(), - dash_map_std() - )] - repo: Repo, + #[values(skip_list_mutex_std())] repo: Repo, #[case] entries: Entries, #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, ) { - make(&repo, &entries).await; + make(&repo, &entries); - repo.remove_peerless_torrents(&policy).await; + repo.remove_peerless_torrents(&policy); - let torrents = repo.get_paginated(None).await; + let torrents = repo.get_paginated(None); for (_, entry) in torrents { 
assert!(entry.meets_retaining_policy(&policy)); From b2a96842e87e0c1bcb5b3ad140865b760b17683b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 30 Apr 2025 12:55:48 +0100 Subject: [PATCH 537/802] feat!: [#1491] remove unused torrent repository entry types Entry types that are not used in production. They have been moved to a new package `torrent-repository-benchmarking`. --- Cargo.lock | 1 - packages/torrent-repository/Cargo.toml | 1 - packages/torrent-repository/src/entry/mod.rs | 3 - .../src/entry/mutex_parking_lot.rs | 49 ----- .../src/entry/mutex_tokio.rs | 49 ----- .../src/entry/rw_lock_parking_lot.rs | 49 ----- packages/torrent-repository/src/lib.rs | 4 - .../src/repository/skip_map_mutex_std.rs | 190 +----------------- .../tests/common/torrent.rs | 49 +---- .../torrent-repository/tests/entry/mod.rs | 167 +++++++-------- 10 files changed, 80 insertions(+), 482 deletions(-) delete mode 100644 packages/torrent-repository/src/entry/mutex_parking_lot.rs delete mode 100644 packages/torrent-repository/src/entry/mutex_tokio.rs delete mode 100644 packages/torrent-repository/src/entry/rw_lock_parking_lot.rs diff --git a/Cargo.lock b/Cargo.lock index 5bce85e46..db6838e66 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4841,7 +4841,6 @@ dependencies = [ "bittorrent-primitives", "criterion", "crossbeam-skiplist", - "parking_lot", "rstest", "tokio", "torrust-tracker-clock", diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index d12dcbf44..6fc5f483b 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -19,7 +19,6 @@ version.workspace = true aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" crossbeam-skiplist = "0" -parking_lot = "0" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = 
"../configuration" } diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs index b920839d9..ddd567a57 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -8,11 +8,8 @@ use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use self::peer_list::PeerList; -pub mod mutex_parking_lot; pub mod mutex_std; -pub mod mutex_tokio; pub mod peer_list; -pub mod rw_lock_parking_lot; pub mod single; pub trait Entry { diff --git a/packages/torrent-repository/src/entry/mutex_parking_lot.rs b/packages/torrent-repository/src/entry/mutex_parking_lot.rs deleted file mode 100644 index 738c3ff9d..000000000 --- a/packages/torrent-repository/src/entry/mutex_parking_lot.rs +++ /dev/null @@ -1,49 +0,0 @@ -use std::net::SocketAddr; -use std::sync::Arc; - -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; - -use super::{Entry, EntrySync}; -use crate::{EntryMutexParkingLot, EntrySingle}; - -impl EntrySync for EntryMutexParkingLot { - fn get_swarm_metadata(&self) -> SwarmMetadata { - self.lock().get_swarm_metadata() - } - - fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { - self.lock().meets_retaining_policy(policy) - } - - fn peers_is_empty(&self) -> bool { - self.lock().peers_is_empty() - } - - fn get_peers_len(&self) -> usize { - self.lock().get_peers_len() - } - - fn get_peers(&self, limit: Option) -> Vec> { - self.lock().get_peers(limit) - } - - fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { - self.lock().get_peers_for_client(client, limit) - } - - fn upsert_peer(&self, peer: &peer::Peer) -> bool { - self.lock().upsert_peer(peer) - } - - fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - self.lock().remove_inactive_peers(current_cutoff); - } -} - -impl From for 
EntryMutexParkingLot { - fn from(entry: EntrySingle) -> Self { - Arc::new(parking_lot::Mutex::new(entry)) - } -} diff --git a/packages/torrent-repository/src/entry/mutex_tokio.rs b/packages/torrent-repository/src/entry/mutex_tokio.rs deleted file mode 100644 index 6db789a72..000000000 --- a/packages/torrent-repository/src/entry/mutex_tokio.rs +++ /dev/null @@ -1,49 +0,0 @@ -use std::net::SocketAddr; -use std::sync::Arc; - -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; - -use super::{Entry, EntryAsync}; -use crate::{EntryMutexTokio, EntrySingle}; - -impl EntryAsync for EntryMutexTokio { - async fn get_swarm_metadata(&self) -> SwarmMetadata { - self.lock().await.get_swarm_metadata() - } - - async fn meets_retaining_policy(self, policy: &TrackerPolicy) -> bool { - self.lock().await.meets_retaining_policy(policy) - } - - async fn peers_is_empty(&self) -> bool { - self.lock().await.peers_is_empty() - } - - async fn get_peers_len(&self) -> usize { - self.lock().await.get_peers_len() - } - - async fn get_peers(&self, limit: Option) -> Vec> { - self.lock().await.get_peers(limit) - } - - async fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { - self.lock().await.get_peers_for_client(client, limit) - } - - async fn upsert_peer(self, peer: &peer::Peer) -> bool { - self.lock().await.upsert_peer(peer) - } - - async fn remove_inactive_peers(self, current_cutoff: DurationSinceUnixEpoch) { - self.lock().await.remove_inactive_peers(current_cutoff); - } -} - -impl From for EntryMutexTokio { - fn from(entry: EntrySingle) -> Self { - Arc::new(tokio::sync::Mutex::new(entry)) - } -} diff --git a/packages/torrent-repository/src/entry/rw_lock_parking_lot.rs b/packages/torrent-repository/src/entry/rw_lock_parking_lot.rs deleted file mode 100644 index ac0dc0b30..000000000 --- 
a/packages/torrent-repository/src/entry/rw_lock_parking_lot.rs +++ /dev/null @@ -1,49 +0,0 @@ -use std::net::SocketAddr; -use std::sync::Arc; - -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; - -use super::{Entry, EntrySync}; -use crate::{EntryRwLockParkingLot, EntrySingle}; - -impl EntrySync for EntryRwLockParkingLot { - fn get_swarm_metadata(&self) -> SwarmMetadata { - self.read().get_swarm_metadata() - } - - fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { - self.read().meets_retaining_policy(policy) - } - - fn peers_is_empty(&self) -> bool { - self.read().peers_is_empty() - } - - fn get_peers_len(&self) -> usize { - self.read().get_peers_len() - } - - fn get_peers(&self, limit: Option) -> Vec> { - self.read().get_peers(limit) - } - - fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { - self.read().get_peers_for_client(client, limit) - } - - fn upsert_peer(&self, peer: &peer::Peer) -> bool { - self.write().upsert_peer(peer) - } - - fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - self.write().remove_inactive_peers(current_cutoff); - } -} - -impl From for EntryRwLockParkingLot { - fn from(entry: EntrySingle) -> Self { - Arc::new(parking_lot::RwLock::new(entry)) - } -} diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index b4ee5298e..26434b1d4 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -7,12 +7,8 @@ pub mod entry; pub mod repository; // Repo Entries - pub type EntrySingle = entry::Torrent; pub type EntryMutexStd = Arc>; -pub type EntryMutexTokio = Arc>; -pub type EntryMutexParkingLot = Arc>; -pub type EntryRwLockParkingLot = Arc>; // Repository pub type TorrentsSkipMapMutexStd = CrossbeamSkipList; diff --git 
a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs index 8a15a9442..f91334bab 100644 --- a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use bittorrent_primitives::info_hash::InfoHash; use crossbeam_skiplist::SkipMap; use torrust_tracker_configuration::TrackerPolicy; @@ -10,7 +8,7 @@ use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent use super::Repository; use crate::entry::peer_list::PeerList; use crate::entry::{Entry, EntrySync}; -use crate::{EntryMutexParkingLot, EntryMutexStd, EntryRwLockParkingLot, EntrySingle}; +use crate::{EntryMutexStd, EntrySingle}; #[derive(Default, Debug)] pub struct CrossbeamSkipList { @@ -140,189 +138,3 @@ where } } } - -impl Repository for CrossbeamSkipList -where - EntryRwLockParkingLot: EntrySync, - EntrySingle: Entry, -{ - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { - // todo: load persistent torrent data if provided - - let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); - entry.value().upsert_peer(peer) - } - - fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { - self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) - } - - fn get(&self, key: &InfoHash) -> Option { - let maybe_entry = self.torrents.get(key); - maybe_entry.map(|entry| entry.value().clone()) - } - - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); - - for entry in &self.torrents { - let stats = entry.value().read().get_swarm_metadata(); - metrics.total_complete += u64::from(stats.complete); - metrics.total_downloaded += u64::from(stats.downloaded); - metrics.total_incomplete += u64::from(stats.incomplete); - metrics.total_torrents += 1; - } - - metrics - } - - 
fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryRwLockParkingLot)> { - match pagination { - Some(pagination) => self - .torrents - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|entry| (*entry.key(), entry.value().clone())) - .collect(), - None => self - .torrents - .iter() - .map(|entry| (*entry.key(), entry.value().clone())) - .collect(), - } - } - - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - for (info_hash, completed) in persistent_torrents { - if self.torrents.contains_key(info_hash) { - continue; - } - - let entry = EntryRwLockParkingLot::new( - EntrySingle { - swarm: PeerList::default(), - downloaded: *completed, - } - .into(), - ); - - // Since SkipMap is lock-free the torrent could have been inserted - // after checking if it exists. - self.torrents.get_or_insert(*info_hash, entry); - } - } - - fn remove(&self, key: &InfoHash) -> Option { - self.torrents.remove(key).map(|entry| entry.value().clone()) - } - - fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - for entry in &self.torrents { - entry.value().remove_inactive_peers(current_cutoff); - } - } - - fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - for entry in &self.torrents { - if entry.value().meets_retaining_policy(policy) { - continue; - } - - entry.remove(); - } - } -} - -impl Repository for CrossbeamSkipList -where - EntryMutexParkingLot: EntrySync, - EntrySingle: Entry, -{ - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { - // todo: load persistent torrent data if provided - - let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); - entry.value().upsert_peer(peer) - } - - fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { - self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) - } - - fn get(&self, key: &InfoHash) -> Option { - let maybe_entry = 
self.torrents.get(key); - maybe_entry.map(|entry| entry.value().clone()) - } - - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); - - for entry in &self.torrents { - let stats = entry.value().lock().get_swarm_metadata(); - metrics.total_complete += u64::from(stats.complete); - metrics.total_downloaded += u64::from(stats.downloaded); - metrics.total_incomplete += u64::from(stats.incomplete); - metrics.total_torrents += 1; - } - - metrics - } - - fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexParkingLot)> { - match pagination { - Some(pagination) => self - .torrents - .iter() - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .map(|entry| (*entry.key(), entry.value().clone())) - .collect(), - None => self - .torrents - .iter() - .map(|entry| (*entry.key(), entry.value().clone())) - .collect(), - } - } - - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - for (info_hash, completed) in persistent_torrents { - if self.torrents.contains_key(info_hash) { - continue; - } - - let entry = EntryMutexParkingLot::new( - EntrySingle { - swarm: PeerList::default(), - downloaded: *completed, - } - .into(), - ); - - // Since SkipMap is lock-free the torrent could have been inserted - // after checking if it exists. 
- self.torrents.get_or_insert(*info_hash, entry); - } - } - - fn remove(&self, key: &InfoHash) -> Option { - self.torrents.remove(key).map(|entry| entry.value().clone()) - } - - fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - for entry in &self.torrents { - entry.value().remove_inactive_peers(current_cutoff); - } - } - - fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - for entry in &self.torrents { - if entry.value().meets_retaining_policy(policy) { - continue; - } - - entry.remove(); - } - } -} diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs index 927f13169..649c35cce 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -4,98 +4,69 @@ use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; -use torrust_tracker_torrent_repository::entry::{Entry as _, EntryAsync as _, EntrySync as _}; -use torrust_tracker_torrent_repository::{ - EntryMutexParkingLot, EntryMutexStd, EntryMutexTokio, EntryRwLockParkingLot, EntrySingle, -}; +use torrust_tracker_torrent_repository::entry::{Entry as _, EntrySync as _}; +use torrust_tracker_torrent_repository::{EntryMutexStd, EntrySingle}; #[derive(Debug, Clone)] pub(crate) enum Torrent { Single(EntrySingle), MutexStd(EntryMutexStd), - MutexTokio(EntryMutexTokio), - MutexParkingLot(EntryMutexParkingLot), - RwLockParkingLot(EntryRwLockParkingLot), } impl Torrent { - pub(crate) async fn get_stats(&self) -> SwarmMetadata { + pub(crate) fn get_stats(&self) -> SwarmMetadata { match self { Torrent::Single(entry) => entry.get_swarm_metadata(), Torrent::MutexStd(entry) => entry.get_swarm_metadata(), - Torrent::MutexTokio(entry) => entry.clone().get_swarm_metadata().await, - Torrent::MutexParkingLot(entry) => 
entry.clone().get_swarm_metadata(), - Torrent::RwLockParkingLot(entry) => entry.clone().get_swarm_metadata(), } } - pub(crate) async fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + pub(crate) fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { match self { Torrent::Single(entry) => entry.meets_retaining_policy(policy), Torrent::MutexStd(entry) => entry.meets_retaining_policy(policy), - Torrent::MutexTokio(entry) => entry.clone().meets_retaining_policy(policy).await, - Torrent::MutexParkingLot(entry) => entry.meets_retaining_policy(policy), - Torrent::RwLockParkingLot(entry) => entry.meets_retaining_policy(policy), } } - pub(crate) async fn peers_is_empty(&self) -> bool { + pub(crate) fn peers_is_empty(&self) -> bool { match self { Torrent::Single(entry) => entry.peers_is_empty(), Torrent::MutexStd(entry) => entry.peers_is_empty(), - Torrent::MutexTokio(entry) => entry.clone().peers_is_empty().await, - Torrent::MutexParkingLot(entry) => entry.peers_is_empty(), - Torrent::RwLockParkingLot(entry) => entry.peers_is_empty(), } } - pub(crate) async fn get_peers_len(&self) -> usize { + pub(crate) fn get_peers_len(&self) -> usize { match self { Torrent::Single(entry) => entry.get_peers_len(), Torrent::MutexStd(entry) => entry.get_peers_len(), - Torrent::MutexTokio(entry) => entry.clone().get_peers_len().await, - Torrent::MutexParkingLot(entry) => entry.get_peers_len(), - Torrent::RwLockParkingLot(entry) => entry.get_peers_len(), } } - pub(crate) async fn get_peers(&self, limit: Option) -> Vec> { + pub(crate) fn get_peers(&self, limit: Option) -> Vec> { match self { Torrent::Single(entry) => entry.get_peers(limit), Torrent::MutexStd(entry) => entry.get_peers(limit), - Torrent::MutexTokio(entry) => entry.clone().get_peers(limit).await, - Torrent::MutexParkingLot(entry) => entry.get_peers(limit), - Torrent::RwLockParkingLot(entry) => entry.get_peers(limit), } } - pub(crate) async fn get_peers_for_client(&self, client: &SocketAddr, limit: 
Option) -> Vec> { + pub(crate) fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { match self { Torrent::Single(entry) => entry.get_peers_for_client(client, limit), Torrent::MutexStd(entry) => entry.get_peers_for_client(client, limit), - Torrent::MutexTokio(entry) => entry.clone().get_peers_for_client(client, limit).await, - Torrent::MutexParkingLot(entry) => entry.get_peers_for_client(client, limit), - Torrent::RwLockParkingLot(entry) => entry.get_peers_for_client(client, limit), } } - pub(crate) async fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { + pub(crate) fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { match self { Torrent::Single(entry) => entry.upsert_peer(peer), Torrent::MutexStd(entry) => entry.upsert_peer(peer), - Torrent::MutexTokio(entry) => entry.clone().upsert_peer(peer).await, - Torrent::MutexParkingLot(entry) => entry.upsert_peer(peer), - Torrent::RwLockParkingLot(entry) => entry.upsert_peer(peer), } } - pub(crate) async fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { + pub(crate) fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { match self { Torrent::Single(entry) => entry.remove_inactive_peers(current_cutoff), Torrent::MutexStd(entry) => entry.remove_inactive_peers(current_cutoff), - Torrent::MutexTokio(entry) => entry.clone().remove_inactive_peers(current_cutoff).await, - Torrent::MutexParkingLot(entry) => entry.remove_inactive_peers(current_cutoff), - Torrent::RwLockParkingLot(entry) => entry.remove_inactive_peers(current_cutoff), } } } diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index 43d7f94da..0fb8e8d88 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -9,9 +9,7 @@ use torrust_tracker_clock::clock::{self, Time as _}; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use 
torrust_tracker_primitives::peer; use torrust_tracker_primitives::peer::Peer; -use torrust_tracker_torrent_repository::{ - EntryMutexParkingLot, EntryMutexStd, EntryMutexTokio, EntryRwLockParkingLot, EntrySingle, -}; +use torrust_tracker_torrent_repository::{EntryMutexStd, EntrySingle}; use crate::common::torrent::Torrent; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; @@ -26,21 +24,6 @@ fn mutex_std() -> Torrent { Torrent::MutexStd(EntryMutexStd::default()) } -#[fixture] -fn mutex_tokio() -> Torrent { - Torrent::MutexTokio(EntryMutexTokio::default()) -} - -#[fixture] -fn mutex_parking_lot() -> Torrent { - Torrent::MutexParkingLot(EntryMutexParkingLot::default()) -} - -#[fixture] -fn rw_lock_parking_lot() -> Torrent { - Torrent::RwLockParkingLot(EntryRwLockParkingLot::default()) -} - #[fixture] fn policy_none() -> TrackerPolicy { TrackerPolicy::new(0, false, false) @@ -69,39 +52,39 @@ pub enum Makes { Three, } -async fn make(torrent: &mut Torrent, makes: &Makes) -> Vec { +fn make(torrent: &mut Torrent, makes: &Makes) -> Vec { match makes { Makes::Empty => vec![], Makes::Started => { let peer = a_started_peer(1); - torrent.upsert_peer(&peer).await; + torrent.upsert_peer(&peer); vec![peer] } Makes::Completed => { let peer = a_completed_peer(2); - torrent.upsert_peer(&peer).await; + torrent.upsert_peer(&peer); vec![peer] } Makes::Downloaded => { let mut peer = a_started_peer(3); - torrent.upsert_peer(&peer).await; + torrent.upsert_peer(&peer); peer.event = AnnounceEvent::Completed; peer.left = NumberOfBytes::new(0); - torrent.upsert_peer(&peer).await; + torrent.upsert_peer(&peer); vec![peer] } Makes::Three => { let peer_1 = a_started_peer(1); - torrent.upsert_peer(&peer_1).await; + torrent.upsert_peer(&peer_1); let peer_2 = a_completed_peer(2); - torrent.upsert_peer(&peer_2).await; + torrent.upsert_peer(&peer_2); let mut peer_3 = a_started_peer(3); - torrent.upsert_peer(&peer_3).await; + torrent.upsert_peer(&peer_3); peer_3.event = 
AnnounceEvent::Completed; peer_3.left = NumberOfBytes::new(0); - torrent.upsert_peer(&peer_3).await; + torrent.upsert_peer(&peer_3); vec![peer_1, peer_2, peer_3] } } @@ -110,13 +93,10 @@ async fn make(torrent: &mut Torrent, makes: &Makes) -> Vec { #[rstest] #[case::empty(&Makes::Empty)] #[tokio::test] -async fn it_should_be_empty_by_default( - #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, - #[case] makes: &Makes, -) { - make(&mut torrent, makes).await; +async fn it_should_be_empty_by_default(#[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes) { + make(&mut torrent, makes); - assert_eq!(torrent.get_peers_len().await, 0); + assert_eq!(torrent.get_peers_len(), 0); } #[rstest] @@ -127,33 +107,33 @@ async fn it_should_be_empty_by_default( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy( - #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes, #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, ) { - make(&mut torrent, makes).await; + make(&mut torrent, makes); - let has_peers = !torrent.peers_is_empty().await; - let has_downloads = torrent.get_stats().await.downloaded != 0; + let has_peers = !torrent.peers_is_empty(); + let has_downloads = torrent.get_stats().downloaded != 0; match (policy.remove_peerless_torrents, policy.persistent_torrent_completed_stat) { // remove torrents without peers, and keep completed download stats (true, true) => match (has_peers, has_downloads) { // no peers, but has downloads // peers, with or without downloads - (false, true) | (true, true | false) => assert!(torrent.meets_retaining_policy(&policy).await), + (false, true) | (true, true | false) => 
assert!(torrent.meets_retaining_policy(&policy)), // no peers and no downloads - (false, false) => assert!(!torrent.meets_retaining_policy(&policy).await), + (false, false) => assert!(!torrent.meets_retaining_policy(&policy)), }, // remove torrents without peers and drop completed download stats (true, false) => match (has_peers, has_downloads) { // peers, with or without downloads - (true, true | false) => assert!(torrent.meets_retaining_policy(&policy).await), + (true, true | false) => assert!(torrent.meets_retaining_policy(&policy)), // no peers and with or without downloads - (false, true | false) => assert!(!torrent.meets_retaining_policy(&policy).await), + (false, true | false) => assert!(!torrent.meets_retaining_policy(&policy)), }, // keep torrents without peers, but keep or drop completed download stats - (false, true | false) => assert!(torrent.meets_retaining_policy(&policy).await), + (false, true | false) => assert!(torrent.meets_retaining_policy(&policy)), } } @@ -164,13 +144,10 @@ async fn it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_get_peers_for_torrent_entry( - #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, - #[case] makes: &Makes, -) { - let peers = make(&mut torrent, makes).await; +async fn it_should_get_peers_for_torrent_entry(#[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes) { + let peers = make(&mut torrent, makes); - let torrent_peers = torrent.get_peers(None).await; + let torrent_peers = torrent.get_peers(None); assert_eq!(torrent_peers.len(), peers.len()); @@ -186,15 +163,15 @@ async fn it_should_get_peers_for_torrent_entry( #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_update_a_peer(#[values(single(), mutex_std(), mutex_tokio())] mut torrent: Torrent, #[case] 
makes: &Makes) { - make(&mut torrent, makes).await; +async fn it_should_update_a_peer(#[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes) { + make(&mut torrent, makes); // Make and insert a new peer. let mut peer = a_started_peer(-1); - torrent.upsert_peer(&peer).await; + torrent.upsert_peer(&peer); // Get the Inserted Peer by Id. - let peers = torrent.get_peers(None).await; + let peers = torrent.get_peers(None); let original = peers .iter() .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) @@ -204,10 +181,10 @@ async fn it_should_update_a_peer(#[values(single(), mutex_std(), mutex_tokio())] // Announce "Completed" torrent download event. peer.event = AnnounceEvent::Completed; - torrent.upsert_peer(&peer).await; + torrent.upsert_peer(&peer); // Get the Updated Peer by Id. - let peers = torrent.get_peers(None).await; + let peers = torrent.get_peers(None); let updated = peers .iter() .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) @@ -224,19 +201,19 @@ async fn it_should_update_a_peer(#[values(single(), mutex_std(), mutex_tokio())] #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_remove_a_peer_upon_stopped_announcement( - #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes, ) { use torrust_tracker_primitives::peer::ReadInfo as _; - make(&mut torrent, makes).await; + make(&mut torrent, makes); let mut peer = a_started_peer(-1); - torrent.upsert_peer(&peer).await; + torrent.upsert_peer(&peer); // The started peer should be inserted. - let peers = torrent.get_peers(None).await; + let peers = torrent.get_peers(None); let original = peers .iter() .find(|p| p.get_id() == peer.get_id()) @@ -246,10 +223,10 @@ async fn it_should_remove_a_peer_upon_stopped_announcement( // Change peer to "Stopped" and insert. 
peer.event = AnnounceEvent::Stopped; - torrent.upsert_peer(&peer).await; + torrent.upsert_peer(&peer); // It should be removed now. - let peers = torrent.get_peers(None).await; + let peers = torrent.get_peers(None); assert_eq!( peers.iter().find(|p| p.get_id() == peer.get_id()), @@ -265,13 +242,13 @@ async fn it_should_remove_a_peer_upon_stopped_announcement( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloaded_statistic( - #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes, ) { - make(&mut torrent, makes).await; - let downloaded = torrent.get_stats().await.downloaded; + make(&mut torrent, makes); + let downloaded = torrent.get_stats().downloaded; - let peers = torrent.get_peers(None).await; + let peers = torrent.get_peers(None); let mut peer = **peers.first().expect("there should be a peer"); let is_already_completed = peer.event == AnnounceEvent::Completed; @@ -279,8 +256,8 @@ async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloade // Announce "Completed" torrent download event. 
peer.event = AnnounceEvent::Completed; - torrent.upsert_peer(&peer).await; - let stats = torrent.get_stats().await; + torrent.upsert_peer(&peer); + let stats = torrent.get_stats(); if is_already_completed { assert_eq!(stats.downloaded, downloaded); @@ -295,22 +272,19 @@ async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloade #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_update_a_peer_as_a_seeder( - #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, - #[case] makes: &Makes, -) { - let peers = make(&mut torrent, makes).await; +async fn it_should_update_a_peer_as_a_seeder(#[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes) { + let peers = make(&mut torrent, makes); let completed = u32::try_from(peers.iter().filter(|p| p.is_seeder()).count()).expect("it_should_not_be_so_many"); - let peers = torrent.get_peers(None).await; + let peers = torrent.get_peers(None); let mut peer = **peers.first().expect("there should be a peer"); let is_already_non_left = peer.left == NumberOfBytes::new(0); // Set Bytes Left to Zero peer.left = NumberOfBytes::new(0); - torrent.upsert_peer(&peer).await; - let stats = torrent.get_stats().await; + torrent.upsert_peer(&peer); + let stats = torrent.get_stats(); if is_already_non_left { // it was already complete @@ -327,22 +301,19 @@ async fn it_should_update_a_peer_as_a_seeder( #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_update_a_peer_as_incomplete( - #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, - #[case] makes: &Makes, -) { - let peers = make(&mut torrent, makes).await; +async fn it_should_update_a_peer_as_incomplete(#[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes) { + let peers = make(&mut torrent, makes); let 
incomplete = u32::try_from(peers.iter().filter(|p| !p.is_seeder()).count()).expect("it should not be so many"); - let peers = torrent.get_peers(None).await; + let peers = torrent.get_peers(None); let mut peer = **peers.first().expect("there should be a peer"); let completed_already = peer.left == NumberOfBytes::new(0); // Set Bytes Left to no Zero peer.left = NumberOfBytes::new(1); - torrent.upsert_peer(&peer).await; - let stats = torrent.get_stats().await; + torrent.upsert_peer(&peer); + let stats = torrent.get_stats(); if completed_already { // now it is incomplete @@ -360,12 +331,12 @@ async fn it_should_update_a_peer_as_incomplete( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_get_peers_excluding_the_client_socket( - #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes, ) { - make(&mut torrent, makes).await; + make(&mut torrent, makes); - let peers = torrent.get_peers(None).await; + let peers = torrent.get_peers(None); let mut peer = **peers.first().expect("there should be a peer"); let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081); @@ -374,14 +345,14 @@ async fn it_should_get_peers_excluding_the_client_socket( assert_ne!(peer.peer_addr, socket); // it should get the peer as it dose not share the socket. - assert!(torrent.get_peers_for_client(&socket, None).await.contains(&peer.into())); + assert!(torrent.get_peers_for_client(&socket, None).contains(&peer.into())); // set the address to the socket. peer.peer_addr = socket; - torrent.upsert_peer(&peer).await; // Add peer + torrent.upsert_peer(&peer); // Add peer // It should not include the peer that has the same socket. 
- assert!(!torrent.get_peers_for_client(&socket, None).await.contains(&peer.into())); + assert!(!torrent.get_peers_for_client(&socket, None).contains(&peer.into())); } #[rstest] @@ -392,19 +363,19 @@ async fn it_should_get_peers_excluding_the_client_socket( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_limit_the_number_of_peers_returned( - #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes, ) { - make(&mut torrent, makes).await; + make(&mut torrent, makes); // We add one more peer than the scrape limit for peer_number in 1..=74 + 1 { let mut peer = a_started_peer(1); peer.peer_id = *peer::Id::new(peer_number); - torrent.upsert_peer(&peer).await; + torrent.upsert_peer(&peer); } - let peers = torrent.get_peers(Some(TORRENT_PEERS_LIMIT)).await; + let peers = torrent.get_peers(Some(TORRENT_PEERS_LIMIT)); assert_eq!(peers.len(), 74); } @@ -417,13 +388,13 @@ async fn it_should_limit_the_number_of_peers_returned( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_remove_inactive_peers_beyond_cutoff( - #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes, ) { const TIMEOUT: Duration = Duration::from_secs(120); const EXPIRE: Duration = Duration::from_secs(121); - let peers = make(&mut torrent, makes).await; + let peers = make(&mut torrent, makes); let mut peer = a_completed_peer(-1); @@ -432,12 +403,12 @@ async fn it_should_remove_inactive_peers_beyond_cutoff( peer.updated = now.sub(EXPIRE); - torrent.upsert_peer(&peer).await; + torrent.upsert_peer(&peer); - assert_eq!(torrent.get_peers_len().await, peers.len() + 1); + assert_eq!(torrent.get_peers_len(), peers.len() + 1); let current_cutoff = CurrentClock::now_sub(&TIMEOUT).unwrap_or_default(); - 
torrent.remove_inactive_peers(current_cutoff).await; + torrent.remove_inactive_peers(current_cutoff); - assert_eq!(torrent.get_peers_len().await, peers.len()); + assert_eq!(torrent.get_peers_len(), peers.len()); } From 89123fa397f1ffa4bad2e2a912ba03a85cff9920 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 30 Apr 2025 14:19:34 +0100 Subject: [PATCH 538/802] feat!: [#1491] remove unused traits RepositoryAsync and EntryAsync They have been moved to a new package `torrent-repository-benchmarking`. --- packages/torrent-repository/src/entry/mod.rs | 16 ---------------- .../torrent-repository/src/repository/mod.rs | 18 ------------------ 2 files changed, 34 deletions(-) diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs index ddd567a57..24e85ae94 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -59,22 +59,6 @@ pub trait EntrySync { fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); } -#[allow(clippy::module_name_repetitions)] -pub trait EntryAsync { - fn get_swarm_metadata(&self) -> impl std::future::Future + Send; - fn meets_retaining_policy(self, policy: &TrackerPolicy) -> impl std::future::Future + Send; - fn peers_is_empty(&self) -> impl std::future::Future + Send; - fn get_peers_len(&self) -> impl std::future::Future + Send; - fn get_peers(&self, limit: Option) -> impl std::future::Future>> + Send; - fn get_peers_for_client( - &self, - client: &SocketAddr, - limit: Option, - ) -> impl std::future::Future>> + Send; - fn upsert_peer(self, peer: &peer::Peer) -> impl std::future::Future + Send; - fn remove_inactive_peers(self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future + Send; -} - /// A data structure containing all the information about a torrent in the tracker. 
/// /// This is the tracker entry for a given torrent and contains the swarm data, diff --git a/packages/torrent-repository/src/repository/mod.rs b/packages/torrent-repository/src/repository/mod.rs index 96c71f3a0..850289a01 100644 --- a/packages/torrent-repository/src/repository/mod.rs +++ b/packages/torrent-repository/src/repository/mod.rs @@ -19,21 +19,3 @@ pub trait Repository: Debug + Default + Sized + 'static { fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option) -> bool; fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option; } - -#[allow(clippy::module_name_repetitions)] -pub trait RepositoryAsync: Debug + Default + Sized + 'static { - fn get(&self, key: &InfoHash) -> impl std::future::Future> + Send; - fn get_metrics(&self) -> impl std::future::Future + Send; - fn get_paginated(&self, pagination: Option<&Pagination>) -> impl std::future::Future> + Send; - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) -> impl std::future::Future + Send; - fn remove(&self, key: &InfoHash) -> impl std::future::Future> + Send; - fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future + Send; - fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> impl std::future::Future + Send; - fn upsert_peer( - &self, - info_hash: &InfoHash, - peer: &peer::Peer, - opt_persistent_torrent: Option, - ) -> impl std::future::Future + Send; - fn get_swarm_metadata(&self, info_hash: &InfoHash) -> impl std::future::Future> + Send; -} From 382e0af7c9a694fcd2f97825bc16fc1e8784d16e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 30 Apr 2025 17:13:07 +0100 Subject: [PATCH 539/802] faet!: [#1491] remove unused trait Repository --- .../torrent-repository/src/repository/mod.rs | 20 ------------- .../src/repository/skip_map_mutex_std.rs | 29 ++++++++++++------- .../torrent-repository/tests/common/repo.rs | 1 - .../src/torrent/repository/in_memory.rs | 1 - 4 files changed, 18 
insertions(+), 33 deletions(-) diff --git a/packages/torrent-repository/src/repository/mod.rs b/packages/torrent-repository/src/repository/mod.rs index 850289a01..3b8259f9d 100644 --- a/packages/torrent-repository/src/repository/mod.rs +++ b/packages/torrent-repository/src/repository/mod.rs @@ -1,21 +1 @@ -use bittorrent_primitives::info_hash::InfoHash; -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; - pub mod skip_map_mutex_std; - -use std::fmt::Debug; - -pub trait Repository: Debug + Default + Sized + 'static { - fn get(&self, key: &InfoHash) -> Option; - fn get_metrics(&self) -> AggregateSwarmMetadata; - fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, T)>; - fn import_persistent(&self, persistent_torrents: &PersistentTorrents); - fn remove(&self, key: &InfoHash) -> Option; - fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); - fn remove_peerless_torrents(&self, policy: &TrackerPolicy); - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option) -> bool; - fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option; -} diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs index f91334bab..0d13e39b2 100644 --- a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs @@ -5,7 +5,6 @@ use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use super::Repository; use 
crate::entry::peer_list::PeerList; use crate::entry::{Entry, EntrySync}; use crate::{EntryMutexStd, EntrySingle}; @@ -15,7 +14,7 @@ pub struct CrossbeamSkipList { pub torrents: SkipMap, } -impl Repository for CrossbeamSkipList +impl CrossbeamSkipList where EntryMutexStd: EntrySync, EntrySingle: Entry, @@ -36,7 +35,12 @@ where /// /// Returns `true` if the number of downloads was increased because the peer /// completed the download. - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option) -> bool { + pub fn upsert_peer( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + opt_persistent_torrent: Option, + ) -> bool { if let Some(existing_entry) = self.torrents.get(info_hash) { existing_entry.value().upsert_peer(peer) } else { @@ -58,16 +62,19 @@ where } } - fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + pub fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) } - fn get(&self, key: &InfoHash) -> Option { + pub fn get(&self, key: &InfoHash) -> Option { let maybe_entry = self.torrents.get(key); maybe_entry.map(|entry| entry.value().clone()) } - fn get_metrics(&self) -> AggregateSwarmMetadata { + /// # Panics + /// + /// This function panics if the lock for the entry cannot be obtained. 
+ pub fn get_metrics(&self) -> AggregateSwarmMetadata { let mut metrics = AggregateSwarmMetadata::default(); for entry in &self.torrents { @@ -81,7 +88,7 @@ where metrics } - fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + pub fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { match pagination { Some(pagination) => self .torrents @@ -98,7 +105,7 @@ where } } - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + pub fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { for (info_hash, completed) in persistent_torrents { if self.torrents.contains_key(info_hash) { continue; @@ -118,17 +125,17 @@ where } } - fn remove(&self, key: &InfoHash) -> Option { + pub fn remove(&self, key: &InfoHash) -> Option { self.torrents.remove(key).map(|entry| entry.value().clone()) } - fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { for entry in &self.torrents { entry.value().remove_inactive_peers(current_cutoff); } } - fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { for entry in &self.torrents { if entry.value().meets_retaining_policy(policy) { continue; diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs index 95dd3f5ad..54f6ba486 100644 --- a/packages/torrent-repository/tests/common/repo.rs +++ b/packages/torrent-repository/tests/common/repo.rs @@ -3,7 +3,6 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use torrust_tracker_torrent_repository::repository::Repository 
as _; use torrust_tracker_torrent_repository::{EntrySingle, TorrentsSkipMapMutexStd}; #[derive(Debug)] diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index e09bede8e..142338eea 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -8,7 +8,6 @@ use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use torrust_tracker_torrent_repository::entry::EntrySync; -use torrust_tracker_torrent_repository::repository::Repository; use torrust_tracker_torrent_repository::EntryMutexStd; use crate::torrent::Torrents; From ecd2266fdc913099c172097a6677498b60a64686 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 30 Apr 2025 21:22:34 +0100 Subject: [PATCH 540/802] feat!: [#1491] remove unused traits Entry and EntrySync This is part of a bigger refactor makeing the torrent-repositoru package impler. Only using types used in production and removing traits that only have one implementation. 
--- packages/torrent-repository/src/entry/mod.rs | 55 +------------------ .../torrent-repository/src/entry/mutex_std.rs | 51 ----------------- .../torrent-repository/src/entry/single.rs | 25 +++++---- .../src/repository/skip_map_mutex_std.rs | 55 +++++++++++++++---- .../torrent-repository/tests/common/repo.rs | 4 +- .../tests/common/torrent.rs | 29 +++++++--- .../tests/repository/mod.rs | 1 - packages/tracker-core/src/announce_handler.rs | 15 ++++- packages/tracker-core/src/torrent/manager.rs | 3 +- .../src/torrent/repository/in_memory.rs | 36 +++++++++--- packages/tracker-core/src/torrent/services.rs | 33 +++++++++-- 11 files changed, 153 insertions(+), 154 deletions(-) delete mode 100644 packages/torrent-repository/src/entry/mutex_std.rs diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs index 24e85ae94..4b1201730 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -1,64 +1,10 @@ use std::fmt::Debug; -use std::net::SocketAddr; -use std::sync::Arc; - -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use self::peer_list::PeerList; -pub mod mutex_std; pub mod peer_list; pub mod single; -pub trait Entry { - /// It returns the swarm metadata (statistics) as a struct: - /// - /// `(seeders, completed, leechers)` - fn get_swarm_metadata(&self) -> SwarmMetadata; - - /// Returns True if Still a Valid Entry according to the Tracker Policy - fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool; - - /// Returns True if the Peers is Empty - fn peers_is_empty(&self) -> bool; - - /// Returns the number of Peers - fn get_peers_len(&self) -> usize; - - /// Get all swarm peers, optionally limiting the result. 
- fn get_peers(&self, limit: Option) -> Vec>; - - /// It returns the list of peers for a given peer client, optionally limiting the - /// result. - /// - /// It filters out the input peer, typically because we want to return this - /// list of peers to that client peer. - fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec>; - - /// It updates a peer and returns true if the number of complete downloads have increased. - /// - /// The number of peers that have complete downloading is synchronously updated when peers are updated. - /// That's the total torrent downloads counter. - fn upsert_peer(&mut self, peer: &peer::Peer) -> bool; - - /// It removes peer from the swarm that have not been updated for more than `current_cutoff` seconds - fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch); -} - -#[allow(clippy::module_name_repetitions)] -pub trait EntrySync { - fn get_swarm_metadata(&self) -> SwarmMetadata; - fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool; - fn peers_is_empty(&self) -> bool; - fn get_peers_len(&self) -> usize; - fn get_peers(&self, limit: Option) -> Vec>; - fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec>; - fn upsert_peer(&self, peer: &peer::Peer) -> bool; - fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); -} - /// A data structure containing all the information about a torrent in the tracker. 
/// /// This is the tracker entry for a given torrent and contains the swarm data, @@ -68,6 +14,7 @@ pub trait EntrySync { pub struct Torrent { /// A network of peers that are all trying to download the torrent associated to this entry pub(crate) swarm: PeerList, + /// The number of peers that have ever completed downloading the torrent associated to this entry pub(crate) downloaded: u32, } diff --git a/packages/torrent-repository/src/entry/mutex_std.rs b/packages/torrent-repository/src/entry/mutex_std.rs deleted file mode 100644 index 0ab70a96f..000000000 --- a/packages/torrent-repository/src/entry/mutex_std.rs +++ /dev/null @@ -1,51 +0,0 @@ -use std::net::SocketAddr; -use std::sync::Arc; - -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; - -use super::{Entry, EntrySync}; -use crate::{EntryMutexStd, EntrySingle}; - -impl EntrySync for EntryMutexStd { - fn get_swarm_metadata(&self) -> SwarmMetadata { - self.lock().expect("it should get a lock").get_swarm_metadata() - } - - fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { - self.lock().expect("it should get a lock").meets_retaining_policy(policy) - } - - fn peers_is_empty(&self) -> bool { - self.lock().expect("it should get a lock").peers_is_empty() - } - - fn get_peers_len(&self) -> usize { - self.lock().expect("it should get a lock").get_peers_len() - } - - fn get_peers(&self, limit: Option) -> Vec> { - self.lock().expect("it should get lock").get_peers(limit) - } - - fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { - self.lock().expect("it should get lock").get_peers_for_client(client, limit) - } - - fn upsert_peer(&self, peer: &peer::Peer) -> bool { - self.lock().expect("it should lock the entry").upsert_peer(peer) - } - - fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - self.lock() - .expect("it should lock the 
entry") - .remove_inactive_peers(current_cutoff); - } -} - -impl From for EntryMutexStd { - fn from(entry: EntrySingle) -> Self { - Arc::new(std::sync::Mutex::new(entry)) - } -} diff --git a/packages/torrent-repository/src/entry/single.rs b/packages/torrent-repository/src/entry/single.rs index 0f922bd02..44f1012e1 100644 --- a/packages/torrent-repository/src/entry/single.rs +++ b/packages/torrent-repository/src/entry/single.rs @@ -7,12 +7,12 @@ use torrust_tracker_primitives::peer::{self}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; -use super::Entry; use crate::EntrySingle; -impl Entry for EntrySingle { +impl EntrySingle { #[allow(clippy::cast_possible_truncation)] - fn get_swarm_metadata(&self) -> SwarmMetadata { + #[must_use] + pub fn get_swarm_metadata(&self) -> SwarmMetadata { let (seeders, leechers) = self.swarm.seeders_and_leechers(); SwarmMetadata { @@ -22,7 +22,8 @@ impl Entry for EntrySingle { } } - fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + #[must_use] + pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { if policy.persistent_torrent_completed_stat && self.downloaded > 0 { return true; } @@ -34,23 +35,27 @@ impl Entry for EntrySingle { true } - fn peers_is_empty(&self) -> bool { + #[must_use] + pub fn peers_is_empty(&self) -> bool { self.swarm.is_empty() } - fn get_peers_len(&self) -> usize { + #[must_use] + pub fn get_peers_len(&self) -> usize { self.swarm.len() } - fn get_peers(&self, limit: Option) -> Vec> { + #[must_use] + pub fn get_peers(&self, limit: Option) -> Vec> { self.swarm.get_all(limit) } - fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + #[must_use] + pub fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { self.swarm.get_peers_excluding_addr(client, limit) } - fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { + pub fn upsert_peer(&mut self, peer: &peer::Peer) -> 
bool { let mut number_of_downloads_increased: bool = false; match peer::ReadInfo::get_event(peer) { @@ -75,7 +80,7 @@ impl Entry for EntrySingle { number_of_downloads_increased } - fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { + pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { self.swarm.remove_inactive_peers(current_cutoff); } } diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs index 0d13e39b2..3c806ed69 100644 --- a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs @@ -6,7 +6,6 @@ use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMe use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use crate::entry::peer_list::PeerList; -use crate::entry::{Entry, EntrySync}; use crate::{EntryMutexStd, EntrySingle}; #[derive(Default, Debug)] @@ -14,11 +13,7 @@ pub struct CrossbeamSkipList { pub torrents: SkipMap, } -impl CrossbeamSkipList -where - EntryMutexStd: EntrySync, - EntrySingle: Entry, -{ +impl CrossbeamSkipList { /// Upsert a peer into the swarm of a torrent. /// /// Optionally, it can also preset the number of downloads of the torrent @@ -35,6 +30,10 @@ where /// /// Returns `true` if the number of downloads was increased because the peer /// completed the download. + /// + /// # Panics + /// + /// This function panics if the lock for the entry cannot be obtained. 
pub fn upsert_peer( &self, info_hash: &InfoHash, @@ -42,7 +41,11 @@ where opt_persistent_torrent: Option, ) -> bool { if let Some(existing_entry) = self.torrents.get(info_hash) { - existing_entry.value().upsert_peer(peer) + existing_entry + .value() + .lock() + .expect("can't acquire lock for torrent entry") + .upsert_peer(peer) } else { let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { EntryMutexStd::new( @@ -58,12 +61,27 @@ where let inserted_entry = self.torrents.get_or_insert(*info_hash, new_entry); - inserted_entry.value().upsert_peer(peer) + let number_of_downloads_increased = inserted_entry + .value() + .lock() + .expect("can't acquire lock for torrent entry") + .upsert_peer(peer); + + number_of_downloads_increased } } + /// # Panics + /// + /// This function panics if the lock for the entry cannot be obtained. pub fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { - self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) + self.torrents.get(info_hash).map(|entry| { + entry + .value() + .lock() + .expect("can't acquire lock for torrent entry") + .get_swarm_metadata() + }) } pub fn get(&self, key: &InfoHash) -> Option { @@ -129,15 +147,30 @@ where self.torrents.remove(key).map(|entry| entry.value().clone()) } + /// # Panics + /// + /// This function panics if the lock for the entry cannot be obtained. pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { for entry in &self.torrents { - entry.value().remove_inactive_peers(current_cutoff); + entry + .value() + .lock() + .expect("can't acquire lock for torrent entry") + .remove_inactive_peers(current_cutoff); } } + /// # Panics + /// + /// This function panics if the lock for the entry cannot be obtained. 
pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { for entry in &self.torrents { - if entry.value().meets_retaining_policy(policy) { + if entry + .value() + .lock() + .expect("can't acquire lock for torrent entry") + .meets_retaining_policy(policy) + { continue; } diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs index 54f6ba486..41df77bf9 100644 --- a/packages/torrent-repository/tests/common/repo.rs +++ b/packages/torrent-repository/tests/common/repo.rs @@ -1,3 +1,5 @@ +use std::sync::{Arc, Mutex}; + use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; @@ -77,7 +79,7 @@ impl Repo { pub(crate) fn insert(&self, info_hash: &InfoHash, torrent: EntrySingle) -> Option { match self { Repo::SkipMapMutexStd(repo) => { - repo.torrents.insert(*info_hash, torrent.into()); + repo.torrents.insert(*info_hash, Arc::new(Mutex::new(torrent))); } } self.get(info_hash) diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs index 649c35cce..84ea79eef 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -4,7 +4,6 @@ use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; -use torrust_tracker_torrent_repository::entry::{Entry as _, EntrySync as _}; use torrust_tracker_torrent_repository::{EntryMutexStd, EntrySingle}; #[derive(Debug, Clone)] @@ -17,56 +16,68 @@ impl Torrent { pub(crate) fn get_stats(&self) -> SwarmMetadata { match self { Torrent::Single(entry) => entry.get_swarm_metadata(), - Torrent::MutexStd(entry) => entry.get_swarm_metadata(), + Torrent::MutexStd(entry) => entry + .lock() + .expect("can't acquire lock for torrent 
entry") + .get_swarm_metadata(), } } pub(crate) fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { match self { Torrent::Single(entry) => entry.meets_retaining_policy(policy), - Torrent::MutexStd(entry) => entry.meets_retaining_policy(policy), + Torrent::MutexStd(entry) => entry + .lock() + .expect("can't acquire lock for torrent entry") + .meets_retaining_policy(policy), } } pub(crate) fn peers_is_empty(&self) -> bool { match self { Torrent::Single(entry) => entry.peers_is_empty(), - Torrent::MutexStd(entry) => entry.peers_is_empty(), + Torrent::MutexStd(entry) => entry.lock().expect("can't acquire lock for torrent entry").peers_is_empty(), } } pub(crate) fn get_peers_len(&self) -> usize { match self { Torrent::Single(entry) => entry.get_peers_len(), - Torrent::MutexStd(entry) => entry.get_peers_len(), + Torrent::MutexStd(entry) => entry.lock().expect("can't acquire lock for torrent entry").get_peers_len(), } } pub(crate) fn get_peers(&self, limit: Option) -> Vec> { match self { Torrent::Single(entry) => entry.get_peers(limit), - Torrent::MutexStd(entry) => entry.get_peers(limit), + Torrent::MutexStd(entry) => entry.lock().expect("can't acquire lock for torrent entry").get_peers(limit), } } pub(crate) fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { match self { Torrent::Single(entry) => entry.get_peers_for_client(client, limit), - Torrent::MutexStd(entry) => entry.get_peers_for_client(client, limit), + Torrent::MutexStd(entry) => entry + .lock() + .expect("can't acquire lock for torrent entry") + .get_peers_for_client(client, limit), } } pub(crate) fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { match self { Torrent::Single(entry) => entry.upsert_peer(peer), - Torrent::MutexStd(entry) => entry.upsert_peer(peer), + Torrent::MutexStd(entry) => entry.lock().expect("can't acquire lock for torrent entry").upsert_peer(peer), } } pub(crate) fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { 
match self { Torrent::Single(entry) => entry.remove_inactive_peers(current_cutoff), - Torrent::MutexStd(entry) => entry.remove_inactive_peers(current_cutoff), + Torrent::MutexStd(entry) => entry + .lock() + .expect("can't acquire lock for torrent entry") + .remove_inactive_peers(current_cutoff), } } } diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index d0ef61e81..ae0066b25 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -8,7 +8,6 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::PersistentTorrents; -use torrust_tracker_torrent_repository::entry::Entry as _; use torrust_tracker_torrent_repository::repository::skip_map_mutex_std::CrossbeamSkipList; use torrust_tracker_torrent_repository::EntrySingle; diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index b858cae6c..ac70c6f86 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -594,7 +594,6 @@ mod tests { use aquatic_udp_protocol::AnnounceEvent; use torrust_tracker_test_helpers::configuration; - use torrust_tracker_torrent_repository::entry::EntrySync; use crate::announce_handler::tests::the_announce_handler::peer_ip; use crate::announce_handler::{AnnounceHandler, PeersWanted}; @@ -657,10 +656,20 @@ mod tests { .expect("it should be able to get entry"); // It persists the number of completed peers. 
- assert_eq!(torrent_entry.get_swarm_metadata().downloaded, 1); + assert_eq!( + torrent_entry + .lock() + .expect("can't acquire lock for torrent entry") + .get_swarm_metadata() + .downloaded, + 1 + ); // It does not persist the peers - assert!(torrent_entry.peers_is_empty()); + assert!(torrent_entry + .lock() + .expect("can't acquire lock for torrent entry") + .peers_is_empty()); } } diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index 792bb024d..a69f8282b 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -110,7 +110,6 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Core; - use torrust_tracker_torrent_repository::entry::EntrySync; use super::{DatabasePersistentTorrentRepository, TorrentsManager}; use crate::databases::setup::initialize_database; @@ -164,6 +163,8 @@ mod tests { .in_memory_torrent_repository .get(&infohash) .unwrap() + .lock() + .expect("can't acquire lock for torrent entry") .get_swarm_metadata() .downloaded, 1 diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 142338eea..746de190f 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -7,7 +7,6 @@ use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use torrust_tracker_torrent_repository::entry::EntrySync; use torrust_tracker_torrent_repository::EntryMutexStd; use crate::torrent::Torrents; @@ -145,7 +144,10 @@ impl InMemoryTorrentRepository { #[must_use] pub(crate) fn get_swarm_metadata(&self, info_hash: &InfoHash) -> 
SwarmMetadata { match self.torrents.get(info_hash) { - Some(torrent_entry) => torrent_entry.get_swarm_metadata(), + Some(torrent_entry) => torrent_entry + .lock() + .expect("can't acquire lock for torrent entry") + .get_swarm_metadata(), None => SwarmMetadata::zeroed(), } } @@ -171,7 +173,10 @@ impl InMemoryTorrentRepository { pub(crate) fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { match self.torrents.get(info_hash) { None => vec![], - Some(entry) => entry.get_peers_for_client(&peer.peer_addr, Some(max(limit, TORRENT_PEERS_LIMIT))), + Some(entry) => entry + .lock() + .expect("can't acquire lock for torrent entry") + .get_peers_for_client(&peer.peer_addr, Some(max(limit, TORRENT_PEERS_LIMIT))), } } @@ -188,11 +193,18 @@ impl InMemoryTorrentRepository { /// /// A vector of peers (wrapped in `Arc`) representing the active peers for /// the torrent. + /// + /// # Panics + /// + /// This function panics if the lock for the torrent entry cannot be obtained. 
#[must_use] pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { match self.torrents.get(info_hash) { None => vec![], - Some(entry) => entry.get_peers(Some(TORRENT_PEERS_LIMIT)), + Some(entry) => entry + .lock() + .expect("can't acquire lock for torrent entry") + .get_peers(Some(TORRENT_PEERS_LIMIT)), } } @@ -500,7 +512,6 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use torrust_tracker_torrent_repository::entry::EntrySync; use crate::test_helpers::tests::{sample_info_hash, sample_peer}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -520,9 +531,18 @@ mod tests { impl Into for TorrentEntry { fn into(self) -> TorrentEntryInfo { TorrentEntryInfo { - swarm_metadata: self.get_swarm_metadata(), - peers: self.get_peers(None).iter().map(|peer| *peer.clone()).collect(), - number_of_peers: self.get_peers_len(), + swarm_metadata: self + .lock() + .expect("can't acquire lock for torrent entry") + .get_swarm_metadata(), + peers: self + .lock() + .expect("can't acquire lock for torrent entry") + .get_peers(None) + .iter() + .map(|peer| *peer.clone()) + .collect(), + number_of_peers: self.lock().expect("can't acquire lock for torrent entry").get_peers_len(), } } } diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index 88af3b570..2bf4bba71 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -17,7 +17,6 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::peer; -use torrust_tracker_torrent_repository::entry::EntrySync; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -89,15 +88,25 @@ pub struct BasicInfo { /// An [`Option`] which is: /// - `Some(Info)` if the torrent exists in the repository. 
/// - `None` if the torrent is not found. +/// +/// # Panics +/// +/// This function panics if the lock for the torrent entry cannot be obtained. #[must_use] pub fn get_torrent_info(in_memory_torrent_repository: &Arc, info_hash: &InfoHash) -> Option { let torrent_entry_option = in_memory_torrent_repository.get(info_hash); let torrent_entry = torrent_entry_option?; - let stats = torrent_entry.get_swarm_metadata(); + let stats = torrent_entry + .lock() + .expect("can't acquire lock for torrent entry") + .get_swarm_metadata(); - let peers = torrent_entry.get_peers(None); + let peers = torrent_entry + .lock() + .expect("can't acquire lock for torrent entry") + .get_peers(None); let peers = Some(peers.iter().map(|peer| (**peer)).collect()); @@ -127,6 +136,10 @@ pub fn get_torrent_info(in_memory_torrent_repository: &Arc, @@ -135,7 +148,10 @@ pub fn get_torrents_page( let mut basic_infos: Vec = vec![]; for (info_hash, torrent_entry) in in_memory_torrent_repository.get_paginated(pagination) { - let stats = torrent_entry.get_swarm_metadata(); + let stats = torrent_entry + .lock() + .expect("can't acquire lock for torrent entry") + .get_swarm_metadata(); basic_infos.push(BasicInfo { info_hash, @@ -165,12 +181,19 @@ pub fn get_torrents_page( /// # Returns /// /// A vector of [`BasicInfo`] structs for the requested torrents. +/// +/// # Panics +/// +/// This function panics if the lock for the torrent entry cannot be obtained. 
#[must_use] pub fn get_torrents(in_memory_torrent_repository: &Arc, info_hashes: &[InfoHash]) -> Vec { let mut basic_infos: Vec = vec![]; for info_hash in info_hashes { - if let Some(stats) = in_memory_torrent_repository.get(info_hash).map(|t| t.get_swarm_metadata()) { + if let Some(stats) = in_memory_torrent_repository + .get(info_hash) + .map(|t| t.lock().expect("can't acquire lock for torrent entry").get_swarm_metadata()) + { basic_infos.push(BasicInfo { info_hash: *info_hash, seeders: u64::from(stats.complete), From e0a4aac879c801f7ea0f646e62dd4d293b803a39 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 30 Apr 2025 22:12:10 +0100 Subject: [PATCH 541/802] feat!: [#1491] remove unneeded generic --- packages/torrent-repository/src/lib.rs | 3 +-- .../torrent-repository/src/repository/skip_map_mutex_std.rs | 6 +++--- packages/torrent-repository/tests/repository/mod.rs | 5 ++--- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index 26434b1d4..846545387 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -1,6 +1,5 @@ use std::sync::Arc; -use repository::skip_map_mutex_std::CrossbeamSkipList; use torrust_tracker_clock::clock; pub mod entry; @@ -11,7 +10,7 @@ pub type EntrySingle = entry::Torrent; pub type EntryMutexStd = Arc>; // Repository -pub type TorrentsSkipMapMutexStd = CrossbeamSkipList; +pub type TorrentsSkipMapMutexStd = repository::skip_map_mutex_std::TorrentsSkipMapMutexStd; /// This code needs to be copied into each crate. /// Working version, for production. 
diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs index 3c806ed69..fb287b0f1 100644 --- a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs @@ -9,11 +9,11 @@ use crate::entry::peer_list::PeerList; use crate::{EntryMutexStd, EntrySingle}; #[derive(Default, Debug)] -pub struct CrossbeamSkipList { - pub torrents: SkipMap, +pub struct TorrentsSkipMapMutexStd { + pub torrents: SkipMap, } -impl CrossbeamSkipList { +impl TorrentsSkipMapMutexStd { /// Upsert a peer into the swarm of a torrent. /// /// Optionally, it can also preset the number of downloads of the torrent diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index ae0066b25..066c6d5c3 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -8,15 +8,14 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::PersistentTorrents; -use torrust_tracker_torrent_repository::repository::skip_map_mutex_std::CrossbeamSkipList; -use torrust_tracker_torrent_repository::EntrySingle; +use torrust_tracker_torrent_repository::{EntrySingle, TorrentsSkipMapMutexStd}; use crate::common::repo::Repo; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; #[fixture] fn skip_list_mutex_std() -> Repo { - Repo::SkipMapMutexStd(CrossbeamSkipList::default()) + Repo::SkipMapMutexStd(TorrentsSkipMapMutexStd::default()) } type Entries = Vec<(InfoHash, EntrySingle)>; From e87479e0ba34ce21089cd6934f593b824d871d50 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 30 Apr 2025 22:18:16 +0100 Subject: [PATCH 542/802] refactor: [#1491] extract mod torrent --- 
packages/torrent-repository/src/entry/mod.rs | 19 +------------------ .../torrent-repository/src/entry/torrent.rs | 17 +++++++++++++++++ packages/torrent-repository/src/lib.rs | 4 ++-- 3 files changed, 20 insertions(+), 20 deletions(-) create mode 100644 packages/torrent-repository/src/entry/torrent.rs diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs index 4b1201730..610750bb3 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -1,20 +1,3 @@ -use std::fmt::Debug; - -use self::peer_list::PeerList; - pub mod peer_list; pub mod single; - -/// A data structure containing all the information about a torrent in the tracker. -/// -/// This is the tracker entry for a given torrent and contains the swarm data, -/// that's the list of all the peers trying to download the same torrent. -/// The tracker keeps one entry like this for every torrent. -#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct Torrent { - /// A network of peers that are all trying to download the torrent associated to this entry - pub(crate) swarm: PeerList, - - /// The number of peers that have ever completed downloading the torrent associated to this entry - pub(crate) downloaded: u32, -} +pub mod torrent; diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs new file mode 100644 index 000000000..8b923d880 --- /dev/null +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -0,0 +1,17 @@ +use std::fmt::Debug; + +use super::peer_list::PeerList; + +/// A data structure containing all the information about a torrent in the tracker. +/// +/// This is the tracker entry for a given torrent and contains the swarm data, +/// that's the list of all the peers trying to download the same torrent. +/// The tracker keeps one entry like this for every torrent. 
+#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Torrent { + /// A network of peers that are all trying to download the torrent associated to this entry + pub(crate) swarm: PeerList, + + /// The number of peers that have ever completed downloading the torrent associated to this entry + pub(crate) downloaded: u32, +} diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index 846545387..75a711198 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -6,8 +6,8 @@ pub mod entry; pub mod repository; // Repo Entries -pub type EntrySingle = entry::Torrent; -pub type EntryMutexStd = Arc>; +pub type EntrySingle = entry::torrent::Torrent; +pub type EntryMutexStd = Arc>; // Repository pub type TorrentsSkipMapMutexStd = repository::skip_map_mutex_std::TorrentsSkipMapMutexStd; From f868b04a4d5cebb926e850cb95f124df31d55cf4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 30 Apr 2025 22:22:22 +0100 Subject: [PATCH 543/802] refactor: [#1491] put implementation and type in the same module --- packages/torrent-repository/src/entry/mod.rs | 1 - .../torrent-repository/src/entry/single.rs | 86 ------------------- .../torrent-repository/src/entry/torrent.rs | 84 ++++++++++++++++++ 3 files changed, 84 insertions(+), 87 deletions(-) delete mode 100644 packages/torrent-repository/src/entry/single.rs diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs index 610750bb3..785672be5 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -1,3 +1,2 @@ pub mod peer_list; -pub mod single; pub mod torrent; diff --git a/packages/torrent-repository/src/entry/single.rs b/packages/torrent-repository/src/entry/single.rs deleted file mode 100644 index 44f1012e1..000000000 --- a/packages/torrent-repository/src/entry/single.rs +++ /dev/null @@ -1,86 +0,0 @@ -use 
std::net::SocketAddr; -use std::sync::Arc; - -use aquatic_udp_protocol::AnnounceEvent; -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::peer::{self}; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::DurationSinceUnixEpoch; - -use crate::EntrySingle; - -impl EntrySingle { - #[allow(clippy::cast_possible_truncation)] - #[must_use] - pub fn get_swarm_metadata(&self) -> SwarmMetadata { - let (seeders, leechers) = self.swarm.seeders_and_leechers(); - - SwarmMetadata { - downloaded: self.downloaded, - complete: seeders as u32, - incomplete: leechers as u32, - } - } - - #[must_use] - pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { - if policy.persistent_torrent_completed_stat && self.downloaded > 0 { - return true; - } - - if policy.remove_peerless_torrents && self.swarm.is_empty() { - return false; - } - - true - } - - #[must_use] - pub fn peers_is_empty(&self) -> bool { - self.swarm.is_empty() - } - - #[must_use] - pub fn get_peers_len(&self) -> usize { - self.swarm.len() - } - - #[must_use] - pub fn get_peers(&self, limit: Option) -> Vec> { - self.swarm.get_all(limit) - } - - #[must_use] - pub fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { - self.swarm.get_peers_excluding_addr(client, limit) - } - - pub fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { - let mut number_of_downloads_increased: bool = false; - - match peer::ReadInfo::get_event(peer) { - AnnounceEvent::Stopped => { - drop(self.swarm.remove(&peer::ReadInfo::get_id(peer))); - } - AnnounceEvent::Completed => { - let previous = self.swarm.upsert(Arc::new(*peer)); - // Don't count if peer was not previously known and not already completed. 
- if previous.is_some_and(|p| p.event != AnnounceEvent::Completed) { - self.downloaded += 1; - number_of_downloads_increased = true; - } - } - _ => { - // `Started` event (first announced event) or - // `None` event (announcements done at regular intervals). - drop(self.swarm.upsert(Arc::new(*peer))); - } - } - - number_of_downloads_increased - } - - pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { - self.swarm.remove_inactive_peers(current_cutoff); - } -} diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs index 8b923d880..8d09a140f 100644 --- a/packages/torrent-repository/src/entry/torrent.rs +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -1,4 +1,12 @@ use std::fmt::Debug; +use std::net::SocketAddr; +use std::sync::Arc; + +use aquatic_udp_protocol::AnnounceEvent; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::peer::{self}; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::peer_list::PeerList; @@ -15,3 +23,79 @@ pub struct Torrent { /// The number of peers that have ever completed downloading the torrent associated to this entry pub(crate) downloaded: u32, } + +impl Torrent { + #[allow(clippy::cast_possible_truncation)] + #[must_use] + pub fn get_swarm_metadata(&self) -> SwarmMetadata { + let (seeders, leechers) = self.swarm.seeders_and_leechers(); + + SwarmMetadata { + downloaded: self.downloaded, + complete: seeders as u32, + incomplete: leechers as u32, + } + } + + #[must_use] + pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + if policy.persistent_torrent_completed_stat && self.downloaded > 0 { + return true; + } + + if policy.remove_peerless_torrents && self.swarm.is_empty() { + return false; + } + + true + } + + #[must_use] + pub fn peers_is_empty(&self) -> bool { + self.swarm.is_empty() + } + + #[must_use] + pub fn 
get_peers_len(&self) -> usize { + self.swarm.len() + } + + #[must_use] + pub fn get_peers(&self, limit: Option) -> Vec> { + self.swarm.get_all(limit) + } + + #[must_use] + pub fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.swarm.get_peers_excluding_addr(client, limit) + } + + pub fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { + let mut number_of_downloads_increased: bool = false; + + match peer::ReadInfo::get_event(peer) { + AnnounceEvent::Stopped => { + drop(self.swarm.remove(&peer::ReadInfo::get_id(peer))); + } + AnnounceEvent::Completed => { + let previous = self.swarm.upsert(Arc::new(*peer)); + // Don't count if peer was not previously known and not already completed. + if previous.is_some_and(|p| p.event != AnnounceEvent::Completed) { + self.downloaded += 1; + number_of_downloads_increased = true; + } + } + _ => { + // `Started` event (first announced event) or + // `None` event (announcements done at regular intervals). + drop(self.swarm.upsert(Arc::new(*peer))); + } + } + + number_of_downloads_increased + } + + pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { + self.swarm.remove_inactive_peers(current_cutoff); + } +} From 0acfc8f55152aa62b186e6ced2459204211a901f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 30 Apr 2025 22:28:11 +0100 Subject: [PATCH 544/802] refactor: [#1491] remove unneeded type alias EntrySingle --- packages/torrent-repository/src/lib.rs | 3 +-- .../src/repository/skip_map_mutex_std.rs | 7 ++--- .../torrent-repository/tests/common/repo.rs | 11 ++++---- .../tests/common/torrent.rs | 4 +-- .../torrent-repository/tests/entry/mod.rs | 4 +-- .../tests/repository/mod.rs | 27 ++++++++++--------- 6 files changed, 29 insertions(+), 27 deletions(-) diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index 75a711198..3948261fe 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ 
-5,8 +5,7 @@ use torrust_tracker_clock::clock; pub mod entry; pub mod repository; -// Repo Entries -pub type EntrySingle = entry::torrent::Torrent; +// Repo Entry pub type EntryMutexStd = Arc>; // Repository diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs index fb287b0f1..117c2cff9 100644 --- a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs @@ -6,7 +6,8 @@ use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMe use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use crate::entry::peer_list::PeerList; -use crate::{EntryMutexStd, EntrySingle}; +use crate::entry::torrent::Torrent; +use crate::EntryMutexStd; #[derive(Default, Debug)] pub struct TorrentsSkipMapMutexStd { @@ -49,7 +50,7 @@ impl TorrentsSkipMapMutexStd { } else { let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { EntryMutexStd::new( - EntrySingle { + Torrent { swarm: PeerList::default(), downloaded: number_of_downloads, } @@ -130,7 +131,7 @@ impl TorrentsSkipMapMutexStd { } let entry = EntryMutexStd::new( - EntrySingle { + Torrent { swarm: PeerList::default(), downloaded: *completed, } diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs index 41df77bf9..5dc38003c 100644 --- a/packages/torrent-repository/tests/common/repo.rs +++ b/packages/torrent-repository/tests/common/repo.rs @@ -5,7 +5,8 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use torrust_tracker_torrent_repository::{EntrySingle, 
TorrentsSkipMapMutexStd}; +use torrust_tracker_torrent_repository::entry::torrent::Torrent; +use torrust_tracker_torrent_repository::TorrentsSkipMapMutexStd; #[derive(Debug)] pub(crate) enum Repo { @@ -30,7 +31,7 @@ impl Repo { } } - pub(crate) fn get(&self, key: &InfoHash) -> Option { + pub(crate) fn get(&self, key: &InfoHash) -> Option { match self { Repo::SkipMapMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), } @@ -42,7 +43,7 @@ impl Repo { } } - pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { + pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, Torrent)> { match self { Repo::SkipMapMutexStd(repo) => repo .get_paginated(pagination) @@ -58,7 +59,7 @@ impl Repo { } } - pub(crate) fn remove(&self, key: &InfoHash) -> Option { + pub(crate) fn remove(&self, key: &InfoHash) -> Option { match self { Repo::SkipMapMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), } @@ -76,7 +77,7 @@ impl Repo { } } - pub(crate) fn insert(&self, info_hash: &InfoHash, torrent: EntrySingle) -> Option { + pub(crate) fn insert(&self, info_hash: &InfoHash, torrent: Torrent) -> Option { match self { Repo::SkipMapMutexStd(repo) => { repo.torrents.insert(*info_hash, Arc::new(Mutex::new(torrent))); diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs index 84ea79eef..4ef202431 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -4,11 +4,11 @@ use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; -use torrust_tracker_torrent_repository::{EntryMutexStd, EntrySingle}; +use torrust_tracker_torrent_repository::{entry, EntryMutexStd}; #[derive(Debug, Clone)] pub(crate) enum Torrent { - Single(EntrySingle), 
+ Single(entry::torrent::Torrent), MutexStd(EntryMutexStd), } diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index 0fb8e8d88..9d01354ef 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -9,7 +9,7 @@ use torrust_tracker_clock::clock::{self, Time as _}; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::peer; use torrust_tracker_primitives::peer::Peer; -use torrust_tracker_torrent_repository::{EntryMutexStd, EntrySingle}; +use torrust_tracker_torrent_repository::{entry, EntryMutexStd}; use crate::common::torrent::Torrent; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; @@ -17,7 +17,7 @@ use crate::CurrentClock; #[fixture] fn single() -> Torrent { - Torrent::Single(EntrySingle::default()) + Torrent::Single(entry::torrent::Torrent::default()) } #[fixture] fn mutex_std() -> Torrent { diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 066c6d5c3..55dbd6cc7 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -8,7 +8,8 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::PersistentTorrents; -use torrust_tracker_torrent_repository::{EntrySingle, TorrentsSkipMapMutexStd}; +use torrust_tracker_torrent_repository::entry::torrent::Torrent; +use torrust_tracker_torrent_repository::TorrentsSkipMapMutexStd; use crate::common::repo::Repo; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; @@ -18,7 +19,7 @@ fn skip_list_mutex_std() -> Repo { Repo::SkipMapMutexStd(TorrentsSkipMapMutexStd::default()) } -type Entries = Vec<(InfoHash, EntrySingle)>; +type 
Entries = Vec<(InfoHash, Torrent)>; #[fixture] fn empty() -> Entries { @@ -27,26 +28,26 @@ fn empty() -> Entries { #[fixture] fn default() -> Entries { - vec![(InfoHash::default(), EntrySingle::default())] + vec![(InfoHash::default(), Torrent::default())] } #[fixture] fn started() -> Entries { - let mut torrent = EntrySingle::default(); + let mut torrent = Torrent::default(); torrent.upsert_peer(&a_started_peer(1)); vec![(InfoHash::default(), torrent)] } #[fixture] fn completed() -> Entries { - let mut torrent = EntrySingle::default(); + let mut torrent = Torrent::default(); torrent.upsert_peer(&a_completed_peer(2)); vec![(InfoHash::default(), torrent)] } #[fixture] fn downloaded() -> Entries { - let mut torrent = EntrySingle::default(); + let mut torrent = Torrent::default(); let mut peer = a_started_peer(3); torrent.upsert_peer(&peer); peer.event = AnnounceEvent::Completed; @@ -57,17 +58,17 @@ fn downloaded() -> Entries { #[fixture] fn three() -> Entries { - let mut started = EntrySingle::default(); + let mut started = Torrent::default(); let started_h = &mut DefaultHasher::default(); started.upsert_peer(&a_started_peer(1)); started.hash(started_h); - let mut completed = EntrySingle::default(); + let mut completed = Torrent::default(); let completed_h = &mut DefaultHasher::default(); completed.upsert_peer(&a_completed_peer(2)); completed.hash(completed_h); - let mut downloaded = EntrySingle::default(); + let mut downloaded = Torrent::default(); let downloaded_h = &mut DefaultHasher::default(); let mut downloaded_peer = a_started_peer(3); downloaded.upsert_peer(&downloaded_peer); @@ -85,10 +86,10 @@ fn three() -> Entries { #[fixture] fn many_out_of_order() -> Entries { - let mut entries: HashSet<(InfoHash, EntrySingle)> = HashSet::default(); + let mut entries: HashSet<(InfoHash, Torrent)> = HashSet::default(); for i in 0..408 { - let mut entry = EntrySingle::default(); + let mut entry = Torrent::default(); entry.upsert_peer(&a_started_peer(i)); 
entries.insert((InfoHash::from(&i), entry)); @@ -100,10 +101,10 @@ fn many_out_of_order() -> Entries { #[fixture] fn many_hashed_in_order() -> Entries { - let mut entries: BTreeMap = BTreeMap::default(); + let mut entries: BTreeMap = BTreeMap::default(); for i in 0..408 { - let mut entry = EntrySingle::default(); + let mut entry = Torrent::default(); entry.upsert_peer(&a_started_peer(i)); let hash: &mut DefaultHasher = &mut DefaultHasher::default(); From 2b0727e0925d9ee35bed8e5f297eaa38c068dda5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 1 May 2025 12:41:34 +0100 Subject: [PATCH 545/802] refactor: [#1491] reorganize repository module --- packages/torrent-repository/src/lib.rs | 2 +- .../src/{repository/skip_map_mutex_std.rs => repository.rs} | 0 packages/torrent-repository/src/repository/mod.rs | 1 - 3 files changed, 1 insertion(+), 2 deletions(-) rename packages/torrent-repository/src/{repository/skip_map_mutex_std.rs => repository.rs} (100%) delete mode 100644 packages/torrent-repository/src/repository/mod.rs diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index 3948261fe..87f763ebb 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -9,7 +9,7 @@ pub mod repository; pub type EntryMutexStd = Arc>; // Repository -pub type TorrentsSkipMapMutexStd = repository::skip_map_mutex_std::TorrentsSkipMapMutexStd; +pub type TorrentsSkipMapMutexStd = repository::TorrentsSkipMapMutexStd; /// This code needs to be copied into each crate. /// Working version, for production. 
diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository.rs similarity index 100% rename from packages/torrent-repository/src/repository/skip_map_mutex_std.rs rename to packages/torrent-repository/src/repository.rs diff --git a/packages/torrent-repository/src/repository/mod.rs b/packages/torrent-repository/src/repository/mod.rs deleted file mode 100644 index 3b8259f9d..000000000 --- a/packages/torrent-repository/src/repository/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod skip_map_mutex_std; From f106c01203d6c1260d33759ab2d045cc8cb7ebe3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 1 May 2025 12:54:43 +0100 Subject: [PATCH 546/802] refactor: [#1491] move type alias to torrent-repository pkg --- packages/torrent-repository/src/lib.rs | 14 ++++++++++---- packages/torrent-repository/tests/common/repo.rs | 4 ++-- .../torrent-repository/tests/common/torrent.rs | 4 ++-- packages/torrent-repository/tests/entry/mod.rs | 4 ++-- .../torrent-repository/tests/repository/mod.rs | 4 ++-- packages/tracker-core/src/torrent/mod.rs | 13 ------------- .../src/torrent/repository/in_memory.rs | 14 ++++++-------- 7 files changed, 24 insertions(+), 33 deletions(-) diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index 87f763ebb..865d819e3 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -1,15 +1,21 @@ -use std::sync::Arc; +use std::sync::{Arc, Mutex}; use torrust_tracker_clock::clock; pub mod entry; pub mod repository; -// Repo Entry -pub type EntryMutexStd = Arc>; +// Repo entry +pub type TorrentEntry = EntryMutexStd; // Repository -pub type TorrentsSkipMapMutexStd = repository::TorrentsSkipMapMutexStd; +pub type Torrents = TorrentsSkipMapMutexStd; + +// The internal type of the entry +pub(crate) type EntryMutexStd = Arc>; + +// The internal type of the repository +pub(crate) type TorrentsSkipMapMutexStd = 
repository::TorrentsSkipMapMutexStd; /// This code needs to be copied into each crate. /// Working version, for production. diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs index 5dc38003c..357b39776 100644 --- a/packages/torrent-repository/tests/common/repo.rs +++ b/packages/torrent-repository/tests/common/repo.rs @@ -6,11 +6,11 @@ use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use torrust_tracker_torrent_repository::entry::torrent::Torrent; -use torrust_tracker_torrent_repository::TorrentsSkipMapMutexStd; +use torrust_tracker_torrent_repository::Torrents; #[derive(Debug)] pub(crate) enum Repo { - SkipMapMutexStd(TorrentsSkipMapMutexStd), + SkipMapMutexStd(Torrents), } impl Repo { diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs index 4ef202431..1cca7740d 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -4,12 +4,12 @@ use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; -use torrust_tracker_torrent_repository::{entry, EntryMutexStd}; +use torrust_tracker_torrent_repository::{entry, TorrentEntry}; #[derive(Debug, Clone)] pub(crate) enum Torrent { Single(entry::torrent::Torrent), - MutexStd(EntryMutexStd), + MutexStd(TorrentEntry), } impl Torrent { diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index 9d01354ef..e04bd004d 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -9,7 +9,7 @@ use 
torrust_tracker_clock::clock::{self, Time as _}; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::peer; use torrust_tracker_primitives::peer::Peer; -use torrust_tracker_torrent_repository::{entry, EntryMutexStd}; +use torrust_tracker_torrent_repository::{entry, TorrentEntry}; use crate::common::torrent::Torrent; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; @@ -21,7 +21,7 @@ fn single() -> Torrent { } #[fixture] fn mutex_std() -> Torrent { - Torrent::MutexStd(EntryMutexStd::default()) + Torrent::MutexStd(TorrentEntry::default()) } #[fixture] diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 55dbd6cc7..d997cfd7c 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -9,14 +9,14 @@ use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::PersistentTorrents; use torrust_tracker_torrent_repository::entry::torrent::Torrent; -use torrust_tracker_torrent_repository::TorrentsSkipMapMutexStd; +use torrust_tracker_torrent_repository::Torrents; use crate::common::repo::Repo; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; #[fixture] fn skip_list_mutex_std() -> Repo { - Repo::SkipMapMutexStd(TorrentsSkipMapMutexStd::default()) + Repo::SkipMapMutexStd(Torrents::default()) } type Entries = Vec<(InfoHash, Torrent)>; diff --git a/packages/tracker-core/src/torrent/mod.rs b/packages/tracker-core/src/torrent/mod.rs index 8ee8fa6d3..01d33b893 100644 --- a/packages/tracker-core/src/torrent/mod.rs +++ b/packages/tracker-core/src/torrent/mod.rs @@ -166,16 +166,3 @@ pub mod manager; pub mod repository; pub mod services; - -#[cfg(test)] -use torrust_tracker_torrent_repository::EntryMutexStd; -use 
torrust_tracker_torrent_repository::TorrentsSkipMapMutexStd; - -/// Alias for the primary torrent collection type, implemented as a skip map -/// wrapped in a mutex. This type is used internally by the tracker to manage -/// and access torrent entries. -pub(crate) type Torrents = TorrentsSkipMapMutexStd; - -/// Alias for a single torrent entry. -#[cfg(test)] -pub(crate) type TorrentEntry = EntryMutexStd; diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 746de190f..be758f990 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -7,9 +7,7 @@ use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use torrust_tracker_torrent_repository::EntryMutexStd; - -use crate::torrent::Torrents; +use torrust_tracker_torrent_repository::{TorrentEntry, Torrents}; /// In-memory repository for torrent entries. /// @@ -66,7 +64,7 @@ impl InMemoryTorrentRepository { /// An `Option` containing the removed torrent entry if it existed. #[cfg(test)] #[must_use] - pub(crate) fn remove(&self, key: &InfoHash) -> Option { + pub(crate) fn remove(&self, key: &InfoHash) -> Option { self.torrents.remove(key) } @@ -106,7 +104,7 @@ impl InMemoryTorrentRepository { /// /// An `Option` containing the torrent entry if found. #[must_use] - pub(crate) fn get(&self, key: &InfoHash) -> Option { + pub(crate) fn get(&self, key: &InfoHash) -> Option { self.torrents.get(key) } @@ -122,9 +120,9 @@ impl InMemoryTorrentRepository { /// /// # Returns /// - /// A vector of `(InfoHash, EntryMutexStd)` tuples. + /// A vector of `(InfoHash, TorrentEntry)` tuples. 
#[must_use] - pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, TorrentEntry)> { self.torrents.get_paginated(pagination) } @@ -512,10 +510,10 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + use torrust_tracker_torrent_repository::TorrentEntry; use crate::test_helpers::tests::{sample_info_hash, sample_peer}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::torrent::TorrentEntry; /// `TorrentEntry` data is not directly accessible. It's only /// accessible through the trait methods. We need this temporary From 71aa8d039488004504b7e45034cf4e9bc35b38ec Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 1 May 2025 17:27:37 +0100 Subject: [PATCH 547/802] fix: [#1491] deadlock running tests I don't know why it was not happening before. The previous changes only: - Remove types aliases. - Remove generics. - Remove unused traits. However the concrete type for the repository should be the same after monomorphization. 
--- Cargo.lock | 1 + packages/torrent-repository/Cargo.toml | 1 + packages/torrent-repository/src/repository.rs | 12 ++++----- .../src/torrent/repository/in_memory.rs | 25 ++++++++----------- packages/tracker-core/src/torrent/services.rs | 10 +++++--- 5 files changed, 25 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index db6838e66..c301879f2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4846,6 +4846,7 @@ dependencies = [ "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-primitives", + "tracing", ] [[package]] diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 6fc5f483b..0a4fe5261 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -23,6 +23,7 @@ tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +tracing = "0" [dev-dependencies] async-std = { version = "1", features = ["attributes", "tokio1"] } diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index 117c2cff9..c08adbfae 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -42,12 +42,16 @@ impl TorrentsSkipMapMutexStd { opt_persistent_torrent: Option, ) -> bool { if let Some(existing_entry) = self.torrents.get(info_hash) { + tracing::debug!("Torrent already exists: {:?}", info_hash); + existing_entry .value() .lock() .expect("can't acquire lock for torrent entry") .upsert_peer(peer) } else { + tracing::debug!("Inserting new torrent: {:?}", info_hash); + let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { EntryMutexStd::new( Torrent { @@ -62,13 +66,9 @@ impl 
TorrentsSkipMapMutexStd { let inserted_entry = self.torrents.get_or_insert(*info_hash, new_entry); - let number_of_downloads_increased = inserted_entry - .value() - .lock() - .expect("can't acquire lock for torrent entry") - .upsert_peer(peer); + let mut torrent_guard = inserted_entry.value().lock().expect("can't acquire lock for torrent entry"); - number_of_downloads_increased + torrent_guard.upsert_peer(peer) } } diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index be758f990..d12919da8 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -528,20 +528,17 @@ mod tests { #[allow(clippy::from_over_into)] impl Into for TorrentEntry { fn into(self) -> TorrentEntryInfo { - TorrentEntryInfo { - swarm_metadata: self - .lock() - .expect("can't acquire lock for torrent entry") - .get_swarm_metadata(), - peers: self - .lock() - .expect("can't acquire lock for torrent entry") - .get_peers(None) - .iter() - .map(|peer| *peer.clone()) - .collect(), - number_of_peers: self.lock().expect("can't acquire lock for torrent entry").get_peers_len(), - } + let torrent_guard = self.lock().expect("can't acquire lock for torrent entry"); + + let torrent_entry_info = TorrentEntryInfo { + swarm_metadata: torrent_guard.get_swarm_metadata(), + peers: torrent_guard.get_peers(None).iter().map(|peer| *peer.clone()).collect(), + number_of_peers: torrent_guard.get_peers_len(), + }; + + drop(torrent_guard); + + torrent_entry_info } } diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index 2bf4bba71..30055b150 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -190,10 +190,12 @@ pub fn get_torrents(in_memory_torrent_repository: &Arc = vec![]; for info_hash in info_hashes { - if let Some(stats) = 
in_memory_torrent_repository - .get(info_hash) - .map(|t| t.lock().expect("can't acquire lock for torrent entry").get_swarm_metadata()) - { + if let Some(stats) = in_memory_torrent_repository.get(info_hash).map(|torrent_entry| { + torrent_entry + .lock() + .expect("can't acquire lock for torrent entry") + .get_swarm_metadata() + }) { basic_infos.push(BasicInfo { info_hash: *info_hash, seeders: u64::from(stats.complete), From 9be7c6857b5bc7d7e430c046632817ab96801d90 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 1 May 2025 18:21:28 +0100 Subject: [PATCH 548/802] refactor: [#1491] remove redundant type aliases --- packages/torrent-repository/src/entry/mod.rs | 4 ++++ packages/torrent-repository/src/lib.rs | 19 ++++--------------- packages/torrent-repository/src/repository.rs | 16 ++++++++-------- 3 files changed, 16 insertions(+), 23 deletions(-) diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs index 785672be5..5f8ccfcc5 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -1,2 +1,6 @@ pub mod peer_list; pub mod torrent; + +use std::sync::{Arc, Mutex}; + +pub type TorrentEntry = Arc>; diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index 865d819e3..b96858b82 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -1,23 +1,12 @@ -use std::sync::{Arc, Mutex}; - -use torrust_tracker_clock::clock; - pub mod entry; pub mod repository; -// Repo entry -pub type TorrentEntry = EntryMutexStd; - -// Repository -pub type Torrents = TorrentsSkipMapMutexStd; - -// The internal type of the entry -pub(crate) type EntryMutexStd = Arc>; +use torrust_tracker_clock::clock; -// The internal type of the repository -pub(crate) type TorrentsSkipMapMutexStd = repository::TorrentsSkipMapMutexStd; +pub type TorrentEntry = entry::TorrentEntry; +pub type Torrent = 
entry::torrent::Torrent; +pub type Torrents = repository::TorrentsSkipMapMutexStd; -/// This code needs to be copied into each crate. /// Working version, for production. #[cfg(not(test))] #[allow(dead_code)] diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index c08adbfae..ad0520c0c 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -7,11 +7,11 @@ use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent use crate::entry::peer_list::PeerList; use crate::entry::torrent::Torrent; -use crate::EntryMutexStd; +use crate::TorrentEntry; #[derive(Default, Debug)] pub struct TorrentsSkipMapMutexStd { - pub torrents: SkipMap, + pub torrents: SkipMap, } impl TorrentsSkipMapMutexStd { @@ -53,7 +53,7 @@ impl TorrentsSkipMapMutexStd { tracing::debug!("Inserting new torrent: {:?}", info_hash); let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { - EntryMutexStd::new( + TorrentEntry::new( Torrent { swarm: PeerList::default(), downloaded: number_of_downloads, @@ -61,7 +61,7 @@ impl TorrentsSkipMapMutexStd { .into(), ) } else { - EntryMutexStd::default() + TorrentEntry::default() }; let inserted_entry = self.torrents.get_or_insert(*info_hash, new_entry); @@ -85,7 +85,7 @@ impl TorrentsSkipMapMutexStd { }) } - pub fn get(&self, key: &InfoHash) -> Option { + pub fn get(&self, key: &InfoHash) -> Option { let maybe_entry = self.torrents.get(key); maybe_entry.map(|entry| entry.value().clone()) } @@ -107,7 +107,7 @@ impl TorrentsSkipMapMutexStd { metrics } - pub fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + pub fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, TorrentEntry)> { match pagination { Some(pagination) => self .torrents @@ -130,7 +130,7 @@ impl TorrentsSkipMapMutexStd { continue; } - let entry = EntryMutexStd::new( + let entry = 
TorrentEntry::new( Torrent { swarm: PeerList::default(), downloaded: *completed, @@ -144,7 +144,7 @@ impl TorrentsSkipMapMutexStd { } } - pub fn remove(&self, key: &InfoHash) -> Option { + pub fn remove(&self, key: &InfoHash) -> Option { self.torrents.remove(key).map(|entry| entry.value().clone()) } From df005336859eda533808a9a4d4047499b6550e9a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 1 May 2025 18:32:13 +0100 Subject: [PATCH 549/802] refactor: [#1491] rename main types in torrent-repository pkg --- packages/torrent-repository/src/entry/mod.rs | 4 --- .../torrent-repository/src/entry/torrent.rs | 4 +-- packages/torrent-repository/src/lib.rs | 8 +++-- packages/torrent-repository/src/repository.rs | 26 ++++++++-------- .../torrent-repository/tests/common/repo.rs | 14 ++++----- .../tests/common/torrent.rs | 6 ++-- .../torrent-repository/tests/entry/mod.rs | 6 ++-- .../tests/repository/mod.rs | 30 +++++++++---------- .../src/torrent/repository/in_memory.rs | 14 ++++----- 9 files changed, 55 insertions(+), 57 deletions(-) diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs index 5f8ccfcc5..785672be5 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -1,6 +1,2 @@ pub mod peer_list; pub mod torrent; - -use std::sync::{Arc, Mutex}; - -pub type TorrentEntry = Arc>; diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs index 8d09a140f..1cc0f7ba2 100644 --- a/packages/torrent-repository/src/entry/torrent.rs +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -16,7 +16,7 @@ use super::peer_list::PeerList; /// that's the list of all the peers trying to download the same torrent. /// The tracker keeps one entry like this for every torrent. 
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct Torrent { +pub struct TrackedTorrent { /// A network of peers that are all trying to download the torrent associated to this entry pub(crate) swarm: PeerList, @@ -24,7 +24,7 @@ pub struct Torrent { pub(crate) downloaded: u32, } -impl Torrent { +impl TrackedTorrent { #[allow(clippy::cast_possible_truncation)] #[must_use] pub fn get_swarm_metadata(&self) -> SwarmMetadata { diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index b96858b82..70ec23906 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -1,11 +1,13 @@ pub mod entry; pub mod repository; +use std::sync::{Arc, Mutex}; + use torrust_tracker_clock::clock; -pub type TorrentEntry = entry::TorrentEntry; -pub type Torrent = entry::torrent::Torrent; -pub type Torrents = repository::TorrentsSkipMapMutexStd; +pub type TorrentRepository = repository::TorrentRepository; +pub type TrackedTorrentHandle = Arc>; +pub type TrackedTorrent = entry::torrent::TrackedTorrent; /// Working version, for production. 
#[cfg(not(test))] diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index ad0520c0c..25163f4ec 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -6,15 +6,15 @@ use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMe use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use crate::entry::peer_list::PeerList; -use crate::entry::torrent::Torrent; -use crate::TorrentEntry; +use crate::entry::torrent::TrackedTorrent; +use crate::TrackedTorrentHandle; #[derive(Default, Debug)] -pub struct TorrentsSkipMapMutexStd { - pub torrents: SkipMap, +pub struct TorrentRepository { + pub torrents: SkipMap, } -impl TorrentsSkipMapMutexStd { +impl TorrentRepository { /// Upsert a peer into the swarm of a torrent. /// /// Optionally, it can also preset the number of downloads of the torrent @@ -53,15 +53,15 @@ impl TorrentsSkipMapMutexStd { tracing::debug!("Inserting new torrent: {:?}", info_hash); let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { - TorrentEntry::new( - Torrent { + TrackedTorrentHandle::new( + TrackedTorrent { swarm: PeerList::default(), downloaded: number_of_downloads, } .into(), ) } else { - TorrentEntry::default() + TrackedTorrentHandle::default() }; let inserted_entry = self.torrents.get_or_insert(*info_hash, new_entry); @@ -85,7 +85,7 @@ impl TorrentsSkipMapMutexStd { }) } - pub fn get(&self, key: &InfoHash) -> Option { + pub fn get(&self, key: &InfoHash) -> Option { let maybe_entry = self.torrents.get(key); maybe_entry.map(|entry| entry.value().clone()) } @@ -107,7 +107,7 @@ impl TorrentsSkipMapMutexStd { metrics } - pub fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, TorrentEntry)> { + pub fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, TrackedTorrentHandle)> { match pagination { 
Some(pagination) => self .torrents @@ -130,8 +130,8 @@ impl TorrentsSkipMapMutexStd { continue; } - let entry = TorrentEntry::new( - Torrent { + let entry = TrackedTorrentHandle::new( + TrackedTorrent { swarm: PeerList::default(), downloaded: *completed, } @@ -144,7 +144,7 @@ impl TorrentsSkipMapMutexStd { } } - pub fn remove(&self, key: &InfoHash) -> Option { + pub fn remove(&self, key: &InfoHash) -> Option { self.torrents.remove(key).map(|entry| entry.value().clone()) } diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs index 357b39776..0055f6bee 100644 --- a/packages/torrent-repository/tests/common/repo.rs +++ b/packages/torrent-repository/tests/common/repo.rs @@ -5,12 +5,12 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use torrust_tracker_torrent_repository::entry::torrent::Torrent; -use torrust_tracker_torrent_repository::Torrents; +use torrust_tracker_torrent_repository::entry::torrent::TrackedTorrent; +use torrust_tracker_torrent_repository::TorrentRepository; #[derive(Debug)] pub(crate) enum Repo { - SkipMapMutexStd(Torrents), + SkipMapMutexStd(TorrentRepository), } impl Repo { @@ -31,7 +31,7 @@ impl Repo { } } - pub(crate) fn get(&self, key: &InfoHash) -> Option { + pub(crate) fn get(&self, key: &InfoHash) -> Option { match self { Repo::SkipMapMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), } @@ -43,7 +43,7 @@ impl Repo { } } - pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, Torrent)> { + pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, TrackedTorrent)> { match self { Repo::SkipMapMutexStd(repo) => repo .get_paginated(pagination) @@ -59,7 +59,7 @@ impl 
Repo { } } - pub(crate) fn remove(&self, key: &InfoHash) -> Option { + pub(crate) fn remove(&self, key: &InfoHash) -> Option { match self { Repo::SkipMapMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), } @@ -77,7 +77,7 @@ impl Repo { } } - pub(crate) fn insert(&self, info_hash: &InfoHash, torrent: Torrent) -> Option { + pub(crate) fn insert(&self, info_hash: &InfoHash, torrent: TrackedTorrent) -> Option { match self { Repo::SkipMapMutexStd(repo) => { repo.torrents.insert(*info_hash, Arc::new(Mutex::new(torrent))); diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs index 1cca7740d..9fdabd136 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -4,12 +4,12 @@ use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; -use torrust_tracker_torrent_repository::{entry, TorrentEntry}; +use torrust_tracker_torrent_repository::{entry, TrackedTorrentHandle}; #[derive(Debug, Clone)] pub(crate) enum Torrent { - Single(entry::torrent::Torrent), - MutexStd(TorrentEntry), + Single(entry::torrent::TrackedTorrent), + MutexStd(TrackedTorrentHandle), } impl Torrent { diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index e04bd004d..27bb5f238 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -9,7 +9,7 @@ use torrust_tracker_clock::clock::{self, Time as _}; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::peer; use torrust_tracker_primitives::peer::Peer; -use torrust_tracker_torrent_repository::{entry, TorrentEntry}; +use torrust_tracker_torrent_repository::{entry, TrackedTorrentHandle}; use 
crate::common::torrent::Torrent; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; @@ -17,11 +17,11 @@ use crate::CurrentClock; #[fixture] fn single() -> Torrent { - Torrent::Single(entry::torrent::Torrent::default()) + Torrent::Single(entry::torrent::TrackedTorrent::default()) } #[fixture] fn mutex_std() -> Torrent { - Torrent::MutexStd(TorrentEntry::default()) + Torrent::MutexStd(TrackedTorrentHandle::default()) } #[fixture] diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index d997cfd7c..06ee1d622 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -8,18 +8,18 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::PersistentTorrents; -use torrust_tracker_torrent_repository::entry::torrent::Torrent; -use torrust_tracker_torrent_repository::Torrents; +use torrust_tracker_torrent_repository::entry::torrent::TrackedTorrent; +use torrust_tracker_torrent_repository::TorrentRepository; use crate::common::repo::Repo; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; #[fixture] fn skip_list_mutex_std() -> Repo { - Repo::SkipMapMutexStd(Torrents::default()) + Repo::SkipMapMutexStd(TorrentRepository::default()) } -type Entries = Vec<(InfoHash, Torrent)>; +type Entries = Vec<(InfoHash, TrackedTorrent)>; #[fixture] fn empty() -> Entries { @@ -28,26 +28,26 @@ fn empty() -> Entries { #[fixture] fn default() -> Entries { - vec![(InfoHash::default(), Torrent::default())] + vec![(InfoHash::default(), TrackedTorrent::default())] } #[fixture] fn started() -> Entries { - let mut torrent = Torrent::default(); + let mut torrent = TrackedTorrent::default(); torrent.upsert_peer(&a_started_peer(1)); vec![(InfoHash::default(), torrent)] } 
#[fixture] fn completed() -> Entries { - let mut torrent = Torrent::default(); + let mut torrent = TrackedTorrent::default(); torrent.upsert_peer(&a_completed_peer(2)); vec![(InfoHash::default(), torrent)] } #[fixture] fn downloaded() -> Entries { - let mut torrent = Torrent::default(); + let mut torrent = TrackedTorrent::default(); let mut peer = a_started_peer(3); torrent.upsert_peer(&peer); peer.event = AnnounceEvent::Completed; @@ -58,17 +58,17 @@ fn downloaded() -> Entries { #[fixture] fn three() -> Entries { - let mut started = Torrent::default(); + let mut started = TrackedTorrent::default(); let started_h = &mut DefaultHasher::default(); started.upsert_peer(&a_started_peer(1)); started.hash(started_h); - let mut completed = Torrent::default(); + let mut completed = TrackedTorrent::default(); let completed_h = &mut DefaultHasher::default(); completed.upsert_peer(&a_completed_peer(2)); completed.hash(completed_h); - let mut downloaded = Torrent::default(); + let mut downloaded = TrackedTorrent::default(); let downloaded_h = &mut DefaultHasher::default(); let mut downloaded_peer = a_started_peer(3); downloaded.upsert_peer(&downloaded_peer); @@ -86,10 +86,10 @@ fn three() -> Entries { #[fixture] fn many_out_of_order() -> Entries { - let mut entries: HashSet<(InfoHash, Torrent)> = HashSet::default(); + let mut entries: HashSet<(InfoHash, TrackedTorrent)> = HashSet::default(); for i in 0..408 { - let mut entry = Torrent::default(); + let mut entry = TrackedTorrent::default(); entry.upsert_peer(&a_started_peer(i)); entries.insert((InfoHash::from(&i), entry)); @@ -101,10 +101,10 @@ fn many_out_of_order() -> Entries { #[fixture] fn many_hashed_in_order() -> Entries { - let mut entries: BTreeMap = BTreeMap::default(); + let mut entries: BTreeMap = BTreeMap::default(); for i in 0..408 { - let mut entry = Torrent::default(); + let mut entry = TrackedTorrent::default(); entry.upsert_peer(&a_started_peer(i)); let hash: &mut DefaultHasher = &mut DefaultHasher::default(); 
diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index d12919da8..83715789c 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -7,7 +7,7 @@ use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use torrust_tracker_torrent_repository::{TorrentEntry, Torrents}; +use torrust_tracker_torrent_repository::{TorrentRepository, TrackedTorrentHandle}; /// In-memory repository for torrent entries. /// @@ -21,7 +21,7 @@ use torrust_tracker_torrent_repository::{TorrentEntry, Torrents}; #[derive(Debug, Default)] pub struct InMemoryTorrentRepository { /// The underlying in-memory data structure that stores torrent entries. - torrents: Arc, + torrents: Arc, } impl InMemoryTorrentRepository { @@ -64,7 +64,7 @@ impl InMemoryTorrentRepository { /// An `Option` containing the removed torrent entry if it existed. #[cfg(test)] #[must_use] - pub(crate) fn remove(&self, key: &InfoHash) -> Option { + pub(crate) fn remove(&self, key: &InfoHash) -> Option { self.torrents.remove(key) } @@ -104,7 +104,7 @@ impl InMemoryTorrentRepository { /// /// An `Option` containing the torrent entry if found. #[must_use] - pub(crate) fn get(&self, key: &InfoHash) -> Option { + pub(crate) fn get(&self, key: &InfoHash) -> Option { self.torrents.get(key) } @@ -122,7 +122,7 @@ impl InMemoryTorrentRepository { /// /// A vector of `(InfoHash, TorrentEntry)` tuples. 
#[must_use] - pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, TorrentEntry)> { + pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, TrackedTorrentHandle)> { self.torrents.get_paginated(pagination) } @@ -510,7 +510,7 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use torrust_tracker_torrent_repository::TorrentEntry; + use torrust_tracker_torrent_repository::TrackedTorrentHandle; use crate::test_helpers::tests::{sample_info_hash, sample_peer}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -526,7 +526,7 @@ mod tests { } #[allow(clippy::from_over_into)] - impl Into for TorrentEntry { + impl Into for TrackedTorrentHandle { fn into(self) -> TorrentEntryInfo { let torrent_guard = self.lock().expect("can't acquire lock for torrent entry"); From 21b18e4844a042269b6f2a2854c1b3b0ad07649d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 2 May 2025 11:22:01 +0100 Subject: [PATCH 550/802] refactor: [#1491] move functionality from InMemoryTorrentRepository to TorrentRepository InMemoryTorrentRepository is now a wrapper over TorrentRepository. It's planned to make the InMemoryTorrentRepository responsible for triggering events. 
--- Cargo.lock | 2 + packages/torrent-repository/Cargo.toml | 2 + packages/torrent-repository/src/lib.rs | 120 +++ packages/torrent-repository/src/repository.rs | 902 +++++++++++++++++- packages/tracker-core/src/announce_handler.rs | 2 +- packages/tracker-core/src/scrape_handler.rs | 2 +- packages/tracker-core/src/test_helpers.rs | 36 - .../src/torrent/repository/in_memory.rs | 709 +------------- 8 files changed, 994 insertions(+), 781 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c301879f2..02e674e95 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4841,11 +4841,13 @@ dependencies = [ "bittorrent-primitives", "criterion", "crossbeam-skiplist", + "rand 0.8.5", "rstest", "tokio", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-primitives", + "torrust-tracker-test-helpers", "tracing", ] diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 0a4fe5261..e584fadf4 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -28,4 +28,6 @@ tracing = "0" [dev-dependencies] async-std = { version = "1", features = ["attributes", "tokio1"] } criterion = { version = "0", features = ["async_tokio"] } +rand = "0" rstest = "0" +torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index 70ec23906..f2e2c643c 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -18,3 +18,123 @@ pub(crate) type CurrentClock = clock::Working; #[cfg(test)] #[allow(dead_code)] pub(crate) type CurrentClock = clock::Stopped; + +#[cfg(test)] +pub(crate) mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; + use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer::Peer; + use 
torrust_tracker_primitives::DurationSinceUnixEpoch; + + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. + #[must_use] + pub fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") + } + + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. + #[must_use] + pub fn sample_info_hash_one() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") + } + + /// # Panics + /// + /// Will panic if the string representation of the info hash is not a valid info hash. + #[must_use] + pub fn sample_info_hash_alphabetically_ordered_after_sample_info_hash_one() -> InfoHash { + "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") + } + + /// Sample peer whose state is not relevant for the tests. 
+ #[must_use] + pub fn sample_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } + + #[must_use] + pub fn sample_peer_one() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000001"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } + + #[must_use] + pub fn sample_peer_two() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000002"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8082), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } + + #[must_use] + pub fn seeder() -> Peer { + complete_peer() + } + + #[must_use] + pub fn leecher() -> Peer { + incomplete_peer() + } + + /// A peer that counts as `complete` is swarm metadata + /// IMPORTANT!: it only counts if the it has been announce at least once before + /// announcing the `AnnounceEvent::Completed` event. 
+ #[must_use] + pub fn complete_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } + } + + /// A peer that counts as `incomplete` is swarm metadata + #[must_use] + pub fn incomplete_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(1000), // Still bytes to download + event: AnnounceEvent::Started, + } + } +} diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index 25163f4ec..6c8a7c9b4 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use bittorrent_primitives::info_hash::InfoHash; use crossbeam_skiplist::SkipMap; use torrust_tracker_configuration::TrackerPolicy; @@ -47,7 +49,7 @@ impl TorrentRepository { existing_entry .value() .lock() - .expect("can't acquire lock for torrent entry") + .expect("can't acquire lock for tracked torrent handle") .upsert_peer(peer) } else { tracing::debug!("Inserting new torrent: {:?}", info_hash); @@ -66,47 +68,64 @@ impl TorrentRepository { let inserted_entry = self.torrents.get_or_insert(*info_hash, new_entry); - let mut torrent_guard = inserted_entry.value().lock().expect("can't acquire lock for torrent entry"); + let mut torrent_guard = inserted_entry + .value() + .lock() + .expect("can't acquire lock for tracked torrent handle"); torrent_guard.upsert_peer(peer) } } + /// Removes a torrent entry from 
the repository. + /// + /// # Returns + /// + /// An `Option` containing the removed torrent entry if it existed. + #[must_use] + pub fn remove(&self, key: &InfoHash) -> Option { + self.torrents.remove(key).map(|entry| entry.value().clone()) + } + + /// Removes inactive peers from all torrent entries. + /// + /// A peer is considered inactive if its last update timestamp is older than + /// the provided cutoff time. + /// /// # Panics /// /// This function panics if the lock for the entry cannot be obtained. - pub fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { - self.torrents.get(info_hash).map(|entry| { + pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + for entry in &self.torrents { entry .value() .lock() - .expect("can't acquire lock for torrent entry") - .get_swarm_metadata() - }) + .expect("can't acquire lock for tracked torrent handle") + .remove_inactive_peers(current_cutoff); + } } + /// Retrieves a tracked torrent handle by its infohash. + /// + /// # Returns + /// + /// An `Option` containing the tracked torrent handle if found. + #[must_use] pub fn get(&self, key: &InfoHash) -> Option { let maybe_entry = self.torrents.get(key); maybe_entry.map(|entry| entry.value().clone()) } - /// # Panics + /// Retrieves a paginated list of tracked torrent handles. /// - /// This function panics if the lock for the entry cannot be obtained. - pub fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); - - for entry in &self.torrents { - let stats = entry.value().lock().expect("it should get a lock").get_swarm_metadata(); - metrics.total_complete += u64::from(stats.complete); - metrics.total_downloaded += u64::from(stats.downloaded); - metrics.total_incomplete += u64::from(stats.incomplete); - metrics.total_torrents += 1; - } - - metrics - } - + /// This method returns a vector of tuples, each containing an infohash and + /// its associated tracked torrent handle. 
The pagination parameters + /// (offset and limit) can be used to control the size of the result set. + /// + /// # Returns + /// + /// A vector of `(InfoHash, TorrentEntry)` tuples. + #[must_use] pub fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, TrackedTorrentHandle)> { match pagination { Some(pagination) => self @@ -124,6 +143,132 @@ impl TorrentRepository { } } + /// Retrieves swarm metadata for a given torrent. + /// + /// # Returns + /// + /// A `SwarmMetadata` struct containing the aggregated torrent data if found. + /// + /// # Panics + /// + /// This function panics if the lock for the entry cannot be obtained. + #[must_use] + pub fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.torrents.get(info_hash).map(|entry| { + entry + .value() + .lock() + .expect("can't acquire lock for tracked torrent handle") + .get_swarm_metadata() + }) + } + + /// Retrieves swarm metadata for a given torrent. + /// + /// # Returns + /// + /// A `SwarmMetadata` struct containing the aggregated torrent data if it's + /// found or a zeroed metadata struct if not. + #[must_use] + pub fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> SwarmMetadata { + match self.get_swarm_metadata(info_hash) { + Some(swarm_metadata) => swarm_metadata, + None => SwarmMetadata::zeroed(), + } + } + + /// Retrieves torrent peers for a given torrent and client, excluding the + /// requesting client. + /// + /// This method filters out the client making the request (based on its + /// network address) and returns up to a maximum number of peers, defined by + /// the greater of the provided limit or the global `TORRENT_PEERS_LIMIT`. + /// + /// # Returns + /// + /// A vector of peers (wrapped in `Arc`) representing the active peers for + /// the torrent, excluding the requesting client. + /// + /// # Panics + /// + /// This function panics if the lock for the torrent entry cannot be obtained. 
+ #[must_use] + pub fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { + match self.get(info_hash) { + None => vec![], + Some(entry) => entry + .lock() + .expect("can't acquire lock for tracked torrent handle") + .get_peers_for_client(&peer.peer_addr, Some(limit)), + } + } + + /// Retrieves the list of peers for a given torrent. + /// + /// This method returns up to `TORRENT_PEERS_LIMIT` peers for the torrent + /// specified by the info-hash. + /// + /// # Returns + /// + /// A vector of peers (wrapped in `Arc`) representing the active peers for + /// the torrent. + /// + /// # Panics + /// + /// This function panics if the lock for the torrent entry cannot be obtained. + #[must_use] + pub fn get_torrent_peers(&self, info_hash: &InfoHash, limit: usize) -> Vec> { + match self.get(info_hash) { + None => vec![], + Some(entry) => entry + .lock() + .expect("can't acquire lock for tracked torrent handle") + .get_peers(Some(limit)), + } + } + + /// Removes torrent entries that have no active peers. + /// + /// Depending on the tracker policy, torrents without any peers may be + /// removed to conserve memory. + /// + /// # Panics + /// + /// This function panics if the lock for the entry cannot be obtained. + pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + for entry in &self.torrents { + if entry + .value() + .lock() + .expect("can't acquire lock for tracked torrent handle") + .meets_retaining_policy(policy) + { + continue; + } + + entry.remove(); + } + } + + /// Calculates and returns overall torrent metrics. + /// + /// The returned [`AggregateSwarmMetadata`] contains aggregate data such as + /// the total number of torrents, total complete (seeders), incomplete + /// (leechers), and downloaded counts. + /// + /// # Returns + /// + /// A [`AggregateSwarmMetadata`] struct with the aggregated metrics. 
+ #[must_use] + pub fn get_torrents_metrics(&self) -> AggregateSwarmMetadata { + self.get_metrics() + } + + /// Imports persistent torrent data into the in-memory repository. + /// + /// This method takes a set of persisted torrent entries (e.g., from a + /// database) and imports them into the in-memory repository for immediate + /// access. pub fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { for (info_hash, completed) in persistent_torrents { if self.torrents.contains_key(info_hash) { @@ -144,38 +289,707 @@ impl TorrentRepository { } } - pub fn remove(&self, key: &InfoHash) -> Option { - self.torrents.remove(key).map(|entry| entry.value().clone()) - } - + /// Calculates and returns overall torrent metrics. + /// + /// The returned [`AggregateSwarmMetadata`] contains aggregate data such as + /// the total number of torrents, total complete (seeders), incomplete + /// (leechers), and downloaded counts. + /// + /// # Returns + /// + /// A [`AggregateSwarmMetadata`] struct with the aggregated metrics. + /// /// # Panics /// /// This function panics if the lock for the entry cannot be obtained. - pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + #[must_use] + pub fn get_metrics(&self) -> AggregateSwarmMetadata { + let mut metrics = AggregateSwarmMetadata::default(); + for entry in &self.torrents { - entry + let stats = entry .value() .lock() - .expect("can't acquire lock for torrent entry") - .remove_inactive_peers(current_cutoff); + .expect("can't acquire lock for tracked torrent handle") + .get_swarm_metadata(); + metrics.total_complete += u64::from(stats.complete); + metrics.total_downloaded += u64::from(stats.downloaded); + metrics.total_incomplete += u64::from(stats.incomplete); + metrics.total_torrents += 1; } + + metrics } +} - /// # Panics - /// - /// This function panics if the lock for the entry cannot be obtained. 
- pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - for entry in &self.torrents { - if entry - .value() - .lock() - .expect("can't acquire lock for torrent entry") - .meets_retaining_policy(policy) - { - continue; +#[cfg(test)] +mod tests { + + mod the_in_memory_torrent_repository { + + use aquatic_udp_protocol::PeerId; + + /// It generates a peer id from a number where the number is the last + /// part of the peer ID. For example, for `12` it returns + /// `-qB00000000000000012`. + fn numeric_peer_id(two_digits_value: i32) -> PeerId { + // Format idx as a string with leading zeros, ensuring it has exactly 2 digits + let idx_str = format!("{two_digits_value:02}"); + + // Create the base part of the peer ID. + let base = b"-qB00000000000000000"; + + // Concatenate the base with idx bytes, ensuring the total length is 20 bytes. + let mut peer_id_bytes = [0u8; 20]; + peer_id_bytes[..base.len()].copy_from_slice(base); + peer_id_bytes[base.len() - idx_str.len()..].copy_from_slice(idx_str.as_bytes()); + + PeerId(peer_id_bytes) + } + + // The `TorrentRepository` has these responsibilities: + // - To maintain the peer lists for each torrent. + // - To maintain the the torrent entries, which contains all the info about the + // torrents, including the peer lists. + // - To return the torrent entries. + // - To return the peer lists for a given torrent. + // - To return the torrent metrics. + // - To return the swarm metadata for a given torrent. + // - To handle the persistence of the torrent entries. 
+ + mod maintaining_the_peer_lists { + + use std::sync::Arc; + + use crate::repository::TorrentRepository; + use crate::tests::{sample_info_hash, sample_peer}; + + #[tokio::test] + async fn it_should_add_the_first_peer_to_the_torrent_peer_list() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + let info_hash = sample_info_hash(); + + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + + assert!(in_memory_torrent_repository.get(&info_hash).is_some()); } - entry.remove(); + #[tokio::test] + async fn it_should_allow_adding_the_same_peer_twice_to_the_torrent_peer_list() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + let info_hash = sample_info_hash(); + + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + + assert!(in_memory_torrent_repository.get(&info_hash).is_some()); + } + } + + mod returning_peer_lists_for_a_torrent { + + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::peer::Peer; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::repository::tests::the_in_memory_torrent_repository::numeric_peer_id; + use crate::repository::TorrentRepository; + use crate::tests::{sample_info_hash, sample_peer}; + + #[tokio::test] + async fn it_should_return_the_peers_for_a_given_torrent() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); + + let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash, 74); + + assert_eq!(peers, 
vec![Arc::new(peer)]); + } + + #[tokio::test] + async fn it_should_return_an_empty_list_or_peers_for_a_non_existing_torrent() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + let peers = in_memory_torrent_repository.get_torrent_peers(&sample_info_hash(), 74); + + assert!(peers.is_empty()); + } + + #[tokio::test] + async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + let info_hash = sample_info_hash(); + + for idx in 1..=75 { + let peer = Peer { + peer_id: numeric_peer_id(idx), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + }; + + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); + } + + let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash, 74); + + assert_eq!(peers.len(), 74); + } + + mod excluding_the_client_peer { + + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; + use torrust_tracker_primitives::peer::Peer; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::repository::tests::the_in_memory_torrent_repository::numeric_peer_id; + use crate::repository::TorrentRepository; + use crate::tests::{sample_info_hash, sample_peer}; + + #[tokio::test] + async fn it_should_return_an_empty_peer_list_for_a_non_existing_torrent() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + let peers = + in_memory_torrent_repository.get_peers_for(&sample_info_hash(), &sample_peer(), TORRENT_PEERS_LIMIT); + + 
assert_eq!(peers, vec![]); + } + + #[tokio::test] + async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); + + let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); + + assert_eq!(peers, vec![]); + } + + #[tokio::test] + async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + let info_hash = sample_info_hash(); + + let excluded_peer = sample_peer(); + + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&info_hash, &excluded_peer, None); + + // Add 74 peers + for idx in 2..=75 { + let peer = Peer { + peer_id: numeric_peer_id(idx), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + }; + + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); + } + + let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); + + assert_eq!(peers.len(), 74); + } + } + } + + mod maintaining_the_torrent_entries { + + use std::ops::Add; + use std::sync::Arc; + use std::time::Duration; + + use bittorrent_primitives::info_hash::InfoHash; + use torrust_tracker_configuration::TrackerPolicy; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::repository::TorrentRepository; + use crate::tests::{sample_info_hash, sample_peer}; + + 
#[tokio::test] + async fn it_should_remove_a_torrent_entry() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + let info_hash = sample_info_hash(); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + + let _unused = in_memory_torrent_repository.remove(&info_hash); + + assert!(in_memory_torrent_repository.get(&info_hash).is_none()); + } + + #[tokio::test] + async fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + let info_hash = sample_info_hash(); + let mut peer = sample_peer(); + peer.updated = DurationSinceUnixEpoch::new(0, 0); + + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); + + // Cut off time is 1 second after the peer was updated + in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); + + assert!(!in_memory_torrent_repository + .get_torrent_peers(&info_hash, 74) + .contains(&Arc::new(peer))); + } + + fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + // Insert a sample peer for the torrent to force adding the torrent entry + let mut peer = sample_peer(); + peer.updated = DurationSinceUnixEpoch::new(0, 0); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(info_hash, &peer, None); + + // Remove the peer + in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); + + in_memory_torrent_repository + } + + #[tokio::test] + async fn it_should_remove_torrents_without_peers() { + let info_hash = sample_info_hash(); + + let in_memory_torrent_repository = initialize_repository_with_one_torrent_without_peers(&info_hash); + + let tracker_policy = TrackerPolicy { + remove_peerless_torrents: true, 
+ ..Default::default() + }; + + in_memory_torrent_repository.remove_peerless_torrents(&tracker_policy); + + assert!(in_memory_torrent_repository.get(&info_hash).is_none()); + } + } + mod returning_torrent_entries { + + use std::sync::Arc; + + use torrust_tracker_primitives::peer::Peer; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + + use crate::repository::TorrentRepository; + use crate::tests::{sample_info_hash, sample_peer}; + use crate::TrackedTorrentHandle; + + /// `TorrentEntry` data is not directly accessible. It's only + /// accessible through the trait methods. We need this temporary + /// DTO to write simple and more readable assertions. + #[derive(Debug, Clone, PartialEq)] + struct TorrentEntryInfo { + swarm_metadata: SwarmMetadata, + peers: Vec, + number_of_peers: usize, + } + + #[allow(clippy::from_over_into)] + impl Into for TrackedTorrentHandle { + fn into(self) -> TorrentEntryInfo { + let torrent_guard = self.lock().expect("can't acquire lock for tracked torrent handle"); + + let torrent_entry_info = TorrentEntryInfo { + swarm_metadata: torrent_guard.get_swarm_metadata(), + peers: torrent_guard.get_peers(None).iter().map(|peer| *peer.clone()).collect(), + number_of_peers: torrent_guard.get_peers_len(), + }; + + drop(torrent_guard); + + torrent_entry_info + } + } + + #[tokio::test] + async fn it_should_return_one_torrent_entry_by_infohash() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); + + let torrent_entry = in_memory_torrent_repository.get(&info_hash).unwrap(); + + assert_eq!( + TorrentEntryInfo { + swarm_metadata: SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0 + }, + peers: vec!(peer), + number_of_peers: 1 + }, + torrent_entry.into() + ); + } + + mod it_should_return_many_torrent_entries { + use 
std::sync::Arc; + + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + + use crate::repository::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; + use crate::repository::TorrentRepository; + use crate::tests::{sample_info_hash, sample_peer}; + + #[tokio::test] + async fn without_pagination() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); + + let torrent_entries = in_memory_torrent_repository.get_paginated(None); + + assert_eq!(torrent_entries.len(), 1); + + let torrent_entry = torrent_entries.first().unwrap().1.clone(); + + assert_eq!( + TorrentEntryInfo { + swarm_metadata: SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0 + }, + peers: vec!(peer), + number_of_peers: 1 + }, + torrent_entry.into() + ); + } + + mod with_pagination { + use std::sync::Arc; + + use torrust_tracker_primitives::pagination::Pagination; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + + use crate::repository::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; + use crate::repository::TorrentRepository; + use crate::tests::{ + sample_info_hash_alphabetically_ordered_after_sample_info_hash_one, sample_info_hash_one, + sample_peer_one, sample_peer_two, + }; + + #[tokio::test] + async fn it_should_return_the_first_page() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + // Insert one torrent entry + let info_hash_one = sample_info_hash_one(); + let peer_one = sample_peer_one(); + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); + + // Insert another torrent entry + let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); + let peer_two = 
sample_peer_two(); + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); + + // Get only the first page where page size is 1 + let torrent_entries = + in_memory_torrent_repository.get_paginated(Some(&Pagination { offset: 0, limit: 1 })); + + assert_eq!(torrent_entries.len(), 1); + + let torrent_entry = torrent_entries.first().unwrap().1.clone(); + + assert_eq!( + TorrentEntryInfo { + swarm_metadata: SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0 + }, + peers: vec!(peer_one), + number_of_peers: 1 + }, + torrent_entry.into() + ); + } + + #[tokio::test] + async fn it_should_return_the_second_page() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + // Insert one torrent entry + let info_hash_one = sample_info_hash_one(); + let peer_one = sample_peer_one(); + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); + + // Insert another torrent entry + let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); + let peer_two = sample_peer_two(); + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); + + // Get only the first page where page size is 1 + let torrent_entries = + in_memory_torrent_repository.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); + + assert_eq!(torrent_entries.len(), 1); + + let torrent_entry = torrent_entries.first().unwrap().1.clone(); + + assert_eq!( + TorrentEntryInfo { + swarm_metadata: SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0 + }, + peers: vec!(peer_two), + number_of_peers: 1 + }, + torrent_entry.into() + ); + } + + #[tokio::test] + async fn it_should_allow_changing_the_page_size() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + // Insert one torrent entry + let info_hash_one = sample_info_hash_one(); + let peer_one = 
sample_peer_one(); + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); + + // Insert another torrent entry + let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); + let peer_two = sample_peer_two(); + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); + + // Get only the first page where page size is 1 + let torrent_entries = + in_memory_torrent_repository.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); + + assert_eq!(torrent_entries.len(), 1); + } + } + } + } + + mod returning_aggregate_swarm_metadata { + + use std::sync::Arc; + + use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; + use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; + + use crate::repository::TorrentRepository; + use crate::tests::{complete_peer, leecher, sample_info_hash, seeder}; + + // todo: refactor to use test parametrization + + #[tokio::test] + async fn it_should_get_empty_aggregate_swarm_metadata_when_there_are_no_torrents() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); + + assert_eq!( + aggregate_swarm_metadata, + AggregateSwarmMetadata { + total_complete: 0, + total_downloaded: 0, + total_incomplete: 0, + total_torrents: 0 + } + ); + } + + #[tokio::test] + async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_leecher() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &leecher(), None); + + let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); + + assert_eq!( + aggregate_swarm_metadata, + AggregateSwarmMetadata { + total_complete: 0, + total_downloaded: 0, + total_incomplete: 1, + 
total_torrents: 1, + } + ); + } + + #[tokio::test] + async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_seeder() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &seeder(), None); + + let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); + + assert_eq!( + aggregate_swarm_metadata, + AggregateSwarmMetadata { + total_complete: 1, + total_downloaded: 0, + total_incomplete: 0, + total_torrents: 1, + } + ); + } + + #[tokio::test] + async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_completed_peer() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &complete_peer(), None); + + let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); + + assert_eq!( + aggregate_swarm_metadata, + AggregateSwarmMetadata { + total_complete: 1, + total_downloaded: 0, + total_incomplete: 0, + total_torrents: 1, + } + ); + } + + #[tokio::test] + async fn it_should_return_the_aggregate_swarm_metadata_when_there_are_multiple_torrents() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + let start_time = std::time::Instant::now(); + for i in 0..1_000_000 { + let _number_of_downloads_increased = + in_memory_torrent_repository.upsert_peer(&gen_seeded_infohash(&i), &leecher(), None); + } + let result_a = start_time.elapsed(); + + let start_time = std::time::Instant::now(); + let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); + let result_b = start_time.elapsed(); + + assert_eq!( + (aggregate_swarm_metadata), + (AggregateSwarmMetadata { + total_complete: 0, + total_downloaded: 0, + total_incomplete: 1_000_000, + total_torrents: 1_000_000, + }), + "{result_a:?} {result_b:?}" + ); 
+ } + } + + mod returning_swarm_metadata { + + use std::sync::Arc; + + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + + use crate::repository::TorrentRepository; + use crate::tests::{leecher, sample_info_hash}; + + #[tokio::test] + async fn it_should_get_swarm_metadata_for_an_existing_torrent() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + let infohash = sample_info_hash(); + + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&infohash, &leecher(), None); + + let swarm_metadata = in_memory_torrent_repository.get_swarm_metadata_or_default(&infohash); + + assert_eq!( + swarm_metadata, + SwarmMetadata { + complete: 0, + downloaded: 0, + incomplete: 1, + } + ); + } + + #[tokio::test] + async fn it_should_return_zeroed_swarm_metadata_for_a_non_existing_torrent() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + let swarm_metadata = in_memory_torrent_repository.get_swarm_metadata_or_default(&sample_info_hash()); + + assert_eq!(swarm_metadata, SwarmMetadata::zeroed()); + } + } + + mod handling_persistence { + + use std::sync::Arc; + + use torrust_tracker_primitives::PersistentTorrents; + + use crate::repository::TorrentRepository; + use crate::tests::sample_info_hash; + + #[tokio::test] + async fn it_should_allow_importing_persisted_torrent_entries() { + let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + + let infohash = sample_info_hash(); + + let mut persistent_torrents = PersistentTorrents::default(); + + persistent_torrents.insert(infohash, 1); + + in_memory_torrent_repository.import_persistent(&persistent_torrents); + + let swarm_metadata = in_memory_torrent_repository.get_swarm_metadata_or_default(&infohash); + + // Only the number of downloads is persisted. 
+ assert_eq!(swarm_metadata.downloaded, 1); + } } } } diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index ac70c6f86..76f28aafd 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -188,7 +188,7 @@ impl AnnounceHandler { .in_memory_torrent_repository .get_peers_for(info_hash, peer, peers_wanted.limit()); - let swarm_metadata = self.in_memory_torrent_repository.get_swarm_metadata(info_hash); + let swarm_metadata = self.in_memory_torrent_repository.get_swarm_metadata_or_default(info_hash); AnnounceData { peers, diff --git a/packages/tracker-core/src/scrape_handler.rs b/packages/tracker-core/src/scrape_handler.rs index 93b25dea6..5d78c7d90 100644 --- a/packages/tracker-core/src/scrape_handler.rs +++ b/packages/tracker-core/src/scrape_handler.rs @@ -112,7 +112,7 @@ impl ScrapeHandler { for info_hash in info_hashes { let swarm_metadata = match self.whitelist_authorization.authorize(info_hash).await { - Ok(()) => self.in_memory_torrent_repository.get_swarm_metadata(info_hash), + Ok(()) => self.in_memory_torrent_repository.get_swarm_metadata_or_default(info_hash), Err(_) => SwarmMetadata::zeroed(), }; scrape_data.add_file(info_hash, swarm_metadata); diff --git a/packages/tracker-core/src/test_helpers.rs b/packages/tracker-core/src/test_helpers.rs index 79904dec2..0d7ca012f 100644 --- a/packages/tracker-core/src/test_helpers.rs +++ b/packages/tracker-core/src/test_helpers.rs @@ -64,16 +64,6 @@ pub(crate) mod tests { .expect("String should be a valid info hash") } - /// # Panics - /// - /// Will panic if the string representation of the info hash is not a valid info hash. 
- #[must_use] - pub fn sample_info_hash_alphabetically_ordered_after_sample_info_hash_one() -> InfoHash { - "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1" // DevSkim: ignore DS173237 - .parse::() - .expect("String should be a valid info hash") - } - /// Sample peer whose state is not relevant for the tests. #[must_use] pub fn sample_peer() -> Peer { @@ -88,32 +78,6 @@ pub(crate) mod tests { } } - #[must_use] - pub fn sample_peer_one() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000001"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8081), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - } - } - - #[must_use] - pub fn sample_peer_two() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000002"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8082), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - } - } - #[must_use] pub fn seeder() -> Peer { complete_peer() diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 83715789c..f622e909f 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -140,14 +140,8 @@ impl InMemoryTorrentRepository { /// /// A `SwarmMetadata` struct containing the aggregated torrent data. 
#[must_use] - pub(crate) fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { - match self.torrents.get(info_hash) { - Some(torrent_entry) => torrent_entry - .lock() - .expect("can't acquire lock for torrent entry") - .get_swarm_metadata(), - None => SwarmMetadata::zeroed(), - } + pub(crate) fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> SwarmMetadata { + self.torrents.get_swarm_metadata_or_default(info_hash) } /// Retrieves torrent peers for a given torrent and client, excluding the @@ -169,13 +163,7 @@ impl InMemoryTorrentRepository { /// the torrent, excluding the requesting client. #[must_use] pub(crate) fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { - match self.torrents.get(info_hash) { - None => vec![], - Some(entry) => entry - .lock() - .expect("can't acquire lock for torrent entry") - .get_peers_for_client(&peer.peer_addr, Some(max(limit, TORRENT_PEERS_LIMIT))), - } + self.torrents.get_peers_for(info_hash, peer, max(limit, TORRENT_PEERS_LIMIT)) } /// Retrieves the list of peers for a given torrent. @@ -197,27 +185,22 @@ impl InMemoryTorrentRepository { /// This function panics if the lock for the torrent entry cannot be obtained. #[must_use] pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { - match self.torrents.get(info_hash) { - None => vec![], - Some(entry) => entry - .lock() - .expect("can't acquire lock for torrent entry") - .get_peers(Some(TORRENT_PEERS_LIMIT)), - } + // todo: pass the limit as an argument like `get_peers_for` + self.torrents.get_torrent_peers(info_hash, TORRENT_PEERS_LIMIT) } /// Calculates and returns overall torrent metrics. /// - /// The returned [`TorrentsMetrics`] contains aggregate data such as the - /// total number of torrents, total complete (seeders), incomplete (leechers), - /// and downloaded counts. 
+ /// The returned [`AggregateSwarmMetadata`] contains aggregate data such as + /// the total number of torrents, total complete (seeders), incomplete + /// (leechers), and downloaded counts. /// /// # Returns /// - /// A [`TorrentsMetrics`] struct with the aggregated metrics. + /// A [`AggregateSwarmMetadata`] struct with the aggregated metrics. #[must_use] pub fn get_torrents_metrics(&self) -> AggregateSwarmMetadata { - self.torrents.get_metrics() + self.torrents.get_torrents_metrics() } /// Imports persistent torrent data into the in-memory repository. @@ -232,675 +215,3 @@ impl InMemoryTorrentRepository { self.torrents.import_persistent(persistent_torrents); } } - -#[cfg(test)] -mod tests { - - mod the_in_memory_torrent_repository { - - use aquatic_udp_protocol::PeerId; - - /// It generates a peer id from a number where the number is the last - /// part of the peer ID. For example, for `12` it returns - /// `-qB00000000000000012`. - fn numeric_peer_id(two_digits_value: i32) -> PeerId { - // Format idx as a string with leading zeros, ensuring it has exactly 2 digits - let idx_str = format!("{two_digits_value:02}"); - - // Create the base part of the peer ID. - let base = b"-qB00000000000000000"; - - // Concatenate the base with idx bytes, ensuring the total length is 20 bytes. - let mut peer_id_bytes = [0u8; 20]; - peer_id_bytes[..base.len()].copy_from_slice(base); - peer_id_bytes[base.len() - idx_str.len()..].copy_from_slice(idx_str.as_bytes()); - - PeerId(peer_id_bytes) - } - - // The `InMemoryTorrentRepository` has these responsibilities: - // - To maintain the peer lists for each torrent. - // - To maintain the the torrent entries, which contains all the info about the - // torrents, including the peer lists. - // - To return the torrent entries. - // - To return the peer lists for a given torrent. - // - To return the torrent metrics. - // - To return the swarm metadata for a given torrent. - // - To handle the persistence of the torrent entries. 
- - mod maintaining_the_peer_lists { - - use std::sync::Arc; - - use crate::test_helpers::tests::{sample_info_hash, sample_peer}; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - - #[tokio::test] - async fn it_should_add_the_first_peer_to_the_torrent_peer_list() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); - - assert!(in_memory_torrent_repository.get(&info_hash).is_some()); - } - - #[tokio::test] - async fn it_should_allow_adding_the_same_peer_twice_to_the_torrent_peer_list() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); - - assert!(in_memory_torrent_repository.get(&info_hash).is_some()); - } - } - - mod returning_peer_lists_for_a_torrent { - - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; - - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use torrust_tracker_primitives::peer::Peer; - use torrust_tracker_primitives::DurationSinceUnixEpoch; - - use crate::test_helpers::tests::{sample_info_hash, sample_peer}; - use crate::torrent::repository::in_memory::tests::the_in_memory_torrent_repository::numeric_peer_id; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - - #[tokio::test] - async fn it_should_return_the_peers_for_a_given_torrent() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - let peer = sample_peer(); - - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, 
&peer, None); - - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); - - assert_eq!(peers, vec![Arc::new(peer)]); - } - - #[tokio::test] - async fn it_should_return_an_empty_list_or_peers_for_a_non_existing_torrent() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let peers = in_memory_torrent_repository.get_torrent_peers(&sample_info_hash()); - - assert!(peers.is_empty()); - } - - #[tokio::test] - async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - - for idx in 1..=75 { - let peer = Peer { - peer_id: numeric_peer_id(idx), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - }; - - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); - } - - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash); - - assert_eq!(peers.len(), 74); - } - - mod excluding_the_client_peer { - - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; - - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; - use torrust_tracker_primitives::peer::Peer; - use torrust_tracker_primitives::DurationSinceUnixEpoch; - - use crate::test_helpers::tests::{sample_info_hash, sample_peer}; - use crate::torrent::repository::in_memory::tests::the_in_memory_torrent_repository::numeric_peer_id; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - - #[tokio::test] - async fn it_should_return_an_empty_peer_list_for_a_non_existing_torrent() { - let 
in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let peers = - in_memory_torrent_repository.get_peers_for(&sample_info_hash(), &sample_peer(), TORRENT_PEERS_LIMIT); - - assert_eq!(peers, vec![]); - } - - #[tokio::test] - async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - let peer = sample_peer(); - - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); - - let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); - - assert_eq!(peers, vec![]); - } - - #[tokio::test] - async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - - let excluded_peer = sample_peer(); - - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash, &excluded_peer, None); - - // Add 74 peers - for idx in 2..=75 { - let peer = Peer { - peer_id: numeric_peer_id(idx), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, idx.try_into().unwrap())), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - }; - - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); - } - - let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); - - assert_eq!(peers.len(), 74); - } - } - } - - mod maintaining_the_torrent_entries { - - use std::ops::Add; - use std::sync::Arc; - use std::time::Duration; - - use 
bittorrent_primitives::info_hash::InfoHash; - use torrust_tracker_configuration::TrackerPolicy; - use torrust_tracker_primitives::DurationSinceUnixEpoch; - - use crate::test_helpers::tests::{sample_info_hash, sample_peer}; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - - #[tokio::test] - async fn it_should_remove_a_torrent_entry() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); - - let _unused = in_memory_torrent_repository.remove(&info_hash); - - assert!(in_memory_torrent_repository.get(&info_hash).is_none()); - } - - #[tokio::test] - async fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - let mut peer = sample_peer(); - peer.updated = DurationSinceUnixEpoch::new(0, 0); - - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); - - // Cut off time is 1 second after the peer was updated - in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); - - assert!(!in_memory_torrent_repository - .get_torrent_peers(&info_hash) - .contains(&Arc::new(peer))); - } - - fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - // Insert a sample peer for the torrent to force adding the torrent entry - let mut peer = sample_peer(); - peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(info_hash, &peer, None); - - // Remove the peer - in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); - - 
in_memory_torrent_repository - } - - #[tokio::test] - async fn it_should_remove_torrents_without_peers() { - let info_hash = sample_info_hash(); - - let in_memory_torrent_repository = initialize_repository_with_one_torrent_without_peers(&info_hash); - - let tracker_policy = TrackerPolicy { - remove_peerless_torrents: true, - ..Default::default() - }; - - in_memory_torrent_repository.remove_peerless_torrents(&tracker_policy); - - assert!(in_memory_torrent_repository.get(&info_hash).is_none()); - } - } - mod returning_torrent_entries { - - use std::sync::Arc; - - use torrust_tracker_primitives::peer::Peer; - use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use torrust_tracker_torrent_repository::TrackedTorrentHandle; - - use crate::test_helpers::tests::{sample_info_hash, sample_peer}; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - - /// `TorrentEntry` data is not directly accessible. It's only - /// accessible through the trait methods. We need this temporary - /// DTO to write simple and more readable assertions. 
- #[derive(Debug, Clone, PartialEq)] - struct TorrentEntryInfo { - swarm_metadata: SwarmMetadata, - peers: Vec, - number_of_peers: usize, - } - - #[allow(clippy::from_over_into)] - impl Into for TrackedTorrentHandle { - fn into(self) -> TorrentEntryInfo { - let torrent_guard = self.lock().expect("can't acquire lock for torrent entry"); - - let torrent_entry_info = TorrentEntryInfo { - swarm_metadata: torrent_guard.get_swarm_metadata(), - peers: torrent_guard.get_peers(None).iter().map(|peer| *peer.clone()).collect(), - number_of_peers: torrent_guard.get_peers_len(), - }; - - drop(torrent_guard); - - torrent_entry_info - } - } - - #[tokio::test] - async fn it_should_return_one_torrent_entry_by_infohash() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - let peer = sample_peer(); - - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); - - let torrent_entry = in_memory_torrent_repository.get(&info_hash).unwrap(); - - assert_eq!( - TorrentEntryInfo { - swarm_metadata: SwarmMetadata { - downloaded: 0, - complete: 1, - incomplete: 0 - }, - peers: vec!(peer), - number_of_peers: 1 - }, - torrent_entry.into() - ); - } - - mod it_should_return_many_torrent_entries { - use std::sync::Arc; - - use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - - use crate::test_helpers::tests::{sample_info_hash, sample_peer}; - use crate::torrent::repository::in_memory::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - - #[tokio::test] - async fn without_pagination() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let info_hash = sample_info_hash(); - let peer = sample_peer(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); - - let 
torrent_entries = in_memory_torrent_repository.get_paginated(None); - - assert_eq!(torrent_entries.len(), 1); - - let torrent_entry = torrent_entries.first().unwrap().1.clone(); - - assert_eq!( - TorrentEntryInfo { - swarm_metadata: SwarmMetadata { - downloaded: 0, - complete: 1, - incomplete: 0 - }, - peers: vec!(peer), - number_of_peers: 1 - }, - torrent_entry.into() - ); - } - - mod with_pagination { - use std::sync::Arc; - - use torrust_tracker_primitives::pagination::Pagination; - use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - - use crate::test_helpers::tests::{ - sample_info_hash_alphabetically_ordered_after_sample_info_hash_one, sample_info_hash_one, - sample_peer_one, sample_peer_two, - }; - use crate::torrent::repository::in_memory::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - - #[tokio::test] - async fn it_should_return_the_first_page() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - // Insert one torrent entry - let info_hash_one = sample_info_hash_one(); - let peer_one = sample_peer_one(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); - - // Insert another torrent entry - let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); - let peer_two = sample_peer_two(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); - - // Get only the first page where page size is 1 - let torrent_entries = - in_memory_torrent_repository.get_paginated(Some(&Pagination { offset: 0, limit: 1 })); - - assert_eq!(torrent_entries.len(), 1); - - let torrent_entry = torrent_entries.first().unwrap().1.clone(); - - assert_eq!( - TorrentEntryInfo { - swarm_metadata: SwarmMetadata { - downloaded: 0, - complete: 1, - incomplete: 0 - }, - peers: 
vec!(peer_one), - number_of_peers: 1 - }, - torrent_entry.into() - ); - } - - #[tokio::test] - async fn it_should_return_the_second_page() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - // Insert one torrent entry - let info_hash_one = sample_info_hash_one(); - let peer_one = sample_peer_one(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); - - // Insert another torrent entry - let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); - let peer_two = sample_peer_two(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); - - // Get only the first page where page size is 1 - let torrent_entries = - in_memory_torrent_repository.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); - - assert_eq!(torrent_entries.len(), 1); - - let torrent_entry = torrent_entries.first().unwrap().1.clone(); - - assert_eq!( - TorrentEntryInfo { - swarm_metadata: SwarmMetadata { - downloaded: 0, - complete: 1, - incomplete: 0 - }, - peers: vec!(peer_two), - number_of_peers: 1 - }, - torrent_entry.into() - ); - } - - #[tokio::test] - async fn it_should_allow_changing_the_page_size() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - // Insert one torrent entry - let info_hash_one = sample_info_hash_one(); - let peer_one = sample_peer_one(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); - - // Insert another torrent entry - let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); - let peer_two = sample_peer_two(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); - - // Get only the first page where page size is 1 - let torrent_entries = - 
in_memory_torrent_repository.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); - - assert_eq!(torrent_entries.len(), 1); - } - } - } - } - - mod returning_aggregate_swarm_metadata { - - use std::sync::Arc; - - use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; - - use crate::test_helpers::tests::{complete_peer, leecher, sample_info_hash, seeder}; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - - // todo: refactor to use test parametrization - - #[tokio::test] - async fn it_should_get_empty_aggregate_swarm_metadata_when_there_are_no_torrents() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); - - assert_eq!( - aggregate_swarm_metadata, - AggregateSwarmMetadata { - total_complete: 0, - total_downloaded: 0, - total_incomplete: 0, - total_torrents: 0 - } - ); - } - - #[tokio::test] - async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_leecher() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &leecher(), None); - - let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); - - assert_eq!( - aggregate_swarm_metadata, - AggregateSwarmMetadata { - total_complete: 0, - total_downloaded: 0, - total_incomplete: 1, - total_torrents: 1, - } - ); - } - - #[tokio::test] - async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_seeder() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &seeder(), None); - - let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); - - 
assert_eq!( - aggregate_swarm_metadata, - AggregateSwarmMetadata { - total_complete: 1, - total_downloaded: 0, - total_incomplete: 0, - total_torrents: 1, - } - ); - } - - #[tokio::test] - async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_completed_peer() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &complete_peer(), None); - - let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); - - assert_eq!( - aggregate_swarm_metadata, - AggregateSwarmMetadata { - total_complete: 1, - total_downloaded: 0, - total_incomplete: 0, - total_torrents: 1, - } - ); - } - - #[tokio::test] - async fn it_should_return_the_aggregate_swarm_metadata_when_there_are_multiple_torrents() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let start_time = std::time::Instant::now(); - for i in 0..1_000_000 { - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&gen_seeded_infohash(&i), &leecher(), None); - } - let result_a = start_time.elapsed(); - - let start_time = std::time::Instant::now(); - let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); - let result_b = start_time.elapsed(); - - assert_eq!( - (aggregate_swarm_metadata), - (AggregateSwarmMetadata { - total_complete: 0, - total_downloaded: 0, - total_incomplete: 1_000_000, - total_torrents: 1_000_000, - }), - "{result_a:?} {result_b:?}" - ); - } - } - - mod returning_swarm_metadata { - - use std::sync::Arc; - - use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - - use crate::test_helpers::tests::{leecher, sample_info_hash}; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - - #[tokio::test] - async fn it_should_get_swarm_metadata_for_an_existing_torrent() { - let in_memory_torrent_repository = 
Arc::new(InMemoryTorrentRepository::default()); - - let infohash = sample_info_hash(); - - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&infohash, &leecher(), None); - - let swarm_metadata = in_memory_torrent_repository.get_swarm_metadata(&infohash); - - assert_eq!( - swarm_metadata, - SwarmMetadata { - complete: 0, - downloaded: 0, - incomplete: 1, - } - ); - } - - #[tokio::test] - async fn it_should_return_zeroed_swarm_metadata_for_a_non_existing_torrent() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let swarm_metadata = in_memory_torrent_repository.get_swarm_metadata(&sample_info_hash()); - - assert_eq!(swarm_metadata, SwarmMetadata::zeroed()); - } - } - - mod handling_persistence { - - use std::sync::Arc; - - use torrust_tracker_primitives::PersistentTorrents; - - use crate::test_helpers::tests::sample_info_hash; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - - #[tokio::test] - async fn it_should_allow_importing_persisted_torrent_entries() { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - let infohash = sample_info_hash(); - - let mut persistent_torrents = PersistentTorrents::default(); - - persistent_torrents.insert(infohash, 1); - - in_memory_torrent_repository.import_persistent(&persistent_torrents); - - let swarm_metadata = in_memory_torrent_repository.get_swarm_metadata(&infohash); - - // Only the number of downloads is persisted. 
- assert_eq!(swarm_metadata.downloaded, 1); - } - } - } -} From 32acbb1dd48ad71cce2b050e6726ff440b969a00 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 2 May 2025 11:32:16 +0100 Subject: [PATCH 551/802] refactor: [#1491] rename method --- .../src/statistics/services.rs | 2 +- .../src/statistics/services.rs | 4 +-- packages/torrent-repository/src/repository.rs | 26 +++++-------------- .../torrent-repository/tests/common/repo.rs | 2 +- .../src/torrent/repository/in_memory.rs | 4 +-- .../src/statistics/services.rs | 2 +- .../src/statistics/services.rs | 2 +- 7 files changed, 14 insertions(+), 28 deletions(-) diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index e2fbfedd0..1c5890ea8 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -47,7 +47,7 @@ pub async fn get_metrics( in_memory_torrent_repository: Arc, stats_repository: Arc, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); + let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata(); let stats = stats_repository.get_stats().await; TrackerMetrics { diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 9489a5e3e..8d5b7514a 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -32,7 +32,7 @@ pub async fn get_metrics( http_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); + let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata(); let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); let http_stats = http_stats_repository.get_stats().await; let udp_server_stats = 
udp_server_stats_repository.get_stats().await; @@ -97,7 +97,7 @@ pub async fn get_labeled_metrics( udp_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> TrackerLabeledMetrics { - let _torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); + let _torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata(); let _udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); let http_stats = http_stats_repository.get_stats().await; diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index 6c8a7c9b4..f5c4d7129 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -250,20 +250,6 @@ impl TorrentRepository { } } - /// Calculates and returns overall torrent metrics. - /// - /// The returned [`AggregateSwarmMetadata`] contains aggregate data such as - /// the total number of torrents, total complete (seeders), incomplete - /// (leechers), and downloaded counts. - /// - /// # Returns - /// - /// A [`AggregateSwarmMetadata`] struct with the aggregated metrics. - #[must_use] - pub fn get_torrents_metrics(&self) -> AggregateSwarmMetadata { - self.get_metrics() - } - /// Imports persistent torrent data into the in-memory repository. /// /// This method takes a set of persisted torrent entries (e.g., from a @@ -303,7 +289,7 @@ impl TorrentRepository { /// /// This function panics if the lock for the entry cannot be obtained. 
#[must_use] - pub fn get_metrics(&self) -> AggregateSwarmMetadata { + pub fn get_aggregate_swarm_metadata(&self) -> AggregateSwarmMetadata { let mut metrics = AggregateSwarmMetadata::default(); for entry in &self.torrents { @@ -824,7 +810,7 @@ mod tests { async fn it_should_get_empty_aggregate_swarm_metadata_when_there_are_no_torrents() { let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); - let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); + let aggregate_swarm_metadata = in_memory_torrent_repository.get_aggregate_swarm_metadata(); assert_eq!( aggregate_swarm_metadata, @@ -844,7 +830,7 @@ mod tests { let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &leecher(), None); - let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); + let aggregate_swarm_metadata = in_memory_torrent_repository.get_aggregate_swarm_metadata(); assert_eq!( aggregate_swarm_metadata, @@ -864,7 +850,7 @@ mod tests { let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &seeder(), None); - let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); + let aggregate_swarm_metadata = in_memory_torrent_repository.get_aggregate_swarm_metadata(); assert_eq!( aggregate_swarm_metadata, @@ -884,7 +870,7 @@ mod tests { let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &complete_peer(), None); - let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); + let aggregate_swarm_metadata = in_memory_torrent_repository.get_aggregate_swarm_metadata(); assert_eq!( aggregate_swarm_metadata, @@ -909,7 +895,7 @@ mod tests { let result_a = start_time.elapsed(); let start_time = std::time::Instant::now(); - let aggregate_swarm_metadata = in_memory_torrent_repository.get_torrents_metrics(); + let aggregate_swarm_metadata = 
in_memory_torrent_repository.get_aggregate_swarm_metadata(); let result_b = start_time.elapsed(); assert_eq!( diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs index 0055f6bee..eb500114e 100644 --- a/packages/torrent-repository/tests/common/repo.rs +++ b/packages/torrent-repository/tests/common/repo.rs @@ -39,7 +39,7 @@ impl Repo { pub(crate) fn get_metrics(&self) -> AggregateSwarmMetadata { match self { - Repo::SkipMapMutexStd(repo) => repo.get_metrics(), + Repo::SkipMapMutexStd(repo) => repo.get_aggregate_swarm_metadata(), } } diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index f622e909f..e362b20c1 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -199,8 +199,8 @@ impl InMemoryTorrentRepository { /// /// A [`AggregateSwarmMetadata`] struct with the aggregated metrics. #[must_use] - pub fn get_torrents_metrics(&self) -> AggregateSwarmMetadata { - self.torrents.get_torrents_metrics() + pub fn get_aggregate_swarm_metadata(&self) -> AggregateSwarmMetadata { + self.torrents.get_aggregate_swarm_metadata() } /// Imports persistent torrent data into the in-memory repository. 
diff --git a/packages/udp-tracker-core/src/statistics/services.rs b/packages/udp-tracker-core/src/statistics/services.rs index aa10e4acd..c76f02040 100644 --- a/packages/udp-tracker-core/src/statistics/services.rs +++ b/packages/udp-tracker-core/src/statistics/services.rs @@ -63,7 +63,7 @@ pub async fn get_metrics( in_memory_torrent_repository: Arc, stats_repository: Arc, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); + let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata(); let stats = stats_repository.get_stats().await; TrackerMetrics { diff --git a/packages/udp-tracker-server/src/statistics/services.rs b/packages/udp-tracker-server/src/statistics/services.rs index 4db80c465..a2215067b 100644 --- a/packages/udp-tracker-server/src/statistics/services.rs +++ b/packages/udp-tracker-server/src/statistics/services.rs @@ -66,7 +66,7 @@ pub async fn get_metrics( ban_service: Arc>, stats_repository: Arc, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_torrents_metrics(); + let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata(); let stats = stats_repository.get_stats().await; let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); From 09bbef77d9c5ed0df86e9dd61a8a3736817effbb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 2 May 2025 11:45:09 +0100 Subject: [PATCH 552/802] refactor: [#1491] rename varaible --- packages/torrent-repository/src/repository.rs | 174 ++++++++---------- 1 file changed, 79 insertions(+), 95 deletions(-) diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index f5c4d7129..f6ede60de 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -352,25 +352,25 @@ mod tests { #[tokio::test] async fn it_should_add_the_first_peer_to_the_torrent_peer_list() { - let 
in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); let info_hash = sample_info_hash(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); - assert!(in_memory_torrent_repository.get(&info_hash).is_some()); + assert!(torrent_repository.get(&info_hash).is_some()); } #[tokio::test] async fn it_should_allow_adding_the_same_peer_twice_to_the_torrent_peer_list() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); let info_hash = sample_info_hash(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); - assert!(in_memory_torrent_repository.get(&info_hash).is_some()); + assert!(torrent_repository.get(&info_hash).is_some()); } } @@ -389,30 +389,30 @@ mod tests { #[tokio::test] async fn it_should_return_the_peers_for_a_given_torrent() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash, 74); + let peers = 
torrent_repository.get_torrent_peers(&info_hash, 74); assert_eq!(peers, vec![Arc::new(peer)]); } #[tokio::test] async fn it_should_return_an_empty_list_or_peers_for_a_non_existing_torrent() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); - let peers = in_memory_torrent_repository.get_torrent_peers(&sample_info_hash(), 74); + let peers = torrent_repository.get_torrent_peers(&sample_info_hash(), 74); assert!(peers.is_empty()); } #[tokio::test] async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); let info_hash = sample_info_hash(); @@ -427,10 +427,10 @@ mod tests { event: AnnounceEvent::Completed, }; - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); } - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash, 74); + let peers = torrent_repository.get_torrent_peers(&info_hash, 74); assert_eq!(peers.len(), 74); } @@ -451,38 +451,36 @@ mod tests { #[tokio::test] async fn it_should_return_an_empty_peer_list_for_a_non_existing_torrent() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); - let peers = - in_memory_torrent_repository.get_peers_for(&sample_info_hash(), &sample_peer(), TORRENT_PEERS_LIMIT); + let peers = torrent_repository.get_peers_for(&sample_info_hash(), &sample_peer(), TORRENT_PEERS_LIMIT); assert_eq!(peers, vec![]); } #[tokio::test] async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = 
Arc::new(TorrentRepository::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); - let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); + let peers = torrent_repository.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); assert_eq!(peers, vec![]); } #[tokio::test] async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); let info_hash = sample_info_hash(); let excluded_peer = sample_peer(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash, &excluded_peer, None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &excluded_peer, None); // Add 74 peers for idx in 2..=75 { @@ -496,10 +494,10 @@ mod tests { event: AnnounceEvent::Completed, }; - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); } - let peers = in_memory_torrent_repository.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); + let peers = torrent_repository.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); assert_eq!(peers.len(), 74); } @@ -521,62 +519,60 @@ mod tests { #[tokio::test] async fn it_should_remove_a_torrent_entry() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); let info_hash = sample_info_hash(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, 
&sample_peer(), None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); - let _unused = in_memory_torrent_repository.remove(&info_hash); + let _unused = torrent_repository.remove(&info_hash); - assert!(in_memory_torrent_repository.get(&info_hash).is_none()); + assert!(torrent_repository.get(&info_hash).is_none()); } #[tokio::test] async fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); let info_hash = sample_info_hash(); let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); // Cut off time is 1 second after the peer was updated - in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); + torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); - assert!(!in_memory_torrent_repository - .get_torrent_peers(&info_hash, 74) - .contains(&Arc::new(peer))); + assert!(!torrent_repository.get_torrent_peers(&info_hash, 74).contains(&Arc::new(peer))); } fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); // Insert a sample peer for the torrent to force adding the torrent entry let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(info_hash, &peer, None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(info_hash, &peer, None); // Remove the peer - 
in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); + torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); - in_memory_torrent_repository + torrent_repository } #[tokio::test] async fn it_should_remove_torrents_without_peers() { let info_hash = sample_info_hash(); - let in_memory_torrent_repository = initialize_repository_with_one_torrent_without_peers(&info_hash); + let torrent_repository = initialize_repository_with_one_torrent_without_peers(&info_hash); let tracker_policy = TrackerPolicy { remove_peerless_torrents: true, ..Default::default() }; - in_memory_torrent_repository.remove_peerless_torrents(&tracker_policy); + torrent_repository.remove_peerless_torrents(&tracker_policy); - assert!(in_memory_torrent_repository.get(&info_hash).is_none()); + assert!(torrent_repository.get(&info_hash).is_none()); } } mod returning_torrent_entries { @@ -619,14 +615,14 @@ mod tests { #[tokio::test] async fn it_should_return_one_torrent_entry_by_infohash() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &peer, None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); - let torrent_entry = in_memory_torrent_repository.get(&info_hash).unwrap(); + let torrent_entry = torrent_repository.get(&info_hash).unwrap(); assert_eq!( TorrentEntryInfo { @@ -653,13 +649,13 @@ mod tests { #[tokio::test] async fn without_pagination() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, 
&peer, None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); - let torrent_entries = in_memory_torrent_repository.get_paginated(None); + let torrent_entries = torrent_repository.get_paginated(None); assert_eq!(torrent_entries.len(), 1); @@ -694,23 +690,20 @@ mod tests { #[tokio::test] async fn it_should_return_the_first_page() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); // Get only the first page where page size is 1 - let torrent_entries = - in_memory_torrent_repository.get_paginated(Some(&Pagination { offset: 0, limit: 1 })); + let torrent_entries = torrent_repository.get_paginated(Some(&Pagination { offset: 0, limit: 1 })); assert_eq!(torrent_entries.len(), 1); @@ -732,23 +725,20 @@ mod tests { #[tokio::test] async fn it_should_return_the_second_page() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); + let 
_number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); // Get only the first page where page size is 1 - let torrent_entries = - in_memory_torrent_repository.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); + let torrent_entries = torrent_repository.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); assert_eq!(torrent_entries.len(), 1); @@ -770,23 +760,20 @@ mod tests { #[tokio::test] async fn it_should_allow_changing_the_page_size() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); // Get only the first page where page size is 1 - let torrent_entries = - in_memory_torrent_repository.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); + let torrent_entries = torrent_repository.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); 
assert_eq!(torrent_entries.len(), 1); } @@ -808,9 +795,9 @@ mod tests { #[tokio::test] async fn it_should_get_empty_aggregate_swarm_metadata_when_there_are_no_torrents() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); - let aggregate_swarm_metadata = in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata(); assert_eq!( aggregate_swarm_metadata, @@ -825,12 +812,11 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_leecher() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &leecher(), None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &leecher(), None); - let aggregate_swarm_metadata = in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata(); assert_eq!( aggregate_swarm_metadata, @@ -845,12 +831,11 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_seeder() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &seeder(), None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &seeder(), None); - let aggregate_swarm_metadata = in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata(); assert_eq!( aggregate_swarm_metadata, @@ -865,12 +850,11 @@ 
mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_completed_peer() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&sample_info_hash(), &complete_peer(), None); + let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &complete_peer(), None); - let aggregate_swarm_metadata = in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata(); assert_eq!( aggregate_swarm_metadata, @@ -885,17 +869,17 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_are_multiple_torrents() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); let start_time = std::time::Instant::now(); for i in 0..1_000_000 { let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&gen_seeded_infohash(&i), &leecher(), None); + torrent_repository.upsert_peer(&gen_seeded_infohash(&i), &leecher(), None); } let result_a = start_time.elapsed(); let start_time = std::time::Instant::now(); - let aggregate_swarm_metadata = in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata(); let result_b = start_time.elapsed(); assert_eq!( @@ -922,13 +906,13 @@ mod tests { #[tokio::test] async fn it_should_get_swarm_metadata_for_an_existing_torrent() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); let infohash = sample_info_hash(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&infohash, &leecher(), None); + let 
_number_of_downloads_increased = torrent_repository.upsert_peer(&infohash, &leecher(), None); - let swarm_metadata = in_memory_torrent_repository.get_swarm_metadata_or_default(&infohash); + let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&infohash); assert_eq!( swarm_metadata, @@ -942,9 +926,9 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_for_a_non_existing_torrent() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); - let swarm_metadata = in_memory_torrent_repository.get_swarm_metadata_or_default(&sample_info_hash()); + let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&sample_info_hash()); assert_eq!(swarm_metadata, SwarmMetadata::zeroed()); } @@ -961,7 +945,7 @@ mod tests { #[tokio::test] async fn it_should_allow_importing_persisted_torrent_entries() { - let in_memory_torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(TorrentRepository::default()); let infohash = sample_info_hash(); @@ -969,9 +953,9 @@ mod tests { persistent_torrents.insert(infohash, 1); - in_memory_torrent_repository.import_persistent(&persistent_torrents); + torrent_repository.import_persistent(&persistent_torrents); - let swarm_metadata = in_memory_torrent_repository.get_swarm_metadata_or_default(&infohash); + let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&infohash); // Only the number of downloads is persisted. 
assert_eq!(swarm_metadata.downloaded, 1); From 1f5d18fa001929caab10fd9d8089ca30874916bc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 2 May 2025 11:59:59 +0100 Subject: [PATCH 553/802] refactor: [#1491] remove duplicate code --- packages/torrent-repository/src/lib.rs | 12 +++- packages/torrent-repository/src/repository.rs | 56 +++++-------------- .../tests/common/torrent.rs | 30 +++------- packages/tracker-core/src/announce_handler.rs | 15 +---- packages/tracker-core/src/torrent/manager.rs | 4 +- packages/tracker-core/src/torrent/services.rs | 26 +++------ 6 files changed, 46 insertions(+), 97 deletions(-) diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index f2e2c643c..d7042a1fd 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -1,7 +1,7 @@ pub mod entry; pub mod repository; -use std::sync::{Arc, Mutex}; +use std::sync::{Arc, Mutex, MutexGuard}; use torrust_tracker_clock::clock; @@ -19,6 +19,16 @@ pub(crate) type CurrentClock = clock::Working; #[allow(dead_code)] pub(crate) type CurrentClock = clock::Stopped; +pub trait LockTrackedTorrent { + fn lock_or_panic(&self) -> MutexGuard<'_, TrackedTorrent>; +} + +impl LockTrackedTorrent for Arc> { + fn lock_or_panic(&self) -> MutexGuard<'_, TrackedTorrent> { + self.lock().expect("can't acquire lock for tracked torrent handle") + } +} + #[cfg(test)] pub(crate) mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index f6ede60de..8e67f2487 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -9,7 +9,7 @@ use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent use crate::entry::peer_list::PeerList; use crate::entry::torrent::TrackedTorrent; -use crate::TrackedTorrentHandle; +use crate::{LockTrackedTorrent, TrackedTorrentHandle}; 
#[derive(Default, Debug)] pub struct TorrentRepository { @@ -46,11 +46,7 @@ impl TorrentRepository { if let Some(existing_entry) = self.torrents.get(info_hash) { tracing::debug!("Torrent already exists: {:?}", info_hash); - existing_entry - .value() - .lock() - .expect("can't acquire lock for tracked torrent handle") - .upsert_peer(peer) + existing_entry.value().lock_or_panic().upsert_peer(peer) } else { tracing::debug!("Inserting new torrent: {:?}", info_hash); @@ -68,10 +64,7 @@ impl TorrentRepository { let inserted_entry = self.torrents.get_or_insert(*info_hash, new_entry); - let mut torrent_guard = inserted_entry - .value() - .lock() - .expect("can't acquire lock for tracked torrent handle"); + let mut torrent_guard = inserted_entry.value().lock_or_panic(); torrent_guard.upsert_peer(peer) } @@ -97,11 +90,7 @@ impl TorrentRepository { /// This function panics if the lock for the entry cannot be obtained. pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { for entry in &self.torrents { - entry - .value() - .lock() - .expect("can't acquire lock for tracked torrent handle") - .remove_inactive_peers(current_cutoff); + entry.value().lock_or_panic().remove_inactive_peers(current_cutoff); } } @@ -154,13 +143,9 @@ impl TorrentRepository { /// This function panics if the lock for the entry cannot be obtained. #[must_use] pub fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { - self.torrents.get(info_hash).map(|entry| { - entry - .value() - .lock() - .expect("can't acquire lock for tracked torrent handle") - .get_swarm_metadata() - }) + self.torrents + .get(info_hash) + .map(|entry| entry.value().lock_or_panic().get_swarm_metadata()) } /// Retrieves swarm metadata for a given torrent. 
@@ -196,10 +181,7 @@ impl TorrentRepository { pub fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { match self.get(info_hash) { None => vec![], - Some(entry) => entry - .lock() - .expect("can't acquire lock for tracked torrent handle") - .get_peers_for_client(&peer.peer_addr, Some(limit)), + Some(entry) => entry.lock_or_panic().get_peers_for_client(&peer.peer_addr, Some(limit)), } } @@ -220,10 +202,7 @@ impl TorrentRepository { pub fn get_torrent_peers(&self, info_hash: &InfoHash, limit: usize) -> Vec> { match self.get(info_hash) { None => vec![], - Some(entry) => entry - .lock() - .expect("can't acquire lock for tracked torrent handle") - .get_peers(Some(limit)), + Some(entry) => entry.lock_or_panic().get_peers(Some(limit)), } } @@ -237,12 +216,7 @@ impl TorrentRepository { /// This function panics if the lock for the entry cannot be obtained. pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { for entry in &self.torrents { - if entry - .value() - .lock() - .expect("can't acquire lock for tracked torrent handle") - .meets_retaining_policy(policy) - { + if entry.value().lock_or_panic().meets_retaining_policy(policy) { continue; } @@ -293,11 +267,7 @@ impl TorrentRepository { let mut metrics = AggregateSwarmMetadata::default(); for entry in &self.torrents { - let stats = entry - .value() - .lock() - .expect("can't acquire lock for tracked torrent handle") - .get_swarm_metadata(); + let stats = entry.value().lock_or_panic().get_swarm_metadata(); metrics.total_complete += u64::from(stats.complete); metrics.total_downloaded += u64::from(stats.downloaded); metrics.total_incomplete += u64::from(stats.incomplete); @@ -584,7 +554,7 @@ mod tests { use crate::repository::TorrentRepository; use crate::tests::{sample_info_hash, sample_peer}; - use crate::TrackedTorrentHandle; + use crate::{LockTrackedTorrent, TrackedTorrentHandle}; /// `TorrentEntry` data is not directly accessible. 
It's only /// accessible through the trait methods. We need this temporary @@ -599,7 +569,7 @@ mod tests { #[allow(clippy::from_over_into)] impl Into for TrackedTorrentHandle { fn into(self) -> TorrentEntryInfo { - let torrent_guard = self.lock().expect("can't acquire lock for tracked torrent handle"); + let torrent_guard = self.lock_or_panic(); let torrent_entry_info = TorrentEntryInfo { swarm_metadata: torrent_guard.get_swarm_metadata(), diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs index 9fdabd136..ffa3c6d71 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; -use torrust_tracker_torrent_repository::{entry, TrackedTorrentHandle}; +use torrust_tracker_torrent_repository::{entry, LockTrackedTorrent, TrackedTorrentHandle}; #[derive(Debug, Clone)] pub(crate) enum Torrent { @@ -16,68 +16,56 @@ impl Torrent { pub(crate) fn get_stats(&self) -> SwarmMetadata { match self { Torrent::Single(entry) => entry.get_swarm_metadata(), - Torrent::MutexStd(entry) => entry - .lock() - .expect("can't acquire lock for torrent entry") - .get_swarm_metadata(), + Torrent::MutexStd(entry) => entry.lock_or_panic().get_swarm_metadata(), } } pub(crate) fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { match self { Torrent::Single(entry) => entry.meets_retaining_policy(policy), - Torrent::MutexStd(entry) => entry - .lock() - .expect("can't acquire lock for torrent entry") - .meets_retaining_policy(policy), + Torrent::MutexStd(entry) => entry.lock_or_panic().meets_retaining_policy(policy), } } pub(crate) fn peers_is_empty(&self) -> bool { match self { Torrent::Single(entry) => entry.peers_is_empty(), - 
Torrent::MutexStd(entry) => entry.lock().expect("can't acquire lock for torrent entry").peers_is_empty(), + Torrent::MutexStd(entry) => entry.lock_or_panic().peers_is_empty(), } } pub(crate) fn get_peers_len(&self) -> usize { match self { Torrent::Single(entry) => entry.get_peers_len(), - Torrent::MutexStd(entry) => entry.lock().expect("can't acquire lock for torrent entry").get_peers_len(), + Torrent::MutexStd(entry) => entry.lock_or_panic().get_peers_len(), } } pub(crate) fn get_peers(&self, limit: Option) -> Vec> { match self { Torrent::Single(entry) => entry.get_peers(limit), - Torrent::MutexStd(entry) => entry.lock().expect("can't acquire lock for torrent entry").get_peers(limit), + Torrent::MutexStd(entry) => entry.lock_or_panic().get_peers(limit), } } pub(crate) fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { match self { Torrent::Single(entry) => entry.get_peers_for_client(client, limit), - Torrent::MutexStd(entry) => entry - .lock() - .expect("can't acquire lock for torrent entry") - .get_peers_for_client(client, limit), + Torrent::MutexStd(entry) => entry.lock_or_panic().get_peers_for_client(client, limit), } } pub(crate) fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { match self { Torrent::Single(entry) => entry.upsert_peer(peer), - Torrent::MutexStd(entry) => entry.lock().expect("can't acquire lock for torrent entry").upsert_peer(peer), + Torrent::MutexStd(entry) => entry.lock_or_panic().upsert_peer(peer), } } pub(crate) fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { match self { Torrent::Single(entry) => entry.remove_inactive_peers(current_cutoff), - Torrent::MutexStd(entry) => entry - .lock() - .expect("can't acquire lock for torrent entry") - .remove_inactive_peers(current_cutoff), + Torrent::MutexStd(entry) => entry.lock_or_panic().remove_inactive_peers(current_cutoff), } } } diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs 
index 76f28aafd..6174190dc 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -594,6 +594,7 @@ mod tests { use aquatic_udp_protocol::AnnounceEvent; use torrust_tracker_test_helpers::configuration; + use torrust_tracker_torrent_repository::LockTrackedTorrent; use crate::announce_handler::tests::the_announce_handler::peer_ip; use crate::announce_handler::{AnnounceHandler, PeersWanted}; @@ -656,20 +657,10 @@ mod tests { .expect("it should be able to get entry"); // It persists the number of completed peers. - assert_eq!( - torrent_entry - .lock() - .expect("can't acquire lock for torrent entry") - .get_swarm_metadata() - .downloaded, - 1 - ); + assert_eq!(torrent_entry.lock_or_panic().get_swarm_metadata().downloaded, 1); // It does not persist the peers - assert!(torrent_entry - .lock() - .expect("can't acquire lock for torrent entry") - .peers_is_empty()); + assert!(torrent_entry.lock_or_panic().peers_is_empty()); } } diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index a69f8282b..ae7c61741 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -110,6 +110,7 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Core; + use torrust_tracker_torrent_repository::LockTrackedTorrent; use super::{DatabasePersistentTorrentRepository, TorrentsManager}; use crate::databases::setup::initialize_database; @@ -163,8 +164,7 @@ mod tests { .in_memory_torrent_repository .get(&infohash) .unwrap() - .lock() - .expect("can't acquire lock for torrent entry") + .lock_or_panic() .get_swarm_metadata() .downloaded, 1 diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index 30055b150..37846b4e3 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -17,6 +17,7 @@ use std::sync::Arc; use 
bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::peer; +use torrust_tracker_torrent_repository::LockTrackedTorrent; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -98,15 +99,9 @@ pub fn get_torrent_info(in_memory_torrent_repository: &Arc = vec![]; for (info_hash, torrent_entry) in in_memory_torrent_repository.get_paginated(pagination) { - let stats = torrent_entry - .lock() - .expect("can't acquire lock for torrent entry") - .get_swarm_metadata(); + let stats = torrent_entry.lock_or_panic().get_swarm_metadata(); basic_infos.push(BasicInfo { info_hash, @@ -190,12 +182,10 @@ pub fn get_torrents(in_memory_torrent_repository: &Arc = vec![]; for info_hash in info_hashes { - if let Some(stats) = in_memory_torrent_repository.get(info_hash).map(|torrent_entry| { - torrent_entry - .lock() - .expect("can't acquire lock for torrent entry") - .get_swarm_metadata() - }) { + if let Some(stats) = in_memory_torrent_repository + .get(info_hash) + .map(|torrent_entry| torrent_entry.lock_or_panic().get_swarm_metadata()) + { basic_infos.push(BasicInfo { info_hash: *info_hash, seeders: u64::from(stats.complete), From 7215f6e4d9073a781dc2bf2d4a99a5b629530567 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 2 May 2025 12:30:37 +0100 Subject: [PATCH 554/802] refactor: [#1491] clean tests in torrent-repository Unneeded wrapper for TorrentRepository now that there is only one implementaion. 
--- .../torrent-repository/tests/common/mod.rs | 1 - .../torrent-repository/tests/common/repo.rs | 88 ------------------- .../tests/repository/mod.rs | 79 +++++++++++------ 3 files changed, 50 insertions(+), 118 deletions(-) delete mode 100644 packages/torrent-repository/tests/common/repo.rs diff --git a/packages/torrent-repository/tests/common/mod.rs b/packages/torrent-repository/tests/common/mod.rs index efdf7f742..e083a05cc 100644 --- a/packages/torrent-repository/tests/common/mod.rs +++ b/packages/torrent-repository/tests/common/mod.rs @@ -1,3 +1,2 @@ -pub mod repo; pub mod torrent; pub mod torrent_peer_builder; diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs deleted file mode 100644 index eb500114e..000000000 --- a/packages/torrent-repository/tests/common/repo.rs +++ /dev/null @@ -1,88 +0,0 @@ -use std::sync::{Arc, Mutex}; - -use bittorrent_primitives::info_hash::InfoHash; -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use torrust_tracker_torrent_repository::entry::torrent::TrackedTorrent; -use torrust_tracker_torrent_repository::TorrentRepository; - -#[derive(Debug)] -pub(crate) enum Repo { - SkipMapMutexStd(TorrentRepository), -} - -impl Repo { - pub(crate) fn upsert_peer( - &self, - info_hash: &InfoHash, - peer: &peer::Peer, - opt_persistent_torrent: Option, - ) -> bool { - match self { - Repo::SkipMapMutexStd(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), - } - } - - pub(crate) fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { - match self { - Repo::SkipMapMutexStd(repo) => repo.get_swarm_metadata(info_hash), - } - } - - pub(crate) fn get(&self, key: &InfoHash) -> Option { - match self { - 
Repo::SkipMapMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), - } - } - - pub(crate) fn get_metrics(&self) -> AggregateSwarmMetadata { - match self { - Repo::SkipMapMutexStd(repo) => repo.get_aggregate_swarm_metadata(), - } - } - - pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, TrackedTorrent)> { - match self { - Repo::SkipMapMutexStd(repo) => repo - .get_paginated(pagination) - .iter() - .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) - .collect(), - } - } - - pub(crate) fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - match self { - Repo::SkipMapMutexStd(repo) => repo.import_persistent(persistent_torrents), - } - } - - pub(crate) fn remove(&self, key: &InfoHash) -> Option { - match self { - Repo::SkipMapMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), - } - } - - pub(crate) fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - match self { - Repo::SkipMapMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), - } - } - - pub(crate) fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - match self { - Repo::SkipMapMutexStd(repo) => repo.remove_peerless_torrents(policy), - } - } - - pub(crate) fn insert(&self, info_hash: &InfoHash, torrent: TrackedTorrent) -> Option { - match self { - Repo::SkipMapMutexStd(repo) => { - repo.torrents.insert(*info_hash, Arc::new(Mutex::new(torrent))); - } - } - self.get(info_hash) - } -} diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 06ee1d622..9701fc53d 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -1,5 +1,6 @@ use std::collections::{BTreeMap, HashSet}; use std::hash::{DefaultHasher, Hash, Hasher}; +use std::sync::{Arc, Mutex}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use 
bittorrent_primitives::info_hash::InfoHash; @@ -9,14 +10,13 @@ use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::PersistentTorrents; use torrust_tracker_torrent_repository::entry::torrent::TrackedTorrent; -use torrust_tracker_torrent_repository::TorrentRepository; +use torrust_tracker_torrent_repository::{LockTrackedTorrent, TorrentRepository}; -use crate::common::repo::Repo; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; #[fixture] -fn skip_list_mutex_std() -> Repo { - Repo::SkipMapMutexStd(TorrentRepository::default()) +fn skip_list_mutex_std() -> TorrentRepository { + TorrentRepository::default() } type Entries = Vec<(InfoHash, TrackedTorrent)>; @@ -148,9 +148,10 @@ fn persistent_three() -> PersistentTorrents { t.iter().copied().collect() } -fn make(repo: &Repo, entries: &Entries) { +fn make(repo: &TorrentRepository, entries: &Entries) { for (info_hash, entry) in entries { - repo.insert(info_hash, entry.clone()); + let new = Arc::new(Mutex::new(entry.clone())); + repo.torrents.insert(*info_hash, new); } } @@ -199,13 +200,16 @@ fn policy_remove_persist() -> TrackerPolicy { #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_get_a_torrent_entry(#[values(skip_list_mutex_std())] repo: Repo, #[case] entries: Entries) { +async fn it_should_get_a_torrent_entry(#[values(skip_list_mutex_std())] repo: TorrentRepository, #[case] entries: Entries) { make(&repo, &entries); if let Some((info_hash, torrent)) = entries.first() { - assert_eq!(repo.get(info_hash), Some(torrent.clone())); + assert_eq!( + Some(repo.get(info_hash).unwrap().lock_or_panic().clone()), + Some(torrent.clone()) + ); } else { - assert_eq!(repo.get(&InfoHash::default()), None); + assert!(repo.get(&InfoHash::default()).is_none()); } } @@ -220,7 +224,7 @@ async fn 
it_should_get_a_torrent_entry(#[values(skip_list_mutex_std())] repo: Re #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( - #[values(skip_list_mutex_std())] repo: Repo, + #[values(skip_list_mutex_std())] repo: TorrentRepository, #[case] entries: Entries, many_out_of_order: Entries, ) { @@ -253,7 +257,7 @@ async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_get_paginated( - #[values(skip_list_mutex_std())] repo: Repo, + #[values(skip_list_mutex_std())] repo: TorrentRepository, #[case] entries: Entries, #[values(paginated_limit_zero(), paginated_limit_one(), paginated_limit_one_offset_one())] paginated: Pagination, ) { @@ -264,7 +268,15 @@ async fn it_should_get_paginated( match paginated { // it should return empty if limit is zero. - Pagination { limit: 0, .. } => assert_eq!(repo.get_paginated(Some(&paginated)), vec![]), + Pagination { limit: 0, .. } => { + let torrents: Vec<(InfoHash, TrackedTorrent)> = repo + .get_paginated(Some(&paginated)) + .iter() + .map(|(i, lock_tracked_torrent)| (*i, lock_tracked_torrent.lock_or_panic().clone())) + .collect(); + + assert_eq!(torrents, vec![]); + } // it should return a single entry if the limit is one. 
Pagination { limit: 1, offset: 0 } => { @@ -300,7 +312,7 @@ async fn it_should_get_paginated( #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_get_metrics(#[values(skip_list_mutex_std())] repo: Repo, #[case] entries: Entries) { +async fn it_should_get_metrics(#[values(skip_list_mutex_std())] repo: TorrentRepository, #[case] entries: Entries) { use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; make(&repo, &entries); @@ -316,7 +328,7 @@ async fn it_should_get_metrics(#[values(skip_list_mutex_std())] repo: Repo, #[ca metrics.total_downloaded += u64::from(stats.downloaded); } - assert_eq!(repo.get_metrics(), metrics); + assert_eq!(repo.get_aggregate_swarm_metadata(), metrics); } #[rstest] @@ -330,18 +342,18 @@ async fn it_should_get_metrics(#[values(skip_list_mutex_std())] repo: Repo, #[ca #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_import_persistent_torrents( - #[values(skip_list_mutex_std())] repo: Repo, + #[values(skip_list_mutex_std())] repo: TorrentRepository, #[case] entries: Entries, #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: PersistentTorrents, ) { make(&repo, &entries); - let mut downloaded = repo.get_metrics().total_downloaded; + let mut downloaded = repo.get_aggregate_swarm_metadata().total_downloaded; persistent_torrents.iter().for_each(|(_, d)| downloaded += u64::from(*d)); repo.import_persistent(&persistent_torrents); - assert_eq!(repo.get_metrics().total_downloaded, downloaded); + assert_eq!(repo.get_aggregate_swarm_metadata().total_downloaded, downloaded); for (entry, _) in persistent_torrents { assert!(repo.get(&entry).is_some()); @@ -358,18 +370,21 @@ async fn it_should_import_persistent_torrents( #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_remove_an_entry(#[values(skip_list_mutex_std())] repo: Repo, 
#[case] entries: Entries) { +async fn it_should_remove_an_entry(#[values(skip_list_mutex_std())] repo: TorrentRepository, #[case] entries: Entries) { make(&repo, &entries); for (info_hash, torrent) in entries { - assert_eq!(repo.get(&info_hash), Some(torrent.clone())); - assert_eq!(repo.remove(&info_hash), Some(torrent)); + assert_eq!( + Some(repo.get(&info_hash).unwrap().lock_or_panic().clone()), + Some(torrent.clone()) + ); + assert_eq!(Some(repo.remove(&info_hash).unwrap().lock_or_panic().clone()), Some(torrent)); - assert_eq!(repo.get(&info_hash), None); - assert_eq!(repo.remove(&info_hash), None); + assert!(repo.get(&info_hash).is_none()); + assert!(repo.remove(&info_hash).is_none()); } - assert_eq!(repo.get_metrics().total_torrents, 0); + assert_eq!(repo.get_aggregate_swarm_metadata().total_torrents, 0); } #[rstest] @@ -382,7 +397,7 @@ async fn it_should_remove_an_entry(#[values(skip_list_mutex_std())] repo: Repo, #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: Repo, #[case] entries: Entries) { +async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: TorrentRepository, #[case] entries: Entries) { use std::ops::Sub as _; use std::time::Duration; @@ -420,7 +435,7 @@ async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: // and verify there is an extra torrent entry. { repo.upsert_peer(&info_hash, &peer, None); - assert_eq!(repo.get_metrics().total_torrents, entries.len() as u64 + 1); + assert_eq!(repo.get_aggregate_swarm_metadata().total_torrents, entries.len() as u64 + 1); } // Insert the infohash and peer into the repository @@ -440,7 +455,8 @@ async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: // Verify that this new peer was inserted into the repository. 
{ - let entry = repo.get(&info_hash).expect("it_should_get_some"); + let lock_tracked_torrent = repo.get(&info_hash).expect("it_should_get_some"); + let entry = lock_tracked_torrent.lock_or_panic(); assert!(entry.get_peers(None).contains(&peer.into())); } @@ -451,7 +467,8 @@ async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: // Verify that the this peer was removed from the repository. { - let entry = repo.get(&info_hash).expect("it_should_get_some"); + let lock_tracked_torrent = repo.get(&info_hash).expect("it_should_get_some"); + let entry = lock_tracked_torrent.lock_or_panic(); assert!(!entry.get_peers(None).contains(&peer.into())); } } @@ -467,7 +484,7 @@ async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_remove_peerless_torrents( - #[values(skip_list_mutex_std())] repo: Repo, + #[values(skip_list_mutex_std())] repo: TorrentRepository, #[case] entries: Entries, #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, ) { @@ -475,7 +492,11 @@ async fn it_should_remove_peerless_torrents( repo.remove_peerless_torrents(&policy); - let torrents = repo.get_paginated(None); + let torrents: Vec<(InfoHash, TrackedTorrent)> = repo + .get_paginated(None) + .iter() + .map(|(i, lock_tracked_torrent)| (*i, lock_tracked_torrent.lock_or_panic().clone())) + .collect(); for (_, entry) in torrents { assert!(entry.meets_retaining_policy(&policy)); From 62e57ae27cf24d250bfef6414a2f22ccef7d9b72 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 2 May 2025 13:19:54 +0100 Subject: [PATCH 555/802] chore(deps): udpate dependencies ``` cargo update Updating crates.io index Locking 18 packages to latest compatible versions Updating async-executor v1.13.1 -> v1.13.2 Updating axum v0.8.3 -> v0.8.4 Updating bytemuck v1.22.0 -> v1.23.0 Updating cc v1.2.20 -> v1.2.21 Updating chrono v0.4.40 -> v0.4.41 
Updating hashbrown v0.15.2 -> v0.15.3 Updating miette v7.5.0 -> v7.6.0 Updating miette-derive v7.5.0 -> v7.6.0 Updating openssl-sys v0.9.107 -> v0.9.108 Updating rustix v1.0.5 -> v1.0.7 Updating sha2 v0.10.8 -> v0.10.9 Updating syn v2.0.100 -> v2.0.101 Updating synstructure v0.13.1 -> v0.13.2 Updating toml v0.8.20 -> v0.8.22 Updating toml_datetime v0.6.8 -> v0.6.9 Updating toml_edit v0.22.24 -> v0.22.26 Adding toml_write v0.1.1 Updating winnow v0.7.7 -> v0.7.8 ``` --- Cargo.lock | 177 ++++++++++++++++++++++++++++------------------------- 1 file changed, 92 insertions(+), 85 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 02e674e95..eea957f88 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -233,14 +233,15 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" +checksum = "bb812ffb58524bdd10860d7d974e2f01cc0950c2438a74ee5ec2e2280c6c4ffa" dependencies = [ "async-task", "concurrent-queue", "fastrand", "futures-lite", + "pin-project-lite", "slab", ] @@ -331,7 +332,7 @@ checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -357,9 +358,9 @@ checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "axum" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de45108900e1f9b9242f7f2e254aa3e2c029c921c258fe9e6b4217eeebd54288" +checksum = "021e862c184ae977658b36c4500f7feac3221ca5da43e3f25bd04ab6c79a29b5" dependencies = [ "axum-core", "axum-macros", @@ -454,7 +455,7 @@ checksum = "604fde5e028fea851ce1d8570bbdc034bec850d157f7569d10f347d06808c05c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -549,7 +550,7 @@ dependencies = [ "regex", 
"rustc-hash", "shlex", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -843,7 +844,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -912,9 +913,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.22.0" +version = "1.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6b1fc10dbac614ebc03540c9dbd60e83887fda27794998c6528f1782047d540" +checksum = "9134a6ef01ce4b366b50689c94f82c14bc72bc5d0386829828a2e2752ef7958c" [[package]] name = "byteorder" @@ -954,9 +955,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.20" +version = "1.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04da6a0d40b948dfc4fa8f5bbf402b0fc1a64a28dbf7d12ffd683550f2c1b63a" +checksum = "8691782945451c1c383942c4874dbe63814f61cb57ef773cda2972682b7bb3c0" dependencies = [ "jobserver", "libc", @@ -986,9 +987,9 @@ checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" -version = "0.4.40" +version = "0.4.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" +checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1076,7 +1077,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1307,7 +1308,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1318,7 +1319,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1362,7 +1363,7 @@ checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "unicode-xid", ] @@ -1374,7 
+1375,7 @@ checksum = "ccfae181bab5ab6c5478b2ccb69e4c68a02f8c3ec72f6616bfec9dbc599d2ee0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1401,7 +1402,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1648,7 +1649,7 @@ checksum = "e99b8b3c28ae0e84b604c75f721c21dc77afb3706076af5e8216d15fd1deaae3" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1660,7 +1661,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1672,7 +1673,7 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1760,7 +1761,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -1902,9 +1903,9 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" [[package]] name = "hashbrown" -version = "0.15.2" +version = "0.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3" dependencies = [ "allocator-api2", "equivalent", @@ -1917,7 +1918,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" dependencies = [ - "hashbrown 0.15.2", + "hashbrown 0.15.3", ] [[package]] @@ -2248,7 +2249,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -2296,7 +2297,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" dependencies = [ "equivalent", - "hashbrown 0.15.2", + "hashbrown 0.15.3", "serde", ] @@ -2522,7 +2523,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.2", + "hashbrown 0.15.3", ] [[package]] @@ -2539,9 +2540,9 @@ checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "miette" -version = "7.5.0" +version = "7.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a955165f87b37fd1862df2a59547ac542c77ef6d17c666f619d1ad22dd89484" +checksum = "5f98efec8807c63c752b5bd61f862c165c115b0a35685bdcfd9238c7aeb592b7" dependencies = [ "backtrace", "backtrace-ext", @@ -2553,19 +2554,18 @@ dependencies = [ "supports-unicode", "terminal_size", "textwrap", - "thiserror 1.0.69", "unicode-width 0.1.14", ] [[package]] name = "miette-derive" -version = "7.5.0" +version = "7.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf45bf44ab49be92fd1227a3be6fc6f617f1a337c06af54981048574d8783147" +checksum = "db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -2623,7 +2623,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -2673,7 +2673,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "termcolor", "thiserror 1.0.69", ] @@ -2872,7 +2872,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -2883,9 +2883,9 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-sys" -version = "0.9.107" 
+version = "0.9.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8288979acd84749c744a9014b4382d42b8f7b2592847b5afb2ed29e5d16ede07" +checksum = "e145e1651e858e820e4860f7b9c5e169bc1d8ce1c86043be79fa7b7634821847" dependencies = [ "cc", "libc", @@ -2956,7 +2956,7 @@ dependencies = [ "regex", "regex-syntax", "structmeta", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -2979,7 +2979,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -3202,7 +3202,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -3222,7 +3222,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "version_check", "yansi", ] @@ -3576,7 +3576,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.100", + "syn 2.0.101", "unicode-ident", ] @@ -3646,9 +3646,9 @@ dependencies = [ [[package]] name = "rustix" -version = "1.0.5" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d97817398dd4bb2e6da002002db259209759911da105da92bec29ccb12cf58bf" +checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" dependencies = [ "bitflags 2.9.0", "errno", @@ -3844,7 +3844,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -3891,7 +3891,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -3942,7 +3942,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -3958,9 +3958,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.8" +version = "0.10.9" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ "cfg-if", "cpufeatures", @@ -4055,7 +4055,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -4066,7 +4066,7 @@ checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -4119,9 +4119,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.100" +version = "2.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" dependencies = [ "proc-macro2", "quote", @@ -4139,13 +4139,13 @@ dependencies = [ [[package]] name = "synstructure" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -4201,7 +4201,7 @@ dependencies = [ "fastrand", "getrandom 0.3.2", "once_cell", - "rustix 1.0.5", + "rustix 1.0.7", "windows-sys 0.59.0", ] @@ -4220,7 +4220,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed" dependencies = [ - "rustix 1.0.5", + "rustix 1.0.7", "windows-sys 0.59.0", ] @@ -4295,7 +4295,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -4306,7 +4306,7 @@ checksum = 
"7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -4410,7 +4410,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -4474,9 +4474,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.20" +version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148" +checksum = "05ae329d1f08c4d17a59bed7ff5b5a769d062e64a62d34a3261b219e62cd5aae" dependencies = [ "serde", "serde_spanned", @@ -4486,26 +4486,33 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.22.24" +version = "0.22.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" +checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e" dependencies = [ "indexmap 2.9.0", "serde", "serde_spanned", "toml_datetime", + "toml_write", "winnow", ] +[[package]] +name = "toml_write" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfb942dfe1d8e29a7ee7fcbde5bd2b9a25fb89aa70caea2eba3bee836ff41076" + [[package]] name = "torrust-axum-health-check-api-server" version = "3.0.0-develop" @@ -4841,7 +4848,7 @@ dependencies = [ "bittorrent-primitives", "criterion", "crossbeam-skiplist", - "rand 0.8.5", + "rand 0.9.1", "rstest", "tokio", "torrust-tracker-clock", @@ -4973,7 +4980,7 @@ checksum = 
"395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -5212,7 +5219,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "wasm-bindgen-shared", ] @@ -5247,7 +5254,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5323,7 +5330,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -5334,7 +5341,7 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -5595,9 +5602,9 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6cb8234a863ea0e8cd7284fcdd4f145233eb00fee02bbdd9861aec44e6477bc5" +checksum = "9e27d6ad3dac991091e4d35de9ba2d2d00647c5d0fc26c5496dee55984ae111b" dependencies = [ "memchr", ] @@ -5639,7 +5646,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d65cbf2f12c15564212d48f4e3dfb87923d25d611f2aed18f4cb23f0413d89e" dependencies = [ "libc", - "rustix 1.0.5", + "rustix 1.0.7", ] [[package]] @@ -5668,7 +5675,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "synstructure", ] @@ -5699,7 +5706,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -5710,7 +5717,7 @@ checksum = 
"28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] @@ -5730,7 +5737,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", "synstructure", ] @@ -5759,7 +5766,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.101", ] [[package]] From cb51ec9b355f38fe1c604900a10c4728deea5005 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 5 May 2025 17:12:35 +0100 Subject: [PATCH 556/802] docs: [#1495] improve torrent-repository pkg readme --- packages/torrent-repository/README.md | 30 +++++++++------------------ 1 file changed, 10 insertions(+), 20 deletions(-) diff --git a/packages/torrent-repository/README.md b/packages/torrent-repository/README.md index ffc71f1d7..a8c55746b 100644 --- a/packages/torrent-repository/README.md +++ b/packages/torrent-repository/README.md @@ -2,26 +2,16 @@ A library to provide a torrent repository to the [Torrust Tracker](https://github.com/torrust/torrust-tracker). -## Benchmarking - -```console -cargo bench -p torrust-tracker-torrent-repository -``` - -Example partial output: - -```output - Running benches/repository_benchmark.rs (target/release/deps/repository_benchmark-a9b0013c8d09c3c3) -add_one_torrent/RwLockStd - time: [63.057 ns 63.242 ns 63.506 ns] -Found 12 outliers among 100 measurements (12.00%) - 2 (2.00%) low severe - 2 (2.00%) low mild - 2 (2.00%) high mild - 6 (6.00%) high severe -add_one_torrent/RwLockStdMutexStd - time: [62.505 ns 63.077 ns 63.817 ns] -``` +Its main responsibilities include: + +- Managing Torrent Entries: It stores, retrieves, and manages torrent entries, which are torrents being tracked. 
+- Persistence: It supports loading tracked torrents from persistent storage, ensuring that torrent data can be restored across restarts. +- Pagination and sorting: It provides paginated and stable/sorted access to torrent entries. +- Peer management: It manages peers associated with torrents, including removing inactive peers and handling torrents with no peers (peerless torrents). +- Policy handling: It supports different policies for handling torrents, such as persisting, removing, or custom policies for torrents with no peers. +- Metrics: It can provide metrics about the torrents, such as counts or statuses, likely for monitoring or statistics. + +This repo is a core component for managing the state and lifecycle of torrents and their peers in a BitTorrent tracker, with peer management, and flexible policies. ## Documentation From 15c14c50268c8b4567ab7d26503a11432c17bc9d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 5 May 2025 17:29:21 +0100 Subject: [PATCH 557/802] refactor: [#1495] rename PeerList to Swarm --- packages/torrent-repository/src/entry/mod.rs | 2 +- .../src/entry/{peer_list.rs => swarm.rs} | 112 +++++++++--------- .../torrent-repository/src/entry/torrent.rs | 4 +- packages/torrent-repository/src/repository.rs | 6 +- 4 files changed, 63 insertions(+), 61 deletions(-) rename packages/torrent-repository/src/entry/{peer_list.rs => swarm.rs} (68%) diff --git a/packages/torrent-repository/src/entry/peer_list.rs b/packages/torrent-repository/src/entry/swarm.rs similarity index 68% rename from packages/torrent-repository/src/entry/peer_list.rs rename to packages/torrent-repository/src/entry/swarm.rs index 33270cf27..0395361a3 100644 ---
a/packages/torrent-repository/src/entry/peer_list.rs +++ b/packages/torrent-repository/src/entry/swarm.rs @@ -1,9 +1,11 @@ //! A peer list. +use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::PeerId; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; +use torrust_tracker_primitives::peer::{self, Peer}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; // code-review: the current implementation uses the peer Id as the ``BTreeMap`` // key. That would allow adding two identical peers except for the Id. @@ -11,11 +13,11 @@ use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; // would be allowed. That would lead to duplicated peers in the tracker responses. #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct PeerList { - peers: std::collections::BTreeMap>, +pub struct Swarm { + peers: BTreeMap>, } -impl PeerList { +impl Swarm { #[must_use] pub fn len(&self) -> usize { self.peers.len() @@ -94,193 +96,193 @@ mod tests { use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::entry::peer_list::PeerList; + use crate::entry::swarm::Swarm; #[test] fn be_empty_when_no_peers_have_been_inserted() { - let peer_list = PeerList::default(); + let swarm = Swarm::default(); - assert!(peer_list.is_empty()); + assert!(swarm.is_empty()); } #[test] fn have_zero_length_when_no_peers_have_been_inserted() { - let peer_list = PeerList::default(); + let swarm = Swarm::default(); - assert_eq!(peer_list.len(), 0); + assert_eq!(swarm.len(), 0); } #[test] fn allow_inserting_a_new_peer() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let peer = PeerBuilder::default().build(); - assert_eq!(peer_list.upsert(peer.into()), None); + assert_eq!(swarm.upsert(peer.into()), None); } #[test] fn allow_updating_a_preexisting_peer() { - let mut peer_list = PeerList::default(); + let mut swarm 
= Swarm::default(); let peer = PeerBuilder::default().build(); - peer_list.upsert(peer.into()); + swarm.upsert(peer.into()); - assert_eq!(peer_list.upsert(peer.into()), Some(Arc::new(peer))); + assert_eq!(swarm.upsert(peer.into()), Some(Arc::new(peer))); } #[test] fn allow_getting_all_peers() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let peer = PeerBuilder::default().build(); - peer_list.upsert(peer.into()); + swarm.upsert(peer.into()); - assert_eq!(peer_list.get_all(None), [Arc::new(peer)]); + assert_eq!(swarm.get_all(None), [Arc::new(peer)]); } #[test] fn allow_getting_one_peer_by_id() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let peer = PeerBuilder::default().build(); - peer_list.upsert(peer.into()); + swarm.upsert(peer.into()); - assert_eq!(peer_list.get(&peer.peer_id), Some(Arc::new(peer)).as_ref()); + assert_eq!(swarm.get(&peer.peer_id), Some(Arc::new(peer)).as_ref()); } #[test] fn increase_the_number_of_peers_after_inserting_a_new_one() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let peer = PeerBuilder::default().build(); - peer_list.upsert(peer.into()); + swarm.upsert(peer.into()); - assert_eq!(peer_list.len(), 1); + assert_eq!(swarm.len(), 1); } #[test] fn decrease_the_number_of_peers_after_removing_one() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let peer = PeerBuilder::default().build(); - peer_list.upsert(peer.into()); + swarm.upsert(peer.into()); - peer_list.remove(&peer.peer_id); + swarm.remove(&peer.peer_id); - assert!(peer_list.is_empty()); + assert!(swarm.is_empty()); } #[test] fn allow_removing_an_existing_peer() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let peer = PeerBuilder::default().build(); - peer_list.upsert(peer.into()); + swarm.upsert(peer.into()); - peer_list.remove(&peer.peer_id); + swarm.remove(&peer.peer_id); - 
assert_eq!(peer_list.get(&peer.peer_id), None); + assert_eq!(swarm.get(&peer.peer_id), None); } #[test] fn allow_getting_all_peers_excluding_peers_with_a_given_address() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let peer1 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - peer_list.upsert(peer1.into()); + swarm.upsert(peer1.into()); let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); - peer_list.upsert(peer2.into()); + swarm.upsert(peer2.into()); - assert_eq!(peer_list.get_peers_excluding_addr(&peer2.peer_addr, None), [Arc::new(peer1)]); + assert_eq!(swarm.get_peers_excluding_addr(&peer2.peer_addr, None), [Arc::new(peer1)]); } #[test] fn return_the_number_of_seeders_in_the_list() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - peer_list.upsert(seeder.into()); - peer_list.upsert(leecher.into()); + swarm.upsert(seeder.into()); + swarm.upsert(leecher.into()); - let (seeders, _leechers) = peer_list.seeders_and_leechers(); + let (seeders, _leechers) = swarm.seeders_and_leechers(); assert_eq!(seeders, 1); } #[test] fn return_the_number_of_leechers_in_the_list() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - peer_list.upsert(seeder.into()); - peer_list.upsert(leecher.into()); + swarm.upsert(seeder.into()); + swarm.upsert(leecher.into()); - let (_seeders, leechers) = peer_list.seeders_and_leechers(); + let (_seeders, leechers) = swarm.seeders_and_leechers(); assert_eq!(leechers, 1); } #[test] fn remove_inactive_peers() { - let mut peer_list = 
PeerList::default(); + let mut swarm = Swarm::default(); let one_second = DurationSinceUnixEpoch::new(1, 0); // Insert the peer let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - peer_list.upsert(peer.into()); + swarm.upsert(peer.into()); // Remove peers not updated since one second after inserting the peer - peer_list.remove_inactive_peers(last_update_time + one_second); + swarm.remove_inactive_peers(last_update_time + one_second); - assert_eq!(peer_list.len(), 0); + assert_eq!(swarm.len(), 0); } #[test] fn not_remove_active_peers() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let one_second = DurationSinceUnixEpoch::new(1, 0); // Insert the peer let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - peer_list.upsert(peer.into()); + swarm.upsert(peer.into()); // Remove peers not updated since one second before inserting the peer. 
- peer_list.remove_inactive_peers(last_update_time - one_second); + swarm.remove_inactive_peers(last_update_time - one_second); - assert_eq!(peer_list.len(), 1); + assert_eq!(swarm.len(), 1); } #[test] fn allow_inserting_two_identical_peers_except_for_the_id() { - let mut peer_list = PeerList::default(); + let mut swarm = Swarm::default(); let peer1 = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); - peer_list.upsert(peer1.into()); + swarm.upsert(peer1.into()); let peer2 = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000002")).build(); - peer_list.upsert(peer2.into()); + swarm.upsert(peer2.into()); - assert_eq!(peer_list.len(), 2); + assert_eq!(swarm.len(), 2); } } } diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs index 1cc0f7ba2..48f1a2df1 100644 --- a/packages/torrent-repository/src/entry/torrent.rs +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -8,7 +8,7 @@ use torrust_tracker_primitives::peer::{self}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; -use super::peer_list::PeerList; +use super::swarm::Swarm; /// A data structure containing all the information about a torrent in the tracker. 
/// @@ -18,7 +18,7 @@ use super::peer_list::PeerList; #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct TrackedTorrent { /// A network of peers that are all trying to download the torrent associated to this entry - pub(crate) swarm: PeerList, + pub(crate) swarm: Swarm, /// The number of peers that have ever completed downloading the torrent associated to this entry pub(crate) downloaded: u32, diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index 8e67f2487..0c387071c 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -7,7 +7,7 @@ use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use crate::entry::peer_list::PeerList; +use crate::entry::swarm::Swarm; use crate::entry::torrent::TrackedTorrent; use crate::{LockTrackedTorrent, TrackedTorrentHandle}; @@ -53,7 +53,7 @@ impl TorrentRepository { let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { TrackedTorrentHandle::new( TrackedTorrent { - swarm: PeerList::default(), + swarm: Swarm::default(), downloaded: number_of_downloads, } .into(), @@ -237,7 +237,7 @@ impl TorrentRepository { let entry = TrackedTorrentHandle::new( TrackedTorrent { - swarm: PeerList::default(), + swarm: Swarm::default(), downloaded: *completed, } .into(), From 2882705fbab880ae57cebad4944e6d2452eb63fd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 5 May 2025 20:57:27 +0100 Subject: [PATCH 558/802] refactor: [#1495] use SocketAddr as key for peers in Swarm This change prevents duplicate peers with the same address but different IDs, ensuring more accurate peer tracking. 
--- .../tests/server/asserts.rs | 1 + .../tests/server/requests/announce.rs | 5 ++ .../tests/server/v1/contract.rs | 46 +++++++++++--- .../torrent-repository/src/entry/swarm.rs | 62 ++++++++++++------- .../torrent-repository/src/entry/torrent.rs | 2 +- .../tests/common/torrent_peer_builder.rs | 18 +++++- .../torrent-repository/tests/entry/mod.rs | 3 +- packages/tracker-core/src/lib.rs | 8 +-- packages/tracker-core/src/test_helpers.rs | 6 +- 9 files changed, 109 insertions(+), 42 deletions(-) diff --git a/packages/axum-http-tracker-server/tests/server/asserts.rs b/packages/axum-http-tracker-server/tests/server/asserts.rs index 7ab8d93e5..a82014e16 100644 --- a/packages/axum-http-tracker-server/tests/server/asserts.rs +++ b/packages/axum-http-tracker-server/tests/server/asserts.rs @@ -22,6 +22,7 @@ pub fn assert_bencoded_error(response_text: &String, expected_failure_reason: &s ); } +#[allow(dead_code)] pub async fn assert_empty_announce_response(response: Response) { assert_eq!(response.status(), 200); let announce_response: Announce = serde_bencode::from_str(&response.text().await.unwrap()).unwrap(); diff --git a/packages/axum-http-tracker-server/tests/server/requests/announce.rs b/packages/axum-http-tracker-server/tests/server/requests/announce.rs index 0775de7e4..5a670b618 100644 --- a/packages/axum-http-tracker-server/tests/server/requests/announce.rs +++ b/packages/axum-http-tracker-server/tests/server/requests/announce.rs @@ -126,6 +126,11 @@ impl QueryBuilder { self } + pub fn with_port(mut self, port: u16) -> Self { + self.announce_query.port = port; + self + } + pub fn without_compact(mut self) -> Self { self.announce_query.compact = None; self diff --git a/packages/axum-http-tracker-server/tests/server/v1/contract.rs b/packages/axum-http-tracker-server/tests/server/v1/contract.rs index 37d96052f..d1f52d55a 100644 --- a/packages/axum-http-tracker-server/tests/server/v1/contract.rs +++ b/packages/axum-http-tracker-server/tests/server/v1/contract.rs @@ -105,8 
+105,8 @@ mod for_all_config_modes { use crate::common::fixtures::invalid_info_hashes; use crate::server::asserts::{ assert_announce_response, assert_bad_announce_request_error_response, assert_cannot_parse_query_param_error_response, - assert_cannot_parse_query_params_error_response, assert_compact_announce_response, assert_empty_announce_response, - assert_is_announce_response, assert_missing_query_params_for_announce_request_error_response, + assert_cannot_parse_query_params_error_response, assert_compact_announce_response, assert_is_announce_response, + assert_missing_query_params_for_announce_request_error_response, }; use crate::server::client::Client; use crate::server::requests::announce::{Compact, QueryBuilder}; @@ -559,7 +559,8 @@ mod for_all_config_modes { } #[tokio::test] - async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { + async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_socket_address_even_if_the_peer_id_is_different( + ) { logging::setup(); let env = Started::new(&configuration::ephemeral_public().into()).await; @@ -567,19 +568,44 @@ mod for_all_config_modes { let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); // DevSkim: ignore DS173237 let peer = PeerBuilder::default().build(); - // Add a peer - env.add_torrent_peer(&info_hash, &peer); - - let announce_query = QueryBuilder::default() + let announce_query_1 = QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_id(&peer.peer_id) + .with_peer_addr(&peer.peer_addr.ip()) + .with_port(peer.peer_addr.port()) + .query(); + + let announce_query_2 = QueryBuilder::default() + .with_info_hash(&info_hash) + .with_peer_id(&PeerId(*b"-qB00000000000000002")) // Different peer ID + .with_peer_addr(&peer.peer_addr.ip()) + .with_port(peer.peer_addr.port()) .query(); - assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); + // Same peer socket address + 
assert_eq!(announce_query_1.peer_addr, announce_query_2.peer_addr); + assert_eq!(announce_query_1.port, announce_query_2.port); + + // Different peer ID + assert_ne!(announce_query_1.peer_id, announce_query_2.peer_id); - let response = Client::new(*env.bind_address()).announce(&announce_query).await; + let _response = Client::new(*env.bind_address()).announce(&announce_query_1).await; + let response = Client::new(*env.bind_address()).announce(&announce_query_2).await; - assert_empty_announce_response(response).await; + let announce_policy = env.container.tracker_core_container.core_config.announce_policy; + + // The response should contain only the first peer. + assert_announce_response( + response, + &Announce { + complete: 1, + incomplete: 0, + interval: announce_policy.interval, + min_interval: announce_policy.interval_min, + peers: vec![], + }, + ) + .await; env.stop().await; } diff --git a/packages/torrent-repository/src/entry/swarm.rs b/packages/torrent-repository/src/entry/swarm.rs index 0395361a3..d6a7df102 100644 --- a/packages/torrent-repository/src/entry/swarm.rs +++ b/packages/torrent-repository/src/entry/swarm.rs @@ -3,18 +3,12 @@ use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; -use aquatic_udp_protocol::PeerId; use torrust_tracker_primitives::peer::{self, Peer}; use torrust_tracker_primitives::DurationSinceUnixEpoch; -// code-review: the current implementation uses the peer Id as the ``BTreeMap`` -// key. That would allow adding two identical peers except for the Id. -// For example, two peers with the same socket address but a different peer Id -// would be allowed. That would lead to duplicated peers in the tracker responses. 
- #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Swarm { - peers: BTreeMap>, + peers: BTreeMap>, } impl Swarm { @@ -28,12 +22,12 @@ impl Swarm { self.peers.is_empty() } - pub fn upsert(&mut self, value: Arc) -> Option> { - self.peers.insert(value.peer_id, value) + pub fn upsert(&mut self, peer: Arc) -> Option> { + self.peers.insert(peer.peer_addr, peer) } - pub fn remove(&mut self, key: &PeerId) -> Option> { - self.peers.remove(key) + pub fn remove(&mut self, peer: &Peer) -> Option> { + self.peers.remove(&peer.peer_addr) } pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { @@ -42,12 +36,12 @@ impl Swarm { } #[must_use] - pub fn get(&self, peer_id: &PeerId) -> Option<&Arc> { - self.peers.get(peer_id) + pub fn get(&self, peer_addr: &SocketAddr) -> Option<&Arc> { + self.peers.get(peer_addr) } #[must_use] - pub fn get_all(&self, limit: Option) -> Vec> { + pub fn get_all(&self, limit: Option) -> Vec> { match limit { Some(limit) => self.peers.values().take(limit).cloned().collect(), None => self.peers.values().cloned().collect(), @@ -151,7 +145,7 @@ mod tests { swarm.upsert(peer.into()); - assert_eq!(swarm.get(&peer.peer_id), Some(Arc::new(peer)).as_ref()); + assert_eq!(swarm.get(&peer.peer_addr), Some(Arc::new(peer)).as_ref()); } #[test] @@ -173,7 +167,7 @@ mod tests { swarm.upsert(peer.into()); - swarm.remove(&peer.peer_id); + swarm.remove(&peer); assert!(swarm.is_empty()); } @@ -186,9 +180,9 @@ mod tests { swarm.upsert(peer.into()); - swarm.remove(&peer.peer_id); + swarm.remove(&peer); - assert_eq!(swarm.get(&peer.peer_id), None); + assert_eq!(swarm.get(&peer.peer_addr), None); } #[test] @@ -273,16 +267,42 @@ mod tests { } #[test] - fn allow_inserting_two_identical_peers_except_for_the_id() { + fn allow_inserting_two_identical_peers_except_for_the_socket_address() { let mut swarm = Swarm::default(); - let peer1 = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); + let peer1 
= PeerBuilder::default() + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .build(); swarm.upsert(peer1.into()); - let peer2 = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000002")).build(); + let peer2 = PeerBuilder::default() + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) + .build(); swarm.upsert(peer2.into()); assert_eq!(swarm.len(), 2); } + + #[test] + fn not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { + let mut swarm = Swarm::default(); + + // When that happens the peer ID will be changed in the swarm. + // In practice, it's like if the peer had changed its ID. + + let peer1 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .build(); + swarm.upsert(peer1.into()); + + let peer2 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .build(); + swarm.upsert(peer2.into()); + + assert_eq!(swarm.len(), 1); + } } } diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs index 48f1a2df1..b251699ec 100644 --- a/packages/torrent-repository/src/entry/torrent.rs +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -75,7 +75,7 @@ impl TrackedTorrent { match peer::ReadInfo::get_event(peer) { AnnounceEvent::Stopped => { - drop(self.swarm.remove(&peer::ReadInfo::get_id(peer))); + drop(self.swarm.remove(peer)); } AnnounceEvent::Completed => { let previous = self.swarm.upsert(Arc::new(*peer)); diff --git a/packages/torrent-repository/tests/common/torrent_peer_builder.rs b/packages/torrent-repository/tests/common/torrent_peer_builder.rs index 33120180d..0c065e670 100644 --- a/packages/torrent-repository/tests/common/torrent_peer_builder.rs +++ 
b/packages/torrent-repository/tests/common/torrent_peer_builder.rs @@ -1,4 +1,4 @@ -use std::net::SocketAddr; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use torrust_tracker_clock::clock::Time; @@ -67,24 +67,40 @@ impl TorrentPeerBuilder { /// A torrent seeder is a peer with 0 bytes left to download which /// has not announced it has stopped +#[allow(clippy::cast_sign_loss)] +#[allow(clippy::cast_possible_truncation)] #[must_use] pub fn a_completed_peer(id: i32) -> peer::Peer { let peer_id = peer::Id::new(id); + let peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), id as u16); + TorrentPeerBuilder::new() .with_number_of_bytes_left(0) .with_event_completed() .with_peer_id(*peer_id) + .with_peer_address(peer_addr) .into() } /// A torrent leecher is a peer that is not a seeder. /// Leecher: left > 0 OR event = Stopped +/// +/// # Panics +/// +/// This function panics if proved id can't be converted into a valid socket address port. +/// +/// The `id` argument is used to identify the peer in both the `peer_id` and the `peer_addr`. 
+#[allow(clippy::cast_sign_loss)] +#[allow(clippy::cast_possible_truncation)] #[must_use] pub fn a_started_peer(id: i32) -> peer::Peer { let peer_id = peer::Id::new(id); + let peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), id as u16); + TorrentPeerBuilder::new() .with_number_of_bytes_left(1) .with_event_started() .with_peer_id(*peer_id) + .with_peer_address(peer_addr) .into() } diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index 27bb5f238..5f958f05c 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -370,8 +370,7 @@ async fn it_should_limit_the_number_of_peers_returned( // We add one more peer than the scrape limit for peer_number in 1..=74 + 1 { - let mut peer = a_started_peer(1); - peer.peer_id = *peer::Id::new(peer_number); + let peer = a_started_peer(peer_number); torrent.upsert_peer(&peer); } diff --git a/packages/tracker-core/src/lib.rs b/packages/tracker-core/src/lib.rs index d9da9b9e7..82ebac3c6 100644 --- a/packages/tracker-core/src/lib.rs +++ b/packages/tracker-core/src/lib.rs @@ -224,14 +224,14 @@ mod tests { // Scrape let scrape_data = scrape_handler.scrape(&vec![info_hash]).await.unwrap(); - // The expected swarm metadata for the file + // The expected swarm metadata for the torrent let mut expected_scrape_data = ScrapeData::empty(); expected_scrape_data.add_file( &info_hash, SwarmMetadata { - complete: 0, // the "complete" peer does not count because it was not previously known - downloaded: 0, - incomplete: 1, // the "incomplete" peer we have just announced + complete: 1, // the "incomplete" announced + downloaded: 0, // the "complete" peer download does not count because it was not previously known + incomplete: 1, // the "incomplete" peer announced }, ); diff --git a/packages/tracker-core/src/test_helpers.rs b/packages/tracker-core/src/test_helpers.rs index 0d7ca012f..04fe4133b 100644 --- 
a/packages/tracker-core/src/test_helpers.rs +++ b/packages/tracker-core/src/test_helpers.rs @@ -104,7 +104,7 @@ pub(crate) mod tests { #[must_use] pub fn complete_peer() -> Peer { Peer { - peer_id: PeerId(*b"-qB00000000000000000"), + peer_id: PeerId(*b"-qB00000000000000001"), peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), @@ -118,8 +118,8 @@ pub(crate) mod tests { #[must_use] pub fn incomplete_peer() -> Peer { Peer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + peer_id: PeerId(*b"-qB00000000000000002"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 2)), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), downloaded: NumberOfBytes::new(0), From 0a4c8050515825244ee4e62ccea0332deb83a84a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 08:49:46 +0100 Subject: [PATCH 559/802] refactor: [#1495] add SwarmMetadata to Swarm - Moved responsability for keeping metadata to the Swarm type. - Number of seeder and leechers is now calculated when the Swarm changes not on-demand. We avoid iterating over the peers to get the number of seeders and leechers. - The number of downloads is also calculate now in the Swarm. It will be removed from the TrackedTorrent. --- packages/primitives/src/swarm_metadata.rs | 17 +- .../torrent-repository/src/entry/swarm.rs | 652 +++++++++++++----- 2 files changed, 503 insertions(+), 166 deletions(-) diff --git a/packages/primitives/src/swarm_metadata.rs b/packages/primitives/src/swarm_metadata.rs index 792eff632..a70298d71 100644 --- a/packages/primitives/src/swarm_metadata.rs +++ b/packages/primitives/src/swarm_metadata.rs @@ -7,7 +7,7 @@ use derive_more::Constructor; /// Swarm metadata dictionary in the scrape response. 
/// /// See [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) -#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Constructor)] pub struct SwarmMetadata { /// (i.e `completed`): The number of peers that have ever completed /// downloading a given torrent. @@ -27,6 +27,21 @@ impl SwarmMetadata { pub fn zeroed() -> Self { Self::default() } + + #[must_use] + pub fn downloads(&self) -> u32 { + self.downloaded + } + + #[must_use] + pub fn seeders(&self) -> u32 { + self.complete + } + + #[must_use] + pub fn leechers(&self) -> u32 { + self.incomplete + } } /// Structure that holds aggregate swarm metadata. diff --git a/packages/torrent-repository/src/entry/swarm.rs b/packages/torrent-repository/src/entry/swarm.rs index d6a7df102..7331d4504 100644 --- a/packages/torrent-repository/src/entry/swarm.rs +++ b/packages/torrent-repository/src/entry/swarm.rs @@ -1,14 +1,18 @@ -//! A peer list. +//! A swarm is a collection of peers that are all trying to download the same +//! torrent. use std::collections::BTreeMap; use std::net::SocketAddr; use std::sync::Arc; +use aquatic_udp_protocol::AnnounceEvent; use torrust_tracker_primitives::peer::{self, Peer}; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Swarm { peers: BTreeMap>, + metadata: SwarmMetadata, } impl Swarm { @@ -23,16 +27,82 @@ impl Swarm { } pub fn upsert(&mut self, peer: Arc) -> Option> { - self.peers.insert(peer.peer_addr, peer) + let new_peer_is_seeder = peer.is_seeder(); + let new_peer_completed = peer.event == AnnounceEvent::Completed; + + if let Some(old_peer) = self.peers.insert(peer.peer_addr, peer) { + // A peer has been updated in the swarm. + + // Check if the peer has changed its from leecher to seeder or vice versa. 
+ if old_peer.is_seeder() != new_peer_is_seeder { + if new_peer_is_seeder { + self.metadata.complete += 1; + self.metadata.incomplete -= 1; + } else { + self.metadata.complete -= 1; + self.metadata.incomplete += 1; + } + } + + // Check if the peer has completed downloading the torrent. + if new_peer_completed && old_peer.event != AnnounceEvent::Completed { + self.metadata.downloaded += 1; + } + + Some(old_peer) + } else { + // A new peer has been added to the swarm. + + // Check if the peer is a seeder or a leecher. + if new_peer_is_seeder { + self.metadata.complete += 1; + } else { + self.metadata.incomplete += 1; + } + + // Check if the peer has completed downloading the torrent. + if new_peer_completed { + // Don't increment `downloaded` here: we only count transitions + // from a known peer + } + + None + } } pub fn remove(&mut self, peer: &Peer) -> Option> { - self.peers.remove(&peer.peer_addr) + match self.peers.remove(&peer.peer_addr) { + Some(old_peer) => { + // A peer has been removed from the swarm. + + // Check if the peer was a seeder or a leecher. + if old_peer.is_seeder() { + self.metadata.complete -= 1; + } else { + self.metadata.incomplete -= 1; + } + + Some(old_peer) + } + None => None, + } } pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { - self.peers - .retain(|_, peer| peer::ReadInfo::get_updated(peer) > current_cutoff); + self.peers.retain(|_, peer| { + let is_active = peer::ReadInfo::get_updated(peer) > current_cutoff; + + if !is_active { + // Update the metadata when removing a peer. 
+ if peer.is_seeder() { + self.metadata.complete -= 1; + } else { + self.metadata.incomplete -= 1; + } + } + + is_active + }); } #[must_use] @@ -48,14 +118,6 @@ impl Swarm { } } - #[must_use] - pub fn seeders_and_leechers(&self) -> (usize, usize) { - let seeders = self.peers.values().filter(|peer| peer.is_seeder()).count(); - let leechers = self.len() - seeders; - - (seeders, leechers) - } - #[must_use] pub fn get_peers_excluding_addr(&self, peer_addr: &SocketAddr, limit: Option) -> Vec> { match limit { @@ -77,232 +139,492 @@ impl Swarm { .collect(), } } + + #[must_use] + pub fn metadata(&self) -> SwarmMetadata { + self.metadata + } + + /// Returns the number of seeders and leechers in the swarm. + /// + /// # Panics + /// + /// This function will panic if the `complete` or `incomplete` fields in the + /// `metadata` field cannot be converted to `usize`. + #[must_use] + pub fn seeders_and_leechers(&self) -> (usize, usize) { + let seeders = self + .metadata + .complete + .try_into() + .expect("Failed to convert 'complete' (seeders) count to usize"); + let leechers = self + .metadata + .incomplete + .try_into() + .expect("Failed to convert 'incomplete' (leechers) count to usize"); + + (seeders, leechers) + } } #[cfg(test)] mod tests { - mod it_should { - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; - use aquatic_udp_protocol::PeerId; - use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use torrust_tracker_primitives::DurationSinceUnixEpoch; + use aquatic_udp_protocol::PeerId; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::entry::swarm::Swarm; + use crate::entry::swarm::Swarm; - #[test] - fn be_empty_when_no_peers_have_been_inserted() { - let swarm = Swarm::default(); + #[test] + fn 
it_should_be_empty_when_no_peers_have_been_inserted() { + let swarm = Swarm::default(); - assert!(swarm.is_empty()); - } + assert!(swarm.is_empty()); + } - #[test] - fn have_zero_length_when_no_peers_have_been_inserted() { - let swarm = Swarm::default(); + #[test] + fn it_should_have_zero_length_when_no_peers_have_been_inserted() { + let swarm = Swarm::default(); - assert_eq!(swarm.len(), 0); - } + assert_eq!(swarm.len(), 0); + } - #[test] - fn allow_inserting_a_new_peer() { - let mut swarm = Swarm::default(); + #[test] + fn it_should_allow_inserting_a_new_peer() { + let mut swarm = Swarm::default(); - let peer = PeerBuilder::default().build(); + let peer = PeerBuilder::default().build(); - assert_eq!(swarm.upsert(peer.into()), None); - } + assert_eq!(swarm.upsert(peer.into()), None); + } - #[test] - fn allow_updating_a_preexisting_peer() { - let mut swarm = Swarm::default(); + #[test] + fn it_should_allow_updating_a_preexisting_peer() { + let mut swarm = Swarm::default(); - let peer = PeerBuilder::default().build(); + let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.upsert(peer.into()); - assert_eq!(swarm.upsert(peer.into()), Some(Arc::new(peer))); - } + assert_eq!(swarm.upsert(peer.into()), Some(Arc::new(peer))); + } - #[test] - fn allow_getting_all_peers() { - let mut swarm = Swarm::default(); + #[test] + fn it_should_allow_getting_all_peers() { + let mut swarm = Swarm::default(); - let peer = PeerBuilder::default().build(); + let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.upsert(peer.into()); - assert_eq!(swarm.get_all(None), [Arc::new(peer)]); - } + assert_eq!(swarm.get_all(None), [Arc::new(peer)]); + } - #[test] - fn allow_getting_one_peer_by_id() { - let mut swarm = Swarm::default(); + #[test] + fn it_should_allow_getting_one_peer_by_id() { + let mut swarm = Swarm::default(); - let peer = PeerBuilder::default().build(); + let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + 
swarm.upsert(peer.into()); - assert_eq!(swarm.get(&peer.peer_addr), Some(Arc::new(peer)).as_ref()); - } + assert_eq!(swarm.get(&peer.peer_addr), Some(Arc::new(peer)).as_ref()); + } - #[test] - fn increase_the_number_of_peers_after_inserting_a_new_one() { - let mut swarm = Swarm::default(); + #[test] + fn it_should_increase_the_number_of_peers_after_inserting_a_new_one() { + let mut swarm = Swarm::default(); - let peer = PeerBuilder::default().build(); + let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.upsert(peer.into()); - assert_eq!(swarm.len(), 1); - } + assert_eq!(swarm.len(), 1); + } - #[test] - fn decrease_the_number_of_peers_after_removing_one() { - let mut swarm = Swarm::default(); + #[test] + fn it_should_decrease_the_number_of_peers_after_removing_one() { + let mut swarm = Swarm::default(); - let peer = PeerBuilder::default().build(); + let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.upsert(peer.into()); - swarm.remove(&peer); + swarm.remove(&peer); - assert!(swarm.is_empty()); - } + assert!(swarm.is_empty()); + } - #[test] - fn allow_removing_an_existing_peer() { - let mut swarm = Swarm::default(); + #[test] + fn it_should_allow_removing_an_existing_peer() { + let mut swarm = Swarm::default(); - let peer = PeerBuilder::default().build(); + let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.upsert(peer.into()); - swarm.remove(&peer); + let old = swarm.remove(&peer); - assert_eq!(swarm.get(&peer.peer_addr), None); - } + assert_eq!(old, Some(Arc::new(peer))); + assert_eq!(swarm.get(&peer.peer_addr), None); + } - #[test] - fn allow_getting_all_peers_excluding_peers_with_a_given_address() { - let mut swarm = Swarm::default(); + #[test] + fn it_should_allow_removing_a_non_existing_peer() { + let mut swarm = Swarm::default(); - let peer1 = PeerBuilder::default() - .with_peer_id(&PeerId(*b"-qB00000000000000001")) - 
.with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) - .build(); - swarm.upsert(peer1.into()); + let peer = PeerBuilder::default().build(); - let peer2 = PeerBuilder::default() - .with_peer_id(&PeerId(*b"-qB00000000000000002")) - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) - .build(); - swarm.upsert(peer2.into()); + assert_eq!(swarm.remove(&peer), None); + } - assert_eq!(swarm.get_peers_excluding_addr(&peer2.peer_addr, None), [Arc::new(peer1)]); - } + #[test] + fn it_should_allow_getting_all_peers_excluding_peers_with_a_given_address() { + let mut swarm = Swarm::default(); - #[test] - fn return_the_number_of_seeders_in_the_list() { - let mut swarm = Swarm::default(); + let peer1 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .build(); + swarm.upsert(peer1.into()); - let seeder = PeerBuilder::seeder().build(); - let leecher = PeerBuilder::leecher().build(); + let peer2 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) + .build(); + swarm.upsert(peer2.into()); - swarm.upsert(seeder.into()); - swarm.upsert(leecher.into()); + assert_eq!(swarm.get_peers_excluding_addr(&peer2.peer_addr, None), [Arc::new(peer1)]); + } - let (seeders, _leechers) = swarm.seeders_and_leechers(); + #[test] + fn it_should_remove_inactive_peers() { + let mut swarm = Swarm::default(); + let one_second = DurationSinceUnixEpoch::new(1, 0); - assert_eq!(seeders, 1); - } + // Insert the peer + let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); + let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); + swarm.upsert(peer.into()); + + // Remove peers not updated since one second after inserting the peer + swarm.remove_inactive_peers(last_update_time + one_second); - #[test] - 
fn return_the_number_of_leechers_in_the_list() { - let mut swarm = Swarm::default(); + assert_eq!(swarm.len(), 0); + } - let seeder = PeerBuilder::seeder().build(); - let leecher = PeerBuilder::leecher().build(); + #[test] + fn it_should_not_remove_active_peers() { + let mut swarm = Swarm::default(); + let one_second = DurationSinceUnixEpoch::new(1, 0); - swarm.upsert(seeder.into()); - swarm.upsert(leecher.into()); + // Insert the peer + let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); + let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); + swarm.upsert(peer.into()); - let (_seeders, leechers) = swarm.seeders_and_leechers(); + // Remove peers not updated since one second before inserting the peer. + swarm.remove_inactive_peers(last_update_time - one_second); - assert_eq!(leechers, 1); - } + assert_eq!(swarm.len(), 1); + } + + #[test] + fn it_should_allow_inserting_two_identical_peers_except_for_the_socket_address() { + let mut swarm = Swarm::default(); + + let peer1 = PeerBuilder::default() + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .build(); + swarm.upsert(peer1.into()); + + let peer2 = PeerBuilder::default() + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) + .build(); + swarm.upsert(peer2.into()); + + assert_eq!(swarm.len(), 2); + } + + #[test] + fn it_should_not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { + let mut swarm = Swarm::default(); + + // When that happens the peer ID will be changed in the swarm. + // In practice, it's like if the peer had changed its ID. 
+ + let peer1 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .build(); + swarm.upsert(peer1.into()); + + let peer2 = PeerBuilder::default() + .with_peer_id(&PeerId(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .build(); + swarm.upsert(peer2.into()); + + assert_eq!(swarm.len(), 1); + } + + #[test] + fn it_should_return_the_metadata() { + let mut swarm = Swarm::default(); + + let seeder = PeerBuilder::seeder().build(); + let leecher = PeerBuilder::leecher().build(); + + swarm.upsert(seeder.into()); + swarm.upsert(leecher.into()); + + assert_eq!( + swarm.metadata(), + SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 1, + } + ); + } + + #[test] + fn it_should_return_the_number_of_seeders_in_the_list() { + let mut swarm = Swarm::default(); + + let seeder = PeerBuilder::seeder().build(); + let leecher = PeerBuilder::leecher().build(); + + swarm.upsert(seeder.into()); + swarm.upsert(leecher.into()); + + let (seeders, _leechers) = swarm.seeders_and_leechers(); + + assert_eq!(seeders, 1); + } + + #[test] + fn it_should_return_the_number_of_leechers_in_the_list() { + let mut swarm = Swarm::default(); - #[test] - fn remove_inactive_peers() { - let mut swarm = Swarm::default(); - let one_second = DurationSinceUnixEpoch::new(1, 0); + let seeder = PeerBuilder::seeder().build(); + let leecher = PeerBuilder::leecher().build(); - // Insert the peer - let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); - let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.upsert(peer.into()); + swarm.upsert(seeder.into()); + swarm.upsert(leecher.into()); - // Remove peers not updated since one second after inserting the peer - swarm.remove_inactive_peers(last_update_time + one_second); + let (_seeders, leechers) = swarm.seeders_and_leechers(); - 
assert_eq!(swarm.len(), 0); + assert_eq!(leechers, 1); + } + + mod updating_the_swarm_metadata { + + mod when_a_new_peer_is_added { + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + + use crate::entry::swarm::Swarm; + + #[test] + fn it_should_increase_the_number_of_leechers_if_the_new_peer_is_a_leecher_() { + let mut swarm = Swarm::default(); + + let leechers = swarm.metadata().leechers(); + + let leecher = PeerBuilder::leecher().build(); + + swarm.upsert(leecher.into()); + + assert_eq!(swarm.metadata().leechers(), leechers + 1); + } + + #[test] + fn it_should_increase_the_number_of_seeders_if_the_new_peer_is_a_seeder() { + let mut swarm = Swarm::default(); + + let seeders = swarm.metadata().seeders(); + + let seeder = PeerBuilder::seeder().build(); + + swarm.upsert(seeder.into()); + + assert_eq!(swarm.metadata().seeders(), seeders + 1); + } + + #[test] + fn it_should_not_increasing_the_number_of_downloads_if_the_new_peer_has_completed_downloading_as_it_was_not_previously_known( + ) { + let mut swarm = Swarm::default(); + + let downloads = swarm.metadata().downloads(); + + let seeder = PeerBuilder::seeder().build(); + + swarm.upsert(seeder.into()); + + assert_eq!(swarm.metadata().downloads(), downloads); + } } - #[test] - fn not_remove_active_peers() { - let mut swarm = Swarm::default(); - let one_second = DurationSinceUnixEpoch::new(1, 0); + mod when_a_peer_is_removed { + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + + use crate::entry::swarm::Swarm; + + #[test] + fn it_should_decrease_the_number_of_leechers_if_the_removed_peer_was_a_leecher() { + let mut swarm = Swarm::default(); + + let leecher = PeerBuilder::leecher().build(); + + swarm.upsert(leecher.into()); + + let leechers = swarm.metadata().leechers(); + + swarm.remove(&leecher); - // Insert the peer - let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); - let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - 
swarm.upsert(peer.into()); + assert_eq!(swarm.metadata().leechers(), leechers - 1); + } - // Remove peers not updated since one second before inserting the peer. - swarm.remove_inactive_peers(last_update_time - one_second); + #[test] + fn it_should_decrease_the_number_of_seeders_if_the_removed_peer_was_a_seeder() { + let mut swarm = Swarm::default(); - assert_eq!(swarm.len(), 1); + let seeder = PeerBuilder::seeder().build(); + + swarm.upsert(seeder.into()); + + let seeders = swarm.metadata().seeders(); + + swarm.remove(&seeder); + + assert_eq!(swarm.metadata().seeders(), seeders - 1); + } } - #[test] - fn allow_inserting_two_identical_peers_except_for_the_socket_address() { - let mut swarm = Swarm::default(); + mod when_a_peer_is_removed_due_to_inactivity { + use std::time::Duration; + + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + + use crate::entry::swarm::Swarm; + + #[test] + fn it_should_decrease_the_number_of_leechers_when_a_removed_peer_is_a_leecher() { + let mut swarm = Swarm::default(); + + let leecher = PeerBuilder::leecher().build(); - let peer1 = PeerBuilder::default() - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) - .build(); - swarm.upsert(peer1.into()); + swarm.upsert(leecher.into()); - let peer2 = PeerBuilder::default() - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) - .build(); - swarm.upsert(peer2.into()); + let leechers = swarm.metadata().leechers(); - assert_eq!(swarm.len(), 2); + swarm.remove_inactive_peers(leecher.updated + Duration::from_secs(1)); + + assert_eq!(swarm.metadata().leechers(), leechers - 1); + } + + #[test] + fn it_should_decrease_the_number_of_seeders_when_the_removed_peer_is_a_seeder() { + let mut swarm = Swarm::default(); + + let seeder = PeerBuilder::seeder().build(); + + swarm.upsert(seeder.into()); + + let seeders = swarm.metadata().seeders(); + + swarm.remove_inactive_peers(seeder.updated + Duration::from_secs(1)); + + 
assert_eq!(swarm.metadata().seeders(), seeders - 1); + } } - #[test] - fn not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { - let mut swarm = Swarm::default(); + mod for_changes_in_existing_peers { + use aquatic_udp_protocol::NumberOfBytes; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + + use crate::entry::swarm::Swarm; + + #[test] + fn it_should_increase_seeders_and_decreasing_leechers_when_the_peer_changes_from_leecher_to_seeder_() { + let mut swarm = Swarm::default(); + + let mut peer = PeerBuilder::leecher().build(); + + swarm.upsert(peer.into()); + + let leechers = swarm.metadata().leechers(); + let seeders = swarm.metadata().seeders(); + + peer.left = NumberOfBytes::new(0); // Convert to seeder + + swarm.upsert(peer.into()); + + assert_eq!(swarm.metadata().seeders(), seeders + 1); + assert_eq!(swarm.metadata().leechers(), leechers - 1); + } + + #[test] + fn it_should_increase_leechers_and_decreasing_seeders_when_the_peer_changes_from_seeder_to_leecher() { + let mut swarm = Swarm::default(); + + let mut peer = PeerBuilder::seeder().build(); + + swarm.upsert(peer.into()); + + let leechers = swarm.metadata().leechers(); + let seeders = swarm.metadata().seeders(); + + peer.left = NumberOfBytes::new(10); // Convert to leecher + + swarm.upsert(peer.into()); + + assert_eq!(swarm.metadata().leechers(), leechers + 1); + assert_eq!(swarm.metadata().seeders(), seeders - 1); + } + + #[test] + fn it_should_increase_the_number_of_downloads_when_the_peer_announces_completed_downloading() { + let mut swarm = Swarm::default(); + + let mut peer = PeerBuilder::leecher().build(); + + swarm.upsert(peer.into()); + + let downloads = swarm.metadata().downloads(); + + peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; + + swarm.upsert(peer.into()); + + assert_eq!(swarm.metadata().downloads(), downloads + 1); + } + + #[test] + fn 
it_should_not_increasing_the_number_of_downloads_when_the_peer_announces_completed_downloading_twice_() { + let mut swarm = Swarm::default(); + + let mut peer = PeerBuilder::leecher().build(); + + swarm.upsert(peer.into()); + + let downloads = swarm.metadata().downloads(); - // When that happens the peer ID will be changed in the swarm. - // In practice, it's like if the peer had changed its ID. + peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - let peer1 = PeerBuilder::default() - .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) - .build(); - swarm.upsert(peer1.into()); + swarm.upsert(peer.into()); - let peer2 = PeerBuilder::default() - .with_peer_id(&PeerId(*b"-qB00000000000000002")) - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) - .build(); - swarm.upsert(peer2.into()); + swarm.upsert(peer.into()); - assert_eq!(swarm.len(), 1); + assert_eq!(swarm.metadata().downloads(), downloads + 1); + } } } } From 61560a8bd27a4eedb8d19bde450fce39474fc076 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 11:36:01 +0100 Subject: [PATCH 560/802] chore: add gitignore to torrent-repository pkg --- packages/torrent-repository/.gitignore | 1 + 1 file changed, 1 insertion(+) create mode 100644 packages/torrent-repository/.gitignore diff --git a/packages/torrent-repository/.gitignore b/packages/torrent-repository/.gitignore new file mode 100644 index 000000000..c9907ae11 --- /dev/null +++ b/packages/torrent-repository/.gitignore @@ -0,0 +1 @@ +/.coverage/ From f73c56698c94e2d2e5e177e470c8a9c291ab791e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 16:21:39 +0100 Subject: [PATCH 561/802] refactor: [#1495] some renamings in Swarm type --- .../torrent-repository/src/entry/swarm.rs | 142 +++++++++--------- .../torrent-repository/src/entry/torrent.rs | 10 +- 2 files changed, 76 insertions(+), 76 deletions(-) diff --git 
a/packages/torrent-repository/src/entry/swarm.rs b/packages/torrent-repository/src/entry/swarm.rs index 7331d4504..05c09b68e 100644 --- a/packages/torrent-repository/src/entry/swarm.rs +++ b/packages/torrent-repository/src/entry/swarm.rs @@ -5,37 +5,27 @@ use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::AnnounceEvent; -use torrust_tracker_primitives::peer::{self, Peer}; +use torrust_tracker_primitives::peer::{self, Peer, PeerAnnouncement}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Swarm { - peers: BTreeMap>, + peers: BTreeMap>, metadata: SwarmMetadata, } impl Swarm { - #[must_use] - pub fn len(&self) -> usize { - self.peers.len() - } - - #[must_use] - pub fn is_empty(&self) -> bool { - self.peers.is_empty() - } - - pub fn upsert(&mut self, peer: Arc) -> Option> { - let new_peer_is_seeder = peer.is_seeder(); - let new_peer_completed = peer.event == AnnounceEvent::Completed; + pub fn handle_announce(&mut self, incoming_announce: Arc) -> Option> { + let is_now_seeder = incoming_announce.is_seeder(); + let has_completed = incoming_announce.event == AnnounceEvent::Completed; - if let Some(old_peer) = self.peers.insert(peer.peer_addr, peer) { + if let Some(old_announce) = self.peers.insert(incoming_announce.peer_addr, incoming_announce) { // A peer has been updated in the swarm. // Check if the peer has changed its from leecher to seeder or vice versa. - if old_peer.is_seeder() != new_peer_is_seeder { - if new_peer_is_seeder { + if old_announce.is_seeder() != is_now_seeder { + if is_now_seeder { self.metadata.complete += 1; self.metadata.incomplete -= 1; } else { @@ -45,23 +35,23 @@ impl Swarm { } // Check if the peer has completed downloading the torrent. 
- if new_peer_completed && old_peer.event != AnnounceEvent::Completed { + if has_completed && old_announce.event != AnnounceEvent::Completed { self.metadata.downloaded += 1; } - Some(old_peer) + Some(old_announce) } else { // A new peer has been added to the swarm. // Check if the peer is a seeder or a leecher. - if new_peer_is_seeder { + if is_now_seeder { self.metadata.complete += 1; } else { self.metadata.incomplete += 1; } // Check if the peer has completed downloading the torrent. - if new_peer_completed { + if has_completed { // Don't increment `downloaded` here: we only count transitions // from a known peer } @@ -70,8 +60,8 @@ impl Swarm { } } - pub fn remove(&mut self, peer: &Peer) -> Option> { - match self.peers.remove(&peer.peer_addr) { + pub fn remove(&mut self, peer_to_remove: &Peer) -> Option> { + match self.peers.remove(&peer_to_remove.peer_addr) { Some(old_peer) => { // A peer has been removed from the swarm. @@ -88,7 +78,7 @@ impl Swarm { } } - pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { + pub fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) { self.peers.retain(|_, peer| { let is_active = peer::ReadInfo::get_updated(peer) > current_cutoff; @@ -111,7 +101,7 @@ impl Swarm { } #[must_use] - pub fn get_all(&self, limit: Option) -> Vec> { + pub fn peers(&self, limit: Option) -> Vec> { match limit { Some(limit) => self.peers.values().take(limit).cloned().collect(), None => self.peers.values().cloned().collect(), @@ -119,7 +109,7 @@ impl Swarm { } #[must_use] - pub fn get_peers_excluding_addr(&self, peer_addr: &SocketAddr, limit: Option) -> Vec> { + pub fn peers_excluding(&self, peer_addr: &SocketAddr, limit: Option) -> Vec> { match limit { Some(limit) => self .peers @@ -166,6 +156,16 @@ impl Swarm { (seeders, leechers) } + + #[must_use] + pub fn len(&self) -> usize { + self.peers.len() + } + + #[must_use] + pub fn is_empty(&self) -> bool { + self.peers.is_empty() + } } #[cfg(test)] @@ -201,7 
+201,7 @@ mod tests { let peer = PeerBuilder::default().build(); - assert_eq!(swarm.upsert(peer.into()), None); + assert_eq!(swarm.handle_announce(peer.into()), None); } #[test] @@ -210,9 +210,9 @@ mod tests { let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); - assert_eq!(swarm.upsert(peer.into()), Some(Arc::new(peer))); + assert_eq!(swarm.handle_announce(peer.into()), Some(Arc::new(peer))); } #[test] @@ -221,9 +221,9 @@ mod tests { let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); - assert_eq!(swarm.get_all(None), [Arc::new(peer)]); + assert_eq!(swarm.peers(None), [Arc::new(peer)]); } #[test] @@ -232,7 +232,7 @@ mod tests { let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); assert_eq!(swarm.get(&peer.peer_addr), Some(Arc::new(peer)).as_ref()); } @@ -243,7 +243,7 @@ mod tests { let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); assert_eq!(swarm.len(), 1); } @@ -254,7 +254,7 @@ mod tests { let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); swarm.remove(&peer); @@ -267,7 +267,7 @@ mod tests { let peer = PeerBuilder::default().build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); let old = swarm.remove(&peer); @@ -292,15 +292,15 @@ mod tests { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert(peer1.into()); + swarm.handle_announce(peer1.into()); let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); - swarm.upsert(peer2.into()); + swarm.handle_announce(peer2.into()); - assert_eq!(swarm.get_peers_excluding_addr(&peer2.peer_addr, None), 
[Arc::new(peer1)]); + assert_eq!(swarm.peers_excluding(&peer2.peer_addr, None), [Arc::new(peer1)]); } #[test] @@ -311,10 +311,10 @@ mod tests { // Insert the peer let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); // Remove peers not updated since one second after inserting the peer - swarm.remove_inactive_peers(last_update_time + one_second); + swarm.remove_inactive(last_update_time + one_second); assert_eq!(swarm.len(), 0); } @@ -327,10 +327,10 @@ mod tests { // Insert the peer let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); // Remove peers not updated since one second before inserting the peer. - swarm.remove_inactive_peers(last_update_time - one_second); + swarm.remove_inactive(last_update_time - one_second); assert_eq!(swarm.len(), 1); } @@ -342,12 +342,12 @@ mod tests { let peer1 = PeerBuilder::default() .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert(peer1.into()); + swarm.handle_announce(peer1.into()); let peer2 = PeerBuilder::default() .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); - swarm.upsert(peer2.into()); + swarm.handle_announce(peer2.into()); assert_eq!(swarm.len(), 2); } @@ -363,13 +363,13 @@ mod tests { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert(peer1.into()); + swarm.handle_announce(peer1.into()); let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert(peer2.into()); + 
swarm.handle_announce(peer2.into()); assert_eq!(swarm.len(), 1); } @@ -381,8 +381,8 @@ mod tests { let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert(seeder.into()); - swarm.upsert(leecher.into()); + swarm.handle_announce(seeder.into()); + swarm.handle_announce(leecher.into()); assert_eq!( swarm.metadata(), @@ -401,8 +401,8 @@ mod tests { let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert(seeder.into()); - swarm.upsert(leecher.into()); + swarm.handle_announce(seeder.into()); + swarm.handle_announce(leecher.into()); let (seeders, _leechers) = swarm.seeders_and_leechers(); @@ -416,8 +416,8 @@ mod tests { let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert(seeder.into()); - swarm.upsert(leecher.into()); + swarm.handle_announce(seeder.into()); + swarm.handle_announce(leecher.into()); let (_seeders, leechers) = swarm.seeders_and_leechers(); @@ -439,7 +439,7 @@ mod tests { let leecher = PeerBuilder::leecher().build(); - swarm.upsert(leecher.into()); + swarm.handle_announce(leecher.into()); assert_eq!(swarm.metadata().leechers(), leechers + 1); } @@ -452,7 +452,7 @@ mod tests { let seeder = PeerBuilder::seeder().build(); - swarm.upsert(seeder.into()); + swarm.handle_announce(seeder.into()); assert_eq!(swarm.metadata().seeders(), seeders + 1); } @@ -466,7 +466,7 @@ mod tests { let seeder = PeerBuilder::seeder().build(); - swarm.upsert(seeder.into()); + swarm.handle_announce(seeder.into()); assert_eq!(swarm.metadata().downloads(), downloads); } @@ -483,7 +483,7 @@ mod tests { let leecher = PeerBuilder::leecher().build(); - swarm.upsert(leecher.into()); + swarm.handle_announce(leecher.into()); let leechers = swarm.metadata().leechers(); @@ -498,7 +498,7 @@ mod tests { let seeder = PeerBuilder::seeder().build(); - swarm.upsert(seeder.into()); + swarm.handle_announce(seeder.into()); let seeders = 
swarm.metadata().seeders(); @@ -521,11 +521,11 @@ mod tests { let leecher = PeerBuilder::leecher().build(); - swarm.upsert(leecher.into()); + swarm.handle_announce(leecher.into()); let leechers = swarm.metadata().leechers(); - swarm.remove_inactive_peers(leecher.updated + Duration::from_secs(1)); + swarm.remove_inactive(leecher.updated + Duration::from_secs(1)); assert_eq!(swarm.metadata().leechers(), leechers - 1); } @@ -536,11 +536,11 @@ mod tests { let seeder = PeerBuilder::seeder().build(); - swarm.upsert(seeder.into()); + swarm.handle_announce(seeder.into()); let seeders = swarm.metadata().seeders(); - swarm.remove_inactive_peers(seeder.updated + Duration::from_secs(1)); + swarm.remove_inactive(seeder.updated + Duration::from_secs(1)); assert_eq!(swarm.metadata().seeders(), seeders - 1); } @@ -558,14 +558,14 @@ mod tests { let mut peer = PeerBuilder::leecher().build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); let leechers = swarm.metadata().leechers(); let seeders = swarm.metadata().seeders(); peer.left = NumberOfBytes::new(0); // Convert to seeder - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); assert_eq!(swarm.metadata().seeders(), seeders + 1); assert_eq!(swarm.metadata().leechers(), leechers - 1); @@ -577,14 +577,14 @@ mod tests { let mut peer = PeerBuilder::seeder().build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); let leechers = swarm.metadata().leechers(); let seeders = swarm.metadata().seeders(); peer.left = NumberOfBytes::new(10); // Convert to leecher - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); assert_eq!(swarm.metadata().leechers(), leechers + 1); assert_eq!(swarm.metadata().seeders(), seeders - 1); @@ -596,13 +596,13 @@ mod tests { let mut peer = PeerBuilder::leecher().build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); let downloads = swarm.metadata().downloads(); peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - 
swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); assert_eq!(swarm.metadata().downloads(), downloads + 1); } @@ -613,15 +613,15 @@ mod tests { let mut peer = PeerBuilder::leecher().build(); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); let downloads = swarm.metadata().downloads(); peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); - swarm.upsert(peer.into()); + swarm.handle_announce(peer.into()); assert_eq!(swarm.metadata().downloads(), downloads + 1); } diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs index b251699ec..3a895008f 100644 --- a/packages/torrent-repository/src/entry/torrent.rs +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -62,12 +62,12 @@ impl TrackedTorrent { #[must_use] pub fn get_peers(&self, limit: Option) -> Vec> { - self.swarm.get_all(limit) + self.swarm.peers(limit) } #[must_use] pub fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { - self.swarm.get_peers_excluding_addr(client, limit) + self.swarm.peers_excluding(client, limit) } pub fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { @@ -78,7 +78,7 @@ impl TrackedTorrent { drop(self.swarm.remove(peer)); } AnnounceEvent::Completed => { - let previous = self.swarm.upsert(Arc::new(*peer)); + let previous = self.swarm.handle_announce(Arc::new(*peer)); // Don't count if peer was not previously known and not already completed. if previous.is_some_and(|p| p.event != AnnounceEvent::Completed) { self.downloaded += 1; @@ -88,7 +88,7 @@ impl TrackedTorrent { _ => { // `Started` event (first announced event) or // `None` event (announcements done at regular intervals). 
- drop(self.swarm.upsert(Arc::new(*peer))); + drop(self.swarm.handle_announce(Arc::new(*peer))); } } @@ -96,6 +96,6 @@ impl TrackedTorrent { } pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { - self.swarm.remove_inactive_peers(current_cutoff); + self.swarm.remove_inactive(current_cutoff); } } From 82bbfe3fcfa2768d527efbddff26ec44e0bee136 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 17:11:23 +0100 Subject: [PATCH 562/802] refactor: [#1495] move logic from TackedTorrent to Swarm --- .../torrent-repository/src/entry/swarm.rs | 118 ++++++++++++------ .../torrent-repository/src/entry/torrent.rs | 54 ++++---- packages/torrent-repository/src/repository.rs | 10 +- .../tests/common/torrent.rs | 16 +-- .../tests/repository/mod.rs | 24 ++-- packages/tracker-core/src/announce_handler.rs | 2 +- packages/tracker-core/src/torrent/services.rs | 2 +- 7 files changed, 128 insertions(+), 98 deletions(-) diff --git a/packages/torrent-repository/src/entry/swarm.rs b/packages/torrent-repository/src/entry/swarm.rs index 05c09b68e..5d97655ea 100644 --- a/packages/torrent-repository/src/entry/swarm.rs +++ b/packages/torrent-repository/src/entry/swarm.rs @@ -16,7 +16,20 @@ pub struct Swarm { } impl Swarm { - pub fn handle_announce(&mut self, incoming_announce: Arc) -> Option> { + pub fn handle_announcement(&mut self, incoming_announce: &PeerAnnouncement) -> bool { + let mut downloads_increased: bool = false; + + let _previous_peer = match peer::ReadInfo::get_event(incoming_announce) { + AnnounceEvent::Started | AnnounceEvent::None | AnnounceEvent::Completed => { + self.upsert_peer(Arc::new(*incoming_announce), &mut downloads_increased) + } + AnnounceEvent::Stopped => self.remove(incoming_announce), + }; + + downloads_increased + } + + pub fn upsert_peer(&mut self, incoming_announce: Arc, downloads_increased: &mut bool) -> Option> { let is_now_seeder = incoming_announce.is_seeder(); let has_completed = incoming_announce.event == 
AnnounceEvent::Completed; @@ -37,6 +50,7 @@ impl Swarm { // Check if the peer has completed downloading the torrent. if has_completed && old_announce.event != AnnounceEvent::Completed { self.metadata.downloaded += 1; + *downloads_increased = true; } Some(old_announce) @@ -198,30 +212,33 @@ mod tests { #[test] fn it_should_allow_inserting_a_new_peer() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - assert_eq!(swarm.handle_announce(peer.into()), None); + assert_eq!(swarm.upsert_peer(peer.into(), &mut downloads_increased), None); } #[test] fn it_should_allow_updating_a_preexisting_peer() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); - assert_eq!(swarm.handle_announce(peer.into()), Some(Arc::new(peer))); + assert_eq!(swarm.upsert_peer(peer.into(), &mut downloads_increased), Some(Arc::new(peer))); } #[test] fn it_should_allow_getting_all_peers() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); assert_eq!(swarm.peers(None), [Arc::new(peer)]); } @@ -229,10 +246,11 @@ mod tests { #[test] fn it_should_allow_getting_one_peer_by_id() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); assert_eq!(swarm.get(&peer.peer_addr), Some(Arc::new(peer)).as_ref()); } @@ -240,10 +258,11 @@ mod tests { #[test] fn it_should_increase_the_number_of_peers_after_inserting_a_new_one() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - 
swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); assert_eq!(swarm.len(), 1); } @@ -251,10 +270,11 @@ mod tests { #[test] fn it_should_decrease_the_number_of_peers_after_removing_one() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); swarm.remove(&peer); @@ -264,10 +284,11 @@ mod tests { #[test] fn it_should_allow_removing_an_existing_peer() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); let old = swarm.remove(&peer); @@ -287,18 +308,19 @@ mod tests { #[test] fn it_should_allow_getting_all_peers_excluding_peers_with_a_given_address() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let peer1 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.handle_announce(peer1.into()); + swarm.upsert_peer(peer1.into(), &mut downloads_increased); let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); - swarm.handle_announce(peer2.into()); + swarm.upsert_peer(peer2.into(), &mut downloads_increased); assert_eq!(swarm.peers_excluding(&peer2.peer_addr, None), [Arc::new(peer1)]); } @@ -306,12 +328,13 @@ mod tests { #[test] fn it_should_remove_inactive_peers() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let one_second = DurationSinceUnixEpoch::new(1, 0); // Insert the peer let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = 
PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); // Remove peers not updated since one second after inserting the peer swarm.remove_inactive(last_update_time + one_second); @@ -322,12 +345,13 @@ mod tests { #[test] fn it_should_not_remove_active_peers() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let one_second = DurationSinceUnixEpoch::new(1, 0); // Insert the peer let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); // Remove peers not updated since one second before inserting the peer. swarm.remove_inactive(last_update_time - one_second); @@ -338,16 +362,17 @@ mod tests { #[test] fn it_should_allow_inserting_two_identical_peers_except_for_the_socket_address() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let peer1 = PeerBuilder::default() .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.handle_announce(peer1.into()); + swarm.upsert_peer(peer1.into(), &mut downloads_increased); let peer2 = PeerBuilder::default() .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); - swarm.handle_announce(peer2.into()); + swarm.upsert_peer(peer2.into(), &mut downloads_increased); assert_eq!(swarm.len(), 2); } @@ -355,6 +380,7 @@ mod tests { #[test] fn it_should_not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; // When that happens the peer ID will be changed in the swarm. // In practice, it's like if the peer had changed its ID. 
@@ -363,13 +389,13 @@ mod tests { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.handle_announce(peer1.into()); + swarm.upsert_peer(peer1.into(), &mut downloads_increased); let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.handle_announce(peer2.into()); + swarm.upsert_peer(peer2.into(), &mut downloads_increased); assert_eq!(swarm.len(), 1); } @@ -377,12 +403,13 @@ mod tests { #[test] fn it_should_return_the_metadata() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.handle_announce(seeder.into()); - swarm.handle_announce(leecher.into()); + swarm.upsert_peer(seeder.into(), &mut downloads_increased); + swarm.upsert_peer(leecher.into(), &mut downloads_increased); assert_eq!( swarm.metadata(), @@ -397,12 +424,13 @@ mod tests { #[test] fn it_should_return_the_number_of_seeders_in_the_list() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.handle_announce(seeder.into()); - swarm.handle_announce(leecher.into()); + swarm.upsert_peer(seeder.into(), &mut downloads_increased); + swarm.upsert_peer(leecher.into(), &mut downloads_increased); let (seeders, _leechers) = swarm.seeders_and_leechers(); @@ -412,12 +440,13 @@ mod tests { #[test] fn it_should_return_the_number_of_leechers_in_the_list() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.handle_announce(seeder.into()); - swarm.handle_announce(leecher.into()); + swarm.upsert_peer(seeder.into(), &mut downloads_increased); + 
swarm.upsert_peer(leecher.into(), &mut downloads_increased); let (_seeders, leechers) = swarm.seeders_and_leechers(); @@ -434,12 +463,13 @@ mod tests { #[test] fn it_should_increase_the_number_of_leechers_if_the_new_peer_is_a_leecher_() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let leechers = swarm.metadata().leechers(); let leecher = PeerBuilder::leecher().build(); - swarm.handle_announce(leecher.into()); + swarm.upsert_peer(leecher.into(), &mut downloads_increased); assert_eq!(swarm.metadata().leechers(), leechers + 1); } @@ -447,12 +477,13 @@ mod tests { #[test] fn it_should_increase_the_number_of_seeders_if_the_new_peer_is_a_seeder() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let seeders = swarm.metadata().seeders(); let seeder = PeerBuilder::seeder().build(); - swarm.handle_announce(seeder.into()); + swarm.upsert_peer(seeder.into(), &mut downloads_increased); assert_eq!(swarm.metadata().seeders(), seeders + 1); } @@ -461,12 +492,13 @@ mod tests { fn it_should_not_increasing_the_number_of_downloads_if_the_new_peer_has_completed_downloading_as_it_was_not_previously_known( ) { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let downloads = swarm.metadata().downloads(); let seeder = PeerBuilder::seeder().build(); - swarm.handle_announce(seeder.into()); + swarm.upsert_peer(seeder.into(), &mut downloads_increased); assert_eq!(swarm.metadata().downloads(), downloads); } @@ -480,10 +512,11 @@ mod tests { #[test] fn it_should_decrease_the_number_of_leechers_if_the_removed_peer_was_a_leecher() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let leecher = PeerBuilder::leecher().build(); - swarm.handle_announce(leecher.into()); + swarm.upsert_peer(leecher.into(), &mut downloads_increased); let leechers = swarm.metadata().leechers(); @@ -495,10 +528,11 @@ mod tests { #[test] fn it_should_decrease_the_number_of_seeders_if_the_removed_peer_was_a_seeder() { let 
mut swarm = Swarm::default(); + let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); - swarm.handle_announce(seeder.into()); + swarm.upsert_peer(seeder.into(), &mut downloads_increased); let seeders = swarm.metadata().seeders(); @@ -518,10 +552,11 @@ mod tests { #[test] fn it_should_decrease_the_number_of_leechers_when_a_removed_peer_is_a_leecher() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let leecher = PeerBuilder::leecher().build(); - swarm.handle_announce(leecher.into()); + swarm.upsert_peer(leecher.into(), &mut downloads_increased); let leechers = swarm.metadata().leechers(); @@ -533,10 +568,11 @@ mod tests { #[test] fn it_should_decrease_the_number_of_seeders_when_the_removed_peer_is_a_seeder() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); - swarm.handle_announce(seeder.into()); + swarm.upsert_peer(seeder.into(), &mut downloads_increased); let seeders = swarm.metadata().seeders(); @@ -555,17 +591,18 @@ mod tests { #[test] fn it_should_increase_seeders_and_decreasing_leechers_when_the_peer_changes_from_leecher_to_seeder_() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); let leechers = swarm.metadata().leechers(); let seeders = swarm.metadata().seeders(); peer.left = NumberOfBytes::new(0); // Convert to seeder - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); assert_eq!(swarm.metadata().seeders(), seeders + 1); assert_eq!(swarm.metadata().leechers(), leechers - 1); @@ -574,17 +611,18 @@ mod tests { #[test] fn it_should_increase_leechers_and_decreasing_seeders_when_the_peer_changes_from_seeder_to_leecher() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let mut peer = 
PeerBuilder::seeder().build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); let leechers = swarm.metadata().leechers(); let seeders = swarm.metadata().seeders(); peer.left = NumberOfBytes::new(10); // Convert to leecher - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); assert_eq!(swarm.metadata().leechers(), leechers + 1); assert_eq!(swarm.metadata().seeders(), seeders - 1); @@ -593,16 +631,17 @@ mod tests { #[test] fn it_should_increase_the_number_of_downloads_when_the_peer_announces_completed_downloading() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); let downloads = swarm.metadata().downloads(); peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); assert_eq!(swarm.metadata().downloads(), downloads + 1); } @@ -610,18 +649,19 @@ mod tests { #[test] fn it_should_not_increasing_the_number_of_downloads_when_the_peer_announces_completed_downloading_twice_() { let mut swarm = Swarm::default(); + let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); let downloads = swarm.metadata().downloads(); peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); - swarm.handle_announce(peer.into()); + swarm.upsert_peer(peer.into(), &mut downloads_increased); assert_eq!(swarm.metadata().downloads(), downloads + 1); } diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs index 3a895008f..b92ca5243 100644 --- 
a/packages/torrent-repository/src/entry/torrent.rs +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -2,7 +2,6 @@ use std::fmt::Debug; use std::net::SocketAddr; use std::sync::Arc; -use aquatic_udp_protocol::AnnounceEvent; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::peer::{self}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; @@ -10,35 +9,41 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::swarm::Swarm; -/// A data structure containing all the information about a torrent in the tracker. +/// A data structure containing all the information about a torrent in the +/// tracker. /// /// This is the tracker entry for a given torrent and contains the swarm data, /// that's the list of all the peers trying to download the same torrent. +/// /// The tracker keeps one entry like this for every torrent. #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct TrackedTorrent { - /// A network of peers that are all trying to download the torrent associated to this entry + /// A network of peers that are all trying to download the torrent. pub(crate) swarm: Swarm, - /// The number of peers that have ever completed downloading the torrent associated to this entry + /// The number of peers that have ever completed downloading the torrent. + /// This value is can be persistent so it's loaded from the database when + /// the tracker starts. 
pub(crate) downloaded: u32, } impl TrackedTorrent { - #[allow(clippy::cast_possible_truncation)] #[must_use] pub fn get_swarm_metadata(&self) -> SwarmMetadata { - let (seeders, leechers) = self.swarm.seeders_and_leechers(); + let metadata = self.swarm.metadata(); SwarmMetadata { downloaded: self.downloaded, - complete: seeders as u32, - incomplete: leechers as u32, + complete: metadata.complete, + incomplete: metadata.incomplete, } } + /// Returns true if the torrents meets the retention policy, meaning that + /// it should be kept in the tracker. #[must_use] pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + // code-review: why? if policy.persistent_torrent_completed_stat && self.downloaded > 0 { return true; } @@ -51,17 +56,17 @@ impl TrackedTorrent { } #[must_use] - pub fn peers_is_empty(&self) -> bool { + pub fn swarm_is_empty(&self) -> bool { self.swarm.is_empty() } #[must_use] - pub fn get_peers_len(&self) -> usize { + pub fn swarm_len(&self) -> usize { self.swarm.len() } #[must_use] - pub fn get_peers(&self, limit: Option) -> Vec> { + pub fn swarm_peers(&self, limit: Option) -> Vec> { self.swarm.peers(limit) } @@ -70,29 +75,14 @@ impl TrackedTorrent { self.swarm.peers_excluding(client, limit) } - pub fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { - let mut number_of_downloads_increased: bool = false; - - match peer::ReadInfo::get_event(peer) { - AnnounceEvent::Stopped => { - drop(self.swarm.remove(peer)); - } - AnnounceEvent::Completed => { - let previous = self.swarm.handle_announce(Arc::new(*peer)); - // Don't count if peer was not previously known and not already completed. - if previous.is_some_and(|p| p.event != AnnounceEvent::Completed) { - self.downloaded += 1; - number_of_downloads_increased = true; - } - } - _ => { - // `Started` event (first announced event) or - // `None` event (announcements done at regular intervals). 
- drop(self.swarm.handle_announce(Arc::new(*peer))); - } + pub fn handle_announcement(&mut self, peer: &peer::Peer) -> bool { + let downloads_increased = self.swarm.handle_announcement(peer); + + if downloads_increased { + self.downloaded += 1; } - number_of_downloads_increased + downloads_increased } pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index 0c387071c..69bfcf17b 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -46,7 +46,7 @@ impl TorrentRepository { if let Some(existing_entry) = self.torrents.get(info_hash) { tracing::debug!("Torrent already exists: {:?}", info_hash); - existing_entry.value().lock_or_panic().upsert_peer(peer) + existing_entry.value().lock_or_panic().handle_announcement(peer) } else { tracing::debug!("Inserting new torrent: {:?}", info_hash); @@ -66,7 +66,7 @@ impl TorrentRepository { let mut torrent_guard = inserted_entry.value().lock_or_panic(); - torrent_guard.upsert_peer(peer) + torrent_guard.handle_announcement(peer) } } @@ -202,7 +202,7 @@ impl TorrentRepository { pub fn get_torrent_peers(&self, info_hash: &InfoHash, limit: usize) -> Vec> { match self.get(info_hash) { None => vec![], - Some(entry) => entry.lock_or_panic().get_peers(Some(limit)), + Some(entry) => entry.lock_or_panic().swarm_peers(Some(limit)), } } @@ -573,8 +573,8 @@ mod tests { let torrent_entry_info = TorrentEntryInfo { swarm_metadata: torrent_guard.get_swarm_metadata(), - peers: torrent_guard.get_peers(None).iter().map(|peer| *peer.clone()).collect(), - number_of_peers: torrent_guard.get_peers_len(), + peers: torrent_guard.swarm_peers(None).iter().map(|peer| *peer.clone()).collect(), + number_of_peers: torrent_guard.swarm_len(), }; drop(torrent_guard); diff --git a/packages/torrent-repository/tests/common/torrent.rs 
b/packages/torrent-repository/tests/common/torrent.rs index ffa3c6d71..f8be53361 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -29,22 +29,22 @@ impl Torrent { pub(crate) fn peers_is_empty(&self) -> bool { match self { - Torrent::Single(entry) => entry.peers_is_empty(), - Torrent::MutexStd(entry) => entry.lock_or_panic().peers_is_empty(), + Torrent::Single(entry) => entry.swarm_is_empty(), + Torrent::MutexStd(entry) => entry.lock_or_panic().swarm_is_empty(), } } pub(crate) fn get_peers_len(&self) -> usize { match self { - Torrent::Single(entry) => entry.get_peers_len(), - Torrent::MutexStd(entry) => entry.lock_or_panic().get_peers_len(), + Torrent::Single(entry) => entry.swarm_len(), + Torrent::MutexStd(entry) => entry.lock_or_panic().swarm_len(), } } pub(crate) fn get_peers(&self, limit: Option) -> Vec> { match self { - Torrent::Single(entry) => entry.get_peers(limit), - Torrent::MutexStd(entry) => entry.lock_or_panic().get_peers(limit), + Torrent::Single(entry) => entry.swarm_peers(limit), + Torrent::MutexStd(entry) => entry.lock_or_panic().swarm_peers(limit), } } @@ -57,8 +57,8 @@ impl Torrent { pub(crate) fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { match self { - Torrent::Single(entry) => entry.upsert_peer(peer), - Torrent::MutexStd(entry) => entry.lock_or_panic().upsert_peer(peer), + Torrent::Single(entry) => entry.handle_announcement(peer), + Torrent::MutexStd(entry) => entry.lock_or_panic().handle_announcement(peer), } } diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 9701fc53d..40dcff6db 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -34,14 +34,14 @@ fn default() -> Entries { #[fixture] fn started() -> Entries { let mut torrent = TrackedTorrent::default(); - torrent.upsert_peer(&a_started_peer(1)); + 
torrent.handle_announcement(&a_started_peer(1)); vec![(InfoHash::default(), torrent)] } #[fixture] fn completed() -> Entries { let mut torrent = TrackedTorrent::default(); - torrent.upsert_peer(&a_completed_peer(2)); + torrent.handle_announcement(&a_completed_peer(2)); vec![(InfoHash::default(), torrent)] } @@ -49,10 +49,10 @@ fn completed() -> Entries { fn downloaded() -> Entries { let mut torrent = TrackedTorrent::default(); let mut peer = a_started_peer(3); - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); peer.event = AnnounceEvent::Completed; peer.left = NumberOfBytes::new(0); - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); vec![(InfoHash::default(), torrent)] } @@ -60,21 +60,21 @@ fn downloaded() -> Entries { fn three() -> Entries { let mut started = TrackedTorrent::default(); let started_h = &mut DefaultHasher::default(); - started.upsert_peer(&a_started_peer(1)); + started.handle_announcement(&a_started_peer(1)); started.hash(started_h); let mut completed = TrackedTorrent::default(); let completed_h = &mut DefaultHasher::default(); - completed.upsert_peer(&a_completed_peer(2)); + completed.handle_announcement(&a_completed_peer(2)); completed.hash(completed_h); let mut downloaded = TrackedTorrent::default(); let downloaded_h = &mut DefaultHasher::default(); let mut downloaded_peer = a_started_peer(3); - downloaded.upsert_peer(&downloaded_peer); + downloaded.handle_announcement(&downloaded_peer); downloaded_peer.event = AnnounceEvent::Completed; downloaded_peer.left = NumberOfBytes::new(0); - downloaded.upsert_peer(&downloaded_peer); + downloaded.handle_announcement(&downloaded_peer); downloaded.hash(downloaded_h); vec![ @@ -90,7 +90,7 @@ fn many_out_of_order() -> Entries { for i in 0..408 { let mut entry = TrackedTorrent::default(); - entry.upsert_peer(&a_started_peer(i)); + entry.handle_announcement(&a_started_peer(i)); entries.insert((InfoHash::from(&i), entry)); } @@ -105,7 +105,7 @@ fn many_hashed_in_order() -> 
Entries { for i in 0..408 { let mut entry = TrackedTorrent::default(); - entry.upsert_peer(&a_started_peer(i)); + entry.handle_announcement(&a_started_peer(i)); let hash: &mut DefaultHasher = &mut DefaultHasher::default(); hash.write_i32(i); @@ -457,7 +457,7 @@ async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: { let lock_tracked_torrent = repo.get(&info_hash).expect("it_should_get_some"); let entry = lock_tracked_torrent.lock_or_panic(); - assert!(entry.get_peers(None).contains(&peer.into())); + assert!(entry.swarm_peers(None).contains(&peer.into())); } // Remove peers that have not been updated since the timeout (120 seconds ago). @@ -469,7 +469,7 @@ async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: { let lock_tracked_torrent = repo.get(&info_hash).expect("it_should_get_some"); let entry = lock_tracked_torrent.lock_or_panic(); - assert!(!entry.get_peers(None).contains(&peer.into())); + assert!(!entry.swarm_peers(None).contains(&peer.into())); } } diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 6174190dc..ece0c87e6 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -660,7 +660,7 @@ mod tests { assert_eq!(torrent_entry.lock_or_panic().get_swarm_metadata().downloaded, 1); // It does not persist the peers - assert!(torrent_entry.lock_or_panic().peers_is_empty()); + assert!(torrent_entry.lock_or_panic().swarm_is_empty()); } } diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index 37846b4e3..b748cd3a0 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -101,7 +101,7 @@ pub fn get_torrent_info(in_memory_torrent_repository: &Arc Date: Tue, 6 May 2025 17:21:36 +0100 Subject: [PATCH 563/802] refactor: [#1495] make TrackedTorrent fields private --- 
packages/torrent-repository/src/entry/torrent.rs | 9 +++++++-- packages/torrent-repository/src/repository.rs | 16 ++-------------- 2 files changed, 9 insertions(+), 16 deletions(-) diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs index b92ca5243..25c76c25c 100644 --- a/packages/torrent-repository/src/entry/torrent.rs +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -19,15 +19,20 @@ use super::swarm::Swarm; #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct TrackedTorrent { /// A network of peers that are all trying to download the torrent. - pub(crate) swarm: Swarm, + swarm: Swarm, /// The number of peers that have ever completed downloading the torrent. /// This value is can be persistent so it's loaded from the database when /// the tracker starts. - pub(crate) downloaded: u32, + downloaded: u32, } impl TrackedTorrent { + #[must_use] + pub fn new(swarm: Swarm, downloaded: u32) -> Self { + Self { swarm, downloaded } + } + #[must_use] pub fn get_swarm_metadata(&self) -> SwarmMetadata { let metadata = self.swarm.metadata(); diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index 69bfcf17b..6977893b7 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -51,13 +51,7 @@ impl TorrentRepository { tracing::debug!("Inserting new torrent: {:?}", info_hash); let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { - TrackedTorrentHandle::new( - TrackedTorrent { - swarm: Swarm::default(), - downloaded: number_of_downloads, - } - .into(), - ) + TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::default(), number_of_downloads).into()) } else { TrackedTorrentHandle::default() }; @@ -235,13 +229,7 @@ impl TorrentRepository { continue; } - let entry = TrackedTorrentHandle::new( - TrackedTorrent { - swarm: Swarm::default(), - downloaded: 
*completed, - } - .into(), - ); + let entry = TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::default(), *completed).into()); // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. From 3fb117b2b78768d26d9db31df56c6dd59909932e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 17:31:15 +0100 Subject: [PATCH 564/802] refactor: [#1495] initialize number of downloads in Swarm to persisted value --- packages/torrent-repository/src/entry/swarm.rs | 8 ++++++++ packages/torrent-repository/src/repository.rs | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/packages/torrent-repository/src/entry/swarm.rs b/packages/torrent-repository/src/entry/swarm.rs index 5d97655ea..44cdaf7aa 100644 --- a/packages/torrent-repository/src/entry/swarm.rs +++ b/packages/torrent-repository/src/entry/swarm.rs @@ -16,6 +16,14 @@ pub struct Swarm { } impl Swarm { + #[must_use] + pub fn new(downloaded: u32) -> Self { + Self { + peers: BTreeMap::new(), + metadata: SwarmMetadata::new(downloaded, 0, 0), + } + } + pub fn handle_announcement(&mut self, incoming_announce: &PeerAnnouncement) -> bool { let mut downloads_increased: bool = false; diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index 6977893b7..fa3d77f95 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -51,7 +51,7 @@ impl TorrentRepository { tracing::debug!("Inserting new torrent: {:?}", info_hash); let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { - TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::default(), number_of_downloads).into()) + TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::new(number_of_downloads), number_of_downloads).into()) } else { TrackedTorrentHandle::default() }; From ec597f020e4d0a063ba9979cbe2038396272600c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 17:38:42 
+0100 Subject: [PATCH 565/802] refactor: [#1495] get the number of downloads from Swarm instead of from TrackedTorrent --- packages/torrent-repository/src/entry/torrent.rs | 8 +------- packages/torrent-repository/src/repository.rs | 2 +- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs index 25c76c25c..7a31ff5a0 100644 --- a/packages/torrent-repository/src/entry/torrent.rs +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -35,13 +35,7 @@ impl TrackedTorrent { #[must_use] pub fn get_swarm_metadata(&self) -> SwarmMetadata { - let metadata = self.swarm.metadata(); - - SwarmMetadata { - downloaded: self.downloaded, - complete: metadata.complete, - incomplete: metadata.incomplete, - } + self.swarm.metadata() } /// Returns true if the torrents meets the retention policy, meaning that diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index fa3d77f95..cb64474c8 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -229,7 +229,7 @@ impl TorrentRepository { continue; } - let entry = TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::default(), *completed).into()); + let entry = TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::new(*completed), *completed).into()); // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. 
From 23ce6e4731e617c455a760e586c614e332813881 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 17:41:56 +0100 Subject: [PATCH 566/802] refactor: [#1495]remove unused field in TrackedTorrent --- .../torrent-repository/src/entry/torrent.rs | 19 ++++--------------- packages/torrent-repository/src/repository.rs | 4 ++-- 2 files changed, 6 insertions(+), 17 deletions(-) diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs index 7a31ff5a0..c13db59a1 100644 --- a/packages/torrent-repository/src/entry/torrent.rs +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -20,17 +20,12 @@ use super::swarm::Swarm; pub struct TrackedTorrent { /// A network of peers that are all trying to download the torrent. swarm: Swarm, - - /// The number of peers that have ever completed downloading the torrent. - /// This value is can be persistent so it's loaded from the database when - /// the tracker starts. - downloaded: u32, } impl TrackedTorrent { #[must_use] - pub fn new(swarm: Swarm, downloaded: u32) -> Self { - Self { swarm, downloaded } + pub fn new(swarm: Swarm) -> Self { + Self { swarm } } #[must_use] @@ -43,7 +38,7 @@ impl TrackedTorrent { #[must_use] pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { // code-review: why? 
- if policy.persistent_torrent_completed_stat && self.downloaded > 0 { + if policy.persistent_torrent_completed_stat && self.get_swarm_metadata().downloaded > 0 { return true; } @@ -75,13 +70,7 @@ impl TrackedTorrent { } pub fn handle_announcement(&mut self, peer: &peer::Peer) -> bool { - let downloads_increased = self.swarm.handle_announcement(peer); - - if downloads_increased { - self.downloaded += 1; - } - - downloads_increased + self.swarm.handle_announcement(peer) } pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index cb64474c8..babca5f5d 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -51,7 +51,7 @@ impl TorrentRepository { tracing::debug!("Inserting new torrent: {:?}", info_hash); let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { - TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::new(number_of_downloads), number_of_downloads).into()) + TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::new(number_of_downloads)).into()) } else { TrackedTorrentHandle::default() }; @@ -229,7 +229,7 @@ impl TorrentRepository { continue; } - let entry = TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::new(*completed), *completed).into()); + let entry = TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::new(*completed)).into()); // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. 
From ef7292f424158b07789da2d9b883ac7a8853e230 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 17:52:00 +0100 Subject: [PATCH 567/802] refactor: [#1495] move logic from TrackedTorrent to Swarm --- packages/torrent-repository/src/entry/swarm.rs | 17 +++++++++++++++++ .../torrent-repository/src/entry/torrent.rs | 13 +------------ 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/packages/torrent-repository/src/entry/swarm.rs b/packages/torrent-repository/src/entry/swarm.rs index 44cdaf7aa..eb7aebfe4 100644 --- a/packages/torrent-repository/src/entry/swarm.rs +++ b/packages/torrent-repository/src/entry/swarm.rs @@ -5,6 +5,7 @@ use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::AnnounceEvent; +use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::peer::{self, Peer, PeerAnnouncement}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; @@ -188,6 +189,22 @@ impl Swarm { pub fn is_empty(&self) -> bool { self.peers.is_empty() } + + /// Returns true if the torrents meets the retention policy, meaning that + /// it should be kept in the tracker. + #[must_use] + pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + // code-review: why? + if policy.persistent_torrent_completed_stat && self.metadata().downloaded > 0 { + return true; + } + + if policy.remove_peerless_torrents && self.is_empty() { + return false; + } + + true + } } #[cfg(test)] diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs index c13db59a1..44d5f226a 100644 --- a/packages/torrent-repository/src/entry/torrent.rs +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -33,20 +33,9 @@ impl TrackedTorrent { self.swarm.metadata() } - /// Returns true if the torrents meets the retention policy, meaning that - /// it should be kept in the tracker. 
#[must_use] pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { - // code-review: why? - if policy.persistent_torrent_completed_stat && self.get_swarm_metadata().downloaded > 0 { - return true; - } - - if policy.remove_peerless_torrents && self.swarm.is_empty() { - return false; - } - - true + self.swarm.meets_retaining_policy(policy) } #[must_use] From b6afed5c9f2900d41c02478d73aaa7f53f70b6fa Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 17:54:05 +0100 Subject: [PATCH 568/802] refactor: [#1495] rename methods --- .../torrent-repository/src/entry/torrent.rs | 12 +++++----- packages/torrent-repository/src/repository.rs | 16 ++++++------- .../tests/common/torrent.rs | 24 +++++++++---------- .../tests/repository/mod.rs | 6 ++--- packages/tracker-core/src/announce_handler.rs | 4 ++-- packages/tracker-core/src/torrent/manager.rs | 2 +- packages/tracker-core/src/torrent/services.rs | 8 +++---- 7 files changed, 36 insertions(+), 36 deletions(-) diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs index 44d5f226a..69a809a37 100644 --- a/packages/torrent-repository/src/entry/torrent.rs +++ b/packages/torrent-repository/src/entry/torrent.rs @@ -29,7 +29,7 @@ impl TrackedTorrent { } #[must_use] - pub fn get_swarm_metadata(&self) -> SwarmMetadata { + pub fn metadata(&self) -> SwarmMetadata { self.swarm.metadata() } @@ -39,22 +39,22 @@ impl TrackedTorrent { } #[must_use] - pub fn swarm_is_empty(&self) -> bool { + pub fn is_empty(&self) -> bool { self.swarm.is_empty() } #[must_use] - pub fn swarm_len(&self) -> usize { + pub fn len(&self) -> usize { self.swarm.len() } #[must_use] - pub fn swarm_peers(&self, limit: Option) -> Vec> { + pub fn peers(&self, limit: Option) -> Vec> { self.swarm.peers(limit) } #[must_use] - pub fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + pub fn peers_excluding(&self, client: &SocketAddr, limit: Option) -> Vec> { 
self.swarm.peers_excluding(client, limit) } @@ -62,7 +62,7 @@ impl TrackedTorrent { self.swarm.handle_announcement(peer) } - pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { + pub fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) { self.swarm.remove_inactive(current_cutoff); } } diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index babca5f5d..1706937fc 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -84,7 +84,7 @@ impl TorrentRepository { /// This function panics if the lock for the entry cannot be obtained. pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { for entry in &self.torrents { - entry.value().lock_or_panic().remove_inactive_peers(current_cutoff); + entry.value().lock_or_panic().remove_inactive(current_cutoff); } } @@ -139,7 +139,7 @@ impl TorrentRepository { pub fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { self.torrents .get(info_hash) - .map(|entry| entry.value().lock_or_panic().get_swarm_metadata()) + .map(|entry| entry.value().lock_or_panic().metadata()) } /// Retrieves swarm metadata for a given torrent. 
@@ -175,7 +175,7 @@ impl TorrentRepository { pub fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { match self.get(info_hash) { None => vec![], - Some(entry) => entry.lock_or_panic().get_peers_for_client(&peer.peer_addr, Some(limit)), + Some(entry) => entry.lock_or_panic().peers_excluding(&peer.peer_addr, Some(limit)), } } @@ -196,7 +196,7 @@ impl TorrentRepository { pub fn get_torrent_peers(&self, info_hash: &InfoHash, limit: usize) -> Vec> { match self.get(info_hash) { None => vec![], - Some(entry) => entry.lock_or_panic().swarm_peers(Some(limit)), + Some(entry) => entry.lock_or_panic().peers(Some(limit)), } } @@ -255,7 +255,7 @@ impl TorrentRepository { let mut metrics = AggregateSwarmMetadata::default(); for entry in &self.torrents { - let stats = entry.value().lock_or_panic().get_swarm_metadata(); + let stats = entry.value().lock_or_panic().metadata(); metrics.total_complete += u64::from(stats.complete); metrics.total_downloaded += u64::from(stats.downloaded); metrics.total_incomplete += u64::from(stats.incomplete); @@ -560,9 +560,9 @@ mod tests { let torrent_guard = self.lock_or_panic(); let torrent_entry_info = TorrentEntryInfo { - swarm_metadata: torrent_guard.get_swarm_metadata(), - peers: torrent_guard.swarm_peers(None).iter().map(|peer| *peer.clone()).collect(), - number_of_peers: torrent_guard.swarm_len(), + swarm_metadata: torrent_guard.metadata(), + peers: torrent_guard.peers(None).iter().map(|peer| *peer.clone()).collect(), + number_of_peers: torrent_guard.len(), }; drop(torrent_guard); diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs index f8be53361..242ffec70 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -15,8 +15,8 @@ pub(crate) enum Torrent { impl Torrent { pub(crate) fn get_stats(&self) -> SwarmMetadata { match self { - Torrent::Single(entry) => 
entry.get_swarm_metadata(), - Torrent::MutexStd(entry) => entry.lock_or_panic().get_swarm_metadata(), + Torrent::Single(entry) => entry.metadata(), + Torrent::MutexStd(entry) => entry.lock_or_panic().metadata(), } } @@ -29,29 +29,29 @@ impl Torrent { pub(crate) fn peers_is_empty(&self) -> bool { match self { - Torrent::Single(entry) => entry.swarm_is_empty(), - Torrent::MutexStd(entry) => entry.lock_or_panic().swarm_is_empty(), + Torrent::Single(entry) => entry.is_empty(), + Torrent::MutexStd(entry) => entry.lock_or_panic().is_empty(), } } pub(crate) fn get_peers_len(&self) -> usize { match self { - Torrent::Single(entry) => entry.swarm_len(), - Torrent::MutexStd(entry) => entry.lock_or_panic().swarm_len(), + Torrent::Single(entry) => entry.len(), + Torrent::MutexStd(entry) => entry.lock_or_panic().len(), } } pub(crate) fn get_peers(&self, limit: Option) -> Vec> { match self { - Torrent::Single(entry) => entry.swarm_peers(limit), - Torrent::MutexStd(entry) => entry.lock_or_panic().swarm_peers(limit), + Torrent::Single(entry) => entry.peers(limit), + Torrent::MutexStd(entry) => entry.lock_or_panic().peers(limit), } } pub(crate) fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { match self { - Torrent::Single(entry) => entry.get_peers_for_client(client, limit), - Torrent::MutexStd(entry) => entry.lock_or_panic().get_peers_for_client(client, limit), + Torrent::Single(entry) => entry.peers_excluding(client, limit), + Torrent::MutexStd(entry) => entry.lock_or_panic().peers_excluding(client, limit), } } @@ -64,8 +64,8 @@ impl Torrent { pub(crate) fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { match self { - Torrent::Single(entry) => entry.remove_inactive_peers(current_cutoff), - Torrent::MutexStd(entry) => entry.lock_or_panic().remove_inactive_peers(current_cutoff), + Torrent::Single(entry) => entry.remove_inactive(current_cutoff), + Torrent::MutexStd(entry) => entry.lock_or_panic().remove_inactive(current_cutoff), 
} } } diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 40dcff6db..783606a40 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -320,7 +320,7 @@ async fn it_should_get_metrics(#[values(skip_list_mutex_std())] repo: TorrentRep let mut metrics = AggregateSwarmMetadata::default(); for (_, torrent) in entries { - let stats = torrent.get_swarm_metadata(); + let stats = torrent.metadata(); metrics.total_torrents += 1; metrics.total_incomplete += u64::from(stats.incomplete); @@ -457,7 +457,7 @@ async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: { let lock_tracked_torrent = repo.get(&info_hash).expect("it_should_get_some"); let entry = lock_tracked_torrent.lock_or_panic(); - assert!(entry.swarm_peers(None).contains(&peer.into())); + assert!(entry.peers(None).contains(&peer.into())); } // Remove peers that have not been updated since the timeout (120 seconds ago). @@ -469,7 +469,7 @@ async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: { let lock_tracked_torrent = repo.get(&info_hash).expect("it_should_get_some"); let entry = lock_tracked_torrent.lock_or_panic(); - assert!(!entry.swarm_peers(None).contains(&peer.into())); + assert!(!entry.peers(None).contains(&peer.into())); } } diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index ece0c87e6..fac0a38c8 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -657,10 +657,10 @@ mod tests { .expect("it should be able to get entry"); // It persists the number of completed peers. 
- assert_eq!(torrent_entry.lock_or_panic().get_swarm_metadata().downloaded, 1); + assert_eq!(torrent_entry.lock_or_panic().metadata().downloaded, 1); // It does not persist the peers - assert!(torrent_entry.lock_or_panic().swarm_is_empty()); + assert!(torrent_entry.lock_or_panic().is_empty()); } } diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index ae7c61741..5c8352f11 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -165,7 +165,7 @@ mod tests { .get(&infohash) .unwrap() .lock_or_panic() - .get_swarm_metadata() + .metadata() .downloaded, 1 ); diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index b748cd3a0..a35fd7aed 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -99,9 +99,9 @@ pub fn get_torrent_info(in_memory_torrent_repository: &Arc = vec![]; for (info_hash, torrent_entry) in in_memory_torrent_repository.get_paginated(pagination) { - let stats = torrent_entry.lock_or_panic().get_swarm_metadata(); + let stats = torrent_entry.lock_or_panic().metadata(); basic_infos.push(BasicInfo { info_hash, @@ -184,7 +184,7 @@ pub fn get_torrents(in_memory_torrent_repository: &Arc Date: Tue, 6 May 2025 18:05:59 +0100 Subject: [PATCH 569/802] refactor: [#1495] remove unneeded TrackedTorrent (wrapper over Swarm) --- packages/torrent-repository/src/entry/mod.rs | 1 - .../torrent-repository/src/entry/torrent.rs | 68 ------------------- packages/torrent-repository/src/lib.rs | 10 +-- packages/torrent-repository/src/repository.rs | 5 +- .../tests/common/torrent.rs | 2 +- .../torrent-repository/tests/entry/mod.rs | 2 +- .../tests/repository/mod.rs | 30 ++++---- 7 files changed, 24 insertions(+), 94 deletions(-) delete mode 100644 packages/torrent-repository/src/entry/torrent.rs diff --git a/packages/torrent-repository/src/entry/mod.rs 
b/packages/torrent-repository/src/entry/mod.rs index 94fdcc58e..899c10d57 100644 --- a/packages/torrent-repository/src/entry/mod.rs +++ b/packages/torrent-repository/src/entry/mod.rs @@ -1,2 +1 @@ pub mod swarm; -pub mod torrent; diff --git a/packages/torrent-repository/src/entry/torrent.rs b/packages/torrent-repository/src/entry/torrent.rs deleted file mode 100644 index 69a809a37..000000000 --- a/packages/torrent-repository/src/entry/torrent.rs +++ /dev/null @@ -1,68 +0,0 @@ -use std::fmt::Debug; -use std::net::SocketAddr; -use std::sync::Arc; - -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::peer::{self}; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::DurationSinceUnixEpoch; - -use super::swarm::Swarm; - -/// A data structure containing all the information about a torrent in the -/// tracker. -/// -/// This is the tracker entry for a given torrent and contains the swarm data, -/// that's the list of all the peers trying to download the same torrent. -/// -/// The tracker keeps one entry like this for every torrent. -#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct TrackedTorrent { - /// A network of peers that are all trying to download the torrent. 
- swarm: Swarm, -} - -impl TrackedTorrent { - #[must_use] - pub fn new(swarm: Swarm) -> Self { - Self { swarm } - } - - #[must_use] - pub fn metadata(&self) -> SwarmMetadata { - self.swarm.metadata() - } - - #[must_use] - pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { - self.swarm.meets_retaining_policy(policy) - } - - #[must_use] - pub fn is_empty(&self) -> bool { - self.swarm.is_empty() - } - - #[must_use] - pub fn len(&self) -> usize { - self.swarm.len() - } - - #[must_use] - pub fn peers(&self, limit: Option) -> Vec> { - self.swarm.peers(limit) - } - - #[must_use] - pub fn peers_excluding(&self, client: &SocketAddr, limit: Option) -> Vec> { - self.swarm.peers_excluding(client, limit) - } - - pub fn handle_announcement(&mut self, peer: &peer::Peer) -> bool { - self.swarm.handle_announcement(peer) - } - - pub fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) { - self.swarm.remove_inactive(current_cutoff); - } -} diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index d7042a1fd..12b205681 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -6,8 +6,8 @@ use std::sync::{Arc, Mutex, MutexGuard}; use torrust_tracker_clock::clock; pub type TorrentRepository = repository::TorrentRepository; -pub type TrackedTorrentHandle = Arc>; -pub type TrackedTorrent = entry::torrent::TrackedTorrent; +pub type TrackedTorrentHandle = Arc>; +pub type Swarm = entry::swarm::Swarm; /// Working version, for production. 
#[cfg(not(test))] @@ -20,11 +20,11 @@ pub(crate) type CurrentClock = clock::Working; pub(crate) type CurrentClock = clock::Stopped; pub trait LockTrackedTorrent { - fn lock_or_panic(&self) -> MutexGuard<'_, TrackedTorrent>; + fn lock_or_panic(&self) -> MutexGuard<'_, Swarm>; } -impl LockTrackedTorrent for Arc> { - fn lock_or_panic(&self) -> MutexGuard<'_, TrackedTorrent> { +impl LockTrackedTorrent for Arc> { + fn lock_or_panic(&self) -> MutexGuard<'_, Swarm> { self.lock().expect("can't acquire lock for tracked torrent handle") } } diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index 1706937fc..2a5a38a3f 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -8,7 +8,6 @@ use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMe use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use crate::entry::swarm::Swarm; -use crate::entry::torrent::TrackedTorrent; use crate::{LockTrackedTorrent, TrackedTorrentHandle}; #[derive(Default, Debug)] @@ -51,7 +50,7 @@ impl TorrentRepository { tracing::debug!("Inserting new torrent: {:?}", info_hash); let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { - TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::new(number_of_downloads)).into()) + TrackedTorrentHandle::new(Swarm::new(number_of_downloads).into()) } else { TrackedTorrentHandle::default() }; @@ -229,7 +228,7 @@ impl TorrentRepository { continue; } - let entry = TrackedTorrentHandle::new(TrackedTorrent::new(Swarm::new(*completed)).into()); + let entry = TrackedTorrentHandle::new(Swarm::new(*completed).into()); // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. 
diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs index 242ffec70..e991cc7c9 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -8,7 +8,7 @@ use torrust_tracker_torrent_repository::{entry, LockTrackedTorrent, TrackedTorre #[derive(Debug, Clone)] pub(crate) enum Torrent { - Single(entry::torrent::TrackedTorrent), + Single(entry::swarm::Swarm), MutexStd(TrackedTorrentHandle), } diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index 5f958f05c..ab1848ed1 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -17,7 +17,7 @@ use crate::CurrentClock; #[fixture] fn single() -> Torrent { - Torrent::Single(entry::torrent::TrackedTorrent::default()) + Torrent::Single(entry::swarm::Swarm::default()) } #[fixture] fn mutex_std() -> Torrent { diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 783606a40..3515a38cc 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -9,7 +9,7 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::PersistentTorrents; -use torrust_tracker_torrent_repository::entry::torrent::TrackedTorrent; +use torrust_tracker_torrent_repository::entry::swarm::Swarm; use torrust_tracker_torrent_repository::{LockTrackedTorrent, TorrentRepository}; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; @@ -19,7 +19,7 @@ fn skip_list_mutex_std() -> TorrentRepository { TorrentRepository::default() } -type Entries = Vec<(InfoHash, TrackedTorrent)>; +type Entries = Vec<(InfoHash, Swarm)>; #[fixture] 
fn empty() -> Entries { @@ -28,26 +28,26 @@ fn empty() -> Entries { #[fixture] fn default() -> Entries { - vec![(InfoHash::default(), TrackedTorrent::default())] + vec![(InfoHash::default(), Swarm::default())] } #[fixture] fn started() -> Entries { - let mut torrent = TrackedTorrent::default(); + let mut torrent = Swarm::default(); torrent.handle_announcement(&a_started_peer(1)); vec![(InfoHash::default(), torrent)] } #[fixture] fn completed() -> Entries { - let mut torrent = TrackedTorrent::default(); + let mut torrent = Swarm::default(); torrent.handle_announcement(&a_completed_peer(2)); vec![(InfoHash::default(), torrent)] } #[fixture] fn downloaded() -> Entries { - let mut torrent = TrackedTorrent::default(); + let mut torrent = Swarm::default(); let mut peer = a_started_peer(3); torrent.handle_announcement(&peer); peer.event = AnnounceEvent::Completed; @@ -58,17 +58,17 @@ fn downloaded() -> Entries { #[fixture] fn three() -> Entries { - let mut started = TrackedTorrent::default(); + let mut started = Swarm::default(); let started_h = &mut DefaultHasher::default(); started.handle_announcement(&a_started_peer(1)); started.hash(started_h); - let mut completed = TrackedTorrent::default(); + let mut completed = Swarm::default(); let completed_h = &mut DefaultHasher::default(); completed.handle_announcement(&a_completed_peer(2)); completed.hash(completed_h); - let mut downloaded = TrackedTorrent::default(); + let mut downloaded = Swarm::default(); let downloaded_h = &mut DefaultHasher::default(); let mut downloaded_peer = a_started_peer(3); downloaded.handle_announcement(&downloaded_peer); @@ -86,10 +86,10 @@ fn three() -> Entries { #[fixture] fn many_out_of_order() -> Entries { - let mut entries: HashSet<(InfoHash, TrackedTorrent)> = HashSet::default(); + let mut entries: HashSet<(InfoHash, Swarm)> = HashSet::default(); for i in 0..408 { - let mut entry = TrackedTorrent::default(); + let mut entry = Swarm::default(); entry.handle_announcement(&a_started_peer(i)); 
entries.insert((InfoHash::from(&i), entry)); @@ -101,10 +101,10 @@ fn many_out_of_order() -> Entries { #[fixture] fn many_hashed_in_order() -> Entries { - let mut entries: BTreeMap = BTreeMap::default(); + let mut entries: BTreeMap = BTreeMap::default(); for i in 0..408 { - let mut entry = TrackedTorrent::default(); + let mut entry = Swarm::default(); entry.handle_announcement(&a_started_peer(i)); let hash: &mut DefaultHasher = &mut DefaultHasher::default(); @@ -269,7 +269,7 @@ async fn it_should_get_paginated( match paginated { // it should return empty if limit is zero. Pagination { limit: 0, .. } => { - let torrents: Vec<(InfoHash, TrackedTorrent)> = repo + let torrents: Vec<(InfoHash, Swarm)> = repo .get_paginated(Some(&paginated)) .iter() .map(|(i, lock_tracked_torrent)| (*i, lock_tracked_torrent.lock_or_panic().clone())) @@ -492,7 +492,7 @@ async fn it_should_remove_peerless_torrents( repo.remove_peerless_torrents(&policy); - let torrents: Vec<(InfoHash, TrackedTorrent)> = repo + let torrents: Vec<(InfoHash, Swarm)> = repo .get_paginated(None) .iter() .map(|(i, lock_tracked_torrent)| (*i, lock_tracked_torrent.lock_or_panic().clone())) From 030ae26bd27a8742c757badb992d807d9af7171b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 18:12:14 +0100 Subject: [PATCH 570/802] refactor: [#1495] reorganize torrent-repository mod --- packages/torrent-repository/src/entry/mod.rs | 1 - packages/torrent-repository/src/lib.rs | 4 ++-- packages/torrent-repository/src/repository.rs | 2 +- packages/torrent-repository/src/{entry => }/swarm.rs | 10 +++++----- packages/torrent-repository/tests/common/torrent.rs | 4 ++-- packages/torrent-repository/tests/entry/mod.rs | 4 ++-- packages/torrent-repository/tests/repository/mod.rs | 2 +- 7 files changed, 13 insertions(+), 14 deletions(-) delete mode 100644 packages/torrent-repository/src/entry/mod.rs rename packages/torrent-repository/src/{entry => }/swarm.rs (99%) diff --git 
a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs deleted file mode 100644 index 899c10d57..000000000 --- a/packages/torrent-repository/src/entry/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod swarm; diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index 12b205681..3748cb171 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -1,5 +1,5 @@ -pub mod entry; pub mod repository; +pub mod swarm; use std::sync::{Arc, Mutex, MutexGuard}; @@ -7,7 +7,7 @@ use torrust_tracker_clock::clock; pub type TorrentRepository = repository::TorrentRepository; pub type TrackedTorrentHandle = Arc>; -pub type Swarm = entry::swarm::Swarm; +pub type Swarm = swarm::Swarm; /// Working version, for production. #[cfg(not(test))] diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index 2a5a38a3f..2c1330c20 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -7,7 +7,7 @@ use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use crate::entry::swarm::Swarm; +use crate::swarm::Swarm; use crate::{LockTrackedTorrent, TrackedTorrentHandle}; #[derive(Default, Debug)] diff --git a/packages/torrent-repository/src/entry/swarm.rs b/packages/torrent-repository/src/swarm.rs similarity index 99% rename from packages/torrent-repository/src/entry/swarm.rs rename to packages/torrent-repository/src/swarm.rs index eb7aebfe4..78602f3d9 100644 --- a/packages/torrent-repository/src/entry/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -218,7 +218,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use 
torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::entry::swarm::Swarm; + use crate::swarm::Swarm; #[test] fn it_should_be_empty_when_no_peers_have_been_inserted() { @@ -483,7 +483,7 @@ mod tests { mod when_a_new_peer_is_added { use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::entry::swarm::Swarm; + use crate::swarm::Swarm; #[test] fn it_should_increase_the_number_of_leechers_if_the_new_peer_is_a_leecher_() { @@ -532,7 +532,7 @@ mod tests { mod when_a_peer_is_removed { use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::entry::swarm::Swarm; + use crate::swarm::Swarm; #[test] fn it_should_decrease_the_number_of_leechers_if_the_removed_peer_was_a_leecher() { @@ -572,7 +572,7 @@ mod tests { use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::entry::swarm::Swarm; + use crate::swarm::Swarm; #[test] fn it_should_decrease_the_number_of_leechers_when_a_removed_peer_is_a_leecher() { @@ -611,7 +611,7 @@ mod tests { use aquatic_udp_protocol::NumberOfBytes; use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::entry::swarm::Swarm; + use crate::swarm::Swarm; #[test] fn it_should_increase_seeders_and_decreasing_leechers_when_the_peer_changes_from_leecher_to_seeder_() { diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs index e991cc7c9..197032cb4 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -4,11 +4,11 @@ use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; -use torrust_tracker_torrent_repository::{entry, LockTrackedTorrent, TrackedTorrentHandle}; +use torrust_tracker_torrent_repository::{swarm, LockTrackedTorrent, TrackedTorrentHandle}; #[derive(Debug, Clone)] pub(crate) enum Torrent { - 
Single(entry::swarm::Swarm), + Single(swarm::Swarm), MutexStd(TrackedTorrentHandle), } diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index ab1848ed1..9b16f8c4a 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -9,7 +9,7 @@ use torrust_tracker_clock::clock::{self, Time as _}; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::peer; use torrust_tracker_primitives::peer::Peer; -use torrust_tracker_torrent_repository::{entry, TrackedTorrentHandle}; +use torrust_tracker_torrent_repository::{swarm, TrackedTorrentHandle}; use crate::common::torrent::Torrent; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; @@ -17,7 +17,7 @@ use crate::CurrentClock; #[fixture] fn single() -> Torrent { - Torrent::Single(entry::swarm::Swarm::default()) + Torrent::Single(swarm::Swarm::default()) } #[fixture] fn mutex_std() -> Torrent { diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 3515a38cc..1595db335 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -9,7 +9,7 @@ use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::PersistentTorrents; -use torrust_tracker_torrent_repository::entry::swarm::Swarm; +use torrust_tracker_torrent_repository::swarm::Swarm; use torrust_tracker_torrent_repository::{LockTrackedTorrent, TorrentRepository}; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; From 78d4b83b4e3ab36bd9f8252768142b09f74c6786 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 18:13:09 +0100 Subject: [PATCH 571/802] refactor: [#1495] rename 
TrackedTorrentHandle to SwarmHandle --- packages/torrent-repository/src/lib.rs | 2 +- packages/torrent-repository/src/repository.rs | 20 +++++++++---------- .../tests/common/torrent.rs | 4 ++-- .../torrent-repository/tests/entry/mod.rs | 4 ++-- .../src/torrent/repository/in_memory.rs | 8 ++++---- 5 files changed, 19 insertions(+), 19 deletions(-) diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index 3748cb171..76ef6c784 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -6,7 +6,7 @@ use std::sync::{Arc, Mutex, MutexGuard}; use torrust_tracker_clock::clock; pub type TorrentRepository = repository::TorrentRepository; -pub type TrackedTorrentHandle = Arc>; +pub type SwarmHandle = Arc>; pub type Swarm = swarm::Swarm; /// Working version, for production. diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/repository.rs index 2c1330c20..fd30b4714 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/repository.rs @@ -8,11 +8,11 @@ use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMe use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use crate::swarm::Swarm; -use crate::{LockTrackedTorrent, TrackedTorrentHandle}; +use crate::{LockTrackedTorrent, SwarmHandle}; #[derive(Default, Debug)] pub struct TorrentRepository { - pub torrents: SkipMap, + pub torrents: SkipMap, } impl TorrentRepository { @@ -50,9 +50,9 @@ impl TorrentRepository { tracing::debug!("Inserting new torrent: {:?}", info_hash); let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { - TrackedTorrentHandle::new(Swarm::new(number_of_downloads).into()) + SwarmHandle::new(Swarm::new(number_of_downloads).into()) } else { - TrackedTorrentHandle::default() + SwarmHandle::default() }; let inserted_entry = self.torrents.get_or_insert(*info_hash, 
new_entry); @@ -69,7 +69,7 @@ impl TorrentRepository { /// /// An `Option` containing the removed torrent entry if it existed. #[must_use] - pub fn remove(&self, key: &InfoHash) -> Option { + pub fn remove(&self, key: &InfoHash) -> Option { self.torrents.remove(key).map(|entry| entry.value().clone()) } @@ -93,7 +93,7 @@ impl TorrentRepository { /// /// An `Option` containing the tracked torrent handle if found. #[must_use] - pub fn get(&self, key: &InfoHash) -> Option { + pub fn get(&self, key: &InfoHash) -> Option { let maybe_entry = self.torrents.get(key); maybe_entry.map(|entry| entry.value().clone()) } @@ -108,7 +108,7 @@ impl TorrentRepository { /// /// A vector of `(InfoHash, TorrentEntry)` tuples. #[must_use] - pub fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, TrackedTorrentHandle)> { + pub fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, SwarmHandle)> { match pagination { Some(pagination) => self .torrents @@ -228,7 +228,7 @@ impl TorrentRepository { continue; } - let entry = TrackedTorrentHandle::new(Swarm::new(*completed).into()); + let entry = SwarmHandle::new(Swarm::new(*completed).into()); // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. @@ -541,7 +541,7 @@ mod tests { use crate::repository::TorrentRepository; use crate::tests::{sample_info_hash, sample_peer}; - use crate::{LockTrackedTorrent, TrackedTorrentHandle}; + use crate::{LockTrackedTorrent, SwarmHandle}; /// `TorrentEntry` data is not directly accessible. It's only /// accessible through the trait methods. 
We need this temporary @@ -554,7 +554,7 @@ mod tests { } #[allow(clippy::from_over_into)] - impl Into for TrackedTorrentHandle { + impl Into for SwarmHandle { fn into(self) -> TorrentEntryInfo { let torrent_guard = self.lock_or_panic(); diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs index 197032cb4..a1899621f 100644 --- a/packages/torrent-repository/tests/common/torrent.rs +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -4,12 +4,12 @@ use std::sync::Arc; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; -use torrust_tracker_torrent_repository::{swarm, LockTrackedTorrent, TrackedTorrentHandle}; +use torrust_tracker_torrent_repository::{swarm, LockTrackedTorrent, SwarmHandle}; #[derive(Debug, Clone)] pub(crate) enum Torrent { Single(swarm::Swarm), - MutexStd(TrackedTorrentHandle), + MutexStd(SwarmHandle), } impl Torrent { diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index 9b16f8c4a..4607fd9c7 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -9,7 +9,7 @@ use torrust_tracker_clock::clock::{self, Time as _}; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::peer; use torrust_tracker_primitives::peer::Peer; -use torrust_tracker_torrent_repository::{swarm, TrackedTorrentHandle}; +use torrust_tracker_torrent_repository::{swarm, SwarmHandle}; use crate::common::torrent::Torrent; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; @@ -21,7 +21,7 @@ fn single() -> Torrent { } #[fixture] fn mutex_std() -> Torrent { - Torrent::MutexStd(TrackedTorrentHandle::default()) + Torrent::MutexStd(SwarmHandle::default()) } #[fixture] diff --git 
a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index e362b20c1..98d7eb682 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -7,7 +7,7 @@ use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use torrust_tracker_torrent_repository::{TorrentRepository, TrackedTorrentHandle}; +use torrust_tracker_torrent_repository::{SwarmHandle, TorrentRepository}; /// In-memory repository for torrent entries. /// @@ -64,7 +64,7 @@ impl InMemoryTorrentRepository { /// An `Option` containing the removed torrent entry if it existed. #[cfg(test)] #[must_use] - pub(crate) fn remove(&self, key: &InfoHash) -> Option { + pub(crate) fn remove(&self, key: &InfoHash) -> Option { self.torrents.remove(key) } @@ -104,7 +104,7 @@ impl InMemoryTorrentRepository { /// /// An `Option` containing the torrent entry if found. #[must_use] - pub(crate) fn get(&self, key: &InfoHash) -> Option { + pub(crate) fn get(&self, key: &InfoHash) -> Option { self.torrents.get(key) } @@ -122,7 +122,7 @@ impl InMemoryTorrentRepository { /// /// A vector of `(InfoHash, TorrentEntry)` tuples. 
#[must_use] - pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, TrackedTorrentHandle)> { + pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, SwarmHandle)> { self.torrents.get_paginated(pagination) } From 0411a9a464554e039cbdd806c95b9bdd443ef155 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 18:16:05 +0100 Subject: [PATCH 572/802] refactor: [#1495] rename TorrentRepository to Swarms --- packages/torrent-repository/src/lib.rs | 4 +- .../src/{repository.rs => swarms.rs} | 108 +++++++++--------- .../tests/repository/mod.rs | 26 ++--- .../src/torrent/repository/in_memory.rs | 4 +- 4 files changed, 71 insertions(+), 71 deletions(-) rename packages/torrent-repository/src/{repository.rs => swarms.rs} (90%) diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index 76ef6c784..f120afe88 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -1,11 +1,11 @@ -pub mod repository; +pub mod swarms; pub mod swarm; use std::sync::{Arc, Mutex, MutexGuard}; use torrust_tracker_clock::clock; -pub type TorrentRepository = repository::TorrentRepository; +pub type Swarms = swarms::Swarms; pub type SwarmHandle = Arc>; pub type Swarm = swarm::Swarm; diff --git a/packages/torrent-repository/src/repository.rs b/packages/torrent-repository/src/swarms.rs similarity index 90% rename from packages/torrent-repository/src/repository.rs rename to packages/torrent-repository/src/swarms.rs index fd30b4714..b5b891a2b 100644 --- a/packages/torrent-repository/src/repository.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -11,11 +11,11 @@ use crate::swarm::Swarm; use crate::{LockTrackedTorrent, SwarmHandle}; #[derive(Default, Debug)] -pub struct TorrentRepository { - pub torrents: SkipMap, +pub struct Swarms { + pub swarms: SkipMap, } -impl TorrentRepository { +impl Swarms { /// Upsert a peer into the swarm of a torrent. 
/// /// Optionally, it can also preset the number of downloads of the torrent @@ -42,7 +42,7 @@ impl TorrentRepository { peer: &peer::Peer, opt_persistent_torrent: Option, ) -> bool { - if let Some(existing_entry) = self.torrents.get(info_hash) { + if let Some(existing_entry) = self.swarms.get(info_hash) { tracing::debug!("Torrent already exists: {:?}", info_hash); existing_entry.value().lock_or_panic().handle_announcement(peer) @@ -55,7 +55,7 @@ impl TorrentRepository { SwarmHandle::default() }; - let inserted_entry = self.torrents.get_or_insert(*info_hash, new_entry); + let inserted_entry = self.swarms.get_or_insert(*info_hash, new_entry); let mut torrent_guard = inserted_entry.value().lock_or_panic(); @@ -70,7 +70,7 @@ impl TorrentRepository { /// An `Option` containing the removed torrent entry if it existed. #[must_use] pub fn remove(&self, key: &InfoHash) -> Option { - self.torrents.remove(key).map(|entry| entry.value().clone()) + self.swarms.remove(key).map(|entry| entry.value().clone()) } /// Removes inactive peers from all torrent entries. @@ -82,7 +82,7 @@ impl TorrentRepository { /// /// This function panics if the lock for the entry cannot be obtained. pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - for entry in &self.torrents { + for entry in &self.swarms { entry.value().lock_or_panic().remove_inactive(current_cutoff); } } @@ -94,7 +94,7 @@ impl TorrentRepository { /// An `Option` containing the tracked torrent handle if found. 
#[must_use] pub fn get(&self, key: &InfoHash) -> Option { - let maybe_entry = self.torrents.get(key); + let maybe_entry = self.swarms.get(key); maybe_entry.map(|entry| entry.value().clone()) } @@ -111,14 +111,14 @@ impl TorrentRepository { pub fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, SwarmHandle)> { match pagination { Some(pagination) => self - .torrents + .swarms .iter() .skip(pagination.offset as usize) .take(pagination.limit as usize) .map(|entry| (*entry.key(), entry.value().clone())) .collect(), None => self - .torrents + .swarms .iter() .map(|entry| (*entry.key(), entry.value().clone())) .collect(), @@ -136,7 +136,7 @@ impl TorrentRepository { /// This function panics if the lock for the entry cannot be obtained. #[must_use] pub fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { - self.torrents + self.swarms .get(info_hash) .map(|entry| entry.value().lock_or_panic().metadata()) } @@ -208,7 +208,7 @@ impl TorrentRepository { /// /// This function panics if the lock for the entry cannot be obtained. pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - for entry in &self.torrents { + for entry in &self.swarms { if entry.value().lock_or_panic().meets_retaining_policy(policy) { continue; } @@ -224,7 +224,7 @@ impl TorrentRepository { /// access. pub fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { for (info_hash, completed) in persistent_torrents { - if self.torrents.contains_key(info_hash) { + if self.swarms.contains_key(info_hash) { continue; } @@ -232,7 +232,7 @@ impl TorrentRepository { // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. 
- self.torrents.get_or_insert(*info_hash, entry); + self.swarms.get_or_insert(*info_hash, entry); } } @@ -253,7 +253,7 @@ impl TorrentRepository { pub fn get_aggregate_swarm_metadata(&self) -> AggregateSwarmMetadata { let mut metrics = AggregateSwarmMetadata::default(); - for entry in &self.torrents { + for entry in &self.swarms { let stats = entry.value().lock_or_panic().metadata(); metrics.total_complete += u64::from(stats.complete); metrics.total_downloaded += u64::from(stats.downloaded); @@ -304,12 +304,12 @@ mod tests { use std::sync::Arc; - use crate::repository::TorrentRepository; + use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn it_should_add_the_first_peer_to_the_torrent_peer_list() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); @@ -320,7 +320,7 @@ mod tests { #[tokio::test] async fn it_should_allow_adding_the_same_peer_twice_to_the_torrent_peer_list() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); @@ -340,13 +340,13 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::repository::tests::the_in_memory_torrent_repository::numeric_peer_id; - use crate::repository::TorrentRepository; + use crate::swarms::tests::the_in_memory_torrent_repository::numeric_peer_id; + use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn it_should_return_the_peers_for_a_given_torrent() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); @@ -360,7 +360,7 @@ mod tests { #[tokio::test] async fn 
it_should_return_an_empty_list_or_peers_for_a_non_existing_torrent() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let peers = torrent_repository.get_torrent_peers(&sample_info_hash(), 74); @@ -369,7 +369,7 @@ mod tests { #[tokio::test] async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); @@ -402,13 +402,13 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::repository::tests::the_in_memory_torrent_repository::numeric_peer_id; - use crate::repository::TorrentRepository; + use crate::swarms::tests::the_in_memory_torrent_repository::numeric_peer_id; + use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn it_should_return_an_empty_peer_list_for_a_non_existing_torrent() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let peers = torrent_repository.get_peers_for(&sample_info_hash(), &sample_peer(), TORRENT_PEERS_LIMIT); @@ -417,7 +417,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); @@ -431,7 +431,7 @@ mod tests { #[tokio::test] async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); @@ -471,12 +471,12 @@ mod tests { use torrust_tracker_configuration::TrackerPolicy; use 
torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::repository::TorrentRepository; + use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn it_should_remove_a_torrent_entry() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); @@ -488,7 +488,7 @@ mod tests { #[tokio::test] async fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let mut peer = sample_peer(); @@ -502,8 +502,8 @@ mod tests { assert!(!torrent_repository.get_torrent_peers(&info_hash, 74).contains(&Arc::new(peer))); } - fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { - let torrent_repository = Arc::new(TorrentRepository::default()); + fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { + let torrent_repository = Arc::new(Swarms::default()); // Insert a sample peer for the torrent to force adding the torrent entry let mut peer = sample_peer(); @@ -539,7 +539,7 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::repository::TorrentRepository; + use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; use crate::{LockTrackedTorrent, SwarmHandle}; @@ -572,7 +572,7 @@ mod tests { #[tokio::test] async fn it_should_return_one_torrent_entry_by_infohash() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); @@ -600,13 +600,13 @@ mod tests { use 
torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::repository::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; - use crate::repository::TorrentRepository; + use crate::swarms::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; + use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn without_pagination() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); @@ -638,8 +638,8 @@ mod tests { use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::repository::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; - use crate::repository::TorrentRepository; + use crate::swarms::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; + use crate::swarms::Swarms; use crate::tests::{ sample_info_hash_alphabetically_ordered_after_sample_info_hash_one, sample_info_hash_one, sample_peer_one, sample_peer_two, @@ -647,7 +647,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_first_page() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); @@ -682,7 +682,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_second_page() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); @@ -717,7 +717,7 @@ mod tests { #[tokio::test] async fn it_should_allow_changing_the_page_size() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = 
Arc::new(Swarms::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); @@ -745,14 +745,14 @@ mod tests { use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; - use crate::repository::TorrentRepository; + use crate::swarms::Swarms; use crate::tests::{complete_peer, leecher, sample_info_hash, seeder}; // todo: refactor to use test parametrization #[tokio::test] async fn it_should_get_empty_aggregate_swarm_metadata_when_there_are_no_torrents() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata(); @@ -769,7 +769,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_leecher() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &leecher(), None); @@ -788,7 +788,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_seeder() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &seeder(), None); @@ -807,7 +807,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_completed_peer() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &complete_peer(), None); @@ -826,7 +826,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_are_multiple_torrents() 
{ - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let start_time = std::time::Instant::now(); for i in 0..1_000_000 { @@ -858,12 +858,12 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::repository::TorrentRepository; + use crate::swarms::Swarms; use crate::tests::{leecher, sample_info_hash}; #[tokio::test] async fn it_should_get_swarm_metadata_for_an_existing_torrent() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let infohash = sample_info_hash(); @@ -883,7 +883,7 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_for_a_non_existing_torrent() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&sample_info_hash()); @@ -897,12 +897,12 @@ mod tests { use torrust_tracker_primitives::PersistentTorrents; - use crate::repository::TorrentRepository; + use crate::swarms::Swarms; use crate::tests::sample_info_hash; #[tokio::test] async fn it_should_allow_importing_persisted_torrent_entries() { - let torrent_repository = Arc::new(TorrentRepository::default()); + let torrent_repository = Arc::new(Swarms::default()); let infohash = sample_info_hash(); diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 1595db335..4c9053b7e 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -10,13 +10,13 @@ use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::PersistentTorrents; use torrust_tracker_torrent_repository::swarm::Swarm; -use torrust_tracker_torrent_repository::{LockTrackedTorrent, 
TorrentRepository}; +use torrust_tracker_torrent_repository::{LockTrackedTorrent, Swarms}; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; #[fixture] -fn skip_list_mutex_std() -> TorrentRepository { - TorrentRepository::default() +fn skip_list_mutex_std() -> Swarms { + Swarms::default() } type Entries = Vec<(InfoHash, Swarm)>; @@ -148,10 +148,10 @@ fn persistent_three() -> PersistentTorrents { t.iter().copied().collect() } -fn make(repo: &TorrentRepository, entries: &Entries) { +fn make(repo: &Swarms, entries: &Entries) { for (info_hash, entry) in entries { let new = Arc::new(Mutex::new(entry.clone())); - repo.torrents.insert(*info_hash, new); + repo.swarms.insert(*info_hash, new); } } @@ -200,7 +200,7 @@ fn policy_remove_persist() -> TrackerPolicy { #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_get_a_torrent_entry(#[values(skip_list_mutex_std())] repo: TorrentRepository, #[case] entries: Entries) { +async fn it_should_get_a_torrent_entry(#[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries) { make(&repo, &entries); if let Some((info_hash, torrent)) = entries.first() { @@ -224,7 +224,7 @@ async fn it_should_get_a_torrent_entry(#[values(skip_list_mutex_std())] repo: To #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( - #[values(skip_list_mutex_std())] repo: TorrentRepository, + #[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries, many_out_of_order: Entries, ) { @@ -257,7 +257,7 @@ async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_get_paginated( - #[values(skip_list_mutex_std())] repo: TorrentRepository, + #[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries, #[values(paginated_limit_zero(), paginated_limit_one(), 
paginated_limit_one_offset_one())] paginated: Pagination, ) { @@ -312,7 +312,7 @@ async fn it_should_get_paginated( #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_get_metrics(#[values(skip_list_mutex_std())] repo: TorrentRepository, #[case] entries: Entries) { +async fn it_should_get_metrics(#[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries) { use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; make(&repo, &entries); @@ -342,7 +342,7 @@ async fn it_should_get_metrics(#[values(skip_list_mutex_std())] repo: TorrentRep #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_import_persistent_torrents( - #[values(skip_list_mutex_std())] repo: TorrentRepository, + #[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries, #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: PersistentTorrents, ) { @@ -370,7 +370,7 @@ async fn it_should_import_persistent_torrents( #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_remove_an_entry(#[values(skip_list_mutex_std())] repo: TorrentRepository, #[case] entries: Entries) { +async fn it_should_remove_an_entry(#[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries) { make(&repo, &entries); for (info_hash, torrent) in entries { @@ -397,7 +397,7 @@ async fn it_should_remove_an_entry(#[values(skip_list_mutex_std())] repo: Torren #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: TorrentRepository, #[case] entries: Entries) { +async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries) { use std::ops::Sub as _; use std::time::Duration; @@ -484,7 +484,7 @@ async fn 
it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_remove_peerless_torrents( - #[values(skip_list_mutex_std())] repo: TorrentRepository, + #[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries, #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, ) { diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 98d7eb682..67e532e86 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -7,7 +7,7 @@ use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use torrust_tracker_torrent_repository::{SwarmHandle, TorrentRepository}; +use torrust_tracker_torrent_repository::{SwarmHandle, Swarms}; /// In-memory repository for torrent entries. /// @@ -21,7 +21,7 @@ use torrust_tracker_torrent_repository::{SwarmHandle, TorrentRepository}; #[derive(Debug, Default)] pub struct InMemoryTorrentRepository { /// The underlying in-memory data structure that stores torrent entries. 
- torrents: Arc, + torrents: Arc, } impl InMemoryTorrentRepository { From 0f4596ef7de53e5806520cb7126e8234d28ab9ce Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 18:31:21 +0100 Subject: [PATCH 573/802] fix: [#1495] formatting --- packages/torrent-repository/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index f120afe88..c985f7a2b 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -1,5 +1,5 @@ -pub mod swarms; pub mod swarm; +pub mod swarms; use std::sync::{Arc, Mutex, MutexGuard}; From 34c159a161b7c167730f6c139dd3cb608173d37a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 18:48:48 +0100 Subject: [PATCH 574/802] refactor: [#1495] update method Swarm::meets_retaining_policy MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Changed from: ``` /// Returns true if the torrents meets the retention policy, meaning that /// it should be kept in the tracker. pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { if policy.persistent_torrent_completed_stat && self.metadata().downloaded > 0 { return true; } if policy.remove_peerless_torrents && self.is_empty() { return false; } true } ``` To: ``` pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { !(policy.remove_peerless_torrents && self.is_empty()) } ``` I think the first condition was introduced to avoid loosing the number of downloads we¡hen the torrent is removed becuase there are no peers. Now, we load that number from database when the torrent is added again after removing it from the tracker. 
--- packages/torrent-repository/src/swarm.rs | 38 +++++++++++++++++------- 1 file changed, 27 insertions(+), 11 deletions(-) diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 78602f3d9..1a17a2fb6 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -190,20 +190,11 @@ impl Swarm { self.peers.is_empty() } - /// Returns true if the torrents meets the retention policy, meaning that + /// Returns true if the swarm meets the retention policy, meaning that /// it should be kept in the tracker. #[must_use] pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { - // code-review: why? - if policy.persistent_torrent_completed_stat && self.metadata().downloaded > 0 { - return true; - } - - if policy.remove_peerless_torrents && self.is_empty() { - return false; - } - - true + !(policy.remove_peerless_torrents && self.is_empty()) } } @@ -214,6 +205,7 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::PeerId; + use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; @@ -384,6 +376,30 @@ mod tests { assert_eq!(swarm.len(), 1); } + #[test] + fn it_should_be_kept_when_empty_if_the_tracker_policy_is_not_to_remove_peerless_torrents() { + let empty_swarm = Swarm::default(); + + let policy = TrackerPolicy { + remove_peerless_torrents: false, + ..Default::default() + }; + + assert!(empty_swarm.meets_retaining_policy(&policy)); + } + + #[test] + fn it_should_be_removed_when_empty_if_the_tracker_policy_is_to_remove_peerless_torrents() { + let empty_swarm = Swarm::default(); + + let policy = TrackerPolicy { + remove_peerless_torrents: true, + ..Default::default() + }; + + assert!(!empty_swarm.meets_retaining_policy(&policy)); + } + #[test] fn 
it_should_allow_inserting_two_identical_peers_except_for_the_socket_address() { let mut swarm = Swarm::default(); From 728de220693828e056b8f5069ddff19589b6825a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 6 May 2025 18:55:41 +0100 Subject: [PATCH 575/802] docs: [#1495] add todo --- packages/torrent-repository/src/swarms.rs | 1 + packages/torrent-repository/tests/repository/mod.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index b5b891a2b..936f49d22 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -12,6 +12,7 @@ use crate::{LockTrackedTorrent, SwarmHandle}; #[derive(Default, Debug)] pub struct Swarms { + // todo: this needs to be public only to insert a peerless torrent (empty swarm). pub swarms: SkipMap, } diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs index 4c9053b7e..071a187fa 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -151,6 +151,7 @@ fn persistent_three() -> PersistentTorrents { fn make(repo: &Swarms, entries: &Entries) { for (info_hash, entry) in entries { let new = Arc::new(Mutex::new(entry.clone())); + // todo: use a public method to insert an empty swarm. repo.swarms.insert(*info_hash, new); } } From 6f5cb279083ee3b8b47f849e111019dfdea9c3b3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 7 May 2025 10:03:18 +0100 Subject: [PATCH 576/802] refactor: [#1495] remove test for SwarmHandle Integration tests will be removed becuase unit tests have been added. Besides, there is no point in testing only the wrapper. SwarmHandle in only a wrapper over Swarm. 
--- .../torrent-repository/tests/common/mod.rs | 1 - .../tests/common/torrent.rs | 71 ---------- .../torrent-repository/tests/entry/mod.rs | 127 ++++++++---------- 3 files changed, 55 insertions(+), 144 deletions(-) delete mode 100644 packages/torrent-repository/tests/common/torrent.rs diff --git a/packages/torrent-repository/tests/common/mod.rs b/packages/torrent-repository/tests/common/mod.rs index e083a05cc..c77ca2769 100644 --- a/packages/torrent-repository/tests/common/mod.rs +++ b/packages/torrent-repository/tests/common/mod.rs @@ -1,2 +1 @@ -pub mod torrent; pub mod torrent_peer_builder; diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs deleted file mode 100644 index a1899621f..000000000 --- a/packages/torrent-repository/tests/common/torrent.rs +++ /dev/null @@ -1,71 +0,0 @@ -use std::net::SocketAddr; -use std::sync::Arc; - -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; -use torrust_tracker_torrent_repository::{swarm, LockTrackedTorrent, SwarmHandle}; - -#[derive(Debug, Clone)] -pub(crate) enum Torrent { - Single(swarm::Swarm), - MutexStd(SwarmHandle), -} - -impl Torrent { - pub(crate) fn get_stats(&self) -> SwarmMetadata { - match self { - Torrent::Single(entry) => entry.metadata(), - Torrent::MutexStd(entry) => entry.lock_or_panic().metadata(), - } - } - - pub(crate) fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { - match self { - Torrent::Single(entry) => entry.meets_retaining_policy(policy), - Torrent::MutexStd(entry) => entry.lock_or_panic().meets_retaining_policy(policy), - } - } - - pub(crate) fn peers_is_empty(&self) -> bool { - match self { - Torrent::Single(entry) => entry.is_empty(), - Torrent::MutexStd(entry) => entry.lock_or_panic().is_empty(), - } - } - - pub(crate) fn get_peers_len(&self) -> usize { - match self { - 
Torrent::Single(entry) => entry.len(), - Torrent::MutexStd(entry) => entry.lock_or_panic().len(), - } - } - - pub(crate) fn get_peers(&self, limit: Option) -> Vec> { - match self { - Torrent::Single(entry) => entry.peers(limit), - Torrent::MutexStd(entry) => entry.lock_or_panic().peers(limit), - } - } - - pub(crate) fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { - match self { - Torrent::Single(entry) => entry.peers_excluding(client, limit), - Torrent::MutexStd(entry) => entry.lock_or_panic().peers_excluding(client, limit), - } - } - - pub(crate) fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { - match self { - Torrent::Single(entry) => entry.handle_announcement(peer), - Torrent::MutexStd(entry) => entry.lock_or_panic().handle_announcement(peer), - } - } - - pub(crate) fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { - match self { - Torrent::Single(entry) => entry.remove_inactive(current_cutoff), - Torrent::MutexStd(entry) => entry.lock_or_panic().remove_inactive(current_cutoff), - } - } -} diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs index 4607fd9c7..491b77a90 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -9,19 +9,14 @@ use torrust_tracker_clock::clock::{self, Time as _}; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::peer; use torrust_tracker_primitives::peer::Peer; -use torrust_tracker_torrent_repository::{swarm, SwarmHandle}; +use torrust_tracker_torrent_repository::Swarm; -use crate::common::torrent::Torrent; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; use crate::CurrentClock; #[fixture] -fn single() -> Torrent { - Torrent::Single(swarm::Swarm::default()) -} -#[fixture] -fn mutex_std() -> Torrent { - Torrent::MutexStd(SwarmHandle::default()) +fn single() -> Swarm { + 
Swarm::default() } #[fixture] @@ -52,39 +47,39 @@ pub enum Makes { Three, } -fn make(torrent: &mut Torrent, makes: &Makes) -> Vec { +fn make(torrent: &mut Swarm, makes: &Makes) -> Vec { match makes { Makes::Empty => vec![], Makes::Started => { let peer = a_started_peer(1); - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); vec![peer] } Makes::Completed => { let peer = a_completed_peer(2); - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); vec![peer] } Makes::Downloaded => { let mut peer = a_started_peer(3); - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); peer.event = AnnounceEvent::Completed; peer.left = NumberOfBytes::new(0); - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); vec![peer] } Makes::Three => { let peer_1 = a_started_peer(1); - torrent.upsert_peer(&peer_1); + torrent.handle_announcement(&peer_1); let peer_2 = a_completed_peer(2); - torrent.upsert_peer(&peer_2); + torrent.handle_announcement(&peer_2); let mut peer_3 = a_started_peer(3); - torrent.upsert_peer(&peer_3); + torrent.handle_announcement(&peer_3); peer_3.event = AnnounceEvent::Completed; peer_3.left = NumberOfBytes::new(0); - torrent.upsert_peer(&peer_3); + torrent.handle_announcement(&peer_3); vec![peer_1, peer_2, peer_3] } } @@ -93,10 +88,10 @@ fn make(torrent: &mut Torrent, makes: &Makes) -> Vec { #[rstest] #[case::empty(&Makes::Empty)] #[tokio::test] -async fn it_should_be_empty_by_default(#[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes) { +async fn it_should_be_empty_by_default(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { make(&mut torrent, makes); - assert_eq!(torrent.get_peers_len(), 0); + assert_eq!(torrent.len(), 0); } #[rstest] @@ -107,14 +102,14 @@ async fn it_should_be_empty_by_default(#[values(single(), mutex_std())] mut torr #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy( - 
#[values(single(), mutex_std())] mut torrent: Torrent, + #[values(single())] mut torrent: Swarm, #[case] makes: &Makes, #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, ) { make(&mut torrent, makes); - let has_peers = !torrent.peers_is_empty(); - let has_downloads = torrent.get_stats().downloaded != 0; + let has_peers = !torrent.is_empty(); + let has_downloads = torrent.metadata().downloaded != 0; match (policy.remove_peerless_torrents, policy.persistent_torrent_completed_stat) { // remove torrents without peers, and keep completed download stats @@ -144,10 +139,10 @@ async fn it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_get_peers_for_torrent_entry(#[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes) { +async fn it_should_get_peers_for_torrent_entry(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { let peers = make(&mut torrent, makes); - let torrent_peers = torrent.get_peers(None); + let torrent_peers = torrent.peers(None); assert_eq!(torrent_peers.len(), peers.len()); @@ -163,15 +158,15 @@ async fn it_should_get_peers_for_torrent_entry(#[values(single(), mutex_std())] #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_update_a_peer(#[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes) { +async fn it_should_update_a_peer(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { make(&mut torrent, makes); // Make and insert a new peer. let mut peer = a_started_peer(-1); - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); // Get the Inserted Peer by Id. 
- let peers = torrent.get_peers(None); + let peers = torrent.peers(None); let original = peers .iter() .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) @@ -181,10 +176,10 @@ async fn it_should_update_a_peer(#[values(single(), mutex_std())] mut torrent: T // Announce "Completed" torrent download event. peer.event = AnnounceEvent::Completed; - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); // Get the Updated Peer by Id. - let peers = torrent.get_peers(None); + let peers = torrent.peers(None); let updated = peers .iter() .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) @@ -200,20 +195,17 @@ async fn it_should_update_a_peer(#[values(single(), mutex_std())] mut torrent: T #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_remove_a_peer_upon_stopped_announcement( - #[values(single(), mutex_std())] mut torrent: Torrent, - #[case] makes: &Makes, -) { +async fn it_should_remove_a_peer_upon_stopped_announcement(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { use torrust_tracker_primitives::peer::ReadInfo as _; make(&mut torrent, makes); let mut peer = a_started_peer(-1); - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); // The started peer should be inserted. - let peers = torrent.get_peers(None); + let peers = torrent.peers(None); let original = peers .iter() .find(|p| p.get_id() == peer.get_id()) @@ -223,10 +215,10 @@ async fn it_should_remove_a_peer_upon_stopped_announcement( // Change peer to "Stopped" and insert. peer.event = AnnounceEvent::Stopped; - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); // It should be removed now. 
- let peers = torrent.get_peers(None); + let peers = torrent.peers(None); assert_eq!( peers.iter().find(|p| p.get_id() == peer.get_id()), @@ -242,13 +234,13 @@ async fn it_should_remove_a_peer_upon_stopped_announcement( #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloaded_statistic( - #[values(single(), mutex_std())] mut torrent: Torrent, + #[values(single())] mut torrent: Swarm, #[case] makes: &Makes, ) { make(&mut torrent, makes); - let downloaded = torrent.get_stats().downloaded; + let downloaded = torrent.metadata().downloaded; - let peers = torrent.get_peers(None); + let peers = torrent.peers(None); let mut peer = **peers.first().expect("there should be a peer"); let is_already_completed = peer.event == AnnounceEvent::Completed; @@ -256,8 +248,8 @@ async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloade // Announce "Completed" torrent download event. peer.event = AnnounceEvent::Completed; - torrent.upsert_peer(&peer); - let stats = torrent.get_stats(); + torrent.handle_announcement(&peer); + let stats = torrent.metadata(); if is_already_completed { assert_eq!(stats.downloaded, downloaded); @@ -272,19 +264,19 @@ async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloade #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_update_a_peer_as_a_seeder(#[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes) { +async fn it_should_update_a_peer_as_a_seeder(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { let peers = make(&mut torrent, makes); let completed = u32::try_from(peers.iter().filter(|p| p.is_seeder()).count()).expect("it_should_not_be_so_many"); - let peers = torrent.get_peers(None); + let peers = torrent.peers(None); let mut peer = **peers.first().expect("there should be a peer"); let is_already_non_left = peer.left == NumberOfBytes::new(0); // Set 
Bytes Left to Zero peer.left = NumberOfBytes::new(0); - torrent.upsert_peer(&peer); - let stats = torrent.get_stats(); + torrent.handle_announcement(&peer); + let stats = torrent.metadata(); if is_already_non_left { // it was already complete @@ -301,19 +293,19 @@ async fn it_should_update_a_peer_as_a_seeder(#[values(single(), mutex_std())] mu #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_update_a_peer_as_incomplete(#[values(single(), mutex_std())] mut torrent: Torrent, #[case] makes: &Makes) { +async fn it_should_update_a_peer_as_incomplete(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { let peers = make(&mut torrent, makes); let incomplete = u32::try_from(peers.iter().filter(|p| !p.is_seeder()).count()).expect("it should not be so many"); - let peers = torrent.get_peers(None); + let peers = torrent.peers(None); let mut peer = **peers.first().expect("there should be a peer"); let completed_already = peer.left == NumberOfBytes::new(0); // Set Bytes Left to no Zero peer.left = NumberOfBytes::new(1); - torrent.upsert_peer(&peer); - let stats = torrent.get_stats(); + torrent.handle_announcement(&peer); + let stats = torrent.metadata(); if completed_already { // now it is incomplete @@ -330,13 +322,10 @@ async fn it_should_update_a_peer_as_incomplete(#[values(single(), mutex_std())] #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_get_peers_excluding_the_client_socket( - #[values(single(), mutex_std())] mut torrent: Torrent, - #[case] makes: &Makes, -) { +async fn it_should_get_peers_excluding_the_client_socket(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { make(&mut torrent, makes); - let peers = torrent.get_peers(None); + let peers = torrent.peers(None); let mut peer = **peers.first().expect("there should be a peer"); let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081); @@ -345,14 +334,14 @@ 
async fn it_should_get_peers_excluding_the_client_socket( assert_ne!(peer.peer_addr, socket); // it should get the peer as it dose not share the socket. - assert!(torrent.get_peers_for_client(&socket, None).contains(&peer.into())); + assert!(torrent.peers_excluding(&socket, None).contains(&peer.into())); // set the address to the socket. peer.peer_addr = socket; - torrent.upsert_peer(&peer); // Add peer + torrent.handle_announcement(&peer); // Add peer // It should not include the peer that has the same socket. - assert!(!torrent.get_peers_for_client(&socket, None).contains(&peer.into())); + assert!(!torrent.peers_excluding(&socket, None).contains(&peer.into())); } #[rstest] @@ -362,19 +351,16 @@ async fn it_should_get_peers_excluding_the_client_socket( #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_limit_the_number_of_peers_returned( - #[values(single(), mutex_std())] mut torrent: Torrent, - #[case] makes: &Makes, -) { +async fn it_should_limit_the_number_of_peers_returned(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { make(&mut torrent, makes); // We add one more peer than the scrape limit for peer_number in 1..=74 + 1 { let peer = a_started_peer(peer_number); - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); } - let peers = torrent.get_peers(Some(TORRENT_PEERS_LIMIT)); + let peers = torrent.peers(Some(TORRENT_PEERS_LIMIT)); assert_eq!(peers.len(), 74); } @@ -386,10 +372,7 @@ async fn it_should_limit_the_number_of_peers_returned( #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_remove_inactive_peers_beyond_cutoff( - #[values(single(), mutex_std())] mut torrent: Torrent, - #[case] makes: &Makes, -) { +async fn it_should_remove_inactive_peers_beyond_cutoff(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { const TIMEOUT: Duration = Duration::from_secs(120); const EXPIRE: Duration = 
Duration::from_secs(121); @@ -402,12 +385,12 @@ async fn it_should_remove_inactive_peers_beyond_cutoff( peer.updated = now.sub(EXPIRE); - torrent.upsert_peer(&peer); + torrent.handle_announcement(&peer); - assert_eq!(torrent.get_peers_len(), peers.len() + 1); + assert_eq!(torrent.len(), peers.len() + 1); let current_cutoff = CurrentClock::now_sub(&TIMEOUT).unwrap_or_default(); - torrent.remove_inactive_peers(current_cutoff); + torrent.remove_inactive(current_cutoff); - assert_eq!(torrent.get_peers_len(), peers.len()); + assert_eq!(torrent.len(), peers.len()); } From 5413e597b7054a4ea7f32a4f36ce9b801c78e832 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 7 May 2025 10:15:40 +0100 Subject: [PATCH 577/802] refactor: [#1495] renamings to follow latest changes in torrent-repository pkg --- .../torrent-repository/tests/integration.rs | 4 +- .../tests/{entry => swarm}/mod.rs | 128 +++++++++--------- .../tests/{repository => swarms}/mod.rs | 99 +++++++------- .../src/torrent/repository/in_memory.rs | 26 ++-- 4 files changed, 130 insertions(+), 127 deletions(-) rename packages/torrent-repository/tests/{entry => swarm}/mod.rs (73%) rename packages/torrent-repository/tests/{repository => swarms}/mod.rs (81%) diff --git a/packages/torrent-repository/tests/integration.rs b/packages/torrent-repository/tests/integration.rs index 5aab67b03..b3e057075 100644 --- a/packages/torrent-repository/tests/integration.rs +++ b/packages/torrent-repository/tests/integration.rs @@ -7,8 +7,8 @@ use torrust_tracker_clock::clock; pub mod common; -mod entry; -mod repository; +mod swarm; +mod swarms; /// This code needs to be copied into each crate. /// Working version, for production. 
diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/swarm/mod.rs similarity index 73% rename from packages/torrent-repository/tests/entry/mod.rs rename to packages/torrent-repository/tests/swarm/mod.rs index 491b77a90..d529b0243 100644 --- a/packages/torrent-repository/tests/entry/mod.rs +++ b/packages/torrent-repository/tests/swarm/mod.rs @@ -15,7 +15,7 @@ use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; use crate::CurrentClock; #[fixture] -fn single() -> Swarm { +fn swarm() -> Swarm { Swarm::default() } @@ -47,39 +47,39 @@ pub enum Makes { Three, } -fn make(torrent: &mut Swarm, makes: &Makes) -> Vec { +fn make(swarm: &mut Swarm, makes: &Makes) -> Vec { match makes { Makes::Empty => vec![], Makes::Started => { let peer = a_started_peer(1); - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); vec![peer] } Makes::Completed => { let peer = a_completed_peer(2); - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); vec![peer] } Makes::Downloaded => { let mut peer = a_started_peer(3); - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); peer.event = AnnounceEvent::Completed; peer.left = NumberOfBytes::new(0); - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); vec![peer] } Makes::Three => { let peer_1 = a_started_peer(1); - torrent.handle_announcement(&peer_1); + swarm.handle_announcement(&peer_1); let peer_2 = a_completed_peer(2); - torrent.handle_announcement(&peer_2); + swarm.handle_announcement(&peer_2); let mut peer_3 = a_started_peer(3); - torrent.handle_announcement(&peer_3); + swarm.handle_announcement(&peer_3); peer_3.event = AnnounceEvent::Completed; peer_3.left = NumberOfBytes::new(0); - torrent.handle_announcement(&peer_3); + swarm.handle_announcement(&peer_3); vec![peer_1, peer_2, peer_3] } } @@ -88,10 +88,10 @@ fn make(torrent: &mut Swarm, makes: &Makes) -> Vec { #[rstest] #[case::empty(&Makes::Empty)] 
#[tokio::test] -async fn it_should_be_empty_by_default(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { - make(&mut torrent, makes); +async fn it_should_be_empty_by_default(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { + make(&mut swarm, makes); - assert_eq!(torrent.len(), 0); + assert_eq!(swarm.len(), 0); } #[rstest] @@ -102,33 +102,33 @@ async fn it_should_be_empty_by_default(#[values(single())] mut torrent: Swarm, # #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy( - #[values(single())] mut torrent: Swarm, + #[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes, #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, ) { - make(&mut torrent, makes); + make(&mut swarm, makes); - let has_peers = !torrent.is_empty(); - let has_downloads = torrent.metadata().downloaded != 0; + let has_peers = !swarm.is_empty(); + let has_downloads = swarm.metadata().downloaded != 0; match (policy.remove_peerless_torrents, policy.persistent_torrent_completed_stat) { // remove torrents without peers, and keep completed download stats (true, true) => match (has_peers, has_downloads) { // no peers, but has downloads // peers, with or without downloads - (false, true) | (true, true | false) => assert!(torrent.meets_retaining_policy(&policy)), + (false, true) | (true, true | false) => assert!(swarm.meets_retaining_policy(&policy)), // no peers and no downloads - (false, false) => assert!(!torrent.meets_retaining_policy(&policy)), + (false, false) => assert!(!swarm.meets_retaining_policy(&policy)), }, // remove torrents without peers and drop completed download stats (true, false) => match (has_peers, has_downloads) { // peers, with or without downloads - (true, true | false) => assert!(torrent.meets_retaining_policy(&policy)), + (true, true | false) => assert!(swarm.meets_retaining_policy(&policy)), // no peers and with or 
without downloads - (false, true | false) => assert!(!torrent.meets_retaining_policy(&policy)), + (false, true | false) => assert!(!swarm.meets_retaining_policy(&policy)), }, // keep torrents without peers, but keep or drop completed download stats - (false, true | false) => assert!(torrent.meets_retaining_policy(&policy)), + (false, true | false) => assert!(swarm.meets_retaining_policy(&policy)), } } @@ -139,10 +139,10 @@ async fn it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_get_peers_for_torrent_entry(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { - let peers = make(&mut torrent, makes); +async fn it_should_get_peers_for_torrent_entry(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { + let peers = make(&mut swarm, makes); - let torrent_peers = torrent.peers(None); + let torrent_peers = swarm.peers(None); assert_eq!(torrent_peers.len(), peers.len()); @@ -158,15 +158,15 @@ async fn it_should_get_peers_for_torrent_entry(#[values(single())] mut torrent: #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_update_a_peer(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { - make(&mut torrent, makes); +async fn it_should_update_a_peer(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { + make(&mut swarm, makes); // Make and insert a new peer. let mut peer = a_started_peer(-1); - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); // Get the Inserted Peer by Id. - let peers = torrent.peers(None); + let peers = swarm.peers(None); let original = peers .iter() .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) @@ -176,10 +176,10 @@ async fn it_should_update_a_peer(#[values(single())] mut torrent: Swarm, #[case] // Announce "Completed" torrent download event. 
peer.event = AnnounceEvent::Completed; - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); // Get the Updated Peer by Id. - let peers = torrent.peers(None); + let peers = swarm.peers(None); let updated = peers .iter() .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) @@ -195,17 +195,17 @@ async fn it_should_update_a_peer(#[values(single())] mut torrent: Swarm, #[case] #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_remove_a_peer_upon_stopped_announcement(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { +async fn it_should_remove_a_peer_upon_stopped_announcement(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { use torrust_tracker_primitives::peer::ReadInfo as _; - make(&mut torrent, makes); + make(&mut swarm, makes); let mut peer = a_started_peer(-1); - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); // The started peer should be inserted. - let peers = torrent.peers(None); + let peers = swarm.peers(None); let original = peers .iter() .find(|p| p.get_id() == peer.get_id()) @@ -215,10 +215,10 @@ async fn it_should_remove_a_peer_upon_stopped_announcement(#[values(single())] m // Change peer to "Stopped" and insert. peer.event = AnnounceEvent::Stopped; - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); // It should be removed now. 
- let peers = torrent.peers(None); + let peers = swarm.peers(None); assert_eq!( peers.iter().find(|p| p.get_id() == peer.get_id()), @@ -234,7 +234,7 @@ async fn it_should_remove_a_peer_upon_stopped_announcement(#[values(single())] m #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloaded_statistic( - #[values(single())] mut torrent: Swarm, + #[values(swarm())] mut torrent: Swarm, #[case] makes: &Makes, ) { make(&mut torrent, makes); @@ -264,19 +264,19 @@ async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloade #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_update_a_peer_as_a_seeder(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { - let peers = make(&mut torrent, makes); +async fn it_should_update_a_peer_as_a_seeder(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { + let peers = make(&mut swarm, makes); let completed = u32::try_from(peers.iter().filter(|p| p.is_seeder()).count()).expect("it_should_not_be_so_many"); - let peers = torrent.peers(None); + let peers = swarm.peers(None); let mut peer = **peers.first().expect("there should be a peer"); let is_already_non_left = peer.left == NumberOfBytes::new(0); // Set Bytes Left to Zero peer.left = NumberOfBytes::new(0); - torrent.handle_announcement(&peer); - let stats = torrent.metadata(); + swarm.handle_announcement(&peer); + let stats = swarm.metadata(); if is_already_non_left { // it was already complete @@ -293,19 +293,19 @@ async fn it_should_update_a_peer_as_a_seeder(#[values(single())] mut torrent: Sw #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_update_a_peer_as_incomplete(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { - let peers = make(&mut torrent, makes); +async fn it_should_update_a_peer_as_incomplete(#[values(swarm())] mut swarm: Swarm, #[case] makes: 
&Makes) { + let peers = make(&mut swarm, makes); let incomplete = u32::try_from(peers.iter().filter(|p| !p.is_seeder()).count()).expect("it should not be so many"); - let peers = torrent.peers(None); + let peers = swarm.peers(None); let mut peer = **peers.first().expect("there should be a peer"); let completed_already = peer.left == NumberOfBytes::new(0); // Set Bytes Left to no Zero peer.left = NumberOfBytes::new(1); - torrent.handle_announcement(&peer); - let stats = torrent.metadata(); + swarm.handle_announcement(&peer); + let stats = swarm.metadata(); if completed_already { // now it is incomplete @@ -322,10 +322,10 @@ async fn it_should_update_a_peer_as_incomplete(#[values(single())] mut torrent: #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_get_peers_excluding_the_client_socket(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { - make(&mut torrent, makes); +async fn it_should_get_peers_excluding_the_client_socket(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { + make(&mut swarm, makes); - let peers = torrent.peers(None); + let peers = swarm.peers(None); let mut peer = **peers.first().expect("there should be a peer"); let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081); @@ -334,14 +334,14 @@ async fn it_should_get_peers_excluding_the_client_socket(#[values(single())] mut assert_ne!(peer.peer_addr, socket); // it should get the peer as it dose not share the socket. - assert!(torrent.peers_excluding(&socket, None).contains(&peer.into())); + assert!(swarm.peers_excluding(&socket, None).contains(&peer.into())); // set the address to the socket. peer.peer_addr = socket; - torrent.handle_announcement(&peer); // Add peer + swarm.handle_announcement(&peer); // Add peer // It should not include the peer that has the same socket. 
- assert!(!torrent.peers_excluding(&socket, None).contains(&peer.into())); + assert!(!swarm.peers_excluding(&socket, None).contains(&peer.into())); } #[rstest] @@ -351,16 +351,16 @@ async fn it_should_get_peers_excluding_the_client_socket(#[values(single())] mut #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_limit_the_number_of_peers_returned(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { - make(&mut torrent, makes); +async fn it_should_limit_the_number_of_peers_returned(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { + make(&mut swarm, makes); // We add one more peer than the scrape limit for peer_number in 1..=74 + 1 { let peer = a_started_peer(peer_number); - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); } - let peers = torrent.peers(Some(TORRENT_PEERS_LIMIT)); + let peers = swarm.peers(Some(TORRENT_PEERS_LIMIT)); assert_eq!(peers.len(), 74); } @@ -372,11 +372,11 @@ async fn it_should_limit_the_number_of_peers_returned(#[values(single())] mut to #[case::downloaded(&Makes::Downloaded)] #[case::three(&Makes::Three)] #[tokio::test] -async fn it_should_remove_inactive_peers_beyond_cutoff(#[values(single())] mut torrent: Swarm, #[case] makes: &Makes) { +async fn it_should_remove_inactive_peers_beyond_cutoff(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { const TIMEOUT: Duration = Duration::from_secs(120); const EXPIRE: Duration = Duration::from_secs(121); - let peers = make(&mut torrent, makes); + let peers = make(&mut swarm, makes); let mut peer = a_completed_peer(-1); @@ -385,12 +385,12 @@ async fn it_should_remove_inactive_peers_beyond_cutoff(#[values(single())] mut t peer.updated = now.sub(EXPIRE); - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); - assert_eq!(torrent.len(), peers.len() + 1); + assert_eq!(swarm.len(), peers.len() + 1); let current_cutoff = CurrentClock::now_sub(&TIMEOUT).unwrap_or_default(); - 
torrent.remove_inactive(current_cutoff); + swarm.remove_inactive(current_cutoff); - assert_eq!(torrent.len(), peers.len()); + assert_eq!(swarm.len(), peers.len()); } diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/swarms/mod.rs similarity index 81% rename from packages/torrent-repository/tests/repository/mod.rs rename to packages/torrent-repository/tests/swarms/mod.rs index 071a187fa..20c6255fa 100644 --- a/packages/torrent-repository/tests/repository/mod.rs +++ b/packages/torrent-repository/tests/swarms/mod.rs @@ -15,7 +15,7 @@ use torrust_tracker_torrent_repository::{LockTrackedTorrent, Swarms}; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; #[fixture] -fn skip_list_mutex_std() -> Swarms { +fn swarms() -> Swarms { Swarms::default() } @@ -33,27 +33,27 @@ fn default() -> Entries { #[fixture] fn started() -> Entries { - let mut torrent = Swarm::default(); - torrent.handle_announcement(&a_started_peer(1)); - vec![(InfoHash::default(), torrent)] + let mut swarm = Swarm::default(); + swarm.handle_announcement(&a_started_peer(1)); + vec![(InfoHash::default(), swarm)] } #[fixture] fn completed() -> Entries { - let mut torrent = Swarm::default(); - torrent.handle_announcement(&a_completed_peer(2)); - vec![(InfoHash::default(), torrent)] + let mut swarm = Swarm::default(); + swarm.handle_announcement(&a_completed_peer(2)); + vec![(InfoHash::default(), swarm)] } #[fixture] fn downloaded() -> Entries { - let mut torrent = Swarm::default(); + let mut swarm = Swarm::default(); let mut peer = a_started_peer(3); - torrent.handle_announcement(&peer); + swarm.handle_announcement(&peer); peer.event = AnnounceEvent::Completed; peer.left = NumberOfBytes::new(0); - torrent.handle_announcement(&peer); - vec![(InfoHash::default(), torrent)] + swarm.handle_announcement(&peer); + vec![(InfoHash::default(), swarm)] } #[fixture] @@ -201,13 +201,13 @@ fn policy_remove_persist() -> TrackerPolicy { 
#[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_get_a_torrent_entry(#[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries) { +async fn it_should_get_a_torrent_entry(#[values(swarms())] repo: Swarms, #[case] entries: Entries) { make(&repo, &entries); - if let Some((info_hash, torrent)) = entries.first() { + if let Some((info_hash, swarm)) = entries.first() { assert_eq!( Some(repo.get(info_hash).unwrap().lock_or_panic().clone()), - Some(torrent.clone()) + Some(swarm.clone()) ); } else { assert!(repo.get(&InfoHash::default()).is_none()); @@ -225,7 +225,7 @@ async fn it_should_get_a_torrent_entry(#[values(skip_list_mutex_std())] repo: Sw #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( - #[values(skip_list_mutex_std())] repo: Swarms, + #[values(swarms())] repo: Swarms, #[case] entries: Entries, many_out_of_order: Entries, ) { @@ -258,7 +258,7 @@ async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_get_paginated( - #[values(skip_list_mutex_std())] repo: Swarms, + #[values(swarms())] repo: Swarms, #[case] entries: Entries, #[values(paginated_limit_zero(), paginated_limit_one(), paginated_limit_one_offset_one())] paginated: Pagination, ) { @@ -270,13 +270,13 @@ async fn it_should_get_paginated( match paginated { // it should return empty if limit is zero. Pagination { limit: 0, .. } => { - let torrents: Vec<(InfoHash, Swarm)> = repo + let swarms: Vec<(InfoHash, Swarm)> = repo .get_paginated(Some(&paginated)) .iter() - .map(|(i, lock_tracked_torrent)| (*i, lock_tracked_torrent.lock_or_panic().clone())) + .map(|(i, swarm_handle)| (*i, swarm_handle.lock_or_panic().clone())) .collect(); - assert_eq!(torrents, vec![]); + assert_eq!(swarms, vec![]); } // it should return a single entry if the limit is one. 
@@ -313,10 +313,10 @@ async fn it_should_get_paginated( #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_get_metrics(#[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries) { +async fn it_should_get_metrics(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; - make(&repo, &entries); + make(&swarms, &entries); let mut metrics = AggregateSwarmMetadata::default(); @@ -329,7 +329,7 @@ async fn it_should_get_metrics(#[values(skip_list_mutex_std())] repo: Swarms, #[ metrics.total_downloaded += u64::from(stats.downloaded); } - assert_eq!(repo.get_aggregate_swarm_metadata(), metrics); + assert_eq!(swarms.get_aggregate_swarm_metadata(), metrics); } #[rstest] @@ -343,21 +343,21 @@ async fn it_should_get_metrics(#[values(skip_list_mutex_std())] repo: Swarms, #[ #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_import_persistent_torrents( - #[values(skip_list_mutex_std())] repo: Swarms, + #[values(swarms())] swarms: Swarms, #[case] entries: Entries, #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: PersistentTorrents, ) { - make(&repo, &entries); + make(&swarms, &entries); - let mut downloaded = repo.get_aggregate_swarm_metadata().total_downloaded; + let mut downloaded = swarms.get_aggregate_swarm_metadata().total_downloaded; persistent_torrents.iter().for_each(|(_, d)| downloaded += u64::from(*d)); - repo.import_persistent(&persistent_torrents); + swarms.import_persistent(&persistent_torrents); - assert_eq!(repo.get_aggregate_swarm_metadata().total_downloaded, downloaded); + assert_eq!(swarms.get_aggregate_swarm_metadata().total_downloaded, downloaded); for (entry, _) in persistent_torrents { - assert!(repo.get(&entry).is_some()); + assert!(swarms.get(&entry).is_some()); } } @@ -371,21 +371,24 @@ async fn 
it_should_import_persistent_torrents( #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_remove_an_entry(#[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries) { - make(&repo, &entries); +async fn it_should_remove_an_entry(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { + make(&swarms, &entries); for (info_hash, torrent) in entries { assert_eq!( - Some(repo.get(&info_hash).unwrap().lock_or_panic().clone()), + Some(swarms.get(&info_hash).unwrap().lock_or_panic().clone()), Some(torrent.clone()) ); - assert_eq!(Some(repo.remove(&info_hash).unwrap().lock_or_panic().clone()), Some(torrent)); + assert_eq!( + Some(swarms.remove(&info_hash).unwrap().lock_or_panic().clone()), + Some(torrent) + ); - assert!(repo.get(&info_hash).is_none()); - assert!(repo.remove(&info_hash).is_none()); + assert!(swarms.get(&info_hash).is_none()); + assert!(swarms.remove(&info_hash).is_none()); } - assert_eq!(repo.get_aggregate_swarm_metadata().total_torrents, 0); + assert_eq!(swarms.get_aggregate_swarm_metadata().total_torrents, 0); } #[rstest] @@ -398,7 +401,7 @@ async fn it_should_remove_an_entry(#[values(skip_list_mutex_std())] repo: Swarms #[case::out_of_order(many_out_of_order())] #[case::in_order(many_hashed_in_order())] #[tokio::test] -async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: Swarms, #[case] entries: Entries) { +async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { use std::ops::Sub as _; use std::time::Duration; @@ -411,7 +414,7 @@ async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: const TIMEOUT: Duration = Duration::from_secs(120); const EXPIRE: Duration = Duration::from_secs(121); - make(&repo, &entries); + make(&swarms, &entries); let info_hash: InfoHash; let mut peer: peer::Peer; @@ -435,15 +438,15 @@ async fn 
it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: // Insert the infohash and peer into the repository // and verify there is an extra torrent entry. { - repo.upsert_peer(&info_hash, &peer, None); - assert_eq!(repo.get_aggregate_swarm_metadata().total_torrents, entries.len() as u64 + 1); + swarms.upsert_peer(&info_hash, &peer, None); + assert_eq!(swarms.get_aggregate_swarm_metadata().total_torrents, entries.len() as u64 + 1); } // Insert the infohash and peer into the repository // and verify the swarm metadata was updated. { - repo.upsert_peer(&info_hash, &peer, None); - let stats = repo.get_swarm_metadata(&info_hash); + swarms.upsert_peer(&info_hash, &peer, None); + let stats = swarms.get_swarm_metadata(&info_hash); assert_eq!( stats, Some(SwarmMetadata { @@ -456,19 +459,19 @@ async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: // Verify that this new peer was inserted into the repository. { - let lock_tracked_torrent = repo.get(&info_hash).expect("it_should_get_some"); + let lock_tracked_torrent = swarms.get(&info_hash).expect("it_should_get_some"); let entry = lock_tracked_torrent.lock_or_panic(); assert!(entry.peers(None).contains(&peer.into())); } // Remove peers that have not been updated since the timeout (120 seconds ago). { - repo.remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed")); + swarms.remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed")); } // Verify that the this peer was removed from the repository. 
{ - let lock_tracked_torrent = repo.get(&info_hash).expect("it_should_get_some"); + let lock_tracked_torrent = swarms.get(&info_hash).expect("it_should_get_some"); let entry = lock_tracked_torrent.lock_or_panic(); assert!(!entry.peers(None).contains(&peer.into())); } @@ -485,15 +488,15 @@ async fn it_should_remove_inactive_peers(#[values(skip_list_mutex_std())] repo: #[case::in_order(many_hashed_in_order())] #[tokio::test] async fn it_should_remove_peerless_torrents( - #[values(skip_list_mutex_std())] repo: Swarms, + #[values(swarms())] swarms: Swarms, #[case] entries: Entries, #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, ) { - make(&repo, &entries); + make(&swarms, &entries); - repo.remove_peerless_torrents(&policy); + swarms.remove_peerless_torrents(&policy); - let torrents: Vec<(InfoHash, Swarm)> = repo + let torrents: Vec<(InfoHash, Swarm)> = swarms .get_paginated(None) .iter() .map(|(i, lock_tracked_torrent)| (*i, lock_tracked_torrent.lock_or_panic().clone())) diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 67e532e86..5902f6735 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -20,8 +20,8 @@ use torrust_tracker_torrent_repository::{SwarmHandle, Swarms}; /// used in production. Other implementations are kept for reference. #[derive(Debug, Default)] pub struct InMemoryTorrentRepository { - /// The underlying in-memory data structure that stores torrent entries. - torrents: Arc, + /// The underlying in-memory data structure that stores swarms data. 
+ swarms: Arc, } impl InMemoryTorrentRepository { @@ -46,7 +46,7 @@ impl InMemoryTorrentRepository { peer: &peer::Peer, opt_persistent_torrent: Option, ) -> bool { - self.torrents.upsert_peer(info_hash, peer, opt_persistent_torrent) + self.swarms.upsert_peer(info_hash, peer, opt_persistent_torrent) } /// Removes a torrent entry from the repository. @@ -65,7 +65,7 @@ impl InMemoryTorrentRepository { #[cfg(test)] #[must_use] pub(crate) fn remove(&self, key: &InfoHash) -> Option { - self.torrents.remove(key) + self.swarms.remove(key) } /// Removes inactive peers from all torrent entries. @@ -78,7 +78,7 @@ impl InMemoryTorrentRepository { /// * `current_cutoff` - The cutoff timestamp; peers not updated since this /// time will be removed. pub(crate) fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - self.torrents.remove_inactive_peers(current_cutoff); + self.swarms.remove_inactive_peers(current_cutoff); } /// Removes torrent entries that have no active peers. @@ -91,7 +91,7 @@ impl InMemoryTorrentRepository { /// * `policy` - The tracker policy containing the configuration for /// removing peerless torrents. pub(crate) fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - self.torrents.remove_peerless_torrents(policy); + self.swarms.remove_peerless_torrents(policy); } /// Retrieves a torrent entry by its infohash. @@ -105,7 +105,7 @@ impl InMemoryTorrentRepository { /// An `Option` containing the torrent entry if found. #[must_use] pub(crate) fn get(&self, key: &InfoHash) -> Option { - self.torrents.get(key) + self.swarms.get(key) } /// Retrieves a paginated list of torrent entries. @@ -123,7 +123,7 @@ impl InMemoryTorrentRepository { /// A vector of `(InfoHash, TorrentEntry)` tuples. #[must_use] pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, SwarmHandle)> { - self.torrents.get_paginated(pagination) + self.swarms.get_paginated(pagination) } /// Retrieves swarm metadata for a given torrent. 
@@ -141,7 +141,7 @@ impl InMemoryTorrentRepository { /// A `SwarmMetadata` struct containing the aggregated torrent data. #[must_use] pub(crate) fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> SwarmMetadata { - self.torrents.get_swarm_metadata_or_default(info_hash) + self.swarms.get_swarm_metadata_or_default(info_hash) } /// Retrieves torrent peers for a given torrent and client, excluding the @@ -163,7 +163,7 @@ impl InMemoryTorrentRepository { /// the torrent, excluding the requesting client. #[must_use] pub(crate) fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { - self.torrents.get_peers_for(info_hash, peer, max(limit, TORRENT_PEERS_LIMIT)) + self.swarms.get_peers_for(info_hash, peer, max(limit, TORRENT_PEERS_LIMIT)) } /// Retrieves the list of peers for a given torrent. @@ -186,7 +186,7 @@ impl InMemoryTorrentRepository { #[must_use] pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { // todo: pass the limit as an argument like `get_peers_for` - self.torrents.get_torrent_peers(info_hash, TORRENT_PEERS_LIMIT) + self.swarms.get_torrent_peers(info_hash, TORRENT_PEERS_LIMIT) } /// Calculates and returns overall torrent metrics. @@ -200,7 +200,7 @@ impl InMemoryTorrentRepository { /// A [`AggregateSwarmMetadata`] struct with the aggregated metrics. #[must_use] pub fn get_aggregate_swarm_metadata(&self) -> AggregateSwarmMetadata { - self.torrents.get_aggregate_swarm_metadata() + self.swarms.get_aggregate_swarm_metadata() } /// Imports persistent torrent data into the in-memory repository. @@ -212,6 +212,6 @@ impl InMemoryTorrentRepository { /// /// * `persistent_torrents` - A reference to the persisted torrent data. 
pub fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { - self.torrents.import_persistent(persistent_torrents); + self.swarms.import_persistent(persistent_torrents); } } From 6d50fa083cd334bfc1f23a96d3754e98ed6ae51b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 7 May 2025 11:45:50 +0100 Subject: [PATCH 578/802] refactor: [#1495] remove panics from Swarms type They have been moved one level up to the InMemoryTorrentRepository type. We should buble them up to the final user, returing an error in the UDP or HTTP tracker when the swarm handle lock cannot be adquired. A new issues will be opened to address that. --- Cargo.lock | 2 +- packages/torrent-repository/Cargo.toml | 2 +- packages/torrent-repository/src/lib.rs | 2 +- packages/torrent-repository/src/swarms.rs | 189 +++++++++++------- .../torrent-repository/tests/swarms/mod.rs | 25 ++- .../src/torrent/repository/in_memory.rs | 54 ++++- 6 files changed, 182 insertions(+), 92 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index eea957f88..093b8e9b0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4850,12 +4850,12 @@ dependencies = [ "crossbeam-skiplist", "rand 0.9.1", "rstest", + "thiserror 2.0.12", "tokio", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-primitives", "torrust-tracker-test-helpers", - "tracing", ] [[package]] diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index e584fadf4..2cc02a720 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -19,11 +19,11 @@ version.workspace = true aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" crossbeam-skiplist = "0" +thiserror = "2.0.12" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives 
= { version = "3.0.0-develop", path = "../primitives" } -tracing = "0" [dev-dependencies] async-std = { version = "1", features = ["attributes", "tokio1"] } diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index c985f7a2b..a4e7d9c5d 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -23,7 +23,7 @@ pub trait LockTrackedTorrent { fn lock_or_panic(&self) -> MutexGuard<'_, Swarm>; } -impl LockTrackedTorrent for Arc> { +impl LockTrackedTorrent for SwarmHandle { fn lock_or_panic(&self) -> MutexGuard<'_, Swarm> { self.lock().expect("can't acquire lock for tracked torrent handle") } diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 936f49d22..222bea60a 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -8,7 +8,7 @@ use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMe use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use crate::swarm::Swarm; -use crate::{LockTrackedTorrent, SwarmHandle}; +use crate::SwarmHandle; #[derive(Default, Debug)] pub struct Swarms { @@ -34,33 +34,31 @@ impl Swarms { /// Returns `true` if the number of downloads was increased because the peer /// completed the download. /// - /// # Panics + /// # Errors /// - /// This function panics if the lock for the entry cannot be obtained. + /// This function panics if the lock for the swarm handle cannot be acquired. 
pub fn upsert_peer( &self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option, - ) -> bool { - if let Some(existing_entry) = self.swarms.get(info_hash) { - tracing::debug!("Torrent already exists: {:?}", info_hash); + ) -> Result { + if let Some(existing_swarm_handle) = self.swarms.get(info_hash) { + let mut swarm = existing_swarm_handle.value().lock()?; - existing_entry.value().lock_or_panic().handle_announcement(peer) + Ok(swarm.handle_announcement(peer)) } else { - tracing::debug!("Inserting new torrent: {:?}", info_hash); - - let new_entry = if let Some(number_of_downloads) = opt_persistent_torrent { + let new_swarm_handle = if let Some(number_of_downloads) = opt_persistent_torrent { SwarmHandle::new(Swarm::new(number_of_downloads).into()) } else { SwarmHandle::default() }; - let inserted_entry = self.swarms.get_or_insert(*info_hash, new_entry); + let inserted_swarm_handle = self.swarms.get_or_insert(*info_hash, new_swarm_handle); - let mut torrent_guard = inserted_entry.value().lock_or_panic(); + let mut swarm = inserted_swarm_handle.value().lock()?; - torrent_guard.handle_announcement(peer) + Ok(swarm.handle_announcement(peer)) } } @@ -79,13 +77,17 @@ impl Swarms { /// A peer is considered inactive if its last update timestamp is older than /// the provided cutoff time. /// - /// # Panics + /// # Errors /// - /// This function panics if the lock for the entry cannot be obtained. - pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - for entry in &self.swarms { - entry.value().lock_or_panic().remove_inactive(current_cutoff); + /// This function returns an error if it fails to acquire the lock for any + /// swarm handle. + pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> Result<(), Error> { + for swarm_handle in &self.swarms { + let mut swarm = swarm_handle.value().lock()?; + swarm.remove_inactive(current_cutoff); } + + Ok(()) } /// Retrieves a tracked torrent handle by its infohash. 
@@ -132,14 +134,17 @@ impl Swarms { /// /// A `SwarmMetadata` struct containing the aggregated torrent data if found. /// - /// # Panics + /// # Errors /// - /// This function panics if the lock for the entry cannot be obtained. - #[must_use] - pub fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { - self.swarms - .get(info_hash) - .map(|entry| entry.value().lock_or_panic().metadata()) + /// This function panics if the lock for the swarm handle cannot be acquired. + pub fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Result, Error> { + match self.swarms.get(info_hash) { + None => Ok(None), + Some(swarm_handle) => { + let swarm = swarm_handle.value().lock()?; + Ok(Some(swarm.metadata())) + } + } } /// Retrieves swarm metadata for a given torrent. @@ -148,11 +153,16 @@ impl Swarms { /// /// A `SwarmMetadata` struct containing the aggregated torrent data if it's /// found or a zeroed metadata struct if not. - #[must_use] - pub fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> SwarmMetadata { + /// + /// # Errors + /// + /// This function returns an error if it fails to acquire the lock for the + /// swarm handle. + pub fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> Result { match self.get_swarm_metadata(info_hash) { - Some(swarm_metadata) => swarm_metadata, - None => SwarmMetadata::zeroed(), + Ok(Some(swarm_metadata)) => Ok(swarm_metadata), + Ok(None) => Ok(SwarmMetadata::zeroed()), + Err(err) => Err(err), } } @@ -168,14 +178,17 @@ impl Swarms { /// A vector of peers (wrapped in `Arc`) representing the active peers for /// the torrent, excluding the requesting client. /// - /// # Panics + /// # Errors /// - /// This function panics if the lock for the torrent entry cannot be obtained. - #[must_use] - pub fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { + /// This function returns an error if it fails to acquire the lock for the + /// swarm handle. 
+ pub fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Result>, Error> { match self.get(info_hash) { - None => vec![], - Some(entry) => entry.lock_or_panic().peers_excluding(&peer.peer_addr, Some(limit)), + None => Ok(vec![]), + Some(swarm_handle) => { + let swarm = swarm_handle.lock()?; + Ok(swarm.peers_excluding(&peer.peer_addr, Some(limit))) + } } } @@ -189,14 +202,17 @@ impl Swarms { /// A vector of peers (wrapped in `Arc`) representing the active peers for /// the torrent. /// - /// # Panics + /// # Errors /// - /// This function panics if the lock for the torrent entry cannot be obtained. - #[must_use] - pub fn get_torrent_peers(&self, info_hash: &InfoHash, limit: usize) -> Vec> { + /// This function returns an error if it fails to acquire the lock for the + /// swarm handle. + pub fn get_torrent_peers(&self, info_hash: &InfoHash, limit: usize) -> Result>, Error> { match self.get(info_hash) { - None => vec![], - Some(entry) => entry.lock_or_panic().peers(Some(limit)), + None => Ok(vec![]), + Some(swarm_handle) => { + let swarm = swarm_handle.lock()?; + Ok(swarm.peers(Some(limit))) + } } } @@ -205,17 +221,22 @@ impl Swarms { /// Depending on the tracker policy, torrents without any peers may be /// removed to conserve memory. /// - /// # Panics + /// # Errors /// - /// This function panics if the lock for the entry cannot be obtained. - pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - for entry in &self.swarms { - if entry.value().lock_or_panic().meets_retaining_policy(policy) { + /// This function returns an error if it fails to acquire the lock for any + /// swarm handle. 
+ pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> Result<(), Error> { + for swarm_handle in &self.swarms { + let swarm = swarm_handle.value().lock()?; + + if swarm.meets_retaining_policy(policy) { continue; } - entry.remove(); + swarm_handle.remove(); } + + Ok(()) } /// Imports persistent torrent data into the in-memory repository. @@ -247,22 +268,35 @@ impl Swarms { /// /// A [`AggregateSwarmMetadata`] struct with the aggregated metrics. /// - /// # Panics + /// # Errors /// - /// This function panics if the lock for the entry cannot be obtained. - #[must_use] - pub fn get_aggregate_swarm_metadata(&self) -> AggregateSwarmMetadata { + /// This function returns an error if it fails to acquire the lock for any + /// swarm handle. + pub fn get_aggregate_swarm_metadata(&self) -> Result { let mut metrics = AggregateSwarmMetadata::default(); for entry in &self.swarms { - let stats = entry.value().lock_or_panic().metadata(); + let swarm = entry.value().lock()?; + let stats = swarm.metadata(); metrics.total_complete += u64::from(stats.complete); metrics.total_downloaded += u64::from(stats.downloaded); metrics.total_incomplete += u64::from(stats.incomplete); metrics.total_torrents += 1; } - metrics + Ok(metrics) + } +} + +#[derive(thiserror::Error, Debug, Clone)] +pub enum Error { + #[error("Can't acquire swarm lock")] + CannotAcquireSwarmLock, +} + +impl From>> for Error { + fn from(_error: std::sync::PoisonError>) -> Self { + Error::CannotAcquireSwarmLock } } @@ -354,7 +388,7 @@ mod tests { let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); - let peers = torrent_repository.get_torrent_peers(&info_hash, 74); + let peers = torrent_repository.get_torrent_peers(&info_hash, 74).unwrap(); assert_eq!(peers, vec![Arc::new(peer)]); } @@ -363,7 +397,7 @@ mod tests { async fn it_should_return_an_empty_list_or_peers_for_a_non_existing_torrent() { let torrent_repository = Arc::new(Swarms::default()); - let peers = 
torrent_repository.get_torrent_peers(&sample_info_hash(), 74); + let peers = torrent_repository.get_torrent_peers(&sample_info_hash(), 74).unwrap(); assert!(peers.is_empty()); } @@ -388,7 +422,7 @@ mod tests { let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); } - let peers = torrent_repository.get_torrent_peers(&info_hash, 74); + let peers = torrent_repository.get_torrent_peers(&info_hash, 74).unwrap(); assert_eq!(peers.len(), 74); } @@ -411,7 +445,9 @@ mod tests { async fn it_should_return_an_empty_peer_list_for_a_non_existing_torrent() { let torrent_repository = Arc::new(Swarms::default()); - let peers = torrent_repository.get_peers_for(&sample_info_hash(), &sample_peer(), TORRENT_PEERS_LIMIT); + let peers = torrent_repository + .get_peers_for(&sample_info_hash(), &sample_peer(), TORRENT_PEERS_LIMIT) + .unwrap(); assert_eq!(peers, vec![]); } @@ -425,7 +461,9 @@ mod tests { let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); - let peers = torrent_repository.get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT); + let peers = torrent_repository + .get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT) + .unwrap(); assert_eq!(peers, vec![]); } @@ -455,7 +493,9 @@ mod tests { let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); } - let peers = torrent_repository.get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT); + let peers = torrent_repository + .get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT) + .unwrap(); assert_eq!(peers.len(), 74); } @@ -498,9 +538,14 @@ mod tests { let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); // Cut off time is 1 second after the peer was updated - torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); + torrent_repository + .remove_inactive_peers(peer.updated.add(Duration::from_secs(1))) + .unwrap(); - 
assert!(!torrent_repository.get_torrent_peers(&info_hash, 74).contains(&Arc::new(peer))); + assert!(!torrent_repository + .get_torrent_peers(&info_hash, 74) + .unwrap() + .contains(&Arc::new(peer))); } fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { @@ -512,7 +557,9 @@ mod tests { let _number_of_downloads_increased = torrent_repository.upsert_peer(info_hash, &peer, None); // Remove the peer - torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); + torrent_repository + .remove_inactive_peers(peer.updated.add(Duration::from_secs(1))) + .unwrap(); torrent_repository } @@ -528,7 +575,7 @@ mod tests { ..Default::default() }; - torrent_repository.remove_peerless_torrents(&tracker_policy); + torrent_repository.remove_peerless_torrents(&tracker_policy).unwrap(); assert!(torrent_repository.get(&info_hash).is_none()); } @@ -755,7 +802,7 @@ mod tests { async fn it_should_get_empty_aggregate_swarm_metadata_when_there_are_no_torrents() { let torrent_repository = Arc::new(Swarms::default()); - let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata(); + let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata().unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -774,7 +821,7 @@ mod tests { let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &leecher(), None); - let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata(); + let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata().unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -793,7 +840,7 @@ mod tests { let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &seeder(), None); - let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata(); + let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata().unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -812,7 
+859,7 @@ mod tests { let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &complete_peer(), None); - let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata(); + let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata().unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -837,7 +884,7 @@ mod tests { let result_a = start_time.elapsed(); let start_time = std::time::Instant::now(); - let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata(); + let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata().unwrap(); let result_b = start_time.elapsed(); assert_eq!( @@ -870,7 +917,7 @@ mod tests { let _number_of_downloads_increased = torrent_repository.upsert_peer(&infohash, &leecher(), None); - let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&infohash); + let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&infohash).unwrap(); assert_eq!( swarm_metadata, @@ -886,7 +933,7 @@ mod tests { async fn it_should_return_zeroed_swarm_metadata_for_a_non_existing_torrent() { let torrent_repository = Arc::new(Swarms::default()); - let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&sample_info_hash()); + let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&sample_info_hash()).unwrap(); assert_eq!(swarm_metadata, SwarmMetadata::zeroed()); } @@ -913,7 +960,7 @@ mod tests { torrent_repository.import_persistent(&persistent_torrents); - let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&infohash); + let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&infohash).unwrap(); // Only the number of downloads is persisted. 
assert_eq!(swarm_metadata.downloaded, 1); diff --git a/packages/torrent-repository/tests/swarms/mod.rs b/packages/torrent-repository/tests/swarms/mod.rs index 20c6255fa..82247bfcb 100644 --- a/packages/torrent-repository/tests/swarms/mod.rs +++ b/packages/torrent-repository/tests/swarms/mod.rs @@ -329,7 +329,7 @@ async fn it_should_get_metrics(#[values(swarms())] swarms: Swarms, #[case] entri metrics.total_downloaded += u64::from(stats.downloaded); } - assert_eq!(swarms.get_aggregate_swarm_metadata(), metrics); + assert_eq!(swarms.get_aggregate_swarm_metadata().unwrap(), metrics); } #[rstest] @@ -349,12 +349,12 @@ async fn it_should_import_persistent_torrents( ) { make(&swarms, &entries); - let mut downloaded = swarms.get_aggregate_swarm_metadata().total_downloaded; + let mut downloaded = swarms.get_aggregate_swarm_metadata().unwrap().total_downloaded; persistent_torrents.iter().for_each(|(_, d)| downloaded += u64::from(*d)); swarms.import_persistent(&persistent_torrents); - assert_eq!(swarms.get_aggregate_swarm_metadata().total_downloaded, downloaded); + assert_eq!(swarms.get_aggregate_swarm_metadata().unwrap().total_downloaded, downloaded); for (entry, _) in persistent_torrents { assert!(swarms.get(&entry).is_some()); @@ -388,7 +388,7 @@ async fn it_should_remove_an_entry(#[values(swarms())] swarms: Swarms, #[case] e assert!(swarms.remove(&info_hash).is_none()); } - assert_eq!(swarms.get_aggregate_swarm_metadata().total_torrents, 0); + assert_eq!(swarms.get_aggregate_swarm_metadata().unwrap().total_torrents, 0); } #[rstest] @@ -438,15 +438,18 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c // Insert the infohash and peer into the repository // and verify there is an extra torrent entry. 
{ - swarms.upsert_peer(&info_hash, &peer, None); - assert_eq!(swarms.get_aggregate_swarm_metadata().total_torrents, entries.len() as u64 + 1); + swarms.upsert_peer(&info_hash, &peer, None).unwrap(); + assert_eq!( + swarms.get_aggregate_swarm_metadata().unwrap().total_torrents, + entries.len() as u64 + 1 + ); } // Insert the infohash and peer into the repository // and verify the swarm metadata was updated. { - swarms.upsert_peer(&info_hash, &peer, None); - let stats = swarms.get_swarm_metadata(&info_hash); + swarms.upsert_peer(&info_hash, &peer, None).unwrap(); + let stats = swarms.get_swarm_metadata(&info_hash).unwrap(); assert_eq!( stats, Some(SwarmMetadata { @@ -466,7 +469,9 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c // Remove peers that have not been updated since the timeout (120 seconds ago). { - swarms.remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed")); + swarms + .remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed")) + .unwrap(); } // Verify that the this peer was removed from the repository. @@ -494,7 +499,7 @@ async fn it_should_remove_peerless_torrents( ) { make(&swarms, &entries); - swarms.remove_peerless_torrents(&policy); + swarms.remove_peerless_torrents(&policy).unwrap(); let torrents: Vec<(InfoHash, Swarm)> = swarms .get_paginated(None) diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 5902f6735..8c93f3605 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -39,6 +39,10 @@ impl InMemoryTorrentRepository { /// # Returns /// /// `true` if the peer stats were updated. + /// + /// # Panics + /// + /// This function panics if the underling swarms return an error. 
#[must_use] pub fn upsert_peer( &self, @@ -46,7 +50,9 @@ impl InMemoryTorrentRepository { peer: &peer::Peer, opt_persistent_torrent: Option, ) -> bool { - self.swarms.upsert_peer(info_hash, peer, opt_persistent_torrent) + self.swarms + .upsert_peer(info_hash, peer, opt_persistent_torrent) + .expect("Failed to upsert the peer in swarms") } /// Removes a torrent entry from the repository. @@ -77,8 +83,14 @@ impl InMemoryTorrentRepository { /// /// * `current_cutoff` - The cutoff timestamp; peers not updated since this /// time will be removed. + /// + /// # Panics + /// + /// This function panics if the underling swarms return an error. pub(crate) fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { - self.swarms.remove_inactive_peers(current_cutoff); + self.swarms + .remove_inactive_peers(current_cutoff) + .expect("Failed to remove inactive peers from swarms"); } /// Removes torrent entries that have no active peers. @@ -90,8 +102,14 @@ impl InMemoryTorrentRepository { /// /// * `policy` - The tracker policy containing the configuration for /// removing peerless torrents. + /// + /// # Panics + /// + /// This function panics if the underling swarms return an error. pub(crate) fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { - self.swarms.remove_peerless_torrents(policy); + self.swarms + .remove_peerless_torrents(policy) + .expect("Failed to remove peerless torrents from swarms"); } /// Retrieves a torrent entry by its infohash. @@ -139,9 +157,15 @@ impl InMemoryTorrentRepository { /// # Returns /// /// A `SwarmMetadata` struct containing the aggregated torrent data. 
+ /// + /// # Panics + /// + /// This function panics if the underling swarms return an error.s #[must_use] pub(crate) fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> SwarmMetadata { - self.swarms.get_swarm_metadata_or_default(info_hash) + self.swarms + .get_swarm_metadata_or_default(info_hash) + .expect("Failed to get swarm metadata") } /// Retrieves torrent peers for a given torrent and client, excluding the @@ -161,9 +185,15 @@ impl InMemoryTorrentRepository { /// /// A vector of peers (wrapped in `Arc`) representing the active peers for /// the torrent, excluding the requesting client. + /// + /// # Panics + /// + /// This function panics if the underling swarms return an error. #[must_use] pub(crate) fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { - self.swarms.get_peers_for(info_hash, peer, max(limit, TORRENT_PEERS_LIMIT)) + self.swarms + .get_peers_for(info_hash, peer, max(limit, TORRENT_PEERS_LIMIT)) + .expect("Failed to get other peers in swarm") } /// Retrieves the list of peers for a given torrent. @@ -182,11 +212,13 @@ impl InMemoryTorrentRepository { /// /// # Panics /// - /// This function panics if the lock for the torrent entry cannot be obtained. + /// This function panics if the underling swarms return an error. #[must_use] pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { // todo: pass the limit as an argument like `get_peers_for` - self.swarms.get_torrent_peers(info_hash, TORRENT_PEERS_LIMIT) + self.swarms + .get_torrent_peers(info_hash, TORRENT_PEERS_LIMIT) + .expect("Failed to get other peers in swarm") } /// Calculates and returns overall torrent metrics. @@ -198,9 +230,15 @@ impl InMemoryTorrentRepository { /// # Returns /// /// A [`AggregateSwarmMetadata`] struct with the aggregated metrics. + /// + /// # Panics + /// + /// This function panics if the underling swarms return an error. 
#[must_use] pub fn get_aggregate_swarm_metadata(&self) -> AggregateSwarmMetadata { - self.swarms.get_aggregate_swarm_metadata() + self.swarms + .get_aggregate_swarm_metadata() + .expect("Failed to get aggregate swarm metadata") } /// Imports persistent torrent data into the in-memory repository. From 31f1fbf32216fbb1f1fc43c5c103af44e25bb462 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 7 May 2025 12:07:15 +0100 Subject: [PATCH 579/802] refactgor: [#1495] make field private It was public only to allow setting a pre-defined state in tests. A new public method have been adding temporarily to explain its usage. --- packages/torrent-repository/src/swarms.rs | 14 +++++++++++--- packages/torrent-repository/tests/swarms/mod.rs | 9 +++------ 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 222bea60a..34cd52d3b 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::sync::{Arc, Mutex}; use bittorrent_primitives::info_hash::InfoHash; use crossbeam_skiplist::SkipMap; @@ -12,8 +12,7 @@ use crate::SwarmHandle; #[derive(Default, Debug)] pub struct Swarms { - // todo: this needs to be public only to insert a peerless torrent (empty swarm). - pub swarms: SkipMap, + swarms: SkipMap, } impl Swarms { @@ -62,6 +61,15 @@ impl Swarms { } } + /// Inserts a new swarm. It's only used for testing purposes. It allows to + /// pre-define the initial state of the swarm without having to go through + /// the upsert process. + pub fn insert_swarm(&self, info_hash: &InfoHash, swarm: Swarm) { + // code-review: swarms builder? + let swarm_handle = Arc::new(Mutex::new(swarm)); + self.swarms.insert(*info_hash, swarm_handle); + } + /// Removes a torrent entry from the repository. 
/// /// # Returns diff --git a/packages/torrent-repository/tests/swarms/mod.rs b/packages/torrent-repository/tests/swarms/mod.rs index 82247bfcb..43571eb83 100644 --- a/packages/torrent-repository/tests/swarms/mod.rs +++ b/packages/torrent-repository/tests/swarms/mod.rs @@ -1,6 +1,5 @@ use std::collections::{BTreeMap, HashSet}; use std::hash::{DefaultHasher, Hash, Hasher}; -use std::sync::{Arc, Mutex}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use bittorrent_primitives::info_hash::InfoHash; @@ -148,11 +147,9 @@ fn persistent_three() -> PersistentTorrents { t.iter().copied().collect() } -fn make(repo: &Swarms, entries: &Entries) { - for (info_hash, entry) in entries { - let new = Arc::new(Mutex::new(entry.clone())); - // todo: use a public method to insert an empty swarm. - repo.swarms.insert(*info_hash, new); +fn make(swarms: &Swarms, entries: &Entries) { + for (info_hash, swarm) in entries { + swarms.insert_swarm(info_hash, swarm.clone()); } } From 5c2c1e0f77c767a945823fb3abf7091caeb17129 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 7 May 2025 12:13:42 +0100 Subject: [PATCH 580/802] feat: [#1495] add len and is_empty methods to Swarms type --- packages/torrent-repository/src/swarms.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 34cd52d3b..a03b9d7e6 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -294,6 +294,16 @@ impl Swarms { Ok(metrics) } + + #[must_use] + pub fn len(&self) -> usize { + self.swarms.len() + } + + #[must_use] + pub fn is_empty(&self) -> bool { + self.swarms.is_empty() + } } #[derive(thiserror::Error, Debug, Clone)] From 5b3142f6bae735750aa0ebead74b3587bb441f01 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 7 May 2025 12:17:35 +0100 Subject: [PATCH 581/802] refactor: [#1495] refactor Swarms::upsert_peer --- packages/torrent-repository/src/swarms.rs | 
20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index a03b9d7e6..fb6652ba5 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -42,23 +42,17 @@ impl Swarms { peer: &peer::Peer, opt_persistent_torrent: Option, ) -> Result { - if let Some(existing_swarm_handle) = self.swarms.get(info_hash) { - let mut swarm = existing_swarm_handle.value().lock()?; - - Ok(swarm.handle_announcement(peer)) + let swarm_handle = if let Some(number_of_downloads) = opt_persistent_torrent { + SwarmHandle::new(Swarm::new(number_of_downloads).into()) } else { - let new_swarm_handle = if let Some(number_of_downloads) = opt_persistent_torrent { - SwarmHandle::new(Swarm::new(number_of_downloads).into()) - } else { - SwarmHandle::default() - }; + SwarmHandle::default() + }; - let inserted_swarm_handle = self.swarms.get_or_insert(*info_hash, new_swarm_handle); + let swarm_handle = self.swarms.get_or_insert(*info_hash, swarm_handle); - let mut swarm = inserted_swarm_handle.value().lock()?; + let mut swarm = swarm_handle.value().lock()?; - Ok(swarm.handle_announcement(peer)) - } + Ok(swarm.handle_announcement(peer)) } /// Inserts a new swarm. It's only used for testing purposes. 
It allows to From 4d91738d05cc2220ebdea4cb512badbf1809074f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 7 May 2025 12:58:21 +0100 Subject: [PATCH 582/802] refactor: [#1495] renamings in torrent-repository pkg --- packages/torrent-repository/src/swarms.rs | 189 +++++++++--------- .../torrent-repository/tests/swarms/mod.rs | 6 +- .../src/torrent/repository/in_memory.rs | 6 +- 3 files changed, 102 insertions(+), 99 deletions(-) diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index fb6652ba5..828e8c030 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -36,7 +36,7 @@ impl Swarms { /// # Errors /// /// This function panics if the lock for the swarm handle cannot be acquired. - pub fn upsert_peer( + pub fn handle_announcement( &self, info_hash: &InfoHash, peer: &peer::Peer, @@ -55,11 +55,13 @@ impl Swarms { Ok(swarm.handle_announcement(peer)) } - /// Inserts a new swarm. It's only used for testing purposes. It allows to - /// pre-define the initial state of the swarm without having to go through - /// the upsert process. - pub fn insert_swarm(&self, info_hash: &InfoHash, swarm: Swarm) { + /// Inserts a new swarm. + pub fn insert(&self, info_hash: &InfoHash, swarm: Swarm) { // code-review: swarms builder? + // It's only used for testing purposes. It allows to pre-define the + // initial state of the swarm without having to go through the upsert + // process. + let swarm_handle = Arc::new(Mutex::new(swarm)); self.swarms.insert(*info_hash, swarm_handle); } @@ -184,7 +186,12 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for the /// swarm handle. 
- pub fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Result>, Error> { + pub fn get_peers_peers_excluding( + &self, + info_hash: &InfoHash, + peer: &peer::Peer, + limit: usize, + ) -> Result>, Error> { match self.get(info_hash) { None => Ok(vec![]), Some(swarm_handle) => { @@ -208,7 +215,7 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for the /// swarm handle. - pub fn get_torrent_peers(&self, info_hash: &InfoHash, limit: usize) -> Result>, Error> { + pub fn get_swarm_peers(&self, info_hash: &InfoHash, limit: usize) -> Result>, Error> { match self.get(info_hash) { None => Ok(vec![]), Some(swarm_handle) => { @@ -356,25 +363,25 @@ mod tests { #[tokio::test] async fn it_should_add_the_first_peer_to_the_torrent_peer_list() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &sample_peer(), None); - assert!(torrent_repository.get(&info_hash).is_some()); + assert!(swarms.get(&info_hash).is_some()); } #[tokio::test] async fn it_should_allow_adding_the_same_peer_twice_to_the_torrent_peer_list() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &sample_peer(), None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &sample_peer(), None); - assert!(torrent_repository.get(&info_hash).is_some()); + assert!(swarms.get(&info_hash).is_some()); 
} } @@ -393,30 +400,30 @@ mod tests { #[tokio::test] async fn it_should_return_the_peers_for_a_given_torrent() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); - let peers = torrent_repository.get_torrent_peers(&info_hash, 74).unwrap(); + let peers = swarms.get_swarm_peers(&info_hash, 74).unwrap(); assert_eq!(peers, vec![Arc::new(peer)]); } #[tokio::test] async fn it_should_return_an_empty_list_or_peers_for_a_non_existing_torrent() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); - let peers = torrent_repository.get_torrent_peers(&sample_info_hash(), 74).unwrap(); + let peers = swarms.get_swarm_peers(&sample_info_hash(), 74).unwrap(); assert!(peers.is_empty()); } #[tokio::test] async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); @@ -431,10 +438,10 @@ mod tests { event: AnnounceEvent::Completed, }; - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); } - let peers = torrent_repository.get_torrent_peers(&info_hash, 74).unwrap(); + let peers = swarms.get_swarm_peers(&info_hash, 74).unwrap(); assert_eq!(peers.len(), 74); } @@ -455,10 +462,10 @@ mod tests { #[tokio::test] async fn it_should_return_an_empty_peer_list_for_a_non_existing_torrent() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); - let peers = torrent_repository - .get_peers_for(&sample_info_hash(), &sample_peer(), 
TORRENT_PEERS_LIMIT) + let peers = swarms + .get_peers_peers_excluding(&sample_info_hash(), &sample_peer(), TORRENT_PEERS_LIMIT) .unwrap(); assert_eq!(peers, vec![]); @@ -466,15 +473,15 @@ mod tests { #[tokio::test] async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); - let peers = torrent_repository - .get_peers_for(&info_hash, &peer, TORRENT_PEERS_LIMIT) + let peers = swarms + .get_peers_peers_excluding(&info_hash, &peer, TORRENT_PEERS_LIMIT) .unwrap(); assert_eq!(peers, vec![]); @@ -482,13 +489,13 @@ mod tests { #[tokio::test] async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let excluded_peer = sample_peer(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &excluded_peer, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &excluded_peer, None); // Add 74 peers for idx in 2..=75 { @@ -502,11 +509,11 @@ mod tests { event: AnnounceEvent::Completed, }; - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); } - let peers = torrent_repository - .get_peers_for(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT) + let peers = swarms + .get_peers_peers_excluding(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT) .unwrap(); assert_eq!(peers.len(), 74); @@ -529,67 +536,64 @@ mod tests { #[tokio::test] async fn 
it_should_remove_a_torrent_entry() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &sample_peer(), None); - let _unused = torrent_repository.remove(&info_hash); + let _unused = swarms.remove(&info_hash); - assert!(torrent_repository.get(&info_hash).is_none()); + assert!(swarms.get(&info_hash).is_none()); } #[tokio::test] async fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); // Cut off time is 1 second after the peer was updated - torrent_repository + swarms .remove_inactive_peers(peer.updated.add(Duration::from_secs(1))) .unwrap(); - assert!(!torrent_repository - .get_torrent_peers(&info_hash, 74) - .unwrap() - .contains(&Arc::new(peer))); + assert!(!swarms.get_swarm_peers(&info_hash, 74).unwrap().contains(&Arc::new(peer))); } fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); // Insert a sample peer for the torrent to force adding the torrent entry let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = torrent_repository.upsert_peer(info_hash, &peer, None); + let _number_of_downloads_increased = swarms.handle_announcement(info_hash, &peer, None); // Remove the peer - 
torrent_repository + swarms .remove_inactive_peers(peer.updated.add(Duration::from_secs(1))) .unwrap(); - torrent_repository + swarms } #[tokio::test] async fn it_should_remove_torrents_without_peers() { let info_hash = sample_info_hash(); - let torrent_repository = initialize_repository_with_one_torrent_without_peers(&info_hash); + let swarms = initialize_repository_with_one_torrent_without_peers(&info_hash); let tracker_policy = TrackerPolicy { remove_peerless_torrents: true, ..Default::default() }; - torrent_repository.remove_peerless_torrents(&tracker_policy).unwrap(); + swarms.remove_peerless_torrents(&tracker_policy).unwrap(); - assert!(torrent_repository.get(&info_hash).is_none()); + assert!(swarms.get(&info_hash).is_none()); } } mod returning_torrent_entries { @@ -632,14 +636,14 @@ mod tests { #[tokio::test] async fn it_should_return_one_torrent_entry_by_infohash() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); - let torrent_entry = torrent_repository.get(&info_hash).unwrap(); + let torrent_entry = swarms.get(&info_hash).unwrap(); assert_eq!( TorrentEntryInfo { @@ -666,13 +670,13 @@ mod tests { #[tokio::test] async fn without_pagination() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash, &peer, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); - let torrent_entries = torrent_repository.get_paginated(None); + let torrent_entries = swarms.get_paginated(None); assert_eq!(torrent_entries.len(), 1); @@ -707,20 +711,20 @@ 
mod tests { #[tokio::test] async fn it_should_return_the_first_page() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_one, None); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_two, None); // Get only the first page where page size is 1 - let torrent_entries = torrent_repository.get_paginated(Some(&Pagination { offset: 0, limit: 1 })); + let torrent_entries = swarms.get_paginated(Some(&Pagination { offset: 0, limit: 1 })); assert_eq!(torrent_entries.len(), 1); @@ -742,20 +746,20 @@ mod tests { #[tokio::test] async fn it_should_return_the_second_page() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_one, None); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_two, None); // Get only the 
first page where page size is 1 - let torrent_entries = torrent_repository.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); + let torrent_entries = swarms.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); assert_eq!(torrent_entries.len(), 1); @@ -777,20 +781,20 @@ mod tests { #[tokio::test] async fn it_should_allow_changing_the_page_size() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash_one, &peer_one, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_one, None); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&info_hash_one, &peer_two, None); + let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_two, None); // Get only the first page where page size is 1 - let torrent_entries = torrent_repository.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); + let torrent_entries = swarms.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); assert_eq!(torrent_entries.len(), 1); } @@ -812,9 +816,9 @@ mod tests { #[tokio::test] async fn it_should_get_empty_aggregate_swarm_metadata_when_there_are_no_torrents() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); - let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -829,11 +833,11 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_leecher() { - let 
torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &leecher(), None); + let _number_of_downloads_increased = swarms.handle_announcement(&sample_info_hash(), &leecher(), None); - let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -848,11 +852,11 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_seeder() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &seeder(), None); + let _number_of_downloads_increased = swarms.handle_announcement(&sample_info_hash(), &seeder(), None); - let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -867,11 +871,11 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_completed_peer() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&sample_info_hash(), &complete_peer(), None); + let _number_of_downloads_increased = swarms.handle_announcement(&sample_info_hash(), &complete_peer(), None); - let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -886,17 +890,16 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_are_multiple_torrents() { 
- let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let start_time = std::time::Instant::now(); for i in 0..1_000_000 { - let _number_of_downloads_increased = - torrent_repository.upsert_peer(&gen_seeded_infohash(&i), &leecher(), None); + let _number_of_downloads_increased = swarms.handle_announcement(&gen_seeded_infohash(&i), &leecher(), None); } let result_a = start_time.elapsed(); let start_time = std::time::Instant::now(); - let aggregate_swarm_metadata = torrent_repository.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); let result_b = start_time.elapsed(); assert_eq!( @@ -923,13 +926,13 @@ mod tests { #[tokio::test] async fn it_should_get_swarm_metadata_for_an_existing_torrent() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); let infohash = sample_info_hash(); - let _number_of_downloads_increased = torrent_repository.upsert_peer(&infohash, &leecher(), None); + let _number_of_downloads_increased = swarms.handle_announcement(&infohash, &leecher(), None); - let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&infohash).unwrap(); + let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).unwrap(); assert_eq!( swarm_metadata, @@ -943,9 +946,9 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_for_a_non_existing_torrent() { - let torrent_repository = Arc::new(Swarms::default()); + let swarms = Arc::new(Swarms::default()); - let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&sample_info_hash()).unwrap(); + let swarm_metadata = swarms.get_swarm_metadata_or_default(&sample_info_hash()).unwrap(); assert_eq!(swarm_metadata, SwarmMetadata::zeroed()); } @@ -962,7 +965,7 @@ mod tests { #[tokio::test] async fn it_should_allow_importing_persisted_torrent_entries() { - let torrent_repository = Arc::new(Swarms::default()); + let 
swarms = Arc::new(Swarms::default()); let infohash = sample_info_hash(); @@ -970,9 +973,9 @@ mod tests { persistent_torrents.insert(infohash, 1); - torrent_repository.import_persistent(&persistent_torrents); + swarms.import_persistent(&persistent_torrents); - let swarm_metadata = torrent_repository.get_swarm_metadata_or_default(&infohash).unwrap(); + let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).unwrap(); // Only the number of downloads is persisted. assert_eq!(swarm_metadata.downloaded, 1); diff --git a/packages/torrent-repository/tests/swarms/mod.rs b/packages/torrent-repository/tests/swarms/mod.rs index 43571eb83..8e58b9e76 100644 --- a/packages/torrent-repository/tests/swarms/mod.rs +++ b/packages/torrent-repository/tests/swarms/mod.rs @@ -149,7 +149,7 @@ fn persistent_three() -> PersistentTorrents { fn make(swarms: &Swarms, entries: &Entries) { for (info_hash, swarm) in entries { - swarms.insert_swarm(info_hash, swarm.clone()); + swarms.insert(info_hash, swarm.clone()); } } @@ -435,7 +435,7 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c // Insert the infohash and peer into the repository // and verify there is an extra torrent entry. { - swarms.upsert_peer(&info_hash, &peer, None).unwrap(); + swarms.handle_announcement(&info_hash, &peer, None).unwrap(); assert_eq!( swarms.get_aggregate_swarm_metadata().unwrap().total_torrents, entries.len() as u64 + 1 @@ -445,7 +445,7 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c // Insert the infohash and peer into the repository // and verify the swarm metadata was updated. 
{ - swarms.upsert_peer(&info_hash, &peer, None).unwrap(); + swarms.handle_announcement(&info_hash, &peer, None).unwrap(); let stats = swarms.get_swarm_metadata(&info_hash).unwrap(); assert_eq!( stats, diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 8c93f3605..38593bf3c 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -51,7 +51,7 @@ impl InMemoryTorrentRepository { opt_persistent_torrent: Option, ) -> bool { self.swarms - .upsert_peer(info_hash, peer, opt_persistent_torrent) + .handle_announcement(info_hash, peer, opt_persistent_torrent) .expect("Failed to upsert the peer in swarms") } @@ -192,7 +192,7 @@ impl InMemoryTorrentRepository { #[must_use] pub(crate) fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { self.swarms - .get_peers_for(info_hash, peer, max(limit, TORRENT_PEERS_LIMIT)) + .get_peers_peers_excluding(info_hash, peer, max(limit, TORRENT_PEERS_LIMIT)) .expect("Failed to get other peers in swarm") } @@ -217,7 +217,7 @@ impl InMemoryTorrentRepository { pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { // todo: pass the limit as an argument like `get_peers_for` self.swarms - .get_torrent_peers(info_hash, TORRENT_PEERS_LIMIT) + .get_swarm_peers(info_hash, TORRENT_PEERS_LIMIT) .expect("Failed to get other peers in swarm") } From 4b5e914ad90c7a36552574eca65600c69c24e3f6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 7 May 2025 14:20:45 +0100 Subject: [PATCH 583/802] chore(deps): update dependencies ```output cargo update Updating crates.io index Locking 15 packages to latest compatible versions Updating backtrace v0.3.74 -> v0.3.75 Updating brotli v8.0.0 -> v8.0.1 Updating docker_credential v1.3.1 -> v1.3.2 Updating etcetera v0.8.0 -> v0.10.0 Updating h2 v0.4.9 -> v0.4.10 Updating hermit-abi v0.5.0 -> v0.5.1 Updating libm 
v0.2.13 -> v0.2.15 Updating local-ip-address v0.6.4 -> v0.6.5 Updating redox_syscall v0.5.11 -> v0.5.12 Updating rustls v0.23.26 -> v0.23.27 Updating rustls-pki-types v1.11.0 -> v1.12.0 Updating rustls-webpki v0.103.1 -> v0.103.2 Updating testcontainers v0.23.3 -> v0.24.0 Updating tokio v1.44.2 -> v1.45.0 Removing windows-sys v0.48.0 Removing windows-targets v0.48.5 Removing windows_aarch64_gnullvm v0.48.5 Removing windows_aarch64_msvc v0.48.5 Removing windows_i686_gnu v0.48.5 Removing windows_i686_msvc v0.48.5 Removing windows_x86_64_gnu v0.48.5 Removing windows_x86_64_gnullvm v0.48.5 Removing windows_x86_64_msvc v0.48.5 Updating winnow v0.7.8 -> v0.7.10 ``` --- Cargo.lock | 137 +++++++++++++++-------------------------------------- 1 file changed, 37 insertions(+), 100 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 093b8e9b0..80f98db36 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -482,9 +482,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.74" +version = "0.3.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" dependencies = [ "addr2line", "cfg-if", @@ -849,9 +849,9 @@ dependencies = [ [[package]] name = "brotli" -version = "8.0.0" +version = "8.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf19e729cdbd51af9a397fb9ef8ac8378007b797f8273cfbfdf45dcaa316167b" +checksum = "9991eea70ea4f293524138648e41ee89b0b2b12ddef3b255effa43c8056e0e0d" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1407,9 +1407,9 @@ dependencies = [ [[package]] name = "docker_credential" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31951f49556e34d90ed28342e1df7e1cb7a229c4cab0aecc627b5d91edd41d07" +checksum = "1d89dfcba45b4afad7450a99b39e751590463e45c04728cf555d36bb66940de8" dependencies 
= [ "base64 0.21.7", "serde", @@ -1465,13 +1465,13 @@ dependencies = [ [[package]] name = "etcetera" -version = "0.8.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" +checksum = "26c7b13d0780cb82722fd59f6f57f925e143427e4a75313a6c77243bf5326ae6" dependencies = [ "cfg-if", "home", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -1859,9 +1859,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75249d144030531f8dee69fe9cea04d3edf809a017ae445e2abdff6629e86633" +checksum = "a9421a676d1b147b16b82c9225157dc629087ef8ec4d5e2960f9437a90dac0a5" dependencies = [ "atomic-waker", "bytes", @@ -1935,9 +1935,9 @@ checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" [[package]] name = "hermit-abi" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbd780fe5cc30f81464441920d82ac8740e2e46b29a6fad543ddd075229ce37e" +checksum = "f154ce46856750ed433c8649605bf7ed2de3bc35fd9d2a9f30cddd873c80cb08" [[package]] name = "hex" @@ -2337,7 +2337,7 @@ version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ - "hermit-abi 0.5.0", + "hermit-abi 0.5.1", "libc", "windows-sys 0.59.0", ] @@ -2431,9 +2431,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.13" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9627da5196e5d8ed0b0495e61e518847578da83483c37288316d9b2e03a7f72" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "libredox" @@ -2443,7 +2443,7 @@ checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ 
"bitflags 2.9.0", "libc", - "redox_syscall 0.5.11", + "redox_syscall 0.5.12", ] [[package]] @@ -2488,9 +2488,9 @@ checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" [[package]] name = "local-ip-address" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c986b1747bbd3666abe4d57c64e60e6a82c2216140d8b12d5ceb33feb9de44b3" +checksum = "656b3b27f8893f7bbf9485148ff9a65f019e3f33bd5cdc87c83cab16b3fd9ec8" dependencies = [ "libc", "neli", @@ -2929,7 +2929,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.11", + "redox_syscall 0.5.12", "smallvec", "windows-targets 0.52.6", ] @@ -3401,9 +3401,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.11" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2f103c6d277498fbceb16e84d317e2a400f160f46904d5f5410848c829511a3" +checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" dependencies = [ "bitflags 2.9.0", ] @@ -3659,9 +3659,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.26" +version = "0.23.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df51b5869f3a441595eac5e8ff14d486ff285f7b8c0df8770e49c3b56351f0f0" +checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321" dependencies = [ "once_cell", "ring", @@ -3694,15 +3694,18 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +dependencies = [ + "zeroize", +] [[package]] name = "rustls-webpki" -version = "0.103.1" +version = "0.103.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fef8b8769aaccf73098557a87cd1816b4f9c7c16811c9c77142aa695c16f2c03" +checksum = "7149975849f1abb3832b246010ef62ccc80d3a76169517ada7188252b9cfb437" dependencies = [ "ring", "rustls-pki-types", @@ -4232,9 +4235,9 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "testcontainers" -version = "0.23.3" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59a4f01f39bb10fc2a5ab23eb0d888b1e2bb168c157f61a1b98e6c501c639c74" +checksum = "23bb7577dca13ad86a78e8271ef5d322f37229ec83b8d98da6d996c588a1ddb1" dependencies = [ "async-trait", "bollard", @@ -4387,9 +4390,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.44.2" +version = "1.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" +checksum = "2513ca694ef9ede0fb23fe71a4ee4107cb102b9dc1930f6d0fd77aae068ae165" dependencies = [ "backtrace", "bytes", @@ -5388,15 +5391,6 @@ dependencies = [ "windows-link", ] -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets 0.48.5", -] - [[package]] name = "windows-sys" version = "0.52.0" @@ -5415,21 +5409,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-targets" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" -dependencies = [ - "windows_aarch64_gnullvm 0.48.5", - "windows_aarch64_msvc 0.48.5", - "windows_i686_gnu 0.48.5", - "windows_i686_msvc 0.48.5", - "windows_x86_64_gnu 0.48.5", - "windows_x86_64_gnullvm 0.48.5", - "windows_x86_64_msvc 
0.48.5", -] - [[package]] name = "windows-targets" version = "0.52.6" @@ -5462,12 +5441,6 @@ dependencies = [ "windows_x86_64_msvc 0.53.0", ] -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" - [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" @@ -5480,12 +5453,6 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" - [[package]] name = "windows_aarch64_msvc" version = "0.52.6" @@ -5498,12 +5465,6 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" -[[package]] -name = "windows_i686_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" - [[package]] name = "windows_i686_gnu" version = "0.52.6" @@ -5528,12 +5489,6 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" -[[package]] -name = "windows_i686_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" - [[package]] name = "windows_i686_msvc" version = "0.52.6" @@ -5546,12 +5501,6 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" -[[package]] -name = 
"windows_x86_64_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" - [[package]] name = "windows_x86_64_gnu" version = "0.52.6" @@ -5564,12 +5513,6 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" - [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" @@ -5582,12 +5525,6 @@ version = "0.53.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" - [[package]] name = "windows_x86_64_msvc" version = "0.52.6" @@ -5602,9 +5539,9 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.7.8" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e27d6ad3dac991091e4d35de9ba2d2d00647c5d0fc26c5496dee55984ae111b" +checksum = "c06928c8748d81b05c9be96aad92e1b6ff01833332f281e8cfca3be4b35fc9ec" dependencies = [ "memchr", ] From 32a37d148ca1258d47a9bcf3fbbfb3a3d99a1ba8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 7 May 2025 17:06:24 +0100 Subject: [PATCH 584/802] fix: [#1502] bug in total number of downloads for all torrents metric Relates to: https://github.com/torrust/torrust-tracker/pull/1497/commits/34c159a161b7c167730f6c139dd3cb608173d37a A couple of days ago, I made a change in [this 
commit](https://github.com/torrust/torrust-tracker/pull/1497/commits/34c159a161b7c167730f6c139dd3cb608173d37a). I changed the `Swarm::meets_retaining_policy` method from: ``` /// Returns true if the torrents meets the retention policy, meaning that /// it should be kept in the tracker. pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { if policy.persistent_torrent_completed_stat && self.metadata().downloaded > 0 { return true; } if policy.remove_peerless_torrents && self.is_empty() { return false; } true } ``` To: ``` pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { !(policy.remove_peerless_torrents && self.is_empty()) } ``` I thought this code was not needed: ```rust if policy.persistent_torrent_completed_stat && self.metadata().downloaded > 0 { return true; } ``` However, it's needed. One of the metrics returned by the tracker API is the **total number of downloads for all torrents**. ```json { "torrents": 320961, "seeders": 189885, "completed": 975119, <- this "leechers": 231044, ... } ``` That metric is always stored in memory but can optionally persist into the database. It's important to highlight that the metric represents: - The total number of downloads for **ALL** torrents ever, when the metric is persisted. - The total number of downloads for **ALL** torrents since the tracker started, when the metric is not persisted. It could be mixed up with another internal metric (not exposed via the API), which is the same counter but only for ONE swarm (one torrent). - The total number of downloads for **ONE** concrete torrent ever, when the metric is persisted. - The total number of downloads for **ONE** concrete torrent since the tracker started, when the metric is not persisted. The bug affects the first metric. The one exposed via the API. The problem is that this feature conflicts with removing the peerless torrents. When removing the peerless torrents config option is enabled, the counter is lost unless it is persisted. 
Because the counter values are stored in the "Swarm" together with the list of peers. If statistics persistence is enabled, that's not a problem. When the torrent is removed from the tracker (from the swarms or swarm collection), the counter is initialised again if the torrent is added. In other words, if a new peer starts the swarm again, the number of downloads is loaded from the database. However, that works for the counter of each torrent (swarm) but not for the overall counter (the sum of downloads for all torrents). That metric is not stored anywhere. It's calculated on demand by iterating all the swarms and summing up the total for each torrent, giving the total amount of downloads for **ALL** torrents. When the torrent is removed, the downloads for that torrent don't count in the total. That is the reason we have to keep the torrent (swarm) in memory, even if it does not have any peer (and it should be removed according to the other config flag). The removed line: ```rust if policy.persistent_torrent_completed_stat && self.metadata().downloaded > 0 { return true; } ``` does that. **When the stats persistence is disabled**, that's one way to store the value. Alternatively, we could add another cache for the data and never remove that value. The current solution has a problem: It can make the tracker consume a lot of memory because peerless torrents are not removed in practice (even if it's configured to be). **When the stats persistence is enabled,** we can simply return the value from the database. **NOTICE:** that the value is used in the scrape response, so it might be convenient to have a cache in memory anyway. - [x] Revert the change to fix the bug asap. - [x] Write a unit test. This behaviour was not covered by any test (or documented). - [ ] Add an in-memory cache value in `Swarms` type to store the total for all torrents, regardless of which are the current active swarms. 
--- packages/torrent-repository/src/swarm.rs | 122 +++++++++++++++++++---- 1 file changed, 102 insertions(+), 20 deletions(-) diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 1a17a2fb6..e5b5d598c 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -191,10 +191,20 @@ impl Swarm { } /// Returns true if the swarm meets the retention policy, meaning that - /// it should be kept in the tracker. + /// it should be kept in the list of swarms. #[must_use] pub fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { - !(policy.remove_peerless_torrents && self.is_empty()) + !self.should_be_removed(policy) + } + + fn should_be_removed(&self, policy: &TrackerPolicy) -> bool { + // If the policy is to remove peerless torrents and the swarm is empty (no peers), + (policy.remove_peerless_torrents && self.is_empty()) + // but not when the policy is to persist torrent stats and the + // torrent has been downloaded at least once. + // (because the only way to store the counter is to keep the swarm in memory. 
+ // See https://github.com/torrust/torrust-tracker/issues/1502) + && !(policy.persistent_torrent_completed_stat && self.metadata().downloaded > 0) } } @@ -205,7 +215,6 @@ mod tests { use std::sync::Arc; use aquatic_udp_protocol::PeerId; - use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; @@ -376,28 +385,101 @@ mod tests { assert_eq!(swarm.len(), 1); } - #[test] - fn it_should_be_kept_when_empty_if_the_tracker_policy_is_not_to_remove_peerless_torrents() { - let empty_swarm = Swarm::default(); + mod for_retaining_policy { - let policy = TrackerPolicy { - remove_peerless_torrents: false, - ..Default::default() - }; + use torrust_tracker_configuration::TrackerPolicy; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; - assert!(empty_swarm.meets_retaining_policy(&policy)); - } + use crate::Swarm; - #[test] - fn it_should_be_removed_when_empty_if_the_tracker_policy_is_to_remove_peerless_torrents() { - let empty_swarm = Swarm::default(); + fn empty_swarm() -> Swarm { + Swarm::default() + } - let policy = TrackerPolicy { - remove_peerless_torrents: true, - ..Default::default() - }; + fn not_empty_swarm() -> Swarm { + let mut swarm = Swarm::default(); + swarm.upsert_peer(PeerBuilder::default().build().into(), &mut false); + swarm + } + + fn not_empty_swarm_with_downloads() -> Swarm { + let mut swarm = Swarm::default(); + + let mut peer = PeerBuilder::leecher().build(); + let mut downloads_increased = false; + + swarm.upsert_peer(peer.into(), &mut downloads_increased); + + peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; + + swarm.upsert_peer(peer.into(), &mut downloads_increased); + + assert!(swarm.metadata().downloads() > 0); + + swarm + } + + fn remove_peerless_torrents_policy() -> TrackerPolicy { + TrackerPolicy { + remove_peerless_torrents: true, + ..Default::default() + 
} + } + + fn don_not_remove_peerless_torrents_policy() -> TrackerPolicy { + TrackerPolicy { + remove_peerless_torrents: false, + ..Default::default() + } + } - assert!(!empty_swarm.meets_retaining_policy(&policy)); + mod when_removing_peerless_torrents_is_enabled { + + use torrust_tracker_configuration::TrackerPolicy; + + use crate::swarm::tests::for_retaining_policy::{ + empty_swarm, not_empty_swarm, not_empty_swarm_with_downloads, remove_peerless_torrents_policy, + }; + + #[test] + fn it_should_be_removed_if_the_swarm_is_empty() { + assert!(empty_swarm().should_be_removed(&remove_peerless_torrents_policy())); + } + + #[test] + fn it_should_not_be_removed_is_the_swarm_is_not_empty() { + assert!(!not_empty_swarm().should_be_removed(&remove_peerless_torrents_policy())); + } + + #[test] + fn it_should_not_be_removed_even_if_the_swarm_is_empty_if_we_need_to_track_stats_for_downloads_and_there_has_been_downloads( + ) { + let policy = TrackerPolicy { + remove_peerless_torrents: true, + persistent_torrent_completed_stat: true, + ..Default::default() + }; + + assert!(!not_empty_swarm_with_downloads().should_be_removed(&policy)); + } + } + + mod when_removing_peerless_torrents_is_disabled { + + use crate::swarm::tests::for_retaining_policy::{ + don_not_remove_peerless_torrents_policy, empty_swarm, not_empty_swarm, + }; + + #[test] + fn it_should_not_be_removed_even_if_the_swarm_is_empty() { + assert!(!empty_swarm().should_be_removed(&don_not_remove_peerless_torrents_policy())); + } + + #[test] + fn it_should_not_be_removed_is_the_swarm_is_not_empty() { + assert!(!not_empty_swarm().should_be_removed(&don_not_remove_peerless_torrents_policy())); + } + } } #[test] From 57b4822b74c4c8f81f81006dfbb45fb6bbde4e4f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 8 May 2025 11:01:43 +0100 Subject: [PATCH 585/802] refactor: remove debug print --- packages/udp-tracker-core/src/services/announce.rs | 2 -- 1 file changed, 2 deletions(-) diff --git 
a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index 499da2945..6ea237d84 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -119,8 +119,6 @@ impl AnnounceService { tracing::debug!(target = crate::UDP_TRACKER_LOG_TARGET, "Sending UdpAnnounce event: {event:?}"); - println!("Sending UdpAnnounce event: {event:?}"); - udp_stats_event_sender.send(event).await; } } From f11dfccc852605304a9923d10036a5a7d7502e28 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 8 May 2025 11:04:11 +0100 Subject: [PATCH 586/802] feat: [#1502] adding logs for debugging This adds more logs to the torrent's cleanup process. It would be helpful to find the bug described in the issue https://github.com/torrust/torrust-tracker/issues/1502. However, it will be useful afterwards. Sample output: ```output 2025-05-08T10:01:18.417631Z INFO torrust_tracker_lib::bootstrap::jobs::torrent_cleanup: Cleaning up torrents (executed every 60 secs) ... 2025-05-08T10:01:18.417661Z INFO bittorrent_tracker_core::torrent::manager: torrents=1 downloads=2 seeders=2 leechers=0 2025-05-08T10:01:18.417666Z INFO bittorrent_tracker_core::torrent::manager: peerless_torrents=0 peers=2 2025-05-08T10:01:18.417670Z INFO torrust_tracker_torrent_repository::swarms: Removing inactive peers since: 2025-05-08T10:00:48.417669546Z ... 2025-05-08T10:01:18.417676Z INFO torrust_tracker_torrent_repository::swarms: Inactive peers removed: 2 2025-05-08T10:01:18.417679Z INFO bittorrent_tracker_core::torrent::manager: torrents=1 downloads=2 seeders=0 leechers=0 2025-05-08T10:01:18.417682Z INFO bittorrent_tracker_core::torrent::manager: peerless_torrents=1 peers=0 2025-05-08T10:01:18.417685Z INFO torrust_tracker_torrent_repository::swarms: Removing peerless torrents ... 
2025-05-08T10:01:18.417688Z INFO torrust_tracker_torrent_repository::swarms: Peerless torrents removed: 0 2025-05-08T10:01:18.417690Z INFO bittorrent_tracker_core::torrent::manager: torrents=1 downloads=2 seeders=0 leechers=0 2025-05-08T10:01:18.417693Z INFO bittorrent_tracker_core::torrent::manager: peerless_torrents=1 peers=0 2025-05-08T10:01:18.417697Z INFO torrust_tracker_lib::bootstrap::jobs::torrent_cleanup: Cleaned up torrents in: 0 ms ``` --- Cargo.lock | 1 + packages/torrent-repository/Cargo.toml | 1 + packages/torrent-repository/src/swarm.rs | 13 +- packages/torrent-repository/src/swarms.rs | 124 ++++++++++++++---- packages/tracker-core/src/torrent/manager.rs | 35 +++++ .../src/torrent/repository/in_memory.rs | 22 ++++ .../config/tracker.development.sqlite3.toml | 6 + src/bootstrap/jobs/torrent_cleanup.rs | 5 +- 8 files changed, 181 insertions(+), 26 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 80f98db36..04ce8ad8c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4859,6 +4859,7 @@ dependencies = [ "torrust-tracker-configuration", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "tracing", ] [[package]] diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 2cc02a720..3396cd961 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -24,6 +24,7 @@ tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +tracing = "0" [dev-dependencies] async-std = { version = "1", features = ["attributes", "tokio1"] } diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index e5b5d598c..4437ca410 100644 --- a/packages/torrent-repository/src/swarm.rs +++ 
b/packages/torrent-repository/src/swarm.rs @@ -101,7 +101,9 @@ impl Swarm { } } - pub fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) { + pub fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) -> u64 { + let mut inactive_peers_removed = 0; + self.peers.retain(|_, peer| { let is_active = peer::ReadInfo::get_updated(peer) > current_cutoff; @@ -112,10 +114,14 @@ impl Swarm { } else { self.metadata.incomplete -= 1; } + + inactive_peers_removed += 1; } is_active }); + + inactive_peers_removed } #[must_use] @@ -190,6 +196,11 @@ impl Swarm { self.peers.is_empty() } + #[must_use] + pub fn is_peerless(&self) -> bool { + self.is_empty() + } + /// Returns true if the swarm meets the retention policy, meaning that /// it should be kept in the list of swarms. #[must_use] diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 828e8c030..0746e19a8 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -2,6 +2,7 @@ use std::sync::{Arc, Mutex}; use bittorrent_primitives::info_hash::InfoHash; use crossbeam_skiplist::SkipMap; +use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; @@ -76,24 +77,6 @@ impl Swarms { self.swarms.remove(key).map(|entry| entry.value().clone()) } - /// Removes inactive peers from all torrent entries. - /// - /// A peer is considered inactive if its last update timestamp is older than - /// the provided cutoff time. - /// - /// # Errors - /// - /// This function returns an error if it fails to acquire the lock for any - /// swarm handle. 
- pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> Result<(), Error> { - for swarm_handle in &self.swarms { - let mut swarm = swarm_handle.value().lock()?; - swarm.remove_inactive(current_cutoff); - } - - Ok(()) - } - /// Retrieves a tracked torrent handle by its infohash. /// /// # Returns @@ -225,6 +208,34 @@ impl Swarms { } } + /// Removes inactive peers from all torrent entries. + /// + /// A peer is considered inactive if its last update timestamp is older than + /// the provided cutoff time. + /// + /// # Errors + /// + /// This function returns an error if it fails to acquire the lock for any + /// swarm handle. + pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> Result { + tracing::info!( + "Removing inactive peers since: {:?} ...", + convert_from_timestamp_to_datetime_utc(current_cutoff) + ); + + let mut inactive_peers_removed = 0; + + for swarm_handle in &self.swarms { + let mut swarm = swarm_handle.value().lock()?; + let removed = swarm.remove_inactive(current_cutoff); + inactive_peers_removed += removed; + } + + tracing::info!("Inactive peers removed: {inactive_peers_removed}"); + + Ok(inactive_peers_removed) + } + /// Removes torrent entries that have no active peers. /// /// Depending on the tracker policy, torrents without any peers may be @@ -234,7 +245,11 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. 
- pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> Result<(), Error> { + pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> Result { + tracing::info!("Removing peerless torrents ..."); + + let mut peerless_torrents_removed = 0; + for swarm_handle in &self.swarms { let swarm = swarm_handle.value().lock()?; @@ -243,9 +258,13 @@ impl Swarms { } swarm_handle.remove(); + + peerless_torrents_removed += 1; } - Ok(()) + tracing::info!("Peerless torrents removed: {peerless_torrents_removed}"); + + Ok(peerless_torrents_removed) } /// Imports persistent torrent data into the in-memory repository. @@ -253,7 +272,11 @@ impl Swarms { /// This method takes a set of persisted torrent entries (e.g., from a /// database) and imports them into the in-memory repository for immediate /// access. - pub fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + pub fn import_persistent(&self, persistent_torrents: &PersistentTorrents) -> u64 { + tracing::info!("Importing persisted info about torrents ..."); + + let mut torrents_imported = 0; + for (info_hash, completed) in persistent_torrents { if self.swarms.contains_key(info_hash) { continue; @@ -264,7 +287,13 @@ impl Swarms { // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. self.swarms.get_or_insert(*info_hash, entry); + + torrents_imported += 1; } + + tracing::info!("Imported torrents: {torrents_imported}"); + + torrents_imported } /// Calculates and returns overall torrent metrics. 
@@ -284,9 +313,11 @@ impl Swarms { pub fn get_aggregate_swarm_metadata(&self) -> Result { let mut metrics = AggregateSwarmMetadata::default(); - for entry in &self.swarms { - let swarm = entry.value().lock()?; + for swarm_handle in &self.swarms { + let swarm = swarm_handle.value().lock()?; + let stats = swarm.metadata(); + metrics.total_complete += u64::from(stats.complete); metrics.total_downloaded += u64::from(stats.downloaded); metrics.total_incomplete += u64::from(stats.incomplete); @@ -296,6 +327,53 @@ impl Swarms { Ok(metrics) } + /// Counts the number of torrents that are peerless (i.e., have no active + /// peers). + /// + /// # Returns + /// + /// A `usize` representing the number of peerless torrents. + /// + /// # Errors + /// + /// This function returns an error if it fails to acquire the lock for any + /// swarm handle. + pub fn count_peerless_torrents(&self) -> Result { + let mut peerless_torrents = 0; + + for swarm_handle in &self.swarms { + let swarm = swarm_handle.value().lock()?; + + if swarm.is_peerless() { + peerless_torrents += 1; + } + } + + Ok(peerless_torrents) + } + + /// Counts the total number of peers across all torrents. + /// + /// # Returns + /// + /// A `usize` representing the total number of peers. + /// + /// # Errors + /// + /// This function returns an error if it fails to acquire the lock for any + /// swarm handle. 
+ pub fn count_peers(&self) -> Result { + let mut peers = 0; + + for swarm_handle in &self.swarms { + let swarm = swarm_handle.value().lock()?; + + peers += swarm.len(); + } + + Ok(peers) + } + #[must_use] pub fn len(&self) -> usize { self.swarms.len() diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index 5c8352f11..5afbcecf2 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -92,16 +92,51 @@ impl TorrentsManager { /// (`remove_peerless_torrents` is set), it removes entire torrent /// entries that have no active peers. pub fn cleanup_torrents(&self) { + self.log_aggregate_swarm_metadata(); + + self.remove_inactive_peers(); + + self.log_aggregate_swarm_metadata(); + + self.remove_peerless_torrents(); + + self.log_aggregate_swarm_metadata(); + } + + fn remove_inactive_peers(&self) { let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))) .unwrap_or_default(); self.in_memory_torrent_repository.remove_inactive_peers(current_cutoff); + } + fn remove_peerless_torrents(&self) { if self.config.tracker_policy.remove_peerless_torrents { self.in_memory_torrent_repository .remove_peerless_torrents(&self.config.tracker_policy); } } + + fn log_aggregate_swarm_metadata(&self) { + // Pre-calculated data + let aggregate_swarm_metadata = self.in_memory_torrent_repository.get_aggregate_swarm_metadata(); + + tracing::info!(name: "pre_calculated_aggregate_swarm_metadata", + torrents = aggregate_swarm_metadata.total_torrents, + downloads = aggregate_swarm_metadata.total_downloaded, + seeders = aggregate_swarm_metadata.total_complete, + leechers = aggregate_swarm_metadata.total_incomplete, + ); + + // Hot data (iterating over data structures) + let peerless_torrents = self.in_memory_torrent_repository.count_peerless_torrents(); + let peers = self.in_memory_torrent_repository.count_peers(); + + 
tracing::info!(name: "hot_aggregate_swarm_metadata", + peerless_torrents = peerless_torrents, + peers = peers, + ); + } } #[cfg(test)] diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 38593bf3c..ffb53edad 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -241,6 +241,28 @@ impl InMemoryTorrentRepository { .expect("Failed to get aggregate swarm metadata") } + /// Counts the number of peerless torrents in the repository. + /// + /// # Panics + /// + /// This function panics if the underling swarms return an error. + #[must_use] + pub fn count_peerless_torrents(&self) -> usize { + self.swarms + .count_peerless_torrents() + .expect("Failed to count peerless torrents") + } + + /// Counts the number of peers in the repository. + /// + /// # Panics + /// + /// This function panics if the underling swarms return an error. + #[must_use] + pub fn count_peers(&self) -> usize { + self.swarms.count_peers().expect("Failed to count peers") + } + /// Imports persistent torrent data into the in-memory repository. 
/// /// This method takes a set of persisted torrent entries (e.g., from a database) diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index 333c6d66c..8d03f2300 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -7,9 +7,15 @@ schema_version = "2.0.0" threshold = "info" [core] +#inactive_peer_cleanup_interval = 60 listed = false private = false +#[core.tracker_policy] +#max_peer_timeout = 30 +#persistent_torrent_completed_stat = true +#remove_peerless_torrents = true + [[udp_trackers]] bind_address = "0.0.0.0:6868" tracker_usage_statistics = true diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index 54b1eeef7..0107b5370 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -28,6 +28,7 @@ use tracing::instrument; pub fn start_job(config: &Core, torrents_manager: &Arc) -> JoinHandle<()> { let weak_torrents_manager = std::sync::Arc::downgrade(torrents_manager); let interval = config.inactive_peer_cleanup_interval; + let interval_in_secs = interval; tokio::spawn(async move { let interval = std::time::Duration::from_secs(interval); @@ -43,9 +44,9 @@ pub fn start_job(config: &Core, torrents_manager: &Arc) -> Join _ = interval.tick() => { if let Some(torrents_manager) = weak_torrents_manager.upgrade() { let start_time = Utc::now().time(); - tracing::info!("Cleaning up torrents.."); + tracing::info!("Cleaning up torrents (executed every {} secs) ...", interval_in_secs); torrents_manager.cleanup_torrents(); - tracing::info!("Cleaned up torrents in: {}ms", (Utc::now().time() - start_time).num_milliseconds()); + tracing::info!("Cleaned up torrents in: {} ms", (Utc::now().time() - start_time).num_milliseconds()); } else { break; } From 46c7eae0fd53cbfc628ed85676eec8cee681f283 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 8 May 2025 
16:42:17 +0100 Subject: [PATCH 587/802] dev: enable persistence for downloads in dev config There are no performance problems in dev env, so it's better to enable as many features as possible to test them while developing. --- share/default/config/tracker.development.sqlite3.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index 8d03f2300..488743eb9 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -11,9 +11,9 @@ threshold = "info" listed = false private = false -#[core.tracker_policy] +[core.tracker_policy] #max_peer_timeout = 30 -#persistent_torrent_completed_stat = true +persistent_torrent_completed_stat = true #remove_peerless_torrents = true [[udp_trackers]] From ced2788a2854203be9653169d23e65415d7b4972 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 8 May 2025 16:54:06 +0100 Subject: [PATCH 588/802] fix: [#1502] import torrents' download counters from DB when the tracker starts. In the current implementation all torrents that have been downloaded at least once have to be in memory initializing the counter. Otherwise, the global counter for downloads for all torrents only includes downloads for the torrents being currently tracked by the tracker. --- packages/tracker-core/src/torrent/manager.rs | 5 ++--- src/app.rs | 11 +++++++++++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index 5afbcecf2..aaac811f2 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -60,7 +60,7 @@ impl TorrentsManager { } } - /// Loads torrents from the persistent database into the in-memory repository. 
/// /// This function retrieves the list of persistent torrent entries (which /// include only the aggregate metrics, not the detailed peer lists) from @@ -70,8 +70,7 @@ impl TorrentsManager { /// /// Returns a `databases::error::Error` if unable to load the persistent /// torrent data. - #[allow(dead_code)] - pub(crate) fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { + pub fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.db_torrent_repository.load_all()?; self.in_memory_torrent_repository.import_persistent(&persistent_torrents); diff --git a/src/app.rs b/src/app.rs index 8f5c6ca4c..7bfa5296a 100644 --- a/src/app.rs +++ b/src/app.rs @@ -61,6 +61,7 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> async fn load_data_from_database(config: &Configuration, app_container: &Arc) { load_peer_keys(config, app_container).await; load_whitelisted_torrents(config, app_container).await; + load_torrents_from_database(config, app_container); } async fn start_jobs(config: &Configuration, app_container: &Arc) -> JobManager { @@ -109,6 +110,16 @@ async fn load_whitelisted_torrents(config: &Configuration, app_container: &Arc) { + if config.core.tracker_policy.persistent_torrent_completed_stat { + app_container + .tracker_core_container + .torrents_manager + .load_torrents_from_database() + .expect("Could not load torrents from database."); + } +} + fn start_http_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { let opt_handle = jobs::http_tracker_core::start_event_listener(config, app_container); From 632185bf2237affb044f5c5f32c458061b460f40 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 8 May 2025 17:02:04 +0100 Subject: [PATCH 589/802] refactor: tracing spwams to use structure formats When possible prefer this with "variable=value" format: ``` imported_torrents=2 ``` To this: ``` Imported torrents: 2 ``` It's easier to 
parse and less likely to be changed. --- packages/torrent-repository/src/swarms.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 0746e19a8..a140663c9 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -231,7 +231,7 @@ impl Swarms { inactive_peers_removed += removed; } - tracing::info!("Inactive peers removed: {inactive_peers_removed}"); + tracing::info!(inactive_peers_removed = inactive_peers_removed); Ok(inactive_peers_removed) } @@ -262,7 +262,7 @@ impl Swarms { peerless_torrents_removed += 1; } - tracing::info!("Peerless torrents removed: {peerless_torrents_removed}"); + tracing::info!(peerless_torrents_removed = peerless_torrents_removed); Ok(peerless_torrents_removed) } @@ -291,7 +291,7 @@ impl Swarms { torrents_imported += 1; } - tracing::info!("Imported torrents: {torrents_imported}"); + tracing::info!(imported_torrents = torrents_imported); torrents_imported } From cb487f36588c681988f5f4c75eacf87a8539dc1c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 9 May 2025 08:35:40 +0100 Subject: [PATCH 590/802] fix: [#1510] disable torrent stats importation at start When the tracker starts, if stats persistence is enabled, all torrents that have ever been downloaded are loaded into memory (`Swarms` type) with their download counter. That's the current way to count all downloads and expose that metric. However, it does not work with **millions of torrents** (like in the tracker demo) because: - It's too slow. - It consumes too much memory (all torrents that have ever been downloaded have to be loaded). A new solution is needed to keep that metric, but in the meantime, this disables that feature, producing these effects: - Non-accurate value for downloads when the tracker is restarted.
- Increasing indefinitely the number of torrents in memory even if the "remove peerless torrents" policy is enabled (because this feature overrides that policy and peerless torrents are kept in memory). --- src/app.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/app.rs b/src/app.rs index 7bfa5296a..93035ee99 100644 --- a/src/app.rs +++ b/src/app.rs @@ -61,7 +61,12 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> async fn load_data_from_database(config: &Configuration, app_container: &Arc) { load_peer_keys(config, app_container).await; load_whitelisted_torrents(config, app_container).await; - load_torrents_from_database(config, app_container); + // todo: disabled because of performance issues. + // The tracker demo has a lot of torrents and loading them all at once is not + // efficient. We also load them on demand but the total number of downloads + // metric is not accurate because not all torrents are loaded. + // See: https://github.com/torrust/torrust-tracker/issues/1510 + //load_torrents_from_database(config, app_container); } async fn start_jobs(config: &Configuration, app_container: &Arc) -> JobManager { @@ -110,6 +115,7 @@ async fn load_whitelisted_torrents(config: &Configuration, app_container: &Arc) { if config.core.tracker_policy.persistent_torrent_completed_stat { app_container From 243c25484ce796db48d3532eadd6b76c7fd4f3eb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 13 May 2025 10:34:36 +0100 Subject: [PATCH 591/802] feat: allow incrementing/decrementing gauge metrics --- packages/metrics/src/gauge.rs | 22 +++++++ packages/metrics/src/metric/mod.rs | 8 +++ packages/metrics/src/metric_collection.rs | 62 ++++++++++++++++++++ packages/metrics/src/sample.rs | 38 +++++++++++- packages/metrics/src/sample_collection.rs | 70 +++++++++++++++++++---- 5 files changed, 187 insertions(+), 13 deletions(-) diff --git a/packages/metrics/src/gauge.rs b/packages/metrics/src/gauge.rs index 61ff3024c..3f6089955
100644 --- a/packages/metrics/src/gauge.rs +++ b/packages/metrics/src/gauge.rs @@ -20,6 +20,14 @@ impl Gauge { pub fn set(&mut self, value: f64) { self.0 = value; } + + pub fn increment(&mut self, value: f64) { + self.0 += value; + } + + pub fn decrement(&mut self, value: f64) { + self.0 -= value; + } } impl From for Gauge { @@ -72,6 +80,20 @@ mod tests { assert_relative_eq!(gauge.value(), 1.0); } + #[test] + fn it_could_be_incremented() { + let mut gauge = Gauge::new(0.0); + gauge.increment(1.0); + assert_relative_eq!(gauge.value(), 1.0); + } + + #[test] + fn it_could_be_decremented() { + let mut gauge = Gauge::new(1.0); + gauge.decrement(1.0); + assert_relative_eq!(gauge.value(), 0.0); + } + #[test] fn it_serializes_to_prometheus() { let counter = Gauge::new(42.0); diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index ecce90f18..05779f09f 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -61,6 +61,14 @@ impl Metric { pub fn set(&mut self, label_set: &LabelSet, value: f64, time: DurationSinceUnixEpoch) { self.sample_collection.set(label_set, value, time); } + + pub fn increment(&mut self, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + self.sample_collection.increment(label_set, time); + } + + pub fn decrement(&mut self, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + self.sample_collection.decrement(label_set, time); + } } impl PrometheusSerializable for Metric { diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 9e89c3c4b..438f3b03a 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -136,6 +136,38 @@ impl MetricCollection { Ok(()) } + /// # Errors + /// + /// Return an error if a metrics of a different type with the same name + /// already exists. 
+ pub fn increase_gauge(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) -> Result<(), Error> { + if self.counters.metrics.contains_key(name) { + return Err(Error::MetricNameCollisionAdding { + metric_name: name.clone(), + }); + } + + self.gauges.increment(name, label_set, time); + + Ok(()) + } + + /// # Errors + /// + /// Return an error if a metrics of a different type with the same name + /// already exists. + pub fn decrease_gauge(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) -> Result<(), Error> { + if self.counters.metrics.contains_key(name) { + return Err(Error::MetricNameCollisionAdding { + metric_name: name.clone(), + }); + } + + self.gauges.decrement(name, label_set, time); + + Ok(()) + } + pub fn ensure_gauge_exists(&mut self, name: &MetricName) { self.gauges.ensure_metric_exists(name); } @@ -353,6 +385,36 @@ impl MetricKindCollection { metric.set(label_set, value, time); } + /// Increments the gauge for the given metric name and labels. + /// + /// If the metric name does not exist, it will be created. + /// + /// # Panics + /// + /// Panics if the metric does not exist and it could not be created. + pub fn increment(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + self.ensure_metric_exists(name); + + let metric = self.metrics.get_mut(name).expect("Gauge metric should exist"); + + metric.increment(label_set, time); + } + + /// Decrements the gauge for the given metric name and labels. + /// + /// If the metric name does not exist, it will be created. + /// + /// # Panics + /// + /// Panics if the metric does not exist and it could not be created. 
+ pub fn decrement(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + self.ensure_metric_exists(name); + + let metric = self.metrics.get_mut(name).expect("Gauge metric should exist"); + + metric.decrement(label_set, time); + } + #[must_use] pub fn get_value(&self, name: &MetricName, label_set: &LabelSet) -> Option { self.metrics diff --git a/packages/metrics/src/sample.rs b/packages/metrics/src/sample.rs index 5567dffec..4621c9906 100644 --- a/packages/metrics/src/sample.rs +++ b/packages/metrics/src/sample.rs @@ -64,6 +64,14 @@ impl Sample { pub fn set(&mut self, value: f64, time: DurationSinceUnixEpoch) { self.measurement.set(value, time); } + + pub fn increment(&mut self, time: DurationSinceUnixEpoch) { + self.measurement.increment(time); + } + + pub fn decrement(&mut self, time: DurationSinceUnixEpoch) { + self.measurement.decrement(time); + } } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -121,6 +129,16 @@ impl Measurement { self.value.set(value); self.set_recorded_at(time); } + + pub fn increment(&mut self, time: DurationSinceUnixEpoch) { + self.value.increment(1.0); + self.set_recorded_at(time); + } + + pub fn decrement(&mut self, time: DurationSinceUnixEpoch) { + self.value.decrement(1.0); + self.set_recorded_at(time); + } } /// Serializes the `recorded_at` field as a string in ISO 8601 format (RFC 3339). 
@@ -273,7 +291,7 @@ mod tests { } #[test] - fn it_should_allow_incrementing_the_counter() { + fn it_should_allow_setting_a_value() { let mut sample = Sample::new(Gauge::default(), DurationSinceUnixEpoch::default(), LabelSet::default()); sample.set(1.0, updated_at_time()); @@ -281,6 +299,24 @@ mod tests { assert_eq!(sample.value(), &Gauge::new(1.0)); } + #[test] + fn it_should_allow_incrementing_the_value() { + let mut sample = Sample::new(Gauge::new(0.0), DurationSinceUnixEpoch::default(), LabelSet::default()); + + sample.increment(updated_at_time()); + + assert_eq!(sample.value(), &Gauge::new(1.0)); + } + + #[test] + fn it_should_allow_decrementing_the_value() { + let mut sample = Sample::new(Gauge::new(1.0), DurationSinceUnixEpoch::default(), LabelSet::default()); + + sample.decrement(updated_at_time()); + + assert_eq!(sample.value(), &Gauge::new(0.0)); + } + #[test] fn it_should_record_the_latest_update_time_when_the_counter_is_incremented() { let mut sample = Sample::new(Gauge::default(), DurationSinceUnixEpoch::default(), LabelSet::default()); diff --git a/packages/metrics/src/sample_collection.rs b/packages/metrics/src/sample_collection.rs index 49c839673..ea6b4d4af 100644 --- a/packages/metrics/src/sample_collection.rs +++ b/packages/metrics/src/sample_collection.rs @@ -90,6 +90,24 @@ impl SampleCollection { sample.set(value, time); } + + pub fn increment(&mut self, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + let sample = self + .samples + .entry(label_set.clone()) + .or_insert_with(|| Measurement::new(Gauge::default(), time)); + + sample.increment(time); + } + + pub fn decrement(&mut self, label_set: &LabelSet, time: DurationSinceUnixEpoch) { + let sample = self + .samples + .entry(label_set.clone()) + .or_insert_with(|| Measurement::new(Gauge::default(), time)); + + sample.decrement(time); + } } impl Serialize for SampleCollection { @@ -278,7 +296,7 @@ mod tests { #[test] fn it_should_increment_the_counter_for_a_preexisting_label_set() { let 
label_set = LabelSet::default(); - let mut collection = SampleCollection::default(); + let mut collection = SampleCollection::::default(); // Initialize the sample collection.increment(&label_set, sample_update_time()); @@ -296,7 +314,7 @@ mod tests { #[test] fn it_should_allow_increment_the_counter_for_a_non_existent_label_set() { let label_set = LabelSet::default(); - let mut collection = SampleCollection::default(); + let mut collection = SampleCollection::::default(); // Increment a non-existent label collection.increment(&label_set, sample_update_time()); @@ -312,7 +330,7 @@ mod tests { let label_set = LabelSet::default(); let initial_time = sample_update_time(); - let mut collection = SampleCollection::default(); + let mut collection = SampleCollection::::default(); collection.increment(&label_set, initial_time); // Increment with a new time @@ -330,7 +348,7 @@ mod tests { let label2 = LabelSet::from([("name", "value2")]); let now = sample_update_time(); - let mut collection = SampleCollection::default(); + let mut collection = SampleCollection::::default(); collection.increment(&label1, now); collection.increment(&label2, now); @@ -351,9 +369,9 @@ mod tests { use crate::gauge::Gauge; #[test] - fn it_should_increment_the_gauge_for_a_preexisting_label_set() { + fn it_should_allow_setting_the_gauge_for_a_preexisting_label_set() { let label_set = LabelSet::default(); - let mut collection = SampleCollection::default(); + let mut collection = SampleCollection::::default(); // Initialize the sample collection.set(&label_set, 1.0, sample_update_time()); @@ -369,9 +387,9 @@ mod tests { } #[test] - fn it_should_allow_increment_the_gauge_for_a_non_existent_label_set() { + fn it_should_allow_setting_the_gauge_for_a_non_existent_label_set() { let label_set = LabelSet::default(); - let mut collection = SampleCollection::default(); + let mut collection = SampleCollection::::default(); // Set a non-existent label collection.set(&label_set, 1.0, sample_update_time()); @@ 
-383,11 +401,11 @@ mod tests { } #[test] - fn it_should_update_the_latest_update_time_when_incremented() { + fn it_should_update_the_latest_update_time_when_setting() { let label_set = LabelSet::default(); let initial_time = sample_update_time(); - let mut collection = SampleCollection::default(); + let mut collection = SampleCollection::::default(); collection.set(&label_set, 1.0, initial_time); // Set with a new time @@ -400,12 +418,12 @@ mod tests { } #[test] - fn it_should_increment_the_gauge_for_multiple_labels() { + fn it_should_allow_setting_the_gauge_for_multiple_labels() { let label1 = LabelSet::from([("name", "value1")]); let label2 = LabelSet::from([("name", "value2")]); let now = sample_update_time(); - let mut collection = SampleCollection::default(); + let mut collection = SampleCollection::::default(); collection.set(&label1, 1.0, now); collection.set(&label2, 2.0, now); @@ -414,5 +432,33 @@ mod tests { assert_eq!(collection.get(&label2).unwrap().value(), &Gauge::new(2.0)); assert_eq!(collection.len(), 2); } + + #[test] + fn it_should_allow_incrementing_the_gauge() { + let label_set = LabelSet::default(); + let mut collection = SampleCollection::::default(); + + // Initialize the sample + collection.set(&label_set, 1.0, sample_update_time()); + + // Increment + collection.increment(&label_set, sample_update_time()); + let sample = collection.get(&label_set).unwrap(); + assert_eq!(*sample.value(), Gauge::new(2.0)); + } + + #[test] + fn it_should_allow_decrementing_the_gauge() { + let label_set = LabelSet::default(); + let mut collection = SampleCollection::::default(); + + // Initialize the sample + collection.set(&label_set, 1.0, sample_update_time()); + + // Increment + collection.decrement(&label_set, sample_update_time()); + let sample = collection.get(&label_set).unwrap(); + assert_eq!(*sample.value(), Gauge::new(0.0)); + } } } From 2522ad4ccff7bbc0010581d10bdc2f32abc90555 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 9 May 2025 16:40:14 
+0100 Subject: [PATCH 592/802] feat: [#1358] basic scaffolding for events in torrent-repository pkg TODO: - Run the event listener for the torrent-repository package when the tracker starts. - Inject enven sender in `Swarms` and `Swarm` type to send events. - Trigger events and process them to update the metrics. - Expose the metrics via the `metrics` API endpoint. - ... --- Cargo.lock | 3 + packages/torrent-repository/Cargo.toml | 3 + packages/torrent-repository/src/event.rs | 44 ++++++++++++++ packages/torrent-repository/src/lib.rs | 4 ++ .../src/statistics/event/handler.rs | 21 +++++++ .../src/statistics/event/listener.rs | 57 +++++++++++++++++++ .../src/statistics/event/mod.rs | 2 + .../src/statistics/metrics.rs | 39 +++++++++++++ .../torrent-repository/src/statistics/mod.rs | 34 +++++++++++ .../src/statistics/repository.rs | 54 ++++++++++++++++++ 10 files changed, 261 insertions(+) create mode 100644 packages/torrent-repository/src/event.rs create mode 100644 packages/torrent-repository/src/statistics/event/handler.rs create mode 100644 packages/torrent-repository/src/statistics/event/listener.rs create mode 100644 packages/torrent-repository/src/statistics/event/mod.rs create mode 100644 packages/torrent-repository/src/statistics/metrics.rs create mode 100644 packages/torrent-repository/src/statistics/mod.rs create mode 100644 packages/torrent-repository/src/statistics/repository.rs diff --git a/Cargo.lock b/Cargo.lock index 04ce8ad8c..90a6354bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4853,10 +4853,13 @@ dependencies = [ "crossbeam-skiplist", "rand 0.9.1", "rstest", + "serde", "thiserror 2.0.12", "tokio", "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-events", + "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", "tracing", diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 3396cd961..77192c7cf 100644 --- 
a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -19,10 +19,13 @@ version.workspace = true aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" crossbeam-skiplist = "0" +serde = "1.0.219" thiserror = "2.0.12" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } +torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } tracing = "0" diff --git a/packages/torrent-repository/src/event.rs b/packages/torrent-repository/src/event.rs new file mode 100644 index 000000000..57fe7bc4b --- /dev/null +++ b/packages/torrent-repository/src/event.rs @@ -0,0 +1,44 @@ +use std::net::SocketAddr; + +use aquatic_udp_protocol::PeerId; +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer::PeerAnnouncement; + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum Event { + TorrentAdded { + info_hash: InfoHash, + announcement: PeerAnnouncement, + }, + TorrentRemoved { + info_hash: InfoHash, + }, + PeerAdded { + announcement: PeerAnnouncement, + }, + PeerRemoved { + socket_addr: SocketAddr, + peer_id: PeerId, + }, +} + +pub mod sender { + use std::sync::Arc; + + use super::Event; + + pub type Sender = Option>>; + pub type Broadcaster = torrust_tracker_events::broadcaster::Broadcaster; +} + +pub mod receiver { + use super::Event; + + pub type Receiver = Box>; +} + +pub mod bus { + use crate::event::Event; + + pub type EventBus = torrust_tracker_events::bus::EventBus; +} diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index a4e7d9c5d..0d455177c 100644 --- a/packages/torrent-repository/src/lib.rs +++ 
b/packages/torrent-repository/src/lib.rs @@ -1,3 +1,5 @@ +pub mod event; +pub mod statistics; pub mod swarm; pub mod swarms; @@ -19,6 +21,8 @@ pub(crate) type CurrentClock = clock::Working; #[allow(dead_code)] pub(crate) type CurrentClock = clock::Stopped; +pub const TORRENT_REPOSITORY_LOG_TARGET: &str = "TORRENT_REPOSITORY"; + pub trait LockTrackedTorrent { fn lock_or_panic(&self) -> MutexGuard<'_, Swarm>; } diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs new file mode 100644 index 000000000..d68df0b1b --- /dev/null +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -0,0 +1,21 @@ +use std::sync::Arc; + +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::Event; +use crate::statistics::repository::Repository; + +/// # Panics +/// +/// This function panics if the client IP address is not the same as the IP +/// version of the event. +pub async fn handle_event(_event: Event, stats_repository: &Arc, _now: DurationSinceUnixEpoch) { + /*match event { + Event::TorrentAdded { .. } => {} + Event::TorrentRemoved { .. } => {} + Event::PeerAdded { .. } => {} + Event::PeerRemoved { .. 
} => {} + }*/ + + tracing::debug!("metrics: {:?}", stats_repository.get_metrics().await); +} diff --git a/packages/torrent-repository/src/statistics/event/listener.rs b/packages/torrent-repository/src/statistics/event/listener.rs new file mode 100644 index 000000000..f3b534332 --- /dev/null +++ b/packages/torrent-repository/src/statistics/event/listener.rs @@ -0,0 +1,57 @@ +use std::sync::Arc; + +use tokio::task::JoinHandle; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_events::receiver::RecvError; + +use super::handler::handle_event; +use crate::event::receiver::Receiver; +use crate::statistics::repository::Repository; +use crate::{CurrentClock, TORRENT_REPOSITORY_LOG_TARGET}; + +#[must_use] +pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { + let stats_repository = repository.clone(); + + tracing::info!(target: TORRENT_REPOSITORY_LOG_TARGET, "Starting torrent repository event listener"); + + tokio::spawn(async move { + dispatch_events(receiver, stats_repository).await; + + tracing::info!(target: TORRENT_REPOSITORY_LOG_TARGET, "Torrent repository listener finished"); + }) +} + +async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc) { + let shutdown_signal = tokio::signal::ctrl_c(); + + tokio::pin!(shutdown_signal); + + loop { + tokio::select! 
{ + biased; + + _ = &mut shutdown_signal => { + tracing::info!(target: TORRENT_REPOSITORY_LOG_TARGET, "Received Ctrl+C, shutting down torrent repository event listener."); + break; + } + + result = receiver.recv() => { + match result { + Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, + Err(e) => { + match e { + RecvError::Closed => { + tracing::info!(target: TORRENT_REPOSITORY_LOG_TARGET, "Torrent repository event receiver closed."); + break; + } + RecvError::Lagged(n) => { + tracing::warn!(target: TORRENT_REPOSITORY_LOG_TARGET, "Torrent repository event receiver lagged by {} events.", n); + } + } + } + } + } + } + } +} diff --git a/packages/torrent-repository/src/statistics/event/mod.rs b/packages/torrent-repository/src/statistics/event/mod.rs new file mode 100644 index 000000000..dae683398 --- /dev/null +++ b/packages/torrent-repository/src/statistics/event/mod.rs @@ -0,0 +1,2 @@ +pub mod handler; +pub mod listener; diff --git a/packages/torrent-repository/src/statistics/metrics.rs b/packages/torrent-repository/src/statistics/metrics.rs new file mode 100644 index 000000000..6ee275e63 --- /dev/null +++ b/packages/torrent-repository/src/statistics/metrics.rs @@ -0,0 +1,39 @@ +use serde::Serialize; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +/// Metrics collected by the torrent repository. +#[derive(Debug, Clone, PartialEq, Default, Serialize)] +pub struct Metrics { + /// A collection of metrics. + pub metric_collection: MetricCollection, +} + +impl Metrics { + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. 
+ pub fn increase_counter( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.increase_counter(metric_name, labels, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn set_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.set_gauge(metric_name, labels, value, now) + } +} diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs new file mode 100644 index 000000000..b0dce479f --- /dev/null +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -0,0 +1,34 @@ +pub mod event; +pub mod metrics; +pub mod repository; + +use metrics::Metrics; +use torrust_tracker_metrics::metric::description::MetricDescription; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_metrics::unit::Unit; + +const TORRENT_REPOSITORY_RUNTIME_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_runtime_torrents_downloads_total"; +const TORRENT_REPOSITORY_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_persistent_torrents_downloads_total"; + +#[must_use] +pub fn describe_metrics() -> Metrics { + let mut metrics = Metrics::default(); + + metrics.metric_collection.describe_counter( + &metric_name!(TORRENT_REPOSITORY_RUNTIME_TORRENTS_DOWNLOADS_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new( + "The total number of torrent downloads since the tracker process started.", + )), + ); + + metrics.metric_collection.describe_counter( + &metric_name!(TORRENT_REPOSITORY_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new( + "The total number of torrent downloads since persistent statistics were enabled the first time.", + )), + ); + + metrics +} diff --git 
a/packages/torrent-repository/src/statistics/repository.rs b/packages/torrent-repository/src/statistics/repository.rs new file mode 100644 index 000000000..9fdff7008 --- /dev/null +++ b/packages/torrent-repository/src/statistics/repository.rs @@ -0,0 +1,54 @@ +use std::sync::Arc; + +use tokio::sync::{RwLock, RwLockReadGuard}; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::Error; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::describe_metrics; +use super::metrics::Metrics; + +/// A repository for the torrent repository metrics. +#[derive(Clone)] +pub struct Repository { + pub stats: Arc>, +} + +impl Default for Repository { + fn default() -> Self { + Self::new() + } +} + +impl Repository { + #[must_use] + pub fn new() -> Self { + let stats = Arc::new(RwLock::new(describe_metrics())); + + Self { stats } + } + + pub async fn get_metrics(&self) -> RwLockReadGuard<'_, Metrics> { + self.stats.read().await + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increase the counter. + pub async fn increase_counter( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.increase_counter(metric_name, labels, now); + + drop(stats_lock); + + result + } +} From f986bdaf2396dc7a921a86ed168d3bd684c64931 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 9 May 2025 17:18:55 +0100 Subject: [PATCH 593/802] feat: [#1358] add the and run the event listener when the tracker starts This creates independent services that are not used yet in the tracker-core, meaning the `Swarms` object created in the `TorrentRepositoryContainer` will not store any torrent yet. The tracker core is still creating its own fresh instance. 
--- Cargo.lock | 1 + Cargo.toml | 1 + packages/torrent-repository/src/container.rs | 37 ++++++++++++++++++++ packages/torrent-repository/src/lib.rs | 1 + src/app.rs | 14 ++++++++ src/bootstrap/jobs/mod.rs | 1 + src/bootstrap/jobs/torrent_repository.rs | 20 +++++++++++ src/container.rs | 11 ++++++ 8 files changed, 86 insertions(+) create mode 100644 packages/torrent-repository/src/container.rs create mode 100644 src/bootstrap/jobs/torrent_repository.rs diff --git a/Cargo.lock b/Cargo.lock index 90a6354bc..5f024dcc2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4713,6 +4713,7 @@ dependencies = [ "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "torrust-udp-tracker-server", "tracing", "tracing-subscriber", diff --git a/Cargo.toml b/Cargo.toml index a15ff78df..219701d03 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,6 +55,7 @@ torrust-rest-tracker-api-core = { version = "3.0.0-develop", path = "packages/re torrust-server-lib = { version = "3.0.0-develop", path = "packages/server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/configuration" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "packages/torrent-repository" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "packages/udp-tracker-server" } tracing = "0" tracing-subscriber = { version = "0", features = ["json"] } diff --git a/packages/torrent-repository/src/container.rs b/packages/torrent-repository/src/container.rs new file mode 100644 index 000000000..7522c7956 --- /dev/null +++ b/packages/torrent-repository/src/container.rs @@ -0,0 +1,37 @@ +use std::sync::Arc; + +use crate::event::bus::EventBus; +use crate::event::sender::Broadcaster; +use crate::event::{self}; +use crate::statistics::repository::Repository; +use crate::{statistics, Swarms}; + +pub struct 
TorrentRepositoryContainer { + pub swarms: Arc, + pub event_bus: Arc, + pub stats_event_sender: event::sender::Sender, + pub stats_repository: Arc, +} + +impl TorrentRepositoryContainer { + #[must_use] + pub fn initialize() -> Self { + let swarms = Arc::new(Swarms::default()); + + // Torrent repository stats + let broadcaster = Broadcaster::default(); + let stats_repository = Arc::new(Repository::new()); + + // todo: add a config option to enable/disable stats for this package + let event_bus = Arc::new(EventBus::new(true, broadcaster.clone())); + + let stats_event_sender = event_bus.sender(); + + Self { + swarms, + event_bus, + stats_event_sender, + stats_repository, + } + } +} diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index 0d455177c..c6790c4db 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -1,3 +1,4 @@ +pub mod container; pub mod event; pub mod statistics; pub mod swarm; diff --git a/src/app.rs b/src/app.rs index 93035ee99..ca8b7a5c3 100644 --- a/src/app.rs +++ b/src/app.rs @@ -72,9 +72,11 @@ async fn load_data_from_database(config: &Configuration, app_container: &Arc) -> JobManager { let mut job_manager = JobManager::new(); + start_torrent_repository_event_listener(config, app_container, &mut job_manager); start_http_core_event_listener(config, app_container, &mut job_manager); start_udp_core_event_listener(config, app_container, &mut job_manager); start_udp_server_event_listener(config, app_container, &mut job_manager); + start_the_udp_instances(config, app_container, &mut job_manager).await; start_the_http_instances(config, app_container, &mut job_manager).await; start_the_http_api(config, app_container, &mut job_manager).await; @@ -126,6 +128,18 @@ fn load_torrents_from_database(config: &Configuration, app_container: &Arc, + job_manager: &mut JobManager, +) { + let opt_handle = jobs::torrent_repository::start_event_listener(config, app_container); + + if 
let Some(handle) = opt_handle { + job_manager.push("torrent_repository_event_listener", handle); + } +} + fn start_http_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { let opt_handle = jobs::http_tracker_core::start_event_listener(config, app_container); diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index 2e3d798ad..b311c6da6 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -11,6 +11,7 @@ pub mod http_tracker; pub mod http_tracker_core; pub mod manager; pub mod torrent_cleanup; +pub mod torrent_repository; pub mod tracker_apis; pub mod udp_tracker; pub mod udp_tracker_core; diff --git a/src/bootstrap/jobs/torrent_repository.rs b/src/bootstrap/jobs/torrent_repository.rs new file mode 100644 index 000000000..2125de554 --- /dev/null +++ b/src/bootstrap/jobs/torrent_repository.rs @@ -0,0 +1,20 @@ +use std::sync::Arc; + +use tokio::task::JoinHandle; +use torrust_tracker_configuration::Configuration; + +use crate::container::AppContainer; + +pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { + if config.core.tracker_usage_statistics { + let job = torrust_tracker_torrent_repository::statistics::event::listener::run_event_listener( + app_container.torrent_repository_container.event_bus.receiver(), + &app_container.torrent_repository_container.stats_repository, + ); + + Some(job) + } else { + tracing::info!("HTTP tracker core event listener job is disabled."); + None + } +} diff --git a/src/container.rs b/src/container.rs index 93f1fb4d7..016b4a881 100644 --- a/src/container.rs +++ b/src/container.rs @@ -9,6 +9,7 @@ use bittorrent_udp_tracker_core::{self}; use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{Configuration, HttpApi}; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use 
torrust_udp_tracker_server::container::UdpTrackerServerContainer; use tracing::instrument; @@ -28,6 +29,9 @@ pub struct AppContainer { // Registar pub registar: Arc, + // Torrent Repository + pub torrent_repository_container: Arc, + // Core pub tracker_core_container: Arc, @@ -54,6 +58,10 @@ impl AppContainer { let registar = Arc::new(Registar::default()); + // Torrent Repository + + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + // Core let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); @@ -84,6 +92,9 @@ impl AppContainer { // Registar registar, + // Torrent Repository + torrent_repository_container, + // Core tracker_core_container, From 95766bb9897a8ecec544bf6d38bcfd532c1d0ea9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 9 May 2025 17:44:26 +0100 Subject: [PATCH 594/802] feat: [#1358] inject Swarms into InMemoryTorrentRepository in production code todo: do the same for testing code. --- packages/torrent-repository/src/swarms.rs | 4 +++- packages/tracker-core/src/container.rs | 15 ++++++++++++++- .../src/torrent/repository/in_memory.rs | 5 +++++ src/container.rs | 5 ++++- 4 files changed, 26 insertions(+), 3 deletions(-) diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index a140663c9..9dddaa0c0 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -9,7 +9,7 @@ use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMe use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; use crate::swarm::Swarm; -use crate::SwarmHandle; +use crate::{SwarmHandle, TORRENT_REPOSITORY_LOG_TARGET}; #[derive(Default, Debug)] pub struct Swarms { @@ -43,6 +43,8 @@ impl Swarms { peer: &peer::Peer, opt_persistent_torrent: Option, ) -> Result { + tracing::trace!(target: TORRENT_REPOSITORY_LOG_TARGET, "Handling announcement for 
torrent: {info_hash}"); + let swarm_handle = if let Some(number_of_downloads) = opt_persistent_torrent { SwarmHandle::new(Swarm::new(number_of_downloads).into()) } else { diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index 9f4d23802..3f35c3943 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -1,6 +1,8 @@ use std::sync::Arc; use torrust_tracker_configuration::Core; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_torrent_repository::Swarms; use crate::announce_handler::AnnounceHandler; use crate::authentication::handler::KeysHandler; @@ -35,8 +37,19 @@ pub struct TrackerCoreContainer { } impl TrackerCoreContainer { + #[must_use] + pub fn initialize_from(core_config: &Arc, torrent_repository_container: &Arc) -> Self { + Self::inner_initialize(core_config, &torrent_repository_container.swarms) + } + #[must_use] pub fn initialize(core_config: &Arc) -> Self { + let swarms = Arc::new(Swarms::default()); + Self::inner_initialize(core_config, &swarms) + } + + #[must_use] + fn inner_initialize(core_config: &Arc, swarms: &Arc) -> Self { let database = initialize_database(core_config); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(core_config, &in_memory_whitelist.clone())); @@ -48,7 +61,7 @@ impl TrackerCoreContainer { &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(swarms.clone())); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); let torrents_manager = Arc::new(TorrentsManager::new( diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 
ffb53edad..c8e593471 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -25,6 +25,11 @@ pub struct InMemoryTorrentRepository { } impl InMemoryTorrentRepository { + #[must_use] + pub fn new(swarms: Arc) -> Self { + Self { swarms } + } + /// Inserts or updates a peer in the torrent entry corresponding to the /// given infohash. /// diff --git a/src/container.rs b/src/container.rs index 016b4a881..838de58d6 100644 --- a/src/container.rs +++ b/src/container.rs @@ -64,7 +64,10 @@ impl AppContainer { // Core - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &torrent_repository_container, + )); // HTTP From 41f402292a8b4661cf3d9ca0032d9f506ba0ea43 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 12 May 2025 11:10:13 +0100 Subject: [PATCH 595/802] feat: [#1358] inject Swarms into InMemoryTorrentRepository in testing code --- Cargo.lock | 6 ++++++ packages/axum-http-tracker-server/Cargo.toml | 1 + .../axum-http-tracker-server/src/environment.rs | 9 ++++++++- packages/axum-http-tracker-server/src/server.rs | 8 +++++++- packages/axum-rest-tracker-api-server/Cargo.toml | 1 + .../src/environment.rs | 11 ++++++++++- packages/http-tracker-core/Cargo.toml | 1 + packages/http-tracker-core/src/container.rs | 10 +++++++++- packages/rest-tracker-api-core/Cargo.toml | 1 + packages/rest-tracker-api-core/src/container.rs | 11 ++++++++++- packages/tracker-core/src/container.rs | 14 +------------- packages/udp-tracker-core/Cargo.toml | 1 + packages/udp-tracker-core/src/container.rs | 10 +++++++++- packages/udp-tracker-server/Cargo.toml | 1 + packages/udp-tracker-server/src/environment.rs | 10 +++++++++- 15 files changed, 75 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5f024dcc2..b39355065 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ 
-593,6 +593,7 @@ dependencies = [ "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "tracing", ] @@ -708,6 +709,7 @@ dependencies = [ "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "tracing", "zerocopy 0.7.35", ] @@ -4575,6 +4577,7 @@ dependencies = [ "torrust-tracker-events", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "tower", "tower-http", "tracing", @@ -4614,6 +4617,7 @@ dependencies = [ "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "torrust-udp-tracker-server", "tower", "tower-http", @@ -4666,6 +4670,7 @@ dependencies = [ "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "torrust-udp-tracker-server", ] @@ -4913,6 +4918,7 @@ dependencies = [ "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", "tracing", "url", "uuid", diff --git a/packages/axum-http-tracker-server/Cargo.toml b/packages/axum-http-tracker-server/Cargo.toml index 1b4627d41..81831a614 100644 --- a/packages/axum-http-tracker-server/Cargo.toml +++ b/packages/axum-http-tracker-server/Cargo.toml @@ -33,6 +33,7 @@ torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", 
"propagate-header", "request-id", "trace"] } tracing = "0" diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index aeb53a710..b9ac6bdbb 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -10,6 +10,7 @@ use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::server::{HttpServer, Launcher, Running, Stopped}; @@ -143,7 +144,13 @@ impl EnvContainer { .expect("missing HTTP tracker configuration"); let http_tracker_config = Arc::new(http_tracker_config[0].clone()); - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &torrent_repository_container, + )); + let http_tracker_container = HttpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &http_tracker_config); diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index ff1650b9c..3904449fa 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -260,6 +260,7 @@ mod tests { use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_test_helpers::configuration::ephemeral_public; + use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::server::{HttpServer, Launcher}; @@ -289,7 +290,12 @@ mod tests { let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); } - let 
tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &torrent_repository_container, + )); let announce_service = Arc::new(AnnounceService::new( tracker_core_container.core_config.clone(), diff --git a/packages/axum-rest-tracker-api-server/Cargo.toml b/packages/axum-rest-tracker-api-server/Cargo.toml index d1491c96e..296f77d61 100644 --- a/packages/axum-rest-tracker-api-server/Cargo.toml +++ b/packages/axum-rest-tracker-api-server/Cargo.toml @@ -39,6 +39,7 @@ torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index 275d72574..0758b38d1 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -12,6 +12,7 @@ use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use 
torrust_udp_tracker_server::container::UdpTrackerServerContainer; use crate::server::{ApiServer, Launcher, Running, Stopped}; @@ -172,11 +173,19 @@ impl EnvContainer { .clone(), ); - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &torrent_repository_container, + )); + let http_tracker_core_container = HttpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &http_tracker_config); + let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &udp_tracker_config); + let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); let tracker_http_api_core_container = TrackerHttpApiCoreContainer::initialize_from( diff --git a/packages/http-tracker-core/Cargo.toml b/packages/http-tracker-core/Cargo.toml index 5473c5a25..37b540e39 100644 --- a/packages/http-tracker-core/Cargo.toml +++ b/packages/http-tracker-core/Cargo.toml @@ -28,6 +28,7 @@ torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configur torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } tracing = "0" [dev-dependencies] diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 681d4a4f4..922273610 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -2,6 +2,7 @@ use std::sync::Arc; use bittorrent_tracker_core::container::TrackerCoreContainer; use 
torrust_tracker_configuration::{Core, HttpTracker}; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; @@ -26,7 +27,13 @@ pub struct HttpTrackerCoreContainer { impl HttpTrackerCoreContainer { #[must_use] pub fn initialize(core_config: &Arc, http_tracker_config: &Arc) -> Arc { - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + core_config, + &torrent_repository_container, + )); + Self::initialize_from_tracker_core(&tracker_core_container, http_tracker_config) } @@ -36,6 +43,7 @@ impl HttpTrackerCoreContainer { http_tracker_config: &Arc, ) -> Arc { let http_tracker_core_services = HttpTrackerCoreServices::initialize_from(tracker_core_container); + Self::initialize_from_services(tracker_core_container, &http_tracker_core_services, http_tracker_config) } diff --git a/packages/rest-tracker-api-core/Cargo.toml b/packages/rest-tracker-api-core/Cargo.toml index 0077572fb..de1946239 100644 --- a/packages/rest-tracker-api-core/Cargo.toml +++ b/packages/rest-tracker-api-core/Cargo.toml @@ -21,6 +21,7 @@ tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } [dev-dependencies] diff --git a/packages/rest-tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs index ec3786dfb..327ab4bd6 
100644 --- a/packages/rest-tracker-api-core/src/container.rs +++ b/packages/rest-tracker-api-core/src/container.rs @@ -7,6 +7,7 @@ use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, HttpApi, HttpTracker, UdpTracker}; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; pub struct TrackerHttpApiCoreContainer { @@ -26,11 +27,19 @@ impl TrackerHttpApiCoreContainer { udp_tracker_config: &Arc, http_api_config: &Arc, ) -> Arc { - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + core_config, + &torrent_repository_container, + )); + let http_tracker_core_container = HttpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, http_tracker_config); + let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, udp_tracker_config); + let udp_tracker_server_container = UdpTrackerServerContainer::initialize(core_config); Self::initialize_from( diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index 3f35c3943..f4fb272de 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -2,7 +2,6 @@ use std::sync::Arc; use torrust_tracker_configuration::Core; use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; -use torrust_tracker_torrent_repository::Swarms; use crate::announce_handler::AnnounceHandler; use crate::authentication::handler::KeysHandler; @@ -39,17 +38,6 @@ pub struct TrackerCoreContainer { impl TrackerCoreContainer { #[must_use] pub fn initialize_from(core_config: &Arc, 
torrent_repository_container: &Arc) -> Self { - Self::inner_initialize(core_config, &torrent_repository_container.swarms) - } - - #[must_use] - pub fn initialize(core_config: &Arc) -> Self { - let swarms = Arc::new(Swarms::default()); - Self::inner_initialize(core_config, &swarms) - } - - #[must_use] - fn inner_initialize(core_config: &Arc, swarms: &Arc) -> Self { let database = initialize_database(core_config); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(core_config, &in_memory_whitelist.clone())); @@ -61,7 +49,7 @@ impl TrackerCoreContainer { &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(swarms.clone())); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(torrent_repository_container.swarms.clone())); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); let torrents_manager = Arc::new(TorrentsManager::new( diff --git a/packages/udp-tracker-core/Cargo.toml b/packages/udp-tracker-core/Cargo.toml index 6cf250074..9a27ec826 100644 --- a/packages/udp-tracker-core/Cargo.toml +++ b/packages/udp-tracker-core/Cargo.toml @@ -33,6 +33,7 @@ torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configur torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } tracing = "0" zerocopy = "0.7" diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index 98c01a703..2b6567ec0 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -3,6 +3,7 @@ use 
std::sync::Arc; use bittorrent_tracker_core::container::TrackerCoreContainer; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, UdpTracker}; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; @@ -31,7 +32,13 @@ pub struct UdpTrackerCoreContainer { impl UdpTrackerCoreContainer { #[must_use] pub fn initialize(core_config: &Arc, udp_tracker_config: &Arc) -> Arc { - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + core_config, + &torrent_repository_container, + )); + Self::initialize_from_tracker_core(&tracker_core_container, udp_tracker_config) } @@ -41,6 +48,7 @@ impl UdpTrackerCoreContainer { udp_tracker_config: &Arc, ) -> Arc { let udp_tracker_core_services = UdpTrackerCoreServices::initialize_from(tracker_core_container); + Self::initialize_from_services(tracker_core_container, &udp_tracker_core_services, udp_tracker_config) } diff --git a/packages/udp-tracker-server/Cargo.toml b/packages/udp-tracker-server/Cargo.toml index 4d0296461..a0c129acb 100644 --- a/packages/udp-tracker-server/Cargo.toml +++ b/packages/udp-tracker-server/Cargo.toml @@ -33,6 +33,7 @@ torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } tracing = "0" url = { version = "2", features = ["serde"] } uuid = { version = "1", features = ["v4"] } diff --git a/packages/udp-tracker-server/src/environment.rs 
b/packages/udp-tracker-server/src/environment.rs index 962442fde..e3667e74a 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -8,6 +8,7 @@ use tokio::task::JoinHandle; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration, DEFAULT_TIMEOUT}; use torrust_tracker_primitives::peer; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::container::UdpTrackerServerContainer; use crate::server::spawner::Spawner; @@ -173,9 +174,16 @@ impl EnvContainer { let udp_tracker_configurations = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); let udp_tracker_config = Arc::new(udp_tracker_configurations[0].clone()); - let tracker_core_container = Arc::new(TrackerCoreContainer::initialize(&core_config)); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &torrent_repository_container, + )); + let udp_tracker_core_container = UdpTrackerCoreContainer::initialize_from_tracker_core(&tracker_core_container, &udp_tracker_config); + let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); Self { From 68b930d4b4e89c3003fb38689f6c2b3c32bb06d2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 12 May 2025 11:30:29 +0100 Subject: [PATCH 596/802] feat: [#1495] expose new torrent-repositoru metrics via the REST API These are the new metrics in JSON format: http://localhost:1212/api/v1/metrics?token=MyAccessToken ```json { "metrics": [ { "kind": "counter", "name": "torrent_repository_persistent_torrents_downloads_total", "samples": [] }, { "kind": "counter", "name": "torrent_repository_runtime_torrents_downloads_total", "samples": [] } ] } ``` --- .../src/environment.rs | 1 + .../src/v1/context/stats/handlers.rs | 2 ++ 
.../src/v1/context/stats/routes.rs | 1 + .../rest-tracker-api-core/src/container.rs | 22 ++++++++++++++++--- .../src/statistics/services.rs | 6 +++++ src/container.rs | 9 ++++++-- 6 files changed, 36 insertions(+), 5 deletions(-) diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index 0758b38d1..ae3eadb31 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -189,6 +189,7 @@ impl EnvContainer { let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); let tracker_http_api_core_container = TrackerHttpApiCoreContainer::initialize_from( + &torrent_repository_container, &tracker_core_container, &http_tracker_core_container, &udp_tracker_core_container, diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs index 17d3e4f2d..552958d74 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs @@ -69,6 +69,7 @@ pub async fn get_metrics_handler( State(state): State<( Arc, Arc>, + Arc, Arc, Arc, Arc, @@ -81,6 +82,7 @@ pub async fn get_metrics_handler( state.2.clone(), state.3.clone(), state.4.clone(), + state.5.clone(), ) .await; diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs index c19f08b2a..3eeaa8bf4 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs @@ -28,6 +28,7 @@ pub fn add(prefix: &str, router: Router, http_api_container: &Arc, + + // Torrent repository + pub torrent_repository_container: Arc, + + // Tracker core pub tracker_core_container: Arc, + + // HTTP tracker core 
pub http_stats_repository: Arc, + + // UDP tracker core pub ban_service: Arc>, pub udp_core_stats_repository: Arc, pub udp_server_stats_repository: Arc, @@ -43,6 +52,7 @@ impl TrackerHttpApiCoreContainer { let udp_tracker_server_container = UdpTrackerServerContainer::initialize(core_config); Self::initialize_from( + &torrent_repository_container, &tracker_core_container, &http_tracker_core_container, &udp_tracker_core_container, @@ -53,6 +63,7 @@ impl TrackerHttpApiCoreContainer { #[must_use] pub fn initialize_from( + torrent_repository_container: &Arc, tracker_core_container: &Arc, http_tracker_core_container: &Arc, udp_tracker_core_container: &Arc, @@ -60,16 +71,21 @@ impl TrackerHttpApiCoreContainer { http_api_config: &Arc, ) -> Arc { Arc::new(TrackerHttpApiCoreContainer { + http_api_config: http_api_config.clone(), + + // Torrent repository + torrent_repository_container: torrent_repository_container.clone(), + + // Tracker core tracker_core_container: tracker_core_container.clone(), + // HTTP tracker core http_stats_repository: http_tracker_core_container.stats_repository.clone(), + // UDP tracker core ban_service: udp_tracker_core_container.ban_service.clone(), udp_core_stats_repository: udp_tracker_core_container.stats_repository.clone(), - udp_server_stats_repository: udp_tracker_server_container.stats_repository.clone(), - - http_api_config: http_api_config.clone(), }) } } diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 8d5b7514a..b8c2f3f1d 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -93,6 +93,7 @@ pub struct TrackerLabeledMetrics { pub async fn get_labeled_metrics( in_memory_torrent_repository: Arc, ban_service: Arc>, + swarms_stats_repository: Arc, http_stats_repository: Arc, udp_stats_repository: Arc, udp_server_stats_repository: Arc, @@ -100,12 +101,17 @@ pub async fn 
get_labeled_metrics( let _torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata(); let _udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); + let swarms_stats = swarms_stats_repository.get_metrics().await; let http_stats = http_stats_repository.get_stats().await; let udp_stats_repository = udp_stats_repository.get_stats().await; let udp_server_stats = udp_server_stats_repository.get_stats().await; // Merge all the metrics into a single collection let mut metrics = MetricCollection::default(); + + metrics + .merge(&swarms_stats.metric_collection) + .expect("msg: failed to merge torrent repository metrics"); metrics .merge(&http_stats.metric_collection) .expect("msg: failed to merge HTTP core metrics"); diff --git a/src/container.rs b/src/container.rs index 838de58d6..273425fc1 100644 --- a/src/container.rs +++ b/src/container.rs @@ -142,10 +142,15 @@ impl AppContainer { #[must_use] pub fn tracker_http_api_container(&self, http_api_config: &Arc) -> Arc { TrackerHttpApiCoreContainer { - tracker_core_container: self.tracker_core_container.clone(), http_api_config: http_api_config.clone(), - ban_service: self.udp_tracker_core_services.ban_service.clone(), + + torrent_repository_container: self.torrent_repository_container.clone(), + + tracker_core_container: self.tracker_core_container.clone(), + http_stats_repository: self.http_tracker_core_services.stats_repository.clone(), + + ban_service: self.udp_tracker_core_services.ban_service.clone(), udp_core_stats_repository: self.udp_tracker_core_services.stats_repository.clone(), udp_server_stats_repository: self.udp_tracker_server_container.stats_repository.clone(), } From 2c479a1baa112a4bd86eeb686bc018c3e4f08716 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 12 May 2025 16:38:25 +0100 Subject: [PATCH 597/802] refactor: [#1358] inject event sender in Swarms type --- .../src/environment.rs | 8 +- .../tests/server/v1/contract.rs | 31 ++-- .../src/environment.rs | 8 +- 
.../tests/server/v1/contract/context/stats.rs | 3 +- .../server/v1/contract/context/torrent.rs | 18 +-- packages/events/src/sender.rs | 1 + packages/torrent-repository/src/container.rs | 4 +- .../src/statistics/event/handler.rs | 26 +++- packages/torrent-repository/src/swarms.rs | 140 ++++++++++++------ .../torrent-repository/tests/swarms/mod.rs | 8 +- packages/tracker-core/src/announce_handler.rs | 14 +- packages/tracker-core/src/torrent/manager.rs | 30 ++-- .../src/torrent/repository/in_memory.rs | 9 +- packages/tracker-core/src/torrent/services.rs | 36 +++-- .../udp-tracker-server/src/environment.rs | 5 +- .../src/handlers/announce.rs | 18 ++- .../udp-tracker-server/src/handlers/scrape.rs | 4 +- 17 files changed, 231 insertions(+), 132 deletions(-) diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index b9ac6bdbb..078bda9e5 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -25,12 +25,12 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker - pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let _number_of_downloads_increased = self - .container + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + self.container .tracker_core_container .in_memory_torrent_repository - .upsert_peer(info_hash, peer, None); + .upsert_peer(info_hash, peer, None) + .await } } diff --git a/packages/axum-http-tracker-server/tests/server/v1/contract.rs b/packages/axum-http-tracker-server/tests/server/v1/contract.rs index d1f52d55a..afd4d3168 100644 --- a/packages/axum-http-tracker-server/tests/server/v1/contract.rs +++ b/packages/axum-http-tracker-server/tests/server/v1/contract.rs @@ -474,7 +474,7 @@ mod for_all_config_modes { let previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); // Add the Peer 
1 - env.add_torrent_peer(&info_hash, &previously_announced_peer); + env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2. This new peer is non included on the response peer list let response = Client::new(*env.bind_address()) @@ -517,7 +517,7 @@ mod for_all_config_modes { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) .build(); - env.add_torrent_peer(&info_hash, &peer_using_ipv4); + env.add_torrent_peer(&info_hash, &peer_using_ipv4).await; // Announce a peer using IPV6 let peer_using_ipv6 = PeerBuilder::default() @@ -527,7 +527,7 @@ mod for_all_config_modes { 8080, )) .build(); - env.add_torrent_peer(&info_hash, &peer_using_ipv6); + env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; // Announce the new Peer. let response = Client::new(*env.bind_address()) @@ -625,7 +625,7 @@ mod for_all_config_modes { let previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); // Add the Peer 1 - env.add_torrent_peer(&info_hash, &previously_announced_peer); + env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 accepting compact responses let response = Client::new(*env.bind_address()) @@ -666,7 +666,7 @@ mod for_all_config_modes { let previously_announced_peer = PeerBuilder::default().with_peer_id(&PeerId(*b"-qB00000000000000001")).build(); // Add the Peer 1 - env.add_torrent_peer(&info_hash, &previously_announced_peer); + env.add_torrent_peer(&info_hash, &previously_announced_peer).await; // Announce the new Peer 2 without passing the "compact" param // By default it should respond with the compact peer list @@ -1010,7 +1010,8 @@ mod for_all_config_modes { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ); + ) + .await; let response = Client::new(*env.bind_address()) .scrape( @@ -1050,7 +1051,8 @@ mod 
for_all_config_modes { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_no_bytes_pending_to_download() .build(), - ); + ) + .await; let response = Client::new(*env.bind_address()) .scrape( @@ -1282,7 +1284,8 @@ mod configured_as_whitelisted { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ); + ) + .await; let response = Client::new(*env.bind_address()) .scrape( @@ -1318,7 +1321,8 @@ mod configured_as_whitelisted { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ); + ) + .await; env.container .tracker_core_container @@ -1494,7 +1498,8 @@ mod configured_as_private { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ); + ) + .await; let response = Client::new(*env.bind_address()) .scrape( @@ -1525,7 +1530,8 @@ mod configured_as_private { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ); + ) + .await; let expiring_key = env .container @@ -1576,7 +1582,8 @@ mod configured_as_private { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_bytes_pending_to_download(1) .build(), - ); + ) + .await; let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index ae3eadb31..e4a83d15d 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -33,12 +33,12 @@ where S: std::fmt::Debug + std::fmt::Display, { /// Add a torrent to the tracker - pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let _number_of_downloads_increased = self - .container + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + self.container .tracker_core_container .in_memory_torrent_repository - .upsert_peer(info_hash, peer, 
None); + .upsert_peer(info_hash, peer, None) + .await } } diff --git a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs index 51a4804e7..7cae0abbf 100644 --- a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/stats.rs @@ -21,7 +21,8 @@ async fn should_allow_getting_tracker_statistics() { env.add_torrent_peer( &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), // DevSkim: ignore DS173237 &PeerBuilder::default().into(), - ); + ) + .await; let request_id = Uuid::new_v4(); diff --git a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs index 42421db99..ae9819785 100644 --- a/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs +++ b/packages/axum-rest-tracker-api-server/tests/server/v1/contract/context/torrent.rs @@ -26,7 +26,7 @@ async fn should_allow_getting_all_torrents() { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; let request_id = Uuid::new_v4(); @@ -59,8 +59,8 @@ async fn should_allow_limiting_the_torrents_in_the_result() { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); - env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + 
env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; let request_id = Uuid::new_v4(); @@ -96,8 +96,8 @@ async fn should_allow_the_torrents_result_pagination() { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); - env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; let request_id = Uuid::new_v4(); @@ -132,8 +132,8 @@ async fn should_allow_getting_a_list_of_torrents_providing_infohashes() { let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); - env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; let request_id = Uuid::new_v4(); @@ -307,7 +307,7 @@ async fn should_allow_getting_a_torrent_info() { let peer = PeerBuilder::default().into(); - env.add_torrent_peer(&info_hash, &peer); + env.add_torrent_peer(&info_hash, &peer).await; let request_id = Uuid::new_v4(); @@ -389,7 +389,7 @@ async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 - env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; let 
request_id = Uuid::new_v4(); diff --git a/packages/events/src/sender.rs b/packages/events/src/sender.rs index 9fc77f650..3dccade4c 100644 --- a/packages/events/src/sender.rs +++ b/packages/events/src/sender.rs @@ -1,4 +1,5 @@ use std::fmt; +use std::fmt::Debug; use futures::future::BoxFuture; #[cfg(test)] diff --git a/packages/torrent-repository/src/container.rs b/packages/torrent-repository/src/container.rs index 7522c7956..50a6b8b9c 100644 --- a/packages/torrent-repository/src/container.rs +++ b/packages/torrent-repository/src/container.rs @@ -16,8 +16,6 @@ pub struct TorrentRepositoryContainer { impl TorrentRepositoryContainer { #[must_use] pub fn initialize() -> Self { - let swarms = Arc::new(Swarms::default()); - // Torrent repository stats let broadcaster = Broadcaster::default(); let stats_repository = Arc::new(Repository::new()); @@ -27,6 +25,8 @@ impl TorrentRepositoryContainer { let stats_event_sender = event_bus.sender(); + let swarms = Arc::new(Swarms::new(stats_event_sender.clone())); + Self { swarms, event_bus, diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs index d68df0b1b..2073575a8 100644 --- a/packages/torrent-repository/src/statistics/event/handler.rs +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -9,13 +9,25 @@ use crate::statistics::repository::Repository; /// /// This function panics if the client IP address is not the same as the IP /// version of the event. -pub async fn handle_event(_event: Event, stats_repository: &Arc, _now: DurationSinceUnixEpoch) { - /*match event { - Event::TorrentAdded { .. } => {} - Event::TorrentRemoved { .. } => {} - Event::PeerAdded { .. } => {} - Event::PeerRemoved { .. } => {} - }*/ +pub async fn handle_event(event: Event, stats_repository: &Arc, _now: DurationSinceUnixEpoch) { + match event { + Event::TorrentAdded { info_hash, .. 
} => { + // todo: update metrics + tracing::debug!("Torrent added {info_hash}"); + } + Event::TorrentRemoved { info_hash } => { + // todo: update metrics + tracing::debug!("Torrent removed {info_hash}"); + } + Event::PeerAdded { announcement } => { + // todo: update metrics + tracing::debug!("Peer added {announcement:?}"); + } + Event::PeerRemoved { socket_addr, peer_id } => { + // todo: update metrics + tracing::debug!("Peer removed: socket address {socket_addr:?}, peer ID: {peer_id:?}"); + } + } tracing::debug!("metrics: {:?}", stats_repository.get_metrics().await); } diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 9dddaa0c0..d92e1755a 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -8,15 +8,26 @@ use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use crate::event::sender::Sender; +use crate::event::Event; use crate::swarm::Swarm; -use crate::{SwarmHandle, TORRENT_REPOSITORY_LOG_TARGET}; +use crate::SwarmHandle; -#[derive(Default, Debug)] +#[derive(Default)] pub struct Swarms { swarms: SkipMap, + event_sender: Sender, } impl Swarms { + #[must_use] + pub fn new(event_sender: Sender) -> Self { + Self { + swarms: SkipMap::new(), + event_sender, + } + } + /// Upsert a peer into the swarm of a torrent. /// /// Optionally, it can also preset the number of downloads of the torrent @@ -37,36 +48,55 @@ impl Swarms { /// # Errors /// /// This function panics if the lock for the swarm handle cannot be acquired. 
- pub fn handle_announcement( + pub async fn handle_announcement( &self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option, ) -> Result { - tracing::trace!(target: TORRENT_REPOSITORY_LOG_TARGET, "Handling announcement for torrent: {info_hash}"); + let swarm_handle = match self.swarms.get(info_hash) { + None => { + let new_swarm_handle = if let Some(number_of_downloads) = opt_persistent_torrent { + SwarmHandle::new(Swarm::new(number_of_downloads).into()) + } else { + SwarmHandle::default() + }; - let swarm_handle = if let Some(number_of_downloads) = opt_persistent_torrent { - SwarmHandle::new(Swarm::new(number_of_downloads).into()) - } else { - SwarmHandle::default() - }; + let new_swarm_handle = self.swarms.get_or_insert(*info_hash, new_swarm_handle); - let swarm_handle = self.swarms.get_or_insert(*info_hash, swarm_handle); + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::TorrentAdded { + info_hash: *info_hash, + announcement: *peer, + }) + .await; + } + + new_swarm_handle + } + Some(existing_swarm_handle) => existing_swarm_handle, + }; let mut swarm = swarm_handle.value().lock()?; Ok(swarm.handle_announcement(peer)) } - /// Inserts a new swarm. + /// Inserts a new swarm. Only used for testing purposes. pub fn insert(&self, info_hash: &InfoHash, swarm: Swarm) { - // code-review: swarms builder? + // code-review: swarms builder? or constructor from vec? // It's only used for testing purposes. It allows to pre-define the // initial state of the swarm without having to go through the upsert // process. let swarm_handle = Arc::new(Mutex::new(swarm)); + self.swarms.insert(*info_hash, swarm_handle); + + // IMPORTANT: Notice this does not send an event because is used only + // for testing purposes. The event is sent only when the torrent is + // announced for the first time. } /// Removes a torrent entry from the repository. 
@@ -75,8 +105,14 @@ impl Swarms { /// /// An `Option` containing the removed torrent entry if it existed. #[must_use] - pub fn remove(&self, key: &InfoHash) -> Option { - self.swarms.remove(key).map(|entry| entry.value().clone()) + pub async fn remove(&self, key: &InfoHash) -> Option { + let swarm_handle = self.swarms.remove(key).map(|entry| entry.value().clone()); + + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender.send(Event::TorrentRemoved { info_hash: *key }).await; + } + + swarm_handle } /// Retrieves a tracked torrent handle by its infohash. @@ -402,7 +438,7 @@ impl From>> for Error { #[cfg(test)] mod tests { - mod the_in_memory_torrent_repository { + mod the_swarm_repository { use aquatic_udp_protocol::PeerId; @@ -447,7 +483,7 @@ mod tests { let info_hash = sample_info_hash(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &sample_peer(), None); + swarms.handle_announcement(&info_hash, &sample_peer(), None).await.unwrap(); assert!(swarms.get(&info_hash).is_some()); } @@ -458,8 +494,8 @@ mod tests { let info_hash = sample_info_hash(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &sample_peer(), None); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &sample_peer(), None); + swarms.handle_announcement(&info_hash, &sample_peer(), None).await.unwrap(); + swarms.handle_announcement(&info_hash, &sample_peer(), None).await.unwrap(); assert!(swarms.get(&info_hash).is_some()); } @@ -474,7 +510,7 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::swarms::tests::the_in_memory_torrent_repository::numeric_peer_id; + use crate::swarms::tests::the_swarm_repository::numeric_peer_id; use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; @@ -485,7 +521,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let 
_number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); let peers = swarms.get_swarm_peers(&info_hash, 74).unwrap(); @@ -518,7 +554,7 @@ mod tests { event: AnnounceEvent::Completed, }; - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); } let peers = swarms.get_swarm_peers(&info_hash, 74).unwrap(); @@ -536,7 +572,7 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::swarms::tests::the_in_memory_torrent_repository::numeric_peer_id; + use crate::swarms::tests::the_swarm_repository::numeric_peer_id; use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; @@ -558,7 +594,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); let peers = swarms .get_peers_peers_excluding(&info_hash, &peer, TORRENT_PEERS_LIMIT) @@ -575,7 +611,7 @@ mod tests { let excluded_peer = sample_peer(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &excluded_peer, None); + swarms.handle_announcement(&info_hash, &excluded_peer, None).await.unwrap(); // Add 74 peers for idx in 2..=75 { @@ -589,7 +625,7 @@ mod tests { event: AnnounceEvent::Completed, }; - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); } let peers = swarms @@ -619,9 +655,9 @@ mod tests { let swarms = Arc::new(Swarms::default()); let info_hash = sample_info_hash(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &sample_peer(), None); + swarms.handle_announcement(&info_hash, 
&sample_peer(), None).await.unwrap(); - let _unused = swarms.remove(&info_hash); + let _unused = swarms.remove(&info_hash).await; assert!(swarms.get(&info_hash).is_none()); } @@ -634,7 +670,7 @@ mod tests { let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); // Cut off time is 1 second after the peer was updated swarms @@ -644,13 +680,13 @@ mod tests { assert!(!swarms.get_swarm_peers(&info_hash, 74).unwrap().contains(&Arc::new(peer))); } - fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { + async fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { let swarms = Arc::new(Swarms::default()); // Insert a sample peer for the torrent to force adding the torrent entry let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = swarms.handle_announcement(info_hash, &peer, None); + swarms.handle_announcement(info_hash, &peer, None).await.unwrap(); // Remove the peer swarms @@ -664,7 +700,7 @@ mod tests { async fn it_should_remove_torrents_without_peers() { let info_hash = sample_info_hash(); - let swarms = initialize_repository_with_one_torrent_without_peers(&info_hash); + let swarms = initialize_repository_with_one_torrent_without_peers(&info_hash).await; let tracker_policy = TrackerPolicy { remove_peerless_torrents: true, @@ -721,7 +757,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); let torrent_entry = swarms.get(&info_hash).unwrap(); @@ -744,7 +780,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use 
crate::swarms::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; + use crate::swarms::tests::the_swarm_repository::returning_torrent_entries::TorrentEntryInfo; use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; @@ -754,7 +790,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash, &peer, None); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); let torrent_entries = swarms.get_paginated(None); @@ -782,7 +818,7 @@ mod tests { use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::swarms::tests::the_in_memory_torrent_repository::returning_torrent_entries::TorrentEntryInfo; + use crate::swarms::tests::the_swarm_repository::returning_torrent_entries::TorrentEntryInfo; use crate::swarms::Swarms; use crate::tests::{ sample_info_hash_alphabetically_ordered_after_sample_info_hash_one, sample_info_hash_one, @@ -796,12 +832,12 @@ mod tests { // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_one, None); + swarms.handle_announcement(&info_hash_one, &peer_one, None).await.unwrap(); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_two, None); + swarms.handle_announcement(&info_hash_one, &peer_two, None).await.unwrap(); // Get only the first page where page size is 1 let torrent_entries = swarms.get_paginated(Some(&Pagination { offset: 0, limit: 1 })); @@ -831,12 +867,12 @@ mod tests { // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - 
let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_one, None); + swarms.handle_announcement(&info_hash_one, &peer_one, None).await.unwrap(); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_two, None); + swarms.handle_announcement(&info_hash_one, &peer_two, None).await.unwrap(); // Get only the first page where page size is 1 let torrent_entries = swarms.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); @@ -866,12 +902,12 @@ mod tests { // Insert one torrent entry let info_hash_one = sample_info_hash_one(); let peer_one = sample_peer_one(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_one, None); + swarms.handle_announcement(&info_hash_one, &peer_one, None).await.unwrap(); // Insert another torrent entry let info_hash_one = sample_info_hash_alphabetically_ordered_after_sample_info_hash_one(); let peer_two = sample_peer_two(); - let _number_of_downloads_increased = swarms.handle_announcement(&info_hash_one, &peer_two, None); + swarms.handle_announcement(&info_hash_one, &peer_two, None).await.unwrap(); // Get only the first page where page size is 1 let torrent_entries = swarms.get_paginated(Some(&Pagination { offset: 1, limit: 1 })); @@ -915,7 +951,10 @@ mod tests { async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_leecher() { let swarms = Arc::new(Swarms::default()); - let _number_of_downloads_increased = swarms.handle_announcement(&sample_info_hash(), &leecher(), None); + swarms + .handle_announcement(&sample_info_hash(), &leecher(), None) + .await + .unwrap(); let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); @@ -934,7 +973,10 @@ mod tests { async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_seeder() { let swarms = 
Arc::new(Swarms::default()); - let _number_of_downloads_increased = swarms.handle_announcement(&sample_info_hash(), &seeder(), None); + swarms + .handle_announcement(&sample_info_hash(), &seeder(), None) + .await + .unwrap(); let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); @@ -953,7 +995,10 @@ mod tests { async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_completed_peer() { let swarms = Arc::new(Swarms::default()); - let _number_of_downloads_increased = swarms.handle_announcement(&sample_info_hash(), &complete_peer(), None); + swarms + .handle_announcement(&sample_info_hash(), &complete_peer(), None) + .await + .unwrap(); let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); @@ -974,7 +1019,10 @@ mod tests { let start_time = std::time::Instant::now(); for i in 0..1_000_000 { - let _number_of_downloads_increased = swarms.handle_announcement(&gen_seeded_infohash(&i), &leecher(), None); + swarms + .handle_announcement(&gen_seeded_infohash(&i), &leecher(), None) + .await + .unwrap(); } let result_a = start_time.elapsed(); @@ -1010,7 +1058,7 @@ mod tests { let infohash = sample_info_hash(); - let _number_of_downloads_increased = swarms.handle_announcement(&infohash, &leecher(), None); + swarms.handle_announcement(&infohash, &leecher(), None).await.unwrap(); let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).unwrap(); diff --git a/packages/torrent-repository/tests/swarms/mod.rs b/packages/torrent-repository/tests/swarms/mod.rs index 8e58b9e76..975457cca 100644 --- a/packages/torrent-repository/tests/swarms/mod.rs +++ b/packages/torrent-repository/tests/swarms/mod.rs @@ -377,12 +377,12 @@ async fn it_should_remove_an_entry(#[values(swarms())] swarms: Swarms, #[case] e Some(torrent.clone()) ); assert_eq!( - Some(swarms.remove(&info_hash).unwrap().lock_or_panic().clone()), + Some(swarms.remove(&info_hash).await.unwrap().lock_or_panic().clone()), Some(torrent) ); 
assert!(swarms.get(&info_hash).is_none()); - assert!(swarms.remove(&info_hash).is_none()); + assert!(swarms.remove(&info_hash).await.is_none()); } assert_eq!(swarms.get_aggregate_swarm_metadata().unwrap().total_torrents, 0); @@ -435,7 +435,7 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c // Insert the infohash and peer into the repository // and verify there is an extra torrent entry. { - swarms.handle_announcement(&info_hash, &peer, None).unwrap(); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); assert_eq!( swarms.get_aggregate_swarm_metadata().unwrap().total_torrents, entries.len() as u64 + 1 @@ -445,7 +445,7 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c // Insert the infohash and peer into the repository // and verify the swarm metadata was updated. { - swarms.handle_announcement(&info_hash, &peer, None).unwrap(); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); let stats = swarms.get_swarm_metadata(&info_hash).unwrap(); assert_eq!( stats, diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index fac0a38c8..00d42174a 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -171,9 +171,10 @@ impl AnnounceHandler { peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip)); - let number_of_downloads_increased = - self.in_memory_torrent_repository - .upsert_peer(info_hash, peer, opt_persistent_torrent); + let number_of_downloads_increased = self + .in_memory_torrent_repository + .upsert_peer(info_hash, peer, opt_persistent_torrent) + .await; if self.config.tracker_policy.persistent_torrent_completed_stat && number_of_downloads_increased { self.db_torrent_repository.increase_number_of_downloads(info_hash)?; @@ -594,7 +595,7 @@ mod tests { use aquatic_udp_protocol::AnnounceEvent; use 
torrust_tracker_test_helpers::configuration; - use torrust_tracker_torrent_repository::LockTrackedTorrent; + use torrust_tracker_torrent_repository::{LockTrackedTorrent, Swarms}; use crate::announce_handler::tests::the_announce_handler::peer_ip; use crate::announce_handler::{AnnounceHandler, PeersWanted}; @@ -613,7 +614,8 @@ mod tests { config.core.tracker_policy.persistent_torrent_completed_stat = true; let database = initialize_database(&config.core); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let swarms = Arc::new(Swarms::default()); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(swarms)); let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); let torrents_manager = Arc::new(TorrentsManager::new( &config.core, @@ -648,7 +650,7 @@ mod tests { assert_eq!(announce_data.stats.downloaded, 1); // Remove the newly updated torrent from memory - let _unused = in_memory_torrent_repository.remove(&info_hash); + let _unused = in_memory_torrent_repository.remove(&info_hash).await; torrents_manager.load_torrents_from_database().unwrap(); diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index aaac811f2..dec52daac 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -144,7 +144,7 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Core; - use torrust_tracker_torrent_repository::LockTrackedTorrent; + use torrust_tracker_torrent_repository::{LockTrackedTorrent, Swarms}; use super::{DatabasePersistentTorrentRepository, TorrentsManager}; use crate::databases::setup::initialize_database; @@ -163,7 +163,8 @@ mod tests { } fn initialize_torrents_manager_with(config: Core) -> (Arc, Arc) { - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); + let swarms = Arc::new(Swarms::default()); + let 
in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(swarms)); let database = initialize_database(&config); let database_persistent_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); @@ -219,8 +220,8 @@ mod tests { use crate::torrent::manager::tests::{initialize_torrents_manager, initialize_torrents_manager_with}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - #[test] - fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { + #[tokio::test] + async fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { let (torrents_manager, services) = initialize_torrents_manager(); let infohash = sample_info_hash(); @@ -230,7 +231,10 @@ mod tests { // Add a peer to the torrent let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = services.in_memory_torrent_repository.upsert_peer(&infohash, &peer, None); + let _number_of_downloads_increased = services + .in_memory_torrent_repository + .upsert_peer(&infohash, &peer, None) + .await; // Simulate the time has passed 1 second more than the max peer timeout. clock::Stopped::local_add(&Duration::from_secs( @@ -243,18 +247,18 @@ mod tests { assert!(services.in_memory_torrent_repository.get(&infohash).is_none()); } - fn add_a_peerless_torrent(infohash: &InfoHash, in_memory_torrent_repository: &Arc) { + async fn add_a_peerless_torrent(infohash: &InfoHash, in_memory_torrent_repository: &Arc) { // Add a peer to the torrent let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(infohash, &peer, None); + let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(infohash, &peer, None).await; // Remove the peer. The torrent is now peerless. 
in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); } - #[test] - fn it_should_remove_torrents_that_have_no_peers_when_it_is_configured_to_do_so() { + #[tokio::test] + async fn it_should_remove_torrents_that_have_no_peers_when_it_is_configured_to_do_so() { let mut config = ephemeral_configuration(); config.tracker_policy.remove_peerless_torrents = true; @@ -262,15 +266,15 @@ mod tests { let infohash = sample_info_hash(); - add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository); + add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository).await; torrents_manager.cleanup_torrents(); assert!(services.in_memory_torrent_repository.get(&infohash).is_none()); } - #[test] - fn it_should_retain_peerless_torrents_when_it_is_configured_to_do_so() { + #[tokio::test] + async fn it_should_retain_peerless_torrents_when_it_is_configured_to_do_so() { let mut config = ephemeral_configuration(); config.tracker_policy.remove_peerless_torrents = false; @@ -278,7 +282,7 @@ mod tests { let infohash = sample_info_hash(); - add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository); + add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository).await; torrents_manager.cleanup_torrents(); diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index c8e593471..37d9d3f5c 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -18,7 +18,7 @@ use torrust_tracker_torrent_repository::{SwarmHandle, Swarms}; /// /// Multiple implementations were considered, and the chosen implementation is /// used in production. Other implementations are kept for reference. -#[derive(Debug, Default)] +#[derive(Default)] pub struct InMemoryTorrentRepository { /// The underlying in-memory data structure that stores swarms data. 
swarms: Arc, @@ -49,7 +49,7 @@ impl InMemoryTorrentRepository { /// /// This function panics if the underling swarms return an error. #[must_use] - pub fn upsert_peer( + pub async fn upsert_peer( &self, info_hash: &InfoHash, peer: &peer::Peer, @@ -57,6 +57,7 @@ impl InMemoryTorrentRepository { ) -> bool { self.swarms .handle_announcement(info_hash, peer, opt_persistent_torrent) + .await .expect("Failed to upsert the peer in swarms") } @@ -75,8 +76,8 @@ impl InMemoryTorrentRepository { /// An `Option` containing the removed torrent entry if it existed. #[cfg(test)] #[must_use] - pub(crate) fn remove(&self, key: &InfoHash) -> Option { - self.swarms.remove(key) + pub(crate) async fn remove(&self, key: &InfoHash) -> Option { + self.swarms.remove(key).await } /// Removes inactive peers from all torrent entries. diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index a35fd7aed..14a4f58f5 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -246,7 +246,9 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash, &sample_peer(), None) + .await; let torrent_info = get_torrent_info(&in_memory_torrent_repository, &info_hash).unwrap(); @@ -290,7 +292,9 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash, &sample_peer(), None) + .await; let 
torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); @@ -315,8 +319,12 @@ mod tests { let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer(), None); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash1, &sample_peer(), None) + .await; + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash2, &sample_peer(), None) + .await; let offset = 0; let limit = 1; @@ -336,8 +344,12 @@ mod tests { let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer(), None); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash1, &sample_peer(), None) + .await; + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash2, &sample_peer(), None) + .await; let offset = 1; let limit = 4000; @@ -362,11 +374,15 @@ mod tests { let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash1 = InfoHash::from_str(&hash1).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash1, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash1, &sample_peer(), None) + .await; let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // 
DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash2, &sample_peer(), None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash2, &sample_peer(), None) + .await; let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); @@ -414,7 +430,9 @@ mod tests { let info_hash = sample_info_hash(); - let _ = in_memory_torrent_repository.upsert_peer(&info_hash, &sample_peer(), None); + let _ = in_memory_torrent_repository + .upsert_peer(&info_hash, &sample_peer(), None) + .await; let torrent_info = get_torrents(&in_memory_torrent_repository, &[info_hash]); diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index e3667e74a..6dae3d860 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -34,12 +34,13 @@ where { /// Add a torrent to the tracker #[allow(dead_code)] - pub fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { + pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { let _number_of_downloads_increased = self .container .tracker_core_container .in_memory_torrent_repository - .upsert_peer(info_hash, peer, None); + .upsert_peer(info_hash, peer, None) + .await; } } diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 5311531aa..ba0721289 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -353,7 +353,7 @@ mod tests { assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); } - fn add_a_torrent_peer_using_ipv6(in_memory_torrent_repository: &Arc) { + async fn add_a_torrent_peer_using_ipv6(in_memory_torrent_repository: &Arc) { let info_hash = 
AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -366,8 +366,9 @@ mod tests { .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv6, None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash.0.into(), &peer_using_ipv6, None) + .await; } async fn announce_a_new_peer_using_ipv4( @@ -405,7 +406,7 @@ mod tests { let (core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - add_a_torrent_peer_using_ipv6(&core_tracker_services.in_memory_torrent_repository); + add_a_torrent_peer_using_ipv6(&core_tracker_services.in_memory_torrent_repository).await; let response = announce_a_new_peer_using_ipv4(Arc::new(core_tracker_services), Arc::new(core_udp_tracker_services)).await; @@ -689,7 +690,7 @@ mod tests { assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); } - fn add_a_torrent_peer_using_ipv4(in_memory_torrent_repository: &Arc) { + async fn add_a_torrent_peer_using_ipv4(in_memory_torrent_repository: &Arc) { let info_hash = AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -701,8 +702,9 @@ mod tests { .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); - let _number_of_downloads_increased = - in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer_using_ipv4, None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash.0.into(), &peer_using_ipv4, None) + .await; } async fn announce_a_new_peer_using_ipv6( @@ -755,7 +757,7 @@ mod tests { let (core_tracker_services, _core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - 
add_a_torrent_peer_using_ipv4(&core_tracker_services.in_memory_torrent_repository); + add_a_torrent_peer_using_ipv4(&core_tracker_services.in_memory_torrent_repository).await; let response = announce_a_new_peer_using_ipv6( core_tracker_services.core_config.clone(), diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index 5cc84acd6..34d5a5ce2 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -163,7 +163,9 @@ mod tests { .with_number_of_bytes_left(0) .into(); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(&info_hash.0.into(), &peer, None); + let _number_of_downloads_increased = in_memory_torrent_repository + .upsert_peer(&info_hash.0.into(), &peer, None) + .await; } fn build_scrape_request(remote_addr: &SocketAddr, info_hash: &InfoHash) -> ScrapeRequest { From 6d95d1ad22f87c46310a14f47736e52bac07d993 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 12 May 2025 19:12:58 +0100 Subject: [PATCH 598/802] refactor: [#1358] inject event sender in Swarm type It required to use `tokio::sync::Mutex` for the `SwarmHandle` (`Arc>`). Otherwise it's not safe to pass the Swarm lock between threads. 
--- Cargo.lock | 1 + .../tests/server/v1/contract.rs | 12 +- .../src/v1/context/torrent/handlers.rs | 17 +- .../src/statistics/services.rs | 2 +- .../src/statistics/services.rs | 2 +- packages/torrent-repository/Cargo.toml | 1 + packages/torrent-repository/src/lib.rs | 13 +- packages/torrent-repository/src/swarm.rs | 295 +++++++++++------- packages/torrent-repository/src/swarms.rs | 140 +++++---- .../torrent-repository/tests/swarm/mod.rs | 60 ++-- .../torrent-repository/tests/swarms/mod.rs | 198 ++++++------ packages/tracker-core/src/announce_handler.rs | 18 +- packages/tracker-core/src/scrape_handler.rs | 6 +- packages/tracker-core/src/torrent/manager.rs | 48 +-- .../src/torrent/repository/in_memory.rs | 25 +- packages/tracker-core/src/torrent/services.rs | 51 +-- .../src/statistics/services.rs | 2 +- .../src/handlers/announce.rs | 17 +- .../src/statistics/services.rs | 2 +- src/bootstrap/jobs/torrent_cleanup.rs | 2 +- 20 files changed, 510 insertions(+), 402 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b39355065..ddf163cc6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4857,6 +4857,7 @@ dependencies = [ "bittorrent-primitives", "criterion", "crossbeam-skiplist", + "futures", "rand 0.9.1", "rstest", "serde", diff --git a/packages/axum-http-tracker-server/tests/server/v1/contract.rs b/packages/axum-http-tracker-server/tests/server/v1/contract.rs index afd4d3168..d864ba67c 100644 --- a/packages/axum-http-tracker-server/tests/server/v1/contract.rs +++ b/packages/axum-http-tracker-server/tests/server/v1/contract.rs @@ -787,7 +787,8 @@ mod for_all_config_modes { .container .tracker_core_container .in_memory_torrent_repository - .get_torrent_peers(&info_hash); + .get_torrent_peers(&info_hash) + .await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), client_ip); @@ -829,7 +830,8 @@ mod for_all_config_modes { .container .tracker_core_container .in_memory_torrent_repository - .get_torrent_peers(&info_hash); + .get_torrent_peers(&info_hash) + .await; 
let peer_addr = peers[0].peer_addr; assert_eq!( @@ -878,7 +880,8 @@ mod for_all_config_modes { .container .tracker_core_container .in_memory_torrent_repository - .get_torrent_peers(&info_hash); + .get_torrent_peers(&info_hash) + .await; let peer_addr = peers[0].peer_addr; assert_eq!( @@ -925,7 +928,8 @@ mod for_all_config_modes { .container .tracker_core_container .in_memory_torrent_repository - .get_torrent_peers(&info_hash); + .get_torrent_peers(&info_hash) + .await; let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/torrent/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/torrent/handlers.rs index 613abbdeb..eecbd9ac3 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/torrent/handlers.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/torrent/handlers.rs @@ -33,7 +33,7 @@ pub async fn get_torrent_handler( ) -> Response { match InfoHash::from_str(&info_hash.0) { Err(_) => invalid_info_hash_param_response(&info_hash.0), - Ok(info_hash) => match get_torrent_info(&in_memory_torrent_repository, &info_hash) { + Ok(info_hash) => match get_torrent_info(&in_memory_torrent_repository, &info_hash).await { Some(info) => torrent_info_response(info).into_response(), None => torrent_not_known_response(), }, @@ -85,14 +85,19 @@ pub async fn get_torrents_handler( tracing::debug!("pagination: {:?}", pagination); if pagination.0.info_hashes.is_empty() { - torrent_list_response(&get_torrents_page( - &in_memory_torrent_repository, - Some(&Pagination::new_with_options(pagination.0.offset, pagination.0.limit)), - )) + torrent_list_response( + &get_torrents_page( + &in_memory_torrent_repository, + Some(&Pagination::new_with_options(pagination.0.offset, pagination.0.limit)), + ) + .await, + ) .into_response() } else { match parse_info_hashes(pagination.0.info_hashes) { - Ok(info_hashes) => 
torrent_list_response(&get_torrents(&in_memory_torrent_repository, &info_hashes)).into_response(), + Ok(info_hashes) => { + torrent_list_response(&get_torrents(&in_memory_torrent_repository, &info_hashes).await).into_response() + } Err(err) => match err { QueryParamError::InvalidInfoHash { info_hash } => invalid_info_hash_param_response(&info_hash), }, diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index 1c5890ea8..3c8a4fa43 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -47,7 +47,7 @@ pub async fn get_metrics( in_memory_torrent_repository: Arc, stats_repository: Arc, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; let stats = stats_repository.get_stats().await; TrackerMetrics { diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index b8c2f3f1d..aad31a323 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -32,7 +32,7 @@ pub async fn get_metrics( http_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); let http_stats = http_stats_repository.get_stats().await; let udp_server_stats = udp_server_stats_repository.get_stats().await; diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 77192c7cf..1c7cc09fe 100644 --- a/packages/torrent-repository/Cargo.toml +++ 
b/packages/torrent-repository/Cargo.toml @@ -19,6 +19,7 @@ version.workspace = true aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" crossbeam-skiplist = "0" +futures = "0" serde = "1.0.219" thiserror = "2.0.12" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs index c6790c4db..3adf2f18d 100644 --- a/packages/torrent-repository/src/lib.rs +++ b/packages/torrent-repository/src/lib.rs @@ -4,8 +4,9 @@ pub mod statistics; pub mod swarm; pub mod swarms; -use std::sync::{Arc, Mutex, MutexGuard}; +use std::sync::Arc; +use tokio::sync::Mutex; use torrust_tracker_clock::clock; pub type Swarms = swarms::Swarms; @@ -24,16 +25,6 @@ pub(crate) type CurrentClock = clock::Stopped; pub const TORRENT_REPOSITORY_LOG_TARGET: &str = "TORRENT_REPOSITORY"; -pub trait LockTrackedTorrent { - fn lock_or_panic(&self) -> MutexGuard<'_, Swarm>; -} - -impl LockTrackedTorrent for SwarmHandle { - fn lock_or_panic(&self) -> MutexGuard<'_, Swarm> { - self.lock().expect("can't acquire lock for tracked torrent handle") - } -} - #[cfg(test)] pub(crate) mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 4437ca410..d1918bd24 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -1,6 +1,8 @@ //! A swarm is a collection of peers that are all trying to download the same //! torrent. 
use std::collections::BTreeMap; +use std::fmt::Debug; +use std::hash::{Hash, Hasher}; use std::net::SocketAddr; use std::sync::Arc; @@ -10,37 +12,72 @@ use torrust_tracker_primitives::peer::{self, Peer, PeerAnnouncement}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; -#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +use crate::event::sender::Sender; +use crate::event::Event; + +#[derive(Clone, Default)] pub struct Swarm { peers: BTreeMap>, metadata: SwarmMetadata, + event_sender: Sender, +} + +#[allow(clippy::missing_fields_in_debug)] +impl Debug for Swarm { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Swarm") + .field("peers", &self.peers) + .field("metadata", &self.metadata) + .finish() + } +} + +impl Hash for Swarm { + fn hash(&self, state: &mut H) { + self.peers.hash(state); + self.metadata.hash(state); + } +} + +impl PartialEq for Swarm { + fn eq(&self, other: &Self) -> bool { + self.peers == other.peers && self.metadata == other.metadata + } } +impl Eq for Swarm {} + impl Swarm { #[must_use] - pub fn new(downloaded: u32) -> Self { + pub fn new(downloaded: u32, event_sender: Sender) -> Self { Self { peers: BTreeMap::new(), metadata: SwarmMetadata::new(downloaded, 0, 0), + event_sender, } } - pub fn handle_announcement(&mut self, incoming_announce: &PeerAnnouncement) -> bool { + pub async fn handle_announcement(&mut self, incoming_announce: &PeerAnnouncement) -> bool { let mut downloads_increased: bool = false; let _previous_peer = match peer::ReadInfo::get_event(incoming_announce) { AnnounceEvent::Started | AnnounceEvent::None | AnnounceEvent::Completed => { - self.upsert_peer(Arc::new(*incoming_announce), &mut downloads_increased) + self.upsert_peer(Arc::new(*incoming_announce), &mut downloads_increased).await } - AnnounceEvent::Stopped => self.remove(incoming_announce), + AnnounceEvent::Stopped => 
self.remove(incoming_announce).await, }; downloads_increased } - pub fn upsert_peer(&mut self, incoming_announce: Arc, downloads_increased: &mut bool) -> Option> { + pub async fn upsert_peer( + &mut self, + incoming_announce: Arc, + downloads_increased: &mut bool, + ) -> Option> { let is_now_seeder = incoming_announce.is_seeder(); let has_completed = incoming_announce.event == AnnounceEvent::Completed; + let announcement = incoming_announce.clone(); if let Some(old_announce) = self.peers.insert(incoming_announce.peer_addr, incoming_announce) { // A peer has been updated in the swarm. @@ -79,11 +116,19 @@ impl Swarm { // from a known peer } + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerAdded { + announcement: *announcement, + }) + .await; + } + None } } - pub fn remove(&mut self, peer_to_remove: &Peer) -> Option> { + pub async fn remove(&mut self, peer_to_remove: &Peer) -> Option> { match self.peers.remove(&peer_to_remove.peer_addr) { Some(old_peer) => { // A peer has been removed from the swarm. 
@@ -95,6 +140,15 @@ impl Swarm { self.metadata.incomplete -= 1; } + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerRemoved { + socket_addr: old_peer.peer_addr, + peer_id: old_peer.peer_id, + }) + .await; + } + Some(old_peer) } None => None, @@ -246,104 +300,107 @@ mod tests { assert_eq!(swarm.len(), 0); } - #[test] - fn it_should_allow_inserting_a_new_peer() { + #[tokio::test] + async fn it_should_allow_inserting_a_new_peer() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - assert_eq!(swarm.upsert_peer(peer.into(), &mut downloads_increased), None); + assert_eq!(swarm.upsert_peer(peer.into(), &mut downloads_increased).await, None); } - #[test] - fn it_should_allow_updating_a_preexisting_peer() { + #[tokio::test] + async fn it_should_allow_updating_a_preexisting_peer() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; - assert_eq!(swarm.upsert_peer(peer.into(), &mut downloads_increased), Some(Arc::new(peer))); + assert_eq!( + swarm.upsert_peer(peer.into(), &mut downloads_increased).await, + Some(Arc::new(peer)) + ); } - #[test] - fn it_should_allow_getting_all_peers() { + #[tokio::test] + async fn it_should_allow_getting_all_peers() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.peers(None), [Arc::new(peer)]); } - #[test] - fn it_should_allow_getting_one_peer_by_id() { + #[tokio::test] + async fn it_should_allow_getting_one_peer_by_id() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - 
swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.get(&peer.peer_addr), Some(Arc::new(peer)).as_ref()); } - #[test] - fn it_should_increase_the_number_of_peers_after_inserting_a_new_one() { + #[tokio::test] + async fn it_should_increase_the_number_of_peers_after_inserting_a_new_one() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.len(), 1); } - #[test] - fn it_should_decrease_the_number_of_peers_after_removing_one() { + #[tokio::test] + async fn it_should_decrease_the_number_of_peers_after_removing_one() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; - swarm.remove(&peer); + swarm.remove(&peer).await; assert!(swarm.is_empty()); } - #[test] - fn it_should_allow_removing_an_existing_peer() { + #[tokio::test] + async fn it_should_allow_removing_an_existing_peer() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; - let old = swarm.remove(&peer); + let old = swarm.remove(&peer).await; assert_eq!(old, Some(Arc::new(peer))); assert_eq!(swarm.get(&peer.peer_addr), None); } - #[test] - fn it_should_allow_removing_a_non_existing_peer() { + #[tokio::test] + async fn it_should_allow_removing_a_non_existing_peer() { let mut swarm = Swarm::default(); let peer = PeerBuilder::default().build(); - assert_eq!(swarm.remove(&peer), None); + assert_eq!(swarm.remove(&peer).await, None); } - #[test] - fn 
it_should_allow_getting_all_peers_excluding_peers_with_a_given_address() { + #[tokio::test] + async fn it_should_allow_getting_all_peers_excluding_peers_with_a_given_address() { let mut swarm = Swarm::default(); let mut downloads_increased = false; @@ -351,19 +408,19 @@ mod tests { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert_peer(peer1.into(), &mut downloads_increased); + swarm.upsert_peer(peer1.into(), &mut downloads_increased).await; let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); - swarm.upsert_peer(peer2.into(), &mut downloads_increased); + swarm.upsert_peer(peer2.into(), &mut downloads_increased).await; assert_eq!(swarm.peers_excluding(&peer2.peer_addr, None), [Arc::new(peer1)]); } - #[test] - fn it_should_remove_inactive_peers() { + #[tokio::test] + async fn it_should_remove_inactive_peers() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let one_second = DurationSinceUnixEpoch::new(1, 0); @@ -371,7 +428,7 @@ mod tests { // Insert the peer let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; // Remove peers not updated since one second after inserting the peer swarm.remove_inactive(last_update_time + one_second); @@ -379,8 +436,8 @@ mod tests { assert_eq!(swarm.len(), 0); } - #[test] - fn it_should_not_remove_active_peers() { + #[tokio::test] + async fn it_should_not_remove_active_peers() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let one_second = DurationSinceUnixEpoch::new(1, 0); @@ -388,7 +445,7 @@ mod tests { // Insert the peer let last_update_time 
= DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; // Remove peers not updated since one second before inserting the peer. swarm.remove_inactive(last_update_time - one_second); @@ -407,23 +464,23 @@ mod tests { Swarm::default() } - fn not_empty_swarm() -> Swarm { + async fn not_empty_swarm() -> Swarm { let mut swarm = Swarm::default(); - swarm.upsert_peer(PeerBuilder::default().build().into(), &mut false); + swarm.upsert_peer(PeerBuilder::default().build().into(), &mut false).await; swarm } - fn not_empty_swarm_with_downloads() -> Swarm { + async fn not_empty_swarm_with_downloads() -> Swarm { let mut swarm = Swarm::default(); let mut peer = PeerBuilder::leecher().build(); let mut downloads_increased = false; - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert!(swarm.metadata().downloads() > 0); @@ -457,13 +514,13 @@ mod tests { assert!(empty_swarm().should_be_removed(&remove_peerless_torrents_policy())); } - #[test] - fn it_should_not_be_removed_is_the_swarm_is_not_empty() { - assert!(!not_empty_swarm().should_be_removed(&remove_peerless_torrents_policy())); + #[tokio::test] + async fn it_should_not_be_removed_is_the_swarm_is_not_empty() { + assert!(!not_empty_swarm().await.should_be_removed(&remove_peerless_torrents_policy())); } - #[test] - fn it_should_not_be_removed_even_if_the_swarm_is_empty_if_we_need_to_track_stats_for_downloads_and_there_has_been_downloads( + #[tokio::test] + async fn 
it_should_not_be_removed_even_if_the_swarm_is_empty_if_we_need_to_track_stats_for_downloads_and_there_has_been_downloads( ) { let policy = TrackerPolicy { remove_peerless_torrents: true, @@ -471,7 +528,7 @@ mod tests { ..Default::default() }; - assert!(!not_empty_swarm_with_downloads().should_be_removed(&policy)); + assert!(!not_empty_swarm_with_downloads().await.should_be_removed(&policy)); } } @@ -486,33 +543,35 @@ mod tests { assert!(!empty_swarm().should_be_removed(&don_not_remove_peerless_torrents_policy())); } - #[test] - fn it_should_not_be_removed_is_the_swarm_is_not_empty() { - assert!(!not_empty_swarm().should_be_removed(&don_not_remove_peerless_torrents_policy())); + #[tokio::test] + async fn it_should_not_be_removed_is_the_swarm_is_not_empty() { + assert!(!not_empty_swarm() + .await + .should_be_removed(&don_not_remove_peerless_torrents_policy())); } } } - #[test] - fn it_should_allow_inserting_two_identical_peers_except_for_the_socket_address() { + #[tokio::test] + async fn it_should_allow_inserting_two_identical_peers_except_for_the_socket_address() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let peer1 = PeerBuilder::default() .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert_peer(peer1.into(), &mut downloads_increased); + swarm.upsert_peer(peer1.into(), &mut downloads_increased).await; let peer2 = PeerBuilder::default() .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); - swarm.upsert_peer(peer2.into(), &mut downloads_increased); + swarm.upsert_peer(peer2.into(), &mut downloads_increased).await; assert_eq!(swarm.len(), 2); } - #[test] - fn it_should_not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { + #[tokio::test] + async fn it_should_not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { let mut swarm = Swarm::default(); let mut downloads_increased = false; @@ 
-523,27 +582,27 @@ mod tests { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert_peer(peer1.into(), &mut downloads_increased); + swarm.upsert_peer(peer1.into(), &mut downloads_increased).await; let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert_peer(peer2.into(), &mut downloads_increased); + swarm.upsert_peer(peer2.into(), &mut downloads_increased).await; assert_eq!(swarm.len(), 1); } - #[test] - fn it_should_return_the_metadata() { + #[tokio::test] + async fn it_should_return_the_metadata() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); - swarm.upsert_peer(leecher.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; + swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; assert_eq!( swarm.metadata(), @@ -555,32 +614,32 @@ mod tests { ); } - #[test] - fn it_should_return_the_number_of_seeders_in_the_list() { + #[tokio::test] + async fn it_should_return_the_number_of_seeders_in_the_list() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); - swarm.upsert_peer(leecher.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; + swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; let (seeders, _leechers) = swarm.seeders_and_leechers(); assert_eq!(seeders, 1); } - #[test] - fn it_should_return_the_number_of_leechers_in_the_list() { + #[tokio::test] + async fn 
it_should_return_the_number_of_leechers_in_the_list() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); - swarm.upsert_peer(leecher.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; + swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; let (_seeders, leechers) = swarm.seeders_and_leechers(); @@ -594,8 +653,8 @@ mod tests { use crate::swarm::Swarm; - #[test] - fn it_should_increase_the_number_of_leechers_if_the_new_peer_is_a_leecher_() { + #[tokio::test] + async fn it_should_increase_the_number_of_leechers_if_the_new_peer_is_a_leecher_() { let mut swarm = Swarm::default(); let mut downloads_increased = false; @@ -603,13 +662,13 @@ mod tests { let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(leecher.into(), &mut downloads_increased); + swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().leechers(), leechers + 1); } - #[test] - fn it_should_increase_the_number_of_seeders_if_the_new_peer_is_a_seeder() { + #[tokio::test] + async fn it_should_increase_the_number_of_seeders_if_the_new_peer_is_a_seeder() { let mut swarm = Swarm::default(); let mut downloads_increased = false; @@ -617,13 +676,13 @@ mod tests { let seeder = PeerBuilder::seeder().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().seeders(), seeders + 1); } - #[test] - fn it_should_not_increasing_the_number_of_downloads_if_the_new_peer_has_completed_downloading_as_it_was_not_previously_known( + #[tokio::test] + async fn it_should_not_increasing_the_number_of_downloads_if_the_new_peer_has_completed_downloading_as_it_was_not_previously_known( ) { let mut swarm = Swarm::default(); let mut 
downloads_increased = false; @@ -632,7 +691,7 @@ mod tests { let seeder = PeerBuilder::seeder().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().downloads(), downloads); } @@ -643,34 +702,34 @@ mod tests { use crate::swarm::Swarm; - #[test] - fn it_should_decrease_the_number_of_leechers_if_the_removed_peer_was_a_leecher() { + #[tokio::test] + async fn it_should_decrease_the_number_of_leechers_if_the_removed_peer_was_a_leecher() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(leecher.into(), &mut downloads_increased); + swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; let leechers = swarm.metadata().leechers(); - swarm.remove(&leecher); + swarm.remove(&leecher).await; assert_eq!(swarm.metadata().leechers(), leechers - 1); } - #[test] - fn it_should_decrease_the_number_of_seeders_if_the_removed_peer_was_a_seeder() { + #[tokio::test] + async fn it_should_decrease_the_number_of_seeders_if_the_removed_peer_was_a_seeder() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; let seeders = swarm.metadata().seeders(); - swarm.remove(&seeder); + swarm.remove(&seeder).await; assert_eq!(swarm.metadata().seeders(), seeders - 1); } @@ -683,14 +742,14 @@ mod tests { use crate::swarm::Swarm; - #[test] - fn it_should_decrease_the_number_of_leechers_when_a_removed_peer_is_a_leecher() { + #[tokio::test] + async fn it_should_decrease_the_number_of_leechers_when_a_removed_peer_is_a_leecher() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(leecher.into(), &mut downloads_increased); + 
swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; let leechers = swarm.metadata().leechers(); @@ -699,14 +758,14 @@ mod tests { assert_eq!(swarm.metadata().leechers(), leechers - 1); } - #[test] - fn it_should_decrease_the_number_of_seeders_when_the_removed_peer_is_a_seeder() { + #[tokio::test] + async fn it_should_decrease_the_number_of_seeders_when_the_removed_peer_is_a_seeder() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased); + swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; let seeders = swarm.metadata().seeders(); @@ -722,80 +781,80 @@ mod tests { use crate::swarm::Swarm; - #[test] - fn it_should_increase_seeders_and_decreasing_leechers_when_the_peer_changes_from_leecher_to_seeder_() { + #[tokio::test] + async fn it_should_increase_seeders_and_decreasing_leechers_when_the_peer_changes_from_leecher_to_seeder_() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; let leechers = swarm.metadata().leechers(); let seeders = swarm.metadata().seeders(); peer.left = NumberOfBytes::new(0); // Convert to seeder - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().seeders(), seeders + 1); assert_eq!(swarm.metadata().leechers(), leechers - 1); } - #[test] - fn it_should_increase_leechers_and_decreasing_seeders_when_the_peer_changes_from_seeder_to_leecher() { + #[tokio::test] + async fn it_should_increase_leechers_and_decreasing_seeders_when_the_peer_changes_from_seeder_to_leecher() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let mut peer = PeerBuilder::seeder().build(); - 
swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; let leechers = swarm.metadata().leechers(); let seeders = swarm.metadata().seeders(); peer.left = NumberOfBytes::new(10); // Convert to leecher - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().leechers(), leechers + 1); assert_eq!(swarm.metadata().seeders(), seeders - 1); } - #[test] - fn it_should_increase_the_number_of_downloads_when_the_peer_announces_completed_downloading() { + #[tokio::test] + async fn it_should_increase_the_number_of_downloads_when_the_peer_announces_completed_downloading() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; let downloads = swarm.metadata().downloads(); peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().downloads(), downloads + 1); } - #[test] - fn it_should_not_increasing_the_number_of_downloads_when_the_peer_announces_completed_downloading_twice_() { + #[tokio::test] + async fn it_should_not_increasing_the_number_of_downloads_when_the_peer_announces_completed_downloading_twice_() { let mut swarm = Swarm::default(); let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; let downloads = swarm.metadata().downloads(); peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut 
downloads_increased).await; - swarm.upsert_peer(peer.into(), &mut downloads_increased); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; assert_eq!(swarm.metadata().downloads(), downloads + 1); } diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index d92e1755a..277a85cc2 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -1,7 +1,8 @@ -use std::sync::{Arc, Mutex}; +use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use crossbeam_skiplist::SkipMap; +use tokio::sync::Mutex; use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; @@ -48,6 +49,7 @@ impl Swarms { /// # Errors /// /// This function panics if the lock for the swarm handle cannot be acquired. + #[allow(clippy::await_holding_lock)] pub async fn handle_announcement( &self, info_hash: &InfoHash, @@ -57,7 +59,7 @@ impl Swarms { let swarm_handle = match self.swarms.get(info_hash) { None => { let new_swarm_handle = if let Some(number_of_downloads) = opt_persistent_torrent { - SwarmHandle::new(Swarm::new(number_of_downloads).into()) + SwarmHandle::new(Swarm::new(number_of_downloads, self.event_sender.clone()).into()) } else { SwarmHandle::default() }; @@ -78,9 +80,11 @@ impl Swarms { Some(existing_swarm_handle) => existing_swarm_handle, }; - let mut swarm = swarm_handle.value().lock()?; + let mut swarm = swarm_handle.value().lock().await; - Ok(swarm.handle_announcement(peer)) + let downloads_increased = swarm.handle_announcement(peer).await; + + Ok(downloads_increased) } /// Inserts a new swarm. Only used for testing purposes. @@ -162,11 +166,11 @@ impl Swarms { /// # Errors /// /// This function panics if the lock for the swarm handle cannot be acquired. 
- pub fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Result, Error> { + pub async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Result, Error> { match self.swarms.get(info_hash) { None => Ok(None), Some(swarm_handle) => { - let swarm = swarm_handle.value().lock()?; + let swarm = swarm_handle.value().lock().await; Ok(Some(swarm.metadata())) } } @@ -183,8 +187,8 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for the /// swarm handle. - pub fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> Result { - match self.get_swarm_metadata(info_hash) { + pub async fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> Result { + match self.get_swarm_metadata(info_hash).await { Ok(Some(swarm_metadata)) => Ok(swarm_metadata), Ok(None) => Ok(SwarmMetadata::zeroed()), Err(err) => Err(err), @@ -207,7 +211,7 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for the /// swarm handle. - pub fn get_peers_peers_excluding( + pub async fn get_peers_peers_excluding( &self, info_hash: &InfoHash, peer: &peer::Peer, @@ -216,7 +220,7 @@ impl Swarms { match self.get(info_hash) { None => Ok(vec![]), Some(swarm_handle) => { - let swarm = swarm_handle.lock()?; + let swarm = swarm_handle.lock().await; Ok(swarm.peers_excluding(&peer.peer_addr, Some(limit))) } } @@ -236,11 +240,11 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for the /// swarm handle. - pub fn get_swarm_peers(&self, info_hash: &InfoHash, limit: usize) -> Result>, Error> { + pub async fn get_swarm_peers(&self, info_hash: &InfoHash, limit: usize) -> Result>, Error> { match self.get(info_hash) { None => Ok(vec![]), Some(swarm_handle) => { - let swarm = swarm_handle.lock()?; + let swarm = swarm_handle.lock().await; Ok(swarm.peers(Some(limit))) } } @@ -255,7 +259,7 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. 
- pub fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> Result { + pub async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> Result { tracing::info!( "Removing inactive peers since: {:?} ...", convert_from_timestamp_to_datetime_utc(current_cutoff) @@ -264,7 +268,7 @@ impl Swarms { let mut inactive_peers_removed = 0; for swarm_handle in &self.swarms { - let mut swarm = swarm_handle.value().lock()?; + let mut swarm = swarm_handle.value().lock().await; let removed = swarm.remove_inactive(current_cutoff); inactive_peers_removed += removed; } @@ -283,13 +287,13 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. - pub fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> Result { + pub async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> Result { tracing::info!("Removing peerless torrents ..."); let mut peerless_torrents_removed = 0; for swarm_handle in &self.swarms { - let swarm = swarm_handle.value().lock()?; + let swarm = swarm_handle.value().lock().await; if swarm.meets_retaining_policy(policy) { continue; @@ -320,7 +324,7 @@ impl Swarms { continue; } - let entry = SwarmHandle::new(Swarm::new(*completed).into()); + let entry = SwarmHandle::new(Swarm::new(*completed, self.event_sender.clone()).into()); // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. @@ -348,11 +352,11 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. 
- pub fn get_aggregate_swarm_metadata(&self) -> Result { + pub async fn get_aggregate_swarm_metadata(&self) -> Result { let mut metrics = AggregateSwarmMetadata::default(); for swarm_handle in &self.swarms { - let swarm = swarm_handle.value().lock()?; + let swarm = swarm_handle.value().lock().await; let stats = swarm.metadata(); @@ -376,11 +380,11 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. - pub fn count_peerless_torrents(&self) -> Result { + pub async fn count_peerless_torrents(&self) -> Result { let mut peerless_torrents = 0; for swarm_handle in &self.swarms { - let swarm = swarm_handle.value().lock()?; + let swarm = swarm_handle.value().lock().await; if swarm.is_peerless() { peerless_torrents += 1; @@ -400,11 +404,11 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. - pub fn count_peers(&self) -> Result { + pub async fn count_peers(&self) -> Result { let mut peers = 0; for swarm_handle in &self.swarms { - let swarm = swarm_handle.value().lock()?; + let swarm = swarm_handle.value().lock().await; peers += swarm.len(); } @@ -424,16 +428,7 @@ impl Swarms { } #[derive(thiserror::Error, Debug, Clone)] -pub enum Error { - #[error("Can't acquire swarm lock")] - CannotAcquireSwarmLock, -} - -impl From>> for Error { - fn from(_error: std::sync::PoisonError>) -> Self { - Error::CannotAcquireSwarmLock - } -} +pub enum Error {} #[cfg(test)] mod tests { @@ -523,7 +518,7 @@ mod tests { swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); - let peers = swarms.get_swarm_peers(&info_hash, 74).unwrap(); + let peers = swarms.get_swarm_peers(&info_hash, 74).await.unwrap(); assert_eq!(peers, vec![Arc::new(peer)]); } @@ -532,7 +527,7 @@ mod tests { async fn it_should_return_an_empty_list_or_peers_for_a_non_existing_torrent() { let swarms = Arc::new(Swarms::default()); - let peers = swarms.get_swarm_peers(&sample_info_hash(), 
74).unwrap(); + let peers = swarms.get_swarm_peers(&sample_info_hash(), 74).await.unwrap(); assert!(peers.is_empty()); } @@ -557,7 +552,7 @@ mod tests { swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); } - let peers = swarms.get_swarm_peers(&info_hash, 74).unwrap(); + let peers = swarms.get_swarm_peers(&info_hash, 74).await.unwrap(); assert_eq!(peers.len(), 74); } @@ -582,6 +577,7 @@ mod tests { let peers = swarms .get_peers_peers_excluding(&sample_info_hash(), &sample_peer(), TORRENT_PEERS_LIMIT) + .await .unwrap(); assert_eq!(peers, vec![]); @@ -598,6 +594,7 @@ mod tests { let peers = swarms .get_peers_peers_excluding(&info_hash, &peer, TORRENT_PEERS_LIMIT) + .await .unwrap(); assert_eq!(peers, vec![]); @@ -630,6 +627,7 @@ mod tests { let peers = swarms .get_peers_peers_excluding(&info_hash, &excluded_peer, TORRENT_PEERS_LIMIT) + .await .unwrap(); assert_eq!(peers.len(), 74); @@ -675,9 +673,14 @@ mod tests { // Cut off time is 1 second after the peer was updated swarms .remove_inactive_peers(peer.updated.add(Duration::from_secs(1))) + .await .unwrap(); - assert!(!swarms.get_swarm_peers(&info_hash, 74).unwrap().contains(&Arc::new(peer))); + assert!(!swarms + .get_swarm_peers(&info_hash, 74) + .await + .unwrap() + .contains(&Arc::new(peer))); } async fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { @@ -691,6 +694,7 @@ mod tests { // Remove the peer swarms .remove_inactive_peers(peer.updated.add(Duration::from_secs(1))) + .await .unwrap(); swarms @@ -707,7 +711,7 @@ mod tests { ..Default::default() }; - swarms.remove_peerless_torrents(&tracker_policy).unwrap(); + swarms.remove_peerless_torrents(&tracker_policy).await.unwrap(); assert!(swarms.get(&info_hash).is_none()); } @@ -721,7 +725,7 @@ mod tests { use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; - use crate::{LockTrackedTorrent, SwarmHandle}; + use crate::{Swarm, SwarmHandle}; /// `TorrentEntry` data is not directly 
accessible. It's only /// accessible through the trait methods. We need this temporary @@ -733,19 +737,19 @@ mod tests { number_of_peers: usize, } + async fn torrent_entry_info(swarm_handle: SwarmHandle) -> TorrentEntryInfo { + let torrent_guard = swarm_handle.lock().await; + torrent_guard.clone().into() + } + #[allow(clippy::from_over_into)] - impl Into for SwarmHandle { + impl Into for Swarm { fn into(self) -> TorrentEntryInfo { - let torrent_guard = self.lock_or_panic(); - let torrent_entry_info = TorrentEntryInfo { - swarm_metadata: torrent_guard.metadata(), - peers: torrent_guard.peers(None).iter().map(|peer| *peer.clone()).collect(), - number_of_peers: torrent_guard.len(), + swarm_metadata: self.metadata(), + peers: self.peers(None).iter().map(|peer| *peer.clone()).collect(), + number_of_peers: self.len(), }; - - drop(torrent_guard); - torrent_entry_info } } @@ -759,7 +763,7 @@ mod tests { swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); - let torrent_entry = swarms.get(&info_hash).unwrap(); + let torrent_entry_info = torrent_entry_info(swarms.get(&info_hash).unwrap()).await; assert_eq!( TorrentEntryInfo { @@ -771,7 +775,7 @@ mod tests { peers: vec!(peer), number_of_peers: 1 }, - torrent_entry.into() + torrent_entry_info ); } @@ -780,7 +784,9 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::swarms::tests::the_swarm_repository::returning_torrent_entries::TorrentEntryInfo; + use crate::swarms::tests::the_swarm_repository::returning_torrent_entries::{ + torrent_entry_info, TorrentEntryInfo, + }; use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; @@ -796,7 +802,7 @@ mod tests { assert_eq!(torrent_entries.len(), 1); - let torrent_entry = torrent_entries.first().unwrap().1.clone(); + let torrent_entry = torrent_entry_info(torrent_entries.first().unwrap().1.clone()).await; assert_eq!( TorrentEntryInfo { @@ -808,7 +814,7 @@ mod tests { peers: vec!(peer), number_of_peers: 1 }, - 
torrent_entry.into() + torrent_entry ); } @@ -818,7 +824,9 @@ mod tests { use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::swarms::tests::the_swarm_repository::returning_torrent_entries::TorrentEntryInfo; + use crate::swarms::tests::the_swarm_repository::returning_torrent_entries::{ + torrent_entry_info, TorrentEntryInfo, + }; use crate::swarms::Swarms; use crate::tests::{ sample_info_hash_alphabetically_ordered_after_sample_info_hash_one, sample_info_hash_one, @@ -844,7 +852,7 @@ mod tests { assert_eq!(torrent_entries.len(), 1); - let torrent_entry = torrent_entries.first().unwrap().1.clone(); + let torrent_entry_info = torrent_entry_info(torrent_entries.first().unwrap().1.clone()).await; assert_eq!( TorrentEntryInfo { @@ -856,7 +864,7 @@ mod tests { peers: vec!(peer_one), number_of_peers: 1 }, - torrent_entry.into() + torrent_entry_info ); } @@ -879,7 +887,7 @@ mod tests { assert_eq!(torrent_entries.len(), 1); - let torrent_entry = torrent_entries.first().unwrap().1.clone(); + let torrent_entry_info = torrent_entry_info(torrent_entries.first().unwrap().1.clone()).await; assert_eq!( TorrentEntryInfo { @@ -891,7 +899,7 @@ mod tests { peers: vec!(peer_two), number_of_peers: 1 }, - torrent_entry.into() + torrent_entry_info ); } @@ -934,7 +942,7 @@ mod tests { async fn it_should_get_empty_aggregate_swarm_metadata_when_there_are_no_torrents() { let swarms = Arc::new(Swarms::default()); - let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -956,7 +964,7 @@ mod tests { .await .unwrap(); - let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -978,7 +986,7 @@ mod tests { .await .unwrap(); 
- let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -1000,7 +1008,7 @@ mod tests { .await .unwrap(); - let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap(); assert_eq!( aggregate_swarm_metadata, @@ -1027,7 +1035,7 @@ mod tests { let result_a = start_time.elapsed(); let start_time = std::time::Instant::now(); - let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().unwrap(); + let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap(); let result_b = start_time.elapsed(); assert_eq!( @@ -1060,7 +1068,7 @@ mod tests { swarms.handle_announcement(&infohash, &leecher(), None).await.unwrap(); - let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).unwrap(); + let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).await.unwrap(); assert_eq!( swarm_metadata, @@ -1076,7 +1084,7 @@ mod tests { async fn it_should_return_zeroed_swarm_metadata_for_a_non_existing_torrent() { let swarms = Arc::new(Swarms::default()); - let swarm_metadata = swarms.get_swarm_metadata_or_default(&sample_info_hash()).unwrap(); + let swarm_metadata = swarms.get_swarm_metadata_or_default(&sample_info_hash()).await.unwrap(); assert_eq!(swarm_metadata, SwarmMetadata::zeroed()); } @@ -1103,7 +1111,7 @@ mod tests { swarms.import_persistent(&persistent_torrents); - let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).unwrap(); + let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).await.unwrap(); // Only the number of downloads is persisted. 
assert_eq!(swarm_metadata.downloaded, 1); diff --git a/packages/torrent-repository/tests/swarm/mod.rs b/packages/torrent-repository/tests/swarm/mod.rs index d529b0243..1f5d0b737 100644 --- a/packages/torrent-repository/tests/swarm/mod.rs +++ b/packages/torrent-repository/tests/swarm/mod.rs @@ -47,39 +47,39 @@ pub enum Makes { Three, } -fn make(swarm: &mut Swarm, makes: &Makes) -> Vec { +async fn make(swarm: &mut Swarm, makes: &Makes) -> Vec { match makes { Makes::Empty => vec![], Makes::Started => { let peer = a_started_peer(1); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; vec![peer] } Makes::Completed => { let peer = a_completed_peer(2); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; vec![peer] } Makes::Downloaded => { let mut peer = a_started_peer(3); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; peer.event = AnnounceEvent::Completed; peer.left = NumberOfBytes::new(0); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; vec![peer] } Makes::Three => { let peer_1 = a_started_peer(1); - swarm.handle_announcement(&peer_1); + swarm.handle_announcement(&peer_1).await; let peer_2 = a_completed_peer(2); - swarm.handle_announcement(&peer_2); + swarm.handle_announcement(&peer_2).await; let mut peer_3 = a_started_peer(3); - swarm.handle_announcement(&peer_3); + swarm.handle_announcement(&peer_3).await; peer_3.event = AnnounceEvent::Completed; peer_3.left = NumberOfBytes::new(0); - swarm.handle_announcement(&peer_3); + swarm.handle_announcement(&peer_3).await; vec![peer_1, peer_2, peer_3] } } @@ -89,7 +89,7 @@ fn make(swarm: &mut Swarm, makes: &Makes) -> Vec { #[case::empty(&Makes::Empty)] #[tokio::test] async fn it_should_be_empty_by_default(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - make(&mut swarm, makes); + make(&mut swarm, makes).await; assert_eq!(swarm.len(), 0); } @@ -106,7 +106,7 @@ async fn 
it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy #[case] makes: &Makes, #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, ) { - make(&mut swarm, makes); + make(&mut swarm, makes).await; let has_peers = !swarm.is_empty(); let has_downloads = swarm.metadata().downloaded != 0; @@ -140,7 +140,7 @@ async fn it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_get_peers_for_torrent_entry(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - let peers = make(&mut swarm, makes); + let peers = make(&mut swarm, makes).await; let torrent_peers = swarm.peers(None); @@ -159,11 +159,11 @@ async fn it_should_get_peers_for_torrent_entry(#[values(swarm())] mut swarm: Swa #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_update_a_peer(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - make(&mut swarm, makes); + make(&mut swarm, makes).await; // Make and insert a new peer. let mut peer = a_started_peer(-1); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; // Get the Inserted Peer by Id. let peers = swarm.peers(None); @@ -176,7 +176,7 @@ async fn it_should_update_a_peer(#[values(swarm())] mut swarm: Swarm, #[case] ma // Announce "Completed" torrent download event. peer.event = AnnounceEvent::Completed; - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; // Get the Updated Peer by Id. 
let peers = swarm.peers(None); @@ -198,11 +198,11 @@ async fn it_should_update_a_peer(#[values(swarm())] mut swarm: Swarm, #[case] ma async fn it_should_remove_a_peer_upon_stopped_announcement(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { use torrust_tracker_primitives::peer::ReadInfo as _; - make(&mut swarm, makes); + make(&mut swarm, makes).await; let mut peer = a_started_peer(-1); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; // The started peer should be inserted. let peers = swarm.peers(None); @@ -215,7 +215,7 @@ async fn it_should_remove_a_peer_upon_stopped_announcement(#[values(swarm())] mu // Change peer to "Stopped" and insert. peer.event = AnnounceEvent::Stopped; - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; // It should be removed now. let peers = swarm.peers(None); @@ -237,7 +237,7 @@ async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloade #[values(swarm())] mut torrent: Swarm, #[case] makes: &Makes, ) { - make(&mut torrent, makes); + make(&mut torrent, makes).await; let downloaded = torrent.metadata().downloaded; let peers = torrent.peers(None); @@ -248,7 +248,7 @@ async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloade // Announce "Completed" torrent download event. 
peer.event = AnnounceEvent::Completed; - torrent.handle_announcement(&peer); + torrent.handle_announcement(&peer).await; let stats = torrent.metadata(); if is_already_completed { @@ -265,7 +265,7 @@ async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloade #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_update_a_peer_as_a_seeder(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - let peers = make(&mut swarm, makes); + let peers = make(&mut swarm, makes).await; let completed = u32::try_from(peers.iter().filter(|p| p.is_seeder()).count()).expect("it_should_not_be_so_many"); let peers = swarm.peers(None); @@ -275,7 +275,7 @@ async fn it_should_update_a_peer_as_a_seeder(#[values(swarm())] mut swarm: Swarm // Set Bytes Left to Zero peer.left = NumberOfBytes::new(0); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; let stats = swarm.metadata(); if is_already_non_left { @@ -294,7 +294,7 @@ async fn it_should_update_a_peer_as_a_seeder(#[values(swarm())] mut swarm: Swarm #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_update_a_peer_as_incomplete(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - let peers = make(&mut swarm, makes); + let peers = make(&mut swarm, makes).await; let incomplete = u32::try_from(peers.iter().filter(|p| !p.is_seeder()).count()).expect("it should not be so many"); let peers = swarm.peers(None); @@ -304,7 +304,7 @@ async fn it_should_update_a_peer_as_incomplete(#[values(swarm())] mut swarm: Swa // Set Bytes Left to no Zero peer.left = NumberOfBytes::new(1); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; let stats = swarm.metadata(); if completed_already { @@ -323,7 +323,7 @@ async fn it_should_update_a_peer_as_incomplete(#[values(swarm())] mut swarm: Swa #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_get_peers_excluding_the_client_socket(#[values(swarm())] mut swarm: Swarm, #[case] makes: 
&Makes) { - make(&mut swarm, makes); + make(&mut swarm, makes).await; let peers = swarm.peers(None); let mut peer = **peers.first().expect("there should be a peer"); @@ -338,7 +338,7 @@ async fn it_should_get_peers_excluding_the_client_socket(#[values(swarm())] mut // set the address to the socket. peer.peer_addr = socket; - swarm.handle_announcement(&peer); // Add peer + swarm.handle_announcement(&peer).await; // Add peer // It should not include the peer that has the same socket. assert!(!swarm.peers_excluding(&socket, None).contains(&peer.into())); @@ -352,12 +352,12 @@ async fn it_should_get_peers_excluding_the_client_socket(#[values(swarm())] mut #[case::three(&Makes::Three)] #[tokio::test] async fn it_should_limit_the_number_of_peers_returned(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - make(&mut swarm, makes); + make(&mut swarm, makes).await; // We add one more peer than the scrape limit for peer_number in 1..=74 + 1 { let peer = a_started_peer(peer_number); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; } let peers = swarm.peers(Some(TORRENT_PEERS_LIMIT)); @@ -376,7 +376,7 @@ async fn it_should_remove_inactive_peers_beyond_cutoff(#[values(swarm())] mut sw const TIMEOUT: Duration = Duration::from_secs(120); const EXPIRE: Duration = Duration::from_secs(121); - let peers = make(&mut swarm, makes); + let peers = make(&mut swarm, makes).await; let mut peer = a_completed_peer(-1); @@ -385,7 +385,7 @@ async fn it_should_remove_inactive_peers_beyond_cutoff(#[values(swarm())] mut sw peer.updated = now.sub(EXPIRE); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; assert_eq!(swarm.len(), peers.len() + 1); diff --git a/packages/torrent-repository/tests/swarms/mod.rs b/packages/torrent-repository/tests/swarms/mod.rs index 975457cca..d8ee354c8 100644 --- a/packages/torrent-repository/tests/swarms/mod.rs +++ b/packages/torrent-repository/tests/swarms/mod.rs @@ -3,13 +3,14 @@ use 
std::hash::{DefaultHasher, Hash, Hasher}; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; use bittorrent_primitives::info_hash::InfoHash; +use futures::future::join_all; use rstest::{fixture, rstest}; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::PersistentTorrents; use torrust_tracker_torrent_repository::swarm::Swarm; -use torrust_tracker_torrent_repository::{LockTrackedTorrent, Swarms}; +use torrust_tracker_torrent_repository::Swarms; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; @@ -31,49 +32,49 @@ fn default() -> Entries { } #[fixture] -fn started() -> Entries { +async fn started() -> Entries { let mut swarm = Swarm::default(); - swarm.handle_announcement(&a_started_peer(1)); + swarm.handle_announcement(&a_started_peer(1)).await; vec![(InfoHash::default(), swarm)] } #[fixture] -fn completed() -> Entries { +async fn completed() -> Entries { let mut swarm = Swarm::default(); - swarm.handle_announcement(&a_completed_peer(2)); + swarm.handle_announcement(&a_completed_peer(2)).await; vec![(InfoHash::default(), swarm)] } #[fixture] -fn downloaded() -> Entries { +async fn downloaded() -> Entries { let mut swarm = Swarm::default(); let mut peer = a_started_peer(3); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; peer.event = AnnounceEvent::Completed; peer.left = NumberOfBytes::new(0); - swarm.handle_announcement(&peer); + swarm.handle_announcement(&peer).await; vec![(InfoHash::default(), swarm)] } #[fixture] -fn three() -> Entries { +async fn three() -> Entries { let mut started = Swarm::default(); let started_h = &mut DefaultHasher::default(); - started.handle_announcement(&a_started_peer(1)); + started.handle_announcement(&a_started_peer(1)).await; started.hash(started_h); let mut completed = Swarm::default(); let completed_h = &mut 
DefaultHasher::default(); - completed.handle_announcement(&a_completed_peer(2)); + completed.handle_announcement(&a_completed_peer(2)).await; completed.hash(completed_h); let mut downloaded = Swarm::default(); let downloaded_h = &mut DefaultHasher::default(); let mut downloaded_peer = a_started_peer(3); - downloaded.handle_announcement(&downloaded_peer); + downloaded.handle_announcement(&downloaded_peer).await; downloaded_peer.event = AnnounceEvent::Completed; downloaded_peer.left = NumberOfBytes::new(0); - downloaded.handle_announcement(&downloaded_peer); + downloaded.handle_announcement(&downloaded_peer).await; downloaded.hash(downloaded_h); vec![ @@ -84,12 +85,12 @@ fn three() -> Entries { } #[fixture] -fn many_out_of_order() -> Entries { +async fn many_out_of_order() -> Entries { let mut entries: HashSet<(InfoHash, Swarm)> = HashSet::default(); for i in 0..408 { let mut entry = Swarm::default(); - entry.handle_announcement(&a_started_peer(i)); + entry.handle_announcement(&a_started_peer(i)).await; entries.insert((InfoHash::from(&i), entry)); } @@ -99,12 +100,12 @@ fn many_out_of_order() -> Entries { } #[fixture] -fn many_hashed_in_order() -> Entries { +async fn many_hashed_in_order() -> Entries { let mut entries: BTreeMap = BTreeMap::default(); for i in 0..408 { let mut entry = Swarm::default(); - entry.handle_announcement(&a_started_peer(i)); + entry.handle_announcement(&a_started_peer(i)).await; let hash: &mut DefaultHasher = &mut DefaultHasher::default(); hash.write_i32(i); @@ -191,21 +192,18 @@ fn policy_remove_persist() -> TrackerPolicy { #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] 
+#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_get_a_torrent_entry(#[values(swarms())] repo: Swarms, #[case] entries: Entries) { make(&repo, &entries); if let Some((info_hash, swarm)) = entries.first() { - assert_eq!( - Some(repo.get(info_hash).unwrap().lock_or_panic().clone()), - Some(swarm.clone()) - ); + assert_eq!(Some(repo.get(info_hash).unwrap().lock().await.clone()), Some(swarm.clone())); } else { assert!(repo.get(&InfoHash::default()).is_none()); } @@ -214,23 +212,23 @@ async fn it_should_get_a_torrent_entry(#[values(swarms())] repo: Swarms, #[case] #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( #[values(swarms())] repo: Swarms, #[case] entries: Entries, - many_out_of_order: Entries, + #[future] many_out_of_order: Entries, ) { make(&repo, &entries); let entries_a = repo.get_paginated(None).iter().map(|(i, _)| *i).collect::>(); - make(&repo, &many_out_of_order); + make(&repo, &many_out_of_order.await); let entries_b = repo.get_paginated(None).iter().map(|(i, _)| *i).collect::>(); @@ -247,12 +245,12 @@ async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] 
-#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_get_paginated( #[values(swarms())] repo: Swarms, @@ -267,11 +265,15 @@ async fn it_should_get_paginated( match paginated { // it should return empty if limit is zero. Pagination { limit: 0, .. } => { - let swarms: Vec<(InfoHash, Swarm)> = repo - .get_paginated(Some(&paginated)) - .iter() - .map(|(i, swarm_handle)| (*i, swarm_handle.lock_or_panic().clone())) - .collect(); + let page = repo.get_paginated(Some(&paginated)); + + let futures = page.iter().map(|(i, swarm_handle)| { + let i = *i; + let swarm_handle = swarm_handle.clone(); + async move { (i, swarm_handle.lock().await.clone()) } + }); + + let swarms: Vec<(InfoHash, Swarm)> = join_all(futures).await; assert_eq!(swarms, vec![]); } @@ -287,7 +289,7 @@ async fn it_should_get_paginated( } } - // it should return the only the second entry if both the limit and the offset are one. + // it should return only the second entry if both the limit and the offset are one. Pagination { limit: 1, offset: 1 } => { if info_hashes.len() > 1 { let page = repo.get_paginated(Some(&paginated)); @@ -295,7 +297,7 @@ async fn it_should_get_paginated( assert_eq!(page[0].0, info_hashes[1]); } } - // the other cases are not yet tested. 
+ _ => {} } } @@ -303,12 +305,12 @@ async fn it_should_get_paginated( #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_get_metrics(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; @@ -326,18 +328,18 @@ async fn it_should_get_metrics(#[values(swarms())] swarms: Swarms, #[case] entri metrics.total_downloaded += u64::from(stats.downloaded); } - assert_eq!(swarms.get_aggregate_swarm_metadata().unwrap(), metrics); + assert_eq!(swarms.get_aggregate_swarm_metadata().await.unwrap(), metrics); } #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_import_persistent_torrents( #[values(swarms())] swarms: Swarms, @@ -346,12 +348,15 @@ async fn it_should_import_persistent_torrents( ) { make(&swarms, &entries); - let mut downloaded = swarms.get_aggregate_swarm_metadata().unwrap().total_downloaded; + let mut downloaded = swarms.get_aggregate_swarm_metadata().await.unwrap().total_downloaded; 
persistent_torrents.iter().for_each(|(_, d)| downloaded += u64::from(*d)); swarms.import_persistent(&persistent_torrents); - assert_eq!(swarms.get_aggregate_swarm_metadata().unwrap().total_downloaded, downloaded); + assert_eq!( + swarms.get_aggregate_swarm_metadata().await.unwrap().total_downloaded, + downloaded + ); for (entry, _) in persistent_torrents { assert!(swarms.get(&entry).is_some()); @@ -361,23 +366,23 @@ async fn it_should_import_persistent_torrents( #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_remove_an_entry(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { make(&swarms, &entries); for (info_hash, torrent) in entries { assert_eq!( - Some(swarms.get(&info_hash).unwrap().lock_or_panic().clone()), + Some(swarms.get(&info_hash).unwrap().lock().await.clone()), Some(torrent.clone()) ); assert_eq!( - Some(swarms.remove(&info_hash).await.unwrap().lock_or_panic().clone()), + Some(swarms.remove(&info_hash).await.unwrap().lock().await.clone()), Some(torrent) ); @@ -385,18 +390,18 @@ async fn it_should_remove_an_entry(#[values(swarms())] swarms: Swarms, #[case] e assert!(swarms.remove(&info_hash).await.is_none()); } - assert_eq!(swarms.get_aggregate_swarm_metadata().unwrap().total_torrents, 0); + assert_eq!(swarms.get_aggregate_swarm_metadata().await.unwrap().total_torrents, 0); } #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] 
-#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { use std::ops::Sub as _; @@ -437,7 +442,7 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c { swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); assert_eq!( - swarms.get_aggregate_swarm_metadata().unwrap().total_torrents, + swarms.get_aggregate_swarm_metadata().await.unwrap().total_torrents, entries.len() as u64 + 1 ); } @@ -446,7 +451,7 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c // and verify the swarm metadata was updated. { swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); - let stats = swarms.get_swarm_metadata(&info_hash).unwrap(); + let stats = swarms.get_swarm_metadata(&info_hash).await.unwrap(); assert_eq!( stats, Some(SwarmMetadata { @@ -460,7 +465,7 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c // Verify that this new peer was inserted into the repository. { let lock_tracked_torrent = swarms.get(&info_hash).expect("it_should_get_some"); - let entry = lock_tracked_torrent.lock_or_panic(); + let entry = lock_tracked_torrent.lock().await; assert!(entry.peers(None).contains(&peer.into())); } @@ -468,13 +473,14 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c { swarms .remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed")) + .await .unwrap(); } // Verify that the this peer was removed from the repository. 
{ let lock_tracked_torrent = swarms.get(&info_hash).expect("it_should_get_some"); - let entry = lock_tracked_torrent.lock_or_panic(); + let entry = lock_tracked_torrent.lock().await; assert!(!entry.peers(None).contains(&peer.into())); } } @@ -482,12 +488,12 @@ async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[c #[rstest] #[case::empty(empty())] #[case::default(default())] -#[case::started(started())] -#[case::completed(completed())] -#[case::downloaded(downloaded())] -#[case::three(three())] -#[case::out_of_order(many_out_of_order())] -#[case::in_order(many_hashed_in_order())] +#[case::started(started().await)] +#[case::completed(completed().await)] +#[case::downloaded(downloaded().await)] +#[case::three(three().await)] +#[case::out_of_order(many_out_of_order().await)] +#[case::in_order(many_hashed_in_order().await)] #[tokio::test] async fn it_should_remove_peerless_torrents( #[values(swarms())] swarms: Swarms, @@ -496,13 +502,17 @@ async fn it_should_remove_peerless_torrents( ) { make(&swarms, &entries); - swarms.remove_peerless_torrents(&policy).unwrap(); + swarms.remove_peerless_torrents(&policy).await.unwrap(); + + let paginated = swarms.get_paginated(None); // ← store the result in a named variable + + let futures = paginated.iter().map(|(i, swarm_handle)| { + let i = *i; + let swarm_handle = swarm_handle.clone(); + async move { (i, swarm_handle.lock().await.clone()) } + }); - let torrents: Vec<(InfoHash, Swarm)> = swarms - .get_paginated(None) - .iter() - .map(|(i, lock_tracked_torrent)| (*i, lock_tracked_torrent.lock_or_panic().clone())) - .collect(); + let torrents: Vec<(InfoHash, Swarm)> = join_all(futures).await; for (_, entry) in torrents { assert!(entry.meets_retaining_policy(&policy)); diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 00d42174a..a2e8db743 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ 
b/packages/tracker-core/src/announce_handler.rs @@ -180,16 +180,20 @@ impl AnnounceHandler { self.db_torrent_repository.increase_number_of_downloads(info_hash)?; } - Ok(self.build_announce_data(info_hash, peer, peers_wanted)) + Ok(self.build_announce_data(info_hash, peer, peers_wanted).await) } /// Builds the announce data for the peer making the request. - fn build_announce_data(&self, info_hash: &InfoHash, peer: &peer::Peer, peers_wanted: &PeersWanted) -> AnnounceData { + async fn build_announce_data(&self, info_hash: &InfoHash, peer: &peer::Peer, peers_wanted: &PeersWanted) -> AnnounceData { let peers = self .in_memory_torrent_repository - .get_peers_for(info_hash, peer, peers_wanted.limit()); + .get_peers_for(info_hash, peer, peers_wanted.limit()) + .await; - let swarm_metadata = self.in_memory_torrent_repository.get_swarm_metadata_or_default(info_hash); + let swarm_metadata = self + .in_memory_torrent_repository + .get_swarm_metadata_or_default(info_hash) + .await; AnnounceData { peers, @@ -595,7 +599,7 @@ mod tests { use aquatic_udp_protocol::AnnounceEvent; use torrust_tracker_test_helpers::configuration; - use torrust_tracker_torrent_repository::{LockTrackedTorrent, Swarms}; + use torrust_tracker_torrent_repository::Swarms; use crate::announce_handler::tests::the_announce_handler::peer_ip; use crate::announce_handler::{AnnounceHandler, PeersWanted}; @@ -659,10 +663,10 @@ mod tests { .expect("it should be able to get entry"); // It persists the number of completed peers. 
- assert_eq!(torrent_entry.lock_or_panic().metadata().downloaded, 1); + assert_eq!(torrent_entry.lock().await.metadata().downloaded, 1); // It does not persist the peers - assert!(torrent_entry.lock_or_panic().is_empty()); + assert!(torrent_entry.lock().await.is_empty()); } } diff --git a/packages/tracker-core/src/scrape_handler.rs b/packages/tracker-core/src/scrape_handler.rs index 5d78c7d90..443d989a6 100644 --- a/packages/tracker-core/src/scrape_handler.rs +++ b/packages/tracker-core/src/scrape_handler.rs @@ -112,7 +112,11 @@ impl ScrapeHandler { for info_hash in info_hashes { let swarm_metadata = match self.whitelist_authorization.authorize(info_hash).await { - Ok(()) => self.in_memory_torrent_repository.get_swarm_metadata_or_default(info_hash), + Ok(()) => { + self.in_memory_torrent_repository + .get_swarm_metadata_or_default(info_hash) + .await + } Err(_) => SwarmMetadata::zeroed(), }; scrape_data.add_file(info_hash, swarm_metadata); diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index dec52daac..bc193bd4f 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -90,35 +90,36 @@ impl TorrentsManager { /// 2. If the tracker is configured to remove peerless torrents /// (`remove_peerless_torrents` is set), it removes entire torrent /// entries that have no active peers. 
- pub fn cleanup_torrents(&self) { - self.log_aggregate_swarm_metadata(); + pub async fn cleanup_torrents(&self) { + self.log_aggregate_swarm_metadata().await; - self.remove_inactive_peers(); + self.remove_inactive_peers().await; - self.log_aggregate_swarm_metadata(); + self.log_aggregate_swarm_metadata().await; - self.remove_peerless_torrents(); + self.remove_peerless_torrents().await; - self.log_aggregate_swarm_metadata(); + self.log_aggregate_swarm_metadata().await; } - fn remove_inactive_peers(&self) { + async fn remove_inactive_peers(&self) { let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))) .unwrap_or_default(); - self.in_memory_torrent_repository.remove_inactive_peers(current_cutoff); + self.in_memory_torrent_repository.remove_inactive_peers(current_cutoff).await; } - fn remove_peerless_torrents(&self) { + async fn remove_peerless_torrents(&self) { if self.config.tracker_policy.remove_peerless_torrents { self.in_memory_torrent_repository - .remove_peerless_torrents(&self.config.tracker_policy); + .remove_peerless_torrents(&self.config.tracker_policy) + .await; } } - fn log_aggregate_swarm_metadata(&self) { + async fn log_aggregate_swarm_metadata(&self) { // Pre-calculated data - let aggregate_swarm_metadata = self.in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let aggregate_swarm_metadata = self.in_memory_torrent_repository.get_aggregate_swarm_metadata().await; tracing::info!(name: "pre_calculated_aggregate_swarm_metadata", torrents = aggregate_swarm_metadata.total_torrents, @@ -128,8 +129,8 @@ impl TorrentsManager { ); // Hot data (iterating over data structures) - let peerless_torrents = self.in_memory_torrent_repository.count_peerless_torrents(); - let peers = self.in_memory_torrent_repository.count_peers(); + let peerless_torrents = self.in_memory_torrent_repository.count_peerless_torrents().await; + let peers = self.in_memory_torrent_repository.count_peers().await; 
tracing::info!(name: "hot_aggregate_swarm_metadata", peerless_torrents = peerless_torrents, @@ -144,7 +145,7 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Core; - use torrust_tracker_torrent_repository::{LockTrackedTorrent, Swarms}; + use torrust_tracker_torrent_repository::Swarms; use super::{DatabasePersistentTorrentRepository, TorrentsManager}; use crate::databases::setup::initialize_database; @@ -184,8 +185,8 @@ mod tests { ) } - #[test] - fn it_should_load_the_numbers_of_downloads_for_all_torrents_from_the_database() { + #[tokio::test] + async fn it_should_load_the_numbers_of_downloads_for_all_torrents_from_the_database() { let (torrents_manager, services) = initialize_torrents_manager(); let infohash = sample_info_hash(); @@ -199,7 +200,8 @@ mod tests { .in_memory_torrent_repository .get(&infohash) .unwrap() - .lock_or_panic() + .lock() + .await .metadata() .downloaded, 1 @@ -242,7 +244,7 @@ mod tests { )) .unwrap(); - torrents_manager.cleanup_torrents(); + torrents_manager.cleanup_torrents().await; assert!(services.in_memory_torrent_repository.get(&infohash).is_none()); } @@ -254,7 +256,9 @@ mod tests { let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(infohash, &peer, None).await; // Remove the peer. The torrent is now peerless. 
- in_memory_torrent_repository.remove_inactive_peers(peer.updated.add(Duration::from_secs(1))); + in_memory_torrent_repository + .remove_inactive_peers(peer.updated.add(Duration::from_secs(1))) + .await; } #[tokio::test] @@ -268,7 +272,7 @@ mod tests { add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository).await; - torrents_manager.cleanup_torrents(); + torrents_manager.cleanup_torrents().await; assert!(services.in_memory_torrent_repository.get(&infohash).is_none()); } @@ -284,7 +288,7 @@ mod tests { add_a_peerless_torrent(&infohash, &services.in_memory_torrent_repository).await; - torrents_manager.cleanup_torrents(); + torrents_manager.cleanup_torrents().await; assert!(services.in_memory_torrent_repository.get(&infohash).is_some()); } diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 37d9d3f5c..311480306 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -93,9 +93,10 @@ impl InMemoryTorrentRepository { /// # Panics /// /// This function panics if the underling swarms return an error. - pub(crate) fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + pub(crate) async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { self.swarms .remove_inactive_peers(current_cutoff) + .await .expect("Failed to remove inactive peers from swarms"); } @@ -112,9 +113,10 @@ impl InMemoryTorrentRepository { /// # Panics /// /// This function panics if the underling swarms return an error. 
- pub(crate) fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + pub(crate) async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { self.swarms .remove_peerless_torrents(policy) + .await .expect("Failed to remove peerless torrents from swarms"); } @@ -168,9 +170,10 @@ impl InMemoryTorrentRepository { /// /// This function panics if the underling swarms return an error.s #[must_use] - pub(crate) fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> SwarmMetadata { + pub(crate) async fn get_swarm_metadata_or_default(&self, info_hash: &InfoHash) -> SwarmMetadata { self.swarms .get_swarm_metadata_or_default(info_hash) + .await .expect("Failed to get swarm metadata") } @@ -196,9 +199,10 @@ impl InMemoryTorrentRepository { /// /// This function panics if the underling swarms return an error. #[must_use] - pub(crate) fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { + pub(crate) async fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer, limit: usize) -> Vec> { self.swarms .get_peers_peers_excluding(info_hash, peer, max(limit, TORRENT_PEERS_LIMIT)) + .await .expect("Failed to get other peers in swarm") } @@ -220,10 +224,11 @@ impl InMemoryTorrentRepository { /// /// This function panics if the underling swarms return an error. #[must_use] - pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { + pub async fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { // todo: pass the limit as an argument like `get_peers_for` self.swarms .get_swarm_peers(info_hash, TORRENT_PEERS_LIMIT) + .await .expect("Failed to get other peers in swarm") } @@ -241,9 +246,10 @@ impl InMemoryTorrentRepository { /// /// This function panics if the underling swarms return an error. 
#[must_use] - pub fn get_aggregate_swarm_metadata(&self) -> AggregateSwarmMetadata { + pub async fn get_aggregate_swarm_metadata(&self) -> AggregateSwarmMetadata { self.swarms .get_aggregate_swarm_metadata() + .await .expect("Failed to get aggregate swarm metadata") } @@ -253,9 +259,10 @@ impl InMemoryTorrentRepository { /// /// This function panics if the underling swarms return an error. #[must_use] - pub fn count_peerless_torrents(&self) -> usize { + pub async fn count_peerless_torrents(&self) -> usize { self.swarms .count_peerless_torrents() + .await .expect("Failed to count peerless torrents") } @@ -265,8 +272,8 @@ impl InMemoryTorrentRepository { /// /// This function panics if the underling swarms return an error. #[must_use] - pub fn count_peers(&self) -> usize { - self.swarms.count_peers().expect("Failed to count peers") + pub async fn count_peers(&self) -> usize { + self.swarms.count_peers().await.expect("Failed to count peers") } /// Imports persistent torrent data into the in-memory repository. diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index 14a4f58f5..97694a80f 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -17,7 +17,6 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::peer; -use torrust_tracker_torrent_repository::LockTrackedTorrent; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -94,14 +93,17 @@ pub struct BasicInfo { /// /// This function panics if the lock for the torrent entry cannot be obtained. 
#[must_use] -pub fn get_torrent_info(in_memory_torrent_repository: &Arc, info_hash: &InfoHash) -> Option { +pub async fn get_torrent_info( + in_memory_torrent_repository: &Arc, + info_hash: &InfoHash, +) -> Option { let torrent_entry_option = in_memory_torrent_repository.get(info_hash); let torrent_entry = torrent_entry_option?; - let stats = torrent_entry.lock_or_panic().metadata(); + let stats = torrent_entry.lock().await.metadata(); - let peers = torrent_entry.lock_or_panic().peers(None); + let peers = torrent_entry.lock().await.peers(None); let peers = Some(peers.iter().map(|peer| (**peer)).collect()); @@ -136,14 +138,14 @@ pub fn get_torrent_info(in_memory_torrent_repository: &Arc, pagination: Option<&Pagination>, ) -> Vec { let mut basic_infos: Vec = vec![]; for (info_hash, torrent_entry) in in_memory_torrent_repository.get_paginated(pagination) { - let stats = torrent_entry.lock_or_panic().metadata(); + let stats = torrent_entry.lock().await.metadata(); basic_infos.push(BasicInfo { info_hash, @@ -178,19 +180,21 @@ pub fn get_torrents_page( /// /// This function panics if the lock for the torrent entry cannot be obtained. 
#[must_use] -pub fn get_torrents(in_memory_torrent_repository: &Arc, info_hashes: &[InfoHash]) -> Vec { +pub async fn get_torrents( + in_memory_torrent_repository: &Arc, + info_hashes: &[InfoHash], +) -> Vec { let mut basic_infos: Vec = vec![]; for info_hash in info_hashes { - if let Some(stats) = in_memory_torrent_repository - .get(info_hash) - .map(|torrent_entry| torrent_entry.lock_or_panic().metadata()) - { + if let Some(torrent_entry) = in_memory_torrent_repository.get(info_hash) { + let metadata = torrent_entry.lock().await.metadata(); + basic_infos.push(BasicInfo { info_hash: *info_hash, - seeders: u64::from(stats.complete), - completed: u64::from(stats.downloaded), - leechers: u64::from(stats.incomplete), + seeders: u64::from(metadata.complete), + completed: u64::from(metadata.downloaded), + leechers: u64::from(metadata.incomplete), }); } } @@ -235,7 +239,8 @@ mod tests { let torrent_info = get_torrent_info( &in_memory_torrent_repository, &InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(), // DevSkim: ignore DS173237 - ); + ) + .await; assert!(torrent_info.is_none()); } @@ -250,7 +255,7 @@ mod tests { .upsert_peer(&info_hash, &sample_peer(), None) .await; - let torrent_info = get_torrent_info(&in_memory_torrent_repository, &info_hash).unwrap(); + let torrent_info = get_torrent_info(&in_memory_torrent_repository, &info_hash).await.unwrap(); assert_eq!( torrent_info, @@ -280,7 +285,7 @@ mod tests { async fn it_should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())).await; assert_eq!(torrents, vec![]); } @@ -296,7 +301,7 @@ mod tests { .upsert_peer(&info_hash, &sample_peer(), None) .await; - let torrents = 
get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())).await; assert_eq!( torrents, @@ -329,7 +334,7 @@ mod tests { let offset = 0; let limit = 1; - let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::new(offset, limit))); + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::new(offset, limit))).await; assert_eq!(torrents.len(), 1); } @@ -354,7 +359,7 @@ mod tests { let offset = 1; let limit = 4000; - let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::new(offset, limit))); + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::new(offset, limit))).await; assert_eq!(torrents.len(), 1); assert_eq!( @@ -384,7 +389,7 @@ mod tests { .upsert_peer(&info_hash2, &sample_peer(), None) .await; - let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())); + let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())).await; assert_eq!( torrents, @@ -419,7 +424,7 @@ mod tests { async fn it_should_return_an_empty_list_if_none_of_the_requested_torrents_is_found() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let torrent_info = get_torrents(&in_memory_torrent_repository, &[sample_info_hash()]); + let torrent_info = get_torrents(&in_memory_torrent_repository, &[sample_info_hash()]).await; assert!(torrent_info.is_empty()); } @@ -434,7 +439,7 @@ mod tests { .upsert_peer(&info_hash, &sample_peer(), None) .await; - let torrent_info = get_torrents(&in_memory_torrent_repository, &[info_hash]); + let torrent_info = get_torrents(&in_memory_torrent_repository, &[info_hash]).await; assert_eq!( torrent_info, diff --git a/packages/udp-tracker-core/src/statistics/services.rs b/packages/udp-tracker-core/src/statistics/services.rs index 
c76f02040..20ba2ea7f 100644 --- a/packages/udp-tracker-core/src/statistics/services.rs +++ b/packages/udp-tracker-core/src/statistics/services.rs @@ -63,7 +63,7 @@ pub async fn get_metrics( in_memory_torrent_repository: Arc, stats_repository: Arc, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; let stats = stats_repository.get_stats().await; TrackerMetrics { diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index ba0721289..86e7888f2 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -254,7 +254,8 @@ mod tests { let peers = core_tracker_services .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); + .get_torrent_peers(&info_hash.0.into()) + .await; let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer_id) @@ -348,7 +349,8 @@ mod tests { let peers = core_tracker_services .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); + .get_torrent_peers(&info_hash.0.into()) + .await; assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); } @@ -505,7 +507,8 @@ mod tests { let peers = core_tracker_services .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); + .get_torrent_peers(&info_hash.0.into()) + .await; let external_ip_in_tracker_configuration = core_tracker_services.core_config.net.external_ip.unwrap(); @@ -587,7 +590,8 @@ mod tests { let peers = core_tracker_services .in_memory_torrent_repository - .get_torrent_peers(&info_hash.0.into()); + .get_torrent_peers(&info_hash.0.into()) + .await; let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer_id) @@ -684,7 +688,8 @@ mod tests { let peers = core_tracker_services .in_memory_torrent_repository - 
.get_torrent_peers(&info_hash.0.into()); + .get_torrent_peers(&info_hash.0.into()) + .await; // When using IPv6 the tracker converts the remote client ip into a IPv4 address assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); @@ -940,7 +945,7 @@ mod tests { .await .unwrap(); - let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()); + let peers = in_memory_torrent_repository.get_torrent_peers(&info_hash.0.into()).await; let external_ip_in_tracker_configuration = core_config.net.external_ip.unwrap(); diff --git a/packages/udp-tracker-server/src/statistics/services.rs b/packages/udp-tracker-server/src/statistics/services.rs index a2215067b..c8b24a744 100644 --- a/packages/udp-tracker-server/src/statistics/services.rs +++ b/packages/udp-tracker-server/src/statistics/services.rs @@ -66,7 +66,7 @@ pub async fn get_metrics( ban_service: Arc>, stats_repository: Arc, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata(); + let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; let stats = stats_repository.get_stats().await; let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index 0107b5370..8a3a71a44 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -45,7 +45,7 @@ pub fn start_job(config: &Core, torrents_manager: &Arc) -> Join if let Some(torrents_manager) = weak_torrents_manager.upgrade() { let start_time = Utc::now().time(); tracing::info!("Cleaning up torrents (executed every {} secs) ...", interval_in_secs); - torrents_manager.cleanup_torrents(); + torrents_manager.cleanup_torrents().await; tracing::info!("Cleaned up torrents in: {} ms", (Utc::now().time() - start_time).num_milliseconds()); } else { break; From 1eb545c0fb233bba0206c2557401c2b4c686cc3a Mon Sep 
17 00:00:00 2001 From: Jose Celano Date: Tue, 13 May 2025 09:16:15 +0100 Subject: [PATCH 599/802] feat: [#1358] remove persistent metric from torrent-repository pkg This package dones not have persistence. Persistence is only handle in the `tracker-core` pacakge. The metric will be included there. --- packages/torrent-repository/src/statistics/mod.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs index b0dce479f..fc8f1e1e8 100644 --- a/packages/torrent-repository/src/statistics/mod.rs +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -8,7 +8,6 @@ use torrust_tracker_metrics::metric_name; use torrust_tracker_metrics::unit::Unit; const TORRENT_REPOSITORY_RUNTIME_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_runtime_torrents_downloads_total"; -const TORRENT_REPOSITORY_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_persistent_torrents_downloads_total"; #[must_use] pub fn describe_metrics() -> Metrics { @@ -22,13 +21,5 @@ pub fn describe_metrics() -> Metrics { )), ); - metrics.metric_collection.describe_counter( - &metric_name!(TORRENT_REPOSITORY_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), - Some(Unit::Count), - Some(&MetricDescription::new( - "The total number of torrent downloads since persistent statistics were enabled the first time.", - )), - ); - metrics } From 29a2dfd80dd76176ed517534ae5f0bf75a59c50a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 13 May 2025 13:47:34 +0100 Subject: [PATCH 600/802] dev: change default config Decrease torrent cleanup interval and peer timeout to do manual tests faster. 
--- share/default/config/tracker.development.sqlite3.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index 488743eb9..89d700132 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -7,14 +7,14 @@ schema_version = "2.0.0" threshold = "info" [core] -#inactive_peer_cleanup_interval = 60 +inactive_peer_cleanup_interval = 60 listed = false private = false [core.tracker_policy] -#max_peer_timeout = 30 +max_peer_timeout = 30 persistent_torrent_completed_stat = true -#remove_peerless_torrents = true +remove_peerless_torrents = true [[udp_trackers]] bind_address = "0.0.0.0:6868" From d47483ff065b65f3ab51e27a481bf82c5048e3c6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 13 May 2025 13:52:47 +0100 Subject: [PATCH 601/802] feat: [#1358] new metric in torrent-repository: total number of torrents --- .../src/statistics/event/handler.rs | 4 -- packages/metrics/src/metric_collection.rs | 14 +++++- .../src/statistics/event/handler.rs | 29 ++++++++---- .../src/statistics/metrics.rs | 26 ++++++++++- .../torrent-repository/src/statistics/mod.rs | 7 +++ .../src/statistics/repository.rs | 44 +++++++++++++++++-- packages/torrent-repository/src/swarms.rs | 6 +++ 7 files changed, 111 insertions(+), 19 deletions(-) diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index 8d2ad1aa2..f5506f6e3 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -9,10 +9,6 @@ use crate::event::Event; use crate::statistics::repository::Repository; use crate::statistics::HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; -/// # Panics -/// -/// This function panics if the client IP address is not the same as the IP -/// version of 
the event. pub async fn handle_event(event: Event, stats_repository: &Arc, now: DurationSinceUnixEpoch) { match event { Event::TcpAnnounce { connection, .. } => { diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 438f3b03a..83b08f178 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -140,7 +140,12 @@ impl MetricCollection { /// /// Return an error if a metrics of a different type with the same name /// already exists. - pub fn increase_gauge(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) -> Result<(), Error> { + pub fn increment_gauge( + &mut self, + name: &MetricName, + label_set: &LabelSet, + time: DurationSinceUnixEpoch, + ) -> Result<(), Error> { if self.counters.metrics.contains_key(name) { return Err(Error::MetricNameCollisionAdding { metric_name: name.clone(), @@ -156,7 +161,12 @@ impl MetricCollection { /// /// Return an error if a metrics of a different type with the same name /// already exists. 
- pub fn decrease_gauge(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) -> Result<(), Error> { + pub fn decrement_gauge( + &mut self, + name: &MetricName, + label_set: &LabelSet, + time: DurationSinceUnixEpoch, + ) -> Result<(), Error> { if self.counters.metrics.contains_key(name) { return Err(Error::MetricNameCollisionAdding { metric_name: name.clone(), diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs index 2073575a8..6428bbeb7 100644 --- a/packages/torrent-repository/src/statistics/event/handler.rs +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -1,23 +1,36 @@ use std::sync::Arc; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; use crate::statistics::repository::Repository; +use crate::statistics::TORRENT_REPOSITORY_TORRENTS_TOTAL; -/// # Panics -/// -/// This function panics if the client IP address is not the same as the IP -/// version of the event. -pub async fn handle_event(event: Event, stats_repository: &Arc, _now: DurationSinceUnixEpoch) { +pub async fn handle_event(event: Event, stats_repository: &Arc, now: DurationSinceUnixEpoch) { match event { Event::TorrentAdded { info_hash, .. 
} => { - // todo: update metrics tracing::debug!("Torrent added {info_hash}"); + + match stats_repository + .increment_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increment the gauge: {}", err), + }; } Event::TorrentRemoved { info_hash } => { - // todo: update metrics tracing::debug!("Torrent removed {info_hash}"); + + match stats_repository + .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to decrement the gauge: {}", err), + }; } Event::PeerAdded { announcement } => { // todo: update metrics @@ -28,6 +41,4 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, _now tracing::debug!("Peer removed: socket address {socket_addr:?}, peer ID: {peer_id:?}"); } } - - tracing::debug!("metrics: {:?}", stats_repository.get_metrics().await); } diff --git a/packages/torrent-repository/src/statistics/metrics.rs b/packages/torrent-repository/src/statistics/metrics.rs index 6ee275e63..f8ab3f9d9 100644 --- a/packages/torrent-repository/src/statistics/metrics.rs +++ b/packages/torrent-repository/src/statistics/metrics.rs @@ -15,7 +15,7 @@ impl Metrics { /// # Errors /// /// Returns an error if the metric does not exist and it cannot be created. - pub fn increase_counter( + pub fn increment_counter( &mut self, metric_name: &MetricName, labels: &LabelSet, @@ -36,4 +36,28 @@ impl Metrics { ) -> Result<(), Error> { self.metric_collection.set_gauge(metric_name, labels, value, now) } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. 
+ pub fn increment_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.increment_gauge(metric_name, labels, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn decrement_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.decrement_gauge(metric_name, labels, now) + } } diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs index fc8f1e1e8..f1507b7bb 100644 --- a/packages/torrent-repository/src/statistics/mod.rs +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -7,12 +7,19 @@ use torrust_tracker_metrics::metric::description::MetricDescription; use torrust_tracker_metrics::metric_name; use torrust_tracker_metrics::unit::Unit; +const TORRENT_REPOSITORY_TORRENTS_TOTAL: &str = "torrent_repository_torrents_total"; const TORRENT_REPOSITORY_RUNTIME_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_runtime_torrents_downloads_total"; #[must_use] pub fn describe_metrics() -> Metrics { let mut metrics = Metrics::default(); + metrics.metric_collection.describe_gauge( + &metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of torrents.")), + ); + metrics.metric_collection.describe_counter( &metric_name!(TORRENT_REPOSITORY_RUNTIME_TORRENTS_DOWNLOADS_TOTAL), Some(Unit::Count), diff --git a/packages/torrent-repository/src/statistics/repository.rs b/packages/torrent-repository/src/statistics/repository.rs index 9fdff7008..a8cb8549e 100644 --- a/packages/torrent-repository/src/statistics/repository.rs +++ b/packages/torrent-repository/src/statistics/repository.rs @@ -36,8 +36,8 @@ impl Repository { /// # Errors /// /// This function will return an error if the metric collection fails to 
- /// increase the counter. - pub async fn increase_counter( + /// increment the counter. + pub async fn increment_counter( &self, metric_name: &MetricName, labels: &LabelSet, @@ -45,7 +45,45 @@ impl Repository { ) -> Result<(), Error> { let mut stats_lock = self.stats.write().await; - let result = stats_lock.increase_counter(metric_name, labels, now); + let result = stats_lock.increment_counter(metric_name, labels, now); + + drop(stats_lock); + + result + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increment the gauge. + pub async fn increment_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.increment_gauge(metric_name, labels, now); + + drop(stats_lock); + + result + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// decrement the gauge. 
+ pub async fn decrement_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.decrement_gauge(metric_name, labels, now); drop(stats_lock); diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 277a85cc2..41123fd50 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -299,9 +299,15 @@ impl Swarms { continue; } + let info_hash = *swarm_handle.key(); + swarm_handle.remove(); peerless_torrents_removed += 1; + + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender.send(Event::TorrentRemoved { info_hash }).await; + } } tracing::info!(peerless_torrents_removed = peerless_torrents_removed); From ba2033bf60d3b9e56fe8063d57db648fa39858ce Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 13 May 2025 15:08:40 +0100 Subject: [PATCH 602/802] fix: [#1358] trigger PeerRemoved event when peer is removed due to inactivity --- packages/torrent-repository/src/event.rs | 2 +- .../src/statistics/event/handler.rs | 5 ++- packages/torrent-repository/src/swarm.rs | 38 ++++++++++++++----- packages/torrent-repository/src/swarms.rs | 4 +- .../torrent-repository/tests/swarm/mod.rs | 2 +- 5 files changed, 36 insertions(+), 15 deletions(-) diff --git a/packages/torrent-repository/src/event.rs b/packages/torrent-repository/src/event.rs index 57fe7bc4b..1184714ae 100644 --- a/packages/torrent-repository/src/event.rs +++ b/packages/torrent-repository/src/event.rs @@ -17,7 +17,7 @@ pub enum Event { announcement: PeerAnnouncement, }, PeerRemoved { - socket_addr: SocketAddr, + peer_addr: SocketAddr, peer_id: PeerId, }, } diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs index 6428bbeb7..8022102d9 100644 --- 
a/packages/torrent-repository/src/statistics/event/handler.rs +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -36,7 +36,10 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: // todo: update metrics tracing::debug!("Peer added {announcement:?}"); } - Event::PeerRemoved { socket_addr, peer_id } => { + Event::PeerRemoved { + peer_addr: socket_addr, + peer_id, + } => { // todo: update metrics tracing::debug!("Peer removed: socket address {socket_addr:?}, peer ID: {peer_id:?}"); } diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index d1918bd24..32785cada 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -143,7 +143,7 @@ impl Swarm { if let Some(event_sender) = self.event_sender.as_deref() { event_sender .send(Event::PeerRemoved { - socket_addr: old_peer.peer_addr, + peer_addr: old_peer.peer_addr, peer_id: old_peer.peer_id, }) .await; @@ -155,10 +155,11 @@ impl Swarm { } } - pub fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) -> u64 { - let mut inactive_peers_removed = 0; + pub async fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) -> usize { + let mut number_of_peers_removed = 0; + let mut removed_peers = Vec::new(); - self.peers.retain(|_, peer| { + self.peers.retain(|_key, peer| { let is_active = peer::ReadInfo::get_updated(peer) > current_cutoff; if !is_active { @@ -169,13 +170,30 @@ impl Swarm { self.metadata.incomplete -= 1; } - inactive_peers_removed += 1; + number_of_peers_removed += 1; + + if let Some(_event_sender) = self.event_sender.as_deref() { + // Events can not be trigger here because retain does not allow + // async closures. 
+ removed_peers.push((peer.peer_addr, peer.peer_id)); + } } is_active }); - inactive_peers_removed + if let Some(event_sender) = self.event_sender.as_deref() { + for (peer_addr, peer_id) in &removed_peers { + event_sender + .send(Event::PeerRemoved { + peer_addr: *peer_addr, + peer_id: *peer_id, + }) + .await; + } + } + + number_of_peers_removed } #[must_use] @@ -431,7 +449,7 @@ mod tests { swarm.upsert_peer(peer.into(), &mut downloads_increased).await; // Remove peers not updated since one second after inserting the peer - swarm.remove_inactive(last_update_time + one_second); + swarm.remove_inactive(last_update_time + one_second).await; assert_eq!(swarm.len(), 0); } @@ -448,7 +466,7 @@ mod tests { swarm.upsert_peer(peer.into(), &mut downloads_increased).await; // Remove peers not updated since one second before inserting the peer. - swarm.remove_inactive(last_update_time - one_second); + swarm.remove_inactive(last_update_time - one_second).await; assert_eq!(swarm.len(), 1); } @@ -753,7 +771,7 @@ mod tests { let leechers = swarm.metadata().leechers(); - swarm.remove_inactive(leecher.updated + Duration::from_secs(1)); + swarm.remove_inactive(leecher.updated + Duration::from_secs(1)).await; assert_eq!(swarm.metadata().leechers(), leechers - 1); } @@ -769,7 +787,7 @@ mod tests { let seeders = swarm.metadata().seeders(); - swarm.remove_inactive(seeder.updated + Duration::from_secs(1)); + swarm.remove_inactive(seeder.updated + Duration::from_secs(1)).await; assert_eq!(swarm.metadata().seeders(), seeders - 1); } diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 41123fd50..c74fec3ea 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -259,7 +259,7 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. 
- pub async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> Result { + pub async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> Result { tracing::info!( "Removing inactive peers since: {:?} ...", convert_from_timestamp_to_datetime_utc(current_cutoff) @@ -269,7 +269,7 @@ impl Swarms { for swarm_handle in &self.swarms { let mut swarm = swarm_handle.value().lock().await; - let removed = swarm.remove_inactive(current_cutoff); + let removed = swarm.remove_inactive(current_cutoff).await; inactive_peers_removed += removed; } diff --git a/packages/torrent-repository/tests/swarm/mod.rs b/packages/torrent-repository/tests/swarm/mod.rs index 1f5d0b737..f7ae4b439 100644 --- a/packages/torrent-repository/tests/swarm/mod.rs +++ b/packages/torrent-repository/tests/swarm/mod.rs @@ -390,7 +390,7 @@ async fn it_should_remove_inactive_peers_beyond_cutoff(#[values(swarm())] mut sw assert_eq!(swarm.len(), peers.len() + 1); let current_cutoff = CurrentClock::now_sub(&TIMEOUT).unwrap_or_default(); - swarm.remove_inactive(current_cutoff); + swarm.remove_inactive(current_cutoff).await; assert_eq!(swarm.len(), peers.len()); } From 269d27398975df921a846770e58b4d0a5bfde256 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 13 May 2025 20:14:31 +0100 Subject: [PATCH 603/802] refactor: [#1358] rename metric From `TORRENT_REPOSITORY_RUNTIME_TORRENTS_DOWNLOADS_TOTAL` to `TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL`. None of the metrics in the `torrent-repositry` package will be persisted. We can use the `persitent` sufix for metrics in other packages to avoid conflicts. It's planned to use the same metric in the `tracker-core` package but with the historial persited value. 
--- packages/torrent-repository/src/statistics/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs index f1507b7bb..941d619e9 100644 --- a/packages/torrent-repository/src/statistics/mod.rs +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -8,7 +8,7 @@ use torrust_tracker_metrics::metric_name; use torrust_tracker_metrics::unit::Unit; const TORRENT_REPOSITORY_TORRENTS_TOTAL: &str = "torrent_repository_torrents_total"; -const TORRENT_REPOSITORY_RUNTIME_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_runtime_torrents_downloads_total"; +const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_torrents_downloads_total"; #[must_use] pub fn describe_metrics() -> Metrics { @@ -21,7 +21,7 @@ pub fn describe_metrics() -> Metrics { ); metrics.metric_collection.describe_counter( - &metric_name!(TORRENT_REPOSITORY_RUNTIME_TORRENTS_DOWNLOADS_TOTAL), + &metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), Some(Unit::Count), Some(&MetricDescription::new( "The total number of torrent downloads since the tracker process started.", From 01a9970256c1c4f76ee9efb1bfb5faa886c7fd3d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 May 2025 07:40:22 +0100 Subject: [PATCH 604/802] feat: [#1358] new metric in torrent-repository: total number of peers You can tested it manually with: ``` cargo run -p torrust-tracker-client --bin udp_tracker_client announce udp://127.0.0.1:6969 443c7602b4fde83d1154d6d9da48808418b181b6 | jq curl -s "http://localhost:1212/api/v1/metrics?token=MyAccessToken&format=prometheus" | grep torrent_repository_peers_total Finished `dev` profile [optimized + debuginfo] target(s) in 0.10s Running `target/debug/udp_tracker_client announce 'udp://127.0.0.1:6969' 443c7602b4fde83d1154d6d9da48808418b181b6` { "AnnounceIpv4": { "transaction_id": -888840697, "announce_interval": 120, "leechers": 0, "seeders": 
1, "peers": [] } } torrent_repository_peers_total{peer_role="seeder"} 1 ``` --- packages/torrent-repository/src/event.rs | 10 ++-- .../src/statistics/event/handler.rs | 51 ++++++++++++++----- .../torrent-repository/src/statistics/mod.rs | 16 ++++++ packages/torrent-repository/src/swarm.rs | 24 ++------- 4 files changed, 61 insertions(+), 40 deletions(-) diff --git a/packages/torrent-repository/src/event.rs b/packages/torrent-repository/src/event.rs index 1184714ae..fecb8cd1d 100644 --- a/packages/torrent-repository/src/event.rs +++ b/packages/torrent-repository/src/event.rs @@ -1,8 +1,5 @@ -use std::net::SocketAddr; - -use aquatic_udp_protocol::PeerId; use bittorrent_primitives::info_hash::InfoHash; -use torrust_tracker_primitives::peer::PeerAnnouncement; +use torrust_tracker_primitives::peer::{Peer, PeerAnnouncement}; #[derive(Debug, PartialEq, Eq, Clone)] pub enum Event { @@ -14,11 +11,10 @@ pub enum Event { info_hash: InfoHash, }, PeerAdded { - announcement: PeerAnnouncement, + peer: Peer, }, PeerRemoved { - peer_addr: SocketAddr, - peer_id: PeerId, + peer: Peer, }, } diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs index 8022102d9..e869e7c1a 100644 --- a/packages/torrent-repository/src/statistics/event/handler.rs +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -1,17 +1,17 @@ use std::sync::Arc; -use torrust_tracker_metrics::label::LabelSet; -use torrust_tracker_metrics::metric_name; +use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use torrust_tracker_metrics::{label_name, metric_name}; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; use crate::statistics::repository::Repository; -use crate::statistics::TORRENT_REPOSITORY_TORRENTS_TOTAL; +use crate::statistics::{TORRENT_REPOSITORY_PEERS_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL}; pub async fn handle_event(event: Event, stats_repository: &Arc, now: 
DurationSinceUnixEpoch) { match event { Event::TorrentAdded { info_hash, .. } => { - tracing::debug!("Torrent added {info_hash}"); + tracing::debug!(info_hash = ?info_hash, "Torrent added",); match stats_repository .increment_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) @@ -22,7 +22,7 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: }; } Event::TorrentRemoved { info_hash } => { - tracing::debug!("Torrent removed {info_hash}"); + tracing::debug!(info_hash = ?info_hash, "Torrent removed",); match stats_repository .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) @@ -32,16 +32,39 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: Err(err) => tracing::error!("Failed to decrement the gauge: {}", err), }; } - Event::PeerAdded { announcement } => { - // todo: update metrics - tracing::debug!("Peer added {announcement:?}"); + Event::PeerAdded { peer } => { + tracing::debug!(peer = ?peer, "Peer added", ); + + let label_set: LabelSet = if peer.is_seeder() { + (label_name!("peer_role"), LabelValue::new("seeder")).into() + } else { + (label_name!("peer_role"), LabelValue::new("leecher")).into() + }; + + match stats_repository + .increment_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increment the gauge: {}", err), + }; } - Event::PeerRemoved { - peer_addr: socket_addr, - peer_id, - } => { - // todo: update metrics - tracing::debug!("Peer removed: socket address {socket_addr:?}, peer ID: {peer_id:?}"); + Event::PeerRemoved { peer } => { + tracing::debug!(peer = ?peer, "Peer removed", ); + + let label_set: LabelSet = if peer.is_seeder() { + (label_name!("peer_role"), LabelValue::new("seeder")).into() + } else { + (label_name!("peer_role"), LabelValue::new("leecher")).into() + }; + + match stats_repository + 
.decrement_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to decrement the gauge: {}", err), + }; } } } diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs index 941d619e9..4deaf19cb 100644 --- a/packages/torrent-repository/src/statistics/mod.rs +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -7,13 +7,21 @@ use torrust_tracker_metrics::metric::description::MetricDescription; use torrust_tracker_metrics::metric_name; use torrust_tracker_metrics::unit::Unit; +// Torrent metrics + const TORRENT_REPOSITORY_TORRENTS_TOTAL: &str = "torrent_repository_torrents_total"; const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_torrents_downloads_total"; +// Peers metrics + +const TORRENT_REPOSITORY_PEERS_TOTAL: &str = "torrent_repository_peers_total"; + #[must_use] pub fn describe_metrics() -> Metrics { let mut metrics = Metrics::default(); + // Torrent metrics + metrics.metric_collection.describe_gauge( &metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), Some(Unit::Count), @@ -28,5 +36,13 @@ pub fn describe_metrics() -> Metrics { )), ); + // Peers metrics + + metrics.metric_collection.describe_gauge( + &metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of peers.")), + ); + metrics } diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 32785cada..9832d8b2a 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -117,11 +117,7 @@ impl Swarm { } if let Some(event_sender) = self.event_sender.as_deref() { - event_sender - .send(Event::PeerAdded { - announcement: *announcement, - }) - .await; + event_sender.send(Event::PeerAdded { peer: *announcement }).await; } None @@ -141,12 +137,7 @@ impl Swarm { } if let Some(event_sender) = 
self.event_sender.as_deref() { - event_sender - .send(Event::PeerRemoved { - peer_addr: old_peer.peer_addr, - peer_id: old_peer.peer_id, - }) - .await; + event_sender.send(Event::PeerRemoved { peer: *old_peer.clone() }).await; } Some(old_peer) @@ -175,7 +166,7 @@ impl Swarm { if let Some(_event_sender) = self.event_sender.as_deref() { // Events can not be trigger here because retain does not allow // async closures. - removed_peers.push((peer.peer_addr, peer.peer_id)); + removed_peers.push(*peer.clone()); } } @@ -183,13 +174,8 @@ impl Swarm { }); if let Some(event_sender) = self.event_sender.as_deref() { - for (peer_addr, peer_id) in &removed_peers { - event_sender - .send(Event::PeerRemoved { - peer_addr: *peer_addr, - peer_id: *peer_id, - }) - .await; + for peer in &removed_peers { + event_sender.send(Event::PeerRemoved { peer: *peer }).await; } } From daba8a07ae957c927a6d45591549a10b12a9a582 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 May 2025 12:13:38 +0100 Subject: [PATCH 605/802] feat: [#1358] new metric in torrent-repository: total number of downloads --- packages/primitives/src/peer.rs | 42 +++++++++++ packages/torrent-repository/Cargo.toml | 2 +- packages/torrent-repository/src/event.rs | 7 ++ .../src/statistics/event/handler.rs | 72 +++++++++++++++---- packages/torrent-repository/src/swarm.rs | 15 +++- 5 files changed, 122 insertions(+), 16 deletions(-) diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index bd753b220..316541ad6 100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -27,6 +27,7 @@ use std::ops::{Deref, DerefMut}; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +use derive_more::Display; use serde::Serialize; use zerocopy::FromBytes as _; @@ -34,6 +35,24 @@ use crate::DurationSinceUnixEpoch; pub type PeerAnnouncement = Peer; +#[derive(Debug, Display, Serialize, Copy, Clone, PartialEq, Eq, Hash)] +#[serde(rename_all_fields = 
"lowercase")] +pub enum PeerRole { + Seeder, + Leecher, +} + +impl PeerRole { + /// Returns the opposite role: Seeder becomes Leecher, and vice versa. + #[must_use] + pub fn opposite(self) -> Self { + match self { + PeerRole::Seeder => PeerRole::Leecher, + PeerRole::Leecher => PeerRole::Seeder, + } + } +} + /// Peer struct used by the core `Tracker`. /// /// A sample peer: @@ -147,6 +166,7 @@ impl PartialOrd for Peer { pub trait ReadInfo { fn is_seeder(&self) -> bool; + fn is_leecher(&self) -> bool; fn get_event(&self) -> AnnounceEvent; fn get_id(&self) -> PeerId; fn get_updated(&self) -> DurationSinceUnixEpoch; @@ -158,6 +178,10 @@ impl ReadInfo for Peer { self.left.0.get() <= 0 && self.event != AnnounceEvent::Stopped } + fn is_leecher(&self) -> bool { + !self.is_seeder() + } + fn get_event(&self) -> AnnounceEvent { self.event } @@ -180,6 +204,10 @@ impl ReadInfo for Arc { self.left.0.get() <= 0 && self.event != AnnounceEvent::Stopped } + fn is_leecher(&self) -> bool { + !self.is_seeder() + } + fn get_event(&self) -> AnnounceEvent { self.event } @@ -203,6 +231,20 @@ impl Peer { self.left.0.get() <= 0 && self.event != AnnounceEvent::Stopped } + #[must_use] + pub fn is_leecher(&self) -> bool { + !self.is_seeder() + } + + #[must_use] + pub fn role(&self) -> PeerRole { + if self.is_seeder() { + PeerRole::Seeder + } else { + PeerRole::Leecher + } + } + pub fn ip(&mut self) -> IpAddr { self.peer_addr.ip() } diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 1c7cc09fe..26662b583 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -20,7 +20,7 @@ aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" crossbeam-skiplist = "0" futures = "0" -serde = "1.0.219" +serde = { version = "1.0.219", features = ["derive"] } thiserror = "2.0.12" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-clock = { version = 
"3.0.0-develop", path = "../clock" } diff --git a/packages/torrent-repository/src/event.rs b/packages/torrent-repository/src/event.rs index fecb8cd1d..69d35141f 100644 --- a/packages/torrent-repository/src/event.rs +++ b/packages/torrent-repository/src/event.rs @@ -16,6 +16,13 @@ pub enum Event { PeerRemoved { peer: Peer, }, + PeerUpdated { + old_peer: Peer, + new_peer: Peer, + }, + PeerDownloadCompleted { + peer: Peer, + }, } pub mod sender { diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs index e869e7c1a..5bf4a2f84 100644 --- a/packages/torrent-repository/src/statistics/event/handler.rs +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -2,11 +2,14 @@ use std::sync::Arc; use torrust_tracker_metrics::label::{LabelSet, LabelValue}; use torrust_tracker_metrics::{label_name, metric_name}; +use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; use crate::statistics::repository::Repository; -use crate::statistics::{TORRENT_REPOSITORY_PEERS_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL}; +use crate::statistics::{ + TORRENT_REPOSITORY_PEERS_TOTAL, TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL, +}; pub async fn handle_event(event: Event, stats_repository: &Arc, now: DurationSinceUnixEpoch) { match event { @@ -35,14 +38,8 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: Event::PeerAdded { peer } => { tracing::debug!(peer = ?peer, "Peer added", ); - let label_set: LabelSet = if peer.is_seeder() { - (label_name!("peer_role"), LabelValue::new("seeder")).into() - } else { - (label_name!("peer_role"), LabelValue::new("leecher")).into() - }; - match stats_repository - .increment_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set, now) + .increment_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set_for_peer(&peer), now) .await { 
Ok(()) => {} @@ -52,19 +49,66 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: Event::PeerRemoved { peer } => { tracing::debug!(peer = ?peer, "Peer removed", ); - let label_set: LabelSet = if peer.is_seeder() { - (label_name!("peer_role"), LabelValue::new("seeder")).into() - } else { - (label_name!("peer_role"), LabelValue::new("leecher")).into() + match stats_repository + .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set_for_peer(&peer), now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to decrement the gauge: {}", err), }; + } + Event::PeerUpdated { old_peer, new_peer } => { + tracing::debug!(old_peer = ?old_peer, new_peer = ?new_peer, "Peer updated", ); + + if old_peer.role() != new_peer.role() { + match stats_repository + .increment_gauge( + &metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), + &(label_name!("peer_role"), LabelValue::new(&new_peer.role().to_string())).into(), + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increment the gauge: {}", err), + } + + match stats_repository + .decrement_gauge( + &metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), + &(label_name!("peer_role"), LabelValue::new(&old_peer.role().to_string())).into(), + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to decrement the gauge: {}", err), + }; + } + } + Event::PeerDownloadCompleted { peer } => { + tracing::debug!(peer = ?peer, "Peer download completed", ); match stats_repository - .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set, now) + .increment_counter( + &metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), + &label_set_for_peer(&peer), + now, + ) .await { Ok(()) => {} - Err(err) => tracing::error!("Failed to decrement the gauge: {}", err), + Err(err) => tracing::error!("Failed to increment the gauge: {}", err), }; } } } + +/// Returns the label set to be included in the metrics for the given peer. 
+fn label_set_for_peer(peer: &Peer) -> LabelSet { + if peer.is_seeder() { + (label_name!("peer_role"), LabelValue::new("seeder")).into() + } else { + (label_name!("peer_role"), LabelValue::new("leecher")).into() + } +} diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 9832d8b2a..782726958 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -82,7 +82,7 @@ impl Swarm { if let Some(old_announce) = self.peers.insert(incoming_announce.peer_addr, incoming_announce) { // A peer has been updated in the swarm. - // Check if the peer has changed its from leecher to seeder or vice versa. + // Check if the peer has changed from leecher to seeder or vice versa. if old_announce.is_seeder() != is_now_seeder { if is_now_seeder { self.metadata.complete += 1; @@ -99,6 +99,19 @@ impl Swarm { *downloads_increased = true; } + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerUpdated { + old_peer: *old_announce, + new_peer: *announcement, + }) + .await; + + if *downloads_increased { + event_sender.send(Event::PeerDownloadCompleted { peer: *announcement }).await; + } + } + Some(old_announce) } else { // A new peer has been added to the swarm. 
From c706a1b30915f660ec09a3c28bc4a4a841536a5c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 May 2025 12:24:14 +0100 Subject: [PATCH 606/802] refactor: [#1358] move logs --- .../src/statistics/event/handler.rs | 60 +++++-------------- .../src/statistics/repository.rs | 15 +++++ 2 files changed, 31 insertions(+), 44 deletions(-) diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs index 5bf4a2f84..90df19ab6 100644 --- a/packages/torrent-repository/src/statistics/event/handler.rs +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -16,90 +16,62 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: Event::TorrentAdded { info_hash, .. } => { tracing::debug!(info_hash = ?info_hash, "Torrent added",); - match stats_repository + let _unused = stats_repository .increment_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increment the gauge: {}", err), - }; + .await; } Event::TorrentRemoved { info_hash } => { tracing::debug!(info_hash = ?info_hash, "Torrent removed",); - match stats_repository + let _unused = stats_repository .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to decrement the gauge: {}", err), - }; + .await; } Event::PeerAdded { peer } => { tracing::debug!(peer = ?peer, "Peer added", ); - match stats_repository + let _unused = stats_repository .increment_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set_for_peer(&peer), now) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increment the gauge: {}", err), - }; + .await; } Event::PeerRemoved { peer } => { tracing::debug!(peer = ?peer, "Peer removed", ); - match stats_repository + let _unused = stats_repository 
.decrement_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set_for_peer(&peer), now) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to decrement the gauge: {}", err), - }; + .await; } Event::PeerUpdated { old_peer, new_peer } => { tracing::debug!(old_peer = ?old_peer, new_peer = ?new_peer, "Peer updated", ); if old_peer.role() != new_peer.role() { - match stats_repository + let _unused = stats_repository .increment_gauge( &metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), - &(label_name!("peer_role"), LabelValue::new(&new_peer.role().to_string())).into(), + &label_set_for_peer(&new_peer), now, ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increment the gauge: {}", err), - } + .await; - match stats_repository + let _unused = stats_repository .decrement_gauge( &metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), - &(label_name!("peer_role"), LabelValue::new(&old_peer.role().to_string())).into(), + &label_set_for_peer(&old_peer), now, ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to decrement the gauge: {}", err), - }; + .await; } } Event::PeerDownloadCompleted { peer } => { tracing::debug!(peer = ?peer, "Peer download completed", ); - match stats_repository + let _unused = stats_repository .increment_counter( &metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), &label_set_for_peer(&peer), now, ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increment the gauge: {}", err), - }; + .await; } } } diff --git a/packages/torrent-repository/src/statistics/repository.rs b/packages/torrent-repository/src/statistics/repository.rs index a8cb8549e..1e376faf7 100644 --- a/packages/torrent-repository/src/statistics/repository.rs +++ b/packages/torrent-repository/src/statistics/repository.rs @@ -49,6 +49,11 @@ impl Repository { drop(stats_lock); + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to increment the counter: {}", err), + } + result } @@ -68,6 +73,11 
@@ impl Repository { drop(stats_lock); + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to increment the gauge: {}", err), + } + result } @@ -87,6 +97,11 @@ impl Repository { drop(stats_lock); + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to decrement the gauge: {}", err), + } + result } } From 60c00e8bd575285f5c47e0cf8518574b527b6db7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 May 2025 12:46:53 +0100 Subject: [PATCH 607/802] feat: [#1358] add info-hash to all torrent-repository events To know which swarm the event belongs to. --- packages/torrent-repository/src/event.rs | 4 + .../src/statistics/event/handler.rs | 20 ++-- packages/torrent-repository/src/swarm.rs | 106 +++++++++++------- packages/torrent-repository/src/swarms.rs | 11 +- .../torrent-repository/tests/swarm/mod.rs | 3 +- .../torrent-repository/tests/swarms/mod.rs | 22 ++-- 6 files changed, 104 insertions(+), 62 deletions(-) diff --git a/packages/torrent-repository/src/event.rs b/packages/torrent-repository/src/event.rs index 69d35141f..ac1c06637 100644 --- a/packages/torrent-repository/src/event.rs +++ b/packages/torrent-repository/src/event.rs @@ -11,16 +11,20 @@ pub enum Event { info_hash: InfoHash, }, PeerAdded { + info_hash: InfoHash, peer: Peer, }, PeerRemoved { + info_hash: InfoHash, peer: Peer, }, PeerUpdated { + info_hash: InfoHash, old_peer: Peer, new_peer: Peer, }, PeerDownloadCompleted { + info_hash: InfoHash, peer: Peer, }, } diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs index 90df19ab6..d2783f9ba 100644 --- a/packages/torrent-repository/src/statistics/event/handler.rs +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -27,22 +27,26 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) .await; } - 
Event::PeerAdded { peer } => { - tracing::debug!(peer = ?peer, "Peer added", ); + Event::PeerAdded { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer added", ); let _unused = stats_repository .increment_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set_for_peer(&peer), now) .await; } - Event::PeerRemoved { peer } => { - tracing::debug!(peer = ?peer, "Peer removed", ); + Event::PeerRemoved { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer removed", ); let _unused = stats_repository .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set_for_peer(&peer), now) .await; } - Event::PeerUpdated { old_peer, new_peer } => { - tracing::debug!(old_peer = ?old_peer, new_peer = ?new_peer, "Peer updated", ); + Event::PeerUpdated { + info_hash, + old_peer, + new_peer, + } => { + tracing::debug!(info_hash = ?info_hash, old_peer = ?old_peer, new_peer = ?new_peer, "Peer updated", ); if old_peer.role() != new_peer.role() { let _unused = stats_repository @@ -62,8 +66,8 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: .await; } } - Event::PeerDownloadCompleted { peer } => { - tracing::debug!(peer = ?peer, "Peer download completed", ); + Event::PeerDownloadCompleted { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer download completed", ); let _unused = stats_repository .increment_counter( diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 782726958..3fe0e27d7 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -7,6 +7,7 @@ use std::net::SocketAddr; use std::sync::Arc; use aquatic_udp_protocol::AnnounceEvent; +use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::peer::{self, Peer, PeerAnnouncement}; use 
torrust_tracker_primitives::swarm_metadata::SwarmMetadata; @@ -15,8 +16,9 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::sender::Sender; use crate::event::Event; -#[derive(Clone, Default)] +#[derive(Clone)] pub struct Swarm { + info_hash: InfoHash, peers: BTreeMap>, metadata: SwarmMetadata, event_sender: Sender, @@ -49,8 +51,9 @@ impl Eq for Swarm {} impl Swarm { #[must_use] - pub fn new(downloaded: u32, event_sender: Sender) -> Self { + pub fn new(info_hash: &InfoHash, downloaded: u32, event_sender: Sender) -> Self { Self { + info_hash: *info_hash, peers: BTreeMap::new(), metadata: SwarmMetadata::new(downloaded, 0, 0), event_sender, @@ -102,13 +105,19 @@ impl Swarm { if let Some(event_sender) = self.event_sender.as_deref() { event_sender .send(Event::PeerUpdated { + info_hash: self.info_hash, old_peer: *old_announce, new_peer: *announcement, }) .await; if *downloads_increased { - event_sender.send(Event::PeerDownloadCompleted { peer: *announcement }).await; + event_sender + .send(Event::PeerDownloadCompleted { + info_hash: self.info_hash, + peer: *announcement, + }) + .await; } } @@ -130,7 +139,12 @@ impl Swarm { } if let Some(event_sender) = self.event_sender.as_deref() { - event_sender.send(Event::PeerAdded { peer: *announcement }).await; + event_sender + .send(Event::PeerAdded { + info_hash: self.info_hash, + peer: *announcement, + }) + .await; } None @@ -150,7 +164,12 @@ impl Swarm { } if let Some(event_sender) = self.event_sender.as_deref() { - event_sender.send(Event::PeerRemoved { peer: *old_peer.clone() }).await; + event_sender + .send(Event::PeerRemoved { + info_hash: self.info_hash, + peer: *old_peer.clone(), + }) + .await; } Some(old_peer) @@ -188,7 +207,12 @@ impl Swarm { if let Some(event_sender) = self.event_sender.as_deref() { for peer in &removed_peers { - event_sender.send(Event::PeerRemoved { peer: *peer }).await; + event_sender + .send(Event::PeerRemoved { + info_hash: self.info_hash, + peer: *peer, + }) + .await; } 
} @@ -302,24 +326,25 @@ mod tests { use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::swarm::Swarm; + use crate::tests::sample_info_hash; #[test] fn it_should_be_empty_when_no_peers_have_been_inserted() { - let swarm = Swarm::default(); + let swarm = Swarm::new(&sample_info_hash(), 0, None); assert!(swarm.is_empty()); } #[test] fn it_should_have_zero_length_when_no_peers_have_been_inserted() { - let swarm = Swarm::default(); + let swarm = Swarm::new(&sample_info_hash(), 0, None); assert_eq!(swarm.len(), 0); } #[tokio::test] async fn it_should_allow_inserting_a_new_peer() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); @@ -329,7 +354,7 @@ mod tests { #[tokio::test] async fn it_should_allow_updating_a_preexisting_peer() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); @@ -344,7 +369,7 @@ mod tests { #[tokio::test] async fn it_should_allow_getting_all_peers() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); @@ -356,7 +381,7 @@ mod tests { #[tokio::test] async fn it_should_allow_getting_one_peer_by_id() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); @@ -368,7 +393,7 @@ mod tests { #[tokio::test] async fn it_should_increase_the_number_of_peers_after_inserting_a_new_one() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); @@ -380,7 +405,7 @@ mod tests { #[tokio::test] async fn 
it_should_decrease_the_number_of_peers_after_removing_one() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); @@ -394,7 +419,7 @@ mod tests { #[tokio::test] async fn it_should_allow_removing_an_existing_peer() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer = PeerBuilder::default().build(); @@ -409,7 +434,7 @@ mod tests { #[tokio::test] async fn it_should_allow_removing_a_non_existing_peer() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let peer = PeerBuilder::default().build(); @@ -418,7 +443,7 @@ mod tests { #[tokio::test] async fn it_should_allow_getting_all_peers_excluding_peers_with_a_given_address() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer1 = PeerBuilder::default() @@ -438,7 +463,7 @@ mod tests { #[tokio::test] async fn it_should_remove_inactive_peers() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let one_second = DurationSinceUnixEpoch::new(1, 0); @@ -455,7 +480,7 @@ mod tests { #[tokio::test] async fn it_should_not_remove_active_peers() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let one_second = DurationSinceUnixEpoch::new(1, 0); @@ -475,20 +500,21 @@ mod tests { use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use crate::tests::sample_info_hash; use crate::Swarm; fn empty_swarm() -> Swarm { - Swarm::default() + Swarm::new(&sample_info_hash(), 0, None) } async fn not_empty_swarm() -> Swarm { - let mut swarm = Swarm::default(); + let mut swarm = 
Swarm::new(&sample_info_hash(), 0, None); swarm.upsert_peer(PeerBuilder::default().build().into(), &mut false).await; swarm } async fn not_empty_swarm_with_downloads() -> Swarm { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut peer = PeerBuilder::leecher().build(); let mut downloads_increased = false; @@ -571,7 +597,7 @@ mod tests { #[tokio::test] async fn it_should_allow_inserting_two_identical_peers_except_for_the_socket_address() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let peer1 = PeerBuilder::default() @@ -589,7 +615,7 @@ mod tests { #[tokio::test] async fn it_should_not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; // When that happens the peer ID will be changed in the swarm. 
@@ -612,7 +638,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_metadata() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); @@ -633,7 +659,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_number_of_seeders_in_the_list() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); @@ -649,7 +675,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_number_of_leechers_in_the_list() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); @@ -669,10 +695,11 @@ mod tests { use torrust_tracker_primitives::peer::fixture::PeerBuilder; use crate::swarm::Swarm; + use crate::tests::sample_info_hash; #[tokio::test] async fn it_should_increase_the_number_of_leechers_if_the_new_peer_is_a_leecher_() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let leechers = swarm.metadata().leechers(); @@ -686,7 +713,7 @@ mod tests { #[tokio::test] async fn it_should_increase_the_number_of_seeders_if_the_new_peer_is_a_seeder() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let seeders = swarm.metadata().seeders(); @@ -701,7 +728,7 @@ mod tests { #[tokio::test] async fn it_should_not_increasing_the_number_of_downloads_if_the_new_peer_has_completed_downloading_as_it_was_not_previously_known( ) { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let downloads = swarm.metadata().downloads(); @@ -718,10 +745,11 @@ mod tests { use 
torrust_tracker_primitives::peer::fixture::PeerBuilder; use crate::swarm::Swarm; + use crate::tests::sample_info_hash; #[tokio::test] async fn it_should_decrease_the_number_of_leechers_if_the_removed_peer_was_a_leecher() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let leecher = PeerBuilder::leecher().build(); @@ -737,7 +765,7 @@ mod tests { #[tokio::test] async fn it_should_decrease_the_number_of_seeders_if_the_removed_peer_was_a_seeder() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); @@ -758,10 +786,11 @@ mod tests { use torrust_tracker_primitives::peer::fixture::PeerBuilder; use crate::swarm::Swarm; + use crate::tests::sample_info_hash; #[tokio::test] async fn it_should_decrease_the_number_of_leechers_when_a_removed_peer_is_a_leecher() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let leecher = PeerBuilder::leecher().build(); @@ -777,7 +806,7 @@ mod tests { #[tokio::test] async fn it_should_decrease_the_number_of_seeders_when_the_removed_peer_is_a_seeder() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); @@ -797,10 +826,11 @@ mod tests { use torrust_tracker_primitives::peer::fixture::PeerBuilder; use crate::swarm::Swarm; + use crate::tests::sample_info_hash; #[tokio::test] async fn it_should_increase_seeders_and_decreasing_leechers_when_the_peer_changes_from_leecher_to_seeder_() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); @@ -820,7 +850,7 @@ mod tests { #[tokio::test] async fn 
it_should_increase_leechers_and_decreasing_seeders_when_the_peer_changes_from_seeder_to_leecher() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let mut peer = PeerBuilder::seeder().build(); @@ -840,7 +870,7 @@ mod tests { #[tokio::test] async fn it_should_increase_the_number_of_downloads_when_the_peer_announces_completed_downloading() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); @@ -858,7 +888,7 @@ mod tests { #[tokio::test] async fn it_should_not_increasing_the_number_of_downloads_when_the_peer_announces_completed_downloading_twice_() { - let mut swarm = Swarm::default(); + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index c74fec3ea..3200d77ff 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -58,11 +58,10 @@ impl Swarms { ) -> Result { let swarm_handle = match self.swarms.get(info_hash) { None => { - let new_swarm_handle = if let Some(number_of_downloads) = opt_persistent_torrent { - SwarmHandle::new(Swarm::new(number_of_downloads, self.event_sender.clone()).into()) - } else { - SwarmHandle::default() - }; + let number_of_downloads = opt_persistent_torrent.unwrap_or_default(); + + let new_swarm_handle = + SwarmHandle::new(Swarm::new(info_hash, number_of_downloads, self.event_sender.clone()).into()); let new_swarm_handle = self.swarms.get_or_insert(*info_hash, new_swarm_handle); @@ -330,7 +329,7 @@ impl Swarms { continue; } - let entry = SwarmHandle::new(Swarm::new(*completed, self.event_sender.clone()).into()); + let entry = SwarmHandle::new(Swarm::new(info_hash, *completed, 
self.event_sender.clone()).into()); // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. diff --git a/packages/torrent-repository/tests/swarm/mod.rs b/packages/torrent-repository/tests/swarm/mod.rs index f7ae4b439..cb4009ba9 100644 --- a/packages/torrent-repository/tests/swarm/mod.rs +++ b/packages/torrent-repository/tests/swarm/mod.rs @@ -3,6 +3,7 @@ use std::ops::Sub; use std::time::Duration; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +use bittorrent_primitives::info_hash::InfoHash; use rstest::{fixture, rstest}; use torrust_tracker_clock::clock::stopped::Stopped as _; use torrust_tracker_clock::clock::{self, Time as _}; @@ -16,7 +17,7 @@ use crate::CurrentClock; #[fixture] fn swarm() -> Swarm { - Swarm::default() + Swarm::new(&InfoHash::default(), 0, None) } #[fixture] diff --git a/packages/torrent-repository/tests/swarms/mod.rs b/packages/torrent-repository/tests/swarms/mod.rs index d8ee354c8..780d6cd4c 100644 --- a/packages/torrent-repository/tests/swarms/mod.rs +++ b/packages/torrent-repository/tests/swarms/mod.rs @@ -14,6 +14,10 @@ use torrust_tracker_torrent_repository::Swarms; use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; +fn swarm() -> Swarm { + Swarm::new(&InfoHash::default(), 0, None) +} + #[fixture] fn swarms() -> Swarms { Swarms::default() @@ -28,26 +32,26 @@ fn empty() -> Entries { #[fixture] fn default() -> Entries { - vec![(InfoHash::default(), Swarm::default())] + vec![(InfoHash::default(), swarm())] } #[fixture] async fn started() -> Entries { - let mut swarm = Swarm::default(); + let mut swarm = swarm(); swarm.handle_announcement(&a_started_peer(1)).await; vec![(InfoHash::default(), swarm)] } #[fixture] async fn completed() -> Entries { - let mut swarm = Swarm::default(); + let mut swarm = swarm(); swarm.handle_announcement(&a_completed_peer(2)).await; vec![(InfoHash::default(), swarm)] } #[fixture] async fn downloaded() -> Entries { - let mut swarm = 
Swarm::default(); + let mut swarm = swarm(); let mut peer = a_started_peer(3); swarm.handle_announcement(&peer).await; peer.event = AnnounceEvent::Completed; @@ -58,17 +62,17 @@ async fn downloaded() -> Entries { #[fixture] async fn three() -> Entries { - let mut started = Swarm::default(); + let mut started = swarm(); let started_h = &mut DefaultHasher::default(); started.handle_announcement(&a_started_peer(1)).await; started.hash(started_h); - let mut completed = Swarm::default(); + let mut completed = swarm(); let completed_h = &mut DefaultHasher::default(); completed.handle_announcement(&a_completed_peer(2)).await; completed.hash(completed_h); - let mut downloaded = Swarm::default(); + let mut downloaded = swarm(); let downloaded_h = &mut DefaultHasher::default(); let mut downloaded_peer = a_started_peer(3); downloaded.handle_announcement(&downloaded_peer).await; @@ -89,7 +93,7 @@ async fn many_out_of_order() -> Entries { let mut entries: HashSet<(InfoHash, Swarm)> = HashSet::default(); for i in 0..408 { - let mut entry = Swarm::default(); + let mut entry = swarm(); entry.handle_announcement(&a_started_peer(i)).await; entries.insert((InfoHash::from(&i), entry)); @@ -104,7 +108,7 @@ async fn many_hashed_in_order() -> Entries { let mut entries: BTreeMap = BTreeMap::default(); for i in 0..408 { - let mut entry = Swarm::default(); + let mut entry = swarm(); entry.handle_announcement(&a_started_peer(i)).await; let hash: &mut DefaultHasher = &mut DefaultHasher::default(); From dfba00c7c2fe641e486d2fbeb023cd099db3e567 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 May 2025 13:15:14 +0100 Subject: [PATCH 608/802] feat: [#1358] allow disabling the event sender in the torrent-repository pkg --- .../src/environment.rs | 4 +- .../axum-http-tracker-server/src/server.rs | 6 ++- .../src/v1/handlers/announce.rs | 2 +- .../src/v1/handlers/scrape.rs | 2 +- .../src/environment.rs | 4 +- packages/events/src/bus.rs | 46 ++++++++++++++----- 
.../http-tracker-core/benches/helpers/util.rs | 2 +- packages/http-tracker-core/src/container.rs | 6 ++- .../src/services/announce.rs | 2 +- .../http-tracker-core/src/services/scrape.rs | 6 ++- .../src/statistics/services.rs | 2 +- .../rest-tracker-api-core/src/container.rs | 4 +- .../src/statistics/services.rs | 2 +- packages/torrent-repository/src/container.rs | 7 +-- .../udp-tracker-core/benches/helpers/sync.rs | 3 +- packages/udp-tracker-core/src/container.rs | 6 ++- .../udp-tracker-core/src/services/connect.rs | 7 +-- packages/udp-tracker-server/src/container.rs | 2 +- .../udp-tracker-server/src/environment.rs | 4 +- .../src/handlers/announce.rs | 14 ++++-- .../src/handlers/connect.rs | 22 ++++++--- .../udp-tracker-server/src/handlers/mod.rs | 8 +++- .../udp-tracker-server/src/handlers/scrape.rs | 3 +- src/container.rs | 4 +- 24 files changed, 118 insertions(+), 50 deletions(-) diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index 078bda9e5..10dada2db 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -144,7 +144,9 @@ impl EnvContainer { .expect("missing HTTP tracker configuration"); let http_tracker_config = Arc::new(http_tracker_config[0].clone()); - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + configuration.core.tracker_usage_statistics.into(), + )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( &core_config, diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 3904449fa..f7d1ed7ea 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -280,7 +280,7 @@ mod tests { let http_core_broadcaster = Broadcaster::default(); let 
http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - configuration.core.tracker_usage_statistics, + configuration.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); @@ -290,7 +290,9 @@ mod tests { let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); } - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + configuration.core.tracker_usage_statistics.into(), + )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( &core_config, diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 7489211a9..7d7a0b386 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -168,7 +168,7 @@ mod tests { let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - config.core.tracker_usage_statistics, + config.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index 330e7c13e..8decfe95c 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -139,7 +139,7 @@ mod tests { let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - config.core.tracker_usage_statistics, + config.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs 
b/packages/axum-rest-tracker-api-server/src/environment.rs index e4a83d15d..92ca5a2d1 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -173,7 +173,9 @@ impl EnvContainer { .clone(), ); - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( &core_config, diff --git a/packages/events/src/bus.rs b/packages/events/src/bus.rs index d53f29b8d..b42fb4fc5 100644 --- a/packages/events/src/bus.rs +++ b/packages/events/src/bus.rs @@ -3,36 +3,60 @@ use std::sync::Arc; use crate::broadcaster::Broadcaster; use crate::{receiver, sender}; +#[derive(Clone, Debug)] +pub enum SenderStatus { + Enabled, + Disabled, +} + +impl From for SenderStatus { + fn from(enabled: bool) -> Self { + if enabled { + Self::Enabled + } else { + Self::Disabled + } + } +} + +impl From for bool { + fn from(sender_status: SenderStatus) -> Self { + match sender_status { + SenderStatus::Enabled => true, + SenderStatus::Disabled => false, + } + } +} + #[derive(Clone, Debug)] pub struct EventBus { - pub enable_sender: bool, + pub sender_status: SenderStatus, pub broadcaster: Broadcaster, } impl Default for EventBus { fn default() -> Self { - let enable_sender = true; + let sender_status = SenderStatus::Enabled; let broadcaster = Broadcaster::::default(); - Self::new(enable_sender, broadcaster) + Self::new(sender_status, broadcaster) } } impl EventBus { #[must_use] - pub fn new(enable_sender: bool, broadcaster: Broadcaster) -> Self { + pub fn new(sender_status: SenderStatus, broadcaster: Broadcaster) -> Self { Self { - enable_sender, + sender_status, broadcaster, } } #[must_use] pub fn sender(&self) -> Option>> { - if self.enable_sender { - Some(Arc::new(self.broadcaster.clone())) 
- } else { - None + match self.sender_status { + SenderStatus::Enabled => Some(Arc::new(self.broadcaster.clone())), + SenderStatus::Disabled => None, } } @@ -50,14 +74,14 @@ mod tests { #[tokio::test] async fn it_should_provide_an_event_sender_when_enabled() { - let bus = EventBus::::new(true, Broadcaster::default()); + let bus = EventBus::::new(SenderStatus::Enabled, Broadcaster::default()); assert!(bus.sender().is_some()); } #[tokio::test] async fn it_should_not_provide_event_sender_when_disabled() { - let bus = EventBus::::new(false, Broadcaster::default()); + let bus = EventBus::::new(SenderStatus::Disabled, Broadcaster::default()); assert!(bus.sender().is_none()); } diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index 7ee91a2c4..cfb3f745f 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -62,7 +62,7 @@ pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> ( let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - config.core.tracker_usage_statistics, + config.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 922273610..f063c0061 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -27,7 +27,9 @@ pub struct HttpTrackerCoreContainer { impl HttpTrackerCoreContainer { #[must_use] pub fn initialize(core_config: &Arc, http_tracker_config: &Arc) -> Arc { - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); let tracker_core_container = 
Arc::new(TrackerCoreContainer::initialize_from( core_config, @@ -80,7 +82,7 @@ impl HttpTrackerCoreServices { let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - tracker_core_container.core_config.tracker_usage_statistics, + tracker_core_container.core_config.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index e0f387273..9f39a04e4 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -256,7 +256,7 @@ mod tests { let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - config.core.tracker_usage_statistics, + config.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 70e30099c..3da1aa88f 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -255,6 +255,7 @@ mod tests { use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{ClientIpSources, RemoteClientAddr, ResolvedIp}; use bittorrent_tracker_core::announce_handler::PeersWanted; use mockall::predicate::eq; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; @@ -276,7 +277,7 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); - let http_stats_event_bus = Arc::new(EventBus::new(false, http_core_broadcaster.clone())); + let http_stats_event_bus = 
Arc::new(EventBus::new(SenderStatus::Disabled, http_core_broadcaster.clone())); let http_stats_event_sender = http_stats_event_bus.sender(); @@ -446,6 +447,7 @@ mod tests { use bittorrent_http_tracker_protocol::v1::services::peer_ip_resolver::{ClientIpSources, RemoteClientAddr, ResolvedIp}; use bittorrent_tracker_core::announce_handler::PeersWanted; use mockall::predicate::eq; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::core::ScrapeData; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use torrust_tracker_test_helpers::configuration; @@ -468,7 +470,7 @@ mod tests { // HTTP core stats let http_core_broadcaster = Broadcaster::default(); - let http_stats_event_bus = Arc::new(EventBus::new(false, http_core_broadcaster.clone())); + let http_stats_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, http_core_broadcaster.clone())); let http_stats_event_sender = http_stats_event_bus.sender(); diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index 3c8a4fa43..af1e30524 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -96,7 +96,7 @@ mod tests { let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - config.core.tracker_usage_statistics, + config.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/rest-tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs index e9a622e04..1c4a08e26 100644 --- a/packages/rest-tracker-api-core/src/container.rs +++ b/packages/rest-tracker-api-core/src/container.rs @@ -36,7 +36,9 @@ impl TrackerHttpApiCoreContainer { udp_tracker_config: &Arc, http_api_config: &Arc, ) -> Arc { - let torrent_repository_container = 
Arc::new(TorrentRepositoryContainer::initialize()); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( core_config, diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index aad31a323..d05a35981 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -160,7 +160,7 @@ mod tests { let http_core_broadcaster = Broadcaster::default(); let http_stats_repository = Arc::new(Repository::new()); let http_stats_event_bus = Arc::new(EventBus::new( - config.core.tracker_usage_statistics, + config.core.tracker_usage_statistics.into(), http_core_broadcaster.clone(), )); diff --git a/packages/torrent-repository/src/container.rs b/packages/torrent-repository/src/container.rs index 50a6b8b9c..d185180b1 100644 --- a/packages/torrent-repository/src/container.rs +++ b/packages/torrent-repository/src/container.rs @@ -1,5 +1,7 @@ use std::sync::Arc; +use torrust_tracker_events::bus::SenderStatus; + use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; use crate::event::{self}; @@ -15,13 +17,12 @@ pub struct TorrentRepositoryContainer { impl TorrentRepositoryContainer { #[must_use] - pub fn initialize() -> Self { + pub fn initialize(sender_status: SenderStatus) -> Self { // Torrent repository stats let broadcaster = Broadcaster::default(); let stats_repository = Arc::new(Repository::new()); - // todo: add a config option to enable/disable stats for this package - let event_bus = Arc::new(EventBus::new(true, broadcaster.clone())); + let event_bus = Arc::new(EventBus::new(sender_status, broadcaster.clone())); let stats_event_sender = event_bus.sender(); diff --git a/packages/udp-tracker-core/benches/helpers/sync.rs 
b/packages/udp-tracker-core/benches/helpers/sync.rs index 1814a865e..e8ec1ce03 100644 --- a/packages/udp-tracker-core/benches/helpers/sync.rs +++ b/packages/udp-tracker-core/benches/helpers/sync.rs @@ -5,6 +5,7 @@ use std::time::{Duration, Instant}; use bittorrent_udp_tracker_core::event::bus::EventBus; use bittorrent_udp_tracker_core::event::sender::Broadcaster; use bittorrent_udp_tracker_core::services::connect::ConnectService; +use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::helpers::utils::{sample_ipv4_remote_addr, sample_issue_time}; @@ -16,7 +17,7 @@ pub async fn connect_once(samples: u64) -> Duration { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = event_bus.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index 2b6567ec0..07a8a09ef 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -32,7 +32,9 @@ pub struct UdpTrackerCoreContainer { impl UdpTrackerCoreContainer { #[must_use] pub fn initialize(core_config: &Arc, udp_tracker_config: &Arc) -> Arc { - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( core_config, @@ -91,7 +93,7 @@ impl UdpTrackerCoreServices { let udp_core_broadcaster = Broadcaster::default(); let 
udp_core_stats_repository = Arc::new(Repository::new()); let event_bus = Arc::new(EventBus::new( - tracker_core_container.core_config.tracker_usage_statistics, + tracker_core_container.core_config.tracker_usage_statistics.into(), udp_core_broadcaster.clone(), )); diff --git a/packages/udp-tracker-core/src/services/connect.rs b/packages/udp-tracker-core/src/services/connect.rs index 18c9fd0ba..6ba36f274 100644 --- a/packages/udp-tracker-core/src/services/connect.rs +++ b/packages/udp-tracker-core/src/services/connect.rs @@ -61,6 +61,7 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::connection_cookie::make; @@ -79,7 +80,7 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = event_bus.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); @@ -100,7 +101,7 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = event_bus.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); @@ -122,7 +123,7 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); 
+ let event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = event_bus.sender(); let connect_service = Arc::new(ConnectService::new(udp_core_stats_event_sender)); diff --git a/packages/udp-tracker-server/src/container.rs b/packages/udp-tracker-server/src/container.rs index a0bc8f35b..365db4ca7 100644 --- a/packages/udp-tracker-server/src/container.rs +++ b/packages/udp-tracker-server/src/container.rs @@ -39,7 +39,7 @@ impl UdpTrackerServerServices { let udp_server_broadcaster = Broadcaster::default(); let udp_server_stats_repository = Arc::new(Repository::new()); let udp_server_stats_event_bus = Arc::new(EventBus::new( - core_config.tracker_usage_statistics, + core_config.tracker_usage_statistics.into(), udp_server_broadcaster.clone(), )); diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 6dae3d860..f92d5dd29 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -175,7 +175,9 @@ impl EnvContainer { let udp_tracker_configurations = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); let udp_tracker_config = Arc::new(udp_tracker_configurations[0].clone()); - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( &core_config, diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 86e7888f2..65b521f27 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -206,6 +206,7 @@ mod tests { use 
bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use mockall::predicate::eq; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event, UdpRequestKind}; @@ -378,7 +379,10 @@ mod tests { core_udp_tracker_services: Arc, ) -> Response { let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); + let event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); let udp_server_stats_event_sender = event_bus.sender(); @@ -542,6 +546,7 @@ mod tests { use bittorrent_udp_tracker_core::services::announce::AnnounceService; use mockall::predicate::eq; use torrust_tracker_configuration::Core; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event, UdpRequestKind}; @@ -718,11 +723,14 @@ mod tests { whitelist_authorization: Arc, ) -> Response { let udp_core_broadcaster = Broadcaster::default(); - let core_event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let core_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); let udp_server_stats_event_sender = server_event_bus.sender(); diff --git 
a/packages/udp-tracker-server/src/handlers/connect.rs b/packages/udp-tracker-server/src/handlers/connect.rs index 1244a6a3b..961189945 100644 --- a/packages/udp-tracker-server/src/handlers/connect.rs +++ b/packages/udp-tracker-server/src/handlers/connect.rs @@ -63,6 +63,7 @@ mod tests { use bittorrent_udp_tracker_core::event::sender::Broadcaster; use bittorrent_udp_tracker_core::services::connect::ConnectService; use mockall::predicate::eq; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event, UdpRequestKind}; @@ -84,11 +85,14 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let core_event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let core_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); let udp_server_stats_event_sender = server_event_bus.sender(); @@ -123,11 +127,14 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let core_event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let core_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_event_bus = 
Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); let udp_server_stats_event_sender = server_event_bus.sender(); @@ -162,12 +169,15 @@ mod tests { let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); let udp_core_broadcaster = Broadcaster::default(); - let core_event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + let core_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); let udp_server_stats_event_sender = server_event_bus.sender(); diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index d39ad0972..ca834c006 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -227,6 +227,7 @@ pub(crate) mod tests { use mockall::mock; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{Configuration, Core}; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_events::sender::SendError; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; @@ -287,11 +288,14 @@ pub(crate) mod tests { let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); let udp_core_broadcaster = Broadcaster::default(); - let core_event_bus = Arc::new(EventBus::new(false, udp_core_broadcaster.clone())); + 
let core_event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_core_broadcaster.clone())); let udp_core_stats_event_sender = core_event_bus.sender(); let udp_server_broadcaster = crate::event::sender::Broadcaster::default(); - let server_event_bus = Arc::new(crate::event::bus::EventBus::new(false, udp_server_broadcaster.clone())); + let server_event_bus = Arc::new(crate::event::bus::EventBus::new( + SenderStatus::Disabled, + udp_server_broadcaster.clone(), + )); let udp_server_stats_event_sender = server_event_bus.sender(); diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index 34d5a5ce2..e35e118b4 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -92,6 +92,7 @@ mod tests { }; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::bus::EventBus; @@ -183,7 +184,7 @@ mod tests { core_udp_tracker_services: Arc, ) -> Response { let udp_server_broadcaster = Broadcaster::default(); - let event_bus = Arc::new(EventBus::new(false, udp_server_broadcaster.clone())); + let event_bus = Arc::new(EventBus::new(SenderStatus::Disabled, udp_server_broadcaster.clone())); let udp_server_stats_event_sender = event_bus.sender(); diff --git a/src/container.rs b/src/container.rs index 273425fc1..98c455780 100644 --- a/src/container.rs +++ b/src/container.rs @@ -60,7 +60,9 @@ impl AppContainer { // Torrent Repository - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize()); + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); // Core From 
8ee258eee3f16aeceb6166185b71325263dd0ff8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 May 2025 14:11:37 +0100 Subject: [PATCH 609/802] refactor: [#1358] use the new field info-hash as ID for the Swarm (Hash,PartialEq) --- packages/torrent-repository/src/swarm.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 3fe0e27d7..2ad216a61 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -36,14 +36,13 @@ impl Debug for Swarm { impl Hash for Swarm { fn hash(&self, state: &mut H) { - self.peers.hash(state); - self.metadata.hash(state); + self.info_hash.hash(state); } } impl PartialEq for Swarm { fn eq(&self, other: &Self) -> bool { - self.peers == other.peers && self.metadata == other.metadata + self.info_hash == other.info_hash } } From c9a893c876546562c484131acba77034249b5008 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 May 2025 14:12:01 +0100 Subject: [PATCH 610/802] refactor: [#1358] rename metrics for clarity There are two concepts: - Unique peers: phisical client with different socket address. - Peer connections: a client (peer) can particiapte in multiple swarms. Current metrics count the second, meaning the peer would be counted doubled if it particiaptes in two swarms. 
--- .../src/statistics/event/handler.rs | 18 +++++++++++++----- .../torrent-repository/src/statistics/mod.rs | 17 +++++++++++++---- 2 files changed, 26 insertions(+), 9 deletions(-) diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs index d2783f9ba..2fd7271cc 100644 --- a/packages/torrent-repository/src/statistics/event/handler.rs +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -8,7 +8,7 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; use crate::statistics::repository::Repository; use crate::statistics::{ - TORRENT_REPOSITORY_PEERS_TOTAL, TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL, + TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL, TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL, }; pub async fn handle_event(event: Event, stats_repository: &Arc, now: DurationSinceUnixEpoch) { @@ -31,14 +31,22 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer added", ); let _unused = stats_repository - .increment_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set_for_peer(&peer), now) + .increment_gauge( + &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), + &label_set_for_peer(&peer), + now, + ) .await; } Event::PeerRemoved { info_hash, peer } => { tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer removed", ); let _unused = stats_repository - .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), &label_set_for_peer(&peer), now) + .decrement_gauge( + &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), + &label_set_for_peer(&peer), + now, + ) .await; } Event::PeerUpdated { @@ -51,7 +59,7 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: if old_peer.role() != new_peer.role() { let _unused = stats_repository .increment_gauge( - 
&metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), + &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), &label_set_for_peer(&new_peer), now, ) @@ -59,7 +67,7 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: let _unused = stats_repository .decrement_gauge( - &metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), + &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), &label_set_for_peer(&old_peer), now, ) diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs index 4deaf19cb..18dcf83ea 100644 --- a/packages/torrent-repository/src/statistics/mod.rs +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -14,7 +14,8 @@ const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_to // Peers metrics -const TORRENT_REPOSITORY_PEERS_TOTAL: &str = "torrent_repository_peers_total"; +const TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL: &str = "torrent_repository_peer_connections_total"; +const TORRENT_REPOSITORY_UNIQUE_PEERS_TOTAL: &str = "torrent_repository_unique_peers_total"; // todo: not implemented yet #[must_use] pub fn describe_metrics() -> Metrics { @@ -32,16 +33,24 @@ pub fn describe_metrics() -> Metrics { &metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), Some(Unit::Count), Some(&MetricDescription::new( - "The total number of torrent downloads since the tracker process started.", + "The total number of torrent downloads (since the tracker process started).", )), ); // Peers metrics metrics.metric_collection.describe_gauge( - &metric_name!(TORRENT_REPOSITORY_PEERS_TOTAL), + &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of peers.")), + Some(&MetricDescription::new( + "The total number of peer connections (one connection per torrent).", + )), + ); + + metrics.metric_collection.describe_gauge( + &metric_name!(TORRENT_REPOSITORY_UNIQUE_PEERS_TOTAL), + Some(Unit::Count), + 
Some(&MetricDescription::new("The total number of unique peers.")), ); metrics From 0e38707fda29f54ae8cad8e2d19b737c97d77843 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 May 2025 15:43:01 +0100 Subject: [PATCH 611/802] fix: [#1358] revert Hash impl for Swarm To fix broken tests. This implementation will kept for now. I think it's only used for testing and I'm planning to remvoe all integration tests becuase now web have unit tests covering the same functionality. --- packages/torrent-repository/src/swarm.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 2ad216a61..3fe0e27d7 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -36,13 +36,14 @@ impl Debug for Swarm { impl Hash for Swarm { fn hash(&self, state: &mut H) { - self.info_hash.hash(state); + self.peers.hash(state); + self.metadata.hash(state); } } impl PartialEq for Swarm { fn eq(&self, other: &Self) -> bool { - self.info_hash == other.info_hash + self.peers == other.peers && self.metadata == other.metadata } } From 3d7e6ff04ab94f576b8aedd6663756243c6f3e55 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 May 2025 17:03:02 +0100 Subject: [PATCH 612/802] test: [#1358] add tests to torrust_tracker_torrent_repository::swarm::Swarm --- Cargo.lock | 1 + packages/primitives/src/peer.rs | 18 +++ packages/torrent-repository/Cargo.toml | 1 + packages/torrent-repository/src/event.rs | 20 +++ packages/torrent-repository/src/swarm.rs | 189 +++++++++++++++++++++++ 5 files changed, 229 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index ddf163cc6..75a272292 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4858,6 +4858,7 @@ dependencies = [ "criterion", "crossbeam-skiplist", "futures", + "mockall", "rand 0.9.1", "rstest", "serde", diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index 316541ad6..cd4531b09 
100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -252,6 +252,18 @@ impl Peer { pub fn change_ip(&mut self, new_ip: &IpAddr) { self.peer_addr = SocketAddr::new(*new_ip, self.peer_addr.port()); } + + pub fn mark_as_completed(&mut self) { + self.event = AnnounceEvent::Completed; + } + + #[must_use] + pub fn into_completed(self) -> Self { + Self { + event: AnnounceEvent::Completed, + ..self + } + } } use std::panic::Location; @@ -520,6 +532,12 @@ pub mod fixture { self } + #[must_use] + pub fn with_event(mut self, event: AnnounceEvent) -> Self { + self.peer.event = event; + self + } + #[allow(dead_code)] #[must_use] pub fn build(self) -> Peer { diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 26662b583..98ae5817d 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -33,6 +33,7 @@ tracing = "0" [dev-dependencies] async-std = { version = "1", features = ["attributes", "tokio1"] } criterion = { version = "0", features = ["async_tokio"] } +mockall = "0" rand = "0" rstest = "0" torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } diff --git a/packages/torrent-repository/src/event.rs b/packages/torrent-repository/src/event.rs index ac1c06637..9709da19a 100644 --- a/packages/torrent-repository/src/event.rs +++ b/packages/torrent-repository/src/event.rs @@ -36,6 +36,26 @@ pub mod sender { pub type Sender = Option>>; pub type Broadcaster = torrust_tracker_events::broadcaster::Broadcaster; + + #[cfg(test)] + pub mod tests { + + use futures::future::BoxFuture; + use mockall::mock; + use torrust_tracker_events::sender::{SendError, Sender}; + + use crate::event::Event; + + mock! 
{ + pub EventSender {} + + impl Sender for EventSender { + type Event = Event; + + fn send(&self, event: Event) -> BoxFuture<'static,Option > > > ; + } + } + } } pub mod receiver { diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 3fe0e27d7..473703e89 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -328,6 +328,16 @@ mod tests { use crate::swarm::Swarm; use crate::tests::sample_info_hash; + #[test] + fn it_should_allow_debugging() { + let swarm = Swarm::new(&sample_info_hash(), 0, None); + + assert_eq!( + format!("{swarm:?}"), + "Swarm { peers: {}, metadata: SwarmMetadata { downloaded: 0, complete: 0, incomplete: 0 } }" + ); + } + #[test] fn it_should_be_empty_when_no_peers_have_been_inserted() { let swarm = Swarm::new(&sample_info_hash(), 0, None); @@ -689,6 +699,12 @@ mod tests { assert_eq!(leechers, 1); } + #[tokio::test] + async fn it_should_be_a_peerless_swarm_when_it_does_not_contain_any_peers() { + let swarm = Swarm::new(&sample_info_hash(), 0, None); + assert!(swarm.is_peerless()); + } + mod updating_the_swarm_metadata { mod when_a_new_peer_is_added { @@ -907,4 +923,177 @@ mod tests { } } } + + mod triggering_events { + + use std::future; + use std::sync::Arc; + + use aquatic_udp_protocol::AnnounceEvent::Started; + use mockall::predicate::eq; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::event::sender::tests::MockEventSender; + use crate::event::Event; + use crate::swarm::Swarm; + use crate::tests::sample_info_hash; + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_new_peer_is_added() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + event_sender_mock + .expect_send() + .with(eq(Event::PeerAdded { info_hash, peer })) + .times(1) + .returning(|_| 
Box::pin(future::ready(Some(Ok(1))))); + + let mut swarm = Swarm::new(&sample_info_hash(), 0, Some(Arc::new(event_sender_mock))); + + let mut downloads_increased = false; + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + } + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_peer_is_directly_removed() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + event_sender_mock + .expect_send() + .with(eq(Event::PeerAdded { info_hash, peer })) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + + event_sender_mock + .expect_send() + .with(eq(Event::PeerRemoved { info_hash, peer })) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + + let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + + // Insert the peer + let mut downloads_increased = false; + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + + swarm.remove(&peer).await; + } + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_peer_is_removed_due_to_inactivity() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + event_sender_mock + .expect_send() + .with(eq(Event::PeerAdded { info_hash, peer })) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + + event_sender_mock + .expect_send() + .with(eq(Event::PeerRemoved { info_hash, peer })) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + + let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + + // Insert the peer + let mut downloads_increased = false; + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + + // Peers not updated after this time will be removed + let current_cutoff = peer.updated + DurationSinceUnixEpoch::from_secs(1); + + swarm.remove_inactive(current_cutoff).await; + } + + 
#[tokio::test] + async fn it_should_trigger_an_event_when_a_peer_is_updated() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().with_event(Started).build(); + + let mut event_sender_mock = MockEventSender::new(); + + event_sender_mock + .expect_send() + .with(eq(Event::PeerAdded { info_hash, peer })) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + + event_sender_mock + .expect_send() + .with(eq(Event::PeerUpdated { + info_hash, + old_peer: peer, + new_peer: peer, + })) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + + let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + + // Insert the peer + let mut downloads_increased = false; + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + + // Update the peer + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + } + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_peer_completes_a_download() { + let info_hash = sample_info_hash(); + let started_peer = PeerBuilder::leecher().with_event(Started).build(); + let completed_peer = started_peer.into_completed(); + + let mut event_sender_mock = MockEventSender::new(); + + event_sender_mock + .expect_send() + .with(eq(Event::PeerAdded { + info_hash, + peer: started_peer, + })) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + + event_sender_mock + .expect_send() + .with(eq(Event::PeerUpdated { + info_hash, + old_peer: started_peer, + new_peer: completed_peer, + })) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + + event_sender_mock + .expect_send() + .with(eq(Event::PeerDownloadCompleted { + info_hash, + peer: completed_peer, + })) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + + let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + + // Insert the peer + let mut downloads_increased = false; + swarm.upsert_peer(started_peer.into(), &mut 
downloads_increased).await; + + // Announce as completed + swarm.upsert_peer(completed_peer.into(), &mut downloads_increased).await; + } + } } From f71211fedc91477058064150398b265705f6fdf0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 14 May 2025 17:53:02 +0100 Subject: [PATCH 613/802] test: [#1358] add tests to torrust_tracker_torrent_repository::swarms::Swarms --- packages/torrent-repository/src/event.rs | 16 +- packages/torrent-repository/src/swarm.rs | 2 +- packages/torrent-repository/src/swarms.rs | 247 +++++++++++++++++++++- 3 files changed, 259 insertions(+), 6 deletions(-) diff --git a/packages/torrent-repository/src/event.rs b/packages/torrent-repository/src/event.rs index 9709da19a..da086f89e 100644 --- a/packages/torrent-repository/src/event.rs +++ b/packages/torrent-repository/src/event.rs @@ -40,8 +40,9 @@ pub mod sender { #[cfg(test)] pub mod tests { - use futures::future::BoxFuture; + use futures::future::{self, BoxFuture}; use mockall::mock; + use mockall::predicate::eq; use torrust_tracker_events::sender::{SendError, Sender}; use crate::event::Event; @@ -55,6 +56,19 @@ pub mod sender { fn send(&self, event: Event) -> BoxFuture<'static,Option > > > ; } } + + pub fn expect_event(mock: &mut MockEventSender, event: Event) { + mock.expect_send() + .with(eq(event)) + .times(1) + .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + } + + pub fn expect_event_sequence(mock: &mut MockEventSender, event: Vec) { + for e in event { + expect_event(mock, e); + } + } } } diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 473703e89..160636906 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -647,7 +647,7 @@ mod tests { } #[tokio::test] - async fn it_should_return_the_metadata() { + async fn it_should_return_the_swarm_metadata() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut downloads_increased = false; diff --git 
a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 3200d77ff..8b8327778 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -440,8 +440,13 @@ mod tests { mod the_swarm_repository { + use std::sync::Arc; + use aquatic_udp_protocol::PeerId; + use crate::swarms::Swarms; + use crate::tests::{sample_info_hash, sample_peer}; + /// It generates a peer id from a number where the number is the last /// part of the peer ID. For example, for `12` it returns /// `-qB00000000000000012`. @@ -462,14 +467,50 @@ mod tests { // The `TorrentRepository` has these responsibilities: // - To maintain the peer lists for each torrent. - // - To maintain the the torrent entries, which contains all the info about the - // torrents, including the peer lists. - // - To return the torrent entries. + // - To maintain the the torrent entries, which contains all the info + // about the torrents, including the peer lists. + // - To return the torrent entries (swarm handles). // - To return the peer lists for a given torrent. // - To return the torrent metrics. // - To return the swarm metadata for a given torrent. // - To handle the persistence of the torrent entries. 
+ #[tokio::test] + async fn it_should_return_zero_length_when_it_has_no_swarms() { + let swarms = Arc::new(Swarms::default()); + assert_eq!(swarms.len(), 0); + } + + #[tokio::test] + async fn it_should_return_the_length_when_it_has_swarms() { + let swarms = Arc::new(Swarms::default()); + let info_hash = sample_info_hash(); + let peer = sample_peer(); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + assert_eq!(swarms.len(), 1); + } + + #[tokio::test] + async fn it_should_be_empty_when_it_has_no_swarms() { + let swarms = Arc::new(Swarms::default()); + assert!(swarms.is_empty()); + + let info_hash = sample_info_hash(); + let peer = sample_peer(); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + assert!(!swarms.is_empty()); + } + + #[tokio::test] + async fn it_should_not_be_empty_when_it_has_at_least_one_swarm() { + let swarms = Arc::new(Swarms::default()); + let info_hash = sample_info_hash(); + let peer = sample_peer(); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + assert!(!swarms.is_empty()); + } + mod maintaining_the_peer_lists { use std::sync::Arc; @@ -1054,6 +1095,59 @@ mod tests { "{result_a:?} {result_b:?}" ); } + + mod it_should_count_peerless_torrents { + use std::sync::Arc; + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::swarms::Swarms; + use crate::tests::{sample_info_hash, sample_peer}; + + #[tokio::test] + async fn no_peerless_torrents() { + let swarms = Arc::new(Swarms::default()); + assert_eq!(swarms.count_peerless_torrents().await.unwrap(), 0); + } + + #[tokio::test] + async fn one_peerless_torrents() { + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + let swarms = Arc::new(Swarms::default()); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + let current_cutoff = peer.updated + DurationSinceUnixEpoch::from_secs(1); + swarms.remove_inactive_peers(current_cutoff).await.unwrap(); + + 
assert_eq!(swarms.count_peerless_torrents().await.unwrap(), 1); + } + } + + mod it_should_count_peers { + use std::sync::Arc; + + use crate::swarms::Swarms; + use crate::tests::{sample_info_hash, sample_peer}; + + #[tokio::test] + async fn no_peers() { + let swarms = Arc::new(Swarms::default()); + assert_eq!(swarms.count_peers().await.unwrap(), 0); + } + + #[tokio::test] + async fn one_peer() { + let info_hash = sample_info_hash(); + let peer = sample_peer(); + + let swarms = Arc::new(Swarms::default()); + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + assert_eq!(swarms.count_peers().await.unwrap(), 1); + } + } } mod returning_swarm_metadata { @@ -1102,7 +1196,7 @@ mod tests { use torrust_tracker_primitives::PersistentTorrents; use crate::swarms::Swarms; - use crate::tests::sample_info_hash; + use crate::tests::{leecher, sample_info_hash}; #[tokio::test] async fn it_should_allow_importing_persisted_torrent_entries() { @@ -1121,6 +1215,151 @@ mod tests { // Only the number of downloads is persisted. assert_eq!(swarm_metadata.downloaded, 1); } + + #[tokio::test] + async fn it_should_allow_overwriting_a_previously_imported_persisted_torrent() { + // code-review: do we want to allow this? 
+ + let swarms = Arc::new(Swarms::default()); + + let infohash = sample_info_hash(); + + let mut persistent_torrents = PersistentTorrents::default(); + + persistent_torrents.insert(infohash, 1); + persistent_torrents.insert(infohash, 2); + + swarms.import_persistent(&persistent_torrents); + + let swarm_metadata = swarms.get_swarm_metadata_or_default(&infohash).await.unwrap(); + + // It takes the last value + assert_eq!(swarm_metadata.downloaded, 2); + } + + #[tokio::test] + async fn it_should_now_allow_importing_a_persisted_torrent_if_it_already_exists() { + let swarms = Arc::new(Swarms::default()); + + let infohash = sample_info_hash(); + + // Insert a new the torrent entry + swarms.handle_announcement(&infohash, &leecher(), None).await.unwrap(); + let initial_number_of_downloads = swarms.get_swarm_metadata_or_default(&infohash).await.unwrap().downloaded; + + // Try to import the torrent entry + let new_number_of_downloads = initial_number_of_downloads + 1; + let mut persistent_torrents = PersistentTorrents::default(); + persistent_torrents.insert(infohash, new_number_of_downloads); + swarms.import_persistent(&persistent_torrents); + + // The number of downloads should not be changed + assert_eq!( + swarms.get_swarm_metadata_or_default(&infohash).await.unwrap().downloaded, + initial_number_of_downloads + ); + } + } + } + + mod triggering_events { + + use std::sync::Arc; + + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::event::sender::tests::{expect_event_sequence, MockEventSender}; + use crate::event::Event; + use crate::swarms::Swarms; + use crate::tests::sample_info_hash; + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_new_torrent_is_added() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + expect_event_sequence( + &mut event_sender_mock, + vec![ + Event::TorrentAdded 
{ + info_hash, + announcement: peer, + }, + Event::PeerAdded { info_hash, peer }, + ], + ); + + let swarms = Swarms::new(Some(Arc::new(event_sender_mock))); + + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + } + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_torrent_is_directly_removed() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + expect_event_sequence( + &mut event_sender_mock, + vec![ + Event::TorrentAdded { + info_hash, + announcement: peer, + }, + Event::PeerAdded { info_hash, peer }, + Event::TorrentRemoved { info_hash }, + ], + ); + + let swarms = Swarms::new(Some(Arc::new(event_sender_mock))); + + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + swarms.remove(&info_hash).await.unwrap(); + } + + #[tokio::test] + async fn it_should_trigger_an_event_when_a_peerless_torrent_is_removed() { + let info_hash = sample_info_hash(); + let peer = PeerBuilder::leecher().build(); + + let mut event_sender_mock = MockEventSender::new(); + + expect_event_sequence( + &mut event_sender_mock, + vec![ + Event::TorrentAdded { + info_hash, + announcement: peer, + }, + Event::PeerAdded { info_hash, peer }, + Event::PeerRemoved { info_hash, peer }, + Event::TorrentRemoved { info_hash }, + ], + ); + + let swarms = Swarms::new(Some(Arc::new(event_sender_mock))); + + // Add the new torrent + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + // Remove the peer + let current_cutoff = peer.updated + DurationSinceUnixEpoch::from_secs(1); + swarms.remove_inactive_peers(current_cutoff).await.unwrap(); + + // Remove peerless torrents + + let tracker_policy = torrust_tracker_configuration::TrackerPolicy { + remove_peerless_torrents: true, + ..Default::default() + }; + + swarms.remove_peerless_torrents(&tracker_policy).await.unwrap(); } } } From b13797e768ea79fd071a47dd6cbb710f11a22a21 Mon Sep 17 00:00:00 2001 
From: Jose Celano Date: Wed, 14 May 2025 19:33:26 +0100 Subject: [PATCH 614/802] test: [#1358] add tests for events in torrent-repository pkg --- packages/primitives/src/peer.rs | 46 ++- packages/torrent-repository/src/event.rs | 26 ++ .../src/statistics/event/handler.rs | 336 ++++++++++++++++++ 3 files changed, 406 insertions(+), 2 deletions(-) diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index cd4531b09..20ddd3074 100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -22,12 +22,13 @@ //! }; //! ``` +use std::fmt; use std::net::{IpAddr, SocketAddr}; use std::ops::{Deref, DerefMut}; +use std::str::FromStr; use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; -use derive_more::Display; use serde::Serialize; use zerocopy::FromBytes as _; @@ -35,7 +36,7 @@ use crate::DurationSinceUnixEpoch; pub type PeerAnnouncement = Peer; -#[derive(Debug, Display, Serialize, Copy, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Serialize, Copy, Clone, PartialEq, Eq, Hash)] #[serde(rename_all_fields = "lowercase")] pub enum PeerRole { Seeder, @@ -53,6 +54,39 @@ impl PeerRole { } } +impl fmt::Display for PeerRole { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + PeerRole::Seeder => write!(f, "seeder"), + PeerRole::Leecher => write!(f, "leecher"), + } + } +} + +impl FromStr for PeerRole { + type Err = ParsePeerRoleError; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "seeder" => Ok(PeerRole::Seeder), + "leecher" => Ok(PeerRole::Leecher), + _ => Err(ParsePeerRoleError::InvalidPeerRole { + location: Location::caller(), + raw_param: s.to_string(), + }), + } + } +} + +#[derive(Error, Debug)] +pub enum ParsePeerRoleError { + #[error("invalid param {raw_param} in {location}")] + InvalidPeerRole { + location: &'static Location<'static>, + raw_param: String, + }, +} + /// Peer struct used by the core `Tracker`. 
/// /// A sample peer: @@ -264,6 +298,14 @@ impl Peer { ..self } } + + #[must_use] + pub fn into_seeder(self) -> Self { + Self { + left: NumberOfBytes::new(0), + ..self + } + } } use std::panic::Location; diff --git a/packages/torrent-repository/src/event.rs b/packages/torrent-repository/src/event.rs index da086f89e..65a65ce8c 100644 --- a/packages/torrent-repository/src/event.rs +++ b/packages/torrent-repository/src/event.rs @@ -83,3 +83,29 @@ pub mod bus { pub type EventBus = torrust_tracker_events::bus::EventBus; } + +#[cfg(test)] +pub mod test { + + use torrust_tracker_primitives::peer::Peer; + + use super::Event; + use crate::tests::sample_info_hash; + + #[test] + fn events_should_be_comparable() { + let info_hash = sample_info_hash(); + + let event1 = Event::TorrentAdded { + info_hash, + announcement: Peer::default(), + }; + + let event2 = Event::TorrentRemoved { info_hash }; + + let event1_clone = event1.clone(); + + assert!(event1 == event1_clone); + assert!(event1 != event2); + } +} diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs index 2fd7271cc..2b61839b8 100644 --- a/packages/torrent-repository/src/statistics/event/handler.rs +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -13,6 +13,7 @@ use crate::statistics::{ pub async fn handle_event(event: Event, stats_repository: &Arc, now: DurationSinceUnixEpoch) { match event { + // Torrent events Event::TorrentAdded { info_hash, .. 
} => { tracing::debug!(info_hash = ?info_hash, "Torrent added",); @@ -27,6 +28,8 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) .await; } + + // Peer events Event::PeerAdded { info_hash, peer } => { tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer added", ); @@ -96,3 +99,336 @@ fn label_set_for_peer(peer: &Peer) -> LabelSet { (label_name!("peer_role"), LabelValue::new("leecher")).into() } } + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use aquatic_udp_protocol::NumberOfBytes; + use torrust_tracker_metrics::label::LabelSet; + use torrust_tracker_metrics::metric::MetricName; + use torrust_tracker_primitives::peer::{Peer, PeerRole}; + + use crate::statistics::repository::Repository; + use crate::tests::{leecher, seeder}; + + fn make_peer(role: PeerRole) -> Peer { + match role { + PeerRole::Seeder => seeder(), + PeerRole::Leecher => leecher(), + } + } + + // It returns a peer with the opposite role of the given peer. 
+ fn make_opposite_role_peer(peer: &Peer) -> Peer { + let mut opposite_role_peer = *peer; + + match peer.role() { + PeerRole::Seeder => { + opposite_role_peer.left = NumberOfBytes::new(1); + } + PeerRole::Leecher => { + opposite_role_peer.left = NumberOfBytes::new(0); + } + } + + opposite_role_peer + } + + async fn expect_counter_metric_to_be( + stats_repository: &Arc, + metric_name: &MetricName, + label_set: &LabelSet, + expected_value: u64, + ) { + let value = get_counter_metric(stats_repository, metric_name, label_set).await; + assert_eq!(value.to_string(), expected_value.to_string()); + } + + async fn get_counter_metric(stats_repository: &Arc, metric_name: &MetricName, label_set: &LabelSet) -> u64 { + stats_repository + .get_metrics() + .await + .metric_collection + .get_counter_value(metric_name, label_set) + .unwrap_or_else(|| panic!("Failed to get counter value for metric name '{metric_name}' and label set '{label_set}'")) + .value() + } + + async fn expect_gauge_metric_to_be( + stats_repository: &Arc, + metric_name: &MetricName, + label_set: &LabelSet, + expected_value: f64, + ) { + let value = get_gauge_metric(stats_repository, metric_name, label_set).await; + assert_eq!(value.to_string(), expected_value.to_string()); + } + + async fn get_gauge_metric(stats_repository: &Arc, metric_name: &MetricName, label_set: &LabelSet) -> f64 { + stats_repository + .get_metrics() + .await + .metric_collection + .get_gauge_value(metric_name, label_set) + .unwrap_or_else(|| panic!("Failed to get gauge value for metric name '{metric_name}' and label set '{label_set}'")) + .value() + } + + mod for_torrent_metrics { + + use std::sync::Arc; + + use torrust_tracker_clock::clock::stopped::Stopped; + use torrust_tracker_clock::clock::{self, Time}; + use torrust_tracker_metrics::label::LabelSet; + use torrust_tracker_metrics::metric_name; + + use crate::event::Event; + use crate::statistics::event::handler::handle_event; + use 
crate::statistics::event::handler::tests::expect_gauge_metric_to_be; + use crate::statistics::repository::Repository; + use crate::statistics::TORRENT_REPOSITORY_TORRENTS_TOTAL; + use crate::tests::{sample_info_hash, sample_peer}; + use crate::CurrentClock; + + #[tokio::test] + async fn it_should_increment_the_number_of_torrents_when_a_torrent_added_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + handle_event( + Event::TorrentAdded { + info_hash: sample_info_hash(), + announcement: sample_peer(), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_gauge_metric_to_be( + &stats_repository, + &metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), + &LabelSet::default(), + 1.0, + ) + .await; + } + + #[tokio::test] + async fn it_should_decrement_the_number_of_torrents_when_a_torrent_removed_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + let metric_name = metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL); + let label_set = LabelSet::default(); + + // Increment the gauge first to simulate a torrent being added. 
+ stats_repository + .increment_gauge(&metric_name, &label_set, CurrentClock::now()) + .await + .unwrap(); + + handle_event( + Event::TorrentRemoved { + info_hash: sample_info_hash(), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_gauge_metric_to_be(&stats_repository, &metric_name, &label_set, 0.0).await; + } + } + + mod for_peer_metrics { + + mod peer_connections_total { + + use std::sync::Arc; + + use rstest::rstest; + use torrust_tracker_clock::clock::stopped::Stopped; + use torrust_tracker_clock::clock::{self, Time}; + use torrust_tracker_metrics::label::LabelValue; + use torrust_tracker_metrics::{label_name, metric_name}; + use torrust_tracker_primitives::peer::PeerRole; + + use crate::event::Event; + use crate::statistics::event::handler::handle_event; + use crate::statistics::event::handler::tests::{ + expect_gauge_metric_to_be, get_gauge_metric, make_opposite_role_peer, make_peer, + }; + use crate::statistics::repository::Repository; + use crate::statistics::TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL; + use crate::tests::sample_info_hash; + use crate::CurrentClock; + + #[rstest] + #[case("seeder")] + #[case("leecher")] + #[tokio::test] + async fn it_should_increment_the_number_of_peer_connections_when_a_peer_added_event_is_received( + #[case] role: PeerRole, + ) { + clock::Stopped::local_set_to_unix_epoch(); + + let peer = make_peer(role); + + let stats_repository = Arc::new(Repository::new()); + let metric_name = metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL); + let label_set = (label_name!("peer_role"), LabelValue::new(&role.to_string())).into(); + + handle_event( + Event::PeerAdded { + info_hash: sample_info_hash(), + peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_gauge_metric_to_be(&stats_repository, &metric_name, &label_set, 1.0).await; + } + + #[rstest] + #[case("seeder")] + #[case("leecher")] + #[tokio::test] + async fn 
it_should_decrement_the_number_of_peer_connections_when_a_peer_removed_event_is_received( + #[case] role: PeerRole, + ) { + clock::Stopped::local_set_to_unix_epoch(); + + let peer = make_peer(role); + + let stats_repository = Arc::new(Repository::new()); + + let metric_name = metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL); + let label_set = (label_name!("peer_role"), LabelValue::new(&role.to_string())).into(); + + // Increment the gauge first to simulate a peer being added. + stats_repository + .increment_gauge(&metric_name, &label_set, CurrentClock::now()) + .await + .unwrap(); + + handle_event( + Event::PeerRemoved { + info_hash: sample_info_hash(), + peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_gauge_metric_to_be(&stats_repository, &metric_name, &label_set, 0.0).await; + } + + #[rstest] + #[case("seeder")] + #[case("leecher")] + #[tokio::test] + async fn it_should_adjust_the_number_of_seeders_and_leechers_when_a_peer_updated_event_is_received_and_the_peer_changed_its_role( + #[case] old_role: PeerRole, + ) { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + let old_peer = make_peer(old_role); + let new_peer = make_opposite_role_peer(&old_peer); + + let metric_name = metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL); + let old_role_label_set = (label_name!("peer_role"), LabelValue::new(&old_peer.role().to_string())).into(); + let new_role_label_set = (label_name!("peer_role"), LabelValue::new(&new_peer.role().to_string())).into(); + + // Increment the gauge first by simulating a peer was added. 
+ handle_event( + Event::PeerAdded { + info_hash: sample_info_hash(), + peer: old_peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + let old_role_total = get_gauge_metric(&stats_repository, &metric_name, &old_role_label_set).await; + let new_role_total = 0.0; + + // The peer's role has changed, so we need to increment the new + // role and decrement the old one. + handle_event( + Event::PeerUpdated { + info_hash: sample_info_hash(), + old_peer, + new_peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + // The peer's role has changed, so the new role has incremented. + expect_gauge_metric_to_be(&stats_repository, &metric_name, &new_role_label_set, new_role_total + 1.0).await; + + // And the old role has decremented. + expect_gauge_metric_to_be(&stats_repository, &metric_name, &old_role_label_set, old_role_total - 1.0).await; + } + } + + mod torrent_downloads_total { + + use std::sync::Arc; + + use rstest::rstest; + use torrust_tracker_clock::clock::stopped::Stopped; + use torrust_tracker_clock::clock::{self, Time}; + use torrust_tracker_metrics::label::LabelValue; + use torrust_tracker_metrics::{label_name, metric_name}; + use torrust_tracker_primitives::peer::PeerRole; + + use crate::event::Event; + use crate::statistics::event::handler::handle_event; + use crate::statistics::event::handler::tests::{expect_counter_metric_to_be, make_peer}; + use crate::statistics::repository::Repository; + use crate::statistics::TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL; + use crate::tests::sample_info_hash; + use crate::CurrentClock; + + #[rstest] + #[case("seeder")] + #[case("leecher")] + #[tokio::test] + async fn it_should_increment_the_number_of_downloads_when_a_peer_downloaded_event_is_received( + #[case] role: PeerRole, + ) { + clock::Stopped::local_set_to_unix_epoch(); + + let peer = make_peer(role); + + let stats_repository = Arc::new(Repository::new()); + let metric_name = 
metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL); + let label_set = (label_name!("peer_role"), LabelValue::new(&role.to_string())).into(); + + handle_event( + Event::PeerDownloadCompleted { + info_hash: sample_info_hash(), + peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_counter_metric_to_be(&stats_repository, &metric_name, &label_set, 1).await; + } + } + } +} From 47d1eab5a7328b8a524d9bbcabc3b3bc4ddce6b5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 15 May 2025 19:09:43 +0100 Subject: [PATCH 615/802] refactor: [#1358] Swarm tests to use new mock helpers --- packages/torrent-repository/src/swarm.rs | 111 ++++++++--------------- 1 file changed, 39 insertions(+), 72 deletions(-) diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 160636906..3277cad8d 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -926,15 +926,13 @@ mod tests { mod triggering_events { - use std::future; use std::sync::Arc; use aquatic_udp_protocol::AnnounceEvent::Started; - use mockall::predicate::eq; use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::event::sender::tests::MockEventSender; + use crate::event::sender::tests::{expect_event_sequence, MockEventSender}; use crate::event::Event; use crate::swarm::Swarm; use crate::tests::sample_info_hash; @@ -946,11 +944,7 @@ mod tests { let mut event_sender_mock = MockEventSender::new(); - event_sender_mock - .expect_send() - .with(eq(Event::PeerAdded { info_hash, peer })) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + expect_event_sequence(&mut event_sender_mock, vec![Event::PeerAdded { info_hash, peer }]); let mut swarm = Swarm::new(&sample_info_hash(), 0, Some(Arc::new(event_sender_mock))); @@ -965,17 +959,10 @@ mod tests { let mut event_sender_mock = MockEventSender::new(); - event_sender_mock - 
.expect_send() - .with(eq(Event::PeerAdded { info_hash, peer })) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - - event_sender_mock - .expect_send() - .with(eq(Event::PeerRemoved { info_hash, peer })) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + expect_event_sequence( + &mut event_sender_mock, + vec![Event::PeerAdded { info_hash, peer }, Event::PeerRemoved { info_hash, peer }], + ); let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); @@ -993,17 +980,10 @@ mod tests { let mut event_sender_mock = MockEventSender::new(); - event_sender_mock - .expect_send() - .with(eq(Event::PeerAdded { info_hash, peer })) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - - event_sender_mock - .expect_send() - .with(eq(Event::PeerRemoved { info_hash, peer })) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + expect_event_sequence( + &mut event_sender_mock, + vec![Event::PeerAdded { info_hash, peer }, Event::PeerRemoved { info_hash, peer }], + ); let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); @@ -1024,21 +1004,17 @@ mod tests { let mut event_sender_mock = MockEventSender::new(); - event_sender_mock - .expect_send() - .with(eq(Event::PeerAdded { info_hash, peer })) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - - event_sender_mock - .expect_send() - .with(eq(Event::PeerUpdated { - info_hash, - old_peer: peer, - new_peer: peer, - })) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + expect_event_sequence( + &mut event_sender_mock, + vec![ + Event::PeerAdded { info_hash, peer }, + Event::PeerUpdated { + info_hash, + old_peer: peer, + new_peer: peer, + }, + ], + ); let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); @@ -1058,33 +1034,24 @@ mod tests { let mut event_sender_mock = MockEventSender::new(); - event_sender_mock - .expect_send() - .with(eq(Event::PeerAdded { - 
info_hash, - peer: started_peer, - })) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - - event_sender_mock - .expect_send() - .with(eq(Event::PeerUpdated { - info_hash, - old_peer: started_peer, - new_peer: completed_peer, - })) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(1))))); - - event_sender_mock - .expect_send() - .with(eq(Event::PeerDownloadCompleted { - info_hash, - peer: completed_peer, - })) - .times(1) - .returning(|_| Box::pin(future::ready(Some(Ok(1))))); + expect_event_sequence( + &mut event_sender_mock, + vec![ + Event::PeerAdded { + info_hash, + peer: started_peer, + }, + Event::PeerUpdated { + info_hash, + old_peer: started_peer, + new_peer: completed_peer, + }, + Event::PeerDownloadCompleted { + info_hash, + peer: completed_peer, + }, + ], + ); let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); From b3b0b71396bebb0916a47f4833313b473260f59d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 15 May 2025 21:51:44 +0100 Subject: [PATCH 616/802] refactor: [#1358] Swarm, cleaning upsert_peer method --- packages/primitives/src/peer.rs | 5 + packages/torrent-repository/src/swarm.rs | 111 +++++++++++++---------- 2 files changed, 67 insertions(+), 49 deletions(-) diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index 20ddd3074..57ca3909d 100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -270,6 +270,11 @@ impl Peer { !self.is_seeder() } + #[must_use] + pub fn is_completed(&self) -> bool { + self.event == AnnounceEvent::Completed + } + #[must_use] pub fn role(&self) -> PeerRole { if self.is_seeder() { diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 3277cad8d..d01f79fe8 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -73,21 +73,39 @@ impl Swarm { downloads_increased } - pub async fn upsert_peer( + async fn upsert_peer( 
&mut self, incoming_announce: Arc, downloads_increased: &mut bool, ) -> Option> { - let is_now_seeder = incoming_announce.is_seeder(); - let has_completed = incoming_announce.event == AnnounceEvent::Completed; let announcement = incoming_announce.clone(); - if let Some(old_announce) = self.peers.insert(incoming_announce.peer_addr, incoming_announce) { - // A peer has been updated in the swarm. + if let Some(previous_announce) = self.peers.insert(incoming_announce.peer_addr, incoming_announce) { + *downloads_increased = self.update_metadata(Some(&previous_announce), &announcement); - // Check if the peer has changed from leecher to seeder or vice versa. - if old_announce.is_seeder() != is_now_seeder { - if is_now_seeder { + self.trigger_peer_updated_event(&previous_announce, &announcement, *downloads_increased) + .await; + + Some(previous_announce) + } else { + *downloads_increased = self.update_metadata(None, &announcement); + + self.trigger_peer_added_event(&announcement).await; + + None + } + } + + fn update_metadata( + &mut self, + opt_previous_announce: Option<&Arc>, + new_announce: &Arc, + ) -> bool { + let mut downloads_increased = false; + + if let Some(previous_announce) = opt_previous_announce { + if previous_announce.role() != new_announce.role() { + if new_announce.is_seeder() { self.metadata.complete += 1; self.metadata.incomplete -= 1; } else { @@ -96,58 +114,53 @@ impl Swarm { } } - // Check if the peer has completed downloading the torrent. 
- if has_completed && old_announce.event != AnnounceEvent::Completed { + if new_announce.is_completed() && !previous_announce.is_completed() { self.metadata.downloaded += 1; - *downloads_increased = true; + downloads_increased = true; } - - if let Some(event_sender) = self.event_sender.as_deref() { - event_sender - .send(Event::PeerUpdated { - info_hash: self.info_hash, - old_peer: *old_announce, - new_peer: *announcement, - }) - .await; - - if *downloads_increased { - event_sender - .send(Event::PeerDownloadCompleted { - info_hash: self.info_hash, - peer: *announcement, - }) - .await; - } - } - - Some(old_announce) + } else if new_announce.is_seeder() { + self.metadata.complete += 1; } else { - // A new peer has been added to the swarm. - - // Check if the peer is a seeder or a leecher. - if is_now_seeder { - self.metadata.complete += 1; - } else { - self.metadata.incomplete += 1; - } + self.metadata.incomplete += 1; + } - // Check if the peer has completed downloading the torrent. - if has_completed { - // Don't increment `downloaded` here: we only count transitions - // from a known peer - } + downloads_increased + } - if let Some(event_sender) = self.event_sender.as_deref() { + async fn trigger_peer_updated_event( + &self, + old_announce: &Arc, + new_announce: &Arc, + downloads_increased: bool, + ) { + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerUpdated { + info_hash: self.info_hash, + old_peer: *old_announce.clone(), + new_peer: *new_announce.clone(), + }) + .await; + + if downloads_increased { event_sender - .send(Event::PeerAdded { + .send(Event::PeerDownloadCompleted { info_hash: self.info_hash, - peer: *announcement, + peer: *new_announce.clone(), }) .await; } + } + } - None + async fn trigger_peer_added_event(&self, announcement: &Arc) { + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerAdded { + info_hash: self.info_hash, + peer: *announcement.clone(), + }) + 
.await; } } From d154b2aa045063c807deb0a6a88fad55297e46b4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 May 2025 08:51:09 +0100 Subject: [PATCH 617/802] refactor: [#1358] clean Swarm type --- packages/torrent-repository/src/swarm.rs | 308 +++++++++++------------ 1 file changed, 148 insertions(+), 160 deletions(-) diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index d01f79fe8..8cf2982e6 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -67,169 +67,20 @@ impl Swarm { AnnounceEvent::Started | AnnounceEvent::None | AnnounceEvent::Completed => { self.upsert_peer(Arc::new(*incoming_announce), &mut downloads_increased).await } - AnnounceEvent::Stopped => self.remove(incoming_announce).await, + AnnounceEvent::Stopped => self.remove_peer(&incoming_announce.peer_addr).await, }; downloads_increased } - async fn upsert_peer( - &mut self, - incoming_announce: Arc, - downloads_increased: &mut bool, - ) -> Option> { - let announcement = incoming_announce.clone(); - - if let Some(previous_announce) = self.peers.insert(incoming_announce.peer_addr, incoming_announce) { - *downloads_increased = self.update_metadata(Some(&previous_announce), &announcement); - - self.trigger_peer_updated_event(&previous_announce, &announcement, *downloads_increased) - .await; - - Some(previous_announce) - } else { - *downloads_increased = self.update_metadata(None, &announcement); - - self.trigger_peer_added_event(&announcement).await; - - None - } - } - - fn update_metadata( - &mut self, - opt_previous_announce: Option<&Arc>, - new_announce: &Arc, - ) -> bool { - let mut downloads_increased = false; - - if let Some(previous_announce) = opt_previous_announce { - if previous_announce.role() != new_announce.role() { - if new_announce.is_seeder() { - self.metadata.complete += 1; - self.metadata.incomplete -= 1; - } else { - self.metadata.complete -= 1; - self.metadata.incomplete += 1; - } - } 
- - if new_announce.is_completed() && !previous_announce.is_completed() { - self.metadata.downloaded += 1; - downloads_increased = true; - } - } else if new_announce.is_seeder() { - self.metadata.complete += 1; - } else { - self.metadata.incomplete += 1; - } - - downloads_increased - } - - async fn trigger_peer_updated_event( - &self, - old_announce: &Arc, - new_announce: &Arc, - downloads_increased: bool, - ) { - if let Some(event_sender) = self.event_sender.as_deref() { - event_sender - .send(Event::PeerUpdated { - info_hash: self.info_hash, - old_peer: *old_announce.clone(), - new_peer: *new_announce.clone(), - }) - .await; - - if downloads_increased { - event_sender - .send(Event::PeerDownloadCompleted { - info_hash: self.info_hash, - peer: *new_announce.clone(), - }) - .await; - } - } - } - - async fn trigger_peer_added_event(&self, announcement: &Arc) { - if let Some(event_sender) = self.event_sender.as_deref() { - event_sender - .send(Event::PeerAdded { - info_hash: self.info_hash, - peer: *announcement.clone(), - }) - .await; - } - } - - pub async fn remove(&mut self, peer_to_remove: &Peer) -> Option> { - match self.peers.remove(&peer_to_remove.peer_addr) { - Some(old_peer) => { - // A peer has been removed from the swarm. - - // Check if the peer was a seeder or a leecher. - if old_peer.is_seeder() { - self.metadata.complete -= 1; - } else { - self.metadata.incomplete -= 1; - } - - if let Some(event_sender) = self.event_sender.as_deref() { - event_sender - .send(Event::PeerRemoved { - info_hash: self.info_hash, - peer: *old_peer.clone(), - }) - .await; - } - - Some(old_peer) - } - None => None, - } - } - pub async fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) -> usize { - let mut number_of_peers_removed = 0; - let mut removed_peers = Vec::new(); - - self.peers.retain(|_key, peer| { - let is_active = peer::ReadInfo::get_updated(peer) > current_cutoff; - - if !is_active { - // Update the metadata when removing a peer. 
- if peer.is_seeder() { - self.metadata.complete -= 1; - } else { - self.metadata.incomplete -= 1; - } - - number_of_peers_removed += 1; - - if let Some(_event_sender) = self.event_sender.as_deref() { - // Events can not be trigger here because retain does not allow - // async closures. - removed_peers.push(*peer.clone()); - } - } + let peers_to_remove = self.inactive_peers(current_cutoff); - is_active - }); - - if let Some(event_sender) = self.event_sender.as_deref() { - for peer in &removed_peers { - event_sender - .send(Event::PeerRemoved { - info_hash: self.info_hash, - peer: *peer, - }) - .await; - } + for peer_addr in &peers_to_remove { + self.remove_peer(peer_addr).await; } - number_of_peers_removed + peers_to_remove.len() } #[must_use] @@ -316,6 +167,57 @@ impl Swarm { !self.should_be_removed(policy) } + async fn upsert_peer( + &mut self, + incoming_announce: Arc, + downloads_increased: &mut bool, + ) -> Option> { + let announcement = incoming_announce.clone(); + + if let Some(previous_announce) = self.peers.insert(incoming_announce.peer_addr, incoming_announce) { + *downloads_increased = self.update_metadata_on_update(&previous_announce, &announcement); + + self.trigger_peer_updated_event(&previous_announce, &announcement).await; + + if *downloads_increased { + self.trigger_peer_download_completed_event(&announcement).await; + } + + Some(previous_announce) + } else { + *downloads_increased = false; + + self.update_metadata_on_insert(&announcement); + + self.trigger_peer_added_event(&announcement).await; + + None + } + } + + async fn remove_peer(&mut self, peer_addr: &SocketAddr) -> Option> { + if let Some(old_peer) = self.peers.remove(peer_addr) { + self.update_metadata_on_removal(&old_peer); + + self.trigger_peer_removed_event(&old_peer).await; + + Some(old_peer) + } else { + None + } + } + + #[must_use] + fn inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> Vec { + self.peers + .iter() + .filter(|(_, peer)| 
peer::ReadInfo::get_updated(&**peer) <= current_cutoff) + .map(|(addr, _)| *addr) + .collect() + } + + /// Returns true if the swarm should be removed according to the retention + /// policy. fn should_be_removed(&self, policy: &TrackerPolicy) -> bool { // If the policy is to remove peerless torrents and the swarm is empty (no peers), (policy.remove_peerless_torrents && self.is_empty()) @@ -325,6 +227,92 @@ impl Swarm { // See https://github.com/torrust/torrust-tracker/issues/1502) && !(policy.persistent_torrent_completed_stat && self.metadata().downloaded > 0) } + + fn update_metadata_on_insert(&mut self, added_peer: &Arc) { + if added_peer.is_seeder() { + self.metadata.complete += 1; + } else { + self.metadata.incomplete += 1; + } + } + + fn update_metadata_on_removal(&mut self, removed_peer: &Arc) { + if removed_peer.is_seeder() { + self.metadata.complete -= 1; + } else { + self.metadata.incomplete -= 1; + } + } + + fn update_metadata_on_update( + &mut self, + previous_announce: &Arc, + new_announce: &Arc, + ) -> bool { + let mut downloads_increased = false; + + if previous_announce.role() != new_announce.role() { + if new_announce.is_seeder() { + self.metadata.complete += 1; + self.metadata.incomplete -= 1; + } else { + self.metadata.complete -= 1; + self.metadata.incomplete += 1; + } + } + + if new_announce.is_completed() && !previous_announce.is_completed() { + self.metadata.downloaded += 1; + downloads_increased = true; + } + + downloads_increased + } + + async fn trigger_peer_added_event(&self, announcement: &Arc) { + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerAdded { + info_hash: self.info_hash, + peer: *announcement.clone(), + }) + .await; + } + } + + async fn trigger_peer_removed_event(&self, old_peer: &Arc) { + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerRemoved { + info_hash: self.info_hash, + peer: *old_peer.clone(), + }) + .await; + } + } + + async fn 
trigger_peer_updated_event(&self, old_announce: &Arc, new_announce: &Arc) { + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerUpdated { + info_hash: self.info_hash, + old_peer: *old_announce.clone(), + new_peer: *new_announce.clone(), + }) + .await; + } + } + + async fn trigger_peer_download_completed_event(&self, new_announce: &Arc) { + if let Some(event_sender) = self.event_sender.as_deref() { + event_sender + .send(Event::PeerDownloadCompleted { + info_hash: self.info_hash, + peer: *new_announce.clone(), + }) + .await; + } + } } #[cfg(test)] @@ -435,7 +423,7 @@ mod tests { swarm.upsert_peer(peer.into(), &mut downloads_increased).await; - swarm.remove(&peer).await; + swarm.remove_peer(&peer.peer_addr).await; assert!(swarm.is_empty()); } @@ -449,7 +437,7 @@ mod tests { swarm.upsert_peer(peer.into(), &mut downloads_increased).await; - let old = swarm.remove(&peer).await; + let old = swarm.remove_peer(&peer.peer_addr).await; assert_eq!(old, Some(Arc::new(peer))); assert_eq!(swarm.get(&peer.peer_addr), None); @@ -461,7 +449,7 @@ mod tests { let peer = PeerBuilder::default().build(); - assert_eq!(swarm.remove(&peer).await, None); + assert_eq!(swarm.remove_peer(&peer.peer_addr).await, None); } #[tokio::test] @@ -787,7 +775,7 @@ mod tests { let leechers = swarm.metadata().leechers(); - swarm.remove(&leecher).await; + swarm.remove_peer(&leecher.peer_addr).await; assert_eq!(swarm.metadata().leechers(), leechers - 1); } @@ -803,7 +791,7 @@ mod tests { let seeders = swarm.metadata().seeders(); - swarm.remove(&seeder).await; + swarm.remove_peer(&seeder.peer_addr).await; assert_eq!(swarm.metadata().seeders(), seeders - 1); } @@ -983,7 +971,7 @@ mod tests { let mut downloads_increased = false; swarm.upsert_peer(peer.into(), &mut downloads_increased).await; - swarm.remove(&peer).await; + swarm.remove_peer(&peer.peer_addr).await; } #[tokio::test] From 52ac171063270edfb9b63549ae848157acd258da Mon Sep 17 00:00:00 2001 From: Jose Celano 
Date: Fri, 16 May 2025 10:12:18 +0100 Subject: [PATCH 618/802] chore(deps): update dependencies ```output cargo update Updating crates.io index Locking 32 packages to latest compatible versions Updating bitflags v2.9.0 -> v2.9.1 Updating cc v1.2.21 -> v1.2.22 Updating clap v4.5.37 -> v4.5.38 Updating clap_builder v4.5.37 -> v4.5.38 Updating errno v0.3.11 -> v0.3.12 Updating getrandom v0.3.2 -> v0.3.3 Updating icu_collections v1.5.0 -> v2.0.0 Adding icu_locale_core v2.0.0 Removing icu_locid v1.5.0 Removing icu_locid_transform v1.5.0 Removing icu_locid_transform_data v1.5.1 Updating icu_normalizer v1.5.0 -> v2.0.0 Updating icu_normalizer_data v1.5.1 -> v2.0.0 Updating icu_properties v1.5.1 -> v2.0.0 Updating icu_properties_data v1.5.1 -> v2.0.0 Updating icu_provider v1.5.0 -> v2.0.0 Removing icu_provider_macros v1.5.0 Updating idna_adapter v1.2.0 -> v1.2.1 Updating libloading v0.8.6 -> v0.8.7 Updating litemap v0.7.5 -> v0.8.0 Updating multimap v0.10.0 -> v0.10.1 Updating owo-colors v4.2.0 -> v4.2.1 Adding potential_utf v0.1.2 Updating rustls-webpki v0.103.2 -> v0.103.3 Updating tempfile v3.19.1 -> v3.20.0 Updating tinystr v0.7.6 -> v0.8.1 Updating tower-http v0.6.2 -> v0.6.4 Removing utf16_iter v1.0.5 Updating windows-core v0.61.0 -> v0.61.1 Updating windows-result v0.3.2 -> v0.3.3 Updating windows-strings v0.4.0 -> v0.4.1 Removing write16 v1.0.0 Updating writeable v0.5.5 -> v0.6.1 Updating yoke v0.7.5 -> v0.8.0 Updating yoke-derive v0.7.5 -> v0.8.0 Adding zerotrie v0.2.2 Updating zerovec v0.10.4 -> v0.11.2 Updating zerovec-derive v0.10.3 -> v0.11.1 ``` --- Cargo.lock | 236 ++++++++++++++++++++++++----------------------------- 1 file changed, 106 insertions(+), 130 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 75a272292..ab898e327 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -541,7 +541,7 @@ version = "0.71.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" 
dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "cexpr", "clang-sys", "itertools 0.13.0", @@ -567,9 +567,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" [[package]] name = "bittorrent-http-tracker-core" @@ -957,9 +957,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.21" +version = "1.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8691782945451c1c383942c4874dbe63814f61cb57ef773cda2972682b7bb3c0" +checksum = "32db95edf998450acc7881c932f94cd9b05c87b4b2599e8bab064753da4acfd1" dependencies = [ "jobserver", "libc", @@ -1050,9 +1050,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.37" +version = "4.5.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eccb054f56cbd38340b380d4a8e69ef1f02f1af43db2f0cc817a4774d80ae071" +checksum = "ed93b9805f8ba930df42c2590f05453d5ec36cbb85d018868a5b24d31f6ac000" dependencies = [ "clap_builder", "clap_derive", @@ -1060,9 +1060,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.37" +version = "4.5.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd9466fac8543255d3b1fcad4762c5e116ffe808c8a3043d4263cd4fd4862a2" +checksum = "379026ff283facf611b0ea629334361c4211d1b12ee01024eec1591133b04120" dependencies = [ "anstream", "anstyle", @@ -1457,9 +1457,9 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.11" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e" +checksum = 
"cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18" dependencies = [ "libc", "windows-sys 0.59.0", @@ -1825,9 +1825,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" dependencies = [ "cfg-if", "libc", @@ -2138,21 +2138,22 @@ dependencies = [ [[package]] name = "icu_collections" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" dependencies = [ "displaydoc", + "potential_utf", "yoke", "zerofrom", "zerovec", ] [[package]] -name = "icu_locid" -version = "1.5.0" +name = "icu_locale_core" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" dependencies = [ "displaydoc", "litemap", @@ -2161,31 +2162,11 @@ dependencies = [ "zerovec", ] -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" - [[package]] name = "icu_normalizer" -version = "1.5.0" +version = "2.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" dependencies = [ "displaydoc", "icu_collections", @@ -2193,67 +2174,54 @@ dependencies = [ "icu_properties", "icu_provider", "smallvec", - "utf16_iter", - "utf8_iter", - "write16", "zerovec", ] [[package]] name = "icu_normalizer_data" -version = "1.5.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = "icu_properties" -version = "1.5.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +checksum = "2549ca8c7241c82f59c80ba2a6f415d931c5b58d24fb8412caa1a1f02c49139a" dependencies = [ "displaydoc", "icu_collections", - "icu_locid_transform", + "icu_locale_core", "icu_properties_data", "icu_provider", - "tinystr", + "potential_utf", + "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" -version = "1.5.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" +checksum = "8197e866e47b68f8f7d95249e172903bec06004b18b2937f1095d40a0c57de04" [[package]] name = "icu_provider" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" dependencies = [ "displaydoc", - "icu_locid", - "icu_provider_macros", + "icu_locale_core", "stable_deref_trait", "tinystr", "writeable", "yoke", "zerofrom", + "zerotrie", "zerovec", ] -[[package]] 
-name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.101", -] - [[package]] name = "ident_case" version = "1.0.1" @@ -2273,9 +2241,9 @@ dependencies = [ [[package]] name = "idna_adapter" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" dependencies = [ "icu_normalizer", "icu_properties", @@ -2386,7 +2354,7 @@ version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.3", "libc", ] @@ -2423,12 +2391,12 @@ checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" [[package]] name = "libloading" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" +checksum = "6a793df0d7afeac54f95b471d3af7f0d4fb975699f972341a4b76988d49cdf0c" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.53.0", ] [[package]] @@ -2443,7 +2411,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "libc", "redox_syscall 0.5.12", ] @@ -2484,9 +2452,9 @@ checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" [[package]] name = "litemap" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "local-ip-address" @@ -2630,9 +2598,9 @@ dependencies = [ [[package]] name = "multimap" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" +checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" dependencies = [ "serde", ] @@ -2689,7 +2657,7 @@ dependencies = [ "base64 0.21.7", "bigdecimal", "bindgen", - "bitflags 2.9.0", + "bitflags 2.9.1", "bitvec", "btoi", "byteorder", @@ -2857,7 +2825,7 @@ version = "0.10.72" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fedfea7d58a1f73118430a55da6a286e7b044961736ce96a16a17068ea25e5da" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "cfg-if", "foreign-types", "libc", @@ -2903,9 +2871,9 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "owo-colors" -version = "4.2.0" +version = "4.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1036865bb9422d3300cf723f657c2851d0e9ab12567854b1f4eba3d77decf564" +checksum = "26995317201fa17f3656c36716aed4a7c81743a9634ac4c99c0eeda495db0cec" [[package]] name = "parking" @@ -3125,6 +3093,15 @@ dependencies = [ "portable-atomic", ] +[[package]] +name = "potential_utf" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +dependencies = [ + "zerovec", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -3369,7 +3346,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.3", ] [[package]] @@ 
-3407,7 +3384,7 @@ version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", ] [[package]] @@ -3588,7 +3565,7 @@ version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a22715a5d6deef63c637207afbe68d0c72c3f8d0022d7cf9714c442d6157606b" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -3639,7 +3616,7 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "errno", "libc", "linux-raw-sys 0.4.15", @@ -3652,7 +3629,7 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "errno", "libc", "linux-raw-sys 0.9.4", @@ -3705,9 +3682,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.2" +version = "0.103.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7149975849f1abb3832b246010ef62ccc80d3a76169517ada7188252b9cfb437" +checksum = "e4a72fe2bcf7a6ac6fd7d0b9e5cb68aeb7d4c0a0271730218b3e92d43b4eb435" dependencies = [ "ring", "rustls-pki-types", @@ -3777,7 +3754,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "core-foundation 0.9.4", "core-foundation-sys", "libc", @@ -3790,7 +3767,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = 
[ - "bitflags 2.9.0", + "bitflags 2.9.1", "core-foundation 0.10.0", "core-foundation-sys", "libc", @@ -4159,7 +4136,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "core-foundation 0.9.4", "system-configuration-sys", ] @@ -4199,12 +4176,12 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.19.1" +version = "3.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" dependencies = [ "fastrand", - "getrandom 0.3.2", + "getrandom 0.3.3", "once_cell", "rustix 1.0.7", "windows-sys 0.59.0", @@ -4357,9 +4334,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.7.6" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" dependencies = [ "displaydoc", "zerovec", @@ -4945,12 +4922,12 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.2" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" +checksum = "0fdb0c213ca27a9f57ab69ddb290fd80d970922355b83ae380b395d3986b8a2e" dependencies = [ "async-compression", - "bitflags 2.9.0", + "bitflags 2.9.1", "bytes", "futures-core", "http", @@ -5127,12 +5104,6 @@ dependencies = [ "serde", ] -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - [[package]] name = "utf8_iter" version = "1.0.4" @@ -5151,7 +5122,7 @@ version = 
"1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.3", "rand 0.9.1", ] @@ -5327,15 +5298,15 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.61.0" +version = "0.61.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4763c1de310c86d75a878046489e2e5ba02c649d185f21c67d4cf8a56d098980" +checksum = "46ec44dc15085cea82cf9c78f85a9114c463a369786585ad2882d1ff0b0acf40" dependencies = [ "windows-implement", "windows-interface", "windows-link", "windows-result", - "windows-strings 0.4.0", + "windows-strings 0.4.1", ] [[package]] @@ -5379,9 +5350,9 @@ dependencies = [ [[package]] name = "windows-result" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" +checksum = "4b895b5356fc36103d0f64dd1e94dfa7ac5633f1c9dd6e80fe9ec4adef69e09d" dependencies = [ "windows-link", ] @@ -5397,9 +5368,9 @@ dependencies = [ [[package]] name = "windows-strings" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2ba9642430ee452d5a7aa78d72907ebe8cfda358e8cb7918a2050581322f97" +checksum = "2a7ab927b2637c19b3dbe0965e75d8f2d30bdd697a1516191cad2ec4df8fb28a" dependencies = [ "windows-link", ] @@ -5565,20 +5536,14 @@ version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", ] -[[package]] -name = "write16" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" - [[package]] name = 
"writeable" -version = "0.5.5" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" [[package]] name = "wyz" @@ -5607,9 +5572,9 @@ checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" dependencies = [ "serde", "stable_deref_trait", @@ -5619,9 +5584,9 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", @@ -5697,11 +5662,22 @@ version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +[[package]] +name = "zerotrie" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + [[package]] name = "zerovec" -version = "0.10.4" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" dependencies = [ "yoke", "zerofrom", @@ -5710,9 +5686,9 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.10.3" +version = "0.11.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", From 8d3b948ec3218e09f5187674866b969b5ef73af3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 May 2025 13:13:37 +0100 Subject: [PATCH 619/802] tests: [#1504] remove integration tests from torrent-repository pacakge All features are now covered by unit tests. --- .../torrent-repository/tests/common/mod.rs | 1 - .../tests/common/torrent_peer_builder.rs | 106 ---- .../torrent-repository/tests/integration.rs | 22 - .../torrent-repository/tests/swarm/mod.rs | 397 ------------- .../torrent-repository/tests/swarms/mod.rs | 524 ------------------ 5 files changed, 1050 deletions(-) delete mode 100644 packages/torrent-repository/tests/common/mod.rs delete mode 100644 packages/torrent-repository/tests/common/torrent_peer_builder.rs delete mode 100644 packages/torrent-repository/tests/integration.rs delete mode 100644 packages/torrent-repository/tests/swarm/mod.rs delete mode 100644 packages/torrent-repository/tests/swarms/mod.rs diff --git a/packages/torrent-repository/tests/common/mod.rs b/packages/torrent-repository/tests/common/mod.rs deleted file mode 100644 index c77ca2769..000000000 --- a/packages/torrent-repository/tests/common/mod.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod torrent_peer_builder; diff --git a/packages/torrent-repository/tests/common/torrent_peer_builder.rs b/packages/torrent-repository/tests/common/torrent_peer_builder.rs deleted file mode 100644 index 0c065e670..000000000 --- a/packages/torrent-repository/tests/common/torrent_peer_builder.rs +++ /dev/null @@ -1,106 +0,0 @@ -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; -use torrust_tracker_clock::clock::Time; -use torrust_tracker_primitives::{peer, 
DurationSinceUnixEpoch}; - -use crate::CurrentClock; - -#[derive(Debug, Default)] -struct TorrentPeerBuilder { - peer: peer::Peer, -} - -#[allow(dead_code)] -impl TorrentPeerBuilder { - #[must_use] - fn new() -> Self { - Self { - peer: peer::Peer { - updated: CurrentClock::now(), - ..Default::default() - }, - } - } - - #[must_use] - fn with_event_completed(mut self) -> Self { - self.peer.event = AnnounceEvent::Completed; - self - } - - #[must_use] - fn with_event_started(mut self) -> Self { - self.peer.event = AnnounceEvent::Started; - self - } - - #[must_use] - fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; - self - } - - #[must_use] - fn with_peer_id(mut self, peer_id: PeerId) -> Self { - self.peer.peer_id = peer_id; - self - } - - #[must_use] - fn with_number_of_bytes_left(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes::new(left); - self - } - - #[must_use] - fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { - self.peer.updated = updated; - self - } - - #[must_use] - fn into(self) -> peer::Peer { - self.peer - } -} - -/// A torrent seeder is a peer with 0 bytes left to download which -/// has not announced it has stopped -#[allow(clippy::cast_sign_loss)] -#[allow(clippy::cast_possible_truncation)] -#[must_use] -pub fn a_completed_peer(id: i32) -> peer::Peer { - let peer_id = peer::Id::new(id); - let peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), id as u16); - - TorrentPeerBuilder::new() - .with_number_of_bytes_left(0) - .with_event_completed() - .with_peer_id(*peer_id) - .with_peer_address(peer_addr) - .into() -} - -/// A torrent leecher is a peer that is not a seeder. -/// Leecher: left > 0 OR event = Stopped -/// -/// # Panics -/// -/// This function panics if proved id can't be converted into a valid socket address port. -/// -/// The `id` argument is used to identify the peer in both the `peer_id` and the `peer_addr`. 
-#[allow(clippy::cast_sign_loss)] -#[allow(clippy::cast_possible_truncation)] -#[must_use] -pub fn a_started_peer(id: i32) -> peer::Peer { - let peer_id = peer::Id::new(id); - let peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), id as u16); - - TorrentPeerBuilder::new() - .with_number_of_bytes_left(1) - .with_event_started() - .with_peer_id(*peer_id) - .with_peer_address(peer_addr) - .into() -} diff --git a/packages/torrent-repository/tests/integration.rs b/packages/torrent-repository/tests/integration.rs deleted file mode 100644 index b3e057075..000000000 --- a/packages/torrent-repository/tests/integration.rs +++ /dev/null @@ -1,22 +0,0 @@ -//! Integration tests. -//! -//! ```text -//! cargo test --test integration -//! ``` - -use torrust_tracker_clock::clock; - -pub mod common; -mod swarm; -mod swarms; - -/// This code needs to be copied into each crate. -/// Working version, for production. -#[cfg(not(test))] -#[allow(dead_code)] -pub(crate) type CurrentClock = clock::Working; - -/// Stopped version, for testing. 
-#[cfg(test)] -#[allow(dead_code)] -pub(crate) type CurrentClock = clock::Stopped; diff --git a/packages/torrent-repository/tests/swarm/mod.rs b/packages/torrent-repository/tests/swarm/mod.rs deleted file mode 100644 index cb4009ba9..000000000 --- a/packages/torrent-repository/tests/swarm/mod.rs +++ /dev/null @@ -1,397 +0,0 @@ -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::ops::Sub; -use std::time::Duration; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use bittorrent_primitives::info_hash::InfoHash; -use rstest::{fixture, rstest}; -use torrust_tracker_clock::clock::stopped::Stopped as _; -use torrust_tracker_clock::clock::{self, Time as _}; -use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; -use torrust_tracker_primitives::peer; -use torrust_tracker_primitives::peer::Peer; -use torrust_tracker_torrent_repository::Swarm; - -use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; -use crate::CurrentClock; - -#[fixture] -fn swarm() -> Swarm { - Swarm::new(&InfoHash::default(), 0, None) -} - -#[fixture] -fn policy_none() -> TrackerPolicy { - TrackerPolicy::new(0, false, false) -} - -#[fixture] -fn policy_persist() -> TrackerPolicy { - TrackerPolicy::new(0, true, false) -} - -#[fixture] -fn policy_remove() -> TrackerPolicy { - TrackerPolicy::new(0, false, true) -} - -#[fixture] -fn policy_remove_persist() -> TrackerPolicy { - TrackerPolicy::new(0, true, true) -} - -pub enum Makes { - Empty, - Started, - Completed, - Downloaded, - Three, -} - -async fn make(swarm: &mut Swarm, makes: &Makes) -> Vec { - match makes { - Makes::Empty => vec![], - Makes::Started => { - let peer = a_started_peer(1); - swarm.handle_announcement(&peer).await; - vec![peer] - } - Makes::Completed => { - let peer = a_completed_peer(2); - swarm.handle_announcement(&peer).await; - vec![peer] - } - Makes::Downloaded => { - let mut peer = a_started_peer(3); - swarm.handle_announcement(&peer).await; - peer.event = 
AnnounceEvent::Completed; - peer.left = NumberOfBytes::new(0); - swarm.handle_announcement(&peer).await; - vec![peer] - } - Makes::Three => { - let peer_1 = a_started_peer(1); - swarm.handle_announcement(&peer_1).await; - - let peer_2 = a_completed_peer(2); - swarm.handle_announcement(&peer_2).await; - - let mut peer_3 = a_started_peer(3); - swarm.handle_announcement(&peer_3).await; - peer_3.event = AnnounceEvent::Completed; - peer_3.left = NumberOfBytes::new(0); - swarm.handle_announcement(&peer_3).await; - vec![peer_1, peer_2, peer_3] - } - } -} - -#[rstest] -#[case::empty(&Makes::Empty)] -#[tokio::test] -async fn it_should_be_empty_by_default(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - make(&mut swarm, makes).await; - - assert_eq!(swarm.len(), 0); -} - -#[rstest] -#[case::empty(&Makes::Empty)] -#[case::started(&Makes::Started)] -#[case::completed(&Makes::Completed)] -#[case::downloaded(&Makes::Downloaded)] -#[case::three(&Makes::Three)] -#[tokio::test] -async fn it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy( - #[values(swarm())] mut swarm: Swarm, - #[case] makes: &Makes, - #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, -) { - make(&mut swarm, makes).await; - - let has_peers = !swarm.is_empty(); - let has_downloads = swarm.metadata().downloaded != 0; - - match (policy.remove_peerless_torrents, policy.persistent_torrent_completed_stat) { - // remove torrents without peers, and keep completed download stats - (true, true) => match (has_peers, has_downloads) { - // no peers, but has downloads - // peers, with or without downloads - (false, true) | (true, true | false) => assert!(swarm.meets_retaining_policy(&policy)), - // no peers and no downloads - (false, false) => assert!(!swarm.meets_retaining_policy(&policy)), - }, - // remove torrents without peers and drop completed download stats - (true, false) => match (has_peers, has_downloads) { - // peers, with or 
without downloads - (true, true | false) => assert!(swarm.meets_retaining_policy(&policy)), - // no peers and with or without downloads - (false, true | false) => assert!(!swarm.meets_retaining_policy(&policy)), - }, - // keep torrents without peers, but keep or drop completed download stats - (false, true | false) => assert!(swarm.meets_retaining_policy(&policy)), - } -} - -#[rstest] -#[case::empty(&Makes::Empty)] -#[case::started(&Makes::Started)] -#[case::completed(&Makes::Completed)] -#[case::downloaded(&Makes::Downloaded)] -#[case::three(&Makes::Three)] -#[tokio::test] -async fn it_should_get_peers_for_torrent_entry(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - let peers = make(&mut swarm, makes).await; - - let torrent_peers = swarm.peers(None); - - assert_eq!(torrent_peers.len(), peers.len()); - - for peer in torrent_peers { - assert!(peers.contains(&peer)); - } -} - -#[rstest] -#[case::empty(&Makes::Empty)] -#[case::started(&Makes::Started)] -#[case::completed(&Makes::Completed)] -#[case::downloaded(&Makes::Downloaded)] -#[case::three(&Makes::Three)] -#[tokio::test] -async fn it_should_update_a_peer(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - make(&mut swarm, makes).await; - - // Make and insert a new peer. - let mut peer = a_started_peer(-1); - swarm.handle_announcement(&peer).await; - - // Get the Inserted Peer by Id. - let peers = swarm.peers(None); - let original = peers - .iter() - .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) - .expect("it should find peer by id"); - - assert_eq!(original.event, AnnounceEvent::Started, "it should be as created"); - - // Announce "Completed" torrent download event. - peer.event = AnnounceEvent::Completed; - swarm.handle_announcement(&peer).await; - - // Get the Updated Peer by Id. 
- let peers = swarm.peers(None); - let updated = peers - .iter() - .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) - .expect("it should find peer by id"); - - assert_eq!(updated.event, AnnounceEvent::Completed, "it should be updated"); -} - -#[rstest] -#[case::empty(&Makes::Empty)] -#[case::started(&Makes::Started)] -#[case::completed(&Makes::Completed)] -#[case::downloaded(&Makes::Downloaded)] -#[case::three(&Makes::Three)] -#[tokio::test] -async fn it_should_remove_a_peer_upon_stopped_announcement(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - use torrust_tracker_primitives::peer::ReadInfo as _; - - make(&mut swarm, makes).await; - - let mut peer = a_started_peer(-1); - - swarm.handle_announcement(&peer).await; - - // The started peer should be inserted. - let peers = swarm.peers(None); - let original = peers - .iter() - .find(|p| p.get_id() == peer.get_id()) - .expect("it should find peer by id"); - - assert_eq!(original.event, AnnounceEvent::Started); - - // Change peer to "Stopped" and insert. - peer.event = AnnounceEvent::Stopped; - swarm.handle_announcement(&peer).await; - - // It should be removed now. - let peers = swarm.peers(None); - - assert_eq!( - peers.iter().find(|p| p.get_id() == peer.get_id()), - None, - "it should be removed" - ); -} - -#[rstest] -#[case::started(&Makes::Started)] -#[case::completed(&Makes::Completed)] -#[case::downloaded(&Makes::Downloaded)] -#[case::three(&Makes::Three)] -#[tokio::test] -async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloaded_statistic( - #[values(swarm())] mut torrent: Swarm, - #[case] makes: &Makes, -) { - make(&mut torrent, makes).await; - let downloaded = torrent.metadata().downloaded; - - let peers = torrent.peers(None); - let mut peer = **peers.first().expect("there should be a peer"); - - let is_already_completed = peer.event == AnnounceEvent::Completed; - - // Announce "Completed" torrent download event. 
- peer.event = AnnounceEvent::Completed; - - torrent.handle_announcement(&peer).await; - let stats = torrent.metadata(); - - if is_already_completed { - assert_eq!(stats.downloaded, downloaded); - } else { - assert_eq!(stats.downloaded, downloaded + 1); - } -} - -#[rstest] -#[case::started(&Makes::Started)] -#[case::completed(&Makes::Completed)] -#[case::downloaded(&Makes::Downloaded)] -#[case::three(&Makes::Three)] -#[tokio::test] -async fn it_should_update_a_peer_as_a_seeder(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - let peers = make(&mut swarm, makes).await; - let completed = u32::try_from(peers.iter().filter(|p| p.is_seeder()).count()).expect("it_should_not_be_so_many"); - - let peers = swarm.peers(None); - let mut peer = **peers.first().expect("there should be a peer"); - - let is_already_non_left = peer.left == NumberOfBytes::new(0); - - // Set Bytes Left to Zero - peer.left = NumberOfBytes::new(0); - swarm.handle_announcement(&peer).await; - let stats = swarm.metadata(); - - if is_already_non_left { - // it was already complete - assert_eq!(stats.complete, completed); - } else { - // now it is complete - assert_eq!(stats.complete, completed + 1); - } -} - -#[rstest] -#[case::started(&Makes::Started)] -#[case::completed(&Makes::Completed)] -#[case::downloaded(&Makes::Downloaded)] -#[case::three(&Makes::Three)] -#[tokio::test] -async fn it_should_update_a_peer_as_incomplete(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - let peers = make(&mut swarm, makes).await; - let incomplete = u32::try_from(peers.iter().filter(|p| !p.is_seeder()).count()).expect("it should not be so many"); - - let peers = swarm.peers(None); - let mut peer = **peers.first().expect("there should be a peer"); - - let completed_already = peer.left == NumberOfBytes::new(0); - - // Set Bytes Left to no Zero - peer.left = NumberOfBytes::new(1); - swarm.handle_announcement(&peer).await; - let stats = swarm.metadata(); - - if completed_already { - // now it is 
incomplete - assert_eq!(stats.incomplete, incomplete + 1); - } else { - // was already incomplete - assert_eq!(stats.incomplete, incomplete); - } -} - -#[rstest] -#[case::started(&Makes::Started)] -#[case::completed(&Makes::Completed)] -#[case::downloaded(&Makes::Downloaded)] -#[case::three(&Makes::Three)] -#[tokio::test] -async fn it_should_get_peers_excluding_the_client_socket(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - make(&mut swarm, makes).await; - - let peers = swarm.peers(None); - let mut peer = **peers.first().expect("there should be a peer"); - - let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081); - - // for this test, we should not already use this socket. - assert_ne!(peer.peer_addr, socket); - - // it should get the peer as it dose not share the socket. - assert!(swarm.peers_excluding(&socket, None).contains(&peer.into())); - - // set the address to the socket. - peer.peer_addr = socket; - swarm.handle_announcement(&peer).await; // Add peer - - // It should not include the peer that has the same socket. 
- assert!(!swarm.peers_excluding(&socket, None).contains(&peer.into())); -} - -#[rstest] -#[case::empty(&Makes::Empty)] -#[case::started(&Makes::Started)] -#[case::completed(&Makes::Completed)] -#[case::downloaded(&Makes::Downloaded)] -#[case::three(&Makes::Three)] -#[tokio::test] -async fn it_should_limit_the_number_of_peers_returned(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - make(&mut swarm, makes).await; - - // We add one more peer than the scrape limit - for peer_number in 1..=74 + 1 { - let peer = a_started_peer(peer_number); - swarm.handle_announcement(&peer).await; - } - - let peers = swarm.peers(Some(TORRENT_PEERS_LIMIT)); - - assert_eq!(peers.len(), 74); -} - -#[rstest] -#[case::empty(&Makes::Empty)] -#[case::started(&Makes::Started)] -#[case::completed(&Makes::Completed)] -#[case::downloaded(&Makes::Downloaded)] -#[case::three(&Makes::Three)] -#[tokio::test] -async fn it_should_remove_inactive_peers_beyond_cutoff(#[values(swarm())] mut swarm: Swarm, #[case] makes: &Makes) { - const TIMEOUT: Duration = Duration::from_secs(120); - const EXPIRE: Duration = Duration::from_secs(121); - - let peers = make(&mut swarm, makes).await; - - let mut peer = a_completed_peer(-1); - - let now = clock::Working::now(); - clock::Stopped::local_set(&now); - - peer.updated = now.sub(EXPIRE); - - swarm.handle_announcement(&peer).await; - - assert_eq!(swarm.len(), peers.len() + 1); - - let current_cutoff = CurrentClock::now_sub(&TIMEOUT).unwrap_or_default(); - swarm.remove_inactive(current_cutoff).await; - - assert_eq!(swarm.len(), peers.len()); -} diff --git a/packages/torrent-repository/tests/swarms/mod.rs b/packages/torrent-repository/tests/swarms/mod.rs deleted file mode 100644 index 780d6cd4c..000000000 --- a/packages/torrent-repository/tests/swarms/mod.rs +++ /dev/null @@ -1,524 +0,0 @@ -use std::collections::{BTreeMap, HashSet}; -use std::hash::{DefaultHasher, Hash, Hasher}; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use 
bittorrent_primitives::info_hash::InfoHash; -use futures::future::join_all; -use rstest::{fixture, rstest}; -use torrust_tracker_configuration::TrackerPolicy; -use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::PersistentTorrents; -use torrust_tracker_torrent_repository::swarm::Swarm; -use torrust_tracker_torrent_repository::Swarms; - -use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; - -fn swarm() -> Swarm { - Swarm::new(&InfoHash::default(), 0, None) -} - -#[fixture] -fn swarms() -> Swarms { - Swarms::default() -} - -type Entries = Vec<(InfoHash, Swarm)>; - -#[fixture] -fn empty() -> Entries { - vec![] -} - -#[fixture] -fn default() -> Entries { - vec![(InfoHash::default(), swarm())] -} - -#[fixture] -async fn started() -> Entries { - let mut swarm = swarm(); - swarm.handle_announcement(&a_started_peer(1)).await; - vec![(InfoHash::default(), swarm)] -} - -#[fixture] -async fn completed() -> Entries { - let mut swarm = swarm(); - swarm.handle_announcement(&a_completed_peer(2)).await; - vec![(InfoHash::default(), swarm)] -} - -#[fixture] -async fn downloaded() -> Entries { - let mut swarm = swarm(); - let mut peer = a_started_peer(3); - swarm.handle_announcement(&peer).await; - peer.event = AnnounceEvent::Completed; - peer.left = NumberOfBytes::new(0); - swarm.handle_announcement(&peer).await; - vec![(InfoHash::default(), swarm)] -} - -#[fixture] -async fn three() -> Entries { - let mut started = swarm(); - let started_h = &mut DefaultHasher::default(); - started.handle_announcement(&a_started_peer(1)).await; - started.hash(started_h); - - let mut completed = swarm(); - let completed_h = &mut DefaultHasher::default(); - completed.handle_announcement(&a_completed_peer(2)).await; - completed.hash(completed_h); - - let mut downloaded = swarm(); - let downloaded_h = &mut DefaultHasher::default(); - let mut downloaded_peer = a_started_peer(3); 
- downloaded.handle_announcement(&downloaded_peer).await; - downloaded_peer.event = AnnounceEvent::Completed; - downloaded_peer.left = NumberOfBytes::new(0); - downloaded.handle_announcement(&downloaded_peer).await; - downloaded.hash(downloaded_h); - - vec![ - (InfoHash::from(&started_h.clone()), started), - (InfoHash::from(&completed_h.clone()), completed), - (InfoHash::from(&downloaded_h.clone()), downloaded), - ] -} - -#[fixture] -async fn many_out_of_order() -> Entries { - let mut entries: HashSet<(InfoHash, Swarm)> = HashSet::default(); - - for i in 0..408 { - let mut entry = swarm(); - entry.handle_announcement(&a_started_peer(i)).await; - - entries.insert((InfoHash::from(&i), entry)); - } - - // we keep the random order from the hashed set for the vector. - entries.iter().map(|(i, e)| (*i, e.clone())).collect() -} - -#[fixture] -async fn many_hashed_in_order() -> Entries { - let mut entries: BTreeMap = BTreeMap::default(); - - for i in 0..408 { - let mut entry = swarm(); - entry.handle_announcement(&a_started_peer(i)).await; - - let hash: &mut DefaultHasher = &mut DefaultHasher::default(); - hash.write_i32(i); - - entries.insert(InfoHash::from(&hash.clone()), entry); - } - - // We return the entries in-order from from the b-tree map. 
- entries.iter().map(|(i, e)| (*i, e.clone())).collect() -} - -#[fixture] -fn persistent_empty() -> PersistentTorrents { - PersistentTorrents::default() -} - -#[fixture] -fn persistent_single() -> PersistentTorrents { - let hash = &mut DefaultHasher::default(); - - hash.write_u8(1); - let t = [(InfoHash::from(&hash.clone()), 0_u32)]; - - t.iter().copied().collect() -} - -#[fixture] -fn persistent_three() -> PersistentTorrents { - let hash = &mut DefaultHasher::default(); - - hash.write_u8(1); - let info_1 = InfoHash::from(&hash.clone()); - hash.write_u8(2); - let info_2 = InfoHash::from(&hash.clone()); - hash.write_u8(3); - let info_3 = InfoHash::from(&hash.clone()); - - let t = [(info_1, 1_u32), (info_2, 2_u32), (info_3, 3_u32)]; - - t.iter().copied().collect() -} - -fn make(swarms: &Swarms, entries: &Entries) { - for (info_hash, swarm) in entries { - swarms.insert(info_hash, swarm.clone()); - } -} - -#[fixture] -fn paginated_limit_zero() -> Pagination { - Pagination::new(0, 0) -} - -#[fixture] -fn paginated_limit_one() -> Pagination { - Pagination::new(0, 1) -} - -#[fixture] -fn paginated_limit_one_offset_one() -> Pagination { - Pagination::new(1, 1) -} - -#[fixture] -fn policy_none() -> TrackerPolicy { - TrackerPolicy::new(0, false, false) -} - -#[fixture] -fn policy_persist() -> TrackerPolicy { - TrackerPolicy::new(0, true, false) -} - -#[fixture] -fn policy_remove() -> TrackerPolicy { - TrackerPolicy::new(0, false, true) -} - -#[fixture] -fn policy_remove_persist() -> TrackerPolicy { - TrackerPolicy::new(0, true, true) -} - -#[rstest] -#[case::empty(empty())] -#[case::default(default())] -#[case::started(started().await)] -#[case::completed(completed().await)] -#[case::downloaded(downloaded().await)] -#[case::three(three().await)] -#[case::out_of_order(many_out_of_order().await)] -#[case::in_order(many_hashed_in_order().await)] -#[tokio::test] -async fn it_should_get_a_torrent_entry(#[values(swarms())] repo: Swarms, #[case] entries: Entries) { - make(&repo, 
&entries); - - if let Some((info_hash, swarm)) = entries.first() { - assert_eq!(Some(repo.get(info_hash).unwrap().lock().await.clone()), Some(swarm.clone())); - } else { - assert!(repo.get(&InfoHash::default()).is_none()); - } -} - -#[rstest] -#[case::empty(empty())] -#[case::default(default())] -#[case::started(started().await)] -#[case::completed(completed().await)] -#[case::downloaded(downloaded().await)] -#[case::three(three().await)] -#[case::out_of_order(many_out_of_order().await)] -#[case::in_order(many_hashed_in_order().await)] -#[tokio::test] -async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( - #[values(swarms())] repo: Swarms, - #[case] entries: Entries, - #[future] many_out_of_order: Entries, -) { - make(&repo, &entries); - - let entries_a = repo.get_paginated(None).iter().map(|(i, _)| *i).collect::>(); - - make(&repo, &many_out_of_order.await); - - let entries_b = repo.get_paginated(None).iter().map(|(i, _)| *i).collect::>(); - - let is_equal = entries_b.iter().take(entries_a.len()).copied().collect::>() == entries_a; - - let is_sorted = entries_b.windows(2).all(|w| w[0] <= w[1]); - - assert!( - is_equal || is_sorted, - "The order is unstable: {is_equal}, or is sorted {is_sorted}." - ); -} - -#[rstest] -#[case::empty(empty())] -#[case::default(default())] -#[case::started(started().await)] -#[case::completed(completed().await)] -#[case::downloaded(downloaded().await)] -#[case::three(three().await)] -#[case::out_of_order(many_out_of_order().await)] -#[case::in_order(many_hashed_in_order().await)] -#[tokio::test] -async fn it_should_get_paginated( - #[values(swarms())] repo: Swarms, - #[case] entries: Entries, - #[values(paginated_limit_zero(), paginated_limit_one(), paginated_limit_one_offset_one())] paginated: Pagination, -) { - make(&repo, &entries); - - let mut info_hashes = repo.get_paginated(None).iter().map(|(i, _)| *i).collect::>(); - info_hashes.sort(); - - match paginated { - // it should return empty if limit is zero. 
- Pagination { limit: 0, .. } => { - let page = repo.get_paginated(Some(&paginated)); - - let futures = page.iter().map(|(i, swarm_handle)| { - let i = *i; - let swarm_handle = swarm_handle.clone(); - async move { (i, swarm_handle.lock().await.clone()) } - }); - - let swarms: Vec<(InfoHash, Swarm)> = join_all(futures).await; - - assert_eq!(swarms, vec![]); - } - - // it should return a single entry if the limit is one. - Pagination { limit: 1, offset: 0 } => { - if info_hashes.is_empty() { - assert_eq!(repo.get_paginated(Some(&paginated)).len(), 0); - } else { - let page = repo.get_paginated(Some(&paginated)); - assert_eq!(page.len(), 1); - assert_eq!(page.first().map(|(i, _)| i), info_hashes.first()); - } - } - - // it should return only the second entry if both the limit and the offset are one. - Pagination { limit: 1, offset: 1 } => { - if info_hashes.len() > 1 { - let page = repo.get_paginated(Some(&paginated)); - assert_eq!(page.len(), 1); - assert_eq!(page[0].0, info_hashes[1]); - } - } - - _ => {} - } -} - -#[rstest] -#[case::empty(empty())] -#[case::default(default())] -#[case::started(started().await)] -#[case::completed(completed().await)] -#[case::downloaded(downloaded().await)] -#[case::three(three().await)] -#[case::out_of_order(many_out_of_order().await)] -#[case::in_order(many_hashed_in_order().await)] -#[tokio::test] -async fn it_should_get_metrics(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; - - make(&swarms, &entries); - - let mut metrics = AggregateSwarmMetadata::default(); - - for (_, torrent) in entries { - let stats = torrent.metadata(); - - metrics.total_torrents += 1; - metrics.total_incomplete += u64::from(stats.incomplete); - metrics.total_complete += u64::from(stats.complete); - metrics.total_downloaded += u64::from(stats.downloaded); - } - - assert_eq!(swarms.get_aggregate_swarm_metadata().await.unwrap(), metrics); -} - -#[rstest] 
-#[case::empty(empty())] -#[case::default(default())] -#[case::started(started().await)] -#[case::completed(completed().await)] -#[case::downloaded(downloaded().await)] -#[case::three(three().await)] -#[case::out_of_order(many_out_of_order().await)] -#[case::in_order(many_hashed_in_order().await)] -#[tokio::test] -async fn it_should_import_persistent_torrents( - #[values(swarms())] swarms: Swarms, - #[case] entries: Entries, - #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: PersistentTorrents, -) { - make(&swarms, &entries); - - let mut downloaded = swarms.get_aggregate_swarm_metadata().await.unwrap().total_downloaded; - persistent_torrents.iter().for_each(|(_, d)| downloaded += u64::from(*d)); - - swarms.import_persistent(&persistent_torrents); - - assert_eq!( - swarms.get_aggregate_swarm_metadata().await.unwrap().total_downloaded, - downloaded - ); - - for (entry, _) in persistent_torrents { - assert!(swarms.get(&entry).is_some()); - } -} - -#[rstest] -#[case::empty(empty())] -#[case::default(default())] -#[case::started(started().await)] -#[case::completed(completed().await)] -#[case::downloaded(downloaded().await)] -#[case::three(three().await)] -#[case::out_of_order(many_out_of_order().await)] -#[case::in_order(many_hashed_in_order().await)] -#[tokio::test] -async fn it_should_remove_an_entry(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { - make(&swarms, &entries); - - for (info_hash, torrent) in entries { - assert_eq!( - Some(swarms.get(&info_hash).unwrap().lock().await.clone()), - Some(torrent.clone()) - ); - assert_eq!( - Some(swarms.remove(&info_hash).await.unwrap().lock().await.clone()), - Some(torrent) - ); - - assert!(swarms.get(&info_hash).is_none()); - assert!(swarms.remove(&info_hash).await.is_none()); - } - - assert_eq!(swarms.get_aggregate_swarm_metadata().await.unwrap().total_torrents, 0); -} - -#[rstest] -#[case::empty(empty())] -#[case::default(default())] 
-#[case::started(started().await)] -#[case::completed(completed().await)] -#[case::downloaded(downloaded().await)] -#[case::three(three().await)] -#[case::out_of_order(many_out_of_order().await)] -#[case::in_order(many_hashed_in_order().await)] -#[tokio::test] -async fn it_should_remove_inactive_peers(#[values(swarms())] swarms: Swarms, #[case] entries: Entries) { - use std::ops::Sub as _; - use std::time::Duration; - - use torrust_tracker_clock::clock::stopped::Stopped as _; - use torrust_tracker_clock::clock::{self, Time as _}; - use torrust_tracker_primitives::peer; - - use crate::CurrentClock; - - const TIMEOUT: Duration = Duration::from_secs(120); - const EXPIRE: Duration = Duration::from_secs(121); - - make(&swarms, &entries); - - let info_hash: InfoHash; - let mut peer: peer::Peer; - - // Generate a new infohash and peer. - { - let hash = &mut DefaultHasher::default(); - hash.write_u8(255); - info_hash = InfoHash::from(&hash.clone()); - peer = a_completed_peer(-1); - } - - // Set the last updated time of the peer to be 121 seconds ago. - { - let now = clock::Working::now(); - clock::Stopped::local_set(&now); - - peer.updated = now.sub(EXPIRE); - } - - // Insert the infohash and peer into the repository - // and verify there is an extra torrent entry. - { - swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); - assert_eq!( - swarms.get_aggregate_swarm_metadata().await.unwrap().total_torrents, - entries.len() as u64 + 1 - ); - } - - // Insert the infohash and peer into the repository - // and verify the swarm metadata was updated. - { - swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); - let stats = swarms.get_swarm_metadata(&info_hash).await.unwrap(); - assert_eq!( - stats, - Some(SwarmMetadata { - downloaded: 0, - complete: 1, - incomplete: 0 - }) - ); - } - - // Verify that this new peer was inserted into the repository. 
- { - let lock_tracked_torrent = swarms.get(&info_hash).expect("it_should_get_some"); - let entry = lock_tracked_torrent.lock().await; - assert!(entry.peers(None).contains(&peer.into())); - } - - // Remove peers that have not been updated since the timeout (120 seconds ago). - { - swarms - .remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed")) - .await - .unwrap(); - } - - // Verify that the this peer was removed from the repository. - { - let lock_tracked_torrent = swarms.get(&info_hash).expect("it_should_get_some"); - let entry = lock_tracked_torrent.lock().await; - assert!(!entry.peers(None).contains(&peer.into())); - } -} - -#[rstest] -#[case::empty(empty())] -#[case::default(default())] -#[case::started(started().await)] -#[case::completed(completed().await)] -#[case::downloaded(downloaded().await)] -#[case::three(three().await)] -#[case::out_of_order(many_out_of_order().await)] -#[case::in_order(many_hashed_in_order().await)] -#[tokio::test] -async fn it_should_remove_peerless_torrents( - #[values(swarms())] swarms: Swarms, - #[case] entries: Entries, - #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, -) { - make(&swarms, &entries); - - swarms.remove_peerless_torrents(&policy).await.unwrap(); - - let paginated = swarms.get_paginated(None); // ← store the result in a named variable - - let futures = paginated.iter().map(|(i, swarm_handle)| { - let i = *i; - let swarm_handle = swarm_handle.clone(); - async move { (i, swarm_handle.lock().await.clone()) } - }); - - let torrents: Vec<(InfoHash, Swarm)> = join_all(futures).await; - - for (_, entry) in torrents { - assert!(entry.meets_retaining_policy(&policy)); - } -} From c2dabb2fcc5a4bddacfc21c68f0e626a547c82af Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 May 2025 13:15:24 +0100 Subject: [PATCH 620/802] chore: [#1504] remove uneeded fn attribute --- packages/torrent-repository/src/swarms.rs | 1 - 1 file 
changed, 1 deletion(-) diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 8b8327778..ac2490853 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -49,7 +49,6 @@ impl Swarms { /// # Errors /// /// This function panics if the lock for the swarm handle cannot be acquired. - #[allow(clippy::await_holding_lock)] pub async fn handle_announcement( &self, info_hash: &InfoHash, From 1472c8e99ac145ec03140f719e08786e750892ca Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 16 May 2025 13:17:54 +0100 Subject: [PATCH 621/802] refactor: [#1504] remove unneded trait implementationis for Swarm --- packages/torrent-repository/src/swarm.rs | 37 ------------------------ 1 file changed, 37 deletions(-) diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 8cf2982e6..f25304979 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -1,8 +1,6 @@ //! A swarm is a collection of peers that are all trying to download the same //! torrent. 
use std::collections::BTreeMap; -use std::fmt::Debug; -use std::hash::{Hash, Hasher}; use std::net::SocketAddr; use std::sync::Arc; @@ -24,31 +22,6 @@ pub struct Swarm { event_sender: Sender, } -#[allow(clippy::missing_fields_in_debug)] -impl Debug for Swarm { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("Swarm") - .field("peers", &self.peers) - .field("metadata", &self.metadata) - .finish() - } -} - -impl Hash for Swarm { - fn hash(&self, state: &mut H) { - self.peers.hash(state); - self.metadata.hash(state); - } -} - -impl PartialEq for Swarm { - fn eq(&self, other: &Self) -> bool { - self.peers == other.peers && self.metadata == other.metadata - } -} - -impl Eq for Swarm {} - impl Swarm { #[must_use] pub fn new(info_hash: &InfoHash, downloaded: u32, event_sender: Sender) -> Self { @@ -329,16 +302,6 @@ mod tests { use crate::swarm::Swarm; use crate::tests::sample_info_hash; - #[test] - fn it_should_allow_debugging() { - let swarm = Swarm::new(&sample_info_hash(), 0, None); - - assert_eq!( - format!("{swarm:?}"), - "Swarm { peers: {}, metadata: SwarmMetadata { downloaded: 0, complete: 0, incomplete: 0 } }" - ); - } - #[test] fn it_should_be_empty_when_no_peers_have_been_inserted() { let swarm = Swarm::new(&sample_info_hash(), 0, None); From 85d9d3562bfaca3295f0cf2c3e879e061e7169ac Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 19 May 2025 10:48:31 +0100 Subject: [PATCH 622/802] refactor: [#1493] remove duplicate code for Peer buidler --- .../tests/server/v1/contract.rs | 14 ++-- packages/primitives/src/peer.rs | 33 ++++++-- .../tests/common/torrent_peer_builder.rs | 80 ++----------------- .../src/handlers/announce.rs | 27 ++++--- .../udp-tracker-server/src/handlers/mod.rs | 51 +----------- .../udp-tracker-server/src/handlers/scrape.rs | 9 ++- 6 files changed, 61 insertions(+), 153 deletions(-) diff --git a/packages/axum-http-tracker-server/tests/server/v1/contract.rs 
b/packages/axum-http-tracker-server/tests/server/v1/contract.rs index d864ba67c..d9ac2e1e1 100644 --- a/packages/axum-http-tracker-server/tests/server/v1/contract.rs +++ b/packages/axum-http-tracker-server/tests/server/v1/contract.rs @@ -1012,7 +1012,7 @@ mod for_all_config_modes { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) + .with_bytes_left_to_download(1) .build(), ) .await; @@ -1053,7 +1053,7 @@ mod for_all_config_modes { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_no_bytes_pending_to_download() + .with_no_bytes_left_to_download() .build(), ) .await; @@ -1286,7 +1286,7 @@ mod configured_as_whitelisted { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) + .with_bytes_left_to_download(1) .build(), ) .await; @@ -1323,7 +1323,7 @@ mod configured_as_whitelisted { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) + .with_bytes_left_to_download(1) .build(), ) .await; @@ -1500,7 +1500,7 @@ mod configured_as_private { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) + .with_bytes_left_to_download(1) .build(), ) .await; @@ -1532,7 +1532,7 @@ mod configured_as_private { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) + .with_bytes_left_to_download(1) .build(), ) .await; @@ -1584,7 +1584,7 @@ mod configured_as_private { &info_hash, &PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) + .with_bytes_left_to_download(1) .build(), ) .await; diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index 57ca3909d..c271ee5d6 100644 --- a/packages/primitives/src/peer.rs +++ 
b/packages/primitives/src/peer.rs @@ -558,21 +558,30 @@ pub mod fixture { self } - #[allow(dead_code)] #[must_use] - pub fn with_bytes_pending_to_download(mut self, left: i64) -> Self { + pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; + self + } + + #[must_use] + pub fn updated_on(mut self, updated: DurationSinceUnixEpoch) -> Self { + self.peer.updated = updated; + self + } + + #[must_use] + pub fn with_bytes_left_to_download(mut self, left: i64) -> Self { self.peer.left = NumberOfBytes::new(left); self } - #[allow(dead_code)] #[must_use] - pub fn with_no_bytes_pending_to_download(mut self) -> Self { + pub fn with_no_bytes_left_to_download(mut self) -> Self { self.peer.left = NumberOfBytes::new(0); self } - #[allow(dead_code)] #[must_use] pub fn last_updated_on(mut self, updated: DurationSinceUnixEpoch) -> Self { self.peer.updated = updated; @@ -585,13 +594,23 @@ pub mod fixture { self } - #[allow(dead_code)] + #[must_use] + pub fn with_event_started(mut self) -> Self { + self.peer.event = AnnounceEvent::Started; + self + } + + #[must_use] + pub fn with_event_completed(mut self) -> Self { + self.peer.event = AnnounceEvent::Completed; + self + } + #[must_use] pub fn build(self) -> Peer { self.into() } - #[allow(dead_code)] #[must_use] pub fn into(self) -> Peer { self.peer diff --git a/packages/torrent-repository-benchmarking/tests/common/torrent_peer_builder.rs b/packages/torrent-repository-benchmarking/tests/common/torrent_peer_builder.rs index 33120180d..48aa981cd 100644 --- a/packages/torrent-repository-benchmarking/tests/common/torrent_peer_builder.rs +++ b/packages/torrent-repository-benchmarking/tests/common/torrent_peer_builder.rs @@ -1,79 +1,15 @@ -use std::net::SocketAddr; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; -use torrust_tracker_clock::clock::Time; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; - -use crate::CurrentClock; - -#[derive(Debug, Default)] 
-struct TorrentPeerBuilder { - peer: peer::Peer, -} - -#[allow(dead_code)] -impl TorrentPeerBuilder { - #[must_use] - fn new() -> Self { - Self { - peer: peer::Peer { - updated: CurrentClock::now(), - ..Default::default() - }, - } - } - - #[must_use] - fn with_event_completed(mut self) -> Self { - self.peer.event = AnnounceEvent::Completed; - self - } - - #[must_use] - fn with_event_started(mut self) -> Self { - self.peer.event = AnnounceEvent::Started; - self - } - - #[must_use] - fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; - self - } - - #[must_use] - fn with_peer_id(mut self, peer_id: PeerId) -> Self { - self.peer.peer_id = peer_id; - self - } - - #[must_use] - fn with_number_of_bytes_left(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes::new(left); - self - } - - #[must_use] - fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { - self.peer.updated = updated; - self - } - - #[must_use] - fn into(self) -> peer::Peer { - self.peer - } -} +use torrust_tracker_primitives::peer::fixture::PeerBuilder; +use torrust_tracker_primitives::peer::{self}; /// A torrent seeder is a peer with 0 bytes left to download which /// has not announced it has stopped #[must_use] pub fn a_completed_peer(id: i32) -> peer::Peer { let peer_id = peer::Id::new(id); - TorrentPeerBuilder::new() - .with_number_of_bytes_left(0) + PeerBuilder::default() + .with_bytes_left_to_download(0) .with_event_completed() - .with_peer_id(*peer_id) + .with_peer_id(&peer_id) .into() } @@ -82,9 +18,9 @@ pub fn a_completed_peer(id: i32) -> peer::Peer { #[must_use] pub fn a_started_peer(id: i32) -> peer::Peer { let peer_id = peer::Id::new(id); - TorrentPeerBuilder::new() - .with_number_of_bytes_left(1) + PeerBuilder::default() + .with_bytes_left_to_download(1) .with_event_started() - .with_peer_id(*peer_id) + .with_peer_id(&peer_id) .into() } diff --git a/packages/udp-tracker-server/src/handlers/announce.rs 
b/packages/udp-tracker-server/src/handlers/announce.rs index 65b521f27..567f43740 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -207,6 +207,7 @@ mod tests { use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use mockall::predicate::eq; use torrust_tracker_events::bus::SenderStatus; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event, UdpRequestKind}; @@ -216,7 +217,6 @@ mod tests { initialize_core_tracker_services_for_default_tracker_configuration, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_socket_address, sample_issue_time, CoreTrackerServices, CoreUdpTrackerServices, MockUdpServerStatsEventSender, - TorrentPeerBuilder, }; #[tokio::test] @@ -258,8 +258,8 @@ mod tests { .get_torrent_peers(&info_hash.0.into()) .await; - let expected_peer = TorrentPeerBuilder::new() - .with_peer_id(peer_id) + let expected_peer = PeerBuilder::default() + .with_peer_id(&peer_id) .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip), client_port)) .updated_on(peers[0].updated) .into(); @@ -364,8 +364,8 @@ mod tests { let client_port = 8080; let peer_id = AquaticPeerId([255u8; 20]); - let peer_using_ipv6 = TorrentPeerBuilder::new() - .with_peer_id(peer_id) + let peer_using_ipv6 = PeerBuilder::default() + .with_peer_id(&peer_id) .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); @@ -466,13 +466,13 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use 
crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::handlers::handle_announce; use crate::handlers::tests::{ initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_issue_time, - TorrentPeerBuilder, }; #[tokio::test] @@ -516,8 +516,8 @@ mod tests { let external_ip_in_tracker_configuration = core_tracker_services.core_config.net.external_ip.unwrap(); - let expected_peer = TorrentPeerBuilder::new() - .with_peer_id(peer_id) + let expected_peer = PeerBuilder::default() + .with_peer_id(&peer_id) .with_peer_address(SocketAddr::new(external_ip_in_tracker_configuration, client_port)) .updated_on(peers[0].updated) .into(); @@ -547,6 +547,7 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_configuration::Core; use torrust_tracker_events::bus::SenderStatus; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event, UdpRequestKind}; @@ -555,7 +556,7 @@ mod tests { use crate::handlers::tests::{ initialize_core_tracker_services_for_default_tracker_configuration, initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv6_remote_addr, - sample_issue_time, MockUdpServerStatsEventSender, TorrentPeerBuilder, + sample_issue_time, MockUdpServerStatsEventSender, }; #[tokio::test] @@ -598,8 +599,8 @@ mod tests { .get_torrent_peers(&info_hash.0.into()) .await; - let expected_peer = TorrentPeerBuilder::new() - .with_peer_id(peer_id) + let expected_peer = PeerBuilder::default() + .with_peer_id(&peer_id) .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .updated_on(peers[0].updated) .into(); @@ -707,8 +708,8 @@ mod tests { let client_port = 8080; let peer_id = AquaticPeerId([255u8; 20]); - let peer_using_ipv4 = TorrentPeerBuilder::new() - .with_peer_id(peer_id) + let peer_using_ipv4 = PeerBuilder::default() + .with_peer_id(&peer_id) 
.with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index ca834c006..831073333 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -208,7 +208,6 @@ pub(crate) mod tests { use std::ops::Range; use std::sync::Arc; - use aquatic_udp_protocol::{NumberOfBytes, PeerId}; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; @@ -225,14 +224,12 @@ pub(crate) mod tests { use bittorrent_udp_tracker_core::{self, event as core_event}; use futures::future::BoxFuture; use mockall::mock; - use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_events::sender::SendError; - use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; use torrust_tracker_test_helpers::configuration; - use crate::{event as server_event, CurrentClock}; + use crate::event as server_event; pub(crate) struct CoreTrackerServices { pub core_config: Arc, @@ -360,52 +357,6 @@ pub(crate) mod tests { sample_issue_time() - 10.0..sample_issue_time() + 10.0 } - #[derive(Debug, Default)] - pub(crate) struct TorrentPeerBuilder { - peer: peer::Peer, - } - - impl TorrentPeerBuilder { - #[must_use] - pub fn new() -> Self { - Self { - peer: peer::Peer { - updated: CurrentClock::now(), - ..Default::default() - }, - } - } - - #[must_use] - pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; - self - } - - #[must_use] - pub fn with_peer_id(mut self, peer_id: PeerId) -> Self { - self.peer.peer_id = peer_id; - self - } - - #[must_use] - pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { - self.peer.left = 
NumberOfBytes::new(left); - self - } - - #[must_use] - pub fn updated_on(mut self, updated: DurationSinceUnixEpoch) -> Self { - self.peer.updated = updated; - self - } - - #[must_use] - pub fn into(self) -> peer::Peer { - self.peer - } - } - pub(crate) struct TrackerConfigurationBuilder { configuration: Configuration, } diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index e35e118b4..a9462e0f9 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -93,6 +93,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; use torrust_tracker_events::bus::SenderStatus; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::bus::EventBus; @@ -100,7 +101,7 @@ mod tests { use crate::handlers::handle_scrape; use crate::handlers::tests::{ initialize_core_tracker_services_for_public_tracker, sample_cookie_valid_range, sample_ipv4_remote_addr, - sample_issue_time, CoreTrackerServices, CoreUdpTrackerServices, TorrentPeerBuilder, + sample_issue_time, CoreTrackerServices, CoreUdpTrackerServices, }; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { @@ -158,10 +159,10 @@ mod tests { ) { let peer_id = PeerId([255u8; 20]); - let peer = TorrentPeerBuilder::new() - .with_peer_id(peer_id) + let peer = PeerBuilder::default() + .with_peer_id(&peer_id) .with_peer_address(*remote_addr) - .with_number_of_bytes_left(0) + .with_bytes_left_to_download(0) .into(); let _number_of_downloads_increased = in_memory_torrent_repository From b11af88ee3981faa92d26f64b6216d56ec1ff473 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 19 May 2025 17:51:21 +0100 Subject: [PATCH 623/802] feat: [#1522] add events metrics in 
torrent-repository These new metrics just count the number of times events have ocurred. --- .../src/statistics/event/handler.rs | 206 ++++++++++++++++-- .../torrent-repository/src/statistics/mod.rs | 41 +++- 2 files changed, 229 insertions(+), 18 deletions(-) diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/torrent-repository/src/statistics/event/handler.rs index 2b61839b8..f8d350a80 100644 --- a/packages/torrent-repository/src/statistics/event/handler.rs +++ b/packages/torrent-repository/src/statistics/event/handler.rs @@ -8,7 +8,9 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; use crate::statistics::repository::Repository; use crate::statistics::{ - TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL, TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL, + TORRENT_REPOSITORY_PEERS_ADDED_TOTAL, TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL, TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL, + TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL, TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL, + TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL, TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL, }; pub async fn handle_event(event: Event, stats_repository: &Arc, now: DurationSinceUnixEpoch) { @@ -20,6 +22,14 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: let _unused = stats_repository .increment_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) .await; + + let _unused = stats_repository + .increment_counter( + &metric_name!(TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL), + &LabelSet::default(), + now, + ) + .await; } Event::TorrentRemoved { info_hash } => { tracing::debug!(info_hash = ?info_hash, "Torrent removed",); @@ -27,29 +37,41 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: let _unused = stats_repository .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) .await; + + let 
_unused = stats_repository + .increment_counter( + &metric_name!(TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL), + &LabelSet::default(), + now, + ) + .await; } // Peer events Event::PeerAdded { info_hash, peer } => { tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer added", ); + let label_set = label_set_for_peer(&peer); + let _unused = stats_repository - .increment_gauge( - &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), - &label_set_for_peer(&peer), - now, - ) + .increment_gauge(&metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), &label_set, now) + .await; + + let _unused = stats_repository + .increment_counter(&metric_name!(TORRENT_REPOSITORY_PEERS_ADDED_TOTAL), &label_set, now) .await; } Event::PeerRemoved { info_hash, peer } => { tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer removed", ); + let label_set = label_set_for_peer(&peer); + let _unused = stats_repository - .decrement_gauge( - &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), - &label_set_for_peer(&peer), - now, - ) + .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), &label_set, now) + .await; + + let _unused = stats_repository + .increment_counter(&metric_name!(TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL), &label_set, now) .await; } Event::PeerUpdated { @@ -76,6 +98,12 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: ) .await; } + + let label_set = label_set_for_peer(&new_peer); + + let _unused = stats_repository + .increment_counter(&metric_name!(TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL), &label_set, now) + .await; } Event::PeerDownloadCompleted { info_hash, peer } => { tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer download completed", ); @@ -92,7 +120,7 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: } /// Returns the label set to be included in the metrics for the given peer. 
-fn label_set_for_peer(peer: &Peer) -> LabelSet { +pub(crate) fn label_set_for_peer(peer: &Peer) -> LabelSet { if peer.is_seeder() { (label_name!("peer_role"), LabelValue::new("seeder")).into() } else { @@ -135,7 +163,7 @@ mod tests { opposite_role_peer } - async fn expect_counter_metric_to_be( + pub async fn expect_counter_metric_to_be( stats_repository: &Arc, metric_name: &MetricName, label_set: &LabelSet, @@ -186,9 +214,11 @@ mod tests { use crate::event::Event; use crate::statistics::event::handler::handle_event; - use crate::statistics::event::handler::tests::expect_gauge_metric_to_be; + use crate::statistics::event::handler::tests::{expect_counter_metric_to_be, expect_gauge_metric_to_be}; use crate::statistics::repository::Repository; - use crate::statistics::TORRENT_REPOSITORY_TORRENTS_TOTAL; + use crate::statistics::{ + TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL, TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL, + }; use crate::tests::{sample_info_hash, sample_peer}; use crate::CurrentClock; @@ -242,9 +272,73 @@ mod tests { expect_gauge_metric_to_be(&stats_repository, &metric_name, &label_set, 0.0).await; } + + #[tokio::test] + async fn it_should_increment_the_number_of_torrents_added_when_a_torrent_added_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + handle_event( + Event::TorrentAdded { + info_hash: sample_info_hash(), + announcement: sample_peer(), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_counter_metric_to_be( + &stats_repository, + &metric_name!(TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL), + &LabelSet::default(), + 1, + ) + .await; + } + + #[tokio::test] + async fn it_should_increment_the_number_of_torrents_removed_when_a_torrent_removed_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + handle_event( + Event::TorrentRemoved { + info_hash: 
sample_info_hash(), + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_counter_metric_to_be( + &stats_repository, + &metric_name!(TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL), + &LabelSet::default(), + 1, + ) + .await; + } } mod for_peer_metrics { + use std::sync::Arc; + + use torrust_tracker_clock::clock::stopped::Stopped; + use torrust_tracker_clock::clock::{self, Time}; + use torrust_tracker_metrics::metric_name; + + use crate::event::Event; + use crate::statistics::event::handler::tests::expect_counter_metric_to_be; + use crate::statistics::event::handler::{handle_event, label_set_for_peer}; + use crate::statistics::repository::Repository; + use crate::statistics::{ + TORRENT_REPOSITORY_PEERS_ADDED_TOTAL, TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL, TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL, + }; + use crate::tests::{sample_info_hash, sample_peer}; + use crate::CurrentClock; mod peer_connections_total { @@ -383,6 +477,88 @@ mod tests { } } + #[tokio::test] + async fn it_should_increment_the_number_of_peers_added_when_a_peer_added_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + let peer = sample_peer(); + + handle_event( + Event::PeerAdded { + info_hash: sample_info_hash(), + peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_counter_metric_to_be( + &stats_repository, + &metric_name!(TORRENT_REPOSITORY_PEERS_ADDED_TOTAL), + &label_set_for_peer(&peer), + 1, + ) + .await; + } + + #[tokio::test] + async fn it_should_increment_the_number_of_peers_removed_when_a_peer_removed_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + let peer = sample_peer(); + + handle_event( + Event::PeerRemoved { + info_hash: sample_info_hash(), + peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_counter_metric_to_be( + &stats_repository, + 
&metric_name!(TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL), + &label_set_for_peer(&peer), + 1, + ) + .await; + } + + #[tokio::test] + async fn it_should_increment_the_number_of_peers_updated_when_a_peer_updated_event_is_received() { + clock::Stopped::local_set_to_unix_epoch(); + + let stats_repository = Arc::new(Repository::new()); + + let new_peer = sample_peer(); + + handle_event( + Event::PeerUpdated { + info_hash: sample_info_hash(), + old_peer: sample_peer(), + new_peer, + }, + &stats_repository, + CurrentClock::now(), + ) + .await; + + expect_counter_metric_to_be( + &stats_repository, + &metric_name!(TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL), + &label_set_for_peer(&new_peer), + 1, + ) + .await; + } + mod torrent_downloads_total { use std::sync::Arc; diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs index 18dcf83ea..7d3ad85ce 100644 --- a/packages/torrent-repository/src/statistics/mod.rs +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -9,11 +9,18 @@ use torrust_tracker_metrics::unit::Unit; // Torrent metrics +const TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL: &str = "torrent_repository_torrents_added_total"; +const TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL: &str = "torrent_repository_torrents_removed_total"; + const TORRENT_REPOSITORY_TORRENTS_TOTAL: &str = "torrent_repository_torrents_total"; const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_torrents_downloads_total"; // Peers metrics +const TORRENT_REPOSITORY_PEERS_ADDED_TOTAL: &str = "torrent_repository_peers_added_total"; +const TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL: &str = "torrent_repository_peers_removed_total"; +const TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL: &str = "torrent_repository_peers_updated_total"; + const TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL: &str = "torrent_repository_peer_connections_total"; const TORRENT_REPOSITORY_UNIQUE_PEERS_TOTAL: &str = "torrent_repository_unique_peers_total"; // todo: not 
implemented yet @@ -23,6 +30,18 @@ pub fn describe_metrics() -> Metrics { // Torrent metrics + metrics.metric_collection.describe_counter( + &metric_name!(TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of torrents added.")), + ); + + metrics.metric_collection.describe_counter( + &metric_name!(TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of torrents removed.")), + ); + metrics.metric_collection.describe_gauge( &metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), Some(Unit::Count), @@ -32,13 +51,29 @@ pub fn describe_metrics() -> Metrics { metrics.metric_collection.describe_counter( &metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new( - "The total number of torrent downloads (since the tracker process started).", - )), + Some(&MetricDescription::new("The total number of torrent downloads.")), ); // Peers metrics + metrics.metric_collection.describe_counter( + &metric_name!(TORRENT_REPOSITORY_PEERS_ADDED_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of peers added.")), + ); + + metrics.metric_collection.describe_counter( + &metric_name!(TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of peers removed.")), + ); + + metrics.metric_collection.describe_counter( + &metric_name!(TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of peers updated.")), + ); + metrics.metric_collection.describe_gauge( &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), Some(Unit::Count), From 260f7ffbe557d84ae400f152c4fc3c9980eb4b27 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 20 May 2025 12:07:45 +0100 Subject: [PATCH 624/802] feat: [#1523] add new metric: number of inactive peers The metric is added to the `torrent-repository` package. 
The metric in Prometheus format: ``` torrent_repository_peers_inactive_total{} 0 ``` It was not included as a new label in the number of peers because it can't be calculated from current events. New inactivity events could have been added but the solution was much more complex than this and having two metrics counting peers is not so bad. The discarded alternative was addinga new label por satte (`active`, `inactive`). --- Cargo.lock | 1 + packages/torrent-repository/Cargo.toml | 1 + .../torrent-repository/src/statistics/mod.rs | 8 +++ .../src/statistics/peers_inactivity_update.rs | 72 +++++++++++++++++++ .../src/statistics/repository.rs | 25 +++++++ packages/torrent-repository/src/swarm.rs | 24 +++++++ packages/torrent-repository/src/swarms.rs | 28 ++++++++ packages/tracker-core/src/torrent/manager.rs | 10 ++- src/app.rs | 19 ++++- src/bootstrap/jobs/mod.rs | 1 + src/bootstrap/jobs/peers_inactivity_update.rs | 27 +++++++ 11 files changed, 211 insertions(+), 5 deletions(-) create mode 100644 packages/torrent-repository/src/statistics/peers_inactivity_update.rs create mode 100644 src/bootstrap/jobs/peers_inactivity_update.rs diff --git a/Cargo.lock b/Cargo.lock index ab898e327..6e4ab415f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4832,6 +4832,7 @@ dependencies = [ "aquatic_udp_protocol", "async-std", "bittorrent-primitives", + "chrono", "criterion", "crossbeam-skiplist", "futures", diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml index 98ae5817d..510a59e9d 100644 --- a/packages/torrent-repository/Cargo.toml +++ b/packages/torrent-repository/Cargo.toml @@ -18,6 +18,7 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" +chrono = { version = "0", default-features = false, features = ["clock"] } crossbeam-skiplist = "0" futures = "0" serde = { version = "1.0.219", features = ["derive"] } diff --git a/packages/torrent-repository/src/statistics/mod.rs 
b/packages/torrent-repository/src/statistics/mod.rs index 7d3ad85ce..0f8a839ca 100644 --- a/packages/torrent-repository/src/statistics/mod.rs +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -1,5 +1,6 @@ pub mod event; pub mod metrics; +pub mod peers_inactivity_update; pub mod repository; use metrics::Metrics; @@ -23,6 +24,7 @@ const TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL: &str = "torrent_repository_peers_u const TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL: &str = "torrent_repository_peer_connections_total"; const TORRENT_REPOSITORY_UNIQUE_PEERS_TOTAL: &str = "torrent_repository_unique_peers_total"; // todo: not implemented yet +const TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL: &str = "torrent_repository_peers_inactive_total"; #[must_use] pub fn describe_metrics() -> Metrics { @@ -88,5 +90,11 @@ pub fn describe_metrics() -> Metrics { Some(&MetricDescription::new("The total number of unique peers.")), ); + metrics.metric_collection.describe_gauge( + &metric_name!(TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of inactive peers.")), + ); + metrics } diff --git a/packages/torrent-repository/src/statistics/peers_inactivity_update.rs b/packages/torrent-repository/src/statistics/peers_inactivity_update.rs new file mode 100644 index 000000000..e388173a1 --- /dev/null +++ b/packages/torrent-repository/src/statistics/peers_inactivity_update.rs @@ -0,0 +1,72 @@ +//! Job that runs a task on intervals to update peers' inactivity metrics. 
+use std::sync::Arc; + +use chrono::Utc; +use tokio::task::JoinHandle; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use tracing::instrument; + +use super::repository::Repository; +use crate::statistics::TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL; +use crate::{CurrentClock, Swarms}; + +#[must_use] +#[instrument(skip(swarms, stats_repository))] +pub fn start_job( + swarms: &Arc, + stats_repository: &Arc, + inactivity_cutoff: DurationSinceUnixEpoch, +) -> JoinHandle<()> { + let weak_swarms = std::sync::Arc::downgrade(swarms); + let weak_stats_repository = std::sync::Arc::downgrade(stats_repository); + + let interval_in_secs = 15; // todo: make this configurable + + tokio::spawn(async move { + let interval = std::time::Duration::from_secs(interval_in_secs); + let mut interval = tokio::time::interval(interval); + interval.tick().await; + + loop { + tokio::select! 
{ + _ = tokio::signal::ctrl_c() => { + tracing::info!("Stopping peers inactivity metrics update job ..."); + break; + } + _ = interval.tick() => { + if let (Some(swarms), Some(stats_repository)) = (weak_swarms.upgrade(), weak_stats_repository.upgrade()) { + let start_time = Utc::now().time(); + + tracing::debug!("Updating peers inactivity metrics (executed every {} secs) ...", interval_in_secs); + + let inactive_peers_total = swarms.count_inactive_peers(inactivity_cutoff).await; + + tracing::info!(inactive_peers_total = inactive_peers_total); + + #[allow(clippy::cast_precision_loss)] + let inactive_peers_total = inactive_peers_total as f64; + + let _unused = stats_repository + .set_gauge( + &metric_name!(TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL), + &LabelSet::default(), + inactive_peers_total, + CurrentClock::now(), + ) + .await; + + tracing::debug!( + "Peers inactivity metrics updated in {} ms", + (Utc::now().time() - start_time).num_milliseconds() + ); + } else { + break; + } + } + } + } + }) +} diff --git a/packages/torrent-repository/src/statistics/repository.rs b/packages/torrent-repository/src/statistics/repository.rs index 1e376faf7..fe1292d00 100644 --- a/packages/torrent-repository/src/statistics/repository.rs +++ b/packages/torrent-repository/src/statistics/repository.rs @@ -57,6 +57,31 @@ impl Repository { result } + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// set the gauge. 
+ pub async fn set_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.set_gauge(metric_name, labels, value, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to set the gauge: {}", err), + } + + result + } + /// # Errors /// /// This function will return an error if the metric collection fails to diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index f25304979..d7a1ede87 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -118,6 +118,14 @@ impl Swarm { (seeders, leechers) } + #[must_use] + pub fn count_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> usize { + self.peers + .iter() + .filter(|(_, peer)| peer::ReadInfo::get_updated(&**peer) <= current_cutoff) + .count() + } + #[must_use] pub fn len(&self) -> usize { self.peers.len() @@ -435,6 +443,22 @@ mod tests { assert_eq!(swarm.peers_excluding(&peer2.peer_addr, None), [Arc::new(peer1)]); } + #[tokio::test] + async fn it_should_count_inactive_peers() { + let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut downloads_increased = false; + let one_second = DurationSinceUnixEpoch::new(1, 0); + + // Insert the peer + let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); + let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); + swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + + let inactive_peers_total = swarm.count_inactive_peers(last_update_time + one_second); + + assert_eq!(inactive_peers_total, 1); + } + #[tokio::test] async fn it_should_remove_inactive_peers() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); diff --git a/packages/torrent-repository/src/swarms.rs 
b/packages/torrent-repository/src/swarms.rs index ac2490853..811bf6a50 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -248,6 +248,18 @@ impl Swarms { } } + /// Counts the number of inactive peers across all torrents. + pub async fn count_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> usize { + let mut inactive_peers_total = 0; + + for swarm_handle in &self.swarms { + let swarm = swarm_handle.value().lock().await; + inactive_peers_total += swarm.count_inactive_peers(current_cutoff); + } + + inactive_peers_total + } + /// Removes inactive peers from all torrent entries. /// /// A peer is considered inactive if its last update timestamp is older than @@ -705,6 +717,22 @@ mod tests { assert!(swarms.get(&info_hash).is_none()); } + #[tokio::test] + async fn it_should_count_inactive_peers() { + let swarms = Arc::new(Swarms::default()); + + let info_hash = sample_info_hash(); + let mut peer = sample_peer(); + peer.updated = DurationSinceUnixEpoch::new(0, 0); + + swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); + + // Cut off time is 1 second after the peer was updated + let inactive_peers_total = swarms.count_inactive_peers(peer.updated.add(Duration::from_secs(1))).await; + + assert_eq!(inactive_peers_total, 1); + } + #[tokio::test] async fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { let swarms = Arc::new(Swarms::default()); diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index bc193bd4f..bf73f7e8b 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -4,6 +4,7 @@ use std::time::Duration; use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::Core; +use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::repository::in_memory::InMemoryTorrentRepository; use 
super::repository::persisted::DatabasePersistentTorrentRepository; @@ -103,10 +104,13 @@ impl TorrentsManager { } async fn remove_inactive_peers(&self) { - let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))) - .unwrap_or_default(); + self.in_memory_torrent_repository + .remove_inactive_peers(self.current_cutoff()) + .await; + } - self.in_memory_torrent_repository.remove_inactive_peers(current_cutoff).await; + fn current_cutoff(&self) -> DurationSinceUnixEpoch { + CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))).unwrap_or_default() } async fn remove_peerless_torrents(&self) { diff --git a/src/app.rs b/src/app.rs index ca8b7a5c3..1c2d9387e 100644 --- a/src/app.rs +++ b/src/app.rs @@ -27,7 +27,9 @@ use torrust_tracker_configuration::{Configuration, HttpTracker, UdpTracker}; use tracing::instrument; use crate::bootstrap::jobs::manager::JobManager; -use crate::bootstrap::jobs::{self, health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; +use crate::bootstrap::jobs::{ + self, health_check_api, http_tracker, peers_inactivity_update, torrent_cleanup, tracker_apis, udp_tracker, +}; use crate::bootstrap::{self}; use crate::container::AppContainer; @@ -79,8 +81,11 @@ async fn start_jobs(config: &Configuration, app_container: &Arc) - start_the_udp_instances(config, app_container, &mut job_manager).await; start_the_http_instances(config, app_container, &mut job_manager).await; - start_the_http_api(config, app_container, &mut job_manager).await; + start_torrent_cleanup(config, app_container, &mut job_manager); + start_peers_inactivity_update(config, app_container, &mut job_manager); + + start_the_http_api(config, app_container, &mut job_manager).await; start_health_check_api(config, app_container, &mut job_manager).await; job_manager @@ -260,6 +265,16 @@ fn start_torrent_cleanup(config: &Configuration, app_container: &Arc, job_manager: 
&mut JobManager) { + if config.core.tracker_usage_statistics { + let handle = peers_inactivity_update::start_job(config, app_container); + + job_manager.push("peers_inactivity_update", handle); + } else { + tracing::info!("Peers inactivity update job is disabled."); + } +} + async fn start_health_check_api(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { let handle = health_check_api::start_job(&config.health_check_api, app_container.registar.entries()).await; diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index b311c6da6..f593ce808 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -10,6 +10,7 @@ pub mod health_check_api; pub mod http_tracker; pub mod http_tracker_core; pub mod manager; +pub mod peers_inactivity_update; pub mod torrent_cleanup; pub mod torrent_repository; pub mod tracker_apis; diff --git a/src/bootstrap/jobs/peers_inactivity_update.rs b/src/bootstrap/jobs/peers_inactivity_update.rs new file mode 100644 index 000000000..e7939720c --- /dev/null +++ b/src/bootstrap/jobs/peers_inactivity_update.rs @@ -0,0 +1,27 @@ +//! Job that runs a task on intervals to update peers' inactivity metrics. +use std::sync::Arc; +use std::time::Duration; + +use tokio::task::JoinHandle; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_configuration::Configuration; + +use crate::container::AppContainer; +use crate::CurrentClock; + +#[must_use] +pub fn start_job(config: &Configuration, app_container: &Arc) -> JoinHandle<()> { + torrust_tracker_torrent_repository::statistics::peers_inactivity_update::start_job( + &app_container.torrent_repository_container.swarms.clone(), + &app_container.torrent_repository_container.stats_repository.clone(), + peer_inactivity_cutoff_timestamp(config.core.tracker_policy.max_peer_timeout), + ) +} + +/// Returns the timestamp of the cutoff for inactive peers. 
+/// +/// Peers that has not been updated for more than `max_peer_timeout` seconds are +/// considered inactive. +fn peer_inactivity_cutoff_timestamp(max_peer_timeout: u32) -> Duration { + CurrentClock::now_sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default() +} From 677deacdc419526122eff62973f2685ac976a5eb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 21 May 2025 12:34:12 +0100 Subject: [PATCH 625/802] feat: [#1523] add new metric: number of inactive torrents --- .../statistics/activity_metrics_updater.rs | 104 ++++++++++++++++++ .../torrent-repository/src/statistics/mod.rs | 9 +- .../src/statistics/peers_inactivity_update.rs | 72 ------------ packages/torrent-repository/src/swarm.rs | 35 ++++++ packages/torrent-repository/src/swarms.rs | 51 +++++++++ src/app.rs | 4 +- ..._update.rs => activity_metrics_updater.rs} | 4 +- src/bootstrap/jobs/mod.rs | 2 +- 8 files changed, 203 insertions(+), 78 deletions(-) create mode 100644 packages/torrent-repository/src/statistics/activity_metrics_updater.rs delete mode 100644 packages/torrent-repository/src/statistics/peers_inactivity_update.rs rename src/bootstrap/jobs/{peers_inactivity_update.rs => activity_metrics_updater.rs} (84%) diff --git a/packages/torrent-repository/src/statistics/activity_metrics_updater.rs b/packages/torrent-repository/src/statistics/activity_metrics_updater.rs new file mode 100644 index 000000000..2dfa5fb4e --- /dev/null +++ b/packages/torrent-repository/src/statistics/activity_metrics_updater.rs @@ -0,0 +1,104 @@ +//! Job that runs a task on intervals to update peers' activity metrics. 
+use std::sync::Arc; + +use chrono::Utc; +use tokio::task::JoinHandle; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use tracing::instrument; + +use super::repository::Repository; +use crate::statistics::{TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL, TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL}; +use crate::{CurrentClock, Swarms}; + +#[must_use] +#[instrument(skip(swarms, stats_repository))] +pub fn start_job( + swarms: &Arc, + stats_repository: &Arc, + inactivity_cutoff: DurationSinceUnixEpoch, +) -> JoinHandle<()> { + let weak_swarms = std::sync::Arc::downgrade(swarms); + let weak_stats_repository = std::sync::Arc::downgrade(stats_repository); + + let interval_in_secs = 15; // todo: make this configurable + + tokio::spawn(async move { + let interval = std::time::Duration::from_secs(interval_in_secs); + let mut interval = tokio::time::interval(interval); + interval.tick().await; + + loop { + tokio::select! 
{ + _ = tokio::signal::ctrl_c() => { + tracing::info!("Stopping peers activity metrics update job (ctrl-c signal received) ..."); + break; + } + _ = interval.tick() => { + if let (Some(swarms), Some(stats_repository)) = (weak_swarms.upgrade(), weak_stats_repository.upgrade()) { + update_activity_metrics(interval_in_secs, &swarms, &stats_repository, inactivity_cutoff).await; + } else { + tracing::info!("Stopping peers activity metrics update job (can't upgrade weak pointers) ..."); + break; + } + } + } + } + }) +} + +async fn update_activity_metrics( + interval_in_secs: u64, + swarms: &Arc, + stats_repository: &Arc, + inactivity_cutoff: DurationSinceUnixEpoch, +) { + let start_time = Utc::now().time(); + + tracing::debug!( + "Updating peers and torrents activity metrics (executed every {} secs) ...", + interval_in_secs + ); + + let activity_metadata = swarms.get_activity_metadata(inactivity_cutoff).await; + + activity_metadata.log(); + + update_inactive_peers_total(stats_repository, activity_metadata.inactive_peers_total).await; + update_inactive_torrents_total(stats_repository, activity_metadata.inactive_torrents_total).await; + + tracing::debug!( + "Peers and torrents activity metrics updated in {} ms", + (Utc::now().time() - start_time).num_milliseconds() + ); +} + +async fn update_inactive_peers_total(stats_repository: &Arc, inactive_peers_total: usize) { + #[allow(clippy::cast_precision_loss)] + let inactive_peers_total = inactive_peers_total as f64; + + let _unused = stats_repository + .set_gauge( + &metric_name!(TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL), + &LabelSet::default(), + inactive_peers_total, + CurrentClock::now(), + ) + .await; +} + +async fn update_inactive_torrents_total(stats_repository: &Arc, inactive_torrents_total: usize) { + #[allow(clippy::cast_precision_loss)] + let inactive_torrents_total = inactive_torrents_total as f64; + + let _unused = stats_repository + .set_gauge( + &metric_name!(TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL), + 
&LabelSet::default(), + inactive_torrents_total, + CurrentClock::now(), + ) + .await; +} diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs index 0f8a839ca..cfc252e34 100644 --- a/packages/torrent-repository/src/statistics/mod.rs +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -1,6 +1,6 @@ +pub mod activity_metrics_updater; pub mod event; pub mod metrics; -pub mod peers_inactivity_update; pub mod repository; use metrics::Metrics; @@ -15,6 +15,7 @@ const TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL: &str = "torrent_repository_torr const TORRENT_REPOSITORY_TORRENTS_TOTAL: &str = "torrent_repository_torrents_total"; const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_torrents_downloads_total"; +const TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL: &str = "torrent_repository_torrents_inactive_total"; // Peers metrics @@ -56,6 +57,12 @@ pub fn describe_metrics() -> Metrics { Some(&MetricDescription::new("The total number of torrent downloads.")), ); + metrics.metric_collection.describe_gauge( + &metric_name!(TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of inactive torrents.")), + ); + // Peers metrics metrics.metric_collection.describe_counter( diff --git a/packages/torrent-repository/src/statistics/peers_inactivity_update.rs b/packages/torrent-repository/src/statistics/peers_inactivity_update.rs deleted file mode 100644 index e388173a1..000000000 --- a/packages/torrent-repository/src/statistics/peers_inactivity_update.rs +++ /dev/null @@ -1,72 +0,0 @@ -//! Job that runs a task on intervals to update peers' inactivity metrics. 
-use std::sync::Arc; - -use chrono::Utc; -use tokio::task::JoinHandle; -use torrust_tracker_clock::clock::Time; -use torrust_tracker_metrics::label::LabelSet; -use torrust_tracker_metrics::metric_name; -use torrust_tracker_primitives::DurationSinceUnixEpoch; -use tracing::instrument; - -use super::repository::Repository; -use crate::statistics::TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL; -use crate::{CurrentClock, Swarms}; - -#[must_use] -#[instrument(skip(swarms, stats_repository))] -pub fn start_job( - swarms: &Arc, - stats_repository: &Arc, - inactivity_cutoff: DurationSinceUnixEpoch, -) -> JoinHandle<()> { - let weak_swarms = std::sync::Arc::downgrade(swarms); - let weak_stats_repository = std::sync::Arc::downgrade(stats_repository); - - let interval_in_secs = 15; // todo: make this configurable - - tokio::spawn(async move { - let interval = std::time::Duration::from_secs(interval_in_secs); - let mut interval = tokio::time::interval(interval); - interval.tick().await; - - loop { - tokio::select! 
{ - _ = tokio::signal::ctrl_c() => { - tracing::info!("Stopping peers inactivity metrics update job ..."); - break; - } - _ = interval.tick() => { - if let (Some(swarms), Some(stats_repository)) = (weak_swarms.upgrade(), weak_stats_repository.upgrade()) { - let start_time = Utc::now().time(); - - tracing::debug!("Updating peers inactivity metrics (executed every {} secs) ...", interval_in_secs); - - let inactive_peers_total = swarms.count_inactive_peers(inactivity_cutoff).await; - - tracing::info!(inactive_peers_total = inactive_peers_total); - - #[allow(clippy::cast_precision_loss)] - let inactive_peers_total = inactive_peers_total as f64; - - let _unused = stats_repository - .set_gauge( - &metric_name!(TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL), - &LabelSet::default(), - inactive_peers_total, - CurrentClock::now(), - ) - .await; - - tracing::debug!( - "Peers inactivity metrics updated in {} ms", - (Utc::now().time() - start_time).num_milliseconds() - ); - } else { - break; - } - } - } - } - }) -} diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index d7a1ede87..b9076289b 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -126,6 +126,17 @@ impl Swarm { .count() } + #[must_use] + pub fn get_activity_metadata(&self, current_cutoff: DurationSinceUnixEpoch) -> ActivityMetadata { + let inactive_peers_total = self.count_inactive_peers(current_cutoff); + + let active_peers_total = self.len() - inactive_peers_total; + + let is_active = active_peers_total > 0; + + ActivityMetadata::new(is_active, active_peers_total, inactive_peers_total) + } + #[must_use] pub fn len(&self) -> usize { self.peers.len() @@ -296,6 +307,30 @@ impl Swarm { } } +#[derive(Clone)] +pub struct ActivityMetadata { + /// Indicates if the swarm is active. It's inactive if there are no active + /// peers. + pub is_active: bool, + + /// The number of active peers in the swarm. 
+ pub active_peers_total: usize, + + /// The number of inactive peers in the swarm. + pub inactive_peers_total: usize, +} + +impl ActivityMetadata { + #[must_use] + pub fn new(is_active: bool, active_peers_total: usize, inactive_peers_total: usize) -> Self { + Self { + is_active, + active_peers_total, + inactive_peers_total, + } + } +} + #[cfg(test)] mod tests { diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 811bf6a50..36f83070d 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -248,6 +248,32 @@ impl Swarms { } } + pub async fn get_activity_metadata(&self, current_cutoff: DurationSinceUnixEpoch) -> AggregateActivityMetadata { + let mut active_peers_total = 0; + let mut inactive_peers_total = 0; + let mut active_torrents_total = 0; + + for swarm_handle in &self.swarms { + let swarm = swarm_handle.value().lock().await; + + let activity_metadata = swarm.get_activity_metadata(current_cutoff); + + if activity_metadata.is_active { + active_torrents_total += 1; + } + + active_peers_total += activity_metadata.active_peers_total; + inactive_peers_total += activity_metadata.inactive_peers_total; + } + + AggregateActivityMetadata { + active_peers_total, + inactive_peers_total, + active_torrents_total, + inactive_torrents_total: self.len() - active_torrents_total, + } + } + /// Counts the number of inactive peers across all torrents. pub async fn count_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> usize { let mut inactive_peers_total = 0; @@ -446,6 +472,31 @@ impl Swarms { #[derive(thiserror::Error, Debug, Clone)] pub enum Error {} +#[derive(Clone, Debug, Default)] +pub struct AggregateActivityMetadata { + /// The number of active peers in all swarms. + pub active_peers_total: usize, + + /// The number of inactive peers in all swarms. + pub inactive_peers_total: usize, + + /// The number of active torrents. 
+ pub active_torrents_total: usize, + + /// The number of inactive torrents. + pub inactive_torrents_total: usize, +} + +impl AggregateActivityMetadata { + pub fn log(&self) { + tracing::info!( + active_peers_total = self.active_peers_total, + inactive_peers_total = self.inactive_peers_total, + active_torrents_total = self.active_torrents_total, + inactive_torrents_total = self.inactive_torrents_total + ); + } +} #[cfg(test)] mod tests { diff --git a/src/app.rs b/src/app.rs index 1c2d9387e..5180e4583 100644 --- a/src/app.rs +++ b/src/app.rs @@ -28,7 +28,7 @@ use tracing::instrument; use crate::bootstrap::jobs::manager::JobManager; use crate::bootstrap::jobs::{ - self, health_check_api, http_tracker, peers_inactivity_update, torrent_cleanup, tracker_apis, udp_tracker, + self, activity_metrics_updater, health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker, }; use crate::bootstrap::{self}; use crate::container::AppContainer; @@ -267,7 +267,7 @@ fn start_torrent_cleanup(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { if config.core.tracker_usage_statistics { - let handle = peers_inactivity_update::start_job(config, app_container); + let handle = activity_metrics_updater::start_job(config, app_container); job_manager.push("peers_inactivity_update", handle); } else { diff --git a/src/bootstrap/jobs/peers_inactivity_update.rs b/src/bootstrap/jobs/activity_metrics_updater.rs similarity index 84% rename from src/bootstrap/jobs/peers_inactivity_update.rs rename to src/bootstrap/jobs/activity_metrics_updater.rs index e7939720c..7411c05cf 100644 --- a/src/bootstrap/jobs/peers_inactivity_update.rs +++ b/src/bootstrap/jobs/activity_metrics_updater.rs @@ -1,4 +1,4 @@ -//! Job that runs a task on intervals to update peers' inactivity metrics. +//! Job that runs a task on intervals to update peers' activity metrics. 
use std::sync::Arc; use std::time::Duration; @@ -11,7 +11,7 @@ use crate::CurrentClock; #[must_use] pub fn start_job(config: &Configuration, app_container: &Arc) -> JoinHandle<()> { - torrust_tracker_torrent_repository::statistics::peers_inactivity_update::start_job( + torrust_tracker_torrent_repository::statistics::activity_metrics_updater::start_job( &app_container.torrent_repository_container.swarms.clone(), &app_container.torrent_repository_container.stats_repository.clone(), peer_inactivity_cutoff_timestamp(config.core.tracker_policy.max_peer_timeout), diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index f593ce808..c8d7a8598 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -6,11 +6,11 @@ //! 2. Launch all the application services as concurrent jobs. //! //! This modules contains all the functions needed to start those jobs. +pub mod activity_metrics_updater; pub mod health_check_api; pub mod http_tracker; pub mod http_tracker_core; pub mod manager; -pub mod peers_inactivity_update; pub mod torrent_cleanup; pub mod torrent_repository; pub mod tracker_apis; From 3a23a38b38c059311b5213e8e6055ac809d6f648 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 21 May 2025 16:56:39 +0100 Subject: [PATCH 626/802] fix: tracing message --- src/bootstrap/jobs/torrent_repository.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bootstrap/jobs/torrent_repository.rs b/src/bootstrap/jobs/torrent_repository.rs index 2125de554..ea0d215ee 100644 --- a/src/bootstrap/jobs/torrent_repository.rs +++ b/src/bootstrap/jobs/torrent_repository.rs @@ -14,7 +14,7 @@ pub fn start_event_listener(config: &Configuration, app_container: &Arc Date: Wed, 21 May 2025 17:11:52 +0100 Subject: [PATCH 627/802] feat: [#1524] listens to torrent-repository events in the tracker-core pkg This will enable udpating stats (number of torrent downloads per torrent) from the event handler (persisting in the DB). 
And after that, I will enable adding lebeled metrics. --- Cargo.lock | 1 + packages/tracker-core/Cargo.toml | 1 + packages/tracker-core/src/lib.rs | 3 ++ .../src/statistics/event/handler.rs | 32 ++++++++++++ .../src/statistics/event/listener.rs | 52 +++++++++++++++++++ .../tracker-core/src/statistics/event/mod.rs | 2 + packages/tracker-core/src/statistics/mod.rs | 1 + src/app.rs | 9 ++++ src/bootstrap/jobs/mod.rs | 1 + src/bootstrap/jobs/tracker_core.rs | 21 ++++++++ 10 files changed, 123 insertions(+) create mode 100644 packages/tracker-core/src/statistics/event/handler.rs create mode 100644 packages/tracker-core/src/statistics/event/listener.rs create mode 100644 packages/tracker-core/src/statistics/event/mod.rs create mode 100644 packages/tracker-core/src/statistics/mod.rs create mode 100644 src/bootstrap/jobs/tracker_core.rs diff --git a/Cargo.lock b/Cargo.lock index 6e4ab415f..5415149e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -676,6 +676,7 @@ dependencies = [ "torrust-rest-tracker-api-client", "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-events", "torrust-tracker-located-error", "torrust-tracker-primitives", "torrust-tracker-test-helpers", diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml index ac1cee88d..3c89505b2 100644 --- a/packages/tracker-core/Cargo.toml +++ b/packages/tracker-core/Cargo.toml @@ -29,6 +29,7 @@ thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } +torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } torrust-tracker-torrent-repository = { version = "3.0.0-develop", path 
= "../torrent-repository" } diff --git a/packages/tracker-core/src/lib.rs b/packages/tracker-core/src/lib.rs index 82ebac3c6..dacf41383 100644 --- a/packages/tracker-core/src/lib.rs +++ b/packages/tracker-core/src/lib.rs @@ -124,6 +124,7 @@ pub mod container; pub mod databases; pub mod error; pub mod scrape_handler; +pub mod statistics; pub mod torrent; pub mod whitelist; @@ -156,6 +157,8 @@ pub(crate) type CurrentClock = clock::Working; #[allow(dead_code)] pub(crate) type CurrentClock = clock::Stopped; +pub const TRACKER_CORE_LOG_TARGET: &str = "TRACKER_CORE"; + #[cfg(test)] mod tests { mod the_tracker { diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs new file mode 100644 index 000000000..bdd4d414b --- /dev/null +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -0,0 +1,32 @@ +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use torrust_tracker_torrent_repository::event::Event; + +pub async fn handle_event(event: Event, _now: DurationSinceUnixEpoch) { + match event { + // Torrent events + Event::TorrentAdded { info_hash, .. 
} => { + tracing::debug!(info_hash = ?info_hash, "Torrent added",); + } + Event::TorrentRemoved { info_hash } => { + tracing::debug!(info_hash = ?info_hash, "Torrent removed",); + } + + // Peer events + Event::PeerAdded { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer added", ); + } + Event::PeerRemoved { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer removed", ); + } + Event::PeerUpdated { + info_hash, + old_peer, + new_peer, + } => { + tracing::debug!(info_hash = ?info_hash, old_peer = ?old_peer, new_peer = ?new_peer, "Peer updated"); + } + Event::PeerDownloadCompleted { info_hash, peer } => { + tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer download completed", ); + } + } +} diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs new file mode 100644 index 000000000..2fe068b76 --- /dev/null +++ b/packages/tracker-core/src/statistics/event/listener.rs @@ -0,0 +1,52 @@ +use tokio::task::JoinHandle; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_events::receiver::RecvError; +use torrust_tracker_torrent_repository::event::receiver::Receiver; + +use super::handler::handle_event; +use crate::{CurrentClock, TRACKER_CORE_LOG_TARGET}; + +#[must_use] +pub fn run_event_listener(receiver: Receiver) -> JoinHandle<()> { + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Starting torrent repository event listener"); + + tokio::spawn(async move { + dispatch_events(receiver).await; + + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Torrent repository listener finished"); + }) +} + +async fn dispatch_events(mut receiver: Receiver) { + let shutdown_signal = tokio::signal::ctrl_c(); + + tokio::pin!(shutdown_signal); + + loop { + tokio::select! 
{ + biased; + + _ = &mut shutdown_signal => { + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Received Ctrl+C, shutting down torrent repository event listener"); + break; + } + + result = receiver.recv() => { + match result { + Ok(event) => handle_event(event, CurrentClock::now()).await, + Err(e) => { + match e { + RecvError::Closed => { + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Torrent repository event receiver closed"); + break; + } + RecvError::Lagged(n) => { + tracing::warn!(target: TRACKER_CORE_LOG_TARGET, "Torrent repository event receiver lagged by {} events", n); + } + } + } + } + } + } + } +} diff --git a/packages/tracker-core/src/statistics/event/mod.rs b/packages/tracker-core/src/statistics/event/mod.rs new file mode 100644 index 000000000..dae683398 --- /dev/null +++ b/packages/tracker-core/src/statistics/event/mod.rs @@ -0,0 +1,2 @@ +pub mod handler; +pub mod listener; diff --git a/packages/tracker-core/src/statistics/mod.rs b/packages/tracker-core/src/statistics/mod.rs new file mode 100644 index 000000000..53f112654 --- /dev/null +++ b/packages/tracker-core/src/statistics/mod.rs @@ -0,0 +1 @@ +pub mod event; diff --git a/src/app.rs b/src/app.rs index 5180e4583..3b6abb86f 100644 --- a/src/app.rs +++ b/src/app.rs @@ -75,6 +75,7 @@ async fn start_jobs(config: &Configuration, app_container: &Arc) - let mut job_manager = JobManager::new(); start_torrent_repository_event_listener(config, app_container, &mut job_manager); + start_tracker_core_event_listener(config, app_container, &mut job_manager); start_http_core_event_listener(config, app_container, &mut job_manager); start_udp_core_event_listener(config, app_container, &mut job_manager); start_udp_server_event_listener(config, app_container, &mut job_manager); @@ -145,6 +146,14 @@ fn start_torrent_repository_event_listener( } } +fn start_tracker_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { + let opt_handle = 
jobs::tracker_core::start_event_listener(config, app_container); + + if let Some(handle) = opt_handle { + job_manager.push("tracker_core_event_listener", handle); + } +} + fn start_http_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { let opt_handle = jobs::http_tracker_core::start_event_listener(config, app_container); diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index c8d7a8598..0e9c912af 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -14,6 +14,7 @@ pub mod manager; pub mod torrent_cleanup; pub mod torrent_repository; pub mod tracker_apis; +pub mod tracker_core; pub mod udp_tracker; pub mod udp_tracker_core; pub mod udp_tracker_server; diff --git a/src/bootstrap/jobs/tracker_core.rs b/src/bootstrap/jobs/tracker_core.rs new file mode 100644 index 000000000..28eb745c2 --- /dev/null +++ b/src/bootstrap/jobs/tracker_core.rs @@ -0,0 +1,21 @@ +use std::sync::Arc; + +use tokio::task::JoinHandle; +use torrust_tracker_configuration::Configuration; + +use crate::container::AppContainer; + +pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { + // todo: enable this when labeled metrics are implemented. 
+ //if config.core.tracker_usage_statistics || config.core.tracker_policy.persistent_torrent_completed_stat { + if config.core.tracker_policy.persistent_torrent_completed_stat { + let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( + app_container.torrent_repository_container.event_bus.receiver(), + ); + + Some(job) + } else { + tracing::info!("Tracker core event listener job is disabled."); + None + } +} From 896875738f62b863bb87f27558f0e2344703110a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 21 May 2025 17:26:05 +0100 Subject: [PATCH 628/802] refactor: extract method JobManger::push_opt --- src/app.rs | 45 ++++++++++++++++------------------- src/bootstrap/jobs/manager.rs | 6 +++++ 2 files changed, 26 insertions(+), 25 deletions(-) diff --git a/src/app.rs b/src/app.rs index 3b6abb86f..5037ad761 100644 --- a/src/app.rs +++ b/src/app.rs @@ -139,43 +139,38 @@ fn start_torrent_repository_event_listener( app_container: &Arc, job_manager: &mut JobManager, ) { - let opt_handle = jobs::torrent_repository::start_event_listener(config, app_container); - - if let Some(handle) = opt_handle { - job_manager.push("torrent_repository_event_listener", handle); - } + job_manager.push_opt( + "torrent_repository_event_listener", + jobs::torrent_repository::start_event_listener(config, app_container), + ); } fn start_tracker_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { - let opt_handle = jobs::tracker_core::start_event_listener(config, app_container); - - if let Some(handle) = opt_handle { - job_manager.push("tracker_core_event_listener", handle); - } + job_manager.push_opt( + "tracker_core_event_listener", + jobs::tracker_core::start_event_listener(config, app_container), + ); } fn start_http_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { - let opt_handle = jobs::http_tracker_core::start_event_listener(config, app_container); - - if let 
Some(handle) = opt_handle { - job_manager.push("http_core_event_listener", handle); - } + job_manager.push_opt( + "http_core_event_listener", + jobs::http_tracker_core::start_event_listener(config, app_container), + ); } fn start_udp_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { - let opt_handle = jobs::udp_tracker_core::start_event_listener(config, app_container); - - if let Some(handle) = opt_handle { - job_manager.push("udp_core_event_listener", handle); - } + job_manager.push_opt( + "udp_core_event_listener", + jobs::udp_tracker_core::start_event_listener(config, app_container), + ); } fn start_udp_server_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { - let opt_handle = jobs::udp_tracker_server::start_event_listener(config, app_container); - - if let Some(handle) = opt_handle { - job_manager.push("udp_server_event_listener", handle); - } + job_manager.push_opt( + "udp_server_event_listener", + jobs::udp_tracker_server::start_event_listener(config, app_container), + ); } async fn start_the_udp_instances(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { diff --git a/src/bootstrap/jobs/manager.rs b/src/bootstrap/jobs/manager.rs index 5beab3224..53733844b 100644 --- a/src/bootstrap/jobs/manager.rs +++ b/src/bootstrap/jobs/manager.rs @@ -36,6 +36,12 @@ impl JobManager { self.jobs.push(Job::new(name, handle)); } + pub fn push_opt>(&mut self, name: N, handle: Option>) { + if let Some(handle) = handle { + self.push(name, handle); + } + } + /// Waits sequentially for all jobs to complete, with a graceful timeout per /// job. 
pub async fn wait_for_all(mut self, grace_period: Duration) { From e90585af80fc7a153708731ef1d5488da4e549d6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 21 May 2025 18:16:39 +0100 Subject: [PATCH 629/802] refactor: [#1524] move total downloads udpate from announce command to event handler --- packages/tracker-core/src/announce_handler.rs | 11 ++++++----- .../src/statistics/event/handler.rs | 19 ++++++++++++++++++- .../src/statistics/event/listener.rs | 16 ++++++++++++---- src/bootstrap/jobs/tracker_core.rs | 1 + 4 files changed, 37 insertions(+), 10 deletions(-) diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index a2e8db743..61e5de125 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -163,6 +163,11 @@ impl AnnounceHandler { ) -> Result { self.whitelist_authorization.authorize(info_hash).await?; + // This will be removed in the future. + // See https://github.com/torrust/torrust-tracker/issues/1502 + // There will be a persisted metric for counting the total number of + // downloads across all torrents. The in-memory metric will count only + // the number of downloads during the current tracker uptime. let opt_persistent_torrent = if self.config.tracker_policy.persistent_torrent_completed_stat { self.db_torrent_repository.load(info_hash)? 
} else { @@ -171,15 +176,11 @@ impl AnnounceHandler { peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip)); - let number_of_downloads_increased = self + let _number_of_downloads_increased = self .in_memory_torrent_repository .upsert_peer(info_hash, peer, opt_persistent_torrent) .await; - if self.config.tracker_policy.persistent_torrent_completed_stat && number_of_downloads_increased { - self.db_torrent_repository.increase_number_of_downloads(info_hash)?; - } - Ok(self.build_announce_data(info_hash, peer, peers_wanted).await) } diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs index bdd4d414b..7b6ce83b7 100644 --- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -1,7 +1,15 @@ +use std::sync::Arc; + use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_torrent_repository::event::Event; -pub async fn handle_event(event: Event, _now: DurationSinceUnixEpoch) { +use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; + +pub async fn handle_event( + event: Event, + db_torrent_repository: &Arc, + _now: DurationSinceUnixEpoch, +) { match event { // Torrent events Event::TorrentAdded { info_hash, .. 
} => { @@ -27,6 +35,15 @@ pub async fn handle_event(event: Event, _now: DurationSinceUnixEpoch) { } Event::PeerDownloadCompleted { info_hash, peer } => { tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer download completed", ); + + match db_torrent_repository.increase_number_of_downloads(&info_hash) { + Ok(()) => { + tracing::debug!(info_hash = ?info_hash, "Number of downloads increased"); + } + Err(err) => { + tracing::error!(info_hash = ?info_hash, error = ?err, "Failed to increase number of downloads"); + } + } } } } diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs index 2fe068b76..e04675092 100644 --- a/packages/tracker-core/src/statistics/event/listener.rs +++ b/packages/tracker-core/src/statistics/event/listener.rs @@ -1,23 +1,31 @@ +use std::sync::Arc; + use tokio::task::JoinHandle; use torrust_tracker_clock::clock::Time; use torrust_tracker_events::receiver::RecvError; use torrust_tracker_torrent_repository::event::receiver::Receiver; use super::handler::handle_event; +use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; use crate::{CurrentClock, TRACKER_CORE_LOG_TARGET}; #[must_use] -pub fn run_event_listener(receiver: Receiver) -> JoinHandle<()> { +pub fn run_event_listener( + receiver: Receiver, + db_torrent_repository: &Arc, +) -> JoinHandle<()> { + let db_torrent_repository: Arc = db_torrent_repository.clone(); + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Starting torrent repository event listener"); tokio::spawn(async move { - dispatch_events(receiver).await; + dispatch_events(receiver, db_torrent_repository).await; tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Torrent repository listener finished"); }) } -async fn dispatch_events(mut receiver: Receiver) { +async fn dispatch_events(mut receiver: Receiver, db_torrent_repository: Arc) { let shutdown_signal = tokio::signal::ctrl_c(); tokio::pin!(shutdown_signal); @@ -33,7 +41,7 @@ 
async fn dispatch_events(mut receiver: Receiver) { result = receiver.recv() => { match result { - Ok(event) => handle_event(event, CurrentClock::now()).await, + Ok(event) => handle_event(event, &db_torrent_repository, CurrentClock::now()).await, Err(e) => { match e { RecvError::Closed => { diff --git a/src/bootstrap/jobs/tracker_core.rs b/src/bootstrap/jobs/tracker_core.rs index 28eb745c2..bb879db6b 100644 --- a/src/bootstrap/jobs/tracker_core.rs +++ b/src/bootstrap/jobs/tracker_core.rs @@ -11,6 +11,7 @@ pub fn start_event_listener(config: &Configuration, app_container: &Arc Date: Mon, 26 May 2025 09:45:42 +0100 Subject: [PATCH 630/802] refactor: [#1524] remove duplciate code for tracker core container --- packages/tracker-core/tests/integration.rs | 49 ++++------------------ 1 file changed, 7 insertions(+), 42 deletions(-) diff --git a/packages/tracker-core/tests/integration.rs b/packages/tracker-core/tests/integration.rs index 5aaded10a..282dcade5 100644 --- a/packages/tracker-core/tests/integration.rs +++ b/packages/tracker-core/tests/integration.rs @@ -4,17 +4,13 @@ use std::sync::Arc; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; -use bittorrent_tracker_core::announce_handler::{AnnounceHandler, PeersWanted}; -use bittorrent_tracker_core::databases::setup::initialize_database; -use bittorrent_tracker_core::scrape_handler::ScrapeHandler; -use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; -use bittorrent_tracker_core::whitelist; -use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; +use bittorrent_tracker_core::announce_handler::PeersWanted; +use bittorrent_tracker_core::container::TrackerCoreContainer; use torrust_tracker_configuration::Core; use torrust_tracker_primitives::peer::Peer; use 
torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; /// # Panics /// @@ -59,41 +55,13 @@ fn remote_client_ip() -> IpAddr { IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) } -struct Container { - pub announce_handler: Arc, - pub scrape_handler: Arc, -} - -impl Container { - pub fn initialize(config: &Core) -> Self { - let database = initialize_database(config); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization = Arc::new(whitelist::authorization::WhitelistAuthorization::new( - config, - &in_memory_whitelist.clone(), - )); - let announce_handler = Arc::new(AnnounceHandler::new( - config, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); - - Self { - announce_handler, - scrape_handler, - } - } -} - #[tokio::test] async fn test_announce_and_scrape_requests() { - let config = ephemeral_configuration(); + let config = Arc::new(ephemeral_configuration()); + + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize(config.tracker_usage_statistics.into())); - let container = Container::initialize(&config); + let container = TrackerCoreContainer::initialize_from(&config, &torrent_repository_container); let info_hash = sample_info_hash(); @@ -130,6 +98,3 @@ async fn test_announce_and_scrape_requests() { assert!(scrape_data.files.contains_key(&info_hash)); } - -#[test] -fn test_scrape_request() {} From b05bccdccc90ed73f63ac9f9c61fcbfaa75f7bbf Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 26 May 2025 10:02:49 
+0100 Subject: [PATCH 631/802] refactor: [#1524] integration tests in tracker-core --- packages/tracker-core/tests/integration.rs | 58 +++++++++++++++------- 1 file changed, 39 insertions(+), 19 deletions(-) diff --git a/packages/tracker-core/tests/integration.rs b/packages/tracker-core/tests/integration.rs index 282dcade5..f59b9d185 100644 --- a/packages/tracker-core/tests/integration.rs +++ b/packages/tracker-core/tests/integration.rs @@ -7,6 +7,7 @@ use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::PeersWanted; use bittorrent_tracker_core::container::TrackerCoreContainer; use torrust_tracker_configuration::Core; +use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; @@ -55,44 +56,63 @@ fn remote_client_ip() -> IpAddr { IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) } -#[tokio::test] -async fn test_announce_and_scrape_requests() { +fn initialize() -> (Arc, Arc, InfoHash, Peer) { let config = Arc::new(ephemeral_configuration()); let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize(config.tracker_usage_statistics.into())); - let container = TrackerCoreContainer::initialize_from(&config, &torrent_repository_container); + let container = Arc::new(TrackerCoreContainer::initialize_from(&config, &torrent_repository_container)); let info_hash = sample_info_hash(); - let mut peer = sample_peer(); + let peer = sample_peer(); - // Announce + (config, container, info_hash, peer) +} - // First announce: download started +async fn announce_peer_started(container: &Arc, peer: &mut Peer, info_hash: &InfoHash) -> AnnounceData { peer.event = AnnounceEvent::Started; - let announce_data = container + + container .announce_handler - .announce(&info_hash, &mut peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible) + 
.announce(info_hash, peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible) .await - .unwrap(); - - // NOTICE: you don't get back the peer making the request. - assert_eq!(announce_data.peers.len(), 0); - assert_eq!(announce_data.stats.downloaded, 0); + .unwrap() +} - // Second announce: download completed +async fn _announce_peer_completed(container: &Arc, peer: &mut Peer, info_hash: &InfoHash) -> AnnounceData { peer.event = AnnounceEvent::Completed; - let announce_data = container + + container .announce_handler - .announce(&info_hash, &mut peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible) + .announce(info_hash, peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible) .await - .unwrap(); + .unwrap() +} + +#[tokio::test] +async fn it_should_handle_the_announce_request() { + let (_config, container, info_hash, mut peer) = initialize(); + + let announce_data = announce_peer_started(&container, &mut peer, &info_hash).await; + + assert_eq!(announce_data, AnnounceData::default()); +} + +#[tokio::test] +async fn it_should_not_return_the_peer_making_the_announce_request() { + let (_config, container, info_hash, mut peer) = initialize(); + + let announce_data = announce_peer_started(&container, &mut peer, &info_hash).await; assert_eq!(announce_data.peers.len(), 0); - assert_eq!(announce_data.stats.downloaded, 1); +} + +#[tokio::test] +async fn it_should_handle_the_scrape_request() { + let (_config, container, info_hash, mut peer) = initialize(); - // Scrape + let _announce_data = announce_peer_started(&container, &mut peer, &info_hash).await; let scrape_data = container.scrape_handler.scrape(&vec![info_hash]).await.unwrap(); From ab2f52dd3781d58d56d997b42e185c2f102feafc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 26 May 2025 12:54:12 +0100 Subject: [PATCH 632/802] fix: [#1524] test (move to integration test) --- packages/tracker-core/src/announce_handler.rs | 77 ----------- packages/tracker-core/src/torrent/manager.rs | 2 + 
.../src/torrent/repository/in_memory.rs | 19 --- packages/tracker-core/tests/integration.rs | 121 +++++++++++++++--- 4 files changed, 106 insertions(+), 113 deletions(-) diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 61e5de125..0a3fef045 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -594,83 +594,6 @@ mod tests { } } - mod handling_torrent_persistence { - - use std::sync::Arc; - - use aquatic_udp_protocol::AnnounceEvent; - use torrust_tracker_test_helpers::configuration; - use torrust_tracker_torrent_repository::Swarms; - - use crate::announce_handler::tests::the_announce_handler::peer_ip; - use crate::announce_handler::{AnnounceHandler, PeersWanted}; - use crate::databases::setup::initialize_database; - use crate::test_helpers::tests::{sample_info_hash, sample_peer}; - use crate::torrent::manager::TorrentsManager; - use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; - use crate::whitelist::authorization::WhitelistAuthorization; - use crate::whitelist::repository::in_memory::InMemoryWhitelist; - - #[tokio::test] - async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { - let mut config = configuration::ephemeral_public(); - - config.core.tracker_policy.persistent_torrent_completed_stat = true; - - let database = initialize_database(&config.core); - let swarms = Arc::new(Swarms::default()); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(swarms)); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); - let torrents_manager = Arc::new(TorrentsManager::new( - &config.core, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); - let whitelist_authorization 
= Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); - let announce_handler = Arc::new(AnnounceHandler::new( - &config.core, - &whitelist_authorization, - &in_memory_torrent_repository, - &db_torrent_repository, - )); - - let info_hash = sample_info_hash(); - - let mut peer = sample_peer(); - - peer.event = AnnounceEvent::Started; - let announce_data = announce_handler - .announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) - .await - .unwrap(); - assert_eq!(announce_data.stats.downloaded, 0); - - peer.event = AnnounceEvent::Completed; - let announce_data = announce_handler - .announce(&info_hash, &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) - .await - .unwrap(); - assert_eq!(announce_data.stats.downloaded, 1); - - // Remove the newly updated torrent from memory - let _unused = in_memory_torrent_repository.remove(&info_hash).await; - - torrents_manager.load_torrents_from_database().unwrap(); - - let torrent_entry = in_memory_torrent_repository - .get(&info_hash) - .expect("it should be able to get entry"); - - // It persists the number of completed peers. 
- assert_eq!(torrent_entry.lock().await.metadata().downloaded, 1); - - // It does not persist the peers - assert!(torrent_entry.lock().await.is_empty()); - } - } - mod should_allow_the_client_peers_to_specified_the_number_of_peers_wanted { use torrust_tracker_configuration::TORRENT_PEERS_LIMIT; diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index bf73f7e8b..f463eee98 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -74,6 +74,8 @@ impl TorrentsManager { pub fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { let persistent_torrents = self.db_torrent_repository.load_all()?; + println!("Loaded {} persistent torrents from the database", persistent_torrents.len()); + self.in_memory_torrent_repository.import_persistent(&persistent_torrents); Ok(()) diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 311480306..bf8d083f8 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -61,25 +61,6 @@ impl InMemoryTorrentRepository { .expect("Failed to upsert the peer in swarms") } - /// Removes a torrent entry from the repository. - /// - /// This method is only available in tests. It removes the torrent entry - /// associated with the given info hash and returns the removed entry if it - /// existed. - /// - /// # Arguments - /// - /// * `key` - The info hash of the torrent to remove. - /// - /// # Returns - /// - /// An `Option` containing the removed torrent entry if it existed. - #[cfg(test)] - #[must_use] - pub(crate) async fn remove(&self, key: &InfoHash) -> Option { - self.swarms.remove(key).await - } - /// Removes inactive peers from all torrent entries. 
/// /// A peer is considered inactive if its last update timestamp is older than diff --git a/packages/tracker-core/tests/integration.rs b/packages/tracker-core/tests/integration.rs index f59b9d185..7af0ec4fa 100644 --- a/packages/tracker-core/tests/integration.rs +++ b/packages/tracker-core/tests/integration.rs @@ -6,12 +6,15 @@ use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::PeersWanted; use bittorrent_tracker_core::container::TrackerCoreContainer; -use torrust_tracker_configuration::Core; +use tokio::task::yield_now; +use torrust_tracker_configuration::{AnnouncePolicy, Core}; use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_torrent_repository::Swarms; /// # Panics /// @@ -56,52 +59,114 @@ fn remote_client_ip() -> IpAddr { IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) } -fn initialize() -> (Arc, Arc, InfoHash, Peer) { - let config = Arc::new(ephemeral_configuration()); - - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize(config.tracker_usage_statistics.into())); - - let container = Arc::new(TrackerCoreContainer::initialize_from(&config, &torrent_repository_container)); +async fn initialize_test_env(core_config: Core) -> (Arc, Arc, Arc, InfoHash, Peer) { + let config = Arc::new(core_config); let info_hash = sample_info_hash(); let peer = sample_peer(); - (config, container, info_hash, peer) + let (container, swarms) = start(&config).await; + + (config, container, swarms, info_hash, peer) +} + +async fn start(core_config: &Arc) -> (Arc, Arc) { + let torrent_repository_container 
= Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); + + let container = Arc::new(TrackerCoreContainer::initialize_from( + core_config, + &torrent_repository_container, + )); + + let mut jobs = vec![]; + + let job = torrust_tracker_torrent_repository::statistics::event::listener::run_event_listener( + torrent_repository_container.event_bus.receiver(), + &torrent_repository_container.stats_repository, + ); + + jobs.push(job); + + let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( + torrent_repository_container.event_bus.receiver(), + &container.db_torrent_repository, + ); + + jobs.push(job); + + // Give the event listeners some time to start + // todo: they should notify when they are ready + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + (container, torrent_repository_container.swarms.clone()) } async fn announce_peer_started(container: &Arc, peer: &mut Peer, info_hash: &InfoHash) -> AnnounceData { peer.event = AnnounceEvent::Started; - container + let announce_data = container .announce_handler .announce(info_hash, peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible) .await - .unwrap() + .unwrap(); + + // Give time to the event listeners to process the event + yield_now().await; + + announce_data } -async fn _announce_peer_completed(container: &Arc, peer: &mut Peer, info_hash: &InfoHash) -> AnnounceData { +async fn announce_peer_completed(container: &Arc, peer: &mut Peer, info_hash: &InfoHash) -> AnnounceData { peer.event = AnnounceEvent::Completed; - container + let announce_data = container .announce_handler .announce(info_hash, peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible) .await - .unwrap() + .unwrap(); + + // Give time to the event listeners to process the event + yield_now().await; + + announce_data +} + +async fn increase_number_of_downloads(container: &Arc, peer: &mut Peer, info_hash: &InfoHash) { + let _announce_data = 
announce_peer_started(container, peer, info_hash).await; + let announce_data = announce_peer_completed(container, peer, info_hash).await; + + assert_eq!(announce_data.stats.downloads(), 1); } #[tokio::test] async fn it_should_handle_the_announce_request() { - let (_config, container, info_hash, mut peer) = initialize(); + let (_config, container, _swarms, info_hash, mut peer) = initialize_test_env(ephemeral_configuration()).await; let announce_data = announce_peer_started(&container, &mut peer, &info_hash).await; - assert_eq!(announce_data, AnnounceData::default()); + assert_eq!( + announce_data, + AnnounceData { + peers: vec![], + stats: SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0 + }, + policy: AnnouncePolicy { + interval: 120, + interval_min: 120 + } + } + ); } #[tokio::test] async fn it_should_not_return_the_peer_making_the_announce_request() { - let (_config, container, info_hash, mut peer) = initialize(); + let (_config, container, _swarms, info_hash, mut peer) = initialize_test_env(ephemeral_configuration()).await; let announce_data = announce_peer_started(&container, &mut peer, &info_hash).await; @@ -110,7 +175,7 @@ async fn it_should_not_return_the_peer_making_the_announce_request() { #[tokio::test] async fn it_should_handle_the_scrape_request() { - let (_config, container, info_hash, mut peer) = initialize(); + let (_config, container, _swarms, info_hash, mut peer) = initialize_test_env(ephemeral_configuration()).await; let _announce_data = announce_peer_started(&container, &mut peer, &info_hash).await; @@ -118,3 +183,25 @@ async fn it_should_handle_the_scrape_request() { assert!(scrape_data.files.contains_key(&info_hash)); } + +#[tokio::test] +async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { + let mut core_config = ephemeral_configuration(); + core_config.tracker_policy.persistent_torrent_completed_stat = true; + + let (_config, container, swarms, info_hash, mut peer) = 
initialize_test_env(core_config).await; + + increase_number_of_downloads(&container, &mut peer, &info_hash).await; + + assert!(swarms.get_swarm_metadata(&info_hash).await.unwrap().unwrap().downloads() == 1); + + swarms.remove(&info_hash).await.unwrap(); + + // Make sure the swarm metadata is removed + assert!(swarms.get_swarm_metadata(&info_hash).await.unwrap().is_none()); + + // Load torrents from the database to ensure the completed stats are persisted + container.torrents_manager.load_torrents_from_database().unwrap(); + + assert!(swarms.get_swarm_metadata(&info_hash).await.unwrap().unwrap().downloads() == 1); +} From 28603fe1d877ab26076ff3e9c10a246e26122fab Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 26 May 2025 13:38:26 +0100 Subject: [PATCH 633/802] refactor: [#1524] extract TestEnv for integration tests in tracker-core --- .../tracker-core/tests/common/fixtures.rs | 52 +++++ packages/tracker-core/tests/common/mod.rs | 2 + .../tracker-core/tests/common/test_env.rs | 137 +++++++++++++ packages/tracker-core/tests/integration.rs | 191 ++++-------------- 4 files changed, 227 insertions(+), 155 deletions(-) create mode 100644 packages/tracker-core/tests/common/fixtures.rs create mode 100644 packages/tracker-core/tests/common/mod.rs create mode 100644 packages/tracker-core/tests/common/test_env.rs diff --git a/packages/tracker-core/tests/common/fixtures.rs b/packages/tracker-core/tests/common/fixtures.rs new file mode 100644 index 000000000..ea9c93a65 --- /dev/null +++ b/packages/tracker-core/tests/common/fixtures.rs @@ -0,0 +1,52 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::str::FromStr; + +use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; +use bittorrent_primitives::info_hash::InfoHash; +use torrust_tracker_configuration::Core; +use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; + +/// # Panics 
+/// +/// Will panic if the temporary file path is not a valid UTF-8 string. +#[must_use] +pub fn ephemeral_configuration() -> Core { + let mut config = Core::default(); + + let temp_file = ephemeral_sqlite_database(); + temp_file.to_str().unwrap().clone_into(&mut config.database.path); + + config +} + +/// # Panics +/// +/// Will panic if the string representation of the info hash is not a valid infohash. +#[must_use] +pub fn sample_info_hash() -> InfoHash { + "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 + .parse::() + .expect("String should be a valid info hash") +} + +/// Sample peer whose state is not relevant for the tests. +#[must_use] +pub fn sample_peer() -> Peer { + Peer { + peer_id: PeerId(*b"-qB00000000000000000"), + peer_addr: SocketAddr::new(remote_client_ip(), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes::new(0), + downloaded: NumberOfBytes::new(0), + left: NumberOfBytes::new(0), // No bytes left to download + event: AnnounceEvent::Completed, + } +} + +// The client peer IP. 
+#[must_use] +pub fn remote_client_ip() -> IpAddr { + IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) +} diff --git a/packages/tracker-core/tests/common/mod.rs b/packages/tracker-core/tests/common/mod.rs new file mode 100644 index 000000000..414e9d7b5 --- /dev/null +++ b/packages/tracker-core/tests/common/mod.rs @@ -0,0 +1,2 @@ +pub mod fixtures; +pub mod test_env; diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs new file mode 100644 index 000000000..8a443d8f0 --- /dev/null +++ b/packages/tracker-core/tests/common/test_env.rs @@ -0,0 +1,137 @@ +use std::net::IpAddr; +use std::sync::Arc; + +use aquatic_udp_protocol::AnnounceEvent; +use bittorrent_primitives::info_hash::InfoHash; +use bittorrent_tracker_core::announce_handler::PeersWanted; +use bittorrent_tracker_core::container::TrackerCoreContainer; +use tokio::task::yield_now; +use torrust_tracker_configuration::Core; +use torrust_tracker_primitives::core::{AnnounceData, ScrapeData}; +use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; + +pub struct TestEnv { + pub torrent_repository_container: Arc, + pub tracker_core_container: Arc, +} + +impl TestEnv { + #[must_use] + pub async fn started(core_config: Core) -> Self { + let test_env = TestEnv::new(core_config); + test_env.start().await; + test_env + } + + #[must_use] + pub fn new(core_config: Core) -> Self { + let core_config = Arc::new(core_config); + + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + core_config.tracker_usage_statistics.into(), + )); + + let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( + &core_config, + &torrent_repository_container, + )); + + Self { + torrent_repository_container, + tracker_core_container, + } + } + + pub async fn start(&self) { + let mut jobs = vec![]; + + let job 
= torrust_tracker_torrent_repository::statistics::event::listener::run_event_listener( + self.torrent_repository_container.event_bus.receiver(), + &self.torrent_repository_container.stats_repository, + ); + + jobs.push(job); + + let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( + self.torrent_repository_container.event_bus.receiver(), + &self.tracker_core_container.db_torrent_repository, + ); + + jobs.push(job); + + // Give the event listeners some time to start + // todo: they should notify when they are ready + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + } + + pub async fn announce_peer_started( + &mut self, + mut peer: Peer, + remote_client_ip: &IpAddr, + info_hash: &InfoHash, + ) -> AnnounceData { + peer.event = AnnounceEvent::Started; + + let announce_data = self + .tracker_core_container + .announce_handler + .announce(info_hash, &mut peer, remote_client_ip, &PeersWanted::AsManyAsPossible) + .await + .unwrap(); + + // Give time to the event listeners to process the event + yield_now().await; + + announce_data + } + + pub async fn announce_peer_completed( + &mut self, + mut peer: Peer, + remote_client_ip: &IpAddr, + info_hash: &InfoHash, + ) -> AnnounceData { + peer.event = AnnounceEvent::Completed; + + let announce_data = self + .tracker_core_container + .announce_handler + .announce(info_hash, &mut peer, remote_client_ip, &PeersWanted::AsManyAsPossible) + .await + .unwrap(); + + // Give time to the event listeners to process the event + yield_now().await; + + announce_data + } + + pub async fn scrape(&self, info_hash: &InfoHash) -> ScrapeData { + self.tracker_core_container + .scrape_handler + .scrape(&vec![*info_hash]) + .await + .unwrap() + } + + pub async fn increase_number_of_downloads(&mut self, peer: Peer, remote_client_ip: &IpAddr, info_hash: &InfoHash) { + let _announce_data = self.announce_peer_started(peer, remote_client_ip, info_hash).await; + let announce_data = 
self.announce_peer_completed(peer, remote_client_ip, info_hash).await; + + assert_eq!(announce_data.stats.downloads(), 1); + } + + pub async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.torrent_repository_container + .swarms + .get_swarm_metadata(info_hash) + .await + .unwrap() + } + + pub async fn remove_swarm(&self, info_hash: &InfoHash) { + self.torrent_repository_container.swarms.remove(info_hash).await.unwrap(); + } +} diff --git a/packages/tracker-core/tests/integration.rs b/packages/tracker-core/tests/integration.rs index 7af0ec4fa..d24acf67b 100644 --- a/packages/tracker-core/tests/integration.rs +++ b/packages/tracker-core/tests/integration.rs @@ -1,151 +1,18 @@ -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::str::FromStr; -use std::sync::Arc; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; -use bittorrent_primitives::info_hash::InfoHash; -use bittorrent_tracker_core::announce_handler::PeersWanted; -use bittorrent_tracker_core::container::TrackerCoreContainer; -use tokio::task::yield_now; -use torrust_tracker_configuration::{AnnouncePolicy, Core}; +mod common; + +use common::fixtures::{ephemeral_configuration, remote_client_ip, sample_info_hash, sample_peer}; +use common::test_env::TestEnv; +use torrust_tracker_configuration::AnnouncePolicy; use torrust_tracker_primitives::core::AnnounceData; -use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::DurationSinceUnixEpoch; -use torrust_tracker_test_helpers::configuration::ephemeral_sqlite_database; -use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; -use torrust_tracker_torrent_repository::Swarms; - -/// # Panics -/// -/// Will panic if the temporary file path is not a valid UTF-8 string. 
-#[must_use] -pub fn ephemeral_configuration() -> Core { - let mut config = Core::default(); - - let temp_file = ephemeral_sqlite_database(); - temp_file.to_str().unwrap().clone_into(&mut config.database.path); - - config -} - -/// # Panics -/// -/// Will panic if the string representation of the info hash is not a valid infohash. -#[must_use] -pub fn sample_info_hash() -> InfoHash { - "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0" // DevSkim: ignore DS173237 - .parse::() - .expect("String should be a valid info hash") -} - -/// Sample peer whose state is not relevant for the tests. -#[must_use] -pub fn sample_peer() -> Peer { - Peer { - peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(remote_client_ip(), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes::new(0), - downloaded: NumberOfBytes::new(0), - left: NumberOfBytes::new(0), // No bytes left to download - event: AnnounceEvent::Completed, - } -} - -// The client peer IP. 
-#[must_use] -fn remote_client_ip() -> IpAddr { - IpAddr::V4(Ipv4Addr::from_str("126.0.0.1").unwrap()) -} - -async fn initialize_test_env(core_config: Core) -> (Arc, Arc, Arc, InfoHash, Peer) { - let config = Arc::new(core_config); - - let info_hash = sample_info_hash(); - - let peer = sample_peer(); - - let (container, swarms) = start(&config).await; - - (config, container, swarms, info_hash, peer) -} - -async fn start(core_config: &Arc) -> (Arc, Arc) { - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( - core_config.tracker_usage_statistics.into(), - )); - - let container = Arc::new(TrackerCoreContainer::initialize_from( - core_config, - &torrent_repository_container, - )); - - let mut jobs = vec![]; - - let job = torrust_tracker_torrent_repository::statistics::event::listener::run_event_listener( - torrent_repository_container.event_bus.receiver(), - &torrent_repository_container.stats_repository, - ); - - jobs.push(job); - - let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( - torrent_repository_container.event_bus.receiver(), - &container.db_torrent_repository, - ); - - jobs.push(job); - - // Give the event listeners some time to start - // todo: they should notify when they are ready - tokio::time::sleep(std::time::Duration::from_millis(100)).await; - - (container, torrent_repository_container.swarms.clone()) -} - -async fn announce_peer_started(container: &Arc, peer: &mut Peer, info_hash: &InfoHash) -> AnnounceData { - peer.event = AnnounceEvent::Started; - - let announce_data = container - .announce_handler - .announce(info_hash, peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible) - .await - .unwrap(); - - // Give time to the event listeners to process the event - yield_now().await; - - announce_data -} - -async fn announce_peer_completed(container: &Arc, peer: &mut Peer, info_hash: &InfoHash) -> AnnounceData { - peer.event = AnnounceEvent::Completed; - - let announce_data = 
container - .announce_handler - .announce(info_hash, peer, &remote_client_ip(), &PeersWanted::AsManyAsPossible) - .await - .unwrap(); - - // Give time to the event listeners to process the event - yield_now().await; - - announce_data -} - -async fn increase_number_of_downloads(container: &Arc, peer: &mut Peer, info_hash: &InfoHash) { - let _announce_data = announce_peer_started(container, peer, info_hash).await; - let announce_data = announce_peer_completed(container, peer, info_hash).await; - - assert_eq!(announce_data.stats.downloads(), 1); -} #[tokio::test] async fn it_should_handle_the_announce_request() { - let (_config, container, _swarms, info_hash, mut peer) = initialize_test_env(ephemeral_configuration()).await; + let mut test_env = TestEnv::started(ephemeral_configuration()).await; - let announce_data = announce_peer_started(&container, &mut peer, &info_hash).await; + let announce_data = test_env + .announce_peer_started(sample_peer(), &remote_client_ip(), &sample_info_hash()) + .await; assert_eq!( announce_data, @@ -166,20 +33,26 @@ async fn it_should_handle_the_announce_request() { #[tokio::test] async fn it_should_not_return_the_peer_making_the_announce_request() { - let (_config, container, _swarms, info_hash, mut peer) = initialize_test_env(ephemeral_configuration()).await; + let mut test_env = TestEnv::started(ephemeral_configuration()).await; - let announce_data = announce_peer_started(&container, &mut peer, &info_hash).await; + let announce_data = test_env + .announce_peer_started(sample_peer(), &remote_client_ip(), &sample_info_hash()) + .await; assert_eq!(announce_data.peers.len(), 0); } #[tokio::test] async fn it_should_handle_the_scrape_request() { - let (_config, container, _swarms, info_hash, mut peer) = initialize_test_env(ephemeral_configuration()).await; + let mut test_env = TestEnv::started(ephemeral_configuration()).await; - let _announce_data = announce_peer_started(&container, &mut peer, &info_hash).await; + let info_hash = 
sample_info_hash(); + + let _announce_data = test_env + .announce_peer_started(sample_peer(), &remote_client_ip(), &info_hash) + .await; - let scrape_data = container.scrape_handler.scrape(&vec![info_hash]).await.unwrap(); + let scrape_data = test_env.scrape(&info_hash).await; assert!(scrape_data.files.contains_key(&info_hash)); } @@ -189,19 +62,27 @@ async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_t let mut core_config = ephemeral_configuration(); core_config.tracker_policy.persistent_torrent_completed_stat = true; - let (_config, container, swarms, info_hash, mut peer) = initialize_test_env(core_config).await; + let mut test_env = TestEnv::started(core_config).await; - increase_number_of_downloads(&container, &mut peer, &info_hash).await; + let info_hash = sample_info_hash(); - assert!(swarms.get_swarm_metadata(&info_hash).await.unwrap().unwrap().downloads() == 1); + test_env + .increase_number_of_downloads(sample_peer(), &remote_client_ip(), &info_hash) + .await; - swarms.remove(&info_hash).await.unwrap(); + assert!(test_env.get_swarm_metadata(&info_hash).await.unwrap().downloads() == 1); - // Make sure the swarm metadata is removed - assert!(swarms.get_swarm_metadata(&info_hash).await.unwrap().is_none()); + test_env.remove_swarm(&info_hash).await; + + // Ensure the swarm metadata is removed + assert!(test_env.get_swarm_metadata(&info_hash).await.is_none()); // Load torrents from the database to ensure the completed stats are persisted - container.torrents_manager.load_torrents_from_database().unwrap(); + test_env + .tracker_core_container + .torrents_manager + .load_torrents_from_database() + .unwrap(); - assert!(swarms.get_swarm_metadata(&info_hash).await.unwrap().unwrap().downloads() == 1); + assert!(test_env.get_swarm_metadata(&info_hash).await.unwrap().downloads() == 1); } From 8c3154953f80a221de63366bd10cc7111a71f126 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 26 May 2025 13:47:40 +0100 Subject: [PATCH 634/802] 
refactor: [#1524] rename methods --- .../src/environment.rs | 2 +- .../src/environment.rs | 2 +- .../src/services/announce.rs | 2 +- .../http-tracker-core/src/services/scrape.rs | 6 ++--- packages/tracker-core/src/announce_handler.rs | 24 +++++++++---------- packages/tracker-core/src/lib.rs | 8 +++---- packages/tracker-core/src/scrape_handler.rs | 6 ++--- packages/tracker-core/src/torrent/manager.rs | 4 ++-- .../src/torrent/repository/in_memory.rs | 2 +- packages/tracker-core/src/torrent/services.rs | 18 +++++++------- .../tracker-core/tests/common/test_env.rs | 6 ++--- .../udp-tracker-core/src/services/announce.rs | 2 +- .../udp-tracker-core/src/services/scrape.rs | 2 +- .../udp-tracker-server/src/environment.rs | 2 +- .../src/handlers/announce.rs | 4 ++-- .../udp-tracker-server/src/handlers/scrape.rs | 2 +- 16 files changed, 46 insertions(+), 46 deletions(-) diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index 10dada2db..59605d781 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -29,7 +29,7 @@ impl Environment { self.container .tracker_core_container .in_memory_torrent_repository - .upsert_peer(info_hash, peer, None) + .handle_announcement(info_hash, peer, None) .await } } diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index 92ca5a2d1..3c7ff564d 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -37,7 +37,7 @@ where self.container .tracker_core_container .in_memory_torrent_repository - .upsert_peer(info_hash, peer, None) + .handle_announcement(info_hash, peer, None) .await } } diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 9f39a04e4..0ad5ed143 100644 --- 
a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -87,7 +87,7 @@ impl AnnounceService { let announce_data = self .announce_handler - .announce( + .handle_announcement( &announce_request.info_hash, &mut peer, &remote_client_addr.ip(), diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 3da1aa88f..f22f2f632 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -78,7 +78,7 @@ impl ScrapeService { let scrape_data = if self.authentication_is_required() && !self.is_authenticated(maybe_key).await { ScrapeData::zeroed(&scrape_request.info_hashes) } else { - self.scrape_handler.scrape(&scrape_request.info_hashes).await? + self.scrape_handler.handle_scrape(&scrape_request.info_hashes).await? }; let remote_client_addr = resolve_remote_client_addr(&self.core_config.net.on_reverse_proxy.into(), client_ip_sources)?; @@ -291,7 +291,7 @@ mod tests { let original_peer_ip = peer.ip(); container .announce_handler - .announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::AsManyAsPossible) + .handle_announcement(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::AsManyAsPossible) .await .unwrap(); @@ -482,7 +482,7 @@ mod tests { let original_peer_ip = peer.ip(); container .announce_handler - .announce(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::AsManyAsPossible) + .handle_announcement(&info_hash, &mut peer, &original_peer_ip, &PeersWanted::AsManyAsPossible) .await .unwrap(); diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 0a3fef045..7d37ec9ed 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -154,7 +154,7 @@ impl AnnounceHandler { /// /// Returns an error if the tracker is running in `listed` mode and the /// torrent is not 
whitelisted. - pub async fn announce( + pub async fn handle_announcement( &self, info_hash: &InfoHash, peer: &mut peer::Peer, @@ -178,7 +178,7 @@ impl AnnounceHandler { let _number_of_downloads_increased = self .in_memory_torrent_repository - .upsert_peer(info_hash, peer, opt_persistent_torrent) + .handle_announcement(info_hash, peer, opt_persistent_torrent) .await; Ok(self.build_announce_data(info_hash, peer, peers_wanted).await) @@ -456,7 +456,7 @@ mod tests { let mut peer = sample_peer(); let announce_data = announce_handler - .announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) + .handle_announcement(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) .await .unwrap(); @@ -469,7 +469,7 @@ mod tests { let mut previously_announced_peer = sample_peer_1(); announce_handler - .announce( + .handle_announcement( &sample_info_hash(), &mut previously_announced_peer, &peer_ip(), @@ -480,7 +480,7 @@ mod tests { let mut peer = sample_peer_2(); let announce_data = announce_handler - .announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) + .handle_announcement(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) .await .unwrap(); @@ -493,7 +493,7 @@ mod tests { let mut previously_announced_peer_1 = sample_peer_1(); announce_handler - .announce( + .handle_announcement( &sample_info_hash(), &mut previously_announced_peer_1, &peer_ip(), @@ -504,7 +504,7 @@ mod tests { let mut previously_announced_peer_2 = sample_peer_2(); announce_handler - .announce( + .handle_announcement( &sample_info_hash(), &mut previously_announced_peer_2, &peer_ip(), @@ -515,7 +515,7 @@ mod tests { let mut peer = sample_peer_3(); let announce_data = announce_handler - .announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::only(1)) + .handle_announcement(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::only(1)) .await .unwrap(); @@ -540,7 +540,7 @@ mod tests { let mut peer = 
seeder(); let announce_data = announce_handler - .announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) + .handle_announcement(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) .await .unwrap(); @@ -554,7 +554,7 @@ mod tests { let mut peer = leecher(); let announce_data = announce_handler - .announce(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) + .handle_announcement(&sample_info_hash(), &mut peer, &peer_ip(), &PeersWanted::AsManyAsPossible) .await .unwrap(); @@ -568,7 +568,7 @@ mod tests { // We have to announce with "started" event because peer does not count if peer was not previously known let mut started_peer = started_peer(); announce_handler - .announce( + .handle_announcement( &sample_info_hash(), &mut started_peer, &peer_ip(), @@ -579,7 +579,7 @@ mod tests { let mut completed_peer = completed_peer(); let announce_data = announce_handler - .announce( + .handle_announcement( &sample_info_hash(), &mut completed_peer, &peer_ip(), diff --git a/packages/tracker-core/src/lib.rs b/packages/tracker-core/src/lib.rs index dacf41383..5167abf51 100644 --- a/packages/tracker-core/src/lib.rs +++ b/packages/tracker-core/src/lib.rs @@ -203,7 +203,7 @@ mod tests { // Announce a "complete" peer for the torrent let mut complete_peer = complete_peer(); announce_handler - .announce( + .handle_announcement( &info_hash, &mut complete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10)), @@ -215,7 +215,7 @@ mod tests { // Announce an "incomplete" peer for the torrent let mut incomplete_peer = incomplete_peer(); announce_handler - .announce( + .handle_announcement( &info_hash, &mut incomplete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11)), @@ -225,7 +225,7 @@ mod tests { .unwrap(); // Scrape - let scrape_data = scrape_handler.scrape(&vec![info_hash]).await.unwrap(); + let scrape_data = scrape_handler.handle_scrape(&vec![info_hash]).await.unwrap(); // The expected swarm metadata for the torrent let 
mut expected_scrape_data = ScrapeData::empty(); @@ -259,7 +259,7 @@ mod tests { let non_whitelisted_info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); // DevSkim: ignore DS173237 - let scrape_data = scrape_handler.scrape(&vec![non_whitelisted_info_hash]).await.unwrap(); + let scrape_data = scrape_handler.handle_scrape(&vec![non_whitelisted_info_hash]).await.unwrap(); // The expected zeroed swarm metadata for the file let mut expected_scrape_data = ScrapeData::empty(); diff --git a/packages/tracker-core/src/scrape_handler.rs b/packages/tracker-core/src/scrape_handler.rs index 443d989a6..9c94a4e50 100644 --- a/packages/tracker-core/src/scrape_handler.rs +++ b/packages/tracker-core/src/scrape_handler.rs @@ -107,7 +107,7 @@ impl ScrapeHandler { /// # BEP Reference: /// /// [BEP 48: Scrape Protocol](https://www.bittorrent.org/beps/bep_0048.html) - pub async fn scrape(&self, info_hashes: &Vec) -> Result { + pub async fn handle_scrape(&self, info_hashes: &Vec) -> Result { let mut scrape_data = ScrapeData::empty(); for info_hash in info_hashes { @@ -158,7 +158,7 @@ mod tests { let info_hashes = vec!["3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap()]; // DevSkim: ignore DS173237 - let scrape_data = scrape_handler.scrape(&info_hashes).await.unwrap(); + let scrape_data = scrape_handler.handle_scrape(&info_hashes).await.unwrap(); let mut expected_scrape_data = ScrapeData::empty(); @@ -176,7 +176,7 @@ mod tests { "99c82bb73505a3c0b453f9fa0e881d6e5a32a0c1".parse::().unwrap(), // DevSkim: ignore DS173237 ]; - let scrape_data = scrape_handler.scrape(&info_hashes).await.unwrap(); + let scrape_data = scrape_handler.handle_scrape(&info_hashes).await.unwrap(); let mut expected_scrape_data = ScrapeData::empty(); expected_scrape_data.add_file_with_zeroed_metadata(&info_hashes[0]); diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index f463eee98..171d554a8 100644 --- 
a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -241,7 +241,7 @@ mod tests { peer.updated = DurationSinceUnixEpoch::new(0, 0); let _number_of_downloads_increased = services .in_memory_torrent_repository - .upsert_peer(&infohash, &peer, None) + .handle_announcement(&infohash, &peer, None) .await; // Simulate the time has passed 1 second more than the max peer timeout. @@ -259,7 +259,7 @@ mod tests { // Add a peer to the torrent let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = in_memory_torrent_repository.upsert_peer(infohash, &peer, None).await; + let _number_of_downloads_increased = in_memory_torrent_repository.handle_announcement(infohash, &peer, None).await; // Remove the peer. The torrent is now peerless. in_memory_torrent_repository diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index bf8d083f8..bf63ef8d4 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -49,7 +49,7 @@ impl InMemoryTorrentRepository { /// /// This function panics if the underling swarms return an error. 
#[must_use] - pub async fn upsert_peer( + pub async fn handle_announcement( &self, info_hash: &InfoHash, peer: &peer::Peer, diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index 97694a80f..16db7b635 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -252,7 +252,7 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); let _number_of_downloads_increased = in_memory_torrent_repository - .upsert_peer(&info_hash, &sample_peer(), None) + .handle_announcement(&info_hash, &sample_peer(), None) .await; let torrent_info = get_torrent_info(&in_memory_torrent_repository, &info_hash).await.unwrap(); @@ -298,7 +298,7 @@ mod tests { let info_hash = InfoHash::from_str(&hash).unwrap(); let _number_of_downloads_increased = in_memory_torrent_repository - .upsert_peer(&info_hash, &sample_peer(), None) + .handle_announcement(&info_hash, &sample_peer(), None) .await; let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())).await; @@ -325,10 +325,10 @@ mod tests { let info_hash2 = InfoHash::from_str(&hash2).unwrap(); let _number_of_downloads_increased = in_memory_torrent_repository - .upsert_peer(&info_hash1, &sample_peer(), None) + .handle_announcement(&info_hash1, &sample_peer(), None) .await; let _number_of_downloads_increased = in_memory_torrent_repository - .upsert_peer(&info_hash2, &sample_peer(), None) + .handle_announcement(&info_hash2, &sample_peer(), None) .await; let offset = 0; @@ -350,10 +350,10 @@ mod tests { let info_hash2 = InfoHash::from_str(&hash2).unwrap(); let _number_of_downloads_increased = in_memory_torrent_repository - .upsert_peer(&info_hash1, &sample_peer(), None) + .handle_announcement(&info_hash1, &sample_peer(), None) .await; let _number_of_downloads_increased = in_memory_torrent_repository - 
.upsert_peer(&info_hash2, &sample_peer(), None) + .handle_announcement(&info_hash2, &sample_peer(), None) .await; let offset = 1; @@ -380,13 +380,13 @@ mod tests { let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash1 = InfoHash::from_str(&hash1).unwrap(); let _number_of_downloads_increased = in_memory_torrent_repository - .upsert_peer(&info_hash1, &sample_peer(), None) + .handle_announcement(&info_hash1, &sample_peer(), None) .await; let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); let _number_of_downloads_increased = in_memory_torrent_repository - .upsert_peer(&info_hash2, &sample_peer(), None) + .handle_announcement(&info_hash2, &sample_peer(), None) .await; let torrents = get_torrents_page(&in_memory_torrent_repository, Some(&Pagination::default())).await; @@ -436,7 +436,7 @@ mod tests { let info_hash = sample_info_hash(); let _ = in_memory_torrent_repository - .upsert_peer(&info_hash, &sample_peer(), None) + .handle_announcement(&info_hash, &sample_peer(), None) .await; let torrent_info = get_torrents(&in_memory_torrent_repository, &[info_hash]).await; diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs index 8a443d8f0..d4462e3f6 100644 --- a/packages/tracker-core/tests/common/test_env.rs +++ b/packages/tracker-core/tests/common/test_env.rs @@ -77,7 +77,7 @@ impl TestEnv { let announce_data = self .tracker_core_container .announce_handler - .announce(info_hash, &mut peer, remote_client_ip, &PeersWanted::AsManyAsPossible) + .handle_announcement(info_hash, &mut peer, remote_client_ip, &PeersWanted::AsManyAsPossible) .await .unwrap(); @@ -98,7 +98,7 @@ impl TestEnv { let announce_data = self .tracker_core_container .announce_handler - .announce(info_hash, &mut peer, remote_client_ip, &PeersWanted::AsManyAsPossible) + .handle_announcement(info_hash, &mut 
peer, remote_client_ip, &PeersWanted::AsManyAsPossible) .await .unwrap(); @@ -111,7 +111,7 @@ impl TestEnv { pub async fn scrape(&self, info_hash: &InfoHash) -> ScrapeData { self.tracker_core_container .scrape_handler - .scrape(&vec![*info_hash]) + .handle_scrape(&vec![*info_hash]) .await .unwrap() } diff --git a/packages/udp-tracker-core/src/services/announce.rs b/packages/udp-tracker-core/src/services/announce.rs index 6ea237d84..a69e91d8a 100644 --- a/packages/udp-tracker-core/src/services/announce.rs +++ b/packages/udp-tracker-core/src/services/announce.rs @@ -78,7 +78,7 @@ impl AnnounceService { let announce_data = self .announce_handler - .announce(&info_hash, &mut peer, &remote_client_ip, &peers_wanted) + .handle_announcement(&info_hash, &mut peer, &remote_client_ip, &peers_wanted) .await?; self.send_event(info_hash, peer, client_socket_addr, server_service_binding) diff --git a/packages/udp-tracker-core/src/services/scrape.rs b/packages/udp-tracker-core/src/services/scrape.rs index b42004f63..8551351fb 100644 --- a/packages/udp-tracker-core/src/services/scrape.rs +++ b/packages/udp-tracker-core/src/services/scrape.rs @@ -56,7 +56,7 @@ impl ScrapeService { let scrape_data = self .scrape_handler - .scrape(&Self::convert_from_aquatic(&request.info_hashes)) + .handle_scrape(&Self::convert_from_aquatic(&request.info_hashes)) .await?; self.send_event(client_socket_addr, server_service_binding).await; diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index f92d5dd29..c4e0ce96f 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -39,7 +39,7 @@ where .container .tracker_core_container .in_memory_torrent_repository - .upsert_peer(info_hash, peer, None) + .handle_announcement(info_hash, peer, None) .await; } } diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 
567f43740..edc36ebc8 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -370,7 +370,7 @@ mod tests { .into(); let _number_of_downloads_increased = in_memory_torrent_repository - .upsert_peer(&info_hash.0.into(), &peer_using_ipv6, None) + .handle_announcement(&info_hash.0.into(), &peer_using_ipv6, None) .await; } @@ -714,7 +714,7 @@ mod tests { .into(); let _number_of_downloads_increased = in_memory_torrent_repository - .upsert_peer(&info_hash.0.into(), &peer_using_ipv4, None) + .handle_announcement(&info_hash.0.into(), &peer_using_ipv4, None) .await; } diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index a9462e0f9..183d78b70 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -166,7 +166,7 @@ mod tests { .into(); let _number_of_downloads_increased = in_memory_torrent_repository - .upsert_peer(&info_hash.0.into(), &peer, None) + .handle_announcement(&info_hash.0.into(), &peer, None) .await; } From 67d177b6d4af24608d6be5a80ed10434242c4cd4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 26 May 2025 15:47:18 +0100 Subject: [PATCH 635/802] refactor: [#1524] command/query separation The returned value is not needed anymore. Secondary action (increase metrics) is done in the event listeners. 
--- .../src/environment.rs | 4 +- .../src/environment.rs | 4 +- packages/torrent-repository/src/swarm.rs | 157 +++++++----------- packages/torrent-repository/src/swarms.rs | 6 +- packages/tracker-core/src/announce_handler.rs | 3 +- packages/tracker-core/src/torrent/manager.rs | 4 +- .../src/torrent/repository/in_memory.rs | 5 +- packages/tracker-core/src/torrent/services.rs | 18 +- .../udp-tracker-server/src/environment.rs | 3 +- .../src/handlers/announce.rs | 4 +- .../udp-tracker-server/src/handlers/scrape.rs | 2 +- 11 files changed, 82 insertions(+), 128 deletions(-) diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index 59605d781..0c1431db5 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -25,12 +25,12 @@ pub struct Environment { impl Environment { /// Add a torrent to the tracker - pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { self.container .tracker_core_container .in_memory_torrent_repository .handle_announcement(info_hash, peer, None) - .await + .await; } } diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index 3c7ff564d..be93a8723 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -33,12 +33,12 @@ where S: std::fmt::Debug + std::fmt::Display, { /// Add a torrent to the tracker - pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> bool { + pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { self.container .tracker_core_container .in_memory_torrent_repository .handle_announcement(info_hash, peer, None) - .await + .await; } } diff --git 
a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index b9076289b..84e1f2da4 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -33,17 +33,13 @@ impl Swarm { } } - pub async fn handle_announcement(&mut self, incoming_announce: &PeerAnnouncement) -> bool { - let mut downloads_increased: bool = false; - + pub async fn handle_announcement(&mut self, incoming_announce: &PeerAnnouncement) { let _previous_peer = match peer::ReadInfo::get_event(incoming_announce) { AnnounceEvent::Started | AnnounceEvent::None | AnnounceEvent::Completed => { - self.upsert_peer(Arc::new(*incoming_announce), &mut downloads_increased).await + self.upsert_peer(Arc::new(*incoming_announce)).await } AnnounceEvent::Stopped => self.remove_peer(&incoming_announce.peer_addr).await, }; - - downloads_increased } pub async fn remove_inactive(&mut self, current_cutoff: DurationSinceUnixEpoch) -> usize { @@ -159,26 +155,20 @@ impl Swarm { !self.should_be_removed(policy) } - async fn upsert_peer( - &mut self, - incoming_announce: Arc, - downloads_increased: &mut bool, - ) -> Option> { + async fn upsert_peer(&mut self, incoming_announce: Arc) -> Option> { let announcement = incoming_announce.clone(); if let Some(previous_announce) = self.peers.insert(incoming_announce.peer_addr, incoming_announce) { - *downloads_increased = self.update_metadata_on_update(&previous_announce, &announcement); + let downloads_increased = self.update_metadata_on_update(&previous_announce, &announcement); self.trigger_peer_updated_event(&previous_announce, &announcement).await; - if *downloads_increased { + if downloads_increased { self.trigger_peer_download_completed_event(&announcement).await; } Some(previous_announce) } else { - *downloads_increased = false; - self.update_metadata_on_insert(&announcement); self.trigger_peer_added_event(&announcement).await; @@ -362,36 +352,30 @@ mod tests { #[tokio::test] async fn 
it_should_allow_inserting_a_new_peer() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - assert_eq!(swarm.upsert_peer(peer.into(), &mut downloads_increased).await, None); + assert_eq!(swarm.upsert_peer(peer.into()).await, None); } #[tokio::test] async fn it_should_allow_updating_a_preexisting_peer() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; - assert_eq!( - swarm.upsert_peer(peer.into(), &mut downloads_increased).await, - Some(Arc::new(peer)) - ); + assert_eq!(swarm.upsert_peer(peer.into()).await, Some(Arc::new(peer))); } #[tokio::test] async fn it_should_allow_getting_all_peers() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; assert_eq!(swarm.peers(None), [Arc::new(peer)]); } @@ -399,11 +383,10 @@ mod tests { #[tokio::test] async fn it_should_allow_getting_one_peer_by_id() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; assert_eq!(swarm.get(&peer.peer_addr), Some(Arc::new(peer)).as_ref()); } @@ -411,11 +394,10 @@ mod tests { #[tokio::test] async fn it_should_increase_the_number_of_peers_after_inserting_a_new_one() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; 
assert_eq!(swarm.len(), 1); } @@ -423,11 +405,10 @@ mod tests { #[tokio::test] async fn it_should_decrease_the_number_of_peers_after_removing_one() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; swarm.remove_peer(&peer.peer_addr).await; @@ -437,11 +418,10 @@ mod tests { #[tokio::test] async fn it_should_allow_removing_an_existing_peer() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let peer = PeerBuilder::default().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; let old = swarm.remove_peer(&peer.peer_addr).await; @@ -461,19 +441,18 @@ mod tests { #[tokio::test] async fn it_should_allow_getting_all_peers_excluding_peers_with_a_given_address() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let peer1 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert_peer(peer1.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer1.into()).await; let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); - swarm.upsert_peer(peer2.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer2.into()).await; assert_eq!(swarm.peers_excluding(&peer2.peer_addr, None), [Arc::new(peer1)]); } @@ -481,13 +460,13 @@ mod tests { #[tokio::test] async fn it_should_count_inactive_peers() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; + let one_second = DurationSinceUnixEpoch::new(1, 0); // Insert the peer let last_update_time 
= DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; let inactive_peers_total = swarm.count_inactive_peers(last_update_time + one_second); @@ -497,13 +476,13 @@ mod tests { #[tokio::test] async fn it_should_remove_inactive_peers() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; + let one_second = DurationSinceUnixEpoch::new(1, 0); // Insert the peer let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; // Remove peers not updated since one second after inserting the peer swarm.remove_inactive(last_update_time + one_second).await; @@ -514,13 +493,13 @@ mod tests { #[tokio::test] async fn it_should_not_remove_active_peers() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; + let one_second = DurationSinceUnixEpoch::new(1, 0); // Insert the peer let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; // Remove peers not updated since one second before inserting the peer. 
swarm.remove_inactive(last_update_time - one_second).await; @@ -542,7 +521,7 @@ mod tests { async fn not_empty_swarm() -> Swarm { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - swarm.upsert_peer(PeerBuilder::default().build().into(), &mut false).await; + swarm.upsert_peer(PeerBuilder::default().build().into()).await; swarm } @@ -550,13 +529,12 @@ mod tests { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); let mut peer = PeerBuilder::leecher().build(); - let mut downloads_increased = false; - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; assert!(swarm.metadata().downloads() > 0); @@ -631,17 +609,16 @@ mod tests { #[tokio::test] async fn it_should_allow_inserting_two_identical_peers_except_for_the_socket_address() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let peer1 = PeerBuilder::default() .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert_peer(peer1.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer1.into()).await; let peer2 = PeerBuilder::default() .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) .build(); - swarm.upsert_peer(peer2.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer2.into()).await; assert_eq!(swarm.len(), 2); } @@ -649,7 +626,6 @@ mod tests { #[tokio::test] async fn it_should_not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; // When that happens the peer ID will be changed in the swarm. // In practice, it's like if the peer had changed its ID. 
@@ -658,13 +634,13 @@ mod tests { .with_peer_id(&PeerId(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert_peer(peer1.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer1.into()).await; let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) .build(); - swarm.upsert_peer(peer2.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer2.into()).await; assert_eq!(swarm.len(), 1); } @@ -672,13 +648,12 @@ mod tests { #[tokio::test] async fn it_should_return_the_swarm_metadata() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; - swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; + swarm.upsert_peer(seeder.into()).await; + swarm.upsert_peer(leecher.into()).await; assert_eq!( swarm.metadata(), @@ -693,13 +668,12 @@ mod tests { #[tokio::test] async fn it_should_return_the_number_of_seeders_in_the_list() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; - swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; + swarm.upsert_peer(seeder.into()).await; + swarm.upsert_peer(leecher.into()).await; let (seeders, _leechers) = swarm.seeders_and_leechers(); @@ -709,13 +683,12 @@ mod tests { #[tokio::test] async fn it_should_return_the_number_of_leechers_in_the_list() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); let leecher = 
PeerBuilder::leecher().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; - swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; + swarm.upsert_peer(seeder.into()).await; + swarm.upsert_peer(leecher.into()).await; let (_seeders, leechers) = swarm.seeders_and_leechers(); @@ -739,13 +712,12 @@ mod tests { #[tokio::test] async fn it_should_increase_the_number_of_leechers_if_the_new_peer_is_a_leecher_() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let leechers = swarm.metadata().leechers(); let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; + swarm.upsert_peer(leecher.into()).await; assert_eq!(swarm.metadata().leechers(), leechers + 1); } @@ -753,13 +725,12 @@ mod tests { #[tokio::test] async fn it_should_increase_the_number_of_seeders_if_the_new_peer_is_a_seeder() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let seeders = swarm.metadata().seeders(); let seeder = PeerBuilder::seeder().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; + swarm.upsert_peer(seeder.into()).await; assert_eq!(swarm.metadata().seeders(), seeders + 1); } @@ -768,13 +739,12 @@ mod tests { async fn it_should_not_increasing_the_number_of_downloads_if_the_new_peer_has_completed_downloading_as_it_was_not_previously_known( ) { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let downloads = swarm.metadata().downloads(); let seeder = PeerBuilder::seeder().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; + swarm.upsert_peer(seeder.into()).await; assert_eq!(swarm.metadata().downloads(), downloads); } @@ -789,11 +759,10 @@ mod tests { #[tokio::test] async fn it_should_decrease_the_number_of_leechers_if_the_removed_peer_was_a_leecher() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - 
let mut downloads_increased = false; let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; + swarm.upsert_peer(leecher.into()).await; let leechers = swarm.metadata().leechers(); @@ -805,11 +774,10 @@ mod tests { #[tokio::test] async fn it_should_decrease_the_number_of_seeders_if_the_removed_peer_was_a_seeder() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; + swarm.upsert_peer(seeder.into()).await; let seeders = swarm.metadata().seeders(); @@ -830,11 +798,10 @@ mod tests { #[tokio::test] async fn it_should_decrease_the_number_of_leechers_when_a_removed_peer_is_a_leecher() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let leecher = PeerBuilder::leecher().build(); - swarm.upsert_peer(leecher.into(), &mut downloads_increased).await; + swarm.upsert_peer(leecher.into()).await; let leechers = swarm.metadata().leechers(); @@ -846,11 +813,10 @@ mod tests { #[tokio::test] async fn it_should_decrease_the_number_of_seeders_when_the_removed_peer_is_a_seeder() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let seeder = PeerBuilder::seeder().build(); - swarm.upsert_peer(seeder.into(), &mut downloads_increased).await; + swarm.upsert_peer(seeder.into()).await; let seeders = swarm.metadata().seeders(); @@ -870,18 +836,17 @@ mod tests { #[tokio::test] async fn it_should_increase_seeders_and_decreasing_leechers_when_the_peer_changes_from_leecher_to_seeder_() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; let leechers = swarm.metadata().leechers(); let seeders 
= swarm.metadata().seeders(); peer.left = NumberOfBytes::new(0); // Convert to seeder - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; assert_eq!(swarm.metadata().seeders(), seeders + 1); assert_eq!(swarm.metadata().leechers(), leechers - 1); @@ -890,18 +855,17 @@ mod tests { #[tokio::test] async fn it_should_increase_leechers_and_decreasing_seeders_when_the_peer_changes_from_seeder_to_leecher() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let mut peer = PeerBuilder::seeder().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; let leechers = swarm.metadata().leechers(); let seeders = swarm.metadata().seeders(); peer.left = NumberOfBytes::new(10); // Convert to leecher - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; assert_eq!(swarm.metadata().leechers(), leechers + 1); assert_eq!(swarm.metadata().seeders(), seeders - 1); @@ -910,17 +874,16 @@ mod tests { #[tokio::test] async fn it_should_increase_the_number_of_downloads_when_the_peer_announces_completed_downloading() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; let mut peer = PeerBuilder::leecher().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; let downloads = swarm.metadata().downloads(); peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; assert_eq!(swarm.metadata().downloads(), downloads + 1); } @@ -928,19 +891,18 @@ mod tests { #[tokio::test] async fn it_should_not_increasing_the_number_of_downloads_when_the_peer_announces_completed_downloading_twice_() { let mut swarm = Swarm::new(&sample_info_hash(), 0, None); - let mut downloads_increased = false; 
let mut peer = PeerBuilder::leecher().build(); - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; let downloads = swarm.metadata().downloads(); peer.event = aquatic_udp_protocol::AnnounceEvent::Completed; - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; assert_eq!(swarm.metadata().downloads(), downloads + 1); } @@ -971,8 +933,7 @@ mod tests { let mut swarm = Swarm::new(&sample_info_hash(), 0, Some(Arc::new(event_sender_mock))); - let mut downloads_increased = false; - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; } #[tokio::test] @@ -990,8 +951,7 @@ mod tests { let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); // Insert the peer - let mut downloads_increased = false; - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; swarm.remove_peer(&peer.peer_addr).await; } @@ -1011,8 +971,7 @@ mod tests { let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); // Insert the peer - let mut downloads_increased = false; - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; // Peers not updated after this time will be removed let current_cutoff = peer.updated + DurationSinceUnixEpoch::from_secs(1); @@ -1042,11 +1001,10 @@ mod tests { let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); // Insert the peer - let mut downloads_increased = false; - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; // Update the peer - swarm.upsert_peer(peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(peer.into()).await; } #[tokio::test] @@ -1079,11 +1037,10 @@ mod tests { let mut 
swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); // Insert the peer - let mut downloads_increased = false; - swarm.upsert_peer(started_peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(started_peer.into()).await; // Announce as completed - swarm.upsert_peer(completed_peer.into(), &mut downloads_increased).await; + swarm.upsert_peer(completed_peer.into()).await; } } } diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 36f83070d..1504ac1f4 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -54,7 +54,7 @@ impl Swarms { info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option, - ) -> Result { + ) -> Result<(), Error> { let swarm_handle = match self.swarms.get(info_hash) { None => { let number_of_downloads = opt_persistent_torrent.unwrap_or_default(); @@ -80,9 +80,9 @@ impl Swarms { let mut swarm = swarm_handle.value().lock().await; - let downloads_increased = swarm.handle_announcement(peer).await; + swarm.handle_announcement(peer).await; - Ok(downloads_increased) + Ok(()) } /// Inserts a new swarm. Only used for testing purposes. 
diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 7d37ec9ed..ffd244f2a 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -176,8 +176,7 @@ impl AnnounceHandler { peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip)); - let _number_of_downloads_increased = self - .in_memory_torrent_repository + self.in_memory_torrent_repository .handle_announcement(info_hash, peer, opt_persistent_torrent) .await; diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index 171d554a8..d9997c4ad 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -239,7 +239,7 @@ mod tests { // Add a peer to the torrent let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = services + services .in_memory_torrent_repository .handle_announcement(&infohash, &peer, None) .await; @@ -259,7 +259,7 @@ mod tests { // Add a peer to the torrent let mut peer = sample_peer(); peer.updated = DurationSinceUnixEpoch::new(0, 0); - let _number_of_downloads_increased = in_memory_torrent_repository.handle_announcement(infohash, &peer, None).await; + in_memory_torrent_repository.handle_announcement(infohash, &peer, None).await; // Remove the peer. The torrent is now peerless. in_memory_torrent_repository diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index bf63ef8d4..5c8a335b6 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -48,17 +48,16 @@ impl InMemoryTorrentRepository { /// # Panics /// /// This function panics if the underling swarms return an error. 
- #[must_use] pub async fn handle_announcement( &self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option, - ) -> bool { + ) { self.swarms .handle_announcement(info_hash, peer, opt_persistent_torrent) .await - .expect("Failed to upsert the peer in swarms") + .expect("Failed to upsert the peer in swarms"); } /// Removes inactive peers from all torrent entries. diff --git a/packages/tracker-core/src/torrent/services.rs b/packages/tracker-core/src/torrent/services.rs index 16db7b635..2ae51fc78 100644 --- a/packages/tracker-core/src/torrent/services.rs +++ b/packages/tracker-core/src/torrent/services.rs @@ -251,7 +251,7 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash, &sample_peer(), None) .await; @@ -297,7 +297,7 @@ mod tests { let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash = InfoHash::from_str(&hash).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash, &sample_peer(), None) .await; @@ -324,10 +324,10 @@ mod tests { let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash1, &sample_peer(), None) .await; - let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash2, &sample_peer(), None) .await; @@ -349,10 +349,10 @@ mod tests { let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); 
- let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash1, &sample_peer(), None) .await; - let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash2, &sample_peer(), None) .await; @@ -379,13 +379,13 @@ mod tests { let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); // DevSkim: ignore DS173237 let info_hash1 = InfoHash::from_str(&hash1).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash1, &sample_peer(), None) .await; let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); // DevSkim: ignore DS173237 let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash2, &sample_peer(), None) .await; @@ -435,7 +435,7 @@ mod tests { let info_hash = sample_info_hash(); - let _ = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash, &sample_peer(), None) .await; diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index c4e0ce96f..94a166e4e 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -35,8 +35,7 @@ where /// Add a torrent to the tracker #[allow(dead_code)] pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { - let _number_of_downloads_increased = self - .container + self.container .tracker_core_container .in_memory_torrent_repository .handle_announcement(info_hash, peer, None) diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index edc36ebc8..e2ca6821e 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ 
b/packages/udp-tracker-server/src/handlers/announce.rs @@ -369,7 +369,7 @@ mod tests { .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); - let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash.0.into(), &peer_using_ipv6, None) .await; } @@ -713,7 +713,7 @@ mod tests { .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); - let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash.0.into(), &peer_using_ipv4, None) .await; } diff --git a/packages/udp-tracker-server/src/handlers/scrape.rs b/packages/udp-tracker-server/src/handlers/scrape.rs index 183d78b70..8bac05c1e 100644 --- a/packages/udp-tracker-server/src/handlers/scrape.rs +++ b/packages/udp-tracker-server/src/handlers/scrape.rs @@ -165,7 +165,7 @@ mod tests { .with_bytes_left_to_download(0) .into(); - let _number_of_downloads_increased = in_memory_torrent_repository + in_memory_torrent_repository .handle_announcement(&info_hash.0.into(), &peer, None) .await; } From 21752709a3703f9e791510ccea97bbcbd495bb1d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 26 May 2025 18:39:20 +0100 Subject: [PATCH 636/802] feat: [#1535] scaffolding for tracker-core metrics New metric added: ``` tracker_core_persistent_torrents_downloads_total{} 1 ``` However, it's not persisted yet. TODO: - Persist into the database when updated. - Load from database when the tracker starts. 
--- Cargo.lock | 1 + .../src/v1/context/stats/handlers.rs | 2 + .../src/v1/context/stats/routes.rs | 2 + .../src/statistics/services.rs | 5 + .../src/http/client/requests/announce.rs | 8 +- packages/tracker-core/Cargo.toml | 1 + packages/tracker-core/src/container.rs | 6 +- .../src/statistics/event/handler.rs | 21 ++- .../src/statistics/event/listener.rs | 13 +- .../tracker-core/src/statistics/metrics.rs | 63 +++++++++ packages/tracker-core/src/statistics/mod.rs | 26 ++++ .../tracker-core/src/statistics/repository.rs | 132 ++++++++++++++++++ .../tracker-core/tests/common/test_env.rs | 1 + .../config/tracker.development.sqlite3.toml | 4 +- src/bootstrap/jobs/tracker_core.rs | 5 +- 15 files changed, 276 insertions(+), 14 deletions(-) create mode 100644 packages/tracker-core/src/statistics/metrics.rs create mode 100644 packages/tracker-core/src/statistics/repository.rs diff --git a/Cargo.lock b/Cargo.lock index 5415149e8..96de11cb2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -678,6 +678,7 @@ dependencies = [ "torrust-tracker-configuration", "torrust-tracker-events", "torrust-tracker-located-error", + "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", "torrust-tracker-torrent-repository", diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs index 552958d74..3a353f1fc 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs @@ -70,6 +70,7 @@ pub async fn get_metrics_handler( Arc, Arc>, Arc, + Arc, Arc, Arc, Arc, @@ -83,6 +84,7 @@ pub async fn get_metrics_handler( state.3.clone(), state.4.clone(), state.5.clone(), + state.6.clone(), ) .await; diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs index 3eeaa8bf4..f6c661130 100644 --- 
a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs @@ -28,7 +28,9 @@ pub fn add(prefix: &str, router: Router, http_api_container: &Arc, ban_service: Arc>, swarms_stats_repository: Arc, + tracker_core_stats_repository: Arc, http_stats_repository: Arc, udp_stats_repository: Arc, udp_server_stats_repository: Arc, @@ -102,6 +103,7 @@ pub async fn get_labeled_metrics( let _udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); let swarms_stats = swarms_stats_repository.get_metrics().await; + let tracker_core_stats = tracker_core_stats_repository.get_metrics().await; let http_stats = http_stats_repository.get_stats().await; let udp_stats_repository = udp_stats_repository.get_stats().await; let udp_server_stats = udp_server_stats_repository.get_stats().await; @@ -112,6 +114,9 @@ pub async fn get_labeled_metrics( metrics .merge(&swarms_stats.metric_collection) .expect("msg: failed to merge torrent repository metrics"); + metrics + .merge(&tracker_core_stats.metric_collection) + .expect("msg: failed to merge tracker core metrics"); metrics .merge(&http_stats.metric_collection) .expect("msg: failed to merge HTTP core metrics"); diff --git a/packages/tracker-client/src/http/client/requests/announce.rs b/packages/tracker-client/src/http/client/requests/announce.rs index 7d20fbba8..29b5d1221 100644 --- a/packages/tracker-client/src/http/client/requests/announce.rs +++ b/packages/tracker-client/src/http/client/requests/announce.rs @@ -53,16 +53,16 @@ pub type BaseTenASCII = u64; pub type PortNumber = u16; pub enum Event { - //Started, - //Stopped, + Started, + Stopped, Completed, } impl fmt::Display for Event { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - //Event::Started => write!(f, "started"), - //Event::Stopped => write!(f, "stopped"), + Event::Started => write!(f, "started"), + Event::Stopped => write!(f, "stopped"), Event::Completed 
=> write!(f, "completed"), } } diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml index 3c89505b2..a2d08dfa0 100644 --- a/packages/tracker-core/Cargo.toml +++ b/packages/tracker-core/Cargo.toml @@ -31,6 +31,7 @@ torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } +torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } tracing = "0" diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index f4fb272de..ed56fb106 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -14,11 +14,11 @@ use crate::scrape_handler::ScrapeHandler; use crate::torrent::manager::TorrentsManager; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; -use crate::whitelist; use crate::whitelist::authorization::WhitelistAuthorization; use crate::whitelist::manager::WhitelistManager; use crate::whitelist::repository::in_memory::InMemoryWhitelist; use crate::whitelist::setup::initialize_whitelist_manager; +use crate::{statistics, whitelist}; pub struct TrackerCoreContainer { pub core_config: Arc, @@ -33,6 +33,7 @@ pub struct TrackerCoreContainer { pub in_memory_torrent_repository: Arc, pub db_torrent_repository: Arc, pub torrents_manager: Arc, + pub stats_repository: Arc, } impl TrackerCoreContainer { @@ -58,6 +59,8 @@ impl TrackerCoreContainer { &db_torrent_repository, )); + let stats_repository = 
Arc::new(statistics::repository::Repository::new()); + let announce_handler = Arc::new(AnnounceHandler::new( core_config, &whitelist_authorization, @@ -80,6 +83,7 @@ impl TrackerCoreContainer { in_memory_torrent_repository, db_torrent_repository, torrents_manager, + stats_repository, } } } diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs index 7b6ce83b7..ac6d0639e 100644 --- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -1,14 +1,19 @@ use std::sync::Arc; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_torrent_repository::event::Event; +use crate::statistics::repository::Repository; +use crate::statistics::TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL; use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; pub async fn handle_event( event: Event, + stats_repository: &Arc, db_torrent_repository: &Arc, - _now: DurationSinceUnixEpoch, + now: DurationSinceUnixEpoch, ) { match event { // Torrent events @@ -36,6 +41,7 @@ pub async fn handle_event( Event::PeerDownloadCompleted { info_hash, peer } => { tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer download completed", ); + // Increment the number of downloads for the torrent match db_torrent_repository.increase_number_of_downloads(&info_hash) { Ok(()) => { tracing::debug!(info_hash = ?info_hash, "Number of downloads increased"); @@ -44,6 +50,19 @@ pub async fn handle_event( tracing::error!(info_hash = ?info_hash, error = ?err, "Failed to increase number of downloads"); } } + + // Increment the number of downloads for all the torrents + let _unused = stats_repository + .increment_counter( + &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), + &LabelSet::default(), + now, + ) + .await; + + // todo: + // - 
Persist the metric into the database. + // - Load the metric from the database. } } } diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs index e04675092..f85b2b7a0 100644 --- a/packages/tracker-core/src/statistics/event/listener.rs +++ b/packages/tracker-core/src/statistics/event/listener.rs @@ -6,26 +6,33 @@ use torrust_tracker_events::receiver::RecvError; use torrust_tracker_torrent_repository::event::receiver::Receiver; use super::handler::handle_event; +use crate::statistics::repository::Repository; use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; use crate::{CurrentClock, TRACKER_CORE_LOG_TARGET}; #[must_use] pub fn run_event_listener( receiver: Receiver, + repository: &Arc, db_torrent_repository: &Arc, ) -> JoinHandle<()> { + let stats_repository = repository.clone(); let db_torrent_repository: Arc = db_torrent_repository.clone(); tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Starting torrent repository event listener"); tokio::spawn(async move { - dispatch_events(receiver, db_torrent_repository).await; + dispatch_events(receiver, stats_repository, db_torrent_repository).await; tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Torrent repository listener finished"); }) } -async fn dispatch_events(mut receiver: Receiver, db_torrent_repository: Arc) { +async fn dispatch_events( + mut receiver: Receiver, + stats_repository: Arc, + db_torrent_repository: Arc, +) { let shutdown_signal = tokio::signal::ctrl_c(); tokio::pin!(shutdown_signal); @@ -41,7 +48,7 @@ async fn dispatch_events(mut receiver: Receiver, db_torrent_repository: Arc { match result { - Ok(event) => handle_event(event, &db_torrent_repository, CurrentClock::now()).await, + Ok(event) => handle_event(event, &stats_repository, &db_torrent_repository, CurrentClock::now()).await, Err(e) => { match e { RecvError::Closed => { diff --git a/packages/tracker-core/src/statistics/metrics.rs 
b/packages/tracker-core/src/statistics/metrics.rs new file mode 100644 index 000000000..f8ab3f9d9 --- /dev/null +++ b/packages/tracker-core/src/statistics/metrics.rs @@ -0,0 +1,63 @@ +use serde::Serialize; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +/// Metrics collected by the torrent repository. +#[derive(Debug, Clone, PartialEq, Default, Serialize)] +pub struct Metrics { + /// A collection of metrics. + pub metric_collection: MetricCollection, +} + +impl Metrics { + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn increment_counter( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.increase_counter(metric_name, labels, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn set_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.set_gauge(metric_name, labels, value, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn increment_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.increment_gauge(metric_name, labels, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. 
+ pub fn decrement_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.decrement_gauge(metric_name, labels, now) + } +} diff --git a/packages/tracker-core/src/statistics/mod.rs b/packages/tracker-core/src/statistics/mod.rs index 53f112654..1cd9aac6b 100644 --- a/packages/tracker-core/src/statistics/mod.rs +++ b/packages/tracker-core/src/statistics/mod.rs @@ -1 +1,27 @@ pub mod event; +pub mod metrics; +pub mod repository; + +use metrics::Metrics; +use torrust_tracker_metrics::metric::description::MetricDescription; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_metrics::unit::Unit; + +// Torrent metrics + +const TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL: &str = "tracker_core_persistent_torrents_downloads_total"; + +#[must_use] +pub fn describe_metrics() -> Metrics { + let mut metrics = Metrics::default(); + + // Torrent metrics + + metrics.metric_collection.describe_counter( + &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("The total number of torrent downloads (persisted).")), + ); + + metrics +} diff --git a/packages/tracker-core/src/statistics/repository.rs b/packages/tracker-core/src/statistics/repository.rs new file mode 100644 index 000000000..fe1292d00 --- /dev/null +++ b/packages/tracker-core/src/statistics/repository.rs @@ -0,0 +1,132 @@ +use std::sync::Arc; + +use tokio::sync::{RwLock, RwLockReadGuard}; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::Error; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::describe_metrics; +use super::metrics::Metrics; + +/// A repository for the torrent repository metrics. 
+#[derive(Clone)] +pub struct Repository { + pub stats: Arc>, +} + +impl Default for Repository { + fn default() -> Self { + Self::new() + } +} + +impl Repository { + #[must_use] + pub fn new() -> Self { + let stats = Arc::new(RwLock::new(describe_metrics())); + + Self { stats } + } + + pub async fn get_metrics(&self) -> RwLockReadGuard<'_, Metrics> { + self.stats.read().await + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increment the counter. + pub async fn increment_counter( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.increment_counter(metric_name, labels, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to increment the counter: {}", err), + } + + result + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// set the gauge. + pub async fn set_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.set_gauge(metric_name, labels, value, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to set the gauge: {}", err), + } + + result + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increment the gauge. 
+ pub async fn increment_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.increment_gauge(metric_name, labels, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to increment the gauge: {}", err), + } + + result + } + + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// decrement the gauge. + pub async fn decrement_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.decrement_gauge(metric_name, labels, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to decrement the gauge: {}", err), + } + + result + } +} diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs index d4462e3f6..0be8bd4c6 100644 --- a/packages/tracker-core/tests/common/test_env.rs +++ b/packages/tracker-core/tests/common/test_env.rs @@ -56,6 +56,7 @@ impl TestEnv { let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( self.torrent_repository_container.event_bus.receiver(), + &self.tracker_core_container.stats_repository, &self.tracker_core_container.db_torrent_repository, ); diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index 89d700132..17a73a1d2 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -7,12 +7,12 @@ schema_version = "2.0.0" threshold = "info" [core] -inactive_peer_cleanup_interval = 60 +inactive_peer_cleanup_interval = 120 listed = false private = false [core.tracker_policy] -max_peer_timeout = 30 
+max_peer_timeout = 60 persistent_torrent_completed_stat = true remove_peerless_torrents = true diff --git a/src/bootstrap/jobs/tracker_core.rs b/src/bootstrap/jobs/tracker_core.rs index bb879db6b..37c53b9e4 100644 --- a/src/bootstrap/jobs/tracker_core.rs +++ b/src/bootstrap/jobs/tracker_core.rs @@ -6,11 +6,10 @@ use torrust_tracker_configuration::Configuration; use crate::container::AppContainer; pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { - // todo: enable this when labeled metrics are implemented. - //if config.core.tracker_usage_statistics || config.core.tracker_policy.persistent_torrent_completed_stat { - if config.core.tracker_policy.persistent_torrent_completed_stat { + if config.core.tracker_usage_statistics || config.core.tracker_policy.persistent_torrent_completed_stat { let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( app_container.torrent_repository_container.event_bus.receiver(), + &app_container.tracker_core_container.stats_repository, &app_container.tracker_core_container.db_torrent_repository, ); From 6f11534d49742a8b6654fe9450b683c2bd49e9a9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 10:37:27 +0100 Subject: [PATCH 637/802] feat: [#1539] add method to Database trait to persis global downloads counter It does not use the new methods in production yet. 
--- ...3000_torrust_tracker_create_all_tables.sql | 1 + ...er_new_torrent_aggregate_metrics_table.sql | 6 ++ ...3000_torrust_tracker_create_all_tables.sql | 1 + ...er_new_torrent_aggregate_metrics_table.sql | 6 ++ .../tracker-core/src/databases/driver/mod.rs | 44 ++++++++++++ .../src/databases/driver/mysql.rs | 57 +++++++++++++++- .../src/databases/driver/sqlite.rs | 68 ++++++++++++++++++- packages/tracker-core/src/databases/mod.rs | 34 +++++++++- 8 files changed, 214 insertions(+), 3 deletions(-) create mode 100644 packages/tracker-core/migrations/mysql/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql create mode 100644 packages/tracker-core/migrations/sqlite/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql diff --git a/packages/tracker-core/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql b/packages/tracker-core/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql index 407ae4dd1..ab160bd75 100644 --- a/packages/tracker-core/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql +++ b/packages/tracker-core/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql @@ -4,6 +4,7 @@ CREATE TABLE info_hash VARCHAR(40) NOT NULL UNIQUE ); +# todo: rename to `torrent_metrics` CREATE TABLE IF NOT EXISTS torrents ( id integer PRIMARY KEY AUTO_INCREMENT, diff --git a/packages/tracker-core/migrations/mysql/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql b/packages/tracker-core/migrations/mysql/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql new file mode 100644 index 000000000..36f940cc3 --- /dev/null +++ b/packages/tracker-core/migrations/mysql/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql @@ -0,0 +1,6 @@ +CREATE TABLE + IF NOT EXISTS torrent_aggregate_metrics ( + id integer PRIMARY KEY AUTO_INCREMENT, + metric_name VARCHAR(50) NOT NULL UNIQUE, + value INTEGER DEFAULT 0 NOT NULL + ); \ No newline 
at end of file diff --git a/packages/tracker-core/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql b/packages/tracker-core/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql index bd451bf8b..c5bcad926 100644 --- a/packages/tracker-core/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql +++ b/packages/tracker-core/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql @@ -4,6 +4,7 @@ CREATE TABLE info_hash TEXT NOT NULL UNIQUE ); +# todo: rename to `torrent_metrics` CREATE TABLE IF NOT EXISTS torrents ( id INTEGER PRIMARY KEY AUTOINCREMENT, diff --git a/packages/tracker-core/migrations/sqlite/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql b/packages/tracker-core/migrations/sqlite/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql new file mode 100644 index 000000000..34166903c --- /dev/null +++ b/packages/tracker-core/migrations/sqlite/20250527093000_torrust_tracker_new_torrent_aggregate_metrics_table.sql @@ -0,0 +1,6 @@ +CREATE TABLE + IF NOT EXISTS torrent_aggregate_metrics ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + metric_name TEXT NOT NULL UNIQUE, + value INTEGER DEFAULT 0 NOT NULL + ); \ No newline at end of file diff --git a/packages/tracker-core/src/databases/driver/mod.rs b/packages/tracker-core/src/databases/driver/mod.rs index 2cedab2d7..e8f0ecbfb 100644 --- a/packages/tracker-core/src/databases/driver/mod.rs +++ b/packages/tracker-core/src/databases/driver/mod.rs @@ -6,6 +6,9 @@ use sqlite::Sqlite; use super::error::Error; use super::Database; +/// Metric name in DB for the total number of downloads across all torrents. +const TORRENTS_DOWNLOADS_TOTAL: &str = "torrents_downloads_total"; + /// The database management system used by the tracker. 
/// /// Refer to: @@ -97,9 +100,14 @@ pub(crate) mod tests { // Persistent torrents (stats) + // Torrent metrics handling_torrent_persistence::it_should_save_and_load_persistent_torrents(driver); handling_torrent_persistence::it_should_load_all_persistent_torrents(driver); handling_torrent_persistence::it_should_increase_the_number_of_downloads_for_a_given_torrent(driver); + // Aggregate metrics for all torrents + handling_torrent_persistence::it_should_save_and_load_the_global_number_of_downloads(driver); + handling_torrent_persistence::it_should_load_the_global_number_of_downloads(driver); + handling_torrent_persistence::it_should_increase_the_global_number_of_downloads(driver); // Authentication keys (for private trackers) @@ -154,6 +162,8 @@ pub(crate) mod tests { use crate::databases::Database; use crate::test_helpers::tests::sample_info_hash; + // Metrics per torrent + pub fn it_should_save_and_load_persistent_torrents(driver: &Arc>) { let infohash = sample_info_hash(); @@ -192,6 +202,40 @@ pub(crate) mod tests { assert_eq!(number_of_downloads, 2); } + + // Aggregate metrics for all torrents + + pub fn it_should_save_and_load_the_global_number_of_downloads(driver: &Arc>) { + let number_of_downloads = 1; + + driver.save_global_number_of_downloads(number_of_downloads).unwrap(); + + let number_of_downloads = driver.load_global_number_of_downloads().unwrap().unwrap(); + + assert_eq!(number_of_downloads, 1); + } + + pub fn it_should_load_the_global_number_of_downloads(driver: &Arc>) { + let number_of_downloads = 1; + + driver.save_global_number_of_downloads(number_of_downloads).unwrap(); + + let number_of_downloads = driver.load_global_number_of_downloads().unwrap().unwrap(); + + assert_eq!(number_of_downloads, 1); + } + + pub fn it_should_increase_the_global_number_of_downloads(driver: &Arc>) { + let number_of_downloads = 1; + + driver.save_global_number_of_downloads(number_of_downloads).unwrap(); + + driver.increase_global_number_of_downloads().unwrap(); + + let 
number_of_downloads = driver.load_global_number_of_downloads().unwrap().unwrap(); + + assert_eq!(number_of_downloads, 2); + } } mod handling_authentication_keys { diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index d07f061c2..bfbc47ebd 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -15,7 +15,7 @@ use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MySqlConnectionManager; use torrust_tracker_primitives::{PersistentTorrent, PersistentTorrents}; -use super::{Database, Driver, Error}; +use super::{Database, Driver, Error, TORRENTS_DOWNLOADS_TOTAL}; use crate::authentication::key::AUTH_KEY_LENGTH; use crate::authentication::{self, Key}; @@ -46,6 +46,27 @@ impl Mysql { Ok(Self { pool }) } + + fn load_torrent_aggregate_metric(&self, metric_name: &str) -> Result, Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let query = conn.exec_first::( + "SELECT value FROM torrent_aggregate_metrics WHERE metric_name = :metric_name", + params! { "metric_name" => metric_name }, + ); + + let persistent_torrent = query?; + + Ok(persistent_torrent) + } + + fn save_torrent_aggregate_metric(&self, metric_name: &str, completed: PersistentTorrent) -> Result<(), Error> { + const COMMAND : &str = "INSERT INTO torrent_aggregate_metrics (metric_name, value) VALUES (:metric_name, :completed) ON DUPLICATE KEY UPDATE value = VALUES(value)"; + + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + Ok(conn.exec_drop(COMMAND, params! { metric_name, completed })?) 
+ } } impl Database for Mysql { @@ -66,6 +87,14 @@ impl Database for Mysql { );" .to_string(); + let create_torrent_aggregate_metrics_table = " + CREATE TABLE IF NOT EXISTS torrent_aggregate_metrics ( + id integer PRIMARY KEY AUTO_INCREMENT, + metric_name VARCHAR(50) NOT NULL UNIQUE, + value INTEGER DEFAULT 0 NOT NULL + );" + .to_string(); + let create_keys_table = format!( " CREATE TABLE IF NOT EXISTS `keys` ( @@ -82,6 +111,8 @@ impl Database for Mysql { conn.query_drop(&create_torrents_table) .expect("Could not create torrents table."); + conn.query_drop(&create_torrent_aggregate_metrics_table) + .expect("Could not create create_torrent_aggregate_metrics_table table."); conn.query_drop(&create_keys_table).expect("Could not create keys table."); conn.query_drop(&create_whitelist_table) .expect("Could not create whitelist table."); @@ -168,6 +199,30 @@ impl Database for Mysql { Ok(()) } + /// Refer to [`databases::Database::load_global_number_of_downloads`](crate::core::databases::Database::load_global_number_of_downloads). + fn load_global_number_of_downloads(&self) -> Result, Error> { + self.load_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL) + } + + /// Refer to [`databases::Database::save_global_number_of_downloads`](crate::core::databases::Database::save_global_number_of_downloads). + fn save_global_number_of_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error> { + self.save_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL, downloaded) + } + + /// Refer to [`databases::Database::increase_global_number_of_downloads`](crate::core::databases::Database::increase_global_number_of_downloads). + fn increase_global_number_of_downloads(&self) -> Result<(), Error> { + let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let metric_name = TORRENTS_DOWNLOADS_TOTAL; + + conn.exec_drop( + "UPDATE torrent_aggregate_metrics SET value = value + 1 WHERE metric_name = :metric_name", + params! 
{ metric_name }, + )?; + + Ok(()) + } + /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). fn load_keys(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; diff --git a/packages/tracker-core/src/databases/driver/sqlite.rs b/packages/tracker-core/src/databases/driver/sqlite.rs index d36f24f8b..91e969233 100644 --- a/packages/tracker-core/src/databases/driver/sqlite.rs +++ b/packages/tracker-core/src/databases/driver/sqlite.rs @@ -15,7 +15,7 @@ use r2d2_sqlite::rusqlite::types::Null; use r2d2_sqlite::SqliteConnectionManager; use torrust_tracker_primitives::{DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; -use super::{Database, Driver, Error}; +use super::{Database, Driver, Error, TORRENTS_DOWNLOADS_TOTAL}; use crate::authentication::{self, Key}; const DRIVER: Driver = Driver::Sqlite3; @@ -49,6 +49,39 @@ impl Sqlite { Ok(Self { pool }) } + + fn load_torrent_aggregate_metric(&self, metric_name: &str) -> Result, Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let mut stmt = conn.prepare("SELECT value FROM torrent_aggregate_metrics WHERE metric_name = ?")?; + + let mut rows = stmt.query([metric_name])?; + + let persistent_torrent = rows.next()?; + + Ok(persistent_torrent.map(|f| { + let value: i64 = f.get(0).unwrap(); + u32::try_from(value).unwrap() + })) + } + + fn save_torrent_aggregate_metric(&self, metric_name: &str, completed: PersistentTorrent) -> Result<(), Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let insert = conn.execute( + "INSERT INTO torrent_aggregate_metrics (metric_name, value) VALUES (?1, ?2) ON CONFLICT(metric_name) DO UPDATE SET value = ?2", + [metric_name.to_string(), completed.to_string()], + )?; + + if insert == 0 { + Err(Error::InsertFailed { + location: Location::caller(), + driver: DRIVER, + }) + } else { + Ok(()) + } + } } impl Database for Sqlite { @@ -69,6 +102,14 @@ impl Database for Sqlite { );" 
.to_string(); + let create_torrent_aggregate_metrics_table = " + CREATE TABLE IF NOT EXISTS torrent_aggregate_metrics ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + metric_name TEXT NOT NULL UNIQUE, + value INTEGER DEFAULT 0 NOT NULL + );" + .to_string(); + let create_keys_table = " CREATE TABLE IF NOT EXISTS keys ( id INTEGER PRIMARY KEY AUTOINCREMENT, @@ -82,6 +123,7 @@ impl Database for Sqlite { conn.execute(&create_whitelist_table, [])?; conn.execute(&create_keys_table, [])?; conn.execute(&create_torrents_table, [])?; + conn.execute(&create_torrent_aggregate_metrics_table, [])?; Ok(()) } @@ -172,6 +214,30 @@ impl Database for Sqlite { Ok(()) } + /// Refer to [`databases::Database::load_global_number_of_downloads`](crate::core::databases::Database::load_global_number_of_downloads). + fn load_global_number_of_downloads(&self) -> Result, Error> { + self.load_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL) + } + + /// Refer to [`databases::Database::save_global_number_of_downloads`](crate::core::databases::Database::save_global_number_of_downloads). + fn save_global_number_of_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error> { + self.save_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL, downloaded) + } + + /// Refer to [`databases::Database::increase_global_number_of_downloads`](crate::core::databases::Database::increase_global_number_of_downloads). + fn increase_global_number_of_downloads(&self) -> Result<(), Error> { + let conn = self.pool.get().map_err(|e| (e, DRIVER))?; + + let metric_name = TORRENTS_DOWNLOADS_TOTAL; + + let _ = conn.execute( + "UPDATE torrent_aggregate_metrics SET value = value + 1 WHERE metric_name = ?", + [metric_name], + )?; + + Ok(()) + } + /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). 
fn load_keys(&self) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; diff --git a/packages/tracker-core/src/databases/mod.rs b/packages/tracker-core/src/databases/mod.rs index 2703ab8bf..a9d6b2a22 100644 --- a/packages/tracker-core/src/databases/mod.rs +++ b/packages/tracker-core/src/databases/mod.rs @@ -131,16 +131,48 @@ pub trait Database: Sync + Send { /// It does not create a new entry if the torrent is not found and it does /// not return an error. /// + /// # Context: Torrent Metrics + /// + /// # Arguments + /// + /// * `info_hash` - A reference to the torrent's info hash. + /// + /// # Errors + /// + /// Returns an [`Error`] if the query failed. + fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error>; + + /// Loads the total number of downloads for all torrents from the database. + /// + /// # Context: Torrent Metrics + /// + /// # Errors + /// + /// Returns an [`Error`] if the total downloads cannot be loaded. + fn load_global_number_of_downloads(&self) -> Result, Error>; + + /// Saves the total number of downloads for all torrents into the database. + /// + /// # Context: Torrent Metrics + /// /// # Arguments /// /// * `info_hash` - A reference to the torrent's info hash. + /// * `downloaded` - The number of times the torrent has been downloaded. + /// + /// # Errors + /// + /// Returns an [`Error`] if the total downloads cannot be saved. + fn save_global_number_of_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error>; + + /// Increases the total number of downloads for all torrents. /// /// # Context: Torrent Metrics /// /// # Errors /// /// Returns an [`Error`] if the query failed. 
- fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error>; + fn increase_global_number_of_downloads(&self) -> Result<(), Error>; // Whitelist From 9301e587ab8f4d565c19418ebb46a4340a1fcc9f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 10:57:20 +0100 Subject: [PATCH 638/802] feat: [#1539] save global downloads counter in DB The total number of dowloads (for all torrents) is saved in the DB, but not loaded yet. todo: load the initial value when the tracker starts. --- .../src/statistics/event/handler.rs | 34 +++++++++++-------- .../src/torrent/repository/persisted.rs | 16 +++++++++ 2 files changed, 36 insertions(+), 14 deletions(-) diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs index ac6d0639e..e394641b8 100644 --- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -41,17 +41,7 @@ pub async fn handle_event( Event::PeerDownloadCompleted { info_hash, peer } => { tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer download completed", ); - // Increment the number of downloads for the torrent - match db_torrent_repository.increase_number_of_downloads(&info_hash) { - Ok(()) => { - tracing::debug!(info_hash = ?info_hash, "Number of downloads increased"); - } - Err(err) => { - tracing::error!(info_hash = ?info_hash, error = ?err, "Failed to increase number of downloads"); - } - } - - // Increment the number of downloads for all the torrents + // Increment the number of downloads for all the torrents in memory let _unused = stats_repository .increment_counter( &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), @@ -60,9 +50,25 @@ pub async fn handle_event( ) .await; - // todo: - // - Persist the metric into the database. - // - Load the metric from the database. 
+ // Increment the number of downloads for the torrent in the database + match db_torrent_repository.increase_number_of_downloads(&info_hash) { + Ok(()) => { + tracing::debug!(info_hash = ?info_hash, "Number of torrent downloads increased"); + } + Err(err) => { + tracing::error!(info_hash = ?info_hash, error = ?err, "Failed to increase number of downloads for the torrent"); + } + } + + // Increment the global number of downloads (for all torrents) in the database + match db_torrent_repository.increase_global_number_of_downloads() { + Ok(()) => { + tracing::debug!("Global number of downloads increased"); + } + Err(err) => { + tracing::error!(error = ?err, "Failed to increase global number of downloads"); + } + } } } } diff --git a/packages/tracker-core/src/torrent/repository/persisted.rs b/packages/tracker-core/src/torrent/repository/persisted.rs index dec571baf..62e3244ba 100644 --- a/packages/tracker-core/src/torrent/repository/persisted.rs +++ b/packages/tracker-core/src/torrent/repository/persisted.rs @@ -67,6 +67,22 @@ impl DatabasePersistentTorrentRepository { } } + /// Increases the global number of downloads for all torrent. + /// + /// If the metric is not found, it creates it. + /// + /// # Errors + /// + /// Returns an [`Error`] if the database operation fails. + pub(crate) fn increase_global_number_of_downloads(&self) -> Result<(), Error> { + let torrent = self.database.load_global_number_of_downloads()?; + + match torrent { + Some(_number_of_downloads) => self.database.increase_global_number_of_downloads(), + None => self.database.save_global_number_of_downloads(1), + } + } + /// Loads all persistent torrent metrics from the database. 
/// /// This function retrieves the torrent metrics (e.g., download counts) from the persistent store From c07f3667572b9c70c72f281aabe0a2f13cebcdc3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 12:05:20 +0100 Subject: [PATCH 639/802] feat: [#1539] load global downloads counter from DB When the tracker starts. --- packages/metrics/src/counter.rs | 11 ++++ packages/metrics/src/metric/mod.rs | 4 ++ packages/metrics/src/metric_collection.rs | 43 +++++++++++++- packages/metrics/src/sample.rs | 5 ++ packages/metrics/src/sample_collection.rs | 9 +++ .../tracker-core/src/statistics/metrics.rs | 13 +++++ packages/tracker-core/src/statistics/mod.rs | 1 + .../src/statistics/persisted_metrics.rs | 57 +++++++++++++++++++ .../tracker-core/src/statistics/repository.rs | 25 ++++++++ .../src/torrent/repository/persisted.rs | 45 +++++++++------ packages/tracker-core/tests/integration.rs | 2 +- src/app.rs | 17 ++++++ 12 files changed, 214 insertions(+), 18 deletions(-) create mode 100644 packages/tracker-core/src/statistics/persisted_metrics.rs diff --git a/packages/metrics/src/counter.rs b/packages/metrics/src/counter.rs index 3a816c75b..ac6d21836 100644 --- a/packages/metrics/src/counter.rs +++ b/packages/metrics/src/counter.rs @@ -20,6 +20,10 @@ impl Counter { pub fn increment(&mut self, value: u64) { self.0 += value; } + + pub fn absolute(&mut self, value: u64) { + self.0 = value; + } } impl From for Counter { @@ -73,6 +77,13 @@ mod tests { assert_eq!(counter.value(), 3); } + #[test] + fn it_could_set_to_an_absolute_value() { + let mut counter = Counter::new(0); + counter.absolute(1); + assert_eq!(counter.value(), 1); + } + #[test] fn it_serializes_to_prometheus() { let counter = Counter::new(42); diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index 05779f09f..2118637b8 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -55,6 +55,10 @@ impl Metric { pub fn increment(&mut self, 
label_set: &LabelSet, time: DurationSinceUnixEpoch) { self.sample_collection.increment(label_set, time); } + + pub fn absolute(&mut self, label_set: &LabelSet, value: u64, time: DurationSinceUnixEpoch) { + self.sample_collection.absolute(label_set, value, time); + } } impl Metric { diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 83b08f178..824397000 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -72,6 +72,8 @@ impl MetricCollection { self.counters.get_value(name, label_set) } + /// Increases the counter for the given metric name and labels. + /// /// # Errors /// /// Return an error if a metrics of a different type with the same name @@ -93,6 +95,30 @@ impl MetricCollection { Ok(()) } + /// Sets the counter for the given metric name and labels. + /// + /// # Errors + /// + /// Return an error if a metrics of a different type with the same name + /// already exists. + pub fn set_counter( + &mut self, + name: &MetricName, + label_set: &LabelSet, + value: u64, + time: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + if self.gauges.metrics.contains_key(name) { + return Err(Error::MetricNameCollisionAdding { + metric_name: name.clone(), + }); + } + + self.counters.absolute(name, label_set, value, time); + + Ok(()) + } + pub fn ensure_counter_exists(&mut self, name: &MetricName) { self.counters.ensure_metric_exists(name); } @@ -361,7 +387,7 @@ impl MetricKindCollection { /// /// # Panics /// - /// Panics if the metric does not exist and it could not be created. + /// Panics if the metric does not exist. pub fn increment(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { self.ensure_metric_exists(name); @@ -370,6 +396,21 @@ impl MetricKindCollection { metric.increment(label_set, time); } + /// Sets the counter to an absolute value for the given metric name and labels. 
+ /// + /// If the metric name does not exist, it will be created. + /// + /// # Panics + /// + /// Panics if the metric does not exist. + pub fn absolute(&mut self, name: &MetricName, label_set: &LabelSet, value: u64, time: DurationSinceUnixEpoch) { + self.ensure_metric_exists(name); + + let metric = self.metrics.get_mut(name).expect("Counter metric should exist"); + + metric.absolute(label_set, value, time); + } + #[must_use] pub fn get_value(&self, name: &MetricName, label_set: &LabelSet) -> Option { self.metrics diff --git a/packages/metrics/src/sample.rs b/packages/metrics/src/sample.rs index 4621c9906..ad4dff00e 100644 --- a/packages/metrics/src/sample.rs +++ b/packages/metrics/src/sample.rs @@ -122,6 +122,11 @@ impl Measurement { self.value.increment(1); self.set_recorded_at(time); } + + pub fn absolute(&mut self, value: u64, time: DurationSinceUnixEpoch) { + self.value.absolute(value); + self.set_recorded_at(time); + } } impl Measurement { diff --git a/packages/metrics/src/sample_collection.rs b/packages/metrics/src/sample_collection.rs index ea6b4d4af..e815f26ec 100644 --- a/packages/metrics/src/sample_collection.rs +++ b/packages/metrics/src/sample_collection.rs @@ -79,6 +79,15 @@ impl SampleCollection { sample.increment(time); } + + pub fn absolute(&mut self, label_set: &LabelSet, value: u64, time: DurationSinceUnixEpoch) { + let sample = self + .samples + .entry(label_set.clone()) + .or_insert_with(|| Measurement::new(Counter::default(), time)); + + sample.absolute(value, time); + } } impl SampleCollection { diff --git a/packages/tracker-core/src/statistics/metrics.rs b/packages/tracker-core/src/statistics/metrics.rs index f8ab3f9d9..02cc51499 100644 --- a/packages/tracker-core/src/statistics/metrics.rs +++ b/packages/tracker-core/src/statistics/metrics.rs @@ -24,6 +24,19 @@ impl Metrics { self.metric_collection.increase_counter(metric_name, labels, now) } + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. 
+ pub fn set_counter( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + value: u64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.set_counter(metric_name, labels, value, now) + } + /// # Errors /// /// Returns an error if the metric does not exist and it cannot be created. diff --git a/packages/tracker-core/src/statistics/mod.rs b/packages/tracker-core/src/statistics/mod.rs index 1cd9aac6b..89d6b79d5 100644 --- a/packages/tracker-core/src/statistics/mod.rs +++ b/packages/tracker-core/src/statistics/mod.rs @@ -1,5 +1,6 @@ pub mod event; pub mod metrics; +pub mod persisted_metrics; pub mod repository; use metrics::Metrics; diff --git a/packages/tracker-core/src/statistics/persisted_metrics.rs b/packages/tracker-core/src/statistics/persisted_metrics.rs new file mode 100644 index 000000000..4d53236a5 --- /dev/null +++ b/packages/tracker-core/src/statistics/persisted_metrics.rs @@ -0,0 +1,57 @@ +use std::sync::Arc; + +use thiserror::Error; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::{metric_collection, metric_name}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::repository::Repository; +use super::TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL; +use crate::databases; +use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; + +/// Loads persisted metrics from the database and sets them in the stats repository. +/// +/// # Errors +/// +/// This function will return an error if the database query fails or if the +/// metric collection fails to set the initial metric values. +pub async fn load_persisted_metrics( + stats_repository: &Arc, + db_torrent_repository: &Arc, + now: DurationSinceUnixEpoch, +) -> Result<(), Error> { + if let Some(downloads) = db_torrent_repository.load_global_number_of_downloads()? 
{ + stats_repository + .set_counter( + &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), + &LabelSet::default(), + u64::from(downloads), + now, + ) + .await?; + } + + Ok(()) +} + +#[derive(Error, Debug, Clone)] +pub enum Error { + #[error("Database error: {err}")] + DatabaseError { err: databases::error::Error }, + + #[error("Metrics error: {err}")] + MetricsError { err: metric_collection::Error }, +} + +impl From for Error { + fn from(err: databases::error::Error) -> Self { + Self::DatabaseError { err } + } +} + +impl From for Error { + fn from(err: metric_collection::Error) -> Self { + Self::MetricsError { err } + } +} diff --git a/packages/tracker-core/src/statistics/repository.rs b/packages/tracker-core/src/statistics/repository.rs index fe1292d00..dd0ebebe7 100644 --- a/packages/tracker-core/src/statistics/repository.rs +++ b/packages/tracker-core/src/statistics/repository.rs @@ -57,6 +57,31 @@ impl Repository { result } + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increment the counter. 
+ pub async fn set_counter( + &self, + metric_name: &MetricName, + labels: &LabelSet, + value: u64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + let mut stats_lock = self.stats.write().await; + + let result = stats_lock.set_counter(metric_name, labels, value, now); + + drop(stats_lock); + + match result { + Ok(()) => {} + Err(ref err) => tracing::error!("Failed to set the counter: {}", err), + } + + result + } + /// # Errors /// /// This function will return an error if the metric collection fails to diff --git a/packages/tracker-core/src/torrent/repository/persisted.rs b/packages/tracker-core/src/torrent/repository/persisted.rs index 62e3244ba..1818065fd 100644 --- a/packages/tracker-core/src/torrent/repository/persisted.rs +++ b/packages/tracker-core/src/torrent/repository/persisted.rs @@ -47,6 +47,8 @@ impl DatabasePersistentTorrentRepository { } } + // Single Torrent Metrics + /// Increases the number of downloads for a given torrent. /// /// If the torrent is not found, it creates a new entry. @@ -67,22 +69,6 @@ impl DatabasePersistentTorrentRepository { } } - /// Increases the global number of downloads for all torrent. - /// - /// If the metric is not found, it creates it. - /// - /// # Errors - /// - /// Returns an [`Error`] if the database operation fails. - pub(crate) fn increase_global_number_of_downloads(&self) -> Result<(), Error> { - let torrent = self.database.load_global_number_of_downloads()?; - - match torrent { - Some(_number_of_downloads) => self.database.increase_global_number_of_downloads(), - None => self.database.save_global_number_of_downloads(1), - } - } - /// Loads all persistent torrent metrics from the database. 
/// /// This function retrieves the torrent metrics (e.g., download counts) from the persistent store @@ -123,6 +109,33 @@ impl DatabasePersistentTorrentRepository { pub(crate) fn save(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error> { self.database.save_persistent_torrent(info_hash, downloaded) } + + // Aggregate Metrics + + /// Increases the global number of downloads for all torrent. + /// + /// If the metric is not found, it creates it. + /// + /// # Errors + /// + /// Returns an [`Error`] if the database operation fails. + pub(crate) fn increase_global_number_of_downloads(&self) -> Result<(), Error> { + let torrent = self.database.load_global_number_of_downloads()?; + + match torrent { + Some(_number_of_downloads) => self.database.increase_global_number_of_downloads(), + None => self.database.save_global_number_of_downloads(1), + } + } + + /// Loads the global number of downloads for all torrents from the database. + /// + /// # Errors + /// + /// Returns an [`Error`] if the underlying database query fails. + pub(crate) fn load_global_number_of_downloads(&self) -> Result, Error> { + self.database.load_global_number_of_downloads() + } } #[cfg(test)] diff --git a/packages/tracker-core/tests/integration.rs b/packages/tracker-core/tests/integration.rs index d24acf67b..986bdaaf3 100644 --- a/packages/tracker-core/tests/integration.rs +++ b/packages/tracker-core/tests/integration.rs @@ -58,7 +58,7 @@ async fn it_should_handle_the_scrape_request() { } #[tokio::test] -async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { +async fn it_should_persist_the_number_of_completed_peers_for_each_torrent_into_the_database() { let mut core_config = ephemeral_configuration(); core_config.tracker_policy.persistent_torrent_completed_stat = true; diff --git a/src/app.rs b/src/app.rs index 5037ad761..571e034f5 100644 --- a/src/app.rs +++ b/src/app.rs @@ -23,6 +23,7 @@ //! 
- Tracker REST API: the tracker API can be enabled/disabled. use std::sync::Arc; +use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::{Configuration, HttpTracker, UdpTracker}; use tracing::instrument; @@ -32,6 +33,7 @@ use crate::bootstrap::jobs::{ }; use crate::bootstrap::{self}; use crate::container::AppContainer; +use crate::CurrentClock; pub async fn run() -> (Arc, JobManager) { let (config, app_container) = bootstrap::app::setup(); @@ -63,6 +65,8 @@ pub async fn start(config: &Configuration, app_container: &Arc) -> async fn load_data_from_database(config: &Configuration, app_container: &Arc) { load_peer_keys(config, app_container).await; load_whitelisted_torrents(config, app_container).await; + load_torrent_metrics(config, app_container).await; + // todo: disabled because of performance issues. // The tracker demo has a lot of torrents and loading them all at once is not // efficient. We also load them on demand but the total number of downloads @@ -134,6 +138,19 @@ fn load_torrents_from_database(config: &Configuration, app_container: &Arc) { + if config.core.tracker_policy.persistent_torrent_completed_stat { + bittorrent_tracker_core::statistics::persisted_metrics::load_persisted_metrics( + &app_container.tracker_core_container.stats_repository, + &app_container.tracker_core_container.db_torrent_repository, + CurrentClock::now(), + ) + .await + .expect("Could not load persisted metrics from database."); + } +} + fn start_torrent_repository_event_listener( config: &Configuration, app_container: &Arc, From 2c9311bf2240cbc56ccbb3ec5dfee954d666a13e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 12:54:35 +0100 Subject: [PATCH 640/802] test: [#1539] add integration test for persisted downloads counter --- .../tracker-core/tests/common/test_env.rs | 31 +++++++++++++++++++ packages/tracker-core/tests/integration.rs | 25 +++++++++++++++ 2 files changed, 56 insertions(+) diff --git 
a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs index 0be8bd4c6..4e14e9bd8 100644 --- a/packages/tracker-core/tests/common/test_env.rs +++ b/packages/tracker-core/tests/common/test_env.rs @@ -5,11 +5,15 @@ use aquatic_udp_protocol::AnnounceEvent; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::PeersWanted; use bittorrent_tracker_core::container::TrackerCoreContainer; +use bittorrent_tracker_core::statistics::persisted_metrics::load_persisted_metrics; use tokio::task::yield_now; use torrust_tracker_configuration::Core; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric::MetricName; use torrust_tracker_primitives::core::{AnnounceData, ScrapeData}; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; pub struct TestEnv { @@ -45,6 +49,22 @@ impl TestEnv { } pub async fn start(&self) { + let now = DurationSinceUnixEpoch::from_secs(0); + self.load_persisted_metrics(now).await; + self.run_jobs().await; + } + + async fn load_persisted_metrics(&self, now: DurationSinceUnixEpoch) { + load_persisted_metrics( + &self.tracker_core_container.stats_repository, + &self.tracker_core_container.db_torrent_repository, + now, + ) + .await + .unwrap(); + } + + async fn run_jobs(&self) { let mut jobs = vec![]; let job = torrust_tracker_torrent_repository::statistics::event::listener::run_event_listener( @@ -135,4 +155,15 @@ impl TestEnv { pub async fn remove_swarm(&self, info_hash: &InfoHash) { self.torrent_repository_container.swarms.remove(info_hash).await.unwrap(); } + + pub async fn get_counter_value(&self, metric_name: &str) -> u64 { + self.tracker_core_container + .stats_repository + .get_metrics() + .await + .metric_collection + 
.get_counter_value(&MetricName::new(metric_name), &LabelSet::default()) + .unwrap() + .value() + } } diff --git a/packages/tracker-core/tests/integration.rs b/packages/tracker-core/tests/integration.rs index 986bdaaf3..b170aaebd 100644 --- a/packages/tracker-core/tests/integration.rs +++ b/packages/tracker-core/tests/integration.rs @@ -86,3 +86,28 @@ async fn it_should_persist_the_number_of_completed_peers_for_each_torrent_into_t assert!(test_env.get_swarm_metadata(&info_hash).await.unwrap().downloads() == 1); } + +#[tokio::test] +async fn it_should_persist_the_global_number_of_completed_peers_into_the_database() { + let mut core_config = ephemeral_configuration(); + + core_config.tracker_policy.persistent_torrent_completed_stat = true; + + let mut test_env = TestEnv::started(core_config.clone()).await; + + test_env + .increase_number_of_downloads(sample_peer(), &remote_client_ip(), &sample_info_hash()) + .await; + + // We run a new instance of the test environment to simulate a restart. + // The new instance uses the same underlying database. 
+ + let new_test_env = TestEnv::started(core_config).await; + + assert_eq!( + new_test_env + .get_counter_value("tracker_core_persistent_torrents_downloads_total") + .await, + 1 + ); +} From 4febda494e036f772e2c473a784acf0d254d026c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 13:17:57 +0100 Subject: [PATCH 641/802] fix: [#1539] persistent metrics should be enabled by config --- .../src/statistics/event/handler.rs | 33 ++++++++++--------- .../src/statistics/event/listener.rs | 17 ++++++++-- .../tracker-core/tests/common/test_env.rs | 4 +++ src/bootstrap/jobs/tracker_core.rs | 5 +++ 4 files changed, 42 insertions(+), 17 deletions(-) diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs index e394641b8..4002053e2 100644 --- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -13,6 +13,7 @@ pub async fn handle_event( event: Event, stats_repository: &Arc, db_torrent_repository: &Arc, + persistent_torrent_completed_stat: bool, now: DurationSinceUnixEpoch, ) { match event { @@ -50,23 +51,25 @@ pub async fn handle_event( ) .await; - // Increment the number of downloads for the torrent in the database - match db_torrent_repository.increase_number_of_downloads(&info_hash) { - Ok(()) => { - tracing::debug!(info_hash = ?info_hash, "Number of torrent downloads increased"); + if persistent_torrent_completed_stat { + // Increment the number of downloads for the torrent in the database + match db_torrent_repository.increase_number_of_downloads(&info_hash) { + Ok(()) => { + tracing::debug!(info_hash = ?info_hash, "Number of torrent downloads increased"); + } + Err(err) => { + tracing::error!(info_hash = ?info_hash, error = ?err, "Failed to increase number of downloads for the torrent"); + } } - Err(err) => { - tracing::error!(info_hash = ?info_hash, error = ?err, "Failed to increase number of downloads for the torrent"); - } - } 
- // Increment the global number of downloads (for all torrents) in the database - match db_torrent_repository.increase_global_number_of_downloads() { - Ok(()) => { - tracing::debug!("Global number of downloads increased"); - } - Err(err) => { - tracing::error!(error = ?err, "Failed to increase global number of downloads"); + // Increment the global number of downloads (for all torrents) in the database + match db_torrent_repository.increase_global_number_of_downloads() { + Ok(()) => { + tracing::debug!("Global number of downloads increased"); + } + Err(err) => { + tracing::error!(error = ?err, "Failed to increase global number of downloads"); + } } } } diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs index f85b2b7a0..cf6d35d6e 100644 --- a/packages/tracker-core/src/statistics/event/listener.rs +++ b/packages/tracker-core/src/statistics/event/listener.rs @@ -15,6 +15,7 @@ pub fn run_event_listener( receiver: Receiver, repository: &Arc, db_torrent_repository: &Arc, + persistent_torrent_completed_stat: bool, ) -> JoinHandle<()> { let stats_repository = repository.clone(); let db_torrent_repository: Arc = db_torrent_repository.clone(); @@ -22,7 +23,13 @@ pub fn run_event_listener( tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Starting torrent repository event listener"); tokio::spawn(async move { - dispatch_events(receiver, stats_repository, db_torrent_repository).await; + dispatch_events( + receiver, + stats_repository, + db_torrent_repository, + persistent_torrent_completed_stat, + ) + .await; tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Torrent repository listener finished"); }) @@ -32,6 +39,7 @@ async fn dispatch_events( mut receiver: Receiver, stats_repository: Arc, db_torrent_repository: Arc, + persistent_torrent_completed_stat: bool, ) { let shutdown_signal = tokio::signal::ctrl_c(); @@ -48,7 +56,12 @@ async fn dispatch_events( result = receiver.recv() => { match result { - 
Ok(event) => handle_event(event, &stats_repository, &db_torrent_repository, CurrentClock::now()).await, + Ok(event) => handle_event( + event, + &stats_repository, + &db_torrent_repository, + persistent_torrent_completed_stat, + CurrentClock::now()).await, Err(e) => { match e { RecvError::Closed => { diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs index 4e14e9bd8..11a4d400a 100644 --- a/packages/tracker-core/tests/common/test_env.rs +++ b/packages/tracker-core/tests/common/test_env.rs @@ -78,6 +78,10 @@ impl TestEnv { self.torrent_repository_container.event_bus.receiver(), &self.tracker_core_container.stats_repository, &self.tracker_core_container.db_torrent_repository, + self.tracker_core_container + .core_config + .tracker_policy + .persistent_torrent_completed_stat, ); jobs.push(job); diff --git a/src/bootstrap/jobs/tracker_core.rs b/src/bootstrap/jobs/tracker_core.rs index 37c53b9e4..161e69aad 100644 --- a/src/bootstrap/jobs/tracker_core.rs +++ b/src/bootstrap/jobs/tracker_core.rs @@ -11,6 +11,11 @@ pub fn start_event_listener(config: &Configuration, app_container: &Arc Date: Tue, 27 May 2025 14:36:23 +0100 Subject: [PATCH 642/802] refactor: [#1541] rename DatabasePersistentTorrentRepository to DatabaseDownloadsMetricRepository --- .../src/v1/handlers/announce.rs | 4 ++-- packages/http-tracker-core/benches/helpers/util.rs | 4 ++-- packages/http-tracker-core/src/services/announce.rs | 4 ++-- packages/http-tracker-core/src/services/scrape.rs | 4 ++-- packages/tracker-core/src/announce_handler.rs | 6 +++--- packages/tracker-core/src/container.rs | 6 +++--- .../tracker-core/src/statistics/event/handler.rs | 4 ++-- .../tracker-core/src/statistics/event/listener.rs | 8 ++++---- .../tracker-core/src/statistics/persisted_metrics.rs | 4 ++-- packages/tracker-core/src/test_helpers.rs | 4 ++-- packages/tracker-core/src/torrent/manager.rs | 12 ++++++------ .../tracker-core/src/torrent/repository/persisted.rs | 12 
++++++------ packages/udp-tracker-server/src/handlers/announce.rs | 4 ++-- packages/udp-tracker-server/src/handlers/mod.rs | 4 ++-- 14 files changed, 40 insertions(+), 40 deletions(-) diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 7d7a0b386..c195b5a1f 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -120,7 +120,7 @@ mod tests { use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use torrust_tracker_configuration::Configuration; @@ -156,7 +156,7 @@ mod tests { let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); let authentication_service = Arc::new(AuthenticationService::new(&config.core, &in_memory_key_repository)); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &whitelist_authorization, diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index cfb3f745f..bf870b39c 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ 
b/packages/http-tracker-core/benches/helpers/util.rs @@ -15,7 +15,7 @@ use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemor use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; @@ -45,7 +45,7 @@ pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> ( let core_config = Arc::new(config.core.clone()); let database = initialize_database(&config.core); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 0ad5ed143..36dd58193 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -213,7 +213,7 @@ mod tests { use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; use 
bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use torrust_tracker_configuration::{Configuration, Core}; @@ -239,7 +239,7 @@ mod tests { let core_config = Arc::new(config.core.clone()); let database = initialize_database(&config.core); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index f22f2f632..e98c1b2c4 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -177,7 +177,7 @@ mod tests { use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use 
bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; @@ -200,7 +200,7 @@ mod tests { let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); let authentication_service = Arc::new(AuthenticationService::new(&config.core, &in_memory_key_repository)); diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index ffd244f2a..5c79e32bf 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -99,7 +99,7 @@ use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; use super::torrent::repository::in_memory::InMemoryTorrentRepository; -use super::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use super::torrent::repository::persisted::DatabaseDownloadsMetricRepository; use crate::error::AnnounceError; use crate::whitelist::authorization::WhitelistAuthorization; @@ -115,7 +115,7 @@ pub struct AnnounceHandler { in_memory_torrent_repository: Arc, /// Repository for persistent torrent data (database). 
- db_torrent_repository: Arc, + db_torrent_repository: Arc, } impl AnnounceHandler { @@ -125,7 +125,7 @@ impl AnnounceHandler { config: &Core, whitelist_authorization: &Arc, in_memory_torrent_repository: &Arc, - db_torrent_repository: &Arc, + db_torrent_repository: &Arc, ) -> Self { Self { whitelist_authorization: whitelist_authorization.clone(), diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index ed56fb106..8c6f360eb 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -13,7 +13,7 @@ use crate::databases::Database; use crate::scrape_handler::ScrapeHandler; use crate::torrent::manager::TorrentsManager; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; -use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; use crate::whitelist::authorization::WhitelistAuthorization; use crate::whitelist::manager::WhitelistManager; use crate::whitelist::repository::in_memory::InMemoryWhitelist; @@ -31,7 +31,7 @@ pub struct TrackerCoreContainer { pub whitelist_authorization: Arc, pub whitelist_manager: Arc, pub in_memory_torrent_repository: Arc, - pub db_torrent_repository: Arc, + pub db_torrent_repository: Arc, pub torrents_manager: Arc, pub stats_repository: Arc, } @@ -51,7 +51,7 @@ impl TrackerCoreContainer { &in_memory_key_repository.clone(), )); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(torrent_repository_container.swarms.clone())); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let torrents_manager = Arc::new(TorrentsManager::new( core_config, diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs index 4002053e2..028e7bc46 100644 
--- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -7,12 +7,12 @@ use torrust_tracker_torrent_repository::event::Event; use crate::statistics::repository::Repository; use crate::statistics::TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL; -use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; pub async fn handle_event( event: Event, stats_repository: &Arc, - db_torrent_repository: &Arc, + db_torrent_repository: &Arc, persistent_torrent_completed_stat: bool, now: DurationSinceUnixEpoch, ) { diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs index cf6d35d6e..63c75e2f6 100644 --- a/packages/tracker-core/src/statistics/event/listener.rs +++ b/packages/tracker-core/src/statistics/event/listener.rs @@ -7,18 +7,18 @@ use torrust_tracker_torrent_repository::event::receiver::Receiver; use super::handler::handle_event; use crate::statistics::repository::Repository; -use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; use crate::{CurrentClock, TRACKER_CORE_LOG_TARGET}; #[must_use] pub fn run_event_listener( receiver: Receiver, repository: &Arc, - db_torrent_repository: &Arc, + db_torrent_repository: &Arc, persistent_torrent_completed_stat: bool, ) -> JoinHandle<()> { let stats_repository = repository.clone(); - let db_torrent_repository: Arc = db_torrent_repository.clone(); + let db_torrent_repository: Arc = db_torrent_repository.clone(); tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Starting torrent repository event listener"); @@ -38,7 +38,7 @@ pub fn run_event_listener( async fn dispatch_events( mut receiver: Receiver, stats_repository: Arc, - db_torrent_repository: Arc, + db_torrent_repository: Arc, 
persistent_torrent_completed_stat: bool, ) { let shutdown_signal = tokio::signal::ctrl_c(); diff --git a/packages/tracker-core/src/statistics/persisted_metrics.rs b/packages/tracker-core/src/statistics/persisted_metrics.rs index 4d53236a5..73c52884e 100644 --- a/packages/tracker-core/src/statistics/persisted_metrics.rs +++ b/packages/tracker-core/src/statistics/persisted_metrics.rs @@ -8,7 +8,7 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::repository::Repository; use super::TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL; use crate::databases; -use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; +use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; /// Loads persisted metrics from the database and sets them in the stats repository. /// @@ -18,7 +18,7 @@ use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; /// metric collection fails to set the initial metric values. pub async fn load_persisted_metrics( stats_repository: &Arc, - db_torrent_repository: &Arc, + db_torrent_repository: &Arc, now: DurationSinceUnixEpoch, ) -> Result<(), Error> { if let Some(downloads) = db_torrent_repository.load_global_number_of_downloads()? 
{ diff --git a/packages/tracker-core/src/test_helpers.rs b/packages/tracker-core/src/test_helpers.rs index 04fe4133b..540381c75 100644 --- a/packages/tracker-core/src/test_helpers.rs +++ b/packages/tracker-core/src/test_helpers.rs @@ -20,7 +20,7 @@ pub(crate) mod tests { use crate::databases::setup::initialize_database; use crate::scrape_handler::ScrapeHandler; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; use crate::whitelist::repository::in_memory::InMemoryWhitelist; use crate::whitelist::{self}; @@ -137,7 +137,7 @@ pub(crate) mod tests { &in_memory_whitelist.clone(), )); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index d9997c4ad..dfcdaf38c 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -7,7 +7,7 @@ use torrust_tracker_configuration::Core; use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::repository::in_memory::InMemoryTorrentRepository; -use super::repository::persisted::DatabasePersistentTorrentRepository; +use super::repository::persisted::DatabaseDownloadsMetricRepository; use crate::{databases, CurrentClock}; /// The `TorrentsManager` is responsible for managing torrent entries by @@ -31,7 +31,7 @@ pub struct TorrentsManager { /// The persistent torrents repository. 
#[allow(dead_code)] - db_torrent_repository: Arc, + db_torrent_repository: Arc, } impl TorrentsManager { @@ -52,7 +52,7 @@ impl TorrentsManager { pub fn new( config: &Core, in_memory_torrent_repository: &Arc, - db_torrent_repository: &Arc, + db_torrent_repository: &Arc, ) -> Self { Self { config: config.clone(), @@ -153,7 +153,7 @@ mod tests { use torrust_tracker_configuration::Core; use torrust_tracker_torrent_repository::Swarms; - use super::{DatabasePersistentTorrentRepository, TorrentsManager}; + use super::{DatabaseDownloadsMetricRepository, TorrentsManager}; use crate::databases::setup::initialize_database; use crate::test_helpers::tests::{ephemeral_configuration, sample_info_hash}; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; @@ -161,7 +161,7 @@ mod tests { struct TorrentsManagerDeps { config: Arc, in_memory_torrent_repository: Arc, - database_persistent_torrent_repository: Arc, + database_persistent_torrent_repository: Arc, } fn initialize_torrents_manager() -> (Arc, Arc) { @@ -173,7 +173,7 @@ mod tests { let swarms = Arc::new(Swarms::default()); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(swarms)); let database = initialize_database(&config); - let database_persistent_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let database_persistent_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let torrents_manager = Arc::new(TorrentsManager::new( &config, diff --git a/packages/tracker-core/src/torrent/repository/persisted.rs b/packages/tracker-core/src/torrent/repository/persisted.rs index 1818065fd..d6c6ce263 100644 --- a/packages/tracker-core/src/torrent/repository/persisted.rs +++ b/packages/tracker-core/src/torrent/repository/persisted.rs @@ -19,7 +19,7 @@ use crate::databases::Database; /// /// Not all in-memory torrent data is persisted; only the aggregate metrics are /// stored. 
-pub struct DatabasePersistentTorrentRepository { +pub struct DatabaseDownloadsMetricRepository { /// A shared reference to the database driver implementation. /// /// The driver must implement the [`Database`] trait. This allows for @@ -28,7 +28,7 @@ pub struct DatabasePersistentTorrentRepository { database: Arc>, } -impl DatabasePersistentTorrentRepository { +impl DatabaseDownloadsMetricRepository { /// Creates a new instance of `DatabasePersistentTorrentRepository`. /// /// # Arguments @@ -41,7 +41,7 @@ impl DatabasePersistentTorrentRepository { /// A new `DatabasePersistentTorrentRepository` instance with a cloned /// reference to the provided database. #[must_use] - pub fn new(database: &Arc>) -> DatabasePersistentTorrentRepository { + pub fn new(database: &Arc>) -> DatabaseDownloadsMetricRepository { Self { database: database.clone(), } @@ -143,14 +143,14 @@ mod tests { use torrust_tracker_primitives::PersistentTorrents; - use super::DatabasePersistentTorrentRepository; + use super::DatabaseDownloadsMetricRepository; use crate::databases::setup::initialize_database; use crate::test_helpers::tests::{ephemeral_configuration, sample_info_hash, sample_info_hash_one, sample_info_hash_two}; - fn initialize_db_persistent_torrent_repository() -> DatabasePersistentTorrentRepository { + fn initialize_db_persistent_torrent_repository() -> DatabaseDownloadsMetricRepository { let config = ephemeral_configuration(); let database = initialize_database(&config); - DatabasePersistentTorrentRepository::new(&database) + DatabaseDownloadsMetricRepository::new(&database) } #[test] diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index e2ca6821e..60788ab9c 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -836,7 +836,7 @@ mod tests { use bittorrent_tracker_core::announce_handler::AnnounceHandler; use 
bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; @@ -885,7 +885,7 @@ mod tests { let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 831073333..eb51e6d01 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -212,7 +212,7 @@ pub(crate) mod tests { use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabasePersistentTorrentRepository; + use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use 
bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; @@ -275,7 +275,7 @@ pub(crate) mod tests { let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabasePersistentTorrentRepository::new(&database)); + let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &whitelist_authorization, From 99adbdee9dfe7bf9846bfebeacbf0036193385bc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 14:41:48 +0100 Subject: [PATCH 643/802] refactor: [#1541] rename symbol db_torrent_repository to db_downloads_metric_repository --- .../src/v1/handlers/announce.rs | 4 ++-- packages/http-tracker-core/benches/helpers/util.rs | 4 ++-- packages/http-tracker-core/src/services/announce.rs | 4 ++-- packages/http-tracker-core/src/services/scrape.rs | 4 ++-- packages/tracker-core/src/announce_handler.rs | 8 ++++---- packages/tracker-core/src/container.rs | 10 +++++----- packages/tracker-core/src/statistics/event/handler.rs | 6 +++--- packages/tracker-core/src/statistics/event/listener.rs | 10 +++++----- .../tracker-core/src/statistics/persisted_metrics.rs | 4 ++-- packages/tracker-core/src/test_helpers.rs | 4 ++-- packages/tracker-core/src/torrent/manager.rs | 10 +++++----- packages/tracker-core/tests/common/test_env.rs | 4 ++-- packages/udp-tracker-server/src/handlers/announce.rs | 4 ++-- packages/udp-tracker-server/src/handlers/mod.rs | 4 ++-- src/app.rs | 2 +- src/bootstrap/jobs/tracker_core.rs | 2 +- 16 files changed, 42 insertions(+), 42 deletions(-) diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 
c195b5a1f..108ebb33f 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -156,12 +156,12 @@ mod tests { let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); let authentication_service = Arc::new(AuthenticationService::new(&config.core, &in_memory_key_repository)); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); // HTTP core stats diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index bf870b39c..06c20543e 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -45,7 +45,7 @@ pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> ( let core_config = Arc::new(config.core.clone()); let database = initialize_database(&config.core); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); @@ -55,7 +55,7 @@ pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> ( &config.core, &whitelist_authorization, 
&in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); // HTTP core stats diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 36dd58193..7831324f0 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -239,7 +239,7 @@ mod tests { let core_config = Arc::new(config.core.clone()); let database = initialize_database(&config.core); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); @@ -249,7 +249,7 @@ mod tests { &config.core, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); // HTTP core stats diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index e98c1b2c4..0261626a9 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -200,7 +200,7 @@ mod tests { let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let 
in_memory_key_repository = Arc::new(InMemoryKeyRepository::default()); let authentication_service = Arc::new(AuthenticationService::new(&config.core, &in_memory_key_repository)); @@ -208,7 +208,7 @@ mod tests { &config.core, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 5c79e32bf..847ddd1af 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -115,7 +115,7 @@ pub struct AnnounceHandler { in_memory_torrent_repository: Arc, /// Repository for persistent torrent data (database). - db_torrent_repository: Arc, + db_downloads_metric_repository: Arc, } impl AnnounceHandler { @@ -125,13 +125,13 @@ impl AnnounceHandler { config: &Core, whitelist_authorization: &Arc, in_memory_torrent_repository: &Arc, - db_torrent_repository: &Arc, + db_downloads_metric_repository: &Arc, ) -> Self { Self { whitelist_authorization: whitelist_authorization.clone(), config: config.clone(), in_memory_torrent_repository: in_memory_torrent_repository.clone(), - db_torrent_repository: db_torrent_repository.clone(), + db_downloads_metric_repository: db_downloads_metric_repository.clone(), } } @@ -169,7 +169,7 @@ impl AnnounceHandler { // downloads across all torrents. The in-memory metric will count only // the number of downloads during the current tracker uptime. let opt_persistent_torrent = if self.config.tracker_policy.persistent_torrent_completed_stat { - self.db_torrent_repository.load(info_hash)? + self.db_downloads_metric_repository.load(info_hash)? 
} else { None }; diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index 8c6f360eb..4dd795e7a 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -31,7 +31,7 @@ pub struct TrackerCoreContainer { pub whitelist_authorization: Arc, pub whitelist_manager: Arc, pub in_memory_torrent_repository: Arc, - pub db_torrent_repository: Arc, + pub db_downloads_metric_repository: Arc, pub torrents_manager: Arc, pub stats_repository: Arc, } @@ -51,12 +51,12 @@ impl TrackerCoreContainer { &in_memory_key_repository.clone(), )); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(torrent_repository_container.swarms.clone())); - let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let torrents_manager = Arc::new(TorrentsManager::new( core_config, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); let stats_repository = Arc::new(statistics::repository::Repository::new()); @@ -65,7 +65,7 @@ impl TrackerCoreContainer { core_config, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); @@ -81,7 +81,7 @@ impl TrackerCoreContainer { whitelist_authorization, whitelist_manager, in_memory_torrent_repository, - db_torrent_repository, + db_downloads_metric_repository, torrents_manager, stats_repository, } diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs index 028e7bc46..82c56abce 100644 --- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -12,7 +12,7 @@ use 
crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; pub async fn handle_event( event: Event, stats_repository: &Arc, - db_torrent_repository: &Arc, + db_downloads_metric_repository: &Arc, persistent_torrent_completed_stat: bool, now: DurationSinceUnixEpoch, ) { @@ -53,7 +53,7 @@ pub async fn handle_event( if persistent_torrent_completed_stat { // Increment the number of downloads for the torrent in the database - match db_torrent_repository.increase_number_of_downloads(&info_hash) { + match db_downloads_metric_repository.increase_number_of_downloads(&info_hash) { Ok(()) => { tracing::debug!(info_hash = ?info_hash, "Number of torrent downloads increased"); } @@ -63,7 +63,7 @@ pub async fn handle_event( } // Increment the global number of downloads (for all torrents) in the database - match db_torrent_repository.increase_global_number_of_downloads() { + match db_downloads_metric_repository.increase_global_number_of_downloads() { Ok(()) => { tracing::debug!("Global number of downloads increased"); } diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs index 63c75e2f6..f0d8cb7f1 100644 --- a/packages/tracker-core/src/statistics/event/listener.rs +++ b/packages/tracker-core/src/statistics/event/listener.rs @@ -14,11 +14,11 @@ use crate::{CurrentClock, TRACKER_CORE_LOG_TARGET}; pub fn run_event_listener( receiver: Receiver, repository: &Arc, - db_torrent_repository: &Arc, + db_downloads_metric_repository: &Arc, persistent_torrent_completed_stat: bool, ) -> JoinHandle<()> { let stats_repository = repository.clone(); - let db_torrent_repository: Arc = db_torrent_repository.clone(); + let db_downloads_metric_repository: Arc = db_downloads_metric_repository.clone(); tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Starting torrent repository event listener"); @@ -26,7 +26,7 @@ pub fn run_event_listener( dispatch_events( receiver, stats_repository, - db_torrent_repository, + 
db_downloads_metric_repository, persistent_torrent_completed_stat, ) .await; @@ -38,7 +38,7 @@ pub fn run_event_listener( async fn dispatch_events( mut receiver: Receiver, stats_repository: Arc, - db_torrent_repository: Arc, + db_downloads_metric_repository: Arc, persistent_torrent_completed_stat: bool, ) { let shutdown_signal = tokio::signal::ctrl_c(); @@ -59,7 +59,7 @@ async fn dispatch_events( Ok(event) => handle_event( event, &stats_repository, - &db_torrent_repository, + &db_downloads_metric_repository, persistent_torrent_completed_stat, CurrentClock::now()).await, Err(e) => { diff --git a/packages/tracker-core/src/statistics/persisted_metrics.rs b/packages/tracker-core/src/statistics/persisted_metrics.rs index 73c52884e..55ec91b10 100644 --- a/packages/tracker-core/src/statistics/persisted_metrics.rs +++ b/packages/tracker-core/src/statistics/persisted_metrics.rs @@ -18,10 +18,10 @@ use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; /// metric collection fails to set the initial metric values. pub async fn load_persisted_metrics( stats_repository: &Arc, - db_torrent_repository: &Arc, + db_downloads_metric_repository: &Arc, now: DurationSinceUnixEpoch, ) -> Result<(), Error> { - if let Some(downloads) = db_torrent_repository.load_global_number_of_downloads()? { + if let Some(downloads) = db_downloads_metric_repository.load_global_number_of_downloads()? 
{ stats_repository .set_counter( &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), diff --git a/packages/tracker-core/src/test_helpers.rs b/packages/tracker-core/src/test_helpers.rs index 540381c75..f8b79e4db 100644 --- a/packages/tracker-core/src/test_helpers.rs +++ b/packages/tracker-core/src/test_helpers.rs @@ -137,13 +137,13 @@ pub(crate) mod tests { &in_memory_whitelist.clone(), )); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index dfcdaf38c..e18e19ce0 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -31,7 +31,7 @@ pub struct TorrentsManager { /// The persistent torrents repository. #[allow(dead_code)] - db_torrent_repository: Arc, + db_downloads_metric_repository: Arc, } impl TorrentsManager { @@ -42,7 +42,7 @@ impl TorrentsManager { /// * `config` - A reference to the tracker configuration. /// * `in_memory_torrent_repository` - A shared reference to the in-memory /// repository of torrents. - /// * `db_torrent_repository` - A shared reference to the persistent + /// * `db_downloads_metric_repository` - A shared reference to the persistent /// repository for torrent metrics. 
/// /// # Returns @@ -52,12 +52,12 @@ impl TorrentsManager { pub fn new( config: &Core, in_memory_torrent_repository: &Arc, - db_torrent_repository: &Arc, + db_downloads_metric_repository: &Arc, ) -> Self { Self { config: config.clone(), in_memory_torrent_repository: in_memory_torrent_repository.clone(), - db_torrent_repository: db_torrent_repository.clone(), + db_downloads_metric_repository: db_downloads_metric_repository.clone(), } } @@ -72,7 +72,7 @@ impl TorrentsManager { /// Returns a `databases::error::Error` if unable to load the persistent /// torrent data. pub fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { - let persistent_torrents = self.db_torrent_repository.load_all()?; + let persistent_torrents = self.db_downloads_metric_repository.load_all()?; println!("Loaded {} persistent torrents from the database", persistent_torrents.len()); diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs index 11a4d400a..88b363234 100644 --- a/packages/tracker-core/tests/common/test_env.rs +++ b/packages/tracker-core/tests/common/test_env.rs @@ -57,7 +57,7 @@ impl TestEnv { async fn load_persisted_metrics(&self, now: DurationSinceUnixEpoch) { load_persisted_metrics( &self.tracker_core_container.stats_repository, - &self.tracker_core_container.db_torrent_repository, + &self.tracker_core_container.db_downloads_metric_repository, now, ) .await @@ -77,7 +77,7 @@ impl TestEnv { let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( self.torrent_repository_container.event_bus.receiver(), &self.tracker_core_container.stats_repository, - &self.tracker_core_container.db_torrent_repository, + &self.tracker_core_container.db_downloads_metric_repository, self.tracker_core_container .core_config .tracker_policy diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 60788ab9c..38e136a12 100644 --- 
a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -885,7 +885,7 @@ mod tests { let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock @@ -923,7 +923,7 @@ mod tests { &config.core, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); let request = AnnounceRequestBuilder::default() diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index eb51e6d01..9bbebd56e 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -275,12 +275,12 @@ pub(crate) mod tests { let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let db_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); + let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let announce_handler = Arc::new(AnnounceHandler::new( &config.core, &whitelist_authorization, &in_memory_torrent_repository, - &db_torrent_repository, + &db_downloads_metric_repository, )); let scrape_handler = Arc::new(ScrapeHandler::new(&whitelist_authorization, &in_memory_torrent_repository)); diff --git a/src/app.rs b/src/app.rs index 571e034f5..ac51239fc 100644 --- a/src/app.rs 
+++ b/src/app.rs @@ -143,7 +143,7 @@ async fn load_torrent_metrics(config: &Configuration, app_container: &Arc Date: Tue, 27 May 2025 14:45:40 +0100 Subject: [PATCH 644/802] refactor: [#1541] create folder for mod More submods will be included inside. --- packages/tracker-core/src/statistics/mod.rs | 2 +- .../src/statistics/{persisted_metrics.rs => persisted/mod.rs} | 0 packages/tracker-core/tests/common/test_env.rs | 2 +- src/app.rs | 2 +- 4 files changed, 3 insertions(+), 3 deletions(-) rename packages/tracker-core/src/statistics/{persisted_metrics.rs => persisted/mod.rs} (100%) diff --git a/packages/tracker-core/src/statistics/mod.rs b/packages/tracker-core/src/statistics/mod.rs index 89d6b79d5..ff8187379 100644 --- a/packages/tracker-core/src/statistics/mod.rs +++ b/packages/tracker-core/src/statistics/mod.rs @@ -1,6 +1,6 @@ pub mod event; pub mod metrics; -pub mod persisted_metrics; +pub mod persisted; pub mod repository; use metrics::Metrics; diff --git a/packages/tracker-core/src/statistics/persisted_metrics.rs b/packages/tracker-core/src/statistics/persisted/mod.rs similarity index 100% rename from packages/tracker-core/src/statistics/persisted_metrics.rs rename to packages/tracker-core/src/statistics/persisted/mod.rs diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs index 88b363234..2aafbbbad 100644 --- a/packages/tracker-core/tests/common/test_env.rs +++ b/packages/tracker-core/tests/common/test_env.rs @@ -5,7 +5,7 @@ use aquatic_udp_protocol::AnnounceEvent; use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::announce_handler::PeersWanted; use bittorrent_tracker_core::container::TrackerCoreContainer; -use bittorrent_tracker_core::statistics::persisted_metrics::load_persisted_metrics; +use bittorrent_tracker_core::statistics::persisted::load_persisted_metrics; use tokio::task::yield_now; use torrust_tracker_configuration::Core; use torrust_tracker_metrics::label::LabelSet; 
diff --git a/src/app.rs b/src/app.rs index ac51239fc..c31281829 100644 --- a/src/app.rs +++ b/src/app.rs @@ -141,7 +141,7 @@ fn load_torrents_from_database(config: &Configuration, app_container: &Arc) { if config.core.tracker_policy.persistent_torrent_completed_stat { - bittorrent_tracker_core::statistics::persisted_metrics::load_persisted_metrics( + bittorrent_tracker_core::statistics::persisted::load_persisted_metrics( &app_container.tracker_core_container.stats_repository, &app_container.tracker_core_container.db_downloads_metric_repository, CurrentClock::now(), From fdbea0aa85dcef20561e7e363232eea00e3d4f6b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 14:47:32 +0100 Subject: [PATCH 645/802] refactor: [#1541] rename mod --- packages/axum-http-tracker-server/src/v1/handlers/announce.rs | 2 +- packages/http-tracker-core/benches/helpers/util.rs | 2 +- packages/http-tracker-core/src/services/announce.rs | 2 +- packages/http-tracker-core/src/services/scrape.rs | 2 +- packages/tracker-core/src/announce_handler.rs | 2 +- packages/tracker-core/src/container.rs | 2 +- packages/tracker-core/src/statistics/event/handler.rs | 2 +- packages/tracker-core/src/statistics/event/listener.rs | 2 +- packages/tracker-core/src/statistics/persisted/mod.rs | 2 +- packages/tracker-core/src/test_helpers.rs | 2 +- packages/tracker-core/src/torrent/manager.rs | 2 +- .../src/torrent/repository/{persisted.rs => downloads.rs} | 0 packages/tracker-core/src/torrent/repository/mod.rs | 2 +- packages/udp-tracker-server/src/handlers/announce.rs | 2 +- packages/udp-tracker-server/src/handlers/mod.rs | 2 +- 15 files changed, 14 insertions(+), 14 deletions(-) rename packages/tracker-core/src/torrent/repository/{persisted.rs => downloads.rs} (100%) diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 108ebb33f..68e0825f4 100644 --- 
a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -120,7 +120,7 @@ mod tests { use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; + use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use torrust_tracker_configuration::Configuration; diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index 06c20543e..2798203ae 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -15,7 +15,7 @@ use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemor use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; +use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 7831324f0..7f3e553e4 100644 --- a/packages/http-tracker-core/src/services/announce.rs 
+++ b/packages/http-tracker-core/src/services/announce.rs @@ -213,7 +213,7 @@ mod tests { use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; + use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use torrust_tracker_configuration::{Configuration, Core}; diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 0261626a9..f10f00732 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -177,7 +177,7 @@ mod tests { use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; + use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 847ddd1af..9a1c92efa 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -99,7 +99,7 @@ use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; use 
super::torrent::repository::in_memory::InMemoryTorrentRepository; -use super::torrent::repository::persisted::DatabaseDownloadsMetricRepository; +use super::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use crate::error::AnnounceError; use crate::whitelist::authorization::WhitelistAuthorization; diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index 4dd795e7a..b2bcdebb3 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -13,7 +13,7 @@ use crate::databases::Database; use crate::scrape_handler::ScrapeHandler; use crate::torrent::manager::TorrentsManager; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; -use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; +use crate::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use crate::whitelist::authorization::WhitelistAuthorization; use crate::whitelist::manager::WhitelistManager; use crate::whitelist::repository::in_memory::InMemoryWhitelist; diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs index 82c56abce..028f32030 100644 --- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -7,7 +7,7 @@ use torrust_tracker_torrent_repository::event::Event; use crate::statistics::repository::Repository; use crate::statistics::TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL; -use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; +use crate::torrent::repository::downloads::DatabaseDownloadsMetricRepository; pub async fn handle_event( event: Event, diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs index f0d8cb7f1..23b6e648a 100644 --- a/packages/tracker-core/src/statistics/event/listener.rs +++ 
b/packages/tracker-core/src/statistics/event/listener.rs @@ -7,7 +7,7 @@ use torrust_tracker_torrent_repository::event::receiver::Receiver; use super::handler::handle_event; use crate::statistics::repository::Repository; -use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; +use crate::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use crate::{CurrentClock, TRACKER_CORE_LOG_TARGET}; #[must_use] diff --git a/packages/tracker-core/src/statistics/persisted/mod.rs b/packages/tracker-core/src/statistics/persisted/mod.rs index 55ec91b10..4475f9647 100644 --- a/packages/tracker-core/src/statistics/persisted/mod.rs +++ b/packages/tracker-core/src/statistics/persisted/mod.rs @@ -8,7 +8,7 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::repository::Repository; use super::TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL; use crate::databases; -use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; +use crate::torrent::repository::downloads::DatabaseDownloadsMetricRepository; /// Loads persisted metrics from the database and sets them in the stats repository. 
/// diff --git a/packages/tracker-core/src/test_helpers.rs b/packages/tracker-core/src/test_helpers.rs index f8b79e4db..c10d3dd3e 100644 --- a/packages/tracker-core/src/test_helpers.rs +++ b/packages/tracker-core/src/test_helpers.rs @@ -20,7 +20,7 @@ pub(crate) mod tests { use crate::databases::setup::initialize_database; use crate::scrape_handler::ScrapeHandler; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::torrent::repository::persisted::DatabaseDownloadsMetricRepository; + use crate::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use crate::whitelist::repository::in_memory::InMemoryWhitelist; use crate::whitelist::{self}; diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index e18e19ce0..f86e9442e 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -7,7 +7,7 @@ use torrust_tracker_configuration::Core; use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::repository::in_memory::InMemoryTorrentRepository; -use super::repository::persisted::DatabaseDownloadsMetricRepository; +use super::repository::downloads::DatabaseDownloadsMetricRepository; use crate::{databases, CurrentClock}; /// The `TorrentsManager` is responsible for managing torrent entries by diff --git a/packages/tracker-core/src/torrent/repository/persisted.rs b/packages/tracker-core/src/torrent/repository/downloads.rs similarity index 100% rename from packages/tracker-core/src/torrent/repository/persisted.rs rename to packages/tracker-core/src/torrent/repository/downloads.rs diff --git a/packages/tracker-core/src/torrent/repository/mod.rs b/packages/tracker-core/src/torrent/repository/mod.rs index ae789e5e9..fd0382025 100644 --- a/packages/tracker-core/src/torrent/repository/mod.rs +++ b/packages/tracker-core/src/torrent/repository/mod.rs @@ -1,3 +1,3 @@ //! Torrent repository implementations. 
pub mod in_memory; -pub mod persisted; +pub mod downloads; diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 38e136a12..555d047d0 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -836,7 +836,7 @@ mod tests { use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; + use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 9bbebd56e..3957f63c3 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -212,7 +212,7 @@ pub(crate) mod tests { use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::persisted::DatabaseDownloadsMetricRepository; + use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; From 7e27d31bcfff7b5653adc6df99e9b87caf8eed59 Mon Sep 17 00:00:00 
2001 From: Jose Celano Date: Tue, 27 May 2025 14:52:30 +0100 Subject: [PATCH 646/802] refactor: [#1541] move mod --- packages/axum-http-tracker-server/src/v1/handlers/announce.rs | 2 +- packages/http-tracker-core/benches/helpers/util.rs | 2 +- packages/http-tracker-core/src/services/announce.rs | 2 +- packages/http-tracker-core/src/services/scrape.rs | 2 +- packages/tracker-core/src/announce_handler.rs | 2 +- packages/tracker-core/src/container.rs | 2 +- packages/tracker-core/src/statistics/event/handler.rs | 2 +- packages/tracker-core/src/statistics/event/listener.rs | 2 +- .../{torrent/repository => statistics/persisted}/downloads.rs | 0 packages/tracker-core/src/statistics/persisted/mod.rs | 4 +++- packages/tracker-core/src/test_helpers.rs | 2 +- packages/tracker-core/src/torrent/manager.rs | 2 +- packages/tracker-core/src/torrent/repository/mod.rs | 1 - packages/udp-tracker-server/src/handlers/announce.rs | 2 +- packages/udp-tracker-server/src/handlers/mod.rs | 2 +- 15 files changed, 15 insertions(+), 14 deletions(-) rename packages/tracker-core/src/{torrent/repository => statistics/persisted}/downloads.rs (100%) diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 68e0825f4..16ff83f81 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -119,8 +119,8 @@ mod tests { use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; + use bittorrent_tracker_core::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use 
bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use torrust_tracker_configuration::Configuration; diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index 2798203ae..414d3b40e 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -14,8 +14,8 @@ use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; +use bittorrent_tracker_core::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 7f3e553e4..23d589bce 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -212,8 +212,8 @@ mod tests { use bittorrent_tracker_core::authentication::key::repository::in_memory::InMemoryKeyRepository; use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; + use 
bittorrent_tracker_core::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use torrust_tracker_configuration::{Configuration, Core}; diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index f10f00732..1445ffcfe 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -176,8 +176,8 @@ mod tests { use bittorrent_tracker_core::authentication::service::AuthenticationService; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; + use bittorrent_tracker_core::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 9a1c92efa..501993ad5 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -99,8 +99,8 @@ use torrust_tracker_primitives::core::AnnounceData; use torrust_tracker_primitives::peer; use super::torrent::repository::in_memory::InMemoryTorrentRepository; -use super::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use 
crate::error::AnnounceError; +use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use crate::whitelist::authorization::WhitelistAuthorization; /// Handles `announce` requests from `BitTorrent` clients. diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index b2bcdebb3..02af67118 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -11,9 +11,9 @@ use crate::authentication::service::AuthenticationService; use crate::databases::setup::initialize_database; use crate::databases::Database; use crate::scrape_handler::ScrapeHandler; +use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use crate::torrent::manager::TorrentsManager; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; -use crate::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use crate::whitelist::authorization::WhitelistAuthorization; use crate::whitelist::manager::WhitelistManager; use crate::whitelist::repository::in_memory::InMemoryWhitelist; diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs index 028f32030..0001b43ce 100644 --- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -5,9 +5,9 @@ use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_torrent_repository::event::Event; +use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use crate::statistics::repository::Repository; use crate::statistics::TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL; -use crate::torrent::repository::downloads::DatabaseDownloadsMetricRepository; pub async fn handle_event( event: Event, diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs index 
23b6e648a..2702aa858 100644 --- a/packages/tracker-core/src/statistics/event/listener.rs +++ b/packages/tracker-core/src/statistics/event/listener.rs @@ -6,8 +6,8 @@ use torrust_tracker_events::receiver::RecvError; use torrust_tracker_torrent_repository::event::receiver::Receiver; use super::handler::handle_event; +use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use crate::statistics::repository::Repository; -use crate::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use crate::{CurrentClock, TRACKER_CORE_LOG_TARGET}; #[must_use] diff --git a/packages/tracker-core/src/torrent/repository/downloads.rs b/packages/tracker-core/src/statistics/persisted/downloads.rs similarity index 100% rename from packages/tracker-core/src/torrent/repository/downloads.rs rename to packages/tracker-core/src/statistics/persisted/downloads.rs diff --git a/packages/tracker-core/src/statistics/persisted/mod.rs b/packages/tracker-core/src/statistics/persisted/mod.rs index 4475f9647..f675b4ebc 100644 --- a/packages/tracker-core/src/statistics/persisted/mod.rs +++ b/packages/tracker-core/src/statistics/persisted/mod.rs @@ -1,3 +1,5 @@ +pub mod downloads; + use std::sync::Arc; use thiserror::Error; @@ -8,7 +10,7 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::repository::Repository; use super::TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL; use crate::databases; -use crate::torrent::repository::downloads::DatabaseDownloadsMetricRepository; +use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; /// Loads persisted metrics from the database and sets them in the stats repository. 
/// diff --git a/packages/tracker-core/src/test_helpers.rs b/packages/tracker-core/src/test_helpers.rs index c10d3dd3e..62649cd22 100644 --- a/packages/tracker-core/src/test_helpers.rs +++ b/packages/tracker-core/src/test_helpers.rs @@ -19,8 +19,8 @@ pub(crate) mod tests { use crate::announce_handler::AnnounceHandler; use crate::databases::setup::initialize_database; use crate::scrape_handler::ScrapeHandler; + use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use crate::torrent::repository::in_memory::InMemoryTorrentRepository; - use crate::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use crate::whitelist::repository::in_memory::InMemoryWhitelist; use crate::whitelist::{self}; diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index f86e9442e..b7c6d5117 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -7,7 +7,7 @@ use torrust_tracker_configuration::Core; use torrust_tracker_primitives::DurationSinceUnixEpoch; use super::repository::in_memory::InMemoryTorrentRepository; -use super::repository::downloads::DatabaseDownloadsMetricRepository; +use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use crate::{databases, CurrentClock}; /// The `TorrentsManager` is responsible for managing torrent entries by diff --git a/packages/tracker-core/src/torrent/repository/mod.rs b/packages/tracker-core/src/torrent/repository/mod.rs index fd0382025..d8325dec5 100644 --- a/packages/tracker-core/src/torrent/repository/mod.rs +++ b/packages/tracker-core/src/torrent/repository/mod.rs @@ -1,3 +1,2 @@ //! Torrent repository implementations. 
pub mod in_memory; -pub mod downloads; diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 555d047d0..2fc3f6e63 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -835,8 +835,8 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::databases::setup::initialize_database; + use bittorrent_tracker_core::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use bittorrent_udp_tracker_core::connection_cookie::{gen_remote_fingerprint, make}; diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 3957f63c3..df550ab72 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -211,8 +211,8 @@ pub(crate) mod tests { use bittorrent_tracker_core::announce_handler::AnnounceHandler; use bittorrent_tracker_core::databases::setup::initialize_database; use bittorrent_tracker_core::scrape_handler::ScrapeHandler; + use bittorrent_tracker_core::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::torrent::repository::downloads::DatabaseDownloadsMetricRepository; use bittorrent_tracker_core::whitelist; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use 
bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; From 0508a6a11e6050715a712005384c65659bfecf4e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 15:04:51 +0100 Subject: [PATCH 647/802] refactor: [#1541] rename methods --- packages/tracker-core/src/announce_handler.rs | 2 +- .../tracker-core/src/databases/driver/mod.rs | 28 +++++------ .../src/databases/driver/mysql.rs | 14 +++--- .../src/databases/driver/sqlite.rs | 14 +++--- packages/tracker-core/src/databases/mod.rs | 14 +++--- .../src/statistics/event/handler.rs | 4 +- .../src/statistics/persisted/downloads.rs | 46 +++++++++---------- .../src/statistics/persisted/mod.rs | 2 +- packages/tracker-core/src/torrent/manager.rs | 12 ++--- 9 files changed, 68 insertions(+), 68 deletions(-) diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index 501993ad5..a6614361a 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -169,7 +169,7 @@ impl AnnounceHandler { // downloads across all torrents. The in-memory metric will count only // the number of downloads during the current tracker uptime. let opt_persistent_torrent = if self.config.tracker_policy.persistent_torrent_completed_stat { - self.db_downloads_metric_repository.load(info_hash)? + self.db_downloads_metric_repository.load_torrent_downloads(info_hash)? 
} else { None }; diff --git a/packages/tracker-core/src/databases/driver/mod.rs b/packages/tracker-core/src/databases/driver/mod.rs index e8f0ecbfb..6c849bb70 100644 --- a/packages/tracker-core/src/databases/driver/mod.rs +++ b/packages/tracker-core/src/databases/driver/mod.rs @@ -169,9 +169,9 @@ pub(crate) mod tests { let number_of_downloads = 1; - driver.save_persistent_torrent(&infohash, number_of_downloads).unwrap(); + driver.save_torrent_downloads(&infohash, number_of_downloads).unwrap(); - let number_of_downloads = driver.load_persistent_torrent(&infohash).unwrap().unwrap(); + let number_of_downloads = driver.load_torrent_downloads(&infohash).unwrap().unwrap(); assert_eq!(number_of_downloads, 1); } @@ -181,9 +181,9 @@ pub(crate) mod tests { let number_of_downloads = 1; - driver.save_persistent_torrent(&infohash, number_of_downloads).unwrap(); + driver.save_torrent_downloads(&infohash, number_of_downloads).unwrap(); - let torrents = driver.load_persistent_torrents().unwrap(); + let torrents = driver.load_all_torrents_downloads().unwrap(); assert_eq!(torrents.len(), 1); assert_eq!(torrents.get(&infohash), Some(number_of_downloads).as_ref()); @@ -194,11 +194,11 @@ pub(crate) mod tests { let number_of_downloads = 1; - driver.save_persistent_torrent(&infohash, number_of_downloads).unwrap(); + driver.save_torrent_downloads(&infohash, number_of_downloads).unwrap(); - driver.increase_number_of_downloads(&infohash).unwrap(); + driver.increase_downloads_for_torrent(&infohash).unwrap(); - let number_of_downloads = driver.load_persistent_torrent(&infohash).unwrap().unwrap(); + let number_of_downloads = driver.load_torrent_downloads(&infohash).unwrap().unwrap(); assert_eq!(number_of_downloads, 2); } @@ -208,9 +208,9 @@ pub(crate) mod tests { pub fn it_should_save_and_load_the_global_number_of_downloads(driver: &Arc>) { let number_of_downloads = 1; - driver.save_global_number_of_downloads(number_of_downloads).unwrap(); + 
driver.save_global_downloads(number_of_downloads).unwrap(); - let number_of_downloads = driver.load_global_number_of_downloads().unwrap().unwrap(); + let number_of_downloads = driver.load_global_downloads().unwrap().unwrap(); assert_eq!(number_of_downloads, 1); } @@ -218,9 +218,9 @@ pub(crate) mod tests { pub fn it_should_load_the_global_number_of_downloads(driver: &Arc>) { let number_of_downloads = 1; - driver.save_global_number_of_downloads(number_of_downloads).unwrap(); + driver.save_global_downloads(number_of_downloads).unwrap(); - let number_of_downloads = driver.load_global_number_of_downloads().unwrap().unwrap(); + let number_of_downloads = driver.load_global_downloads().unwrap().unwrap(); assert_eq!(number_of_downloads, 1); } @@ -228,11 +228,11 @@ pub(crate) mod tests { pub fn it_should_increase_the_global_number_of_downloads(driver: &Arc>) { let number_of_downloads = 1; - driver.save_global_number_of_downloads(number_of_downloads).unwrap(); + driver.save_global_downloads(number_of_downloads).unwrap(); - driver.increase_global_number_of_downloads().unwrap(); + driver.increase_global_downloads().unwrap(); - let number_of_downloads = driver.load_global_number_of_downloads().unwrap().unwrap(); + let number_of_downloads = driver.load_global_downloads().unwrap().unwrap(); assert_eq!(number_of_downloads, 2); } diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index bfbc47ebd..ce76ce563 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -146,7 +146,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). 
- fn load_persistent_torrents(&self) -> Result { + fn load_all_torrents_downloads(&self) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let torrents = conn.query_map( @@ -161,7 +161,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_persistent_torrent`](crate::core::databases::Database::load_persistent_torrent). - fn load_persistent_torrent(&self, info_hash: &InfoHash) -> Result, Error> { + fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let query = conn.exec_first::( @@ -175,7 +175,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::save_persistent_torrent`](crate::core::databases::Database::save_persistent_torrent). - fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + fn save_torrent_downloads(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { const COMMAND : &str = "INSERT INTO torrents (info_hash, completed) VALUES (:info_hash_str, :completed) ON DUPLICATE KEY UPDATE completed = VALUES(completed)"; let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -186,7 +186,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::increase_number_of_downloads`](crate::core::databases::Database::increase_number_of_downloads). - fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error> { + fn increase_downloads_for_torrent(&self, info_hash: &InfoHash) -> Result<(), Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let info_hash_str = info_hash.to_string(); @@ -200,17 +200,17 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_global_number_of_downloads`](crate::core::databases::Database::load_global_number_of_downloads). 
- fn load_global_number_of_downloads(&self) -> Result, Error> { + fn load_global_downloads(&self) -> Result, Error> { self.load_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL) } /// Refer to [`databases::Database::save_global_number_of_downloads`](crate::core::databases::Database::save_global_number_of_downloads). - fn save_global_number_of_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error> { + fn save_global_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error> { self.save_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL, downloaded) } /// Refer to [`databases::Database::increase_global_number_of_downloads`](crate::core::databases::Database::increase_global_number_of_downloads). - fn increase_global_number_of_downloads(&self) -> Result<(), Error> { + fn increase_global_downloads(&self) -> Result<(), Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let metric_name = TORRENTS_DOWNLOADS_TOTAL; diff --git a/packages/tracker-core/src/databases/driver/sqlite.rs b/packages/tracker-core/src/databases/driver/sqlite.rs index 91e969233..794f65a4c 100644 --- a/packages/tracker-core/src/databases/driver/sqlite.rs +++ b/packages/tracker-core/src/databases/driver/sqlite.rs @@ -152,7 +152,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). - fn load_persistent_torrents(&self) -> Result { + fn load_all_torrents_downloads(&self) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; @@ -168,7 +168,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_persistent_torrent`](crate::core::databases::Database::load_persistent_torrent). 
- fn load_persistent_torrent(&self, info_hash: &InfoHash) -> Result, Error> { + fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT completed FROM torrents WHERE info_hash = ?")?; @@ -184,7 +184,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::save_persistent_torrent`](crate::core::databases::Database::save_persistent_torrent). - fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + fn save_torrent_downloads(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let insert = conn.execute( @@ -203,7 +203,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::increase_number_of_downloads`](crate::core::databases::Database::increase_number_of_downloads). - fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error> { + fn increase_downloads_for_torrent(&self, info_hash: &InfoHash) -> Result<(), Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let _ = conn.execute( @@ -215,17 +215,17 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_global_number_of_downloads`](crate::core::databases::Database::load_global_number_of_downloads). - fn load_global_number_of_downloads(&self) -> Result, Error> { + fn load_global_downloads(&self) -> Result, Error> { self.load_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL) } /// Refer to [`databases::Database::save_global_number_of_downloads`](crate::core::databases::Database::save_global_number_of_downloads). 
- fn save_global_number_of_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error> { + fn save_global_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error> { self.save_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL, downloaded) } /// Refer to [`databases::Database::increase_global_number_of_downloads`](crate::core::databases::Database::increase_global_number_of_downloads). - fn increase_global_number_of_downloads(&self) -> Result<(), Error> { + fn increase_global_downloads(&self) -> Result<(), Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let metric_name = TORRENTS_DOWNLOADS_TOTAL; diff --git a/packages/tracker-core/src/databases/mod.rs b/packages/tracker-core/src/databases/mod.rs index a9d6b2a22..b637219ad 100644 --- a/packages/tracker-core/src/databases/mod.rs +++ b/packages/tracker-core/src/databases/mod.rs @@ -101,7 +101,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the metrics cannot be loaded. - fn load_persistent_torrents(&self) -> Result; + fn load_all_torrents_downloads(&self) -> Result; /// Loads torrent metrics data from the database for one torrent. /// @@ -110,7 +110,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the metrics cannot be loaded. - fn load_persistent_torrent(&self, info_hash: &InfoHash) -> Result, Error>; + fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error>; /// Saves torrent metrics data into the database. /// @@ -124,7 +124,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the metrics cannot be saved. - fn save_persistent_torrent(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error>; + fn save_torrent_downloads(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error>; /// Increases the number of downloads for a given torrent. 
/// @@ -140,7 +140,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the query failed. - fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error>; + fn increase_downloads_for_torrent(&self, info_hash: &InfoHash) -> Result<(), Error>; /// Loads the total number of downloads for all torrents from the database. /// @@ -149,7 +149,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the total downloads cannot be loaded. - fn load_global_number_of_downloads(&self) -> Result, Error>; + fn load_global_downloads(&self) -> Result, Error>; /// Saves the total number of downloads for all torrents into the database. /// @@ -163,7 +163,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the total downloads cannot be saved. - fn save_global_number_of_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error>; + fn save_global_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error>; /// Increases the total number of downloads for all torrents. /// @@ -172,7 +172,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the query failed. 
- fn increase_global_number_of_downloads(&self) -> Result<(), Error>; + fn increase_global_downloads(&self) -> Result<(), Error>; // Whitelist diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs index 0001b43ce..0909dc184 100644 --- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -53,7 +53,7 @@ pub async fn handle_event( if persistent_torrent_completed_stat { // Increment the number of downloads for the torrent in the database - match db_downloads_metric_repository.increase_number_of_downloads(&info_hash) { + match db_downloads_metric_repository.increase_downloads_for_torrent(&info_hash) { Ok(()) => { tracing::debug!(info_hash = ?info_hash, "Number of torrent downloads increased"); } @@ -63,7 +63,7 @@ pub async fn handle_event( } // Increment the global number of downloads (for all torrents) in the database - match db_downloads_metric_repository.increase_global_number_of_downloads() { + match db_downloads_metric_repository.increase_global_downloads() { Ok(()) => { tracing::debug!("Global number of downloads increased"); } diff --git a/packages/tracker-core/src/statistics/persisted/downloads.rs b/packages/tracker-core/src/statistics/persisted/downloads.rs index d6c6ce263..7edaf73d8 100644 --- a/packages/tracker-core/src/statistics/persisted/downloads.rs +++ b/packages/tracker-core/src/statistics/persisted/downloads.rs @@ -60,12 +60,12 @@ impl DatabaseDownloadsMetricRepository { /// # Errors /// /// Returns an [`Error`] if the database operation fails. 
- pub(crate) fn increase_number_of_downloads(&self, info_hash: &InfoHash) -> Result<(), Error> { - let torrent = self.load(info_hash)?; + pub(crate) fn increase_downloads_for_torrent(&self, info_hash: &InfoHash) -> Result<(), Error> { + let torrent = self.load_torrent_downloads(info_hash)?; match torrent { - Some(_number_of_downloads) => self.database.increase_number_of_downloads(info_hash), - None => self.save(info_hash, 1), + Some(_number_of_downloads) => self.database.increase_downloads_for_torrent(info_hash), + None => self.save_torrent_downloads(info_hash, 1), } } @@ -77,8 +77,8 @@ impl DatabaseDownloadsMetricRepository { /// # Errors /// /// Returns an [`Error`] if the underlying database query fails. - pub(crate) fn load_all(&self) -> Result { - self.database.load_persistent_torrents() + pub(crate) fn load_all_torrents_downloads(&self) -> Result { + self.database.load_all_torrents_downloads() } /// Loads one persistent torrent metrics from the database. @@ -89,8 +89,8 @@ impl DatabaseDownloadsMetricRepository { /// # Errors /// /// Returns an [`Error`] if the underlying database query fails. - pub(crate) fn load(&self, info_hash: &InfoHash) -> Result, Error> { - self.database.load_persistent_torrent(info_hash) + pub(crate) fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error> { + self.database.load_torrent_downloads(info_hash) } /// Saves the persistent torrent metric into the database. @@ -106,8 +106,8 @@ impl DatabaseDownloadsMetricRepository { /// # Errors /// /// Returns an [`Error`] if the database operation fails. 
- pub(crate) fn save(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error> { - self.database.save_persistent_torrent(info_hash, downloaded) + pub(crate) fn save_torrent_downloads(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error> { + self.database.save_torrent_downloads(info_hash, downloaded) } // Aggregate Metrics @@ -119,12 +119,12 @@ impl DatabaseDownloadsMetricRepository { /// # Errors /// /// Returns an [`Error`] if the database operation fails. - pub(crate) fn increase_global_number_of_downloads(&self) -> Result<(), Error> { - let torrent = self.database.load_global_number_of_downloads()?; + pub(crate) fn increase_global_downloads(&self) -> Result<(), Error> { + let torrent = self.database.load_global_downloads()?; match torrent { - Some(_number_of_downloads) => self.database.increase_global_number_of_downloads(), - None => self.database.save_global_number_of_downloads(1), + Some(_number_of_downloads) => self.database.increase_global_downloads(), + None => self.database.save_global_downloads(1), } } @@ -133,8 +133,8 @@ impl DatabaseDownloadsMetricRepository { /// # Errors /// /// Returns an [`Error`] if the underlying database query fails. 
- pub(crate) fn load_global_number_of_downloads(&self) -> Result, Error> { - self.database.load_global_number_of_downloads() + pub(crate) fn load_global_downloads(&self) -> Result, Error> { + self.database.load_global_downloads() } } @@ -159,9 +159,9 @@ mod tests { let infohash = sample_info_hash(); - repository.save(&infohash, 1).unwrap(); + repository.save_torrent_downloads(&infohash, 1).unwrap(); - let torrents = repository.load_all().unwrap(); + let torrents = repository.load_all_torrents_downloads().unwrap(); assert_eq!(torrents.get(&infohash), Some(1).as_ref()); } @@ -172,9 +172,9 @@ mod tests { let infohash = sample_info_hash(); - repository.increase_number_of_downloads(&infohash).unwrap(); + repository.increase_downloads_for_torrent(&infohash).unwrap(); - let torrents = repository.load_all().unwrap(); + let torrents = repository.load_all_torrents_downloads().unwrap(); assert_eq!(torrents.get(&infohash), Some(1).as_ref()); } @@ -186,10 +186,10 @@ mod tests { let infohash_one = sample_info_hash_one(); let infohash_two = sample_info_hash_two(); - repository.save(&infohash_one, 1).unwrap(); - repository.save(&infohash_two, 2).unwrap(); + repository.save_torrent_downloads(&infohash_one, 1).unwrap(); + repository.save_torrent_downloads(&infohash_two, 2).unwrap(); - let torrents = repository.load_all().unwrap(); + let torrents = repository.load_all_torrents_downloads().unwrap(); let mut expected_torrents = PersistentTorrents::new(); expected_torrents.insert(infohash_one, 1); diff --git a/packages/tracker-core/src/statistics/persisted/mod.rs b/packages/tracker-core/src/statistics/persisted/mod.rs index f675b4ebc..86c28370d 100644 --- a/packages/tracker-core/src/statistics/persisted/mod.rs +++ b/packages/tracker-core/src/statistics/persisted/mod.rs @@ -23,7 +23,7 @@ pub async fn load_persisted_metrics( db_downloads_metric_repository: &Arc, now: DurationSinceUnixEpoch, ) -> Result<(), Error> { - if let Some(downloads) = 
db_downloads_metric_repository.load_global_number_of_downloads()? { + if let Some(downloads) = db_downloads_metric_repository.load_global_downloads()? { stats_repository .set_counter( &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index b7c6d5117..766fa5c4a 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -29,8 +29,7 @@ pub struct TorrentsManager { /// The in-memory torrents repository. in_memory_torrent_repository: Arc, - /// The persistent torrents repository. - #[allow(dead_code)] + /// The download metrics repository. db_downloads_metric_repository: Arc, } @@ -72,9 +71,7 @@ impl TorrentsManager { /// Returns a `databases::error::Error` if unable to load the persistent /// torrent data. pub fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { - let persistent_torrents = self.db_downloads_metric_repository.load_all()?; - - println!("Loaded {} persistent torrents from the database", persistent_torrents.len()); + let persistent_torrents = self.db_downloads_metric_repository.load_all_torrents_downloads()?; self.in_memory_torrent_repository.import_persistent(&persistent_torrents); @@ -197,7 +194,10 @@ mod tests { let infohash = sample_info_hash(); - services.database_persistent_torrent_repository.save(&infohash, 1).unwrap(); + services + .database_persistent_torrent_repository + .save_torrent_downloads(&infohash, 1) + .unwrap(); torrents_manager.load_torrents_from_database().unwrap(); From a5a80b5de923957eaee81c474110ea443b2cd5a6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 15:09:40 +0100 Subject: [PATCH 648/802] refactor: [#1541] rename type alias PersistentTorrent to NumberOfDownloads --- packages/primitives/src/lib.rs | 4 ++-- .../src/repository/dash_map_mutex_std.rs | 4 ++-- .../src/repository/mod.rs | 6 +++--- 
.../src/repository/rw_lock_std.rs | 4 ++-- .../src/repository/rw_lock_std_mutex_std.rs | 4 ++-- .../src/repository/rw_lock_std_mutex_tokio.rs | 4 ++-- .../src/repository/rw_lock_tokio.rs | 4 ++-- .../src/repository/rw_lock_tokio_mutex_std.rs | 4 ++-- .../src/repository/rw_lock_tokio_mutex_tokio.rs | 4 ++-- .../src/repository/skip_map_mutex_std.rs | 8 ++++---- .../tests/common/repo.rs | 4 ++-- packages/torrent-repository/src/swarms.rs | 4 ++-- packages/tracker-core/src/databases/driver/mysql.rs | 12 ++++++------ packages/tracker-core/src/databases/driver/sqlite.rs | 12 ++++++------ packages/tracker-core/src/databases/mod.rs | 8 ++++---- .../src/statistics/persisted/downloads.rs | 6 +++--- .../tracker-core/src/torrent/repository/in_memory.rs | 4 ++-- 17 files changed, 48 insertions(+), 48 deletions(-) diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index c901e5276..b04991eb8 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -18,5 +18,5 @@ use bittorrent_primitives::info_hash::InfoHash; /// Duration since the Unix Epoch. 
pub type DurationSinceUnixEpoch = Duration; -pub type PersistentTorrent = u32; -pub type PersistentTorrents = BTreeMap; +pub type NumberOfDownloads = u32; +pub type PersistentTorrents = BTreeMap; diff --git a/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs index d4a84caa0..c0ef455d4 100644 --- a/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs @@ -5,7 +5,7 @@ use dashmap::DashMap; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -22,7 +22,7 @@ where EntryMutexStd: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { // todo: load persistent torrent data if provided if let Some(entry) = self.torrents.get(info_hash) { diff --git a/packages/torrent-repository-benchmarking/src/repository/mod.rs b/packages/torrent-repository-benchmarking/src/repository/mod.rs index 9284ff6e6..2ad7a3927 100644 --- a/packages/torrent-repository-benchmarking/src/repository/mod.rs +++ b/packages/torrent-repository-benchmarking/src/repository/mod.rs @@ -2,7 +2,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use 
torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; pub mod dash_map_mutex_std; pub mod rw_lock_std; @@ -23,7 +23,7 @@ pub trait Repository: Debug + Default + Sized + 'static { fn remove(&self, key: &InfoHash) -> Option; fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); fn remove_peerless_torrents(&self, policy: &TrackerPolicy); - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option) -> bool; + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option) -> bool; fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option; } @@ -40,7 +40,7 @@ pub trait RepositoryAsync: Debug + Default + Sized + 'static { &self, info_hash: &InfoHash, peer: &peer::Peer, - opt_persistent_torrent: Option, + opt_persistent_torrent: Option, ) -> impl std::future::Future + Send; fn get_swarm_metadata(&self, info_hash: &InfoHash) -> impl std::future::Future> + Send; } diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs index d190718af..c0e4d5cf5 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs @@ -2,7 +2,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, 
PersistentTorrents}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -45,7 +45,7 @@ impl Repository for TorrentsRwLockStd where EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { // todo: load persistent torrent data if provided let mut db = self.get_torrents_mut(); diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs index 1764b94e8..30aabc799 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs @@ -4,7 +4,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -32,7 +32,7 @@ where EntryMutexStd: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { // todo: load persistent torrent data if provided let maybe_entry = self.get_torrents().get(info_hash).cloned(); diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs index 116c1ff87..f56322654 
100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs @@ -8,7 +8,7 @@ use futures::{Future, FutureExt}; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -40,7 +40,7 @@ where &self, info_hash: &InfoHash, peer: &peer::Peer, - _opt_persistent_torrent: Option, + _opt_persistent_torrent: Option, ) -> bool { // todo: load persistent torrent data if provided diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs index 53838023d..091ff303d 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs @@ -2,7 +2,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -50,7 +50,7 @@ where &self, info_hash: &InfoHash, peer: &peer::Peer, - _opt_persistent_torrent: Option, + _opt_persistent_torrent: Option, ) -> bool { // todo: load persistent torrent data if provided 
diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs index eb7e300fd..542ad7f0a 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs @@ -4,7 +4,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -38,7 +38,7 @@ where &self, info_hash: &InfoHash, peer: &peer::Peer, - _opt_persistent_torrent: Option, + _opt_persistent_torrent: Option, ) -> bool { // todo: load persistent torrent data if provided diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs index c8ebaf4d6..2551972b3 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -4,7 +4,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; 
use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -38,7 +38,7 @@ where &self, info_hash: &InfoHash, peer: &peer::Peer, - _opt_persistent_torrent: Option, + _opt_persistent_torrent: Option, ) -> bool { // todo: load persistent torrent data if provided diff --git a/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs index 8a15a9442..7d141facb 100644 --- a/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs @@ -5,7 +5,7 @@ use crossbeam_skiplist::SkipMap; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -38,7 +38,7 @@ where /// /// Returns `true` if the number of downloads was increased because the peer /// completed the download. 
- fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, opt_persistent_torrent: Option) -> bool { if let Some(existing_entry) = self.torrents.get(info_hash) { existing_entry.value().upsert_peer(peer) } else { @@ -146,7 +146,7 @@ where EntryRwLockParkingLot: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { // todo: load persistent torrent data if provided let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); @@ -239,7 +239,7 @@ where EntryMutexParkingLot: EntrySync, EntrySingle: Entry, { - fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer, _opt_persistent_torrent: Option) -> bool { // todo: load persistent torrent data if provided let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); diff --git a/packages/torrent-repository-benchmarking/tests/common/repo.rs b/packages/torrent-repository-benchmarking/tests/common/repo.rs index 6c5c6ff77..3371e3c64 100644 --- a/packages/torrent-repository-benchmarking/tests/common/repo.rs +++ b/packages/torrent-repository-benchmarking/tests/common/repo.rs @@ -2,7 +2,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; use 
torrust_tracker_torrent_repository_benchmarking::repository::{Repository as _, RepositoryAsync as _}; use torrust_tracker_torrent_repository_benchmarking::{ EntrySingle, TorrentsDashMapMutexStd, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, @@ -29,7 +29,7 @@ impl Repo { &self, info_hash: &InfoHash, peer: &peer::Peer, - opt_persistent_torrent: Option, + opt_persistent_torrent: Option, ) -> bool { match self { Repo::RwLockStd(repo) => repo.upsert_peer(info_hash, peer, opt_persistent_torrent), diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 1504ac1f4..9c1f3d9b2 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -7,7 +7,7 @@ use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; use crate::event::sender::Sender; use crate::event::Event; @@ -53,7 +53,7 @@ impl Swarms { &self, info_hash: &InfoHash, peer: &peer::Peer, - opt_persistent_torrent: Option, + opt_persistent_torrent: Option, ) -> Result<(), Error> { let swarm_handle = match self.swarms.get(info_hash) { None => { diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index ce76ce563..a5dfc50e5 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -13,7 +13,7 @@ use r2d2::Pool; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MySqlConnectionManager; -use 
torrust_tracker_primitives::{PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{NumberOfDownloads, PersistentTorrents}; use super::{Database, Driver, Error, TORRENTS_DOWNLOADS_TOTAL}; use crate::authentication::key::AUTH_KEY_LENGTH; @@ -47,7 +47,7 @@ impl Mysql { Ok(Self { pool }) } - fn load_torrent_aggregate_metric(&self, metric_name: &str) -> Result, Error> { + fn load_torrent_aggregate_metric(&self, metric_name: &str) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let query = conn.exec_first::( @@ -60,7 +60,7 @@ impl Mysql { Ok(persistent_torrent) } - fn save_torrent_aggregate_metric(&self, metric_name: &str, completed: PersistentTorrent) -> Result<(), Error> { + fn save_torrent_aggregate_metric(&self, metric_name: &str, completed: NumberOfDownloads) -> Result<(), Error> { const COMMAND : &str = "INSERT INTO torrent_aggregate_metrics (metric_name, value) VALUES (:metric_name, :completed) ON DUPLICATE KEY UPDATE value = VALUES(value)"; let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -161,7 +161,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_persistent_torrent`](crate::core::databases::Database::load_persistent_torrent). - fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error> { + fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let query = conn.exec_first::( @@ -200,12 +200,12 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_global_number_of_downloads`](crate::core::databases::Database::load_global_number_of_downloads). - fn load_global_downloads(&self) -> Result, Error> { + fn load_global_downloads(&self) -> Result, Error> { self.load_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL) } /// Refer to [`databases::Database::save_global_number_of_downloads`](crate::core::databases::Database::save_global_number_of_downloads). 
- fn save_global_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error> { + fn save_global_downloads(&self, downloaded: NumberOfDownloads) -> Result<(), Error> { self.save_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL, downloaded) } diff --git a/packages/tracker-core/src/databases/driver/sqlite.rs b/packages/tracker-core/src/databases/driver/sqlite.rs index 794f65a4c..d4b6a82c6 100644 --- a/packages/tracker-core/src/databases/driver/sqlite.rs +++ b/packages/tracker-core/src/databases/driver/sqlite.rs @@ -13,7 +13,7 @@ use r2d2::Pool; use r2d2_sqlite::rusqlite::params; use r2d2_sqlite::rusqlite::types::Null; use r2d2_sqlite::SqliteConnectionManager; -use torrust_tracker_primitives::{DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; use super::{Database, Driver, Error, TORRENTS_DOWNLOADS_TOTAL}; use crate::authentication::{self, Key}; @@ -50,7 +50,7 @@ impl Sqlite { Ok(Self { pool }) } - fn load_torrent_aggregate_metric(&self, metric_name: &str) -> Result, Error> { + fn load_torrent_aggregate_metric(&self, metric_name: &str) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT value FROM torrent_aggregate_metrics WHERE metric_name = ?")?; @@ -65,7 +65,7 @@ impl Sqlite { })) } - fn save_torrent_aggregate_metric(&self, metric_name: &str, completed: PersistentTorrent) -> Result<(), Error> { + fn save_torrent_aggregate_metric(&self, metric_name: &str, completed: NumberOfDownloads) -> Result<(), Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let insert = conn.execute( @@ -168,7 +168,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_persistent_torrent`](crate::core::databases::Database::load_persistent_torrent). 
- fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error> { + fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT completed FROM torrents WHERE info_hash = ?")?; @@ -215,12 +215,12 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_global_number_of_downloads`](crate::core::databases::Database::load_global_number_of_downloads). - fn load_global_downloads(&self) -> Result, Error> { + fn load_global_downloads(&self) -> Result, Error> { self.load_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL) } /// Refer to [`databases::Database::save_global_number_of_downloads`](crate::core::databases::Database::save_global_number_of_downloads). - fn save_global_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error> { + fn save_global_downloads(&self, downloaded: NumberOfDownloads) -> Result<(), Error> { self.save_torrent_aggregate_metric(TORRENTS_DOWNLOADS_TOTAL, downloaded) } diff --git a/packages/tracker-core/src/databases/mod.rs b/packages/tracker-core/src/databases/mod.rs index b637219ad..6147873f6 100644 --- a/packages/tracker-core/src/databases/mod.rs +++ b/packages/tracker-core/src/databases/mod.rs @@ -52,7 +52,7 @@ pub mod setup; use bittorrent_primitives::info_hash::InfoHash; use mockall::automock; -use torrust_tracker_primitives::{PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{NumberOfDownloads, PersistentTorrents}; use self::error::Error; use crate::authentication::{self, Key}; @@ -110,7 +110,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the metrics cannot be loaded. - fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error>; + fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error>; /// Saves torrent metrics data into the database. 
/// @@ -149,7 +149,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the total downloads cannot be loaded. - fn load_global_downloads(&self) -> Result, Error>; + fn load_global_downloads(&self) -> Result, Error>; /// Saves the total number of downloads for all torrents into the database. /// @@ -163,7 +163,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the total downloads cannot be saved. - fn save_global_downloads(&self, downloaded: PersistentTorrent) -> Result<(), Error>; + fn save_global_downloads(&self, downloaded: NumberOfDownloads) -> Result<(), Error>; /// Increases the total number of downloads for all torrents. /// diff --git a/packages/tracker-core/src/statistics/persisted/downloads.rs b/packages/tracker-core/src/statistics/persisted/downloads.rs index 7edaf73d8..2e2ae3926 100644 --- a/packages/tracker-core/src/statistics/persisted/downloads.rs +++ b/packages/tracker-core/src/statistics/persisted/downloads.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; -use torrust_tracker_primitives::{PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{NumberOfDownloads, PersistentTorrents}; use crate::databases::error::Error; use crate::databases::Database; @@ -89,7 +89,7 @@ impl DatabaseDownloadsMetricRepository { /// # Errors /// /// Returns an [`Error`] if the underlying database query fails. - pub(crate) fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error> { + pub(crate) fn load_torrent_downloads(&self, info_hash: &InfoHash) -> Result, Error> { self.database.load_torrent_downloads(info_hash) } @@ -133,7 +133,7 @@ impl DatabaseDownloadsMetricRepository { /// # Errors /// /// Returns an [`Error`] if the underlying database query fails. 
- pub(crate) fn load_global_downloads(&self) -> Result, Error> { + pub(crate) fn load_global_downloads(&self) -> Result, Error> { self.database.load_global_downloads() } } diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 5c8a335b6..e44bd774f 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -6,7 +6,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrent, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; use torrust_tracker_torrent_repository::{SwarmHandle, Swarms}; /// In-memory repository for torrent entries. 
@@ -52,7 +52,7 @@ impl InMemoryTorrentRepository { &self, info_hash: &InfoHash, peer: &peer::Peer, - opt_persistent_torrent: Option, + opt_persistent_torrent: Option, ) { self.swarms .handle_announcement(info_hash, peer, opt_persistent_torrent) From bcf2338b04b5953dd75f2072365baa0dacff6b16 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 15:12:26 +0100 Subject: [PATCH 649/802] refactor: [#1541] rename type alias PersistentTorrents to NumberOfDownloadsBTreeMap --- packages/primitives/src/lib.rs | 2 +- .../src/repository/dash_map_mutex_std.rs | 4 ++-- .../src/repository/mod.rs | 6 +++--- .../src/repository/rw_lock_std.rs | 4 ++-- .../src/repository/rw_lock_std_mutex_std.rs | 4 ++-- .../src/repository/rw_lock_std_mutex_tokio.rs | 4 ++-- .../src/repository/rw_lock_tokio.rs | 4 ++-- .../src/repository/rw_lock_tokio_mutex_std.rs | 4 ++-- .../src/repository/rw_lock_tokio_mutex_tokio.rs | 4 ++-- .../src/repository/skip_map_mutex_std.rs | 8 ++++---- .../tests/common/repo.rs | 4 ++-- .../tests/repository/mod.rs | 12 ++++++------ packages/torrent-repository/src/swarms.rs | 12 ++++++------ packages/tracker-core/src/databases/driver/mysql.rs | 4 ++-- packages/tracker-core/src/databases/driver/sqlite.rs | 4 ++-- packages/tracker-core/src/databases/mod.rs | 4 ++-- .../src/statistics/persisted/downloads.rs | 8 ++++---- .../tracker-core/src/torrent/repository/in_memory.rs | 4 ++-- 18 files changed, 48 insertions(+), 48 deletions(-) diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index b04991eb8..ec2edda97 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -19,4 +19,4 @@ use bittorrent_primitives::info_hash::InfoHash; pub type DurationSinceUnixEpoch = Duration; pub type NumberOfDownloads = u32; -pub type PersistentTorrents = BTreeMap; +pub type NumberOfDownloadsBTreeMap = BTreeMap; diff --git a/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs 
b/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs index c0ef455d4..192777b32 100644 --- a/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs @@ -5,7 +5,7 @@ use dashmap::DashMap; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -77,7 +77,7 @@ where } } - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { for (info_hash, completed) in persistent_torrents { if self.torrents.contains_key(info_hash) { continue; diff --git a/packages/torrent-repository-benchmarking/src/repository/mod.rs b/packages/torrent-repository-benchmarking/src/repository/mod.rs index 2ad7a3927..890088ea7 100644 --- a/packages/torrent-repository-benchmarking/src/repository/mod.rs +++ b/packages/torrent-repository-benchmarking/src/repository/mod.rs @@ -2,7 +2,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; pub mod dash_map_mutex_std; pub mod rw_lock_std; @@ -19,7 +19,7 @@ pub trait Repository: Debug + Default + Sized + 'static { fn 
get(&self, key: &InfoHash) -> Option; fn get_metrics(&self) -> AggregateSwarmMetadata; fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, T)>; - fn import_persistent(&self, persistent_torrents: &PersistentTorrents); + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap); fn remove(&self, key: &InfoHash) -> Option; fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); fn remove_peerless_torrents(&self, policy: &TrackerPolicy); @@ -32,7 +32,7 @@ pub trait RepositoryAsync: Debug + Default + Sized + 'static { fn get(&self, key: &InfoHash) -> impl std::future::Future> + Send; fn get_metrics(&self) -> impl std::future::Future + Send; fn get_paginated(&self, pagination: Option<&Pagination>) -> impl std::future::Future> + Send; - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) -> impl std::future::Future + Send; + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) -> impl std::future::Future + Send; fn remove(&self, key: &InfoHash) -> impl std::future::Future> + Send; fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future + Send; fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> impl std::future::Future + Send; diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs index c0e4d5cf5..074725674 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs @@ -2,7 +2,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, 
PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -92,7 +92,7 @@ where } } - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { let mut torrents = self.get_torrents_mut(); for (info_hash, downloaded) in persistent_torrents { diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs index 30aabc799..9577a42e1 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs @@ -4,7 +4,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -87,7 +87,7 @@ where } } - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { let mut torrents = self.get_torrents_mut(); for (info_hash, completed) in persistent_torrents { diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs index f56322654..73cb64a08 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs +++ 
b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs @@ -8,7 +8,7 @@ use futures::{Future, FutureExt}; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -101,7 +101,7 @@ where metrics } - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + async fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { let mut db = self.get_torrents_mut(); for (info_hash, completed) in persistent_torrents { diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs index 091ff303d..9d7d591fc 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs @@ -2,7 +2,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -98,7 +98,7 @@ where metrics } - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + async fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { 
let mut torrents = self.get_torrents_mut().await; for (info_hash, completed) in persistent_torrents { diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs index 542ad7f0a..6ad7ade98 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs @@ -4,7 +4,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -92,7 +92,7 @@ where metrics } - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + async fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { let mut torrents = self.get_torrents_mut().await; for (info_hash, completed) in persistent_torrents { diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs index 2551972b3..6ce6c3f58 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -4,7 +4,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; 
-use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::RepositoryAsync; use crate::entry::peer_list::PeerList; @@ -95,7 +95,7 @@ where metrics } - async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + async fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { let mut db = self.get_torrents_mut().await; for (info_hash, completed) in persistent_torrents { diff --git a/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs index 7d141facb..81fc1c05a 100644 --- a/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs @@ -5,7 +5,7 @@ use crossbeam_skiplist::SkipMap; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::Repository; use crate::entry::peer_list::PeerList; @@ -100,7 +100,7 @@ where } } - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { for (info_hash, completed) in persistent_torrents { if self.torrents.contains_key(info_hash) { continue; @@ -193,7 +193,7 @@ where } } - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { for (info_hash, completed) in persistent_torrents { 
if self.torrents.contains_key(info_hash) { continue; @@ -286,7 +286,7 @@ where } } - fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { for (info_hash, completed) in persistent_torrents { if self.torrents.contains_key(info_hash) { continue; diff --git a/packages/torrent-repository-benchmarking/tests/common/repo.rs b/packages/torrent-repository-benchmarking/tests/common/repo.rs index 3371e3c64..e5037d641 100644 --- a/packages/torrent-repository-benchmarking/tests/common/repo.rs +++ b/packages/torrent-repository-benchmarking/tests/common/repo.rs @@ -2,7 +2,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use torrust_tracker_torrent_repository_benchmarking::repository::{Repository as _, RepositoryAsync as _}; use torrust_tracker_torrent_repository_benchmarking::{ EntrySingle, TorrentsDashMapMutexStd, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, @@ -144,7 +144,7 @@ impl Repo { } } - pub(crate) async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + pub(crate) async fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { match self { Repo::RwLockStd(repo) => repo.import_persistent(persistent_torrents), Repo::RwLockStdMutexStd(repo) => repo.import_persistent(persistent_torrents), diff --git a/packages/torrent-repository-benchmarking/tests/repository/mod.rs b/packages/torrent-repository-benchmarking/tests/repository/mod.rs index 6973f38bd..141faa8a9 100644 --- 
a/packages/torrent-repository-benchmarking/tests/repository/mod.rs +++ b/packages/torrent-repository-benchmarking/tests/repository/mod.rs @@ -7,7 +7,7 @@ use rstest::{fixture, rstest}; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; -use torrust_tracker_primitives::PersistentTorrents; +use torrust_tracker_primitives::NumberOfDownloadsBTreeMap; use torrust_tracker_torrent_repository_benchmarking::entry::Entry as _; use torrust_tracker_torrent_repository_benchmarking::repository::dash_map_mutex_std::XacrimonDashMap; use torrust_tracker_torrent_repository_benchmarking::repository::rw_lock_std::RwLockStd; @@ -167,12 +167,12 @@ fn many_hashed_in_order() -> Entries { } #[fixture] -fn persistent_empty() -> PersistentTorrents { - PersistentTorrents::default() +fn persistent_empty() -> NumberOfDownloadsBTreeMap { + NumberOfDownloadsBTreeMap::default() } #[fixture] -fn persistent_single() -> PersistentTorrents { +fn persistent_single() -> NumberOfDownloadsBTreeMap { let hash = &mut DefaultHasher::default(); hash.write_u8(1); @@ -182,7 +182,7 @@ fn persistent_single() -> PersistentTorrents { } #[fixture] -fn persistent_three() -> PersistentTorrents { +fn persistent_three() -> NumberOfDownloadsBTreeMap { let hash = &mut DefaultHasher::default(); hash.write_u8(1); @@ -445,7 +445,7 @@ async fn it_should_import_persistent_torrents( )] repo: Repo, #[case] entries: Entries, - #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: PersistentTorrents, + #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: NumberOfDownloadsBTreeMap, ) { make(&repo, &entries).await; diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index 9c1f3d9b2..ba8a80a62 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs 
@@ -7,7 +7,7 @@ use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use crate::event::sender::Sender; use crate::event::Event; @@ -356,7 +356,7 @@ impl Swarms { /// This method takes a set of persisted torrent entries (e.g., from a /// database) and imports them into the in-memory repository for immediate /// access. - pub fn import_persistent(&self, persistent_torrents: &PersistentTorrents) -> u64 { + pub fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) -> u64 { tracing::info!("Importing persisted info about torrents ..."); let mut torrents_imported = 0; @@ -1271,7 +1271,7 @@ mod tests { use std::sync::Arc; - use torrust_tracker_primitives::PersistentTorrents; + use torrust_tracker_primitives::NumberOfDownloadsBTreeMap; use crate::swarms::Swarms; use crate::tests::{leecher, sample_info_hash}; @@ -1282,7 +1282,7 @@ mod tests { let infohash = sample_info_hash(); - let mut persistent_torrents = PersistentTorrents::default(); + let mut persistent_torrents = NumberOfDownloadsBTreeMap::default(); persistent_torrents.insert(infohash, 1); @@ -1302,7 +1302,7 @@ mod tests { let infohash = sample_info_hash(); - let mut persistent_torrents = PersistentTorrents::default(); + let mut persistent_torrents = NumberOfDownloadsBTreeMap::default(); persistent_torrents.insert(infohash, 1); persistent_torrents.insert(infohash, 2); @@ -1327,7 +1327,7 @@ mod tests { // Try to import the torrent entry let new_number_of_downloads = initial_number_of_downloads + 1; - let mut persistent_torrents = PersistentTorrents::default(); + let mut 
persistent_torrents = NumberOfDownloadsBTreeMap::default(); persistent_torrents.insert(infohash, new_number_of_downloads); swarms.import_persistent(&persistent_torrents); diff --git a/packages/tracker-core/src/databases/driver/mysql.rs b/packages/tracker-core/src/databases/driver/mysql.rs index a5dfc50e5..da2f86ce8 100644 --- a/packages/tracker-core/src/databases/driver/mysql.rs +++ b/packages/tracker-core/src/databases/driver/mysql.rs @@ -13,7 +13,7 @@ use r2d2::Pool; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MySqlConnectionManager; -use torrust_tracker_primitives::{NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::{Database, Driver, Error, TORRENTS_DOWNLOADS_TOTAL}; use crate::authentication::key::AUTH_KEY_LENGTH; @@ -146,7 +146,7 @@ impl Database for Mysql { } /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). 
- fn load_all_torrents_downloads(&self) -> Result { + fn load_all_torrents_downloads(&self) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let torrents = conn.query_map( diff --git a/packages/tracker-core/src/databases/driver/sqlite.rs b/packages/tracker-core/src/databases/driver/sqlite.rs index d4b6a82c6..d08351aa8 100644 --- a/packages/tracker-core/src/databases/driver/sqlite.rs +++ b/packages/tracker-core/src/databases/driver/sqlite.rs @@ -13,7 +13,7 @@ use r2d2::Pool; use r2d2_sqlite::rusqlite::params; use r2d2_sqlite::rusqlite::types::Null; use r2d2_sqlite::SqliteConnectionManager; -use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::{Database, Driver, Error, TORRENTS_DOWNLOADS_TOTAL}; use crate::authentication::{self, Key}; @@ -152,7 +152,7 @@ impl Database for Sqlite { } /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). 
- fn load_all_torrents_downloads(&self) -> Result { + fn load_all_torrents_downloads(&self) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; diff --git a/packages/tracker-core/src/databases/mod.rs b/packages/tracker-core/src/databases/mod.rs index 6147873f6..c9d89769a 100644 --- a/packages/tracker-core/src/databases/mod.rs +++ b/packages/tracker-core/src/databases/mod.rs @@ -52,7 +52,7 @@ pub mod setup; use bittorrent_primitives::info_hash::InfoHash; use mockall::automock; -use torrust_tracker_primitives::{NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{NumberOfDownloads, NumberOfDownloadsBTreeMap}; use self::error::Error; use crate::authentication::{self, Key}; @@ -101,7 +101,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Returns an [`Error`] if the metrics cannot be loaded. - fn load_all_torrents_downloads(&self) -> Result; + fn load_all_torrents_downloads(&self) -> Result; /// Loads torrent metrics data from the database for one torrent. /// diff --git a/packages/tracker-core/src/statistics/persisted/downloads.rs b/packages/tracker-core/src/statistics/persisted/downloads.rs index 2e2ae3926..4d3bdf9a3 100644 --- a/packages/tracker-core/src/statistics/persisted/downloads.rs +++ b/packages/tracker-core/src/statistics/persisted/downloads.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; -use torrust_tracker_primitives::{NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{NumberOfDownloads, NumberOfDownloadsBTreeMap}; use crate::databases::error::Error; use crate::databases::Database; @@ -77,7 +77,7 @@ impl DatabaseDownloadsMetricRepository { /// # Errors /// /// Returns an [`Error`] if the underlying database query fails. 
- pub(crate) fn load_all_torrents_downloads(&self) -> Result { + pub(crate) fn load_all_torrents_downloads(&self) -> Result { self.database.load_all_torrents_downloads() } @@ -141,7 +141,7 @@ impl DatabaseDownloadsMetricRepository { #[cfg(test)] mod tests { - use torrust_tracker_primitives::PersistentTorrents; + use torrust_tracker_primitives::NumberOfDownloadsBTreeMap; use super::DatabaseDownloadsMetricRepository; use crate::databases::setup::initialize_database; @@ -191,7 +191,7 @@ mod tests { let torrents = repository.load_all_torrents_downloads().unwrap(); - let mut expected_torrents = PersistentTorrents::new(); + let mut expected_torrents = NumberOfDownloadsBTreeMap::new(); expected_torrents.insert(infohash_one, 1); expected_torrents.insert(infohash_two, 2); diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index e44bd774f..164f46c69 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -6,7 +6,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; -use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, PersistentTorrents}; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use torrust_tracker_torrent_repository::{SwarmHandle, Swarms}; /// In-memory repository for torrent entries. @@ -264,7 +264,7 @@ impl InMemoryTorrentRepository { /// # Arguments /// /// * `persistent_torrents` - A reference to the persisted torrent data. 
- pub fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + pub fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { self.swarms.import_persistent(persistent_torrents); } } From bd6e06acaaebffad76a69249a5faa3402db501a7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 27 May 2025 15:14:09 +0100 Subject: [PATCH 650/802] refactor: [#1541] remove unused code --- src/app.rs | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/src/app.rs b/src/app.rs index c31281829..ccc2e8bcb 100644 --- a/src/app.rs +++ b/src/app.rs @@ -66,13 +66,6 @@ async fn load_data_from_database(config: &Configuration, app_container: &Arc) -> JobManager { @@ -127,18 +120,6 @@ async fn load_whitelisted_torrents(config: &Configuration, app_container: &Arc) { - if config.core.tracker_policy.persistent_torrent_completed_stat { - app_container - .tracker_core_container - .torrents_manager - .load_torrents_from_database() - .expect("Could not load torrents from database."); - } -} - -#[allow(dead_code)] async fn load_torrent_metrics(config: &Configuration, app_container: &Arc) { if config.core.tracker_policy.persistent_torrent_completed_stat { bittorrent_tracker_core::statistics::persisted::load_persisted_metrics( From 3d6fc651d2cb515a3147264554f0db6f4c7ace12 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 28 May 2025 08:14:01 +0100 Subject: [PATCH 651/802] refactor: [#1543] rename AggregateSwarmMetadata to AggregateActiveSwarmMetadata Aggregate values are only for active swarms. For example, it does not count downloads for torrents that are not currently active. 
--- .../src/v1/context/stats/resources.rs | 4 ++-- .../src/statistics/services.rs | 8 ++++---- packages/primitives/src/swarm_metadata.rs | 13 ++++++------- .../src/statistics/services.rs | 8 ++++---- .../src/repository/dash_map_mutex_std.rs | 6 +++--- .../src/repository/mod.rs | 6 +++--- .../src/repository/rw_lock_std.rs | 6 +++--- .../src/repository/rw_lock_std_mutex_std.rs | 6 +++--- .../src/repository/rw_lock_std_mutex_tokio.rs | 6 +++--- .../src/repository/rw_lock_tokio.rs | 6 +++--- .../src/repository/rw_lock_tokio_mutex_std.rs | 6 +++--- .../repository/rw_lock_tokio_mutex_tokio.rs | 6 +++--- .../src/repository/skip_map_mutex_std.rs | 14 +++++++------- .../tests/common/repo.rs | 4 ++-- .../tests/repository/mod.rs | 4 ++-- packages/torrent-repository/src/swarms.rs | 18 +++++++++--------- .../src/torrent/repository/in_memory.rs | 4 ++-- .../src/statistics/services.rs | 8 ++++---- .../src/statistics/services.rs | 8 ++++---- 19 files changed, 70 insertions(+), 71 deletions(-) diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs index 8fcfd1be0..8b6d639c8 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs @@ -136,7 +136,7 @@ impl From for LabeledStats { mod tests { use torrust_rest_tracker_api_core::statistics::metrics::Metrics; use torrust_rest_tracker_api_core::statistics::services::TrackerMetrics; - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; + use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use super::Stats; @@ -145,7 +145,7 @@ mod tests { fn stats_resource_should_be_converted_from_tracker_metrics() { assert_eq!( Stats::from(TrackerMetrics { - torrents_metrics: AggregateSwarmMetadata { + torrents_metrics: AggregateActiveSwarmMetadata { total_complete: 1, total_downloaded: 2, 
total_incomplete: 3, diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index af1e30524..dbc096030 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -23,7 +23,7 @@ use std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; +use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use crate::statistics::metrics::Metrics; use crate::statistics::repository::Repository; @@ -34,7 +34,7 @@ pub struct TrackerMetrics { /// Domain level metrics. /// /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: AggregateSwarmMetadata, + pub torrents_metrics: AggregateActiveSwarmMetadata, /// Application level metrics. Usage statistics/metrics. /// @@ -72,7 +72,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::{self}; use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; + use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use torrust_tracker_test_helpers::configuration; use crate::event::bus::EventBus; @@ -109,7 +109,7 @@ mod tests { assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: AggregateSwarmMetadata::default(), + torrents_metrics: AggregateActiveSwarmMetadata::default(), protocol_metrics: describe_metrics(), } ); diff --git a/packages/primitives/src/swarm_metadata.rs b/packages/primitives/src/swarm_metadata.rs index a70298d71..57ba816d3 100644 --- a/packages/primitives/src/swarm_metadata.rs +++ b/packages/primitives/src/swarm_metadata.rs @@ -46,24 +46,23 @@ impl SwarmMetadata { /// Structure that holds aggregate swarm metadata. 
/// -/// Metrics are aggregate values for all torrents. +/// Metrics are aggregate values for all active torrents/swarms. #[derive(Copy, Clone, Debug, PartialEq, Default)] -pub struct AggregateSwarmMetadata { - /// Total number of peers that have ever completed downloading for all - /// torrents. +pub struct AggregateActiveSwarmMetadata { + /// Total number of peers that have ever completed downloading. pub total_downloaded: u64, - /// Total number of seeders for all torrents. + /// Total number of seeders. pub total_complete: u64, - /// Total number of leechers for all torrents. + /// Total number of leechers. pub total_incomplete: u64, /// Total number of torrents. pub total_torrents: u64, } -impl AddAssign for AggregateSwarmMetadata { +impl AddAssign for AggregateActiveSwarmMetadata { fn add_assign(&mut self, rhs: Self) { self.total_complete += rhs.total_complete; self.total_downloaded += rhs.total_downloaded; diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 8fb29e7bd..4a471a3ef 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -5,7 +5,7 @@ use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; use torrust_tracker_metrics::metric_collection::MetricCollection; -use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; +use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use torrust_udp_tracker_server::statistics as udp_server_statistics; use crate::statistics::metrics::Metrics; @@ -16,7 +16,7 @@ pub struct TrackerMetrics { /// Domain level metrics. /// /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: AggregateSwarmMetadata, + pub torrents_metrics: AggregateActiveSwarmMetadata, /// Application level metrics. 
Usage statistics/metrics. /// @@ -144,7 +144,7 @@ mod tests { use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; use tokio::sync::RwLock; use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; + use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use torrust_tracker_test_helpers::configuration; use crate::statistics::metrics::Metrics; @@ -187,7 +187,7 @@ mod tests { assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: AggregateSwarmMetadata::default(), + torrents_metrics: AggregateActiveSwarmMetadata::default(), protocol_metrics: Metrics::default(), } ); diff --git a/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs index 192777b32..fec94b4a5 100644 --- a/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/dash_map_mutex_std.rs @@ -4,7 +4,7 @@ use bittorrent_primitives::info_hash::InfoHash; use dashmap::DashMap; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::Repository; @@ -46,8 +46,8 @@ where maybe_entry.map(|entry| entry.clone()) } - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in &self.torrents { let stats = entry.value().lock().expect("it should get a lock").get_swarm_metadata(); diff --git 
a/packages/torrent-repository-benchmarking/src/repository/mod.rs b/packages/torrent-repository-benchmarking/src/repository/mod.rs index 890088ea7..cf58838a1 100644 --- a/packages/torrent-repository-benchmarking/src/repository/mod.rs +++ b/packages/torrent-repository-benchmarking/src/repository/mod.rs @@ -1,7 +1,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; pub mod dash_map_mutex_std; @@ -17,7 +17,7 @@ use std::fmt::Debug; pub trait Repository: Debug + Default + Sized + 'static { fn get(&self, key: &InfoHash) -> Option; - fn get_metrics(&self) -> AggregateSwarmMetadata; + fn get_metrics(&self) -> AggregateActiveSwarmMetadata; fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, T)>; fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap); fn remove(&self, key: &InfoHash) -> Option; @@ -30,7 +30,7 @@ pub trait Repository: Debug + Default + Sized + 'static { #[allow(clippy::module_name_repetitions)] pub trait RepositoryAsync: Debug + Default + Sized + 'static { fn get(&self, key: &InfoHash) -> impl std::future::Future> + Send; - fn get_metrics(&self) -> impl std::future::Future + Send; + fn get_metrics(&self) -> impl std::future::Future + Send; fn get_paginated(&self, pagination: Option<&Pagination>) -> impl std::future::Future> + Send; fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) -> impl std::future::Future + Send; fn remove(&self, key: &InfoHash) -> impl std::future::Future> + Send; diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs 
b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs index 074725674..5000579dd 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std.rs @@ -1,7 +1,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::Repository; @@ -64,8 +64,8 @@ where db.get(key).cloned() } - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in self.get_torrents().values() { let stats = entry.get_swarm_metadata(); diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs index 9577a42e1..085256ff1 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_std.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use 
super::Repository; @@ -59,8 +59,8 @@ where db.get(key).cloned() } - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in self.get_torrents().values() { let stats = entry.lock().expect("it should get a lock").get_swarm_metadata(); diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs index 73cb64a08..9fd451149 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_std_mutex_tokio.rs @@ -7,7 +7,7 @@ use futures::future::join_all; use futures::{Future, FutureExt}; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::RepositoryAsync; @@ -85,8 +85,8 @@ where } } - async fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + async fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); let entries: Vec<_> = self.get_torrents().values().cloned().collect(); diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs index 9d7d591fc..e85200aeb 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs +++ 
b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio.rs @@ -1,7 +1,7 @@ use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::RepositoryAsync; @@ -84,8 +84,8 @@ where } } - async fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + async fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in self.get_torrents().await.values() { let stats = entry.get_swarm_metadata(); diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs index 6ad7ade98..8d6584713 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_std.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::RepositoryAsync; @@ -78,8 +78,8 @@ where } } - async fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + async fn 
get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in self.get_torrents().await.values() { let stats = entry.get_swarm_metadata(); diff --git a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs index 6ce6c3f58..c8f499e03 100644 --- a/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs +++ b/packages/torrent-repository-benchmarking/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::RepositoryAsync; @@ -81,8 +81,8 @@ where } } - async fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + async fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in self.get_torrents().await.values() { let stats = entry.get_swarm_metadata().await; diff --git a/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs index 81fc1c05a..0432b13d0 100644 --- a/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs +++ b/packages/torrent-repository-benchmarking/src/repository/skip_map_mutex_std.rs @@ -4,7 +4,7 @@ use bittorrent_primitives::info_hash::InfoHash; use crossbeam_skiplist::SkipMap; use 
torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use super::Repository; @@ -69,8 +69,8 @@ where maybe_entry.map(|entry| entry.value().clone()) } - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in &self.torrents { let stats = entry.value().lock().expect("it should get a lock").get_swarm_metadata(); @@ -162,8 +162,8 @@ where maybe_entry.map(|entry| entry.value().clone()) } - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in &self.torrents { let stats = entry.value().read().get_swarm_metadata(); @@ -255,8 +255,8 @@ where maybe_entry.map(|entry| entry.value().clone()) } - fn get_metrics(&self) -> AggregateSwarmMetadata { - let mut metrics = AggregateSwarmMetadata::default(); + fn get_metrics(&self) -> AggregateActiveSwarmMetadata { + let mut metrics = AggregateActiveSwarmMetadata::default(); for entry in &self.torrents { let stats = entry.value().lock().get_swarm_metadata(); diff --git a/packages/torrent-repository-benchmarking/tests/common/repo.rs b/packages/torrent-repository-benchmarking/tests/common/repo.rs index e5037d641..2987240ef 100644 --- a/packages/torrent-repository-benchmarking/tests/common/repo.rs +++ b/packages/torrent-repository-benchmarking/tests/common/repo.rs @@ -1,7 +1,7 @@ use bittorrent_primitives::info_hash::InfoHash; use 
torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use torrust_tracker_torrent_repository_benchmarking::repository::{Repository as _, RepositoryAsync as _}; use torrust_tracker_torrent_repository_benchmarking::{ @@ -75,7 +75,7 @@ impl Repo { } } - pub(crate) async fn get_metrics(&self) -> AggregateSwarmMetadata { + pub(crate) async fn get_metrics(&self) -> AggregateActiveSwarmMetadata { match self { Repo::RwLockStd(repo) => repo.get_metrics(), Repo::RwLockStdMutexStd(repo) => repo.get_metrics(), diff --git a/packages/torrent-repository-benchmarking/tests/repository/mod.rs b/packages/torrent-repository-benchmarking/tests/repository/mod.rs index 141faa8a9..e555654ca 100644 --- a/packages/torrent-repository-benchmarking/tests/repository/mod.rs +++ b/packages/torrent-repository-benchmarking/tests/repository/mod.rs @@ -402,11 +402,11 @@ async fn it_should_get_metrics( repo: Repo, #[case] entries: Entries, ) { - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; + use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; make(&repo, &entries).await; - let mut metrics = AggregateSwarmMetadata::default(); + let mut metrics = AggregateActiveSwarmMetadata::default(); for (_, torrent) in entries { let stats = torrent.get_swarm_metadata(); diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index ba8a80a62..f0b3233b6 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -6,7 +6,7 @@ use tokio::sync::Mutex; use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; use 
torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use crate::event::sender::Sender; @@ -394,8 +394,8 @@ impl Swarms { /// /// This function returns an error if it fails to acquire the lock for any /// swarm handle. - pub async fn get_aggregate_swarm_metadata(&self) -> Result { - let mut metrics = AggregateSwarmMetadata::default(); + pub async fn get_aggregate_swarm_metadata(&self) -> Result { + let mut metrics = AggregateActiveSwarmMetadata::default(); for swarm_handle in &self.swarms { let swarm = swarm_handle.value().lock().await; @@ -1055,7 +1055,7 @@ mod tests { use std::sync::Arc; use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; + use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use crate::swarms::Swarms; use crate::tests::{complete_peer, leecher, sample_info_hash, seeder}; @@ -1070,7 +1070,7 @@ mod tests { assert_eq!( aggregate_swarm_metadata, - AggregateSwarmMetadata { + AggregateActiveSwarmMetadata { total_complete: 0, total_downloaded: 0, total_incomplete: 0, @@ -1092,7 +1092,7 @@ mod tests { assert_eq!( aggregate_swarm_metadata, - AggregateSwarmMetadata { + AggregateActiveSwarmMetadata { total_complete: 0, total_downloaded: 0, total_incomplete: 1, @@ -1114,7 +1114,7 @@ mod tests { assert_eq!( aggregate_swarm_metadata, - AggregateSwarmMetadata { + AggregateActiveSwarmMetadata { total_complete: 1, total_downloaded: 0, total_incomplete: 0, @@ -1136,7 +1136,7 @@ mod tests { assert_eq!( aggregate_swarm_metadata, - AggregateSwarmMetadata { + AggregateActiveSwarmMetadata { total_complete: 1, 
total_downloaded: 0, total_incomplete: 0, @@ -1164,7 +1164,7 @@ mod tests { assert_eq!( (aggregate_swarm_metadata), - (AggregateSwarmMetadata { + (AggregateActiveSwarmMetadata { total_complete: 0, total_downloaded: 0, total_incomplete: 1_000_000, diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 164f46c69..ffd885c4f 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -5,7 +5,7 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; -use torrust_tracker_primitives::swarm_metadata::{AggregateSwarmMetadata, SwarmMetadata}; +use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; use torrust_tracker_torrent_repository::{SwarmHandle, Swarms}; @@ -226,7 +226,7 @@ impl InMemoryTorrentRepository { /// /// This function panics if the underling swarms return an error. 
#[must_use] - pub async fn get_aggregate_swarm_metadata(&self) -> AggregateSwarmMetadata { + pub async fn get_aggregate_swarm_metadata(&self) -> AggregateActiveSwarmMetadata { self.swarms .get_aggregate_swarm_metadata() .await diff --git a/packages/udp-tracker-core/src/statistics/services.rs b/packages/udp-tracker-core/src/statistics/services.rs index 20ba2ea7f..24d25a25c 100644 --- a/packages/udp-tracker-core/src/statistics/services.rs +++ b/packages/udp-tracker-core/src/statistics/services.rs @@ -39,7 +39,7 @@ use std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; +use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use crate::statistics::metrics::Metrics; use crate::statistics::repository::Repository; @@ -50,7 +50,7 @@ pub struct TrackerMetrics { /// Domain level metrics. /// /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: AggregateSwarmMetadata, + pub torrents_metrics: AggregateActiveSwarmMetadata, /// Application level metrics. Usage statistics/metrics. 
/// @@ -89,7 +89,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::{self}; - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; + use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use crate::statistics::describe_metrics; use crate::statistics::repository::Repository; @@ -106,7 +106,7 @@ mod tests { assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: AggregateSwarmMetadata::default(), + torrents_metrics: AggregateActiveSwarmMetadata::default(), protocol_metrics: describe_metrics(), } ); diff --git a/packages/udp-tracker-server/src/statistics/services.rs b/packages/udp-tracker-server/src/statistics/services.rs index c8b24a744..e6e5a28f3 100644 --- a/packages/udp-tracker-server/src/statistics/services.rs +++ b/packages/udp-tracker-server/src/statistics/services.rs @@ -41,7 +41,7 @@ use std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::services::banning::BanService; use tokio::sync::RwLock; -use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; +use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use crate::statistics::metrics::Metrics; use crate::statistics::repository::Repository; @@ -52,7 +52,7 @@ pub struct TrackerMetrics { /// Domain level metrics. /// /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: AggregateSwarmMetadata, + pub torrents_metrics: AggregateActiveSwarmMetadata, /// Application level metrics. Usage statistics/metrics. 
/// @@ -109,7 +109,7 @@ mod tests { use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; use tokio::sync::RwLock; - use torrust_tracker_primitives::swarm_metadata::AggregateSwarmMetadata; + use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use crate::statistics::describe_metrics; use crate::statistics::repository::Repository; @@ -132,7 +132,7 @@ mod tests { assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: AggregateSwarmMetadata::default(), + torrents_metrics: AggregateActiveSwarmMetadata::default(), protocol_metrics: describe_metrics(), } ); From e1076142feea8062691da139f9b7ff38be59491f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 28 May 2025 08:19:12 +0100 Subject: [PATCH 652/802] chore: [#1543] remove comment on tracker-core handle_announcement We need to load the number of downloads for the torrent before adding it to the active swarms because the scrape response includes the number of downloads, and that number should include all downloads ever. --- packages/tracker-core/src/announce_handler.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index a6614361a..f74c135e3 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -163,11 +163,6 @@ impl AnnounceHandler { ) -> Result { self.whitelist_authorization.authorize(info_hash).await?; - // This will be removed in the future. - // See https://github.com/torrust/torrust-tracker/issues/1502 - // There will be a persisted metric for counting the total number of - // downloads across all torrents. The in-memory metric will count only - // the number of downloads during the current tracker uptime. 
let opt_persistent_torrent = if self.config.tracker_policy.persistent_torrent_completed_stat { self.db_downloads_metric_repository.load_torrent_downloads(info_hash)? } else { From 762bf6905477866ae2cf2a676255050d7a522d7f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 28 May 2025 08:39:17 +0100 Subject: [PATCH 653/802] refactor: [#1543] Optimization: Don't load number of downloads from DB if not needed --- packages/torrent-repository/src/swarms.rs | 4 ++++ packages/tracker-core/src/announce_handler.rs | 24 ++++++++++++------- .../src/torrent/repository/in_memory.rs | 6 +++++ 3 files changed, 26 insertions(+), 8 deletions(-) diff --git a/packages/torrent-repository/src/swarms.rs b/packages/torrent-repository/src/swarms.rs index f0b3233b6..8e7bc24de 100644 --- a/packages/torrent-repository/src/swarms.rs +++ b/packages/torrent-repository/src/swarms.rs @@ -467,6 +467,10 @@ impl Swarms { pub fn is_empty(&self) -> bool { self.swarms.is_empty() } + + pub fn contains(&self, key: &InfoHash) -> bool { + self.swarms.contains_key(key) + } } #[derive(thiserror::Error, Debug, Clone)] diff --git a/packages/tracker-core/src/announce_handler.rs b/packages/tracker-core/src/announce_handler.rs index f74c135e3..0b6bffd31 100644 --- a/packages/tracker-core/src/announce_handler.rs +++ b/packages/tracker-core/src/announce_handler.rs @@ -96,9 +96,10 @@ use std::sync::Arc; use bittorrent_primitives::info_hash::InfoHash; use torrust_tracker_configuration::{Core, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::core::AnnounceData; -use torrust_tracker_primitives::peer; +use torrust_tracker_primitives::{peer, NumberOfDownloads}; use super::torrent::repository::in_memory::InMemoryTorrentRepository; +use crate::databases; use crate::error::AnnounceError; use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use crate::whitelist::authorization::WhitelistAuthorization; @@ -163,21 +164,28 @@ impl AnnounceHandler { ) -> Result { 
self.whitelist_authorization.authorize(info_hash).await?; - let opt_persistent_torrent = if self.config.tracker_policy.persistent_torrent_completed_stat { - self.db_downloads_metric_repository.load_torrent_downloads(info_hash)? - } else { - None - }; - peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip)); self.in_memory_torrent_repository - .handle_announcement(info_hash, peer, opt_persistent_torrent) + .handle_announcement(info_hash, peer, self.load_downloads_metric_if_needed(info_hash)?) .await; Ok(self.build_announce_data(info_hash, peer, peers_wanted).await) } + /// Loads the number of downloads for a torrent if needed. + fn load_downloads_metric_if_needed( + &self, + info_hash: &InfoHash, + ) -> Result, databases::error::Error> { + if self.config.tracker_policy.persistent_torrent_completed_stat && !self.in_memory_torrent_repository.contains(info_hash) + { + Ok(self.db_downloads_metric_repository.load_torrent_downloads(info_hash)?) + } else { + Ok(None) + } + } + /// Builds the announce data for the peer making the request. async fn build_announce_data(&self, info_hash: &InfoHash, peer: &peer::Peer, peers_wanted: &PeersWanted) -> AnnounceData { let peers = self diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index ffd885c4f..cc873726d 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -267,4 +267,10 @@ impl InMemoryTorrentRepository { pub fn import_persistent(&self, persistent_torrents: &NumberOfDownloadsBTreeMap) { self.swarms.import_persistent(persistent_torrents); } + + /// Checks if the repository contains a torrent entry for the given infohash. 
+ #[must_use] + pub fn contains(&self, info_hash: &InfoHash) -> bool { + self.swarms.contains(info_hash) + } } From 02c33f6972eef36058afee9f0ee7180b51b5d072 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 28 May 2025 11:34:42 +0100 Subject: [PATCH 654/802] fix: [#1543] the downloads counter values returned in the API It now returns the persisted value when available (stats persistence enabled). --- Cargo.lock | 1 + .../src/v1/context/stats/handlers.rs | 15 ++++- .../src/v1/context/stats/routes.rs | 3 + packages/rest-tracker-api-core/Cargo.toml | 1 + .../src/statistics/services.rs | 56 +++++++++++++++++-- .../torrent-repository/src/statistics/mod.rs | 2 +- packages/tracker-core/src/statistics/mod.rs | 2 +- 7 files changed, 73 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 96de11cb2..009b1e458 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4646,6 +4646,7 @@ dependencies = [ "bittorrent-udp-tracker-core", "tokio", "torrust-tracker-configuration", + "torrust-tracker-events", "torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-test-helpers", diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs index 3a353f1fc..463c81ac8 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs @@ -10,6 +10,7 @@ use bittorrent_udp_tracker_core::services::banning::BanService; use serde::Deserialize; use tokio::sync::RwLock; use torrust_rest_tracker_api_core::statistics::services::{get_labeled_metrics, get_metrics}; +use torrust_tracker_configuration::Core; use super::responses::{labeled_metrics_response, labeled_stats_response, metrics_response, stats_response}; @@ -40,14 +41,26 @@ pub struct QueryParams { #[allow(clippy::type_complexity)] pub async fn get_stats_handler( State(state): State<( + Arc, Arc, Arc>, + Arc, + Arc, Arc, Arc, 
)>, params: Query, ) -> Response { - let metrics = get_metrics(state.0.clone(), state.1.clone(), state.2.clone(), state.3.clone()).await; + let metrics = get_metrics( + state.0.clone(), + state.1.clone(), + state.2.clone(), + state.3.clone(), + state.4.clone(), + state.5.clone(), + state.6.clone(), + ) + .await; match params.0.format { Some(format) => match format { diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs index f6c661130..3be266d3a 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs @@ -17,8 +17,11 @@ pub fn add(prefix: &str, router: Router, http_api_container: &Arc, in_memory_torrent_repository: Arc, ban_service: Arc>, + torrent_repository_stats_repository: Arc, + tracker_core_stats_repository: Arc, http_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; + let aggregate_active_swarm_metadata = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); let http_stats = http_stats_repository.get_stats().await; let udp_server_stats = udp_server_stats_repository.get_stats().await; + let total_downloaded = if core_config.tracker_policy.persistent_torrent_completed_stat { + let metrics = tracker_core_stats_repository.get_metrics().await; + + let downloads = metrics.metric_collection.get_counter_value( + &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), + &LabelSet::default(), + ); + + if let Some(downloads) = downloads { + downloads.value() + } else { + 0 + } + } else { + let metrics = torrent_repository_stats_repository.get_metrics().await; + + let downloads = metrics.metric_collection.get_counter_value( + 
&metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), + &LabelSet::default(), + ); + + if let Some(downloads) = downloads { + downloads.value() + } else { + 0 + } + }; + + let mut torrents_metrics = aggregate_active_swarm_metadata; + torrents_metrics.total_downloaded = total_downloaded; + // For backward compatibility we keep the `tcp4_connections_handled` and // `tcp6_connections_handled` metrics. They don't make sense for the HTTP // tracker, but we keep them for now. In new major versions we should remove @@ -138,14 +177,16 @@ mod tests { use bittorrent_http_tracker_core::event::sender::Broadcaster; use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; use bittorrent_http_tracker_core::statistics::repository::Repository; - use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; + use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_tracker_core::{self}; use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; use tokio::sync::RwLock; use torrust_tracker_configuration::Configuration; + use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use torrust_tracker_test_helpers::configuration; + use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::statistics::metrics::Metrics; use crate::statistics::services::{get_metrics, TrackerMetrics}; @@ -157,8 +198,12 @@ mod tests { #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { let config = tracker_configuration(); + let core_config = Arc::new(config.core.clone()); + + let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize(SenderStatus::Enabled)); + + let tracker_core_container = TrackerCoreContainer::initialize_from(&core_config, &torrent_repository_container.clone()); - let 
in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); // HTTP core stats @@ -177,8 +222,11 @@ mod tests { let udp_server_stats_repository = Arc::new(torrust_udp_tracker_server::statistics::repository::Repository::new()); let tracker_metrics = get_metrics( - in_memory_torrent_repository.clone(), + core_config, + tracker_core_container.in_memory_torrent_repository.clone(), ban_service.clone(), + torrent_repository_container.stats_repository.clone(), + tracker_core_container.stats_repository.clone(), http_stats_repository.clone(), udp_server_stats_repository.clone(), ) diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs index cfc252e34..ab5eb3f09 100644 --- a/packages/torrent-repository/src/statistics/mod.rs +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -14,7 +14,7 @@ const TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL: &str = "torrent_repository_torren const TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL: &str = "torrent_repository_torrents_removed_total"; const TORRENT_REPOSITORY_TORRENTS_TOTAL: &str = "torrent_repository_torrents_total"; -const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_torrents_downloads_total"; +pub const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_torrents_downloads_total"; const TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL: &str = "torrent_repository_torrents_inactive_total"; // Peers metrics diff --git a/packages/tracker-core/src/statistics/mod.rs b/packages/tracker-core/src/statistics/mod.rs index ff8187379..0c421863f 100644 --- a/packages/tracker-core/src/statistics/mod.rs +++ b/packages/tracker-core/src/statistics/mod.rs @@ -10,7 +10,7 @@ use torrust_tracker_metrics::unit::Unit; // Torrent metrics -const TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL: &str = 
"tracker_core_persistent_torrents_downloads_total"; +pub const TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL: &str = "tracker_core_persistent_torrents_downloads_total"; #[must_use] pub fn describe_metrics() -> Metrics { From 8d3a6fe9c3ef05a914ac51437260191a7b3c4e47 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 28 May 2025 11:48:02 +0100 Subject: [PATCH 655/802] refactor: [#1543] extract methods --- .../src/v1/context/stats/resources.rs | 5 +- .../src/statistics/metrics.rs | 31 ++++- .../src/statistics/services.rs | 109 +++++++++++------- 3 files changed, 98 insertions(+), 47 deletions(-) diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs index 8b6d639c8..08f83026f 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs @@ -134,9 +134,8 @@ impl From for LabeledStats { #[cfg(test)] mod tests { - use torrust_rest_tracker_api_core::statistics::metrics::Metrics; + use torrust_rest_tracker_api_core::statistics::metrics::{Metrics, TorrentsMetrics}; use torrust_rest_tracker_api_core::statistics::services::TrackerMetrics; - use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use super::Stats; @@ -145,7 +144,7 @@ mod tests { fn stats_resource_should_be_converted_from_tracker_metrics() { assert_eq!( Stats::from(TrackerMetrics { - torrents_metrics: AggregateActiveSwarmMetadata { + torrents_metrics: TorrentsMetrics { total_complete: 1, total_downloaded: 2, total_incomplete: 3, diff --git a/packages/rest-tracker-api-core/src/statistics/metrics.rs b/packages/rest-tracker-api-core/src/statistics/metrics.rs index 7e41cf713..ca556becf 100644 --- a/packages/rest-tracker-api-core/src/statistics/metrics.rs +++ b/packages/rest-tracker-api-core/src/statistics/metrics.rs @@ -1,4 +1,33 @@ -/// Metrics collected by the tracker. 
+use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; + +/// Metrics collected by the tracker at the swarm layer. +#[derive(Copy, Clone, Debug, PartialEq, Default)] +pub struct TorrentsMetrics { + /// Total number of peers that have ever completed downloading. + pub total_downloaded: u64, + + /// Total number of seeders. + pub total_complete: u64, + + /// Total number of leechers. + pub total_incomplete: u64, + + /// Total number of torrents. + pub total_torrents: u64, +} + +impl From for TorrentsMetrics { + fn from(value: AggregateActiveSwarmMetadata) -> Self { + Self { + total_downloaded: value.total_downloaded, + total_complete: value.total_complete, + total_incomplete: value.total_incomplete, + total_torrents: value.total_torrents, + } + } +} + +/// Metrics collected by the tracker at the delivery layer. /// /// - Number of connections handled /// - Number of `announce` requests handled diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index cc02f61e6..a899cb961 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -9,10 +9,10 @@ use torrust_tracker_configuration::Core; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric_collection::MetricCollection; use torrust_tracker_metrics::metric_name; -use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use torrust_tracker_torrent_repository::statistics::TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL; use torrust_udp_tracker_server::statistics as udp_server_statistics; +use super::metrics::TorrentsMetrics; use crate::statistics::metrics::Metrics; /// All the metrics collected by the tracker. @@ -21,7 +21,7 @@ pub struct TrackerMetrics { /// Domain level metrics. 
/// /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: AggregateActiveSwarmMetadata, + pub torrents_metrics: TorrentsMetrics, /// Application level metrics. Usage statistics/metrics. /// @@ -30,7 +30,6 @@ pub struct TrackerMetrics { } /// It returns all the [`TrackerMetrics`] -#[allow(deprecated)] pub async fn get_metrics( core_config: Arc, in_memory_torrent_repository: Arc, @@ -40,10 +39,25 @@ pub async fn get_metrics( http_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> TrackerMetrics { + TrackerMetrics { + torrents_metrics: get_torrents_metrics( + core_config, + in_memory_torrent_repository, + torrent_repository_stats_repository, + tracker_core_stats_repository, + ) + .await, + protocol_metrics: get_protocol_metrics(ban_service, http_stats_repository, udp_server_stats_repository).await, + } +} + +async fn get_torrents_metrics( + core_config: Arc, + in_memory_torrent_repository: Arc, + torrent_repository_stats_repository: Arc, + tracker_core_stats_repository: Arc, +) -> TorrentsMetrics { let aggregate_active_swarm_metadata = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; - let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); - let http_stats = http_stats_repository.get_stats().await; - let udp_server_stats = udp_server_stats_repository.get_stats().await; let total_downloaded = if core_config.tracker_policy.persistent_torrent_completed_stat { let metrics = tracker_core_stats_repository.get_metrics().await; @@ -73,47 +87,57 @@ pub async fn get_metrics( } }; - let mut torrents_metrics = aggregate_active_swarm_metadata; + let mut torrents_metrics: TorrentsMetrics = aggregate_active_swarm_metadata.into(); torrents_metrics.total_downloaded = total_downloaded; + torrents_metrics +} + +#[allow(deprecated)] +async fn get_protocol_metrics( + ban_service: Arc>, + http_stats_repository: Arc, + udp_server_stats_repository: Arc, +) -> Metrics { + let udp_banned_ips_total 
= ban_service.read().await.get_banned_ips_total(); + let http_stats = http_stats_repository.get_stats().await; + let udp_server_stats = udp_server_stats_repository.get_stats().await; + // For backward compatibility we keep the `tcp4_connections_handled` and // `tcp6_connections_handled` metrics. They don't make sense for the HTTP // tracker, but we keep them for now. In new major versions we should remove // them. - TrackerMetrics { - torrents_metrics, - protocol_metrics: Metrics { - // TCPv4 - tcp4_connections_handled: http_stats.tcp4_announces_handled + http_stats.tcp4_scrapes_handled, - tcp4_announces_handled: http_stats.tcp4_announces_handled, - tcp4_scrapes_handled: http_stats.tcp4_scrapes_handled, - // TCPv6 - tcp6_connections_handled: http_stats.tcp6_announces_handled + http_stats.tcp6_scrapes_handled, - tcp6_announces_handled: http_stats.tcp6_announces_handled, - tcp6_scrapes_handled: http_stats.tcp6_scrapes_handled, - // UDP - udp_requests_aborted: udp_server_stats.udp_requests_aborted, - udp_requests_banned: udp_server_stats.udp_requests_banned, - udp_banned_ips_total: udp_banned_ips_total as u64, - udp_avg_connect_processing_time_ns: udp_server_stats.udp_avg_connect_processing_time_ns, - udp_avg_announce_processing_time_ns: udp_server_stats.udp_avg_announce_processing_time_ns, - udp_avg_scrape_processing_time_ns: udp_server_stats.udp_avg_scrape_processing_time_ns, - // UDPv4 - udp4_requests: udp_server_stats.udp4_requests, - udp4_connections_handled: udp_server_stats.udp4_connections_handled, - udp4_announces_handled: udp_server_stats.udp4_announces_handled, - udp4_scrapes_handled: udp_server_stats.udp4_scrapes_handled, - udp4_responses: udp_server_stats.udp4_responses, - udp4_errors_handled: udp_server_stats.udp4_errors_handled, - // UDPv6 - udp6_requests: udp_server_stats.udp6_requests, - udp6_connections_handled: udp_server_stats.udp6_connections_handled, - udp6_announces_handled: udp_server_stats.udp6_announces_handled, - udp6_scrapes_handled: 
udp_server_stats.udp6_scrapes_handled, - udp6_responses: udp_server_stats.udp6_responses, - udp6_errors_handled: udp_server_stats.udp6_errors_handled, - }, + Metrics { + // TCPv4 + tcp4_connections_handled: http_stats.tcp4_announces_handled + http_stats.tcp4_scrapes_handled, + tcp4_announces_handled: http_stats.tcp4_announces_handled, + tcp4_scrapes_handled: http_stats.tcp4_scrapes_handled, + // TCPv6 + tcp6_connections_handled: http_stats.tcp6_announces_handled + http_stats.tcp6_scrapes_handled, + tcp6_announces_handled: http_stats.tcp6_announces_handled, + tcp6_scrapes_handled: http_stats.tcp6_scrapes_handled, + // UDP + udp_requests_aborted: udp_server_stats.udp_requests_aborted, + udp_requests_banned: udp_server_stats.udp_requests_banned, + udp_banned_ips_total: udp_banned_ips_total as u64, + udp_avg_connect_processing_time_ns: udp_server_stats.udp_avg_connect_processing_time_ns, + udp_avg_announce_processing_time_ns: udp_server_stats.udp_avg_announce_processing_time_ns, + udp_avg_scrape_processing_time_ns: udp_server_stats.udp_avg_scrape_processing_time_ns, + // UDPv4 + udp4_requests: udp_server_stats.udp4_requests, + udp4_connections_handled: udp_server_stats.udp4_connections_handled, + udp4_announces_handled: udp_server_stats.udp4_announces_handled, + udp4_scrapes_handled: udp_server_stats.udp4_scrapes_handled, + udp4_responses: udp_server_stats.udp4_responses, + udp4_errors_handled: udp_server_stats.udp4_errors_handled, + // UDPv6 + udp6_requests: udp_server_stats.udp6_requests, + udp6_connections_handled: udp_server_stats.udp6_connections_handled, + udp6_announces_handled: udp_server_stats.udp6_announces_handled, + udp6_scrapes_handled: udp_server_stats.udp6_scrapes_handled, + udp6_responses: udp_server_stats.udp6_responses, + udp6_errors_handled: udp_server_stats.udp6_errors_handled, } } @@ -184,11 +208,10 @@ mod tests { use tokio::sync::RwLock; use torrust_tracker_configuration::Configuration; use torrust_tracker_events::bus::SenderStatus; - use 
torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use torrust_tracker_test_helpers::configuration; use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; - use crate::statistics::metrics::Metrics; + use crate::statistics::metrics::{Metrics, TorrentsMetrics}; use crate::statistics::services::{get_metrics, TrackerMetrics}; pub fn tracker_configuration() -> Configuration { @@ -235,7 +258,7 @@ mod tests { assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: AggregateActiveSwarmMetadata::default(), + torrents_metrics: TorrentsMetrics::default(), protocol_metrics: Metrics::default(), } ); From b0e744390603b94a00232f1e8d72e61010c2a24a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 28 May 2025 12:08:05 +0100 Subject: [PATCH 656/802] fix: [#1543] return always in API the downloads number from tracker-core The tracker-core always has the metric although it can be persisted or not. When it's not persisted, it contains the number of downloads during the session. On the other hand, the `torrent-repository` metric uses labels, so you have to sum all values for all labels to get the total.
``` torrent_repository_torrents_downloads_total{peer_role="seeder"} 1 tracker_core_persistent_torrents_downloads_total{} 1 ``` --- .../src/v1/context/stats/handlers.rs | 5 -- .../src/v1/context/stats/routes.rs | 2 - .../src/statistics/services.rs | 51 ++----------------- .../torrent-repository/src/statistics/mod.rs | 2 +- .../src/http/client/requests/announce.rs | 2 +- packages/tracker-core/src/statistics/mod.rs | 2 +- .../tracker-core/src/statistics/repository.rs | 21 +++++++- 7 files changed, 26 insertions(+), 59 deletions(-) diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs index 463c81ac8..47bb5ad16 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs @@ -10,7 +10,6 @@ use bittorrent_udp_tracker_core::services::banning::BanService; use serde::Deserialize; use tokio::sync::RwLock; use torrust_rest_tracker_api_core::statistics::services::{get_labeled_metrics, get_metrics}; -use torrust_tracker_configuration::Core; use super::responses::{labeled_metrics_response, labeled_stats_response, metrics_response, stats_response}; @@ -41,10 +40,8 @@ pub struct QueryParams { #[allow(clippy::type_complexity)] pub async fn get_stats_handler( State(state): State<( - Arc, Arc, Arc>, - Arc, Arc, Arc, Arc, @@ -57,8 +54,6 @@ pub async fn get_stats_handler( state.2.clone(), state.3.clone(), state.4.clone(), - state.5.clone(), - state.6.clone(), ) .await; diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs index 3be266d3a..a573b764a 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs @@ -17,10 +17,8 @@ pub fn add(prefix: &str, router: Router, http_api_container: &Arc, 
in_memory_torrent_repository: Arc, ban_service: Arc>, - torrent_repository_stats_repository: Arc, tracker_core_stats_repository: Arc, http_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> TrackerMetrics { TrackerMetrics { - torrents_metrics: get_torrents_metrics( - core_config, - in_memory_torrent_repository, - torrent_repository_stats_repository, - tracker_core_stats_repository, - ) - .await, + torrents_metrics: get_torrents_metrics(in_memory_torrent_repository, tracker_core_stats_repository).await, protocol_metrics: get_protocol_metrics(ban_service, http_stats_repository, udp_server_stats_repository).await, } } async fn get_torrents_metrics( - core_config: Arc, in_memory_torrent_repository: Arc, - torrent_repository_stats_repository: Arc, + tracker_core_stats_repository: Arc, ) -> TorrentsMetrics { let aggregate_active_swarm_metadata = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; - let total_downloaded = if core_config.tracker_policy.persistent_torrent_completed_stat { - let metrics = tracker_core_stats_repository.get_metrics().await; - - let downloads = metrics.metric_collection.get_counter_value( - &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), - &LabelSet::default(), - ); - - if let Some(downloads) = downloads { - downloads.value() - } else { - 0 - } - } else { - let metrics = torrent_repository_stats_repository.get_metrics().await; - - let downloads = metrics.metric_collection.get_counter_value( - &metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), - &LabelSet::default(), - ); - - if let Some(downloads) = downloads { - downloads.value() - } else { - 0 - } - }; - let mut torrents_metrics: TorrentsMetrics = aggregate_active_swarm_metadata.into(); - torrents_metrics.total_downloaded = total_downloaded; + torrents_metrics.total_downloaded = tracker_core_stats_repository.get_torrents_downloads_total().await; torrents_metrics } @@ -152,7 +110,6 @@ pub struct TrackerLabeledMetrics { /// /// Will panic if 
the metrics cannot be merged. This could happen if the /// packages are producing duplicate metric names, for example. -#[allow(deprecated)] pub async fn get_labeled_metrics( in_memory_torrent_repository: Arc, ban_service: Arc>, @@ -245,10 +202,8 @@ mod tests { let udp_server_stats_repository = Arc::new(torrust_udp_tracker_server::statistics::repository::Repository::new()); let tracker_metrics = get_metrics( - core_config, tracker_core_container.in_memory_torrent_repository.clone(), ban_service.clone(), - torrent_repository_container.stats_repository.clone(), tracker_core_container.stats_repository.clone(), http_stats_repository.clone(), udp_server_stats_repository.clone(), diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/torrent-repository/src/statistics/mod.rs index ab5eb3f09..cfc252e34 100644 --- a/packages/torrent-repository/src/statistics/mod.rs +++ b/packages/torrent-repository/src/statistics/mod.rs @@ -14,7 +14,7 @@ const TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL: &str = "torrent_repository_torren const TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL: &str = "torrent_repository_torrents_removed_total"; const TORRENT_REPOSITORY_TORRENTS_TOTAL: &str = "torrent_repository_torrents_total"; -pub const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_torrents_downloads_total"; +const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_torrents_downloads_total"; const TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL: &str = "torrent_repository_torrents_inactive_total"; // Peers metrics diff --git a/packages/tracker-client/src/http/client/requests/announce.rs b/packages/tracker-client/src/http/client/requests/announce.rs index 29b5d1221..87bdbad52 100644 --- a/packages/tracker-client/src/http/client/requests/announce.rs +++ b/packages/tracker-client/src/http/client/requests/announce.rs @@ -102,7 +102,7 @@ impl QueryBuilder { peer_id: PeerId(*b"-qB00000000000000001").0, port: 17548, left: 0, - event: 
Some(Event::Completed), + event: Some(Event::Started), compact: Some(Compact::NotAccepted), }; Self { diff --git a/packages/tracker-core/src/statistics/mod.rs b/packages/tracker-core/src/statistics/mod.rs index 0c421863f..ff8187379 100644 --- a/packages/tracker-core/src/statistics/mod.rs +++ b/packages/tracker-core/src/statistics/mod.rs @@ -10,7 +10,7 @@ use torrust_tracker_metrics::unit::Unit; // Torrent metrics -pub const TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL: &str = "tracker_core_persistent_torrents_downloads_total"; +const TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL: &str = "tracker_core_persistent_torrents_downloads_total"; #[must_use] pub fn describe_metrics() -> Metrics { diff --git a/packages/tracker-core/src/statistics/repository.rs b/packages/tracker-core/src/statistics/repository.rs index dd0ebebe7..21b1da7f2 100644 --- a/packages/tracker-core/src/statistics/repository.rs +++ b/packages/tracker-core/src/statistics/repository.rs @@ -4,10 +4,11 @@ use tokio::sync::{RwLock, RwLockReadGuard}; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; use torrust_tracker_metrics::metric_collection::Error; +use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; -use super::describe_metrics; use super::metrics::Metrics; +use super::{describe_metrics, TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL}; /// A repository for the torrent repository metrics. #[derive(Clone)] @@ -154,4 +155,22 @@ impl Repository { result } + + /// Get the total number of torrent downloads. + /// + /// The value is persisted in database if persistence for downloads metrics is enabled. 
+ pub async fn get_torrents_downloads_total(&self) -> u64 { + let metrics = self.get_metrics().await; + + let downloads = metrics.metric_collection.get_counter_value( + &metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), + &LabelSet::default(), + ); + + if let Some(downloads) = downloads { + downloads.value() + } else { + 0 + } + } } From 43c71793aaba1feba5d246c42db703b443721e60 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 28 May 2025 12:15:26 +0100 Subject: [PATCH 657/802] refactor: [#1543] rename Metrics to ProtocolMetrics --- .../src/v1/context/stats/resources.rs | 4 ++-- .../rest-tracker-api-core/src/statistics/metrics.rs | 2 +- .../rest-tracker-api-core/src/statistics/services.rs | 12 ++++++------ 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs index 08f83026f..ece50383b 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/resources.rs @@ -134,7 +134,7 @@ impl From for LabeledStats { #[cfg(test)] mod tests { - use torrust_rest_tracker_api_core::statistics::metrics::{Metrics, TorrentsMetrics}; + use torrust_rest_tracker_api_core::statistics::metrics::{ProtocolMetrics, TorrentsMetrics}; use torrust_rest_tracker_api_core::statistics::services::TrackerMetrics; use super::Stats; @@ -150,7 +150,7 @@ mod tests { total_incomplete: 3, total_torrents: 4 }, - protocol_metrics: Metrics { + protocol_metrics: ProtocolMetrics { // TCP tcp4_connections_handled: 5, tcp4_announces_handled: 6, diff --git a/packages/rest-tracker-api-core/src/statistics/metrics.rs b/packages/rest-tracker-api-core/src/statistics/metrics.rs index ca556becf..ecdecd130 100644 --- a/packages/rest-tracker-api-core/src/statistics/metrics.rs +++ b/packages/rest-tracker-api-core/src/statistics/metrics.rs @@ -36,7 +36,7 @@ impl From 
for TorrentsMetrics { /// These metrics are collected for each connection type: UDP and HTTP /// and also for each IP version used by the peers: IPv4 and IPv6. #[derive(Debug, PartialEq, Default)] -pub struct Metrics { +pub struct ProtocolMetrics { /// Total number of TCP (HTTP tracker) connections from IPv4 peers. /// Since the HTTP tracker spec does not require a handshake, this metric /// increases for every HTTP request. diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index d5b68c274..9a2eb3667 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -8,7 +8,7 @@ use torrust_tracker_metrics::metric_collection::MetricCollection; use torrust_udp_tracker_server::statistics as udp_server_statistics; use super::metrics::TorrentsMetrics; -use crate::statistics::metrics::Metrics; +use crate::statistics::metrics::ProtocolMetrics; /// All the metrics collected by the tracker. #[derive(Debug, PartialEq)] @@ -21,7 +21,7 @@ pub struct TrackerMetrics { /// Application level metrics. Usage statistics/metrics. /// /// Metrics about how the tracker is been used (number of udp announce requests, number of http scrape requests, etcetera) - pub protocol_metrics: Metrics, + pub protocol_metrics: ProtocolMetrics, } /// It returns all the [`TrackerMetrics`] @@ -56,7 +56,7 @@ async fn get_protocol_metrics( ban_service: Arc>, http_stats_repository: Arc, udp_server_stats_repository: Arc, -) -> Metrics { +) -> ProtocolMetrics { let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); let http_stats = http_stats_repository.get_stats().await; let udp_server_stats = udp_server_stats_repository.get_stats().await; @@ -66,7 +66,7 @@ async fn get_protocol_metrics( // tracker, but we keep them for now. In new major versions we should remove // them. 
- Metrics { + ProtocolMetrics { // TCPv4 tcp4_connections_handled: http_stats.tcp4_announces_handled + http_stats.tcp4_scrapes_handled, tcp4_announces_handled: http_stats.tcp4_announces_handled, @@ -168,7 +168,7 @@ mod tests { use torrust_tracker_test_helpers::configuration; use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; - use crate::statistics::metrics::{Metrics, TorrentsMetrics}; + use crate::statistics::metrics::{ProtocolMetrics, TorrentsMetrics}; use crate::statistics::services::{get_metrics, TrackerMetrics}; pub fn tracker_configuration() -> Configuration { @@ -214,7 +214,7 @@ mod tests { tracker_metrics, TrackerMetrics { torrents_metrics: TorrentsMetrics::default(), - protocol_metrics: Metrics::default(), + protocol_metrics: ProtocolMetrics::default(), } ); } From 92242f8b54e7b0091b053a3ab8c110638b51a7a5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 28 May 2025 12:21:08 +0100 Subject: [PATCH 658/802] fix: [#1543] Remove peerless torrents when it's enabled in the tracker policy They were not being removed when stats was enabled because the tracker was counting downloads only from the active swarms. Now the API exposed metric (global downloads) is not taken from the in-memory data structure unless stats persistence is disabled. In that case, the global total would be per session (since the tracker started), and reset when the tracker restarts. --- packages/torrent-repository/src/swarm.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/packages/torrent-repository/src/swarm.rs b/packages/torrent-repository/src/swarm.rs index 84e1f2da4..362fc6153 100644 --- a/packages/torrent-repository/src/swarm.rs +++ b/packages/torrent-repository/src/swarm.rs @@ -201,13 +201,7 @@ impl Swarm { /// Returns true if the swarm should be removed according to the retention /// policy.
fn should_be_removed(&self, policy: &TrackerPolicy) -> bool { - // If the policy is to remove peerless torrents and the swarm is empty (no peers), - (policy.remove_peerless_torrents && self.is_empty()) - // but not when the policy is to persist torrent stats and the - // torrent has been downloaded at least once. - // (because the only way to store the counter is to keep the swarm in memory. - // See https://github.com/torrust/torrust-tracker/issues/1502) - && !(policy.persistent_torrent_completed_stat && self.metadata().downloaded > 0) + policy.remove_peerless_torrents && self.is_empty() } fn update_metadata_on_insert(&mut self, added_peer: &Arc) { From 55149bcf97ad261e0ef36334520ce4cc73082ecc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 29 May 2025 10:16:12 +0100 Subject: [PATCH 659/802] refactor: [#1519] rename dir torrent-repository --- Cargo.toml | 2 +- packages/axum-http-tracker-server/Cargo.toml | 2 +- packages/axum-rest-tracker-api-server/Cargo.toml | 2 +- packages/http-tracker-core/Cargo.toml | 2 +- packages/rest-tracker-api-core/Cargo.toml | 2 +- .../.gitignore | 0 .../Cargo.toml | 0 .../README.md | 0 .../src/container.rs | 0 .../src/event.rs | 0 .../src/lib.rs | 0 .../src/statistics/activity_metrics_updater.rs | 0 .../src/statistics/event/handler.rs | 0 .../src/statistics/event/listener.rs | 0 .../src/statistics/event/mod.rs | 0 .../src/statistics/metrics.rs | 0 .../src/statistics/mod.rs | 0 .../src/statistics/repository.rs | 0 .../src/swarm.rs | 0 .../src/swarms.rs | 0 packages/torrent-repository-benchmarking/README.md | 2 +- packages/tracker-core/Cargo.toml | 2 +- packages/udp-tracker-core/Cargo.toml | 2 +- packages/udp-tracker-server/Cargo.toml | 2 +- 24 files changed, 9 insertions(+), 9 deletions(-) rename packages/{torrent-repository => swarm-coordination-registry}/.gitignore (100%) rename packages/{torrent-repository => swarm-coordination-registry}/Cargo.toml (100%) rename packages/{torrent-repository => 
swarm-coordination-registry}/README.md (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/container.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/event.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/lib.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/statistics/activity_metrics_updater.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/statistics/event/handler.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/statistics/event/listener.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/statistics/event/mod.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/statistics/metrics.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/statistics/mod.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/statistics/repository.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/swarm.rs (100%) rename packages/{torrent-repository => swarm-coordination-registry}/src/swarms.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index 219701d03..3e6e3e073 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,7 +55,7 @@ torrust-rest-tracker-api-core = { version = "3.0.0-develop", path = "packages/re torrust-server-lib = { version = "3.0.0-develop", path = "packages/server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/configuration" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "packages/torrent-repository" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "packages/swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = 
"packages/udp-tracker-server" } tracing = "0" tracing-subscriber = { version = "0", features = ["json"] } diff --git a/packages/axum-http-tracker-server/Cargo.toml b/packages/axum-http-tracker-server/Cargo.toml index 81831a614..51283ee01 100644 --- a/packages/axum-http-tracker-server/Cargo.toml +++ b/packages/axum-http-tracker-server/Cargo.toml @@ -33,7 +33,7 @@ torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } tracing = "0" diff --git a/packages/axum-rest-tracker-api-server/Cargo.toml b/packages/axum-rest-tracker-api-server/Cargo.toml index 296f77d61..558dbf6c1 100644 --- a/packages/axum-rest-tracker-api-server/Cargo.toml +++ b/packages/axum-rest-tracker-api-server/Cargo.toml @@ -39,7 +39,7 @@ torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } tower = { version = "0", 
features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } diff --git a/packages/http-tracker-core/Cargo.toml b/packages/http-tracker-core/Cargo.toml index 37b540e39..008aa92c6 100644 --- a/packages/http-tracker-core/Cargo.toml +++ b/packages/http-tracker-core/Cargo.toml @@ -28,7 +28,7 @@ torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configur torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" [dev-dependencies] diff --git a/packages/rest-tracker-api-core/Cargo.toml b/packages/rest-tracker-api-core/Cargo.toml index 8cfe601b2..9a086ad19 100644 --- a/packages/rest-tracker-api-core/Cargo.toml +++ b/packages/rest-tracker-api-core/Cargo.toml @@ -21,7 +21,7 @@ tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } [dev-dependencies] diff --git a/packages/torrent-repository/.gitignore b/packages/swarm-coordination-registry/.gitignore similarity index 100% rename from packages/torrent-repository/.gitignore 
rename to packages/swarm-coordination-registry/.gitignore diff --git a/packages/torrent-repository/Cargo.toml b/packages/swarm-coordination-registry/Cargo.toml similarity index 100% rename from packages/torrent-repository/Cargo.toml rename to packages/swarm-coordination-registry/Cargo.toml diff --git a/packages/torrent-repository/README.md b/packages/swarm-coordination-registry/README.md similarity index 100% rename from packages/torrent-repository/README.md rename to packages/swarm-coordination-registry/README.md diff --git a/packages/torrent-repository/src/container.rs b/packages/swarm-coordination-registry/src/container.rs similarity index 100% rename from packages/torrent-repository/src/container.rs rename to packages/swarm-coordination-registry/src/container.rs diff --git a/packages/torrent-repository/src/event.rs b/packages/swarm-coordination-registry/src/event.rs similarity index 100% rename from packages/torrent-repository/src/event.rs rename to packages/swarm-coordination-registry/src/event.rs diff --git a/packages/torrent-repository/src/lib.rs b/packages/swarm-coordination-registry/src/lib.rs similarity index 100% rename from packages/torrent-repository/src/lib.rs rename to packages/swarm-coordination-registry/src/lib.rs diff --git a/packages/torrent-repository/src/statistics/activity_metrics_updater.rs b/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs similarity index 100% rename from packages/torrent-repository/src/statistics/activity_metrics_updater.rs rename to packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs diff --git a/packages/torrent-repository/src/statistics/event/handler.rs b/packages/swarm-coordination-registry/src/statistics/event/handler.rs similarity index 100% rename from packages/torrent-repository/src/statistics/event/handler.rs rename to packages/swarm-coordination-registry/src/statistics/event/handler.rs diff --git 
a/packages/torrent-repository/src/statistics/event/listener.rs b/packages/swarm-coordination-registry/src/statistics/event/listener.rs similarity index 100% rename from packages/torrent-repository/src/statistics/event/listener.rs rename to packages/swarm-coordination-registry/src/statistics/event/listener.rs diff --git a/packages/torrent-repository/src/statistics/event/mod.rs b/packages/swarm-coordination-registry/src/statistics/event/mod.rs similarity index 100% rename from packages/torrent-repository/src/statistics/event/mod.rs rename to packages/swarm-coordination-registry/src/statistics/event/mod.rs diff --git a/packages/torrent-repository/src/statistics/metrics.rs b/packages/swarm-coordination-registry/src/statistics/metrics.rs similarity index 100% rename from packages/torrent-repository/src/statistics/metrics.rs rename to packages/swarm-coordination-registry/src/statistics/metrics.rs diff --git a/packages/torrent-repository/src/statistics/mod.rs b/packages/swarm-coordination-registry/src/statistics/mod.rs similarity index 100% rename from packages/torrent-repository/src/statistics/mod.rs rename to packages/swarm-coordination-registry/src/statistics/mod.rs diff --git a/packages/torrent-repository/src/statistics/repository.rs b/packages/swarm-coordination-registry/src/statistics/repository.rs similarity index 100% rename from packages/torrent-repository/src/statistics/repository.rs rename to packages/swarm-coordination-registry/src/statistics/repository.rs diff --git a/packages/torrent-repository/src/swarm.rs b/packages/swarm-coordination-registry/src/swarm.rs similarity index 100% rename from packages/torrent-repository/src/swarm.rs rename to packages/swarm-coordination-registry/src/swarm.rs diff --git a/packages/torrent-repository/src/swarms.rs b/packages/swarm-coordination-registry/src/swarms.rs similarity index 100% rename from packages/torrent-repository/src/swarms.rs rename to packages/swarm-coordination-registry/src/swarms.rs diff --git 
a/packages/torrent-repository-benchmarking/README.md b/packages/torrent-repository-benchmarking/README.md index f248ca0da..a0556a58f 100644 --- a/packages/torrent-repository-benchmarking/README.md +++ b/packages/torrent-repository-benchmarking/README.md @@ -1,4 +1,4 @@ -# Torrust Tracker Torrent Repository Benchmarking +# Torrust Tracker Swarm Coordination Registry Benchmarking A library to runt benchmarking for different implementations of a repository of torrents files and their peers. Torrent repositories are used by the [Torrust Tracker](https://github.com/torrust/torrust-tracker). diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml index a2d08dfa0..8c9bf7769 100644 --- a/packages/tracker-core/Cargo.toml +++ b/packages/tracker-core/Cargo.toml @@ -33,7 +33,7 @@ torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" [dev-dependencies] diff --git a/packages/udp-tracker-core/Cargo.toml b/packages/udp-tracker-core/Cargo.toml index 9a27ec826..2933a7e70 100644 --- a/packages/udp-tracker-core/Cargo.toml +++ b/packages/udp-tracker-core/Cargo.toml @@ -33,7 +33,7 @@ torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configur torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = 
"../torrent-repository" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" zerocopy = "0.7" diff --git a/packages/udp-tracker-server/Cargo.toml b/packages/udp-tracker-server/Cargo.toml index a0c129acb..396dc0805 100644 --- a/packages/udp-tracker-server/Cargo.toml +++ b/packages/udp-tracker-server/Cargo.toml @@ -33,7 +33,7 @@ torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../torrent-repository" } +torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" url = { version = "2", features = ["serde"] } uuid = { version = "1", features = ["v4"] } From 2b7a25163a6a0d21aa0defe9e2999be1c5105ae0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 29 May 2025 10:42:57 +0100 Subject: [PATCH 660/802] refactor: [#1519] rename crate torrust-tracker-torrent-repository to torrust-tracker-swarm-coordination-registry --- .github/workflows/deployment.yaml | 2 +- Cargo.lock | 40 +++++++++---------- Cargo.toml | 2 +- packages/axum-http-tracker-server/Cargo.toml | 2 +- .../src/environment.rs | 2 +- .../axum-http-tracker-server/src/server.rs | 2 +- .../axum-rest-tracker-api-server/Cargo.toml | 2 +- .../src/environment.rs | 2 +- .../src/v1/context/stats/handlers.rs | 2 +- packages/http-tracker-core/Cargo.toml | 2 +- packages/http-tracker-core/src/container.rs | 2 +- packages/rest-tracker-api-core/Cargo.toml | 2 +- .../rest-tracker-api-core/src/container.rs | 2 +- .../src/statistics/services.rs | 4 +- .../swarm-coordination-registry/Cargo.toml | 2 +- packages/tracker-core/Cargo.toml | 2 +- 
packages/tracker-core/src/container.rs | 2 +- .../src/statistics/event/handler.rs | 2 +- .../src/statistics/event/listener.rs | 2 +- packages/tracker-core/src/torrent/manager.rs | 2 +- .../src/torrent/repository/in_memory.rs | 2 +- .../tracker-core/tests/common/test_env.rs | 6 +-- packages/udp-tracker-core/Cargo.toml | 2 +- packages/udp-tracker-core/src/container.rs | 2 +- packages/udp-tracker-server/Cargo.toml | 2 +- .../udp-tracker-server/src/environment.rs | 2 +- .../jobs/activity_metrics_updater.rs | 2 +- src/bootstrap/jobs/torrent_repository.rs | 2 +- src/container.rs | 2 +- 29 files changed, 50 insertions(+), 52 deletions(-) diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index d62b4bbcc..4e8fd579b 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -77,7 +77,7 @@ jobs: cargo publish -p torrust-tracker-located-error cargo publish -p torrust-tracker-metrics cargo publish -p torrust-tracker-primitives + cargo publish -p torrust-tracker-swarm-coordination-registry cargo publish -p torrust-tracker-test-helpers cargo publish -p torrust-tracker-torrent-benchmarking - cargo publish -p torrust-tracker-torrent-repository cargo publish -p torrust-udp-tracker-server diff --git a/Cargo.lock b/Cargo.lock index 009b1e458..ecf178a59 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -592,8 +592,8 @@ dependencies = [ "torrust-tracker-events", "torrust-tracker-metrics", "torrust-tracker-primitives", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", - "torrust-tracker-torrent-repository", "tracing", ] @@ -680,8 +680,8 @@ dependencies = [ "torrust-tracker-located-error", "torrust-tracker-metrics", "torrust-tracker-primitives", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", - "torrust-tracker-torrent-repository", "tracing", "url", ] @@ -710,8 +710,8 @@ dependencies = [ "torrust-tracker-events", "torrust-tracker-metrics", "torrust-tracker-primitives", 
+ "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", - "torrust-tracker-torrent-repository", "tracing", "zerocopy 0.7.35", ] @@ -4555,8 +4555,8 @@ dependencies = [ "torrust-tracker-configuration", "torrust-tracker-events", "torrust-tracker-primitives", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", - "torrust-tracker-torrent-repository", "tower", "tower-http", "tracing", @@ -4595,8 +4595,8 @@ dependencies = [ "torrust-tracker-configuration", "torrust-tracker-metrics", "torrust-tracker-primitives", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", - "torrust-tracker-torrent-repository", "torrust-udp-tracker-server", "tower", "tower-http", @@ -4649,8 +4649,8 @@ dependencies = [ "torrust-tracker-events", "torrust-tracker-metrics", "torrust-tracker-primitives", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", - "torrust-tracker-torrent-repository", "torrust-udp-tracker-server", ] @@ -4697,8 +4697,8 @@ dependencies = [ "torrust-server-lib", "torrust-tracker-clock", "torrust-tracker-configuration", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", - "torrust-tracker-torrent-repository", "torrust-udp-tracker-server", "tracing", "tracing-subscriber", @@ -4819,17 +4819,7 @@ dependencies = [ ] [[package]] -name = "torrust-tracker-test-helpers" -version = "3.0.0-develop" -dependencies = [ - "rand 0.9.1", - "torrust-tracker-configuration", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "torrust-tracker-torrent-repository" +name = "torrust-tracker-swarm-coordination-registry" version = "3.0.0-develop" dependencies = [ "aquatic_udp_protocol", @@ -4840,7 +4830,7 @@ dependencies = [ "crossbeam-skiplist", "futures", "mockall", - "rand 0.9.1", + "rand 0.8.5", "rstest", "serde", "thiserror 2.0.12", @@ -4854,6 +4844,16 @@ dependencies = [ "tracing", ] +[[package]] +name = "torrust-tracker-test-helpers" +version = 
"3.0.0-develop" +dependencies = [ + "rand 0.9.1", + "torrust-tracker-configuration", + "tracing", + "tracing-subscriber", +] + [[package]] name = "torrust-tracker-torrent-repository-benchmarking" version = "3.0.0-develop" @@ -4900,8 +4900,8 @@ dependencies = [ "torrust-tracker-located-error", "torrust-tracker-metrics", "torrust-tracker-primitives", + "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", - "torrust-tracker-torrent-repository", "tracing", "url", "uuid", diff --git a/Cargo.toml b/Cargo.toml index 3e6e3e073..976176155 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,7 +55,7 @@ torrust-rest-tracker-api-core = { version = "3.0.0-develop", path = "packages/re torrust-server-lib = { version = "3.0.0-develop", path = "packages/server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "packages/clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/configuration" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "packages/swarm-coordination-registry" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "packages/swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "packages/udp-tracker-server" } tracing = "0" tracing-subscriber = { version = "0", features = ["json"] } diff --git a/packages/axum-http-tracker-server/Cargo.toml b/packages/axum-http-tracker-server/Cargo.toml index 51283ee01..fa195489c 100644 --- a/packages/axum-http-tracker-server/Cargo.toml +++ b/packages/axum-http-tracker-server/Cargo.toml @@ -33,7 +33,7 @@ torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = 
"3.0.0-develop", path = "../swarm-coordination-registry" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } tracing = "0" diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index 0c1431db5..54c6b7767 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -10,7 +10,7 @@ use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; -use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; use crate::server::{HttpServer, Launcher, Running, Stopped}; diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index f7d1ed7ea..b8ece8086 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -259,8 +259,8 @@ mod tests { use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; + use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; use torrust_tracker_test_helpers::configuration::ephemeral_public; - use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::server::{HttpServer, Launcher}; diff --git a/packages/axum-rest-tracker-api-server/Cargo.toml b/packages/axum-rest-tracker-api-server/Cargo.toml index 558dbf6c1..9493b8693 100644 --- a/packages/axum-rest-tracker-api-server/Cargo.toml +++ 
b/packages/axum-rest-tracker-api-server/Cargo.toml @@ -39,7 +39,7 @@ torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } tower = { version = "0", features = ["timeout"] } tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index be93a8723..6be4cc53c 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -12,7 +12,7 @@ use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; -use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; use crate::server::{ApiServer, Launcher, Running, Stopped}; diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs index 47bb5ad16..b907b861a 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs +++ 
b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs @@ -77,7 +77,7 @@ pub async fn get_metrics_handler( State(state): State<( Arc, Arc>, - Arc, + Arc, Arc, Arc, Arc, diff --git a/packages/http-tracker-core/Cargo.toml b/packages/http-tracker-core/Cargo.toml index 008aa92c6..45af59baa 100644 --- a/packages/http-tracker-core/Cargo.toml +++ b/packages/http-tracker-core/Cargo.toml @@ -28,7 +28,7 @@ torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configur torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" [dev-dependencies] diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index f063c0061..35f75e1fe 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use bittorrent_tracker_core::container::TrackerCoreContainer; use torrust_tracker_configuration::{Core, HttpTracker}; -use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; diff --git a/packages/rest-tracker-api-core/Cargo.toml b/packages/rest-tracker-api-core/Cargo.toml index 9a086ad19..cc8eda903 100644 --- a/packages/rest-tracker-api-core/Cargo.toml +++ b/packages/rest-tracker-api-core/Cargo.toml @@ -21,7 +21,7 @@ tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal torrust-tracker-configuration = { version = 
"3.0.0-develop", path = "../configuration" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } [dev-dependencies] diff --git a/packages/rest-tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs index 1c4a08e26..f76c2ece3 100644 --- a/packages/rest-tracker-api-core/src/container.rs +++ b/packages/rest-tracker-api-core/src/container.rs @@ -7,7 +7,7 @@ use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, HttpApi, HttpTracker, UdpTracker}; -use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; pub struct TrackerHttpApiCoreContainer { diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 9a2eb3667..56536a02f 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -113,7 +113,7 @@ pub struct TrackerLabeledMetrics { pub async fn get_labeled_metrics( in_memory_torrent_repository: Arc, ban_service: Arc>, - swarms_stats_repository: Arc, + swarms_stats_repository: Arc, tracker_core_stats_repository: Arc, http_stats_repository: Arc, udp_stats_repository: Arc, @@ -165,8 +165,8 @@ mod tests { use tokio::sync::RwLock; use torrust_tracker_configuration::Configuration; use 
torrust_tracker_events::bus::SenderStatus; + use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; use torrust_tracker_test_helpers::configuration; - use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; use crate::statistics::metrics::{ProtocolMetrics, TorrentsMetrics}; use crate::statistics::services::{get_metrics, TrackerMetrics}; diff --git a/packages/swarm-coordination-registry/Cargo.toml b/packages/swarm-coordination-registry/Cargo.toml index 510a59e9d..074562a47 100644 --- a/packages/swarm-coordination-registry/Cargo.toml +++ b/packages/swarm-coordination-registry/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "A library that provides a repository of torrents files and their peers." keywords = ["library", "repository", "torrents"] -name = "torrust-tracker-torrent-repository" +name = "torrust-tracker-swarm-coordination-registry" readme = "README.md" authors.workspace = true diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml index 8c9bf7769..f04a3b89b 100644 --- a/packages/tracker-core/Cargo.toml +++ b/packages/tracker-core/Cargo.toml @@ -33,7 +33,7 @@ torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" [dev-dependencies] diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index 02af67118..949761553 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -1,7 +1,7 @@ use 
std::sync::Arc; use torrust_tracker_configuration::Core; -use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; use crate::announce_handler::AnnounceHandler; use crate::authentication::handler::KeysHandler; diff --git a/packages/tracker-core/src/statistics/event/handler.rs b/packages/tracker-core/src/statistics/event/handler.rs index 0909dc184..9a5182f25 100644 --- a/packages/tracker-core/src/statistics/event/handler.rs +++ b/packages/tracker-core/src/statistics/event/handler.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; -use torrust_tracker_torrent_repository::event::Event; +use torrust_tracker_swarm_coordination_registry::event::Event; use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; use crate::statistics::repository::Repository; diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs index 2702aa858..d3beaf41f 100644 --- a/packages/tracker-core/src/statistics/event/listener.rs +++ b/packages/tracker-core/src/statistics/event/listener.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use tokio::task::JoinHandle; use torrust_tracker_clock::clock::Time; use torrust_tracker_events::receiver::RecvError; -use torrust_tracker_torrent_repository::event::receiver::Receiver; +use torrust_tracker_swarm_coordination_registry::event::receiver::Receiver; use super::handler::handle_event; use crate::statistics::persisted::downloads::DatabaseDownloadsMetricRepository; diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index 766fa5c4a..cbdf01193 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -148,7 +148,7 @@ mod tests { use 
std::sync::Arc; use torrust_tracker_configuration::Core; - use torrust_tracker_torrent_repository::Swarms; + use torrust_tracker_swarm_coordination_registry::Swarms; use super::{DatabaseDownloadsMetricRepository, TorrentsManager}; use crate::databases::setup::initialize_database; diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index cc873726d..47b34ad26 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -7,7 +7,7 @@ use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; -use torrust_tracker_torrent_repository::{SwarmHandle, Swarms}; +use torrust_tracker_swarm_coordination_registry::{SwarmHandle, Swarms}; /// In-memory repository for torrent entries. 
/// diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs index 2aafbbbad..64bdcaad8 100644 --- a/packages/tracker-core/tests/common/test_env.rs +++ b/packages/tracker-core/tests/common/test_env.rs @@ -14,7 +14,7 @@ use torrust_tracker_primitives::core::{AnnounceData, ScrapeData}; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; -use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; pub struct TestEnv { pub torrent_repository_container: Arc, @@ -67,11 +67,10 @@ impl TestEnv { async fn run_jobs(&self) { let mut jobs = vec![]; - let job = torrust_tracker_torrent_repository::statistics::event::listener::run_event_listener( + let job = torrust_tracker_swarm_coordination_registry::statistics::event::listener::run_event_listener( self.torrent_repository_container.event_bus.receiver(), &self.torrent_repository_container.stats_repository, ); - jobs.push(job); let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( @@ -83,7 +82,6 @@ impl TestEnv { .tracker_policy .persistent_torrent_completed_stat, ); - jobs.push(job); // Give the event listeners some time to start diff --git a/packages/udp-tracker-core/Cargo.toml b/packages/udp-tracker-core/Cargo.toml index 2933a7e70..290c5fbfd 100644 --- a/packages/udp-tracker-core/Cargo.toml +++ b/packages/udp-tracker-core/Cargo.toml @@ -33,7 +33,7 @@ torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configur torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path 
= "../swarm-coordination-registry" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" zerocopy = "0.7" diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index 07a8a09ef..c4be395fc 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use bittorrent_tracker_core::container::TrackerCoreContainer; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, UdpTracker}; -use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; diff --git a/packages/udp-tracker-server/Cargo.toml b/packages/udp-tracker-server/Cargo.toml index 396dc0805..72fa520ba 100644 --- a/packages/udp-tracker-server/Cargo.toml +++ b/packages/udp-tracker-server/Cargo.toml @@ -33,7 +33,7 @@ torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -torrust-tracker-torrent-repository = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } +torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" url = { version = "2", features = ["serde"] } uuid = { version = "1", features = ["v4"] } diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 94a166e4e..3f479a02d 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -8,7 +8,7 @@ use 
tokio::task::JoinHandle; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration, DEFAULT_TIMEOUT}; use torrust_tracker_primitives::peer; -use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; use crate::container::UdpTrackerServerContainer; use crate::server::spawner::Spawner; diff --git a/src/bootstrap/jobs/activity_metrics_updater.rs b/src/bootstrap/jobs/activity_metrics_updater.rs index 7411c05cf..9813fed65 100644 --- a/src/bootstrap/jobs/activity_metrics_updater.rs +++ b/src/bootstrap/jobs/activity_metrics_updater.rs @@ -11,7 +11,7 @@ use crate::CurrentClock; #[must_use] pub fn start_job(config: &Configuration, app_container: &Arc) -> JoinHandle<()> { - torrust_tracker_torrent_repository::statistics::activity_metrics_updater::start_job( + torrust_tracker_swarm_coordination_registry::statistics::activity_metrics_updater::start_job( &app_container.torrent_repository_container.swarms.clone(), &app_container.torrent_repository_container.stats_repository.clone(), peer_inactivity_cutoff_timestamp(config.core.tracker_policy.max_peer_timeout), diff --git a/src/bootstrap/jobs/torrent_repository.rs b/src/bootstrap/jobs/torrent_repository.rs index ea0d215ee..c64917ea6 100644 --- a/src/bootstrap/jobs/torrent_repository.rs +++ b/src/bootstrap/jobs/torrent_repository.rs @@ -7,7 +7,7 @@ use crate::container::AppContainer; pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { if config.core.tracker_usage_statistics { - let job = torrust_tracker_torrent_repository::statistics::event::listener::run_event_listener( + let job = torrust_tracker_swarm_coordination_registry::statistics::event::listener::run_event_listener( app_container.torrent_repository_container.event_bus.receiver(), &app_container.torrent_repository_container.stats_repository, ); diff --git a/src/container.rs 
b/src/container.rs index 98c455780..bb5873fb2 100644 --- a/src/container.rs +++ b/src/container.rs @@ -9,7 +9,7 @@ use bittorrent_udp_tracker_core::{self}; use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{Configuration, HttpApi}; -use torrust_tracker_torrent_repository::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; use tracing::instrument; From 2768306a8b5db288f27dedac6ce59a11efc61bcb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 29 May 2025 10:50:56 +0100 Subject: [PATCH 661/802] refactor: [#1519] rename Swarm to Coordinator --- .../swarm-coordination-registry/src/lib.rs | 4 +- .../swarm-coordination-registry/src/swarm.rs | 102 +++++++++--------- .../swarm-coordination-registry/src/swarms.rs | 12 +-- 3 files changed, 59 insertions(+), 59 deletions(-) diff --git a/packages/swarm-coordination-registry/src/lib.rs b/packages/swarm-coordination-registry/src/lib.rs index 3adf2f18d..c93f553fa 100644 --- a/packages/swarm-coordination-registry/src/lib.rs +++ b/packages/swarm-coordination-registry/src/lib.rs @@ -10,8 +10,8 @@ use tokio::sync::Mutex; use torrust_tracker_clock::clock; pub type Swarms = swarms::Swarms; -pub type SwarmHandle = Arc>; -pub type Swarm = swarm::Swarm; +pub type SwarmHandle = Arc>; +pub type Coordinator = swarm::Coordinator; /// Working version, for production. 
#[cfg(not(test))] diff --git a/packages/swarm-coordination-registry/src/swarm.rs b/packages/swarm-coordination-registry/src/swarm.rs index 362fc6153..81e454d8b 100644 --- a/packages/swarm-coordination-registry/src/swarm.rs +++ b/packages/swarm-coordination-registry/src/swarm.rs @@ -15,14 +15,14 @@ use crate::event::sender::Sender; use crate::event::Event; #[derive(Clone)] -pub struct Swarm { +pub struct Coordinator { info_hash: InfoHash, peers: BTreeMap>, metadata: SwarmMetadata, event_sender: Sender, } -impl Swarm { +impl Coordinator { #[must_use] pub fn new(info_hash: &InfoHash, downloaded: u32, event_sender: Sender) -> Self { Self { @@ -326,26 +326,26 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::swarm::Swarm; + use crate::swarm::Coordinator; use crate::tests::sample_info_hash; #[test] fn it_should_be_empty_when_no_peers_have_been_inserted() { - let swarm = Swarm::new(&sample_info_hash(), 0, None); + let swarm = Coordinator::new(&sample_info_hash(), 0, None); assert!(swarm.is_empty()); } #[test] fn it_should_have_zero_length_when_no_peers_have_been_inserted() { - let swarm = Swarm::new(&sample_info_hash(), 0, None); + let swarm = Coordinator::new(&sample_info_hash(), 0, None); assert_eq!(swarm.len(), 0); } #[tokio::test] async fn it_should_allow_inserting_a_new_peer() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer = PeerBuilder::default().build(); @@ -354,7 +354,7 @@ mod tests { #[tokio::test] async fn it_should_allow_updating_a_preexisting_peer() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer = PeerBuilder::default().build(); @@ -365,7 +365,7 @@ mod tests { #[tokio::test] async fn it_should_allow_getting_all_peers() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let 
mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer = PeerBuilder::default().build(); @@ -376,7 +376,7 @@ mod tests { #[tokio::test] async fn it_should_allow_getting_one_peer_by_id() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer = PeerBuilder::default().build(); @@ -387,7 +387,7 @@ mod tests { #[tokio::test] async fn it_should_increase_the_number_of_peers_after_inserting_a_new_one() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer = PeerBuilder::default().build(); @@ -398,7 +398,7 @@ mod tests { #[tokio::test] async fn it_should_decrease_the_number_of_peers_after_removing_one() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer = PeerBuilder::default().build(); @@ -411,7 +411,7 @@ mod tests { #[tokio::test] async fn it_should_allow_removing_an_existing_peer() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer = PeerBuilder::default().build(); @@ -425,7 +425,7 @@ mod tests { #[tokio::test] async fn it_should_allow_removing_a_non_existing_peer() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer = PeerBuilder::default().build(); @@ -434,7 +434,7 @@ mod tests { #[tokio::test] async fn it_should_allow_getting_all_peers_excluding_peers_with_a_given_address() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer1 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) @@ -453,7 +453,7 @@ mod tests { #[tokio::test] async fn it_should_count_inactive_peers() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut 
swarm = Coordinator::new(&sample_info_hash(), 0, None); let one_second = DurationSinceUnixEpoch::new(1, 0); @@ -469,7 +469,7 @@ mod tests { #[tokio::test] async fn it_should_remove_inactive_peers() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let one_second = DurationSinceUnixEpoch::new(1, 0); @@ -486,7 +486,7 @@ mod tests { #[tokio::test] async fn it_should_not_remove_active_peers() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let one_second = DurationSinceUnixEpoch::new(1, 0); @@ -507,20 +507,20 @@ mod tests { use torrust_tracker_primitives::peer::fixture::PeerBuilder; use crate::tests::sample_info_hash; - use crate::Swarm; + use crate::Coordinator; - fn empty_swarm() -> Swarm { - Swarm::new(&sample_info_hash(), 0, None) + fn empty_swarm() -> Coordinator { + Coordinator::new(&sample_info_hash(), 0, None) } - async fn not_empty_swarm() -> Swarm { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + async fn not_empty_swarm() -> Coordinator { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); swarm.upsert_peer(PeerBuilder::default().build().into()).await; swarm } - async fn not_empty_swarm_with_downloads() -> Swarm { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + async fn not_empty_swarm_with_downloads() -> Coordinator { + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let mut peer = PeerBuilder::leecher().build(); @@ -602,7 +602,7 @@ mod tests { #[tokio::test] async fn it_should_allow_inserting_two_identical_peers_except_for_the_socket_address() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer1 = PeerBuilder::default() .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) @@ -619,7 +619,7 @@ mod tests { #[tokio::test] async fn 
it_should_not_allow_inserting_two_peers_with_different_peer_id_but_the_same_socket_address() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); // When that happens the peer ID will be changed in the swarm. // In practice, it's like if the peer had changed its ID. @@ -641,7 +641,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_swarm_metadata() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); @@ -661,7 +661,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_number_of_seeders_in_the_list() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); @@ -676,7 +676,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_number_of_leechers_in_the_list() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let seeder = PeerBuilder::seeder().build(); let leecher = PeerBuilder::leecher().build(); @@ -691,7 +691,7 @@ mod tests { #[tokio::test] async fn it_should_be_a_peerless_swarm_when_it_does_not_contain_any_peers() { - let swarm = Swarm::new(&sample_info_hash(), 0, None); + let swarm = Coordinator::new(&sample_info_hash(), 0, None); assert!(swarm.is_peerless()); } @@ -700,12 +700,12 @@ mod tests { mod when_a_new_peer_is_added { use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::swarm::Swarm; + use crate::swarm::Coordinator; use crate::tests::sample_info_hash; #[tokio::test] async fn it_should_increase_the_number_of_leechers_if_the_new_peer_is_a_leecher_() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = 
Coordinator::new(&sample_info_hash(), 0, None); let leechers = swarm.metadata().leechers(); @@ -718,7 +718,7 @@ mod tests { #[tokio::test] async fn it_should_increase_the_number_of_seeders_if_the_new_peer_is_a_seeder() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let seeders = swarm.metadata().seeders(); @@ -732,7 +732,7 @@ mod tests { #[tokio::test] async fn it_should_not_increasing_the_number_of_downloads_if_the_new_peer_has_completed_downloading_as_it_was_not_previously_known( ) { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let downloads = swarm.metadata().downloads(); @@ -747,12 +747,12 @@ mod tests { mod when_a_peer_is_removed { use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::swarm::Swarm; + use crate::swarm::Coordinator; use crate::tests::sample_info_hash; #[tokio::test] async fn it_should_decrease_the_number_of_leechers_if_the_removed_peer_was_a_leecher() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let leecher = PeerBuilder::leecher().build(); @@ -767,7 +767,7 @@ mod tests { #[tokio::test] async fn it_should_decrease_the_number_of_seeders_if_the_removed_peer_was_a_seeder() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let seeder = PeerBuilder::seeder().build(); @@ -786,12 +786,12 @@ mod tests { use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::swarm::Swarm; + use crate::swarm::Coordinator; use crate::tests::sample_info_hash; #[tokio::test] async fn it_should_decrease_the_number_of_leechers_when_a_removed_peer_is_a_leecher() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let leecher = 
PeerBuilder::leecher().build(); @@ -806,7 +806,7 @@ mod tests { #[tokio::test] async fn it_should_decrease_the_number_of_seeders_when_the_removed_peer_is_a_seeder() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let seeder = PeerBuilder::seeder().build(); @@ -824,12 +824,12 @@ mod tests { use aquatic_udp_protocol::NumberOfBytes; use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::swarm::Swarm; + use crate::swarm::Coordinator; use crate::tests::sample_info_hash; #[tokio::test] async fn it_should_increase_seeders_and_decreasing_leechers_when_the_peer_changes_from_leecher_to_seeder_() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let mut peer = PeerBuilder::leecher().build(); @@ -848,7 +848,7 @@ mod tests { #[tokio::test] async fn it_should_increase_leechers_and_decreasing_seeders_when_the_peer_changes_from_seeder_to_leecher() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let mut peer = PeerBuilder::seeder().build(); @@ -867,7 +867,7 @@ mod tests { #[tokio::test] async fn it_should_increase_the_number_of_downloads_when_the_peer_announces_completed_downloading() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let mut peer = PeerBuilder::leecher().build(); @@ -884,7 +884,7 @@ mod tests { #[tokio::test] async fn it_should_not_increasing_the_number_of_downloads_when_the_peer_announces_completed_downloading_twice_() { - let mut swarm = Swarm::new(&sample_info_hash(), 0, None); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let mut peer = PeerBuilder::leecher().build(); @@ -913,7 +913,7 @@ mod tests { use crate::event::sender::tests::{expect_event_sequence, MockEventSender}; use crate::event::Event; - use 
crate::swarm::Swarm; + use crate::swarm::Coordinator; use crate::tests::sample_info_hash; #[tokio::test] @@ -925,7 +925,7 @@ mod tests { expect_event_sequence(&mut event_sender_mock, vec![Event::PeerAdded { info_hash, peer }]); - let mut swarm = Swarm::new(&sample_info_hash(), 0, Some(Arc::new(event_sender_mock))); + let mut swarm = Coordinator::new(&sample_info_hash(), 0, Some(Arc::new(event_sender_mock))); swarm.upsert_peer(peer.into()).await; } @@ -942,7 +942,7 @@ mod tests { vec![Event::PeerAdded { info_hash, peer }, Event::PeerRemoved { info_hash, peer }], ); - let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + let mut swarm = Coordinator::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); // Insert the peer swarm.upsert_peer(peer.into()).await; @@ -962,7 +962,7 @@ mod tests { vec![Event::PeerAdded { info_hash, peer }, Event::PeerRemoved { info_hash, peer }], ); - let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + let mut swarm = Coordinator::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); // Insert the peer swarm.upsert_peer(peer.into()).await; @@ -992,7 +992,7 @@ mod tests { ], ); - let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + let mut swarm = Coordinator::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); // Insert the peer swarm.upsert_peer(peer.into()).await; @@ -1028,7 +1028,7 @@ mod tests { ], ); - let mut swarm = Swarm::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); + let mut swarm = Coordinator::new(&info_hash, 0, Some(Arc::new(event_sender_mock))); // Insert the peer swarm.upsert_peer(started_peer.into()).await; diff --git a/packages/swarm-coordination-registry/src/swarms.rs b/packages/swarm-coordination-registry/src/swarms.rs index 8e7bc24de..12fe2190d 100644 --- a/packages/swarm-coordination-registry/src/swarms.rs +++ b/packages/swarm-coordination-registry/src/swarms.rs @@ -11,7 +11,7 @@ use torrust_tracker_primitives::{peer, 
DurationSinceUnixEpoch, NumberOfDownloads use crate::event::sender::Sender; use crate::event::Event; -use crate::swarm::Swarm; +use crate::swarm::Coordinator; use crate::SwarmHandle; #[derive(Default)] @@ -60,7 +60,7 @@ impl Swarms { let number_of_downloads = opt_persistent_torrent.unwrap_or_default(); let new_swarm_handle = - SwarmHandle::new(Swarm::new(info_hash, number_of_downloads, self.event_sender.clone()).into()); + SwarmHandle::new(Coordinator::new(info_hash, number_of_downloads, self.event_sender.clone()).into()); let new_swarm_handle = self.swarms.get_or_insert(*info_hash, new_swarm_handle); @@ -86,7 +86,7 @@ impl Swarms { } /// Inserts a new swarm. Only used for testing purposes. - pub fn insert(&self, info_hash: &InfoHash, swarm: Swarm) { + pub fn insert(&self, info_hash: &InfoHash, swarm: Coordinator) { // code-review: swarms builder? or constructor from vec? // It's only used for testing purposes. It allows to pre-define the // initial state of the swarm without having to go through the upsert @@ -366,7 +366,7 @@ impl Swarms { continue; } - let entry = SwarmHandle::new(Swarm::new(info_hash, *completed, self.event_sender.clone()).into()); + let entry = SwarmHandle::new(Coordinator::new(info_hash, *completed, self.event_sender.clone()).into()); // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. @@ -853,7 +853,7 @@ mod tests { use crate::swarms::Swarms; use crate::tests::{sample_info_hash, sample_peer}; - use crate::{Swarm, SwarmHandle}; + use crate::{Coordinator, SwarmHandle}; /// `TorrentEntry` data is not directly accessible. It's only /// accessible through the trait methods. 
We need this temporary @@ -871,7 +871,7 @@ mod tests { } #[allow(clippy::from_over_into)] - impl Into for Swarm { + impl Into for Coordinator { fn into(self) -> TorrentEntryInfo { let torrent_entry_info = TorrentEntryInfo { swarm_metadata: self.metadata(), From ba37801d3c62b2b2c4ad1df1785609e6543d7d61 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 29 May 2025 10:55:08 +0100 Subject: [PATCH 662/802] refactor: [#1519] rename Swarms to Registry --- .../swarm-coordination-registry/src/lib.rs | 2 +- .../swarm-coordination-registry/src/swarms.rs | 110 +++++++++--------- 2 files changed, 56 insertions(+), 56 deletions(-) diff --git a/packages/swarm-coordination-registry/src/lib.rs b/packages/swarm-coordination-registry/src/lib.rs index c93f553fa..f3926331a 100644 --- a/packages/swarm-coordination-registry/src/lib.rs +++ b/packages/swarm-coordination-registry/src/lib.rs @@ -9,7 +9,7 @@ use std::sync::Arc; use tokio::sync::Mutex; use torrust_tracker_clock::clock; -pub type Swarms = swarms::Swarms; +pub type Swarms = swarms::Registry; pub type SwarmHandle = Arc>; pub type Coordinator = swarm::Coordinator; diff --git a/packages/swarm-coordination-registry/src/swarms.rs b/packages/swarm-coordination-registry/src/swarms.rs index 12fe2190d..c14cb66b7 100644 --- a/packages/swarm-coordination-registry/src/swarms.rs +++ b/packages/swarm-coordination-registry/src/swarms.rs @@ -15,12 +15,12 @@ use crate::swarm::Coordinator; use crate::SwarmHandle; #[derive(Default)] -pub struct Swarms { +pub struct Registry { swarms: SkipMap, event_sender: Sender, } -impl Swarms { +impl Registry { #[must_use] pub fn new(event_sender: Sender) -> Self { Self { @@ -510,7 +510,7 @@ mod tests { use aquatic_udp_protocol::PeerId; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{sample_info_hash, sample_peer}; /// It generates a peer id from a number where the number is the last @@ -543,13 +543,13 @@ mod tests { #[tokio::test] async fn 
it_should_return_zero_length_when_it_has_no_swarms() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); assert_eq!(swarms.len(), 0); } #[tokio::test] async fn it_should_return_the_length_when_it_has_swarms() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); @@ -558,7 +558,7 @@ mod tests { #[tokio::test] async fn it_should_be_empty_when_it_has_no_swarms() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); assert!(swarms.is_empty()); let info_hash = sample_info_hash(); @@ -569,7 +569,7 @@ mod tests { #[tokio::test] async fn it_should_not_be_empty_when_it_has_at_least_one_swarm() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); @@ -581,12 +581,12 @@ mod tests { use std::sync::Arc; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn it_should_add_the_first_peer_to_the_torrent_peer_list() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); @@ -597,7 +597,7 @@ mod tests { #[tokio::test] async fn it_should_allow_adding_the_same_peer_twice_to_the_torrent_peer_list() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); @@ -618,12 +618,12 @@ mod tests { use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::swarms::tests::the_swarm_repository::numeric_peer_id; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn 
it_should_return_the_peers_for_a_given_torrent() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); @@ -637,7 +637,7 @@ mod tests { #[tokio::test] async fn it_should_return_an_empty_list_or_peers_for_a_non_existing_torrent() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let peers = swarms.get_swarm_peers(&sample_info_hash(), 74).await.unwrap(); @@ -646,7 +646,7 @@ mod tests { #[tokio::test] async fn it_should_return_74_peers_at_the_most_for_a_given_torrent() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); @@ -680,12 +680,12 @@ mod tests { use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::swarms::tests::the_swarm_repository::numeric_peer_id; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn it_should_return_an_empty_peer_list_for_a_non_existing_torrent() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let peers = swarms .get_peers_peers_excluding(&sample_info_hash(), &sample_peer(), TORRENT_PEERS_LIMIT) @@ -697,7 +697,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_peers_for_a_given_torrent_excluding_a_given_peer() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); @@ -714,7 +714,7 @@ mod tests { #[tokio::test] async fn it_should_return_74_peers_at_the_most_for_a_given_torrent_when_it_filters_out_a_given_peer() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); @@ -757,12 +757,12 @@ mod tests { use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use 
crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn it_should_remove_a_torrent_entry() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); swarms.handle_announcement(&info_hash, &sample_peer(), None).await.unwrap(); @@ -774,7 +774,7 @@ mod tests { #[tokio::test] async fn it_should_count_inactive_peers() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); let mut peer = sample_peer(); @@ -790,7 +790,7 @@ mod tests { #[tokio::test] async fn it_should_remove_peers_that_have_not_been_updated_after_a_cutoff_time() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); let mut peer = sample_peer(); @@ -811,8 +811,8 @@ mod tests { .contains(&Arc::new(peer))); } - async fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { - let swarms = Arc::new(Swarms::default()); + async fn initialize_repository_with_one_torrent_without_peers(info_hash: &InfoHash) -> Arc { + let swarms = Arc::new(Registry::default()); // Insert a sample peer for the torrent to force adding the torrent entry let mut peer = sample_peer(); @@ -851,7 +851,7 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{sample_info_hash, sample_peer}; use crate::{Coordinator, SwarmHandle}; @@ -884,7 +884,7 @@ mod tests { #[tokio::test] async fn it_should_return_one_torrent_entry_by_infohash() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); @@ -915,12 +915,12 @@ mod tests { use 
crate::swarms::tests::the_swarm_repository::returning_torrent_entries::{ torrent_entry_info, TorrentEntryInfo, }; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn without_pagination() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let info_hash = sample_info_hash(); let peer = sample_peer(); @@ -955,7 +955,7 @@ mod tests { use crate::swarms::tests::the_swarm_repository::returning_torrent_entries::{ torrent_entry_info, TorrentEntryInfo, }; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{ sample_info_hash_alphabetically_ordered_after_sample_info_hash_one, sample_info_hash_one, sample_peer_one, sample_peer_two, @@ -963,7 +963,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_first_page() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); @@ -998,7 +998,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_second_page() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); @@ -1033,7 +1033,7 @@ mod tests { #[tokio::test] async fn it_should_allow_changing_the_page_size() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); // Insert one torrent entry let info_hash_one = sample_info_hash_one(); @@ -1061,14 +1061,14 @@ mod tests { use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{complete_peer, leecher, sample_info_hash, seeder}; // todo: refactor to use test parametrization #[tokio::test] async fn 
it_should_get_empty_aggregate_swarm_metadata_when_there_are_no_torrents() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let aggregate_swarm_metadata = swarms.get_aggregate_swarm_metadata().await.unwrap(); @@ -1085,7 +1085,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_leecher() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); swarms .handle_announcement(&sample_info_hash(), &leecher(), None) @@ -1107,7 +1107,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_seeder() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); swarms .handle_announcement(&sample_info_hash(), &seeder(), None) @@ -1129,7 +1129,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_is_a_completed_peer() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); swarms .handle_announcement(&sample_info_hash(), &complete_peer(), None) @@ -1151,7 +1151,7 @@ mod tests { #[tokio::test] async fn it_should_return_the_aggregate_swarm_metadata_when_there_are_multiple_torrents() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let start_time = std::time::Instant::now(); for i in 0..1_000_000 { @@ -1183,12 +1183,12 @@ mod tests { use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn no_peerless_torrents() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); assert_eq!(swarms.count_peerless_torrents().await.unwrap(), 0); } @@ -1197,7 +1197,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let swarms = Arc::new(Swarms::default()); + let swarms = 
Arc::new(Registry::default()); swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); let current_cutoff = peer.updated + DurationSinceUnixEpoch::from_secs(1); @@ -1210,12 +1210,12 @@ mod tests { mod it_should_count_peers { use std::sync::Arc; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] async fn no_peers() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); assert_eq!(swarms.count_peers().await.unwrap(), 0); } @@ -1224,7 +1224,7 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); assert_eq!(swarms.count_peers().await.unwrap(), 1); @@ -1238,12 +1238,12 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{leecher, sample_info_hash}; #[tokio::test] async fn it_should_get_swarm_metadata_for_an_existing_torrent() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let infohash = sample_info_hash(); @@ -1263,7 +1263,7 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_for_a_non_existing_torrent() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let swarm_metadata = swarms.get_swarm_metadata_or_default(&sample_info_hash()).await.unwrap(); @@ -1277,12 +1277,12 @@ mod tests { use torrust_tracker_primitives::NumberOfDownloadsBTreeMap; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::{leecher, sample_info_hash}; #[tokio::test] async fn it_should_allow_importing_persisted_torrent_entries() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let infohash = sample_info_hash(); @@ -1302,7 
+1302,7 @@ mod tests { async fn it_should_allow_overwriting_a_previously_imported_persisted_torrent() { // code-review: do we want to allow this? - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let infohash = sample_info_hash(); @@ -1321,7 +1321,7 @@ mod tests { #[tokio::test] async fn it_should_now_allow_importing_a_persisted_torrent_if_it_already_exists() { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let infohash = sample_info_hash(); @@ -1353,7 +1353,7 @@ mod tests { use crate::event::sender::tests::{expect_event_sequence, MockEventSender}; use crate::event::Event; - use crate::swarms::Swarms; + use crate::swarms::Registry; use crate::tests::sample_info_hash; #[tokio::test] @@ -1374,7 +1374,7 @@ mod tests { ], ); - let swarms = Swarms::new(Some(Arc::new(event_sender_mock))); + let swarms = Registry::new(Some(Arc::new(event_sender_mock))); swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); } @@ -1398,7 +1398,7 @@ mod tests { ], ); - let swarms = Swarms::new(Some(Arc::new(event_sender_mock))); + let swarms = Registry::new(Some(Arc::new(event_sender_mock))); swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); @@ -1425,7 +1425,7 @@ mod tests { ], ); - let swarms = Swarms::new(Some(Arc::new(event_sender_mock))); + let swarms = Registry::new(Some(Arc::new(event_sender_mock))); // Add the new torrent swarms.handle_announcement(&info_hash, &peer, None).await.unwrap(); From 63f04e57ffbf27644692fd0fb4b7527415188f4c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 29 May 2025 11:04:27 +0100 Subject: [PATCH 663/802] refactor: [#1519] extract mod coordinator --- packages/swarm-coordination-registry/src/lib.rs | 2 +- .../src/{swarm.rs => swarm/coordinator.rs} | 16 ++++++++-------- .../swarm-coordination-registry/src/swarm/mod.rs | 1 + .../swarm-coordination-registry/src/swarms.rs | 2 +- 4 files changed, 11 insertions(+), 10 deletions(-) rename 
packages/swarm-coordination-registry/src/{swarm.rs => swarm/coordinator.rs} (98%) create mode 100644 packages/swarm-coordination-registry/src/swarm/mod.rs diff --git a/packages/swarm-coordination-registry/src/lib.rs b/packages/swarm-coordination-registry/src/lib.rs index f3926331a..2e591f41c 100644 --- a/packages/swarm-coordination-registry/src/lib.rs +++ b/packages/swarm-coordination-registry/src/lib.rs @@ -11,7 +11,7 @@ use torrust_tracker_clock::clock; pub type Swarms = swarms::Registry; pub type SwarmHandle = Arc>; -pub type Coordinator = swarm::Coordinator; +pub type Coordinator = swarm::coordinator::Coordinator; /// Working version, for production. #[cfg(not(test))] diff --git a/packages/swarm-coordination-registry/src/swarm.rs b/packages/swarm-coordination-registry/src/swarm/coordinator.rs similarity index 98% rename from packages/swarm-coordination-registry/src/swarm.rs rename to packages/swarm-coordination-registry/src/swarm/coordinator.rs index 81e454d8b..1ddf3e60b 100644 --- a/packages/swarm-coordination-registry/src/swarm.rs +++ b/packages/swarm-coordination-registry/src/swarm/coordinator.rs @@ -326,7 +326,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::swarm::Coordinator; + use crate::swarm::coordinator::Coordinator; use crate::tests::sample_info_hash; #[test] @@ -553,7 +553,7 @@ mod tests { use torrust_tracker_configuration::TrackerPolicy; - use crate::swarm::tests::for_retaining_policy::{ + use crate::swarm::coordinator::tests::for_retaining_policy::{ empty_swarm, not_empty_swarm, not_empty_swarm_with_downloads, remove_peerless_torrents_policy, }; @@ -582,7 +582,7 @@ mod tests { mod when_removing_peerless_torrents_is_disabled { - use crate::swarm::tests::for_retaining_policy::{ + use crate::swarm::coordinator::tests::for_retaining_policy::{ don_not_remove_peerless_torrents_policy, empty_swarm, not_empty_swarm, }; @@ -700,7 +700,7 @@ mod tests { mod 
when_a_new_peer_is_added { use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::swarm::Coordinator; + use crate::swarm::coordinator::Coordinator; use crate::tests::sample_info_hash; #[tokio::test] @@ -747,7 +747,7 @@ mod tests { mod when_a_peer_is_removed { use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::swarm::Coordinator; + use crate::swarm::coordinator::Coordinator; use crate::tests::sample_info_hash; #[tokio::test] @@ -786,7 +786,7 @@ mod tests { use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::swarm::Coordinator; + use crate::swarm::coordinator::Coordinator; use crate::tests::sample_info_hash; #[tokio::test] @@ -824,7 +824,7 @@ mod tests { use aquatic_udp_protocol::NumberOfBytes; use torrust_tracker_primitives::peer::fixture::PeerBuilder; - use crate::swarm::Coordinator; + use crate::swarm::coordinator::Coordinator; use crate::tests::sample_info_hash; #[tokio::test] @@ -913,7 +913,7 @@ mod tests { use crate::event::sender::tests::{expect_event_sequence, MockEventSender}; use crate::event::Event; - use crate::swarm::Coordinator; + use crate::swarm::coordinator::Coordinator; use crate::tests::sample_info_hash; #[tokio::test] diff --git a/packages/swarm-coordination-registry/src/swarm/mod.rs b/packages/swarm-coordination-registry/src/swarm/mod.rs new file mode 100644 index 000000000..115b2c7c9 --- /dev/null +++ b/packages/swarm-coordination-registry/src/swarm/mod.rs @@ -0,0 +1 @@ +pub mod coordinator; diff --git a/packages/swarm-coordination-registry/src/swarms.rs b/packages/swarm-coordination-registry/src/swarms.rs index c14cb66b7..158cc88c7 100644 --- a/packages/swarm-coordination-registry/src/swarms.rs +++ b/packages/swarm-coordination-registry/src/swarms.rs @@ -11,7 +11,7 @@ use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads use crate::event::sender::Sender; use crate::event::Event; -use crate::swarm::Coordinator; +use crate::swarm::coordinator::Coordinator; use 
crate::SwarmHandle; #[derive(Default)] From cfc5b342180ccfa5e2388403ede3d7a33ac35af3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 29 May 2025 11:05:25 +0100 Subject: [PATCH 664/802] refactor: [#1519] rename mod swarms to resgistry --- .../swarm-coordination-registry/src/lib.rs | 4 +-- .../src/{swarms.rs => registry.rs} | 36 +++++++++---------- 2 files changed, 20 insertions(+), 20 deletions(-) rename packages/swarm-coordination-registry/src/{swarms.rs => registry.rs} (98%) diff --git a/packages/swarm-coordination-registry/src/lib.rs b/packages/swarm-coordination-registry/src/lib.rs index 2e591f41c..82a29b867 100644 --- a/packages/swarm-coordination-registry/src/lib.rs +++ b/packages/swarm-coordination-registry/src/lib.rs @@ -2,14 +2,14 @@ pub mod container; pub mod event; pub mod statistics; pub mod swarm; -pub mod swarms; +pub mod registry; use std::sync::Arc; use tokio::sync::Mutex; use torrust_tracker_clock::clock; -pub type Swarms = swarms::Registry; +pub type Swarms = registry::Registry; pub type SwarmHandle = Arc>; pub type Coordinator = swarm::coordinator::Coordinator; diff --git a/packages/swarm-coordination-registry/src/swarms.rs b/packages/swarm-coordination-registry/src/registry.rs similarity index 98% rename from packages/swarm-coordination-registry/src/swarms.rs rename to packages/swarm-coordination-registry/src/registry.rs index 158cc88c7..970b664ec 100644 --- a/packages/swarm-coordination-registry/src/swarms.rs +++ b/packages/swarm-coordination-registry/src/registry.rs @@ -510,7 +510,7 @@ mod tests { use aquatic_udp_protocol::PeerId; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; /// It generates a peer id from a number where the number is the last @@ -581,7 +581,7 @@ mod tests { use std::sync::Arc; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -617,8 +617,8 @@ mod tests { use 
torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::swarms::tests::the_swarm_repository::numeric_peer_id; - use crate::swarms::Registry; + use crate::registry::tests::the_swarm_repository::numeric_peer_id; + use crate::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -679,8 +679,8 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::swarms::tests::the_swarm_repository::numeric_peer_id; - use crate::swarms::Registry; + use crate::registry::tests::the_swarm_repository::numeric_peer_id; + use crate::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -757,7 +757,7 @@ mod tests { use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -851,7 +851,7 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; use crate::{Coordinator, SwarmHandle}; @@ -912,10 +912,10 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::swarms::tests::the_swarm_repository::returning_torrent_entries::{ + use crate::registry::tests::the_swarm_repository::returning_torrent_entries::{ torrent_entry_info, TorrentEntryInfo, }; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -952,10 +952,10 @@ mod tests { use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::swarms::tests::the_swarm_repository::returning_torrent_entries::{ + use 
crate::registry::tests::the_swarm_repository::returning_torrent_entries::{ torrent_entry_info, TorrentEntryInfo, }; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{ sample_info_hash_alphabetically_ordered_after_sample_info_hash_one, sample_info_hash_one, sample_peer_one, sample_peer_two, @@ -1061,7 +1061,7 @@ mod tests { use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{complete_peer, leecher, sample_info_hash, seeder}; // todo: refactor to use test parametrization @@ -1183,7 +1183,7 @@ mod tests { use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -1210,7 +1210,7 @@ mod tests { mod it_should_count_peers { use std::sync::Arc; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -1238,7 +1238,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{leecher, sample_info_hash}; #[tokio::test] @@ -1277,7 +1277,7 @@ mod tests { use torrust_tracker_primitives::NumberOfDownloadsBTreeMap; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::{leecher, sample_info_hash}; #[tokio::test] @@ -1353,7 +1353,7 @@ mod tests { use crate::event::sender::tests::{expect_event_sequence, MockEventSender}; use crate::event::Event; - use crate::swarms::Registry; + use crate::registry::Registry; use crate::tests::sample_info_hash; #[tokio::test] From 9146681a798ce22df46e069e4ea357e6c18ce8b7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 29 May 2025 11:08:21 +0100 Subject: [PATCH 665/802] refactor: [#1519] move mod registry --- 
.../swarm-coordination-registry/src/lib.rs | 3 +- .../src/swarm/mod.rs | 1 + .../src/{ => swarm}/registry.rs | 36 +++++++++---------- 3 files changed, 20 insertions(+), 20 deletions(-) rename packages/swarm-coordination-registry/src/{ => swarm}/registry.rs (97%) diff --git a/packages/swarm-coordination-registry/src/lib.rs b/packages/swarm-coordination-registry/src/lib.rs index 82a29b867..bbeb5e924 100644 --- a/packages/swarm-coordination-registry/src/lib.rs +++ b/packages/swarm-coordination-registry/src/lib.rs @@ -2,14 +2,13 @@ pub mod container; pub mod event; pub mod statistics; pub mod swarm; -pub mod registry; use std::sync::Arc; use tokio::sync::Mutex; use torrust_tracker_clock::clock; -pub type Swarms = registry::Registry; +pub type Swarms = swarm::registry::Registry; pub type SwarmHandle = Arc>; pub type Coordinator = swarm::coordinator::Coordinator; diff --git a/packages/swarm-coordination-registry/src/swarm/mod.rs b/packages/swarm-coordination-registry/src/swarm/mod.rs index 115b2c7c9..925ae4948 100644 --- a/packages/swarm-coordination-registry/src/swarm/mod.rs +++ b/packages/swarm-coordination-registry/src/swarm/mod.rs @@ -1 +1,2 @@ pub mod coordinator; +pub mod registry; diff --git a/packages/swarm-coordination-registry/src/registry.rs b/packages/swarm-coordination-registry/src/swarm/registry.rs similarity index 97% rename from packages/swarm-coordination-registry/src/registry.rs rename to packages/swarm-coordination-registry/src/swarm/registry.rs index 970b664ec..30652537b 100644 --- a/packages/swarm-coordination-registry/src/registry.rs +++ b/packages/swarm-coordination-registry/src/swarm/registry.rs @@ -510,7 +510,7 @@ mod tests { use aquatic_udp_protocol::PeerId; - use crate::registry::Registry; + use crate::swarm::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; /// It generates a peer id from a number where the number is the last @@ -581,7 +581,7 @@ mod tests { use std::sync::Arc; - use crate::registry::Registry; + use 
crate::swarm::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -617,8 +617,8 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::registry::tests::the_swarm_repository::numeric_peer_id; - use crate::registry::Registry; + use crate::swarm::registry::tests::the_swarm_repository::numeric_peer_id; + use crate::swarm::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -679,8 +679,8 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::registry::tests::the_swarm_repository::numeric_peer_id; - use crate::registry::Registry; + use crate::swarm::registry::tests::the_swarm_repository::numeric_peer_id; + use crate::swarm::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -757,7 +757,7 @@ mod tests { use torrust_tracker_configuration::TrackerPolicy; use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::registry::Registry; + use crate::swarm::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -851,7 +851,7 @@ mod tests { use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::registry::Registry; + use crate::swarm::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; use crate::{Coordinator, SwarmHandle}; @@ -912,10 +912,10 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::registry::tests::the_swarm_repository::returning_torrent_entries::{ + use crate::swarm::registry::tests::the_swarm_repository::returning_torrent_entries::{ torrent_entry_info, TorrentEntryInfo, }; - use crate::registry::Registry; + use crate::swarm::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -952,10 +952,10 @@ mod tests { use 
torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::registry::tests::the_swarm_repository::returning_torrent_entries::{ + use crate::swarm::registry::tests::the_swarm_repository::returning_torrent_entries::{ torrent_entry_info, TorrentEntryInfo, }; - use crate::registry::Registry; + use crate::swarm::registry::Registry; use crate::tests::{ sample_info_hash_alphabetically_ordered_after_sample_info_hash_one, sample_info_hash_one, sample_peer_one, sample_peer_two, @@ -1061,7 +1061,7 @@ mod tests { use bittorrent_primitives::info_hash::fixture::gen_seeded_infohash; use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; - use crate::registry::Registry; + use crate::swarm::registry::Registry; use crate::tests::{complete_peer, leecher, sample_info_hash, seeder}; // todo: refactor to use test parametrization @@ -1183,7 +1183,7 @@ mod tests { use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::registry::Registry; + use crate::swarm::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -1210,7 +1210,7 @@ mod tests { mod it_should_count_peers { use std::sync::Arc; - use crate::registry::Registry; + use crate::swarm::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; #[tokio::test] @@ -1238,7 +1238,7 @@ mod tests { use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; - use crate::registry::Registry; + use crate::swarm::registry::Registry; use crate::tests::{leecher, sample_info_hash}; #[tokio::test] @@ -1277,7 +1277,7 @@ mod tests { use torrust_tracker_primitives::NumberOfDownloadsBTreeMap; - use crate::registry::Registry; + use crate::swarm::registry::Registry; use crate::tests::{leecher, sample_info_hash}; #[tokio::test] @@ -1353,7 +1353,7 @@ mod tests { use crate::event::sender::tests::{expect_event_sequence, MockEventSender}; use crate::event::Event; - use crate::registry::Registry; + use 
crate::swarm::registry::Registry; use crate::tests::sample_info_hash; #[tokio::test] From 290c9eb491373ada84e9b3b2baa9bb596cbaffcc Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 29 May 2025 11:09:02 +0100 Subject: [PATCH 666/802] refactor: [#1519] rename Swarms to Registry --- packages/swarm-coordination-registry/src/container.rs | 6 +++--- packages/swarm-coordination-registry/src/lib.rs | 2 +- .../src/statistics/activity_metrics_updater.rs | 6 +++--- packages/tracker-core/src/torrent/manager.rs | 4 ++-- packages/tracker-core/src/torrent/repository/in_memory.rs | 6 +++--- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/packages/swarm-coordination-registry/src/container.rs b/packages/swarm-coordination-registry/src/container.rs index d185180b1..1b56b3d4b 100644 --- a/packages/swarm-coordination-registry/src/container.rs +++ b/packages/swarm-coordination-registry/src/container.rs @@ -6,10 +6,10 @@ use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; use crate::event::{self}; use crate::statistics::repository::Repository; -use crate::{statistics, Swarms}; +use crate::{statistics, Registry}; pub struct TorrentRepositoryContainer { - pub swarms: Arc, + pub swarms: Arc, pub event_bus: Arc, pub stats_event_sender: event::sender::Sender, pub stats_repository: Arc, @@ -26,7 +26,7 @@ impl TorrentRepositoryContainer { let stats_event_sender = event_bus.sender(); - let swarms = Arc::new(Swarms::new(stats_event_sender.clone())); + let swarms = Arc::new(Registry::new(stats_event_sender.clone())); Self { swarms, diff --git a/packages/swarm-coordination-registry/src/lib.rs b/packages/swarm-coordination-registry/src/lib.rs index bbeb5e924..0382c14fa 100644 --- a/packages/swarm-coordination-registry/src/lib.rs +++ b/packages/swarm-coordination-registry/src/lib.rs @@ -8,7 +8,7 @@ use std::sync::Arc; use tokio::sync::Mutex; use torrust_tracker_clock::clock; -pub type Swarms = swarm::registry::Registry; +pub type Registry = 
swarm::registry::Registry; pub type SwarmHandle = Arc>; pub type Coordinator = swarm::coordinator::Coordinator; diff --git a/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs b/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs index 2dfa5fb4e..016e230ec 100644 --- a/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs +++ b/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs @@ -11,12 +11,12 @@ use tracing::instrument; use super::repository::Repository; use crate::statistics::{TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL, TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL}; -use crate::{CurrentClock, Swarms}; +use crate::{CurrentClock, Registry}; #[must_use] #[instrument(skip(swarms, stats_repository))] pub fn start_job( - swarms: &Arc, + swarms: &Arc, stats_repository: &Arc, inactivity_cutoff: DurationSinceUnixEpoch, ) -> JoinHandle<()> { @@ -51,7 +51,7 @@ pub fn start_job( async fn update_activity_metrics( interval_in_secs: u64, - swarms: &Arc, + swarms: &Arc, stats_repository: &Arc, inactivity_cutoff: DurationSinceUnixEpoch, ) { diff --git a/packages/tracker-core/src/torrent/manager.rs b/packages/tracker-core/src/torrent/manager.rs index cbdf01193..5acc27980 100644 --- a/packages/tracker-core/src/torrent/manager.rs +++ b/packages/tracker-core/src/torrent/manager.rs @@ -148,7 +148,7 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Core; - use torrust_tracker_swarm_coordination_registry::Swarms; + use torrust_tracker_swarm_coordination_registry::Registry; use super::{DatabaseDownloadsMetricRepository, TorrentsManager}; use crate::databases::setup::initialize_database; @@ -167,7 +167,7 @@ mod tests { } fn initialize_torrents_manager_with(config: Core) -> (Arc, Arc) { - let swarms = Arc::new(Swarms::default()); + let swarms = Arc::new(Registry::default()); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(swarms)); let 
database = initialize_database(&config); let database_persistent_torrent_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index 47b34ad26..ead05a32d 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -7,7 +7,7 @@ use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; -use torrust_tracker_swarm_coordination_registry::{SwarmHandle, Swarms}; +use torrust_tracker_swarm_coordination_registry::{Registry, SwarmHandle}; /// In-memory repository for torrent entries. /// @@ -21,12 +21,12 @@ use torrust_tracker_swarm_coordination_registry::{SwarmHandle, Swarms}; #[derive(Default)] pub struct InMemoryTorrentRepository { /// The underlying in-memory data structure that stores swarms data. 
- swarms: Arc, + swarms: Arc, } impl InMemoryTorrentRepository { #[must_use] - pub fn new(swarms: Arc) -> Self { + pub fn new(swarms: Arc) -> Self { Self { swarms } } From bbe974de3537246dad431826fd3dc8764dd44375 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 29 May 2025 11:11:23 +0100 Subject: [PATCH 667/802] refactor: [#1519] rename SwarmHandle to CoordinatorHandle --- .../swarm-coordination-registry/src/lib.rs | 2 +- .../src/swarm/registry.rs | 18 +++++++++--------- .../src/torrent/repository/in_memory.rs | 6 +++--- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/packages/swarm-coordination-registry/src/lib.rs b/packages/swarm-coordination-registry/src/lib.rs index 0382c14fa..fc7996817 100644 --- a/packages/swarm-coordination-registry/src/lib.rs +++ b/packages/swarm-coordination-registry/src/lib.rs @@ -9,7 +9,7 @@ use tokio::sync::Mutex; use torrust_tracker_clock::clock; pub type Registry = swarm::registry::Registry; -pub type SwarmHandle = Arc>; +pub type CoordinatorHandle = Arc>; pub type Coordinator = swarm::coordinator::Coordinator; /// Working version, for production. 
diff --git a/packages/swarm-coordination-registry/src/swarm/registry.rs b/packages/swarm-coordination-registry/src/swarm/registry.rs index 30652537b..c8e98f307 100644 --- a/packages/swarm-coordination-registry/src/swarm/registry.rs +++ b/packages/swarm-coordination-registry/src/swarm/registry.rs @@ -12,11 +12,11 @@ use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads use crate::event::sender::Sender; use crate::event::Event; use crate::swarm::coordinator::Coordinator; -use crate::SwarmHandle; +use crate::CoordinatorHandle; #[derive(Default)] pub struct Registry { - swarms: SkipMap, + swarms: SkipMap, event_sender: Sender, } @@ -60,7 +60,7 @@ impl Registry { let number_of_downloads = opt_persistent_torrent.unwrap_or_default(); let new_swarm_handle = - SwarmHandle::new(Coordinator::new(info_hash, number_of_downloads, self.event_sender.clone()).into()); + CoordinatorHandle::new(Coordinator::new(info_hash, number_of_downloads, self.event_sender.clone()).into()); let new_swarm_handle = self.swarms.get_or_insert(*info_hash, new_swarm_handle); @@ -107,7 +107,7 @@ impl Registry { /// /// An `Option` containing the removed torrent entry if it existed. #[must_use] - pub async fn remove(&self, key: &InfoHash) -> Option { + pub async fn remove(&self, key: &InfoHash) -> Option { let swarm_handle = self.swarms.remove(key).map(|entry| entry.value().clone()); if let Some(event_sender) = self.event_sender.as_deref() { @@ -123,7 +123,7 @@ impl Registry { /// /// An `Option` containing the tracked torrent handle if found. #[must_use] - pub fn get(&self, key: &InfoHash) -> Option { + pub fn get(&self, key: &InfoHash) -> Option { let maybe_entry = self.swarms.get(key); maybe_entry.map(|entry| entry.value().clone()) } @@ -138,7 +138,7 @@ impl Registry { /// /// A vector of `(InfoHash, TorrentEntry)` tuples. 
#[must_use] - pub fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, SwarmHandle)> { + pub fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, CoordinatorHandle)> { match pagination { Some(pagination) => self .swarms @@ -366,7 +366,7 @@ impl Registry { continue; } - let entry = SwarmHandle::new(Coordinator::new(info_hash, *completed, self.event_sender.clone()).into()); + let entry = CoordinatorHandle::new(Coordinator::new(info_hash, *completed, self.event_sender.clone()).into()); // Since SkipMap is lock-free the torrent could have been inserted // after checking if it exists. @@ -853,7 +853,7 @@ mod tests { use crate::swarm::registry::Registry; use crate::tests::{sample_info_hash, sample_peer}; - use crate::{Coordinator, SwarmHandle}; + use crate::{Coordinator, CoordinatorHandle}; /// `TorrentEntry` data is not directly accessible. It's only /// accessible through the trait methods. We need this temporary @@ -865,7 +865,7 @@ mod tests { number_of_peers: usize, } - async fn torrent_entry_info(swarm_handle: SwarmHandle) -> TorrentEntryInfo { + async fn torrent_entry_info(swarm_handle: CoordinatorHandle) -> TorrentEntryInfo { let torrent_guard = swarm_handle.lock().await; torrent_guard.clone().into() } diff --git a/packages/tracker-core/src/torrent/repository/in_memory.rs b/packages/tracker-core/src/torrent/repository/in_memory.rs index ead05a32d..e50a82933 100644 --- a/packages/tracker-core/src/torrent/repository/in_memory.rs +++ b/packages/tracker-core/src/torrent/repository/in_memory.rs @@ -7,7 +7,7 @@ use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; use torrust_tracker_primitives::pagination::Pagination; use torrust_tracker_primitives::swarm_metadata::{AggregateActiveSwarmMetadata, SwarmMetadata}; use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfDownloads, NumberOfDownloadsBTreeMap}; -use torrust_tracker_swarm_coordination_registry::{Registry, SwarmHandle}; +use 
torrust_tracker_swarm_coordination_registry::{CoordinatorHandle, Registry}; /// In-memory repository for torrent entries. /// @@ -110,7 +110,7 @@ impl InMemoryTorrentRepository { /// /// An `Option` containing the torrent entry if found. #[must_use] - pub(crate) fn get(&self, key: &InfoHash) -> Option { + pub(crate) fn get(&self, key: &InfoHash) -> Option { self.swarms.get(key) } @@ -128,7 +128,7 @@ impl InMemoryTorrentRepository { /// /// A vector of `(InfoHash, TorrentEntry)` tuples. #[must_use] - pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, SwarmHandle)> { + pub(crate) fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, CoordinatorHandle)> { self.swarms.get_paginated(pagination) } From 00b9bf998269dec2e64b512fd07a0b4296985166 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 08:00:23 +0100 Subject: [PATCH 668/802] chore(deps): update dependencies ```output cargo update Updating crates.io index Locking 33 packages to latest compatible versions Updating anstyle-wincon v3.0.7 -> v3.0.8 Updating async-io v2.4.0 -> v2.4.1 Updating cc v1.2.22 -> v1.2.25 Updating clap v4.5.38 -> v4.5.39 Updating clap_builder v4.5.38 -> v4.5.39 Updating core-foundation v0.10.0 -> v0.10.1 Adding criterion v0.6.0 Removing hermit-abi v0.4.0 Updating hyper-rustls v0.27.5 -> v0.27.6 Updating hyper-util v0.1.11 -> v0.1.13 Updating icu_properties v2.0.0 -> v2.0.1 Updating icu_properties_data v2.0.0 -> v2.0.1 Adding iri-string v0.7.8 Updating libloading v0.8.7 -> v0.8.8 Updating libsqlite3-sys v0.33.0 -> v0.34.0 Removing linux-raw-sys v0.4.15 Updating lock_api v0.4.12 -> v0.4.13 Updating mio v1.0.3 -> v1.0.4 Adding once_cell_polyfill v1.70.1 Updating openssl v0.10.72 -> v0.10.73 Updating openssl-sys v0.9.108 -> v0.9.109 Updating parking_lot v0.12.3 -> v0.12.4 Updating parking_lot_core v0.9.10 -> v0.9.11 Updating polling v3.7.4 -> v3.8.0 Updating r2d2_sqlite v0.28.0 -> v0.29.0 Updating reqwest v0.12.15 -> v0.12.18 
Updating rusqlite v0.35.0 -> v0.36.0 Removing rustix v0.38.44 Updating rustversion v1.0.20 -> v1.0.21 Updating socket2 v0.5.9 -> v0.5.10 Updating tokio v1.45.0 -> v1.45.1 Updating tower-http v0.6.4 -> v0.6.5 Updating uuid v1.16.0 -> v1.17.0 Updating windows-core v0.61.1 -> v0.61.2 Updating windows-result v0.3.3 -> v0.3.4 Updating windows-strings v0.4.1 -> v0.4.2 ``` --- Cargo.lock | 239 +++++++++++++++++++++++++++++------------------------ 1 file changed, 131 insertions(+), 108 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ecf178a59..35040f516 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -120,12 +120,12 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version = "3.0.7" +version = "3.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" +checksum = "6680de5231bd6ee4c6191b8a1325daa282b415391ec9d3a37bd34f2060dc73fa" dependencies = [ "anstyle", - "once_cell", + "once_cell_polyfill", "windows-sys 0.59.0", ] @@ -263,9 +263,9 @@ dependencies = [ [[package]] name = "async-io" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" +checksum = "1237c0ae75a0f3765f58910ff9cdd0a12eeb39ab2f4c7de23262f337f0aacbb3" dependencies = [ "async-lock", "cfg-if", @@ -274,7 +274,7 @@ dependencies = [ "futures-lite", "parking", "polling", - "rustix 0.38.44", + "rustix", "slab", "tracing", "windows-sys 0.59.0", @@ -579,7 +579,7 @@ dependencies = [ "bittorrent-http-tracker-protocol", "bittorrent-primitives", "bittorrent-tracker-core", - "criterion", + "criterion 0.5.1", "formatjson", "futures", "mockall", @@ -697,7 +697,7 @@ dependencies = [ "bloom", "blowfish", "cipher", - "criterion", + "criterion 0.5.1", "futures", "lazy_static", "mockall", @@ -959,9 +959,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.22" +version = "1.2.25" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "32db95edf998450acc7881c932f94cd9b05c87b4b2599e8bab064753da4acfd1" +checksum = "d0fc897dc1e865cc67c0e05a836d9d3f1df3cbe442aa4a9473b18e12624a4951" dependencies = [ "jobserver", "libc", @@ -1052,9 +1052,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.38" +version = "4.5.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed93b9805f8ba930df42c2590f05453d5ec36cbb85d018868a5b24d31f6ac000" +checksum = "fd60e63e9be68e5fb56422e397cf9baddded06dae1d2e523401542383bc72a9f" dependencies = [ "clap_builder", "clap_derive", @@ -1062,9 +1062,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.38" +version = "4.5.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "379026ff283facf611b0ea629334361c4211d1b12ee01024eec1591133b04120" +checksum = "89cc6392a1f72bbeb820d71f32108f61fdaf18bc526e1d23954168a67759ef51" dependencies = [ "anstream", "anstyle", @@ -1139,9 +1139,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" dependencies = [ "core-foundation-sys", "libc", @@ -1199,6 +1199,30 @@ dependencies = [ "walkdir", ] +[[package]] +name = "criterion" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bf7af66b0989381bd0be551bd7cc91912a655a58c6918420c9527b1fd8b4679" +dependencies = [ + "anes", + "cast", + "ciborium", + "clap", + "criterion-plot", + "itertools 0.13.0", + "num-traits", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_json", + "tinytemplate", + "tokio", + "walkdir", +] + [[package]] name = "criterion-plot" version = "0.5.0" @@ -1931,12 +1955,6 @@ version = "0.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" -[[package]] -name = "hermit-abi" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" - [[package]] name = "hermit-abi" version = "0.5.1" @@ -2048,11 +2066,10 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.5" +version = "0.27.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" +checksum = "03a01595e11bdcec50946522c32dde3fc6914743000a68b93000965f2f02406d" dependencies = [ - "futures-util", "http", "hyper", "hyper-util", @@ -2081,22 +2098,28 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.11" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497bbc33a26fdd4af9ed9c70d63f61cf56a938375fbb32df34db9b1cd6d643f2" +checksum = "b1c293b6b3d21eca78250dc7dbebd6b9210ec5530e038cbfe0661b5c47ab06e8" dependencies = [ + "base64 0.22.1", "bytes", "futures-channel", + "futures-core", "futures-util", "http", "http-body", "hyper", + "ipnet", "libc", + "percent-encoding", "pin-project-lite", "socket2", + "system-configuration", "tokio", "tower-service", "tracing", + "windows-registry", ] [[package]] @@ -2187,9 +2210,9 @@ checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = "icu_properties" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2549ca8c7241c82f59c80ba2a6f415d931c5b58d24fb8412caa1a1f02c49139a" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" dependencies = [ "displaydoc", "icu_collections", @@ -2203,9 +2226,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "2.0.0" +version = "2.0.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8197e866e47b68f8f7d95249e172903bec06004b18b2937f1095d40a0c57de04" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" [[package]] name = "icu_provider" @@ -2303,13 +2326,23 @@ version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +[[package]] +name = "iri-string" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" +dependencies = [ + "memchr", + "serde", +] + [[package]] name = "is-terminal" version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" dependencies = [ - "hermit-abi 0.5.1", + "hermit-abi", "libc", "windows-sys 0.59.0", ] @@ -2393,9 +2426,9 @@ checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" [[package]] name = "libloading" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a793df0d7afeac54f95b471d3af7f0d4fb975699f972341a4b76988d49cdf0c" +checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" dependencies = [ "cfg-if", "windows-targets 0.53.0", @@ -2420,9 +2453,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.33.0" +version = "0.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "947e6816f7825b2b45027c2c32e7085da9934defa535de4a6a46b10a4d5257fa" +checksum = "91632f3b4fb6bd1d72aa3d78f41ffecfcf2b1a6648d8c241dbe7dbfaf4875e15" dependencies = [ "cc", "pkg-config", @@ -2440,12 +2473,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "linux-raw-sys" -version = "0.4.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" - [[package]] name = "linux-raw-sys" version = "0.9.4" @@ -2472,9 +2499,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" dependencies = [ "autocfg", "scopeguard", @@ -2563,13 +2590,13 @@ dependencies = [ [[package]] name = "mio" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -2815,6 +2842,12 @@ version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +[[package]] +name = "once_cell_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" + [[package]] name = "oorandom" version = "11.1.5" @@ -2823,9 +2856,9 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "openssl" -version = "0.10.72" +version = "0.10.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fedfea7d58a1f73118430a55da6a286e7b044961736ce96a16a17068ea25e5da" +checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" dependencies = [ "bitflags 2.9.1", "cfg-if", @@ -2855,9 +2888,9 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-sys" -version = "0.9.108" +version = 
"0.9.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e145e1651e858e820e4860f7b9c5e169bc1d8ce1c86043be79fa7b7634821847" +checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" dependencies = [ "cc", "libc", @@ -2885,9 +2918,9 @@ checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" dependencies = [ "lock_api", "parking_lot_core", @@ -2895,9 +2928,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ "cfg-if", "libc", @@ -3067,15 +3100,15 @@ dependencies = [ [[package]] name = "polling" -version = "3.7.4" +version = "3.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" +checksum = "b53a684391ad002dd6a596ceb6c74fd004fdce75f4be2e3f615068abbea5fd50" dependencies = [ "cfg-if", "concurrent-queue", - "hermit-abi 0.4.0", + "hermit-abi", "pin-project-lite", - "rustix 0.38.44", + "rustix", "tracing", "windows-sys 0.59.0", ] @@ -3277,9 +3310,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8998443b32daee2ad6f528afb19ad77c4a8acc4d8d55b3e5072ed42862fe261a" +checksum = "35006423374afbd4b270acddcbf1e28e60f6bdaaad10c2888b8fd2fba035213c" dependencies = [ "r2d2", "rusqlite", @@ -3435,15 +3468,14 @@ dependencies = [ 
[[package]] name = "reqwest" -version = "0.12.15" +version = "0.12.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb" +checksum = "e98ff6b0dbbe4d5a37318f433d4fc82babd21631f194d370409ceb2e40b2f0b5" dependencies = [ "base64 0.22.1", "bytes", "encoding_rs", "futures-core", - "futures-util", "h2", "http", "http-body", @@ -3460,21 +3492,20 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls-pemfile", + "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", "sync_wrapper", - "system-configuration", "tokio", "tokio-native-tls", "tower", + "tower-http", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "windows-registry", ] [[package]] @@ -3563,9 +3594,9 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a22715a5d6deef63c637207afbe68d0c72c3f8d0022d7cf9714c442d6157606b" +checksum = "3de23c3319433716cf134eed225fe9986bc24f63bed9be9f20c329029e672dc7" dependencies = [ "bitflags 2.9.1", "fallible-iterator", @@ -3612,19 +3643,6 @@ dependencies = [ "semver", ] -[[package]] -name = "rustix" -version = "0.38.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" -dependencies = [ - "bitflags 2.9.1", - "errno", - "libc", - "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", -] - [[package]] name = "rustix" version = "1.0.7" @@ -3634,7 +3652,7 @@ dependencies = [ "bitflags 2.9.1", "errno", "libc", - "linux-raw-sys 0.9.4", + "linux-raw-sys", "windows-sys 0.59.0", ] @@ -3695,9 +3713,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" 
+checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" [[package]] name = "ryu" @@ -3770,7 +3788,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ "bitflags 2.9.1", - "core-foundation 0.10.0", + "core-foundation 0.10.1", "core-foundation-sys", "libc", "security-framework-sys", @@ -4004,9 +4022,9 @@ checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" [[package]] name = "socket2" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" dependencies = [ "libc", "windows-sys 0.52.0", @@ -4185,7 +4203,7 @@ dependencies = [ "fastrand", "getrandom 0.3.3", "once_cell", - "rustix 1.0.7", + "rustix", "windows-sys 0.59.0", ] @@ -4204,7 +4222,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed" dependencies = [ - "rustix 1.0.7", + "rustix", "windows-sys 0.59.0", ] @@ -4371,9 +4389,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.45.0" +version = "1.45.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2513ca694ef9ede0fb23fe71a4ee4107cb102b9dc1930f6d0fd77aae068ae165" +checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779" dependencies = [ "backtrace", "bytes", @@ -4762,7 +4780,7 @@ dependencies = [ name = "torrust-tracker-contrib-bencode" version = "3.0.0-develop" dependencies = [ - "criterion", + "criterion 0.6.0", "thiserror 2.0.12", ] @@ -4826,11 +4844,11 @@ dependencies = [ "async-std", "bittorrent-primitives", "chrono", - "criterion", + "criterion 0.6.0", 
"crossbeam-skiplist", "futures", "mockall", - "rand 0.8.5", + "rand 0.9.1", "rstest", "serde", "thiserror 2.0.12", @@ -4861,7 +4879,7 @@ dependencies = [ "aquatic_udp_protocol", "async-std", "bittorrent-primitives", - "criterion", + "criterion 0.6.0", "crossbeam-skiplist", "dashmap", "futures", @@ -4926,19 +4944,22 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fdb0c213ca27a9f57ab69ddb290fd80d970922355b83ae380b395d3986b8a2e" +checksum = "5cc2d9e086a412a451384326f521c8123a99a466b329941a9403696bff9b0da2" dependencies = [ "async-compression", "bitflags 2.9.1", "bytes", "futures-core", + "futures-util", "http", "http-body", + "iri-string", "pin-project-lite", "tokio", "tokio-util", + "tower", "tower-layer", "tower-service", "tracing", @@ -5122,12 +5143,14 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" +checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" dependencies = [ "getrandom 0.3.3", + "js-sys", "rand 0.9.1", + "wasm-bindgen", ] [[package]] @@ -5302,15 +5325,15 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.61.1" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46ec44dc15085cea82cf9c78f85a9114c463a369786585ad2882d1ff0b0acf40" +checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ "windows-implement", "windows-interface", "windows-link", "windows-result", - "windows-strings 0.4.1", + "windows-strings 0.4.2", ] [[package]] @@ -5354,9 +5377,9 @@ dependencies = [ [[package]] name = "windows-result" -version = 
"0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b895b5356fc36103d0f64dd1e94dfa7ac5633f1c9dd6e80fe9ec4adef69e09d" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" dependencies = [ "windows-link", ] @@ -5372,9 +5395,9 @@ dependencies = [ [[package]] name = "windows-strings" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a7ab927b2637c19b3dbe0965e75d8f2d30bdd697a1516191cad2ec4df8fb28a" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" dependencies = [ "windows-link", ] @@ -5565,7 +5588,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d65cbf2f12c15564212d48f4e3dfb87923d25d611f2aed18f4cb23f0413d89e" dependencies = [ "libc", - "rustix 1.0.7", + "rustix", ] [[package]] From caa03cc88a912d8ef2c8041aba5b3eb2ddf6ed95 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 10:47:51 +0100 Subject: [PATCH 669/802] fix: deprecated function criterion::black_box --- contrib/bencode/benches/bencode_benchmark.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/contrib/bencode/benches/bencode_benchmark.rs b/contrib/bencode/benches/bencode_benchmark.rs index b79bb0999..b22b286a5 100644 --- a/contrib/bencode/benches/bencode_benchmark.rs +++ b/contrib/bencode/benches/bencode_benchmark.rs @@ -1,4 +1,6 @@ -use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use std::hint::black_box; + +use criterion::{criterion_group, criterion_main, Criterion}; use torrust_tracker_contrib_bencode::{BDecodeOpt, BencodeRef}; const B_NESTED_LISTS: &[u8; 100] = From 9c3c9109f575b221749f6baff0c9909197d46650 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 10:50:19 +0100 Subject: [PATCH 670/802] chore: add GitHhub MCP server config --- .vscode/mcp.json | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 
100644 .vscode/mcp.json diff --git a/.vscode/mcp.json b/.vscode/mcp.json new file mode 100644 index 000000000..506a52259 --- /dev/null +++ b/.vscode/mcp.json @@ -0,0 +1,26 @@ +{ + "inputs": [ + { + "type": "promptString", + "id": "github_token", + "description": "GitHub Personal Access Token", + "password": true + } + ], + "servers": { + "github": { + "command": "docker", + "args": [ + "run", + "-i", + "--rm", + "-e", + "GITHUB_PERSONAL_ACCESS_TOKEN", + "ghcr.io/github/github-mcp-server" + ], + "env": { + "GITHUB_PERSONAL_ACCESS_TOKEN": "${input:github_token}" + } + } + } +} \ No newline at end of file From db1c9b066d3bb5e0458f7d03dbdf6c2a6b251303 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 11:53:06 +0100 Subject: [PATCH 671/802] fix: test after updating dependencies --- .../tests/server/contract.rs | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/packages/axum-health-check-api-server/tests/server/contract.rs b/packages/axum-health-check-api-server/tests/server/contract.rs index 0e0d26b83..1d1ba3539 100644 --- a/packages/axum-health-check-api-server/tests/server/contract.rs +++ b/packages/axum-health-check-api-server/tests/server/contract.rs @@ -119,11 +119,8 @@ mod api { assert_eq!(details.binding, binding); assert!( - details - .result - .as_ref() - .is_err_and(|e| e.contains("error sending request for url")), - "Expected to contain, \"error sending request for url\", but have message \"{:?}\".", + details.result.as_ref().is_err_and(|e| e.contains("error sending request")), + "Expected to contain, \"error sending request\", but have message \"{:?}\".", details.result ); assert_eq!( @@ -226,11 +223,8 @@ mod http { assert_eq!(details.binding, binding); assert!( - details - .result - .as_ref() - .is_err_and(|e| e.contains("error sending request for url")), - "Expected to contain, \"error sending request for url\", but have message \"{:?}\".", + details.result.as_ref().is_err_and(|e| e.contains("error sending 
request")), + "Expected to contain, \"error sending request\", but have message \"{:?}\".", details.result ); assert_eq!( From 52b9660eeb6172fc4a03285751d9fe201eaca7a4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 13:49:09 +0100 Subject: [PATCH 672/802] feat: [#1456] wrapper over aquatic RequestParseError to make it sendable The error will be included in the UdpError event ans sent via tokio channel. --- packages/udp-tracker-server/src/error.rs | 34 +++++++++++++++++-- .../udp-tracker-server/src/handlers/error.rs | 20 +++-------- 2 files changed, 35 insertions(+), 19 deletions(-) diff --git a/packages/udp-tracker-server/src/error.rs b/packages/udp-tracker-server/src/error.rs index 93caf6853..6a63a4c9a 100644 --- a/packages/udp-tracker-server/src/error.rs +++ b/packages/udp-tracker-server/src/error.rs @@ -1,7 +1,7 @@ //! Error types for the UDP server. use std::panic::Location; -use aquatic_udp_protocol::{ConnectionId, RequestParseError}; +use aquatic_udp_protocol::{ConnectionId, RequestParseError, TransactionId}; use bittorrent_udp_tracker_core::services::announce::UdpAnnounceError; use bittorrent_udp_tracker_core::services::scrape::UdpScrapeError; use derive_more::derive::Display; @@ -17,7 +17,7 @@ pub struct ConnectionCookie(pub ConnectionId); pub enum Error { /// Error returned when the request is invalid. #[error("error when phrasing request: {request_parse_error:?}")] - RequestParseError { request_parse_error: RequestParseError }, + RequestParseError { request_parse_error: SendableRequestParseError }, /// Error returned when the domain tracker returns an announce error. 
#[error("tracker announce error: {source}")] @@ -47,7 +47,9 @@ pub enum Error { impl From for Error { fn from(request_parse_error: RequestParseError) -> Self { - Self::RequestParseError { request_parse_error } + Self::RequestParseError { + request_parse_error: request_parse_error.into(), + } } } @@ -66,3 +68,29 @@ impl From for Error { } } } + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct SendableRequestParseError { + pub message: String, + pub opt_connection_id: Option, + pub opt_transaction_id: Option, +} + +impl From for SendableRequestParseError { + fn from(request_parse_error: RequestParseError) -> Self { + let (message, opt_connection_id, opt_transaction_id) = match request_parse_error { + RequestParseError::Sendable { + connection_id, + transaction_id, + err, + } => ((*err).to_string(), Some(connection_id), Some(transaction_id)), + RequestParseError::Unsendable { err } => (err.to_string(), None, None), + }; + + Self { + message, + opt_connection_id, + opt_transaction_id, + } + } +} diff --git a/packages/udp-tracker-server/src/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs index 6259e26ca..7b477d84f 100644 --- a/packages/udp-tracker-server/src/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -2,8 +2,7 @@ use std::net::SocketAddr; use std::ops::Range; -use aquatic_udp_protocol::{ErrorResponse, RequestParseError, Response, TransactionId}; -use bittorrent_udp_tracker_core::connection_cookie::{check, gen_remote_fingerprint}; +use aquatic_udp_protocol::{ErrorResponse, Response, TransactionId}; use bittorrent_udp_tracker_core::{self, UDP_TRACKER_LOG_TARGET}; use torrust_tracker_primitives::service_binding::ServiceBinding; use tracing::{instrument, Level}; @@ -40,25 +39,14 @@ pub async fn handle_error( } let e = if let Error::RequestParseError { request_parse_error } = e { - match request_parse_error { - RequestParseError::Sendable { - connection_id, - transaction_id, - err, - } => { - if let Err(e) = 
check(connection_id, gen_remote_fingerprint(&client_socket_addr), cookie_valid_range) { - (e.to_string(), Some(*transaction_id)) - } else { - ((*err).to_string(), Some(*transaction_id)) - } - } - RequestParseError::Unsendable { err } => (err.to_string(), transaction_id), - } + (request_parse_error.message.clone(), transaction_id) } else { (e.to_string(), transaction_id) }; if e.1.is_some() { + // code-review: why we trigger an event only if transaction_id is present? + if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { udp_server_stats_event_sender .send(Event::UdpError { From 8f3c22aaa3bbdb643545af72c48e27499f3a283c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 16:29:27 +0100 Subject: [PATCH 673/802] feat: [#1456] expose error kind in the UdpError event Not exposing the original complex error type becuase: - It's too complex. - It forces all errors to be "Sent", "PartialEq". - It would expose a lot of internals. --- packages/tracker-core/src/error.rs | 2 +- .../udp-tracker-core/src/connection_cookie.rs | 2 +- packages/udp-tracker-server/src/error.rs | 13 +++++- packages/udp-tracker-server/src/event.rs | 45 ++++++++++++++++++- .../udp-tracker-server/src/handlers/error.rs | 11 ++--- .../src/statistics/event/handler.rs | 6 ++- 6 files changed, 68 insertions(+), 11 deletions(-) diff --git a/packages/tracker-core/src/error.rs b/packages/tracker-core/src/error.rs index 4a35e9a0b..866aa64c5 100644 --- a/packages/tracker-core/src/error.rs +++ b/packages/tracker-core/src/error.rs @@ -84,7 +84,7 @@ pub enum ScrapeError { /// /// This error is returned when an operation involves a torrent that is not /// present in the whitelist. -#[derive(thiserror::Error, Debug, Clone)] +#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] pub enum WhitelistError { /// Indicates that the torrent identified by `info_hash` is not whitelisted. 
#[error("The torrent: {info_hash}, is not whitelisted, {location}")] diff --git a/packages/udp-tracker-core/src/connection_cookie.rs b/packages/udp-tracker-core/src/connection_cookie.rs index 31c116400..ce255705f 100644 --- a/packages/udp-tracker-core/src/connection_cookie.rs +++ b/packages/udp-tracker-core/src/connection_cookie.rs @@ -86,7 +86,7 @@ use zerocopy::AsBytes; use crate::crypto::keys::CipherArrayBlowfish; /// Error returned when there was an error with the connection cookie. -#[derive(Error, Debug, Clone)] +#[derive(Error, Debug, Clone, PartialEq)] pub enum ConnectionCookieError { #[error("cookie value is not normal: {not_normal_value}")] ValueNotNormal { not_normal_value: f64 }, diff --git a/packages/udp-tracker-server/src/error.rs b/packages/udp-tracker-server/src/error.rs index 6a63a4c9a..d45b96569 100644 --- a/packages/udp-tracker-server/src/error.rs +++ b/packages/udp-tracker-server/src/error.rs @@ -1,4 +1,5 @@ //! Error types for the UDP server. +use std::fmt::Display; use std::panic::Location; use aquatic_udp_protocol::{ConnectionId, RequestParseError, TransactionId}; @@ -13,7 +14,7 @@ use torrust_tracker_located_error::LocatedError; pub struct ConnectionCookie(pub ConnectionId); /// Error returned by the UDP server. -#[derive(Error, Debug)] +#[derive(Error, Debug, Clone)] pub enum Error { /// Error returned when the request is invalid. 
#[error("error when phrasing request: {request_parse_error:?}")] @@ -76,6 +77,16 @@ pub struct SendableRequestParseError { pub opt_transaction_id: Option, } +impl Display for SendableRequestParseError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "SendableRequestParseError: message: {}, connection_id: {:?}, transaction_id: {:?}", + self.message, self.opt_connection_id, self.opt_transaction_id + ) + } +} + impl From for SendableRequestParseError { fn from(request_parse_error: RequestParseError) -> Self { let (message, opt_connection_id, opt_transaction_id) = match request_parse_error { diff --git a/packages/udp-tracker-server/src/event.rs b/packages/udp-tracker-server/src/event.rs index 8aabd7ffb..4d3646563 100644 --- a/packages/udp-tracker-server/src/event.rs +++ b/packages/udp-tracker-server/src/event.rs @@ -2,12 +2,17 @@ use std::fmt; use std::net::SocketAddr; use std::time::Duration; +use bittorrent_tracker_core::error::{AnnounceError, ScrapeError}; +use bittorrent_udp_tracker_core::services::announce::UdpAnnounceError; +use bittorrent_udp_tracker_core::services::scrape::UdpScrapeError; use torrust_tracker_metrics::label::{LabelSet, LabelValue}; use torrust_tracker_metrics::label_name; use torrust_tracker_primitives::service_binding::ServiceBinding; +use crate::error::Error; + /// A UDP server event. 
-#[derive(Debug, PartialEq, Eq, Clone)] +#[derive(Debug, Clone, PartialEq)] pub enum Event { UdpRequestReceived { context: ConnectionContext, @@ -30,6 +35,7 @@ pub enum Event { UdpError { context: ConnectionContext, kind: Option, + error: ErrorKind, }, } @@ -109,6 +115,43 @@ impl From for LabelSet { } } +#[derive(Debug, Clone, PartialEq)] +pub enum ErrorKind { + RequestParse(String), + ConnectionCookie(String), + Whitelist(String), + Database(String), + InternalServer(String), + BadRequest(String), + TrackerAuthentication(String), +} + +impl From for ErrorKind { + fn from(error: Error) -> Self { + match error { + Error::RequestParseError { request_parse_error } => Self::RequestParse(request_parse_error.to_string()), + Error::UdpAnnounceError { source } => match source { + UdpAnnounceError::ConnectionCookieError { source } => Self::ConnectionCookie(source.to_string()), + UdpAnnounceError::TrackerCoreAnnounceError { source } => match source { + AnnounceError::Whitelist(whitelist_error) => Self::Whitelist(whitelist_error.to_string()), + AnnounceError::Database(error) => Self::Database(error.to_string()), + }, + UdpAnnounceError::TrackerCoreWhitelistError { source } => Self::Whitelist(source.to_string()), + }, + Error::UdpScrapeError { source } => match source { + UdpScrapeError::ConnectionCookieError { source } => Self::ConnectionCookie(source.to_string()), + UdpScrapeError::TrackerCoreScrapeError { source } => match source { + ScrapeError::Whitelist(whitelist_error) => Self::Whitelist(whitelist_error.to_string()), + }, + UdpScrapeError::TrackerCoreWhitelistError { source } => Self::Whitelist(source.to_string()), + }, + Error::InternalServer { location: _, message } => Self::InternalServer(message.to_string()), + Error::BadRequest { source } => Self::BadRequest(source.to_string()), + Error::TrackerAuthenticationRequired { location } => Self::TrackerAuthentication(location.to_string()), + } + } +} + pub mod sender { use std::sync::Arc; diff --git 
a/packages/udp-tracker-server/src/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs index 7b477d84f..54163aca5 100644 --- a/packages/udp-tracker-server/src/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -21,7 +21,7 @@ pub async fn handle_error( request_id: Uuid, opt_udp_server_stats_event_sender: &crate::event::sender::Sender, cookie_valid_range: Range, - e: &Error, + error: &Error, transaction_id: Option, ) -> Response { tracing::trace!("handle error"); @@ -31,17 +31,17 @@ pub async fn handle_error( match transaction_id { Some(transaction_id) => { let transaction_id = transaction_id.0.to_string(); - tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %e, %client_socket_addr, %server_socket_addr, %request_id, %transaction_id, "response error"); + tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %error, %client_socket_addr, %server_socket_addr, %request_id, %transaction_id, "response error"); } None => { - tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %e, %client_socket_addr, %server_socket_addr, %request_id, "response error"); + tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %error, %client_socket_addr, %server_socket_addr, %request_id, "response error"); } } - let e = if let Error::RequestParseError { request_parse_error } = e { + let e = if let Error::RequestParseError { request_parse_error } = error { (request_parse_error.message.clone(), transaction_id) } else { - (e.to_string(), transaction_id) + (error.to_string(), transaction_id) }; if e.1.is_some() { @@ -52,6 +52,7 @@ pub async fn handle_error( .send(Event::UdpError { context: ConnectionContext::new(client_socket_addr, server_service_binding), kind: req_kind, + error: error.clone().into(), }) .await; } diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index 1e1502339..b231d8336 100644 --- 
a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -232,7 +232,7 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura Err(err) => tracing::error!("Failed to increase the counter: {}", err), }; } - Event::UdpError { context, kind } => { + Event::UdpError { context, kind, error: _ } => { // Global fixed metrics match context.client_socket_addr().ip() { std::net::IpAddr::V4(_) => { @@ -271,7 +271,7 @@ mod tests { use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; - use crate::event::{ConnectionContext, Event, UdpRequestKind}; + use crate::event::{ConnectionContext, ErrorKind, Event, UdpRequestKind}; use crate::statistics::event::handler::handle_event; use crate::statistics::repository::Repository; use crate::CurrentClock; @@ -518,6 +518,7 @@ mod tests { .unwrap(), ), kind: None, + error: ErrorKind::RequestParse("Invalid request format".to_string()), }, &stats_repository, CurrentClock::now(), @@ -650,6 +651,7 @@ mod tests { .unwrap(), ), kind: None, + error: ErrorKind::RequestParse("Invalid request format".to_string()), }, &stats_repository, CurrentClock::now(), From d7902f1d670bf4411303fa3934e0a4ce595a20ef Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 16:36:50 +0100 Subject: [PATCH 674/802] refactor: [#1456] remove unused enum variant in udp server error --- Cargo.lock | 1 - packages/udp-tracker-server/Cargo.toml | 1 - packages/udp-tracker-server/src/error.rs | 7 ------- packages/udp-tracker-server/src/event.rs | 1 - packages/udp-tracker-server/src/handlers/mod.rs | 1 + 5 files changed, 1 insertion(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 35040f516..feb749d3f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4915,7 +4915,6 @@ dependencies = [ "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-events", - "torrust-tracker-located-error", 
"torrust-tracker-metrics", "torrust-tracker-primitives", "torrust-tracker-swarm-coordination-registry", diff --git a/packages/udp-tracker-server/Cargo.toml b/packages/udp-tracker-server/Cargo.toml index 72fa520ba..c0bc94ce3 100644 --- a/packages/udp-tracker-server/Cargo.toml +++ b/packages/udp-tracker-server/Cargo.toml @@ -30,7 +30,6 @@ torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } -torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } diff --git a/packages/udp-tracker-server/src/error.rs b/packages/udp-tracker-server/src/error.rs index d45b96569..aecf960b8 100644 --- a/packages/udp-tracker-server/src/error.rs +++ b/packages/udp-tracker-server/src/error.rs @@ -7,7 +7,6 @@ use bittorrent_udp_tracker_core::services::announce::UdpAnnounceError; use bittorrent_udp_tracker_core::services::scrape::UdpScrapeError; use derive_more::derive::Display; use thiserror::Error; -use torrust_tracker_located_error::LocatedError; #[derive(Display, Debug)] #[display(":?")] @@ -35,12 +34,6 @@ pub enum Error { message: String, }, - /// Error returned when the request is invalid. - #[error("bad request: {source}")] - BadRequest { - source: LocatedError<'static, dyn std::error::Error + Send + Sync>, - }, - /// Error returned when tracker requires authentication. #[error("domain tracker requires authentication but is not supported in current UDP implementation. 
Location: {location}")] TrackerAuthenticationRequired { location: &'static Location<'static> }, diff --git a/packages/udp-tracker-server/src/event.rs b/packages/udp-tracker-server/src/event.rs index 4d3646563..e320ceb8a 100644 --- a/packages/udp-tracker-server/src/event.rs +++ b/packages/udp-tracker-server/src/event.rs @@ -146,7 +146,6 @@ impl From for ErrorKind { UdpScrapeError::TrackerCoreWhitelistError { source } => Self::Whitelist(source.to_string()), }, Error::InternalServer { location: _, message } => Self::InternalServer(message.to_string()), - Error::BadRequest { source } => Self::BadRequest(source.to_string()), Error::TrackerAuthenticationRequired { location } => Self::TrackerAuthentication(location.to_string()), } } diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index df550ab72..6785bd293 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -109,6 +109,7 @@ pub(crate) async fn handle_packet( } }, Err(e) => { + // The request payload could not be parsed, so we handle it as an error. let response = handle_error( None, udp_request.from, From 0108c26b6db35d11522589cb20ce62904a97c059 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 16:54:40 +0100 Subject: [PATCH 675/802] fix: test. Error message changed --- packages/udp-tracker-server/src/error.rs | 2 +- packages/udp-tracker-server/tests/server/contract.rs | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/packages/udp-tracker-server/src/error.rs b/packages/udp-tracker-server/src/error.rs index aecf960b8..697cc5cab 100644 --- a/packages/udp-tracker-server/src/error.rs +++ b/packages/udp-tracker-server/src/error.rs @@ -16,7 +16,7 @@ pub struct ConnectionCookie(pub ConnectionId); #[derive(Error, Debug, Clone)] pub enum Error { /// Error returned when the request is invalid. 
- #[error("error when phrasing request: {request_parse_error:?}")] + #[error("error parsing request: {request_parse_error:?}")] RequestParseError { request_parse_error: SendableRequestParseError }, /// Error returned when the domain tracker returns an announce error. diff --git a/packages/udp-tracker-server/tests/server/contract.rs b/packages/udp-tracker-server/tests/server/contract.rs index 860fd1f0b..04ad0f39d 100644 --- a/packages/udp-tracker-server/tests/server/contract.rs +++ b/packages/udp-tracker-server/tests/server/contract.rs @@ -59,7 +59,9 @@ async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_req let response = Response::parse_bytes(&response, true).unwrap(); - assert_eq!(get_error_response_message(&response).unwrap(), "Protocol identifier missing"); + assert!(get_error_response_message(&response) + .unwrap() + .contains("Protocol identifier missing")); env.stop().await; } From f485501f8e7705fe886932d5889b79c8eafb9057 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 16:55:24 +0100 Subject: [PATCH 676/802] refactor: [#1456 clean code --- .../udp-tracker-server/src/handlers/error.rs | 16 +++++----------- packages/udp-tracker-server/src/handlers/mod.rs | 9 ++++++++- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/packages/udp-tracker-server/src/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs index 54163aca5..4ebe24075 100644 --- a/packages/udp-tracker-server/src/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -22,13 +22,13 @@ pub async fn handle_error( opt_udp_server_stats_event_sender: &crate::event::sender::Sender, cookie_valid_range: Range, error: &Error, - transaction_id: Option, + opt_transaction_id: Option, ) -> Response { tracing::trace!("handle error"); let server_socket_addr = server_service_binding.bind_address(); - match transaction_id { + match opt_transaction_id { Some(transaction_id) => { let transaction_id = 
transaction_id.0.to_string(); tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %error, %client_socket_addr, %server_socket_addr, %request_id, %transaction_id, "response error"); @@ -38,13 +38,7 @@ pub async fn handle_error( } } - let e = if let Error::RequestParseError { request_parse_error } = error { - (request_parse_error.message.clone(), transaction_id) - } else { - (error.to_string(), transaction_id) - }; - - if e.1.is_some() { + if opt_transaction_id.is_some() { // code-review: why we trigger an event only if transaction_id is present? if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { @@ -59,7 +53,7 @@ pub async fn handle_error( } Response::from(ErrorResponse { - transaction_id: e.1.unwrap_or(TransactionId(I32::new(0))), - message: e.0.into(), + transaction_id: opt_transaction_id.unwrap_or(TransactionId(I32::new(0))), + message: error.to_string().into(), }) } diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 6785bd293..69c62a638 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -110,6 +110,13 @@ pub(crate) async fn handle_packet( }, Err(e) => { // The request payload could not be parsed, so we handle it as an error. 
+ + let opt_transaction_id = if let Error::RequestParseError { request_parse_error } = e.clone() { + request_parse_error.opt_transaction_id + } else { + None + }; + let response = handle_error( None, udp_request.from, @@ -118,7 +125,7 @@ pub(crate) async fn handle_packet( &udp_tracker_server_container.stats_event_sender, cookie_time_values.valid_range.clone(), &e, - None, + opt_transaction_id, ) .await; From 525ab738d485a15175a8924520d88f66515f927a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 17:04:25 +0100 Subject: [PATCH 677/802] refactor: [#1456] extract methods --- .../udp-tracker-server/src/handlers/error.rs | 41 ++++++++++++++++--- 1 file changed, 36 insertions(+), 5 deletions(-) diff --git a/packages/udp-tracker-server/src/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs index 4ebe24075..af530efd6 100644 --- a/packages/udp-tracker-server/src/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -28,6 +28,32 @@ pub async fn handle_error( let server_socket_addr = server_service_binding.bind_address(); + log_error(error, client_socket_addr, server_socket_addr, opt_transaction_id, request_id); + + trigger_udp_error_event( + error.clone(), + client_socket_addr, + server_service_binding, + opt_transaction_id, + opt_udp_server_stats_event_sender, + req_kind, + ) + .await; + + Response::from(ErrorResponse { + transaction_id: opt_transaction_id.unwrap_or(TransactionId(I32::new(0))), + message: error.to_string().into(), + }) +} + +fn log_error( + error: &Error, + client_socket_addr: SocketAddr, + server_socket_addr: SocketAddr, + opt_transaction_id: Option, + + request_id: Uuid, +) { match opt_transaction_id { Some(transaction_id) => { let transaction_id = transaction_id.0.to_string(); @@ -37,7 +63,17 @@ pub async fn handle_error( tracing::error!(target: UDP_TRACKER_LOG_TARGET, error = %error, %client_socket_addr, %server_socket_addr, %request_id, "response error"); } } +} + +async fn 
trigger_udp_error_event( + error: Error, + client_socket_addr: SocketAddr, + server_service_binding: ServiceBinding, + opt_transaction_id: Option, + opt_udp_server_stats_event_sender: &crate::event::sender::Sender, + req_kind: Option, +) { if opt_transaction_id.is_some() { // code-review: why we trigger an event only if transaction_id is present? @@ -51,9 +87,4 @@ pub async fn handle_error( .await; } } - - Response::from(ErrorResponse { - transaction_id: opt_transaction_id.unwrap_or(TransactionId(I32::new(0))), - message: error.to_string().into(), - }) } From ad1b19a366573dd24f35c3d6250758ee082ba9f6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 17:09:45 +0100 Subject: [PATCH 678/802] feat: trigger UDP error event when there is no transaction ID too --- .../udp-tracker-server/src/handlers/error.rs | 28 +++++++------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/packages/udp-tracker-server/src/handlers/error.rs b/packages/udp-tracker-server/src/handlers/error.rs index af530efd6..7fb4141b2 100644 --- a/packages/udp-tracker-server/src/handlers/error.rs +++ b/packages/udp-tracker-server/src/handlers/error.rs @@ -31,10 +31,9 @@ pub async fn handle_error( log_error(error, client_socket_addr, server_socket_addr, opt_transaction_id, request_id); trigger_udp_error_event( - error.clone(), + error, client_socket_addr, server_service_binding, - opt_transaction_id, opt_udp_server_stats_event_sender, req_kind, ) @@ -51,7 +50,6 @@ fn log_error( client_socket_addr: SocketAddr, server_socket_addr: SocketAddr, opt_transaction_id: Option, - request_id: Uuid, ) { match opt_transaction_id { @@ -66,25 +64,19 @@ fn log_error( } async fn trigger_udp_error_event( - error: Error, + error: &Error, client_socket_addr: SocketAddr, server_service_binding: ServiceBinding, - opt_transaction_id: Option, - opt_udp_server_stats_event_sender: &crate::event::sender::Sender, req_kind: Option, ) { - if opt_transaction_id.is_some() { - // code-review: why we 
trigger an event only if transaction_id is present? - - if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { - udp_server_stats_event_sender - .send(Event::UdpError { - context: ConnectionContext::new(client_socket_addr, server_service_binding), - kind: req_kind, - error: error.clone().into(), - }) - .await; - } + if let Some(udp_server_stats_event_sender) = opt_udp_server_stats_event_sender.as_deref() { + udp_server_stats_event_sender + .send(Event::UdpError { + context: ConnectionContext::new(client_socket_addr, server_service_binding), + kind: req_kind, + error: error.clone().into(), + }) + .await; } } From 21bea5b4bf30f3c220b443fed839521df50f453c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 17:35:17 +0100 Subject: [PATCH 679/802] refactor: [#1456] increase ban counters asyncronously --- .../udp-tracker-server/src/environment.rs | 1 + .../udp-tracker-server/src/handlers/mod.rs | 10 ---- .../src/statistics/event/handler.rs | 54 +++++++++++++++++-- .../src/statistics/event/listener.rs | 17 ++++-- src/bootstrap/jobs/udp_tracker_server.rs | 1 + 5 files changed, 65 insertions(+), 18 deletions(-) diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 3f479a02d..268259f1b 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -82,6 +82,7 @@ impl Environment { let udp_server_event_listener_job = Some(crate::statistics::event::listener::run_event_listener( self.container.udp_tracker_server_container.event_bus.receiver(), &self.container.udp_tracker_server_container.stats_repository, + &self.container.udp_tracker_core_container.ban_service, )); // Start the UDP tracker server diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 69c62a638..0bd455701 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ 
b/packages/udp-tracker-server/src/handlers/mod.rs @@ -13,7 +13,6 @@ use announce::handle_announce; use aquatic_udp_protocol::{Request, Response, TransactionId}; use bittorrent_tracker_core::MAX_SCRAPE_TORRENTS; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; -use bittorrent_udp_tracker_core::services::announce::UdpAnnounceError; use connect::handle_connect; use error::handle_error; use scrape::handle_scrape; @@ -84,15 +83,6 @@ pub(crate) async fn handle_packet( { Ok((response, req_kid)) => return (response, Some(req_kid)), Err((error, transaction_id, req_kind)) => { - if let Error::UdpAnnounceError { - source: UdpAnnounceError::ConnectionCookieError { .. }, - } = error - { - // code-review: should we include `RequestParseError` and `BadRequest`? - let mut ban_service = udp_tracker_core_container.ban_service.write().await; - ban_service.increase_counter(&udp_request.from.ip()); - } - let response = handle_error( Some(req_kind.clone()), udp_request.from, diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index b231d8336..394850844 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -1,8 +1,12 @@ +use std::sync::Arc; + +use bittorrent_udp_tracker_core::services::banning::BanService; +use tokio::sync::RwLock; use torrust_tracker_metrics::label::{LabelSet, LabelValue}; use torrust_tracker_metrics::{label_name, metric_name}; use torrust_tracker_primitives::DurationSinceUnixEpoch; -use crate::event::{Event, UdpRequestKind, UdpResponseKind}; +use crate::event::{ErrorKind, Event, UdpRequestKind, UdpResponseKind}; use crate::statistics::repository::Repository; use crate::statistics::{ UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, @@ -16,7 +20,12 @@ use crate::statistics::{ /// This function panics if the client IP version does not match the 
expected /// version. #[allow(clippy::too_many_lines)] -pub async fn handle_event(event: Event, stats_repository: &Repository, now: DurationSinceUnixEpoch) { +pub async fn handle_event( + event: Event, + stats_repository: &Repository, + ban_service: &Arc>, + now: DurationSinceUnixEpoch, +) { match event { Event::UdpRequestAborted { context } => { // Global fixed metrics @@ -232,7 +241,14 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura Err(err) => tracing::error!("Failed to increase the counter: {}", err), }; } - Event::UdpError { context, kind, error: _ } => { + Event::UdpError { context, kind, error } => { + // Increase the number of errors + // code-review: should we ban IP due to other errors too? + if let ErrorKind::ConnectionCookie(_msg) = error { + let mut ban_service = ban_service.write().await; + ban_service.increase_counter(&context.client_socket_addr().ip()); + } + // Global fixed metrics match context.client_socket_addr().ip() { std::net::IpAddr::V4(_) => { @@ -267,7 +283,9 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + use bittorrent_udp_tracker_core::services::banning::BanService; use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; @@ -279,6 +297,7 @@ mod tests { #[tokio::test] async fn should_increase_the_number_of_aborted_requests_when_it_receives_a_udp_request_aborted_event() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAborted { @@ -292,6 +311,7 @@ mod tests { ), }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -304,6 +324,7 @@ mod tests { #[tokio::test] async fn should_increase_the_number_of_banned_requests_when_it_receives_a_udp_request_banned_event() { let stats_repository = 
Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestBanned { @@ -317,6 +338,7 @@ mod tests { ), }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -329,6 +351,7 @@ mod tests { #[tokio::test] async fn should_increase_the_number_of_incoming_requests_when_it_receives_a_udp4_incoming_request_event() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestReceived { @@ -342,6 +365,7 @@ mod tests { ), }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -354,6 +378,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp_abort_counter_when_it_receives_a_udp_abort_event() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAborted { @@ -367,6 +392,7 @@ mod tests { ), }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -376,6 +402,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp_ban_counter_when_it_receives_a_udp_banned_event() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestBanned { @@ -389,6 +416,7 @@ mod tests { ), }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -399,6 +427,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_connect_requests_counter_when_it_receives_a_udp4_request_event_of_connect_kind() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -413,6 +442,7 @@ mod tests { kind: crate::event::UdpRequestKind::Connect, }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -425,6 +455,7 @@ mod tests { #[tokio::test] async fn 
should_increase_the_udp4_announce_requests_counter_when_it_receives_a_udp4_request_event_of_announce_kind() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -439,6 +470,7 @@ mod tests { kind: crate::event::UdpRequestKind::Announce, }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -451,6 +483,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_scrape_requests_counter_when_it_receives_a_udp4_request_event_of_scrape_kind() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -465,6 +498,7 @@ mod tests { kind: crate::event::UdpRequestKind::Scrape, }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -477,6 +511,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_responses_counter_when_it_receives_a_udp4_response_event() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpResponseSent { @@ -494,6 +529,7 @@ mod tests { req_processing_time: std::time::Duration::from_secs(1), }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -506,6 +542,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_errors_counter_when_it_receives_a_udp4_error_event() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpError { @@ -521,6 +558,7 @@ mod tests { error: ErrorKind::RequestParse("Invalid request format".to_string()), }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -533,6 +571,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_connect_requests_counter_when_it_receives_a_udp6_request_event_of_connect_kind() { let stats_repository = 
Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -547,6 +586,7 @@ mod tests { kind: crate::event::UdpRequestKind::Connect, }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -559,6 +599,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_announce_requests_counter_when_it_receives_a_udp6_request_event_of_announce_kind() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -573,6 +614,7 @@ mod tests { kind: crate::event::UdpRequestKind::Announce, }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -585,6 +627,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_scrape_requests_counter_when_it_receives_a_udp6_request_event_of_scrape_kind() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -599,6 +642,7 @@ mod tests { kind: crate::event::UdpRequestKind::Scrape, }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -611,6 +655,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_response_counter_when_it_receives_a_udp6_response_event() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpResponseSent { @@ -628,6 +673,7 @@ mod tests { req_processing_time: std::time::Duration::from_secs(1), }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; @@ -639,6 +685,7 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_errors_counter_when_it_receives_a_udp6_error_event() { let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpError { @@ -654,6 +701,7 @@ mod tests { 
error: ErrorKind::RequestParse("Invalid request format".to_string()), }, &stats_repository, + &ban_service, CurrentClock::now(), ) .await; diff --git a/packages/udp-tracker-server/src/statistics/event/listener.rs b/packages/udp-tracker-server/src/statistics/event/listener.rs index d805cc87f..e6c9a85ce 100644 --- a/packages/udp-tracker-server/src/statistics/event/listener.rs +++ b/packages/udp-tracker-server/src/statistics/event/listener.rs @@ -1,6 +1,8 @@ use std::sync::Arc; +use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; +use tokio::sync::RwLock; use tokio::task::JoinHandle; use torrust_tracker_clock::clock::Time; use torrust_tracker_events::receiver::RecvError; @@ -11,19 +13,24 @@ use crate::statistics::repository::Repository; use crate::CurrentClock; #[must_use] -pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { - let stats_repository = repository.clone(); +pub fn run_event_listener( + receiver: Receiver, + repository: &Arc, + ban_service: &Arc>, +) -> JoinHandle<()> { + let repository_clone = repository.clone(); + let ban_service_clone = ban_service.clone(); tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker server event listener"); tokio::spawn(async move { - dispatch_events(receiver, stats_repository).await; + dispatch_events(receiver, repository_clone, ban_service_clone).await; tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker server event listener finished"); }) } -async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc) { +async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc, ban_service: Arc>) { let shutdown_signal = tokio::signal::ctrl_c(); tokio::pin!(shutdown_signal); @@ -38,7 +45,7 @@ async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc { match result { - Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, + Ok(event) => handle_event(event, 
&stats_repository, &ban_service, CurrentClock::now()).await, Err(e) => { match e { RecvError::Closed => { diff --git a/src/bootstrap/jobs/udp_tracker_server.rs b/src/bootstrap/jobs/udp_tracker_server.rs index 42ac2d03e..8a4c2a273 100644 --- a/src/bootstrap/jobs/udp_tracker_server.rs +++ b/src/bootstrap/jobs/udp_tracker_server.rs @@ -10,6 +10,7 @@ pub fn start_event_listener(config: &Configuration, app_container: &Arc Date: Mon, 2 Jun 2025 17:49:41 +0100 Subject: [PATCH 680/802] refactor: rename UDP tracker server error variants --- packages/udp-tracker-server/src/error.rs | 16 ++++++++-------- packages/udp-tracker-server/src/event.rs | 10 +++++----- packages/udp-tracker-server/src/handlers/mod.rs | 2 +- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/packages/udp-tracker-server/src/error.rs b/packages/udp-tracker-server/src/error.rs index 697cc5cab..d260ebfd4 100644 --- a/packages/udp-tracker-server/src/error.rs +++ b/packages/udp-tracker-server/src/error.rs @@ -17,31 +17,31 @@ pub struct ConnectionCookie(pub ConnectionId); pub enum Error { /// Error returned when the request is invalid. #[error("error parsing request: {request_parse_error:?}")] - RequestParseError { request_parse_error: SendableRequestParseError }, + InvalidRequest { request_parse_error: SendableRequestParseError }, /// Error returned when the domain tracker returns an announce error. #[error("tracker announce error: {source}")] - UdpAnnounceError { source: UdpAnnounceError }, + AnnounceFailed { source: UdpAnnounceError }, /// Error returned when the domain tracker returns an scrape error. #[error("tracker scrape error: {source}")] - UdpScrapeError { source: UdpScrapeError }, + ScrapeFailed { source: UdpScrapeError }, /// Error returned from a third-party library (`aquatic_udp_protocol`). 
#[error("internal server error: {message}, {location}")] - InternalServer { + Internal { location: &'static Location<'static>, message: String, }, /// Error returned when tracker requires authentication. #[error("domain tracker requires authentication but is not supported in current UDP implementation. Location: {location}")] - TrackerAuthenticationRequired { location: &'static Location<'static> }, + AuthRequired { location: &'static Location<'static> }, } impl From for Error { fn from(request_parse_error: RequestParseError) -> Self { - Self::RequestParseError { + Self::InvalidRequest { request_parse_error: request_parse_error.into(), } } @@ -49,7 +49,7 @@ impl From for Error { impl From for Error { fn from(udp_announce_error: UdpAnnounceError) -> Self { - Self::UdpAnnounceError { + Self::AnnounceFailed { source: udp_announce_error, } } @@ -57,7 +57,7 @@ impl From for Error { impl From for Error { fn from(udp_scrape_error: UdpScrapeError) -> Self { - Self::UdpScrapeError { + Self::ScrapeFailed { source: udp_scrape_error, } } diff --git a/packages/udp-tracker-server/src/event.rs b/packages/udp-tracker-server/src/event.rs index e320ceb8a..4fa29940e 100644 --- a/packages/udp-tracker-server/src/event.rs +++ b/packages/udp-tracker-server/src/event.rs @@ -129,8 +129,8 @@ pub enum ErrorKind { impl From for ErrorKind { fn from(error: Error) -> Self { match error { - Error::RequestParseError { request_parse_error } => Self::RequestParse(request_parse_error.to_string()), - Error::UdpAnnounceError { source } => match source { + Error::InvalidRequest { request_parse_error } => Self::RequestParse(request_parse_error.to_string()), + Error::AnnounceFailed { source } => match source { UdpAnnounceError::ConnectionCookieError { source } => Self::ConnectionCookie(source.to_string()), UdpAnnounceError::TrackerCoreAnnounceError { source } => match source { AnnounceError::Whitelist(whitelist_error) => Self::Whitelist(whitelist_error.to_string()), @@ -138,15 +138,15 @@ impl From for 
ErrorKind { }, UdpAnnounceError::TrackerCoreWhitelistError { source } => Self::Whitelist(source.to_string()), }, - Error::UdpScrapeError { source } => match source { + Error::ScrapeFailed { source } => match source { UdpScrapeError::ConnectionCookieError { source } => Self::ConnectionCookie(source.to_string()), UdpScrapeError::TrackerCoreScrapeError { source } => match source { ScrapeError::Whitelist(whitelist_error) => Self::Whitelist(whitelist_error.to_string()), }, UdpScrapeError::TrackerCoreWhitelistError { source } => Self::Whitelist(source.to_string()), }, - Error::InternalServer { location: _, message } => Self::InternalServer(message.to_string()), - Error::TrackerAuthenticationRequired { location } => Self::TrackerAuthentication(location.to_string()), + Error::Internal { location: _, message } => Self::InternalServer(message.to_string()), + Error::AuthRequired { location } => Self::TrackerAuthentication(location.to_string()), } } } diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 0bd455701..c1125b97f 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -101,7 +101,7 @@ pub(crate) async fn handle_packet( Err(e) => { // The request payload could not be parsed, so we handle it as an error. 
- let opt_transaction_id = if let Error::RequestParseError { request_parse_error } = e.clone() { + let opt_transaction_id = if let Error::InvalidRequest { request_parse_error } = e.clone() { request_parse_error.opt_transaction_id } else { None From 89ac87cbc1c26fd93e6a019faeb10161f9f6e058 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 18:03:25 +0100 Subject: [PATCH 681/802] refactor: [#1551] extract methods in udp event handler" --- .../src/statistics/event/handler.rs | 482 +++++++++--------- 1 file changed, 254 insertions(+), 228 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs index 394850844..a1e9007e9 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler.rs @@ -6,7 +6,7 @@ use torrust_tracker_metrics::label::{LabelSet, LabelValue}; use torrust_tracker_metrics::{label_name, metric_name}; use torrust_tracker_primitives::DurationSinceUnixEpoch; -use crate::event::{ErrorKind, Event, UdpRequestKind, UdpResponseKind}; +use crate::event::{ConnectionContext, ErrorKind, Event, UdpRequestKind, UdpResponseKind}; use crate::statistics::repository::Repository; use crate::statistics::{ UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, @@ -15,10 +15,6 @@ use crate::statistics::{ UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, }; -/// # Panics -/// -/// This function panics if the client IP version does not match the expected -/// version. 
#[allow(clippy::too_many_lines)] pub async fn handle_event( event: Event, @@ -28,256 +24,286 @@ pub async fn handle_event( ) { match event { Event::UdpRequestAborted { context } => { - // Global fixed metrics - stats_repository.increase_udp_requests_aborted().await; - - // Extendable metrics - match stats_repository - .increase_counter( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), - &LabelSet::from(context), - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; + handle_udp_request_aborted_event(context, stats_repository, now).await; } Event::UdpRequestBanned { context } => { - // Global fixed metrics - stats_repository.increase_udp_requests_banned().await; - - // Extendable metrics - match stats_repository - .increase_counter( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), - &LabelSet::from(context), - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; + handle_udp_request_banned_event(context, stats_repository, now).await; } Event::UdpRequestReceived { context } => { - // Global fixed metrics - match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_requests().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_requests().await; - } - } - - // Extendable metrics - match stats_repository - .increase_counter( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), - &LabelSet::from(context), - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; + handle_udp_request_received_event(context, stats_repository, now).await; } Event::UdpRequestAccepted { context, kind } => { - // Global fixed metrics - match kind { - UdpRequestKind::Connect => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_connections().await; - } - 
std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_connections().await; - } - }, - UdpRequestKind::Announce => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_announces().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_announces().await; - } - }, - UdpRequestKind::Scrape => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_scrapes().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_scrapes().await; - } - }, - } - - // Extendable metrics - - let mut label_set = LabelSet::from(context); - - label_set.upsert(label_name!("request_kind"), LabelValue::new(&kind.to_string())); - - match stats_repository - .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &label_set, now) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; + handle_udp_request_accepted_event(context, kind, stats_repository, now).await; } Event::UdpResponseSent { context, kind, req_processing_time, } => { - // Global fixed metrics - match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_responses().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_responses().await; - } - } + handle_udp_response_sent_event(context, kind, req_processing_time, stats_repository, now).await; + } + Event::UdpError { context, kind, error } => { + handle_udp_error_event(context, kind, error, stats_repository, ban_service, now).await; + } + } - let (result_label_value, kind_label_value) = match kind { - UdpResponseKind::Ok { req_kind } => match req_kind { - UdpRequestKind::Connect => { - let new_avg = stats_repository - .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) - .await; - - // Extendable metrics - - let mut label_set = LabelSet::from(context.clone()); - 
label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - - match stats_repository - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &label_set, - new_avg, - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } - - (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Connect.to_string())) - } - UdpRequestKind::Announce => { - let new_avg = stats_repository - .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) - .await; - - // Extendable metrics - - let mut label_set = LabelSet::from(context.clone()); - label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - - match stats_repository - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &label_set, - new_avg, - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } - - (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Announce.to_string())) - } - UdpRequestKind::Scrape => { - let new_avg = stats_repository - .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) - .await; - - // Extendable metrics - - let mut label_set = LabelSet::from(context.clone()); - label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - - match stats_repository - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &label_set, - new_avg, - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } - - (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Scrape.to_string())) - } - }, - UdpResponseKind::Error { opt_req_kind: _ } => (LabelValue::new("error"), LabelValue::ignore()), - }; + tracing::debug!("stats: {:?}", stats_repository.get_stats().await); +} - // Extendable metrics +async fn handle_udp_request_aborted_event( + context: ConnectionContext, + 
stats_repository: &Repository, + now: DurationSinceUnixEpoch, +) { + // Global fixed metrics + stats_repository.increase_udp_requests_aborted().await; + + // Extendable metrics + match stats_repository + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), + &LabelSet::from(context), + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} - let mut label_set = LabelSet::from(context); +async fn handle_udp_request_banned_event(context: ConnectionContext, stats_repository: &Repository, now: DurationSinceUnixEpoch) { + // Global fixed metrics + stats_repository.increase_udp_requests_banned().await; - if result_label_value == LabelValue::new("ok") { - label_set.upsert(label_name!("request_kind"), kind_label_value); - } - label_set.upsert(label_name!("result"), result_label_value); - - match stats_repository - .increase_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &label_set, now) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; + // Extendable metrics + match stats_repository + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), + &LabelSet::from(context), + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +async fn handle_udp_request_received_event( + context: ConnectionContext, + stats_repository: &Repository, + now: DurationSinceUnixEpoch, +) { + // Global fixed metrics + match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_requests().await; } - Event::UdpError { context, kind, error } => { - // Increase the number of errors - // code-review: should we ban IP due to other errors too? 
- if let ErrorKind::ConnectionCookie(_msg) = error { - let mut ban_service = ban_service.write().await; - ban_service.increase_counter(&context.client_socket_addr().ip()); - } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_requests().await; + } + } - // Global fixed metrics - match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_errors().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_errors().await; - } + // Extendable metrics + match stats_repository + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), + &LabelSet::from(context), + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +async fn handle_udp_request_accepted_event( + context: ConnectionContext, + kind: UdpRequestKind, + stats_repository: &Repository, + now: DurationSinceUnixEpoch, +) { + // Global fixed metrics + match kind { + UdpRequestKind::Connect => match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_connections().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_connections().await; + } + }, + UdpRequestKind::Announce => match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_announces().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_announces().await; + } + }, + UdpRequestKind::Scrape => match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_scrapes().await; } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_scrapes().await; + } + }, + } - // Extendable metrics + // Extendable metrics + let mut label_set = LabelSet::from(context); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&kind.to_string())); + match stats_repository + 
.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} - let mut label_set = LabelSet::from(context); +/// # Panics +/// +/// This function panics if the client IP version does not match the expected +/// version. +async fn handle_udp_response_sent_event( + context: ConnectionContext, + kind: UdpResponseKind, + req_processing_time: std::time::Duration, + stats_repository: &Repository, + now: DurationSinceUnixEpoch, +) { + // Global fixed metrics + match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_responses().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_responses().await; + } + } - if let Some(kind) = kind { - label_set.upsert(label_name!("request_kind"), kind.to_string().into()); + let (result_label_value, kind_label_value) = match kind { + UdpResponseKind::Ok { req_kind } => match req_kind { + UdpRequestKind::Connect => { + let new_avg = stats_repository + .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) + .await; + let mut label_set = LabelSet::from(context.clone()); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); + match stats_repository + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &label_set, + new_avg, + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } + (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Connect.to_string())) } + UdpRequestKind::Announce => { + let new_avg = stats_repository + .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) + .await; + let mut label_set = LabelSet::from(context.clone()); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); + match stats_repository + .set_gauge( + 
&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &label_set, + new_avg, + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } + (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Announce.to_string())) + } + UdpRequestKind::Scrape => { + let new_avg = stats_repository + .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) + .await; + let mut label_set = LabelSet::from(context.clone()); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); + match stats_repository + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &label_set, + new_avg, + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } + (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Scrape.to_string())) + } + }, + UdpResponseKind::Error { opt_req_kind: _ } => (LabelValue::new("error"), LabelValue::ignore()), + }; + + // Extendable metrics + let mut label_set = LabelSet::from(context); + if result_label_value == LabelValue::new("ok") { + label_set.upsert(label_name!("request_kind"), kind_label_value); + } + label_set.upsert(label_name!("result"), result_label_value); + match stats_repository + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +async fn handle_udp_error_event( + context: ConnectionContext, + kind: Option, + error: ErrorKind, + stats_repository: &Repository, + ban_service: &Arc>, + now: DurationSinceUnixEpoch, +) { + // Increase the number of errors + // code-review: should we ban IP due to other errors too? 
+ if let ErrorKind::ConnectionCookie(_msg) = error { + let mut ban_service = ban_service.write().await; + ban_service.increase_counter(&context.client_socket_addr().ip()); + } - match stats_repository - .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &label_set, now) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; + // Global fixed metrics + match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_errors().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_errors().await; } } - tracing::debug!("stats: {:?}", stats_repository.get_stats().await); + // Extendable metrics + let mut label_set = LabelSet::from(context); + if let Some(kind) = kind { + label_set.upsert(label_name!("request_kind"), kind.to_string().into()); + } + match stats_repository + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; } #[cfg(test)] From a8f3a973c661815b7721d87cc24b828915d0deec Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 2 Jun 2025 18:47:46 +0100 Subject: [PATCH 682/802] refactor: [#1551] extract event handler for each udp event --- .../src/statistics/event/handler.rs | 739 ------------------ .../src/statistics/event/handler/error.rs | 95 +++ .../src/statistics/event/handler/mod.rs | 49 ++ .../event/handler/request_aborted.rs | 92 +++ .../event/handler/request_accepted.rs | 236 ++++++ .../event/handler/request_banned.rs | 92 +++ .../event/handler/request_received.rs | 74 ++ .../statistics/event/handler/response_sent.rs | 182 +++++ 8 files changed, 820 insertions(+), 739 deletions(-) delete mode 100644 packages/udp-tracker-server/src/statistics/event/handler.rs create mode 100644 packages/udp-tracker-server/src/statistics/event/handler/error.rs create mode 100644 
packages/udp-tracker-server/src/statistics/event/handler/mod.rs create mode 100644 packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs create mode 100644 packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs create mode 100644 packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs create mode 100644 packages/udp-tracker-server/src/statistics/event/handler/request_received.rs create mode 100644 packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs diff --git a/packages/udp-tracker-server/src/statistics/event/handler.rs b/packages/udp-tracker-server/src/statistics/event/handler.rs deleted file mode 100644 index a1e9007e9..000000000 --- a/packages/udp-tracker-server/src/statistics/event/handler.rs +++ /dev/null @@ -1,739 +0,0 @@ -use std::sync::Arc; - -use bittorrent_udp_tracker_core::services::banning::BanService; -use tokio::sync::RwLock; -use torrust_tracker_metrics::label::{LabelSet, LabelValue}; -use torrust_tracker_metrics::{label_name, metric_name}; -use torrust_tracker_primitives::DurationSinceUnixEpoch; - -use crate::event::{ConnectionContext, ErrorKind, Event, UdpRequestKind, UdpResponseKind}; -use crate::statistics::repository::Repository; -use crate::statistics::{ - UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, - UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL, - UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL, - UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, -}; - -#[allow(clippy::too_many_lines)] -pub async fn handle_event( - event: Event, - stats_repository: &Repository, - ban_service: &Arc>, - now: DurationSinceUnixEpoch, -) { - match event { - Event::UdpRequestAborted { context } => { - handle_udp_request_aborted_event(context, stats_repository, now).await; - } - Event::UdpRequestBanned { context } => { - handle_udp_request_banned_event(context, 
stats_repository, now).await; - } - Event::UdpRequestReceived { context } => { - handle_udp_request_received_event(context, stats_repository, now).await; - } - Event::UdpRequestAccepted { context, kind } => { - handle_udp_request_accepted_event(context, kind, stats_repository, now).await; - } - Event::UdpResponseSent { - context, - kind, - req_processing_time, - } => { - handle_udp_response_sent_event(context, kind, req_processing_time, stats_repository, now).await; - } - Event::UdpError { context, kind, error } => { - handle_udp_error_event(context, kind, error, stats_repository, ban_service, now).await; - } - } - - tracing::debug!("stats: {:?}", stats_repository.get_stats().await); -} - -async fn handle_udp_request_aborted_event( - context: ConnectionContext, - stats_repository: &Repository, - now: DurationSinceUnixEpoch, -) { - // Global fixed metrics - stats_repository.increase_udp_requests_aborted().await; - - // Extendable metrics - match stats_repository - .increase_counter( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), - &LabelSet::from(context), - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; -} - -async fn handle_udp_request_banned_event(context: ConnectionContext, stats_repository: &Repository, now: DurationSinceUnixEpoch) { - // Global fixed metrics - stats_repository.increase_udp_requests_banned().await; - - // Extendable metrics - match stats_repository - .increase_counter( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), - &LabelSet::from(context), - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; -} - -async fn handle_udp_request_received_event( - context: ConnectionContext, - stats_repository: &Repository, - now: DurationSinceUnixEpoch, -) { - // Global fixed metrics - match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - 
stats_repository.increase_udp4_requests().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_requests().await; - } - } - - // Extendable metrics - match stats_repository - .increase_counter( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), - &LabelSet::from(context), - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; -} - -async fn handle_udp_request_accepted_event( - context: ConnectionContext, - kind: UdpRequestKind, - stats_repository: &Repository, - now: DurationSinceUnixEpoch, -) { - // Global fixed metrics - match kind { - UdpRequestKind::Connect => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_connections().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_connections().await; - } - }, - UdpRequestKind::Announce => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_announces().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_announces().await; - } - }, - UdpRequestKind::Scrape => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_scrapes().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_scrapes().await; - } - }, - } - - // Extendable metrics - let mut label_set = LabelSet::from(context); - label_set.upsert(label_name!("request_kind"), LabelValue::new(&kind.to_string())); - match stats_repository - .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &label_set, now) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; -} - -/// # Panics -/// -/// This function panics if the client IP version does not match the expected -/// version. 
-async fn handle_udp_response_sent_event( - context: ConnectionContext, - kind: UdpResponseKind, - req_processing_time: std::time::Duration, - stats_repository: &Repository, - now: DurationSinceUnixEpoch, -) { - // Global fixed metrics - match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_responses().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_responses().await; - } - } - - let (result_label_value, kind_label_value) = match kind { - UdpResponseKind::Ok { req_kind } => match req_kind { - UdpRequestKind::Connect => { - let new_avg = stats_repository - .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) - .await; - let mut label_set = LabelSet::from(context.clone()); - label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - match stats_repository - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &label_set, - new_avg, - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } - (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Connect.to_string())) - } - UdpRequestKind::Announce => { - let new_avg = stats_repository - .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) - .await; - let mut label_set = LabelSet::from(context.clone()); - label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - match stats_repository - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &label_set, - new_avg, - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } - (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Announce.to_string())) - } - UdpRequestKind::Scrape => { - let new_avg = stats_repository - .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) - .await; - let mut label_set = 
LabelSet::from(context.clone()); - label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - match stats_repository - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &label_set, - new_avg, - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } - (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Scrape.to_string())) - } - }, - UdpResponseKind::Error { opt_req_kind: _ } => (LabelValue::new("error"), LabelValue::ignore()), - }; - - // Extendable metrics - let mut label_set = LabelSet::from(context); - if result_label_value == LabelValue::new("ok") { - label_set.upsert(label_name!("request_kind"), kind_label_value); - } - label_set.upsert(label_name!("result"), result_label_value); - match stats_repository - .increase_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &label_set, now) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; -} - -async fn handle_udp_error_event( - context: ConnectionContext, - kind: Option, - error: ErrorKind, - stats_repository: &Repository, - ban_service: &Arc>, - now: DurationSinceUnixEpoch, -) { - // Increase the number of errors - // code-review: should we ban IP due to other errors too? 
- if let ErrorKind::ConnectionCookie(_msg) = error { - let mut ban_service = ban_service.write().await; - ban_service.increase_counter(&context.client_socket_addr().ip()); - } - - // Global fixed metrics - match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_errors().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_errors().await; - } - } - - // Extendable metrics - let mut label_set = LabelSet::from(context); - if let Some(kind) = kind { - label_set.upsert(label_name!("request_kind"), kind.to_string().into()); - } - match stats_repository - .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &label_set, now) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; -} - -#[cfg(test)] -mod tests { - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use std::sync::Arc; - - use bittorrent_udp_tracker_core::services::banning::BanService; - use torrust_tracker_clock::clock::Time; - use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; - - use crate::event::{ConnectionContext, ErrorKind, Event, UdpRequestKind}; - use crate::statistics::event::handler::handle_event; - use crate::statistics::repository::Repository; - use crate::CurrentClock; - - #[tokio::test] - async fn should_increase_the_number_of_aborted_requests_when_it_receives_a_udp_request_aborted_event() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestAborted { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - 
assert_eq!(stats.udp_requests_aborted, 1); - } - - #[tokio::test] - async fn should_increase_the_number_of_banned_requests_when_it_receives_a_udp_request_banned_event() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestBanned { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp_requests_banned, 1); - } - - #[tokio::test] - async fn should_increase_the_number_of_incoming_requests_when_it_receives_a_udp4_incoming_request_event() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestReceived { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_requests, 1); - } - - #[tokio::test] - async fn should_increase_the_udp_abort_counter_when_it_receives_a_udp_abort_event() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestAborted { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - }, - 
&stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_aborted, 1); - } - #[tokio::test] - async fn should_increase_the_udp_ban_counter_when_it_receives_a_udp_banned_event() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestBanned { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_banned, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_connect_requests_counter_when_it_receives_a_udp4_request_event_of_connect_kind() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestAccepted { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - kind: crate::event::UdpRequestKind::Connect, - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_connections_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_announce_requests_counter_when_it_receives_a_udp4_request_event_of_announce_kind() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestAccepted { - context: ConnectionContext::new( - 
SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - kind: crate::event::UdpRequestKind::Announce, - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_announces_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_scrape_requests_counter_when_it_receives_a_udp4_request_event_of_scrape_kind() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestAccepted { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - kind: crate::event::UdpRequestKind::Scrape, - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_scrapes_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_responses_counter_when_it_receives_a_udp4_response_event() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpResponseSent { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - kind: crate::event::UdpResponseKind::Ok { - req_kind: UdpRequestKind::Announce, - }, - req_processing_time: std::time::Duration::from_secs(1), - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - 
assert_eq!(stats.udp4_responses, 1); - } - - #[tokio::test] - async fn should_increase_the_udp4_errors_counter_when_it_receives_a_udp4_error_event() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpError { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - kind: None, - error: ErrorKind::RequestParse("Invalid request format".to_string()), - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp4_errors_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp6_connect_requests_counter_when_it_receives_a_udp6_request_event_of_connect_kind() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestAccepted { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - kind: crate::event::UdpRequestKind::Connect, - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_connections_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp6_announce_requests_counter_when_it_receives_a_udp6_request_event_of_announce_kind() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestAccepted { - context: ConnectionContext::new( - 
SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - kind: crate::event::UdpRequestKind::Announce, - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_announces_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp6_scrape_requests_counter_when_it_receives_a_udp6_request_event_of_scrape_kind() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpRequestAccepted { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - kind: crate::event::UdpRequestKind::Scrape, - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_scrapes_handled, 1); - } - - #[tokio::test] - async fn should_increase_the_udp6_response_counter_when_it_receives_a_udp6_response_event() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpResponseSent { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - kind: crate::event::UdpResponseKind::Ok { - req_kind: UdpRequestKind::Announce, - }, - req_processing_time: std::time::Duration::from_secs(1), - }, - &stats_repository, - &ban_service, - CurrentClock::now(), 
- ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_responses, 1); - } - #[tokio::test] - async fn should_increase_the_udp6_errors_counter_when_it_receives_a_udp6_error_event() { - let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); - - handle_event( - Event::UdpError { - context: ConnectionContext::new( - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), - ServiceBinding::new( - Protocol::UDP, - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), - ) - .unwrap(), - ), - kind: None, - error: ErrorKind::RequestParse("Invalid request format".to_string()), - }, - &stats_repository, - &ban_service, - CurrentClock::now(), - ) - .await; - - let stats = stats_repository.get_stats().await; - - assert_eq!(stats.udp6_errors_handled, 1); - } -} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/error.rs b/packages/udp-tracker-server/src/statistics/event/handler/error.rs new file mode 100644 index 000000000..e1023a56b --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/error.rs @@ -0,0 +1,95 @@ +use std::sync::Arc; + +use bittorrent_udp_tracker_core::services::banning::BanService; +use tokio::sync::RwLock; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::{label_name, metric_name}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::{ConnectionContext, ErrorKind, UdpRequestKind}; +use crate::statistics::repository::Repository; +use crate::statistics::UDP_TRACKER_SERVER_ERRORS_TOTAL; + +pub async fn handle_event( + context: ConnectionContext, + kind: Option, + error: ErrorKind, + stats_repository: &Repository, + ban_service: &Arc>, + now: DurationSinceUnixEpoch, +) { + // Increase the number of errors + // code-review: should we ban IP due to other errors too? 
+ if let ErrorKind::ConnectionCookie(_msg) = error { + let mut ban_service = ban_service.write().await; + ban_service.increase_counter(&context.client_socket_addr().ip()); + } + + // Global fixed metrics + match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_errors().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_errors().await; + } + } + + // Extendable metrics + let mut label_set = LabelSet::from(context); + if let Some(kind) = kind { + label_set.upsert(label_name!("request_kind"), kind.to_string().into()); + } + match stats_repository + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use bittorrent_udp_tracker_core::services::banning::BanService; + use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + + use crate::event::{ConnectionContext, Event}; + use crate::statistics::event::handler::error::ErrorKind; + use crate::statistics::event::handler::handle_event; + use crate::statistics::repository::Repository; + use crate::CurrentClock; + + #[tokio::test] + async fn should_increase_the_udp4_errors_counter_when_it_receives_a_udp4_error_event() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpError { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: None, + error: ErrorKind::RequestParse("Invalid request format".to_string()), + }, + &stats_repository, + &ban_service, + 
CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_errors_handled, 1); + } +} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/mod.rs b/packages/udp-tracker-server/src/statistics/event/handler/mod.rs new file mode 100644 index 000000000..c8ac864a3 --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/mod.rs @@ -0,0 +1,49 @@ +mod error; +mod request_aborted; +mod request_accepted; +mod request_banned; +mod request_received; +mod response_sent; + +use std::sync::Arc; + +use bittorrent_udp_tracker_core::services::banning::BanService; +use tokio::sync::RwLock; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::Event; +use crate::statistics::repository::Repository; + +pub async fn handle_event( + event: Event, + stats_repository: &Repository, + ban_service: &Arc>, + now: DurationSinceUnixEpoch, +) { + match event { + Event::UdpRequestAborted { context } => { + request_aborted::handle_event(context, stats_repository, now).await; + } + Event::UdpRequestBanned { context } => { + request_banned::handle_event(context, stats_repository, now).await; + } + Event::UdpRequestReceived { context } => { + request_received::handle_event(context, stats_repository, now).await; + } + Event::UdpRequestAccepted { context, kind } => { + request_accepted::handle_event(context, kind, stats_repository, now).await; + } + Event::UdpResponseSent { + context, + kind, + req_processing_time, + } => { + response_sent::handle_event(context, kind, req_processing_time, stats_repository, now).await; + } + Event::UdpError { context, kind, error } => { + error::handle_event(context, kind, error, stats_repository, ban_service, now).await; + } + } + + tracing::debug!("stats: {:?}", stats_repository.get_stats().await); +} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs 
b/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs new file mode 100644 index 000000000..270ec2a45 --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs @@ -0,0 +1,92 @@ +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::ConnectionContext; +use crate::statistics::repository::Repository; +use crate::statistics::UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL; + +pub async fn handle_event(context: ConnectionContext, stats_repository: &Repository, now: DurationSinceUnixEpoch) { + // Global fixed metrics + stats_repository.increase_udp_requests_aborted().await; + + // Extendable metrics + match stats_repository + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), + &LabelSet::from(context), + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use bittorrent_udp_tracker_core::services::banning::BanService; + use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + + use crate::event::{ConnectionContext, Event}; + use crate::statistics::event::handler::handle_event; + use crate::statistics::repository::Repository; + use crate::CurrentClock; + + #[tokio::test] + async fn should_increase_the_number_of_aborted_requests_when_it_receives_a_udp_request_aborted_event() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestAborted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 
6969), + ) + .unwrap(), + ), + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp_requests_aborted, 1); + } + + #[tokio::test] + async fn should_increase_the_udp_abort_counter_when_it_receives_a_udp_abort_event() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestAborted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + let stats = stats_repository.get_stats().await; + assert_eq!(stats.udp_requests_aborted, 1); + } +} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs new file mode 100644 index 000000000..25c1311e5 --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs @@ -0,0 +1,236 @@ +use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use torrust_tracker_metrics::{label_name, metric_name}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::{ConnectionContext, UdpRequestKind}; +use crate::statistics::repository::Repository; +use crate::statistics::UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL; + +pub async fn handle_event( + context: ConnectionContext, + kind: UdpRequestKind, + stats_repository: &Repository, + now: DurationSinceUnixEpoch, +) { + // Global fixed metrics + match kind { + UdpRequestKind::Connect => match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_connections().await; + } + std::net::IpAddr::V6(_) => { + 
stats_repository.increase_udp6_connections().await; + } + }, + UdpRequestKind::Announce => match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_announces().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_announces().await; + } + }, + UdpRequestKind::Scrape => match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_scrapes().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_scrapes().await; + } + }, + } + + // Extendable metrics + let mut label_set = LabelSet::from(context); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&kind.to_string())); + match stats_repository + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + + use bittorrent_udp_tracker_core::services::banning::BanService; + use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + + use crate::event::{ConnectionContext, Event}; + use crate::statistics::event::handler::handle_event; + use crate::statistics::repository::Repository; + use crate::CurrentClock; + + #[tokio::test] + async fn should_increase_the_udp4_connect_requests_counter_when_it_receives_a_udp4_request_event_of_connect_kind() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestAccepted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: 
crate::event::UdpRequestKind::Connect, + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_announce_requests_counter_when_it_receives_a_udp4_request_event_of_announce_kind() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestAccepted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: crate::event::UdpRequestKind::Announce, + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp4_scrape_requests_counter_when_it_receives_a_udp4_request_event_of_scrape_kind() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestAccepted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: crate::event::UdpRequestKind::Scrape, + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_scrapes_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_connect_requests_counter_when_it_receives_a_udp6_request_event_of_connect_kind() { + let stats_repository = Repository::new(); + let ban_service = 
Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestAccepted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: crate::event::UdpRequestKind::Connect, + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_connections_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_announce_requests_counter_when_it_receives_a_udp6_request_event_of_announce_kind() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestAccepted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: crate::event::UdpRequestKind::Announce, + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_announces_handled, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_scrape_requests_counter_when_it_receives_a_udp6_request_event_of_scrape_kind() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestAccepted { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: 
crate::event::UdpRequestKind::Scrape, + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_scrapes_handled, 1); + } +} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs new file mode 100644 index 000000000..74641574a --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs @@ -0,0 +1,92 @@ +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::ConnectionContext; +use crate::statistics::repository::Repository; +use crate::statistics::UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL; + +pub async fn handle_event(context: ConnectionContext, stats_repository: &Repository, now: DurationSinceUnixEpoch) { + // Global fixed metrics + stats_repository.increase_udp_requests_banned().await; + + // Extendable metrics + match stats_repository + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), + &LabelSet::from(context), + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use bittorrent_udp_tracker_core::services::banning::BanService; + use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + + use crate::event::{ConnectionContext, Event}; + use crate::statistics::event::handler::handle_event; + use crate::statistics::repository::Repository; + use crate::CurrentClock; + + #[tokio::test] + async fn should_increase_the_number_of_banned_requests_when_it_receives_a_udp_request_banned_event() { + let stats_repository = Repository::new(); + let ban_service = 
Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestBanned { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp_requests_banned, 1); + } + + #[tokio::test] + async fn should_increase_the_udp_ban_counter_when_it_receives_a_udp_banned_event() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestBanned { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + let stats = stats_repository.get_stats().await; + assert_eq!(stats.udp_requests_banned, 1); + } +} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs new file mode 100644 index 000000000..8333258c2 --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs @@ -0,0 +1,74 @@ +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::ConnectionContext; +use crate::statistics::repository::Repository; +use crate::statistics::UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL; + +pub async fn handle_event(context: ConnectionContext, stats_repository: &Repository, now: DurationSinceUnixEpoch) { + // Global fixed metrics + match 
context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_requests().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_requests().await; + } + } + + // Extendable metrics + match stats_repository + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), + &LabelSet::from(context), + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use bittorrent_udp_tracker_core::services::banning::BanService; + use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + + use crate::event::{ConnectionContext, Event}; + use crate::statistics::event::handler::handle_event; + use crate::statistics::repository::Repository; + use crate::CurrentClock; + + #[tokio::test] + async fn should_increase_the_number_of_incoming_requests_when_it_receives_a_udp4_incoming_request_event() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpRequestReceived { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_requests, 1); + } +} diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs new file mode 100644 index 000000000..a69184e08 --- /dev/null +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -0,0 +1,182 @@ 
+use torrust_tracker_metrics::label::{LabelSet, LabelValue}; +use torrust_tracker_metrics::{label_name, metric_name}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::{ConnectionContext, UdpRequestKind, UdpResponseKind}; +use crate::statistics::repository::Repository; +use crate::statistics::{UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL}; + +pub async fn handle_event( + context: ConnectionContext, + kind: UdpResponseKind, + req_processing_time: std::time::Duration, + stats_repository: &Repository, + now: DurationSinceUnixEpoch, +) { + // Global fixed metrics + match context.client_socket_addr().ip() { + std::net::IpAddr::V4(_) => { + stats_repository.increase_udp4_responses().await; + } + std::net::IpAddr::V6(_) => { + stats_repository.increase_udp6_responses().await; + } + } + + let (result_label_value, kind_label_value) = match kind { + UdpResponseKind::Ok { req_kind } => match req_kind { + UdpRequestKind::Connect => { + let new_avg = stats_repository + .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) + .await; + let mut label_set = LabelSet::from(context.clone()); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); + match stats_repository + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &label_set, + new_avg, + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } + (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Connect.to_string())) + } + UdpRequestKind::Announce => { + let new_avg = stats_repository + .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) + .await; + let mut label_set = LabelSet::from(context.clone()); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); + match stats_repository + .set_gauge( + 
&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &label_set, + new_avg, + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } + (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Announce.to_string())) + } + UdpRequestKind::Scrape => { + let new_avg = stats_repository + .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) + .await; + let mut label_set = LabelSet::from(context.clone()); + label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); + match stats_repository + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &label_set, + new_avg, + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } + (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Scrape.to_string())) + } + }, + UdpResponseKind::Error { opt_req_kind: _ } => (LabelValue::new("error"), LabelValue::ignore()), + }; + + // Extendable metrics + let mut label_set = LabelSet::from(context); + if result_label_value == LabelValue::new("ok") { + label_set.upsert(label_name!("request_kind"), kind_label_value); + } + label_set.upsert(label_name!("result"), result_label_value); + match stats_repository + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; + + use bittorrent_udp_tracker_core::services::banning::BanService; + use torrust_tracker_clock::clock::Time; + use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; + + use crate::event::{ConnectionContext, Event, UdpRequestKind}; + use crate::statistics::event::handler::handle_event; + use crate::statistics::repository::Repository; + use crate::CurrentClock; + + 
#[tokio::test] + async fn should_increase_the_udp4_responses_counter_when_it_receives_a_udp4_response_event() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpResponseSent { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: crate::event::UdpResponseKind::Ok { + req_kind: UdpRequestKind::Announce, + }, + req_processing_time: std::time::Duration::from_secs(1), + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp4_responses, 1); + } + + #[tokio::test] + async fn should_increase_the_udp6_response_counter_when_it_receives_a_udp6_response_event() { + let stats_repository = Repository::new(); + let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); + + handle_event( + Event::UdpResponseSent { + context: ConnectionContext::new( + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 195)), 8080), + ServiceBinding::new( + Protocol::UDP, + SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969), + ) + .unwrap(), + ), + kind: crate::event::UdpResponseKind::Ok { + req_kind: UdpRequestKind::Announce, + }, + req_processing_time: std::time::Duration::from_secs(1), + }, + &stats_repository, + &ban_service, + CurrentClock::now(), + ) + .await; + + let stats = stats_repository.get_stats().await; + + assert_eq!(stats.udp6_responses, 1); + } +} From d9f4c13fa860b835dc2299f9d2688a9467faef73 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 4 Jun 2025 10:22:14 +0100 Subject: [PATCH 683/802] refactor: [#1556] extract functions --- .../src/statistics/event/handler/error.rs | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 
deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/event/handler/error.rs b/packages/udp-tracker-server/src/statistics/event/handler/error.rs index e1023a56b..5cd57e12b 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/error.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/error.rs @@ -18,14 +18,17 @@ pub async fn handle_event( ban_service: &Arc>, now: DurationSinceUnixEpoch, ) { - // Increase the number of errors - // code-review: should we ban IP due to other errors too? if let ErrorKind::ConnectionCookie(_msg) = error { let mut ban_service = ban_service.write().await; ban_service.increase_counter(&context.client_socket_addr().ip()); } - // Global fixed metrics + update_global_fixed_metrics(&context, stats_repository).await; + + update_extendable_metrics(&context, kind, stats_repository, now).await; +} + +async fn update_global_fixed_metrics(context: &ConnectionContext, stats_repository: &Repository) { match context.client_socket_addr().ip() { std::net::IpAddr::V4(_) => { stats_repository.increase_udp4_errors().await; @@ -34,9 +37,15 @@ pub async fn handle_event( stats_repository.increase_udp6_errors().await; } } +} - // Extendable metrics - let mut label_set = LabelSet::from(context); +async fn update_extendable_metrics( + context: &ConnectionContext, + kind: Option, + stats_repository: &Repository, + now: DurationSinceUnixEpoch, +) { + let mut label_set = LabelSet::from(context.clone()); if let Some(kind) = kind { label_set.upsert(label_name!("request_kind"), kind.to_string().into()); } From 7e616d71afe16e82968e56185df45ee695588e8a Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 4 Jun 2025 12:12:05 +0100 Subject: [PATCH 684/802] feat: [#1556] add a new metric to count connection ID errors per clietn software The new metric in Prometheous format: ``` udp_tracker_server_connection_id_errors_total{client_software_name="Transmission",client_software_version="0.12"} 2 ``` --- cSpell.json | 1 + 
packages/udp-tracker-server/src/event.rs | 15 ++- .../src/handlers/announce.rs | 55 +++++---- .../udp-tracker-server/src/handlers/mod.rs | 2 +- .../src/server/processor.rs | 17 ++- .../src/statistics/event/handler/error.rs | 105 ++++++++++++++---- .../event/handler/request_accepted.rs | 11 +- .../statistics/event/handler/response_sent.rs | 17 ++- .../udp-tracker-server/src/statistics/mod.rs | 7 ++ 9 files changed, 168 insertions(+), 62 deletions(-) diff --git a/cSpell.json b/cSpell.json index e384a08d9..fcbf53f1f 100644 --- a/cSpell.json +++ b/cSpell.json @@ -127,6 +127,7 @@ "proto", "Quickstart", "Radeon", + "Rakshasa", "Rasterbar", "realpath", "reannounce", diff --git a/packages/udp-tracker-server/src/event.rs b/packages/udp-tracker-server/src/event.rs index 4fa29940e..152545e6a 100644 --- a/packages/udp-tracker-server/src/event.rs +++ b/packages/udp-tracker-server/src/event.rs @@ -2,6 +2,7 @@ use std::fmt; use std::net::SocketAddr; use std::time::Duration; +use aquatic_udp_protocol::AnnounceRequest; use bittorrent_tracker_core::error::{AnnounceError, ScrapeError}; use bittorrent_udp_tracker_core::services::announce::UdpAnnounceError; use bittorrent_udp_tracker_core::services::scrape::UdpScrapeError; @@ -42,15 +43,25 @@ pub enum Event { #[derive(Debug, PartialEq, Eq, Clone)] pub enum UdpRequestKind { Connect, - Announce, + Announce { announce_request: AnnounceRequest }, Scrape, } +impl From for LabelValue { + fn from(kind: UdpRequestKind) -> Self { + match kind { + UdpRequestKind::Connect => LabelValue::new("connect"), + UdpRequestKind::Announce { .. } => LabelValue::new("announce"), + UdpRequestKind::Scrape => LabelValue::new("scrape"), + } + } +} + impl fmt::Display for UdpRequestKind { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let proto_str = match self { UdpRequestKind::Connect => "connect", - UdpRequestKind::Announce => "announce", + UdpRequestKind::Announce { .. 
} => "announce", UdpRequestKind::Scrape => "scrape", }; write!(f, "{proto_str}") diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 2fc3f6e63..901a1434a 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -44,7 +44,9 @@ pub async fn handle_announce( udp_server_stats_event_sender .send(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), - kind: UdpRequestKind::Announce, + kind: UdpRequestKind::Announce { + announce_request: *request, + }, }) .await; } @@ -52,7 +54,15 @@ pub async fn handle_announce( let announce_data = announce_service .handle_announce(client_socket_addr, server_service_binding, request, cookie_valid_range) .await - .map_err(|e| (e.into(), request.transaction_id, UdpRequestKind::Announce))?; + .map_err(|e| { + ( + e.into(), + request.transaction_id, + UdpRequestKind::Announce { + announce_request: *request, + }, + ) + })?; Ok(build_response(client_socket_addr, request, core_config, &announce_data)) } @@ -118,9 +128,9 @@ fn build_response( } #[cfg(test)] -mod tests { +pub(crate) mod tests { - mod announce_request { + pub mod announce_request { use std::net::Ipv4Addr; use std::num::NonZeroU16; @@ -133,7 +143,7 @@ mod tests { use crate::handlers::tests::{sample_ipv4_remote_addr_fingerprint, sample_issue_time}; - struct AnnounceRequestBuilder { + pub struct AnnounceRequestBuilder { request: AnnounceRequest, } @@ -431,13 +441,14 @@ mod tests { let client_socket_addr = sample_ipv4_socket_address(); let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + let announce_request = AnnounceRequestBuilder::default().into(); let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); 
udp_server_stats_event_sender_mock .expect_send() .with(eq(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), - kind: UdpRequestKind::Announce, + kind: UdpRequestKind::Announce { announce_request }, })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -451,7 +462,7 @@ mod tests { &core_udp_tracker_services.announce_service, client_socket_addr, server_service_binding, - &AnnounceRequestBuilder::default().into(), + &announce_request, &core_tracker_services.core_config, &udp_server_stats_event_sender, sample_cookie_valid_range(), @@ -795,12 +806,16 @@ mod tests { let server_socket_addr = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 203, 0, 113, 196)), 6969); let server_service_binding = ServiceBinding::new(Protocol::UDP, server_socket_addr).unwrap(); + let announce_request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) + .into(); + let mut udp_server_stats_event_sender_mock = MockUdpServerStatsEventSender::new(); udp_server_stats_event_sender_mock .expect_send() .with(eq(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_service_binding.clone()), - kind: UdpRequestKind::Announce, + kind: UdpRequestKind::Announce { announce_request }, })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -810,10 +825,6 @@ mod tests { let (core_tracker_services, core_udp_tracker_services, _server_udp_tracker_services) = initialize_core_tracker_services_for_default_tracker_configuration(); - let announce_request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) - .into(); - handle_announce( &core_udp_tracker_services.announce_service, client_socket_addr, @@ -887,6 +898,14 @@ mod tests { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); let 
db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); + let request = AnnounceRequestBuilder::default() + .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) + .with_info_hash(info_hash) + .with_peer_id(peer_id) + .with_ip_address(client_ip_v4) + .with_port(client_port) + .into(); + let mut udp_core_stats_event_sender_mock = MockUdpCoreStatsEventSender::new(); udp_core_stats_event_sender_mock .expect_send() @@ -912,7 +931,9 @@ mod tests { .expect_send() .with(eq(Event::UdpRequestAccepted { context: ConnectionContext::new(client_socket_addr, server_service_binding_clone.clone()), - kind: UdpRequestKind::Announce, + kind: UdpRequestKind::Announce { + announce_request: request, + }, })) .times(1) .returning(|_| Box::pin(future::ready(Some(Ok(1))))); @@ -926,14 +947,6 @@ mod tests { &db_downloads_metric_repository, )); - let request = AnnounceRequestBuilder::default() - .with_connection_id(make(gen_remote_fingerprint(&client_socket_addr), sample_issue_time()).unwrap()) - .with_info_hash(info_hash) - .with_peer_id(peer_id) - .with_ip_address(client_ip_v4) - .with_port(client_port) - .into(); - let core_config = Arc::new(config.core.clone()); let announce_service = Arc::new(AnnounceService::new( diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index c1125b97f..3c8204bf5 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -177,7 +177,7 @@ pub async fn handle_request( ) .await { - Ok(response) => Ok((response, UdpRequestKind::Announce)), + Ok(response) => Ok((response, UdpRequestKind::Announce { announce_request })), Err(err) => Err(err), } } diff --git a/packages/udp-tracker-server/src/server/processor.rs b/packages/udp-tracker-server/src/server/processor.rs index 6b877f85b..dd6ba633d 100644 --- a/packages/udp-tracker-server/src/server/processor.rs +++ 
b/packages/udp-tracker-server/src/server/processor.rs @@ -87,16 +87,15 @@ impl Processor { }; let udp_response_kind = match &response { - Response::Connect(_) => event::UdpResponseKind::Ok { - req_kind: event::UdpRequestKind::Connect, - }, - Response::AnnounceIpv4(_) | Response::AnnounceIpv6(_) => event::UdpResponseKind::Ok { - req_kind: event::UdpRequestKind::Announce, - }, - Response::Scrape(_) => event::UdpResponseKind::Ok { - req_kind: event::UdpRequestKind::Scrape, - }, Response::Error(_e) => event::UdpResponseKind::Error { opt_req_kind: None }, + _ => { + if let Some(req_kind) = opt_req_kind { + event::UdpResponseKind::Ok { req_kind } + } else { + // code-review: this case should never happen. + event::UdpResponseKind::Error { opt_req_kind } + } + } }; let mut writer = Cursor::new(Vec::with_capacity(200)); diff --git a/packages/udp-tracker-server/src/statistics/event/handler/error.rs b/packages/udp-tracker-server/src/statistics/event/handler/error.rs index 5cd57e12b..7327386a3 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/error.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/error.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use aquatic_udp_protocol::PeerClient; use bittorrent_udp_tracker_core::services::banning::BanService; use tokio::sync::RwLock; use torrust_tracker_metrics::label::LabelSet; @@ -8,54 +9,118 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::{ConnectionContext, ErrorKind, UdpRequestKind}; use crate::statistics::repository::Repository; -use crate::statistics::UDP_TRACKER_SERVER_ERRORS_TOTAL; +use crate::statistics::{UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL, UDP_TRACKER_SERVER_ERRORS_TOTAL}; pub async fn handle_event( - context: ConnectionContext, - kind: Option, - error: ErrorKind, - stats_repository: &Repository, + connection_context: ConnectionContext, + opt_udp_request_kind: Option, + error_kind: ErrorKind, + repository: &Repository, ban_service: &Arc>, now: 
DurationSinceUnixEpoch, ) { - if let ErrorKind::ConnectionCookie(_msg) = error { + if let ErrorKind::ConnectionCookie(_msg) = error_kind.clone() { let mut ban_service = ban_service.write().await; - ban_service.increase_counter(&context.client_socket_addr().ip()); + ban_service.increase_counter(&connection_context.client_socket_addr().ip()); } - update_global_fixed_metrics(&context, stats_repository).await; + update_global_fixed_metrics(&connection_context, repository).await; - update_extendable_metrics(&context, kind, stats_repository, now).await; + update_extendable_metrics(&connection_context, opt_udp_request_kind, error_kind, repository, now).await; } -async fn update_global_fixed_metrics(context: &ConnectionContext, stats_repository: &Repository) { - match context.client_socket_addr().ip() { +async fn update_global_fixed_metrics(connection_context: &ConnectionContext, repository: &Repository) { + match connection_context.client_socket_addr().ip() { std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_errors().await; + repository.increase_udp4_errors().await; } std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_errors().await; + repository.increase_udp6_errors().await; } } } async fn update_extendable_metrics( - context: &ConnectionContext, - kind: Option, - stats_repository: &Repository, + connection_context: &ConnectionContext, + opt_udp_request_kind: Option, + error_kind: ErrorKind, + repository: &Repository, now: DurationSinceUnixEpoch, ) { - let mut label_set = LabelSet::from(context.clone()); - if let Some(kind) = kind { + update_all_errors_counter(connection_context, opt_udp_request_kind.clone(), repository, now).await; + update_connection_id_errors_counter(opt_udp_request_kind, error_kind, repository, now).await; +} + +async fn update_all_errors_counter( + connection_context: &ConnectionContext, + opt_udp_request_kind: Option, + repository: &Repository, + now: DurationSinceUnixEpoch, +) { + let mut label_set = 
LabelSet::from(connection_context.clone()); + + if let Some(kind) = opt_udp_request_kind.clone() { label_set.upsert(label_name!("request_kind"), kind.to_string().into()); } - match stats_repository + + match repository .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &label_set, now) .await { Ok(()) => {} Err(err) => tracing::error!("Failed to increase the counter: {}", err), - }; + } +} + +async fn update_connection_id_errors_counter( + opt_udp_request_kind: Option, + error_kind: ErrorKind, + repository: &Repository, + now: DurationSinceUnixEpoch, +) { + if let ErrorKind::ConnectionCookie(_) = error_kind { + if let Some(UdpRequestKind::Announce { announce_request }) = opt_udp_request_kind { + let (client_software_name, client_software_version) = extract_name_and_version(&announce_request.peer_id.client()); + + let label_set = LabelSet::from([ + (label_name!("client_software_name"), client_software_name.into()), + (label_name!("client_software_version"), client_software_version.into()), + ]); + + match repository + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL), &label_set, now) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), + }; + } + } +} + +fn extract_name_and_version(peer_client: &PeerClient) -> (String, String) { + match peer_client { + PeerClient::BitTorrent(compact_string) => ("BitTorrent".to_string(), compact_string.as_str().to_owned()), + PeerClient::Deluge(compact_string) => ("Deluge".to_string(), compact_string.as_str().to_owned()), + PeerClient::LibTorrentRakshasa(compact_string) => ("lt (rakshasa)".to_string(), compact_string.as_str().to_owned()), + PeerClient::LibTorrentRasterbar(compact_string) => ("lt (rasterbar)".to_string(), compact_string.as_str().to_owned()), + PeerClient::QBitTorrent(compact_string) => ("QBitTorrent".to_string(), compact_string.as_str().to_owned()), + PeerClient::Transmission(compact_string) => ("Transmission".to_string(), 
compact_string.as_str().to_owned()), + PeerClient::UTorrent(compact_string) => ("µTorrent".to_string(), compact_string.as_str().to_owned()), + PeerClient::UTorrentEmbedded(compact_string) => ("µTorrent Emb.".to_string(), compact_string.as_str().to_owned()), + PeerClient::UTorrentMac(compact_string) => ("µTorrent Mac".to_string(), compact_string.as_str().to_owned()), + PeerClient::UTorrentWeb(compact_string) => ("µTorrent Web".to_string(), compact_string.as_str().to_owned()), + PeerClient::Vuze(compact_string) => ("Vuze".to_string(), compact_string.as_str().to_owned()), + PeerClient::WebTorrent(compact_string) => ("WebTorrent".to_string(), compact_string.as_str().to_owned()), + PeerClient::WebTorrentDesktop(compact_string) => ("WebTorrent Desktop".to_string(), compact_string.as_str().to_owned()), + PeerClient::Mainline(compact_string) => ("Mainline".to_string(), compact_string.as_str().to_owned()), + PeerClient::OtherWithPrefixAndVersion { prefix, version } => { + (format!("Other ({})", prefix.as_str()), version.as_str().to_owned()) + } + PeerClient::OtherWithPrefix(compact_string) => (format!("Other ({compact_string})"), String::new()), + PeerClient::Other => ("Other".to_string(), String::new()), + _ => ("Unknown".to_string(), String::new()), + } } #[cfg(test)] diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs index 25c1311e5..0007a18b0 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs @@ -22,7 +22,7 @@ pub async fn handle_event( stats_repository.increase_udp6_connections().await; } }, - UdpRequestKind::Announce => match context.client_socket_addr().ip() { + UdpRequestKind::Announce { .. 
} => match context.client_socket_addr().ip() { std::net::IpAddr::V4(_) => { stats_repository.increase_udp4_announces().await; } @@ -62,6 +62,7 @@ mod tests { use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; use crate::event::{ConnectionContext, Event}; + use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::statistics::event::handler::handle_event; use crate::statistics::repository::Repository; use crate::CurrentClock; @@ -109,7 +110,9 @@ mod tests { ) .unwrap(), ), - kind: crate::event::UdpRequestKind::Announce, + kind: crate::event::UdpRequestKind::Announce { + announce_request: AnnounceRequestBuilder::default().into(), + }, }, &stats_repository, &ban_service, @@ -193,7 +196,9 @@ mod tests { ) .unwrap(), ), - kind: crate::event::UdpRequestKind::Announce, + kind: crate::event::UdpRequestKind::Announce { + announce_request: AnnounceRequestBuilder::default().into(), + }, }, &stats_repository, &ban_service, diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs index a69184e08..0038ac5f9 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -43,9 +43,9 @@ pub async fn handle_event( Ok(()) => {} Err(err) => tracing::error!("Failed to set gauge: {}", err), } - (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Connect.to_string())) + (LabelValue::new("ok"), UdpRequestKind::Connect.into()) } - UdpRequestKind::Announce => { + UdpRequestKind::Announce { announce_request } => { let new_avg = stats_repository .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) .await; @@ -63,7 +63,7 @@ pub async fn handle_event( Ok(()) => {} Err(err) => tracing::error!("Failed to set gauge: {}", err), } - (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Announce.to_string())) 
+ (LabelValue::new("ok"), UdpRequestKind::Announce { announce_request }.into()) } UdpRequestKind::Scrape => { let new_avg = stats_repository @@ -113,7 +113,8 @@ mod tests { use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; - use crate::event::{ConnectionContext, Event, UdpRequestKind}; + use crate::event::{ConnectionContext, Event}; + use crate::handlers::announce::tests::announce_request::AnnounceRequestBuilder; use crate::statistics::event::handler::handle_event; use crate::statistics::repository::Repository; use crate::CurrentClock; @@ -134,7 +135,9 @@ mod tests { .unwrap(), ), kind: crate::event::UdpResponseKind::Ok { - req_kind: UdpRequestKind::Announce, + req_kind: crate::event::UdpRequestKind::Announce { + announce_request: AnnounceRequestBuilder::default().into(), + }, }, req_processing_time: std::time::Duration::from_secs(1), }, @@ -165,7 +168,9 @@ mod tests { .unwrap(), ), kind: crate::event::UdpResponseKind::Ok { - req_kind: UdpRequestKind::Announce, + req_kind: crate::event::UdpRequestKind::Announce { + announce_request: AnnounceRequestBuilder::default().into(), + }, }, req_processing_time: std::time::Duration::from_secs(1), }, diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index 8f6e9becf..5c30a9abc 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -10,6 +10,7 @@ use torrust_tracker_metrics::unit::Unit; const UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL: &str = "udp_tracker_server_requests_aborted_total"; const UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL: &str = "udp_tracker_server_requests_banned_total"; +const UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL: &str = "udp_tracker_server_connection_id_errors_total"; const UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL: &str = "udp_tracker_server_requests_received_total"; const 
UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL: &str = "udp_tracker_server_requests_accepted_total"; const UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL: &str = "udp_tracker_server_responses_sent_total"; @@ -32,6 +33,12 @@ pub fn describe_metrics() -> Metrics { Some(&MetricDescription::new("Total number of UDP requests banned")), ); + metrics.metric_collection.describe_counter( + &metric_name!(UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL), + Some(Unit::Count), + Some(&MetricDescription::new("Total number of requests with connection ID errors")), + ); + metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), Some(Unit::Count), From d4c43bd3a5bc75704d8b8a5b4641f273968aceb4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 4 Jun 2025 18:46:23 +0100 Subject: [PATCH 685/802] feat: [#1375] add new metric label server_binding_address_type - Label name: `server_binding_address_type` - Label values: `plain`, `v4_mapped_v6` Usage example in Prometheous format: ``` udp_tracker_server_requests_accepted_total{request_kind="connect",server_binding_address_type="plain",server_binding_ip="0.0.0.0",server_binding_port="6969",server_binding_protocol="udp"} 1 ``` Example of IPv4-mapped-IPv6 IP: `[::ffff:192.0.2.33]` --- packages/http-tracker-core/src/event.rs | 4 ++ packages/primitives/src/service_binding.rs | 68 +++++++++++++++++++++- packages/udp-tracker-core/src/event.rs | 4 ++ packages/udp-tracker-server/src/event.rs | 4 ++ 4 files changed, 79 insertions(+), 1 deletion(-) diff --git a/packages/http-tracker-core/src/event.rs b/packages/http-tracker-core/src/event.rs index 681f4bbfe..cf969b4ff 100644 --- a/packages/http-tracker-core/src/event.rs +++ b/packages/http-tracker-core/src/event.rs @@ -86,6 +86,10 @@ impl From for LabelSet { label_name!("server_binding_ip"), LabelValue::new(&connection_context.server.service_binding.bind_address().ip().to_string()), ), + ( + label_name!("server_binding_address_type"), + 
LabelValue::new(&connection_context.server.service_binding.bind_address_type().to_string()), + ), ( label_name!("server_binding_port"), LabelValue::new(&connection_context.server.service_binding.bind_address().port().to_string()), diff --git a/packages/primitives/src/service_binding.rs b/packages/primitives/src/service_binding.rs index 30eb1aa9e..d5055130e 100644 --- a/packages/primitives/src/service_binding.rs +++ b/packages/primitives/src/service_binding.rs @@ -4,6 +4,8 @@ use std::net::SocketAddr; use serde::{Deserialize, Serialize}; use url::Url; +const DUAL_STACK_IP_V4_MAPPED_V6_PREFIX: &str = "::ffff:"; + /// Represents the supported network protocols. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] pub enum Protocol { @@ -23,6 +25,29 @@ impl fmt::Display for Protocol { } } +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] +pub enum AddressType { + /// Represents a plain IPv4 or IPv6 address. + Plain, + + /// Represents an IPv6 address that is a mapped IPv4 address. + /// + /// This is used for IPv6 addresses that represent an IPv4 address in a dual-stack network. + /// + /// For example: `[::ffff:192.0.2.33]` + V4MappedV6, +} + +impl fmt::Display for AddressType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let addr_type_str = match self { + Self::Plain => "plain", + Self::V4MappedV6 => "v4_mapped_v6", + }; + write!(f, "{addr_type_str}") + } +} + #[derive(thiserror::Error, Debug, Clone)] pub enum Error { #[error("The port number cannot be zero. It must be an assigned valid port.")] @@ -94,6 +119,15 @@ impl ServiceBinding { self.bind_address } + #[must_use] + pub fn bind_address_type(&self) -> AddressType { + if self.is_v4_mapped_v6() { + return AddressType::V4MappedV6; + } + + AddressType::Plain + } + /// # Panics /// /// It never panics because the URL is always valid. 
@@ -102,6 +136,15 @@ impl ServiceBinding { Url::parse(&format!("{}://{}", self.protocol, self.bind_address)) .expect("Service binding can always be parsed into a URL") } + + fn is_v4_mapped_v6(&self) -> bool { + self.bind_address.ip().is_ipv6() + && self + .bind_address + .ip() + .to_string() + .starts_with(DUAL_STACK_IP_V4_MAPPED_V6_PREFIX) + } } impl From for Url { @@ -126,7 +169,7 @@ mod tests { use rstest::rstest; use url::Url; - use crate::service_binding::{Error, Protocol, ServiceBinding}; + use crate::service_binding::{AddressType, Error, Protocol, ServiceBinding}; #[rstest] #[case("wildcard_ip", Protocol::UDP, SocketAddr::from_str("0.0.0.0:6969").unwrap())] @@ -156,6 +199,29 @@ mod tests { ); } + #[test] + fn should_return_the_bind_address_plain_type_for_ipv4_ips() { + let service_binding = ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("127.0.0.1:6969").unwrap()).unwrap(); + + assert_eq!(service_binding.bind_address_type(), AddressType::Plain); + } + + #[test] + fn should_return_the_bind_address_plain_type_for_ipv6_ips() { + let service_binding = + ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("[0:0:0:0:0:0:0:1]:6969").unwrap()).unwrap(); + + assert_eq!(service_binding.bind_address_type(), AddressType::Plain); + } + + #[test] + fn should_return_the_bind_address_v4_mapped_v7_type_for_ipv4_ips_mapped_to_ipv6() { + let service_binding = + ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("[::ffff:192.0.2.33]:6969").unwrap()).unwrap(); + + assert_eq!(service_binding.bind_address_type(), AddressType::V4MappedV6); + } + #[test] fn should_return_the_corresponding_url() { let service_binding = ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("127.0.0.1:6969").unwrap()).unwrap(); diff --git a/packages/udp-tracker-core/src/event.rs b/packages/udp-tracker-core/src/event.rs index 14a4dbfb3..e9264653e 100644 --- a/packages/udp-tracker-core/src/event.rs +++ b/packages/udp-tracker-core/src/event.rs @@ -59,6 +59,10 @@ impl From for LabelSet 
{ label_name!("server_binding_ip"), LabelValue::new(&connection_context.server_service_binding.bind_address().ip().to_string()), ), + ( + label_name!("server_binding_address_type"), + LabelValue::new(&connection_context.server_service_binding.bind_address_type().to_string()), + ), ( label_name!("server_binding_port"), LabelValue::new(&connection_context.server_service_binding.bind_address().port().to_string()), diff --git a/packages/udp-tracker-server/src/event.rs b/packages/udp-tracker-server/src/event.rs index 152545e6a..09fc139cb 100644 --- a/packages/udp-tracker-server/src/event.rs +++ b/packages/udp-tracker-server/src/event.rs @@ -118,6 +118,10 @@ impl From for LabelSet { label_name!("server_binding_ip"), LabelValue::new(&connection_context.server_service_binding.bind_address().ip().to_string()), ), + ( + label_name!("server_binding_address_type"), + LabelValue::new(&connection_context.server_service_binding.bind_address_type().to_string()), + ), ( label_name!("server_binding_port"), LabelValue::new(&connection_context.server_service_binding.bind_address().port().to_string()), From 552697b452219100f345bc2696fb760f8f68fd15 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 5 Jun 2025 12:06:24 +0100 Subject: [PATCH 686/802] feat!: [#1514] rename ffield kind to type in JSON metrics "kind" has been renamed to "type" to follow Prometheus name. 
```json { "type": "counter", "name": "http_tracker_core_requests_received_total", "samples": [] } ``` --- packages/metrics/src/metric_collection.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 824397000..4038497d1 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -234,7 +234,7 @@ impl Serialize for MetricCollection { S: Serializer, { #[derive(Serialize)] - #[serde(tag = "kind", rename_all = "lowercase")] + #[serde(tag = "type", rename_all = "lowercase")] enum SerializableMetric<'a> { Counter(&'a Metric), Gauge(&'a Metric), @@ -260,7 +260,7 @@ impl<'de> Deserialize<'de> for MetricCollection { D: Deserializer<'de>, { #[derive(Deserialize)] - #[serde(tag = "kind", rename_all = "lowercase")] + #[serde(tag = "type", rename_all = "lowercase")] enum MetricPayload { Counter(Metric), Gauge(Metric), @@ -540,7 +540,7 @@ mod tests { r#" [ { - "kind":"counter", + "type":"counter", "name":"http_tracker_core_announce_requests_received_total", "samples":[ { @@ -564,7 +564,7 @@ mod tests { ] }, { - "kind":"gauge", + "type":"gauge", "name":"udp_tracker_server_performance_avg_announce_processing_time_ns", "samples":[ { From 2ee3111deebc2970784b65a73d5551d38ec6ec77 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 5 Jun 2025 12:49:09 +0100 Subject: [PATCH 687/802] refactor: [#1514] ensure_metric_exists method to pass the whole metric This will allow to inject also the metric unit and description. 
--- packages/metrics/src/metric/mod.rs | 11 ++++ packages/metrics/src/metric_collection.rs | 64 ++++++++++++++--------- 2 files changed, 51 insertions(+), 24 deletions(-) diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index 2118637b8..14704925c 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -30,6 +30,17 @@ impl Metric { } } + /// # Panics + /// + /// This function will panic if the empty sample collection cannot be created. + #[must_use] + pub fn without_samples(name: MetricName) -> Self { + Self { + name, + sample_collection: SampleCollection::new(vec![]).expect("Empty sample collection creation should not fail"), + } + } + #[must_use] pub fn name(&self) -> &MetricName { &self.name diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 4038497d1..d10bcfd7c 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -10,7 +10,6 @@ use super::label::LabelSet; use super::metric::{Metric, MetricName}; use super::prometheus::PrometheusSerializable; use crate::metric::description::MetricDescription; -use crate::sample_collection::SampleCollection; use crate::unit::Unit; use crate::METRICS_TARGET; @@ -59,7 +58,10 @@ impl MetricCollection { pub fn describe_counter(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option<&MetricDescription>) { tracing::info!(target: METRICS_TARGET, type = "counter", name = name.to_string(), unit = ?opt_unit, description = ?opt_description); - self.counters.ensure_metric_exists(name); + + let metric = Metric::::without_samples(name.clone()); + + self.counters.ensure_metric_exists(metric); } #[must_use] @@ -120,14 +122,19 @@ impl MetricCollection { } pub fn ensure_counter_exists(&mut self, name: &MetricName) { - self.counters.ensure_metric_exists(name); + let metric = Metric::::without_samples(name.clone()); + + 
self.counters.ensure_metric_exists(metric); } // Gauge-specific methods pub fn describe_gauge(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option<&MetricDescription>) { tracing::info!(target: METRICS_TARGET, type = "gauge", name = name.to_string(), unit = ?opt_unit, description = ?opt_description); - self.gauges.ensure_metric_exists(name); + + let metric = Metric::::without_samples(name.clone()); + + self.gauges.ensure_metric_exists(metric); } #[must_use] @@ -205,7 +212,9 @@ impl MetricCollection { } pub fn ensure_gauge_exists(&mut self, name: &MetricName) { - self.gauges.ensure_metric_exists(name); + let metric = Metric::::without_samples(name.clone()); + + self.gauges.ensure_metric_exists(metric); } } @@ -336,18 +345,9 @@ impl MetricKindCollection { self.metrics.keys() } - /// # Panics - /// - /// It should not panic as long as empty sample collections are allowed. - pub fn ensure_metric_exists(&mut self, name: &MetricName) { - if !self.metrics.contains_key(name) { - self.metrics.insert( - name.clone(), - Metric::new( - name.clone(), - SampleCollection::new(vec![]).expect("Empty sample collection creation should not fail"), - ), - ); + pub fn ensure_metric_exists(&mut self, metric: Metric) { + if !self.metrics.contains_key(metric.name()) { + self.metrics.insert(metric.name().clone(), metric); } } } @@ -389,7 +389,9 @@ impl MetricKindCollection { /// /// Panics if the metric does not exist. pub fn increment(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { - self.ensure_metric_exists(name); + let metric = Metric::::without_samples(name.clone()); + + self.ensure_metric_exists(metric); let metric = self.metrics.get_mut(name).expect("Counter metric should exist"); @@ -404,7 +406,9 @@ impl MetricKindCollection { /// /// Panics if the metric does not exist. 
pub fn absolute(&mut self, name: &MetricName, label_set: &LabelSet, value: u64, time: DurationSinceUnixEpoch) { - self.ensure_metric_exists(name); + let metric = Metric::::without_samples(name.clone()); + + self.ensure_metric_exists(metric); let metric = self.metrics.get_mut(name).expect("Counter metric should exist"); @@ -429,7 +433,9 @@ impl MetricKindCollection { /// /// Panics if the metric does not exist and it could not be created. pub fn set(&mut self, name: &MetricName, label_set: &LabelSet, value: f64, time: DurationSinceUnixEpoch) { - self.ensure_metric_exists(name); + let metric = Metric::::without_samples(name.clone()); + + self.ensure_metric_exists(metric); let metric = self.metrics.get_mut(name).expect("Gauge metric should exist"); @@ -444,7 +450,9 @@ impl MetricKindCollection { /// /// Panics if the metric does not exist and it could not be created. pub fn increment(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { - self.ensure_metric_exists(name); + let metric = Metric::::without_samples(name.clone()); + + self.ensure_metric_exists(metric); let metric = self.metrics.get_mut(name).expect("Gauge metric should exist"); @@ -459,7 +467,9 @@ impl MetricKindCollection { /// /// Panics if the metric does not exist and it could not be created. 
pub fn decrement(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { - self.ensure_metric_exists(name); + let metric = Metric::::without_samples(name.clone()); + + self.ensure_metric_exists(metric); let metric = self.metrics.get_mut(name).expect("Gauge metric should exist"); @@ -483,6 +493,7 @@ mod tests { use super::*; use crate::label::LabelValue; use crate::sample::Sample; + use crate::sample_collection::SampleCollection; use crate::tests::{format_prometheus_output, sort_lines}; use crate::{label_name, metric_name}; @@ -731,8 +742,11 @@ mod tests { let mut counters = MetricKindCollection::default(); let mut gauges = MetricKindCollection::default(); - counters.ensure_metric_exists(&metric_name!("test_counter")); - gauges.ensure_metric_exists(&metric_name!("test_gauge")); + let counter = Metric::::without_samples(metric_name!("test_counter")); + counters.ensure_metric_exists(counter); + + let gauge = Metric::::without_samples(metric_name!("test_gauge")); + gauges.ensure_metric_exists(gauge); let metric_collection = MetricCollection::new(counters, gauges).unwrap(); @@ -748,6 +762,7 @@ mod tests { use super::*; use crate::label::LabelValue; use crate::sample::Sample; + use crate::sample_collection::SampleCollection; #[test] fn it_should_increase_a_preexistent_counter() { @@ -845,6 +860,7 @@ mod tests { use super::*; use crate::label::LabelValue; use crate::sample::Sample; + use crate::sample_collection::SampleCollection; #[test] fn it_should_set_a_preexistent_gauge() { From 031bf65fe7b89ca85409a9c156017df100d48c2e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 5 Jun 2025 12:52:31 +0100 Subject: [PATCH 688/802] refactor: [#1514] remove unused code --- packages/metrics/src/metric_collection.rs | 32 ----------------------- 1 file changed, 32 deletions(-) diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index d10bcfd7c..b9e397e5b 100644 --- 
a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -121,12 +121,6 @@ impl MetricCollection { Ok(()) } - pub fn ensure_counter_exists(&mut self, name: &MetricName) { - let metric = Metric::::without_samples(name.clone()); - - self.counters.ensure_metric_exists(metric); - } - // Gauge-specific methods pub fn describe_gauge(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option<&MetricDescription>) { @@ -210,12 +204,6 @@ impl MetricCollection { Ok(()) } - - pub fn ensure_gauge_exists(&mut self, name: &MetricName) { - let metric = Metric::::without_samples(name.clone()); - - self.gauges.ensure_metric_exists(metric); - } } #[derive(thiserror::Error, Debug, Clone)] @@ -813,16 +801,6 @@ mod tests { ); } - #[test] - fn it_should_allow_making_sure_a_counter_exists_without_increasing_it() { - let mut metric_collection = - MetricCollection::new(MetricKindCollection::default(), MetricKindCollection::default()).unwrap(); - - metric_collection.ensure_counter_exists(&metric_name!("test_counter")); - - assert!(metric_collection.contains_counter(&metric_name!("test_counter"))); - } - #[test] fn it_should_allow_describing_a_counter_before_using_it() { let mut metric_collection = @@ -905,16 +883,6 @@ mod tests { ); } - #[test] - fn it_should_allow_making_sure_a_gauge_exists_without_setting_it() { - let mut metric_collection = - MetricCollection::new(MetricKindCollection::default(), MetricKindCollection::default()).unwrap(); - - metric_collection.ensure_gauge_exists(&metric_name!("test_gauge")); - - assert!(metric_collection.contains_gauge(&metric_name!("test_gauge"))); - } - #[test] fn it_should_allow_describing_a_gauge_before_using_it() { let mut metric_collection = From 458497b6460e9069d4e7fdaf27da43864cf0ed2e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 5 Jun 2025 16:04:22 +0100 Subject: [PATCH 689/802] feat: [#1514] add unit and description to metrics It's also shown in the JSON export format. 
```json { "metrics": [ { "type": "counter", "name": "torrent_repository_torrents_downloads_total", "unit": "count", "description": "The total number of torrent downloads.", "samples": [] } } ``` todo: show them in the Prometheus export format. --- .../http-tracker-core/src/statistics/mod.rs | 2 +- packages/metrics/src/metric/mod.rs | 35 ++++++-- packages/metrics/src/metric_collection.rs | 89 +++++++++++++------ packages/metrics/src/unit.rs | 6 +- .../src/statistics/mod.rs | 22 ++--- packages/tracker-core/src/statistics/mod.rs | 2 +- .../udp-tracker-core/src/statistics/mod.rs | 2 +- .../udp-tracker-server/src/statistics/mod.rs | 16 ++-- 8 files changed, 116 insertions(+), 58 deletions(-) diff --git a/packages/http-tracker-core/src/statistics/mod.rs b/packages/http-tracker-core/src/statistics/mod.rs index f949babbd..7181632aa 100644 --- a/packages/http-tracker-core/src/statistics/mod.rs +++ b/packages/http-tracker-core/src/statistics/mod.rs @@ -17,7 +17,7 @@ pub fn describe_metrics() -> Metrics { metrics.metric_collection.describe_counter( &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("Total number of HTTP requests received")), + Some(MetricDescription::new("Total number of HTTP requests received")), ); metrics diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index 14704925c..eff2c7a5f 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -9,7 +9,9 @@ use super::label::LabelSet; use super::prometheus::PrometheusSerializable; use super::sample_collection::SampleCollection; use crate::gauge::Gauge; +use crate::metric::description::MetricDescription; use crate::sample::Measurement; +use crate::unit::Unit; pub type MetricName = name::MetricName; @@ -17,15 +19,28 @@ pub type MetricName = name::MetricName; pub struct Metric { name: MetricName, + #[serde(rename = "unit")] + opt_unit: Option, + + #[serde(rename = "description")] + 
opt_description: Option, + #[serde(rename = "samples")] sample_collection: SampleCollection, } impl Metric { #[must_use] - pub fn new(name: MetricName, samples: SampleCollection) -> Self { + pub fn new( + name: MetricName, + opt_unit: Option, + opt_description: Option, + samples: SampleCollection, + ) -> Self { Self { name, + opt_unit, + opt_description, sample_collection: samples, } } @@ -34,9 +49,11 @@ impl Metric { /// /// This function will panic if the empty sample collection cannot be created. #[must_use] - pub fn without_samples(name: MetricName) -> Self { + pub fn new_empty_with_name(name: MetricName) -> Self { Self { name, + opt_unit: None, + opt_description: None, sample_collection: SampleCollection::new(vec![]).expect("Empty sample collection creation should not fail"), } } @@ -119,7 +136,7 @@ mod tests { let samples = SampleCollection::::default(); - let metric = Metric::::new(name.clone(), samples); + let metric = Metric::::new(name.clone(), None, None, samples); assert!(metric.is_empty()); } @@ -133,7 +150,7 @@ mod tests { let samples = SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set.clone())]).unwrap(); - Metric::::new(name.clone(), samples) + Metric::::new(name.clone(), None, None, samples) } #[test] @@ -147,7 +164,7 @@ mod tests { let samples = SampleCollection::::default(); - let metric = Metric::::new(name.clone(), samples); + let metric = Metric::::new(name.clone(), None, None, samples); assert_eq!(metric.number_of_samples(), 0); } @@ -166,7 +183,7 @@ mod tests { let samples = SampleCollection::::default(); - let _metric = Metric::::new(name, samples); + let _metric = Metric::::new(name, None, None, samples); } #[test] @@ -179,7 +196,7 @@ mod tests { let samples = SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set.clone())]).unwrap(); - let metric = Metric::::new(name.clone(), samples); + let metric = Metric::::new(name.clone(), None, None, samples); 
assert_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 1); } @@ -200,7 +217,7 @@ mod tests { let samples = SampleCollection::::default(); - let _metric = Metric::::new(name, samples); + let _metric = Metric::::new(name, None, None, samples); } #[test] @@ -213,7 +230,7 @@ mod tests { let samples = SampleCollection::new(vec![Sample::new(Gauge::new(1.0), time, label_set.clone())]).unwrap(); - let metric = Metric::::new(name.clone(), samples); + let metric = Metric::::new(name.clone(), None, None, samples); assert_relative_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 1.0); } diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index b9e397e5b..59c0448af 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -10,6 +10,7 @@ use super::label::LabelSet; use super::metric::{Metric, MetricName}; use super::prometheus::PrometheusSerializable; use crate::metric::description::MetricDescription; +use crate::sample_collection::SampleCollection; use crate::unit::Unit; use crate::METRICS_TARGET; @@ -56,12 +57,12 @@ impl MetricCollection { // Counter-specific methods - pub fn describe_counter(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option<&MetricDescription>) { + pub fn describe_counter(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option) { tracing::info!(target: METRICS_TARGET, type = "counter", name = name.to_string(), unit = ?opt_unit, description = ?opt_description); - let metric = Metric::::without_samples(name.clone()); + let metric = Metric::::new(name.clone(), opt_unit, opt_description, SampleCollection::default()); - self.counters.ensure_metric_exists(metric); + self.counters.insert(metric); } #[must_use] @@ -123,12 +124,12 @@ impl MetricCollection { // Gauge-specific methods - pub fn describe_gauge(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option<&MetricDescription>) { + pub 
fn describe_gauge(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option) { tracing::info!(target: METRICS_TARGET, type = "gauge", name = name.to_string(), unit = ?opt_unit, description = ?opt_description); - let metric = Metric::::without_samples(name.clone()); + let metric = Metric::::new(name.clone(), opt_unit, opt_description, SampleCollection::default()); - self.gauges.ensure_metric_exists(metric); + self.gauges.insert(metric); } #[must_use] @@ -333,11 +334,15 @@ impl MetricKindCollection { self.metrics.keys() } - pub fn ensure_metric_exists(&mut self, metric: Metric) { + pub fn insert_if_absent(&mut self, metric: Metric) { if !self.metrics.contains_key(metric.name()) { - self.metrics.insert(metric.name().clone(), metric); + self.insert(metric); } } + + pub fn insert(&mut self, metric: Metric) { + self.metrics.insert(metric.name().clone(), metric); + } } impl MetricKindCollection { @@ -377,9 +382,9 @@ impl MetricKindCollection { /// /// Panics if the metric does not exist. pub fn increment(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { - let metric = Metric::::without_samples(name.clone()); + let metric = Metric::::new_empty_with_name(name.clone()); - self.ensure_metric_exists(metric); + self.insert_if_absent(metric); let metric = self.metrics.get_mut(name).expect("Counter metric should exist"); @@ -394,9 +399,9 @@ impl MetricKindCollection { /// /// Panics if the metric does not exist. pub fn absolute(&mut self, name: &MetricName, label_set: &LabelSet, value: u64, time: DurationSinceUnixEpoch) { - let metric = Metric::::without_samples(name.clone()); + let metric = Metric::::new_empty_with_name(name.clone()); - self.ensure_metric_exists(metric); + self.insert_if_absent(metric); let metric = self.metrics.get_mut(name).expect("Counter metric should exist"); @@ -421,9 +426,9 @@ impl MetricKindCollection { /// /// Panics if the metric does not exist and it could not be created. 
pub fn set(&mut self, name: &MetricName, label_set: &LabelSet, value: f64, time: DurationSinceUnixEpoch) { - let metric = Metric::::without_samples(name.clone()); + let metric = Metric::::new_empty_with_name(name.clone()); - self.ensure_metric_exists(metric); + self.insert_if_absent(metric); let metric = self.metrics.get_mut(name).expect("Gauge metric should exist"); @@ -438,9 +443,9 @@ impl MetricKindCollection { /// /// Panics if the metric does not exist and it could not be created. pub fn increment(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { - let metric = Metric::::without_samples(name.clone()); + let metric = Metric::::new_empty_with_name(name.clone()); - self.ensure_metric_exists(metric); + self.insert_if_absent(metric); let metric = self.metrics.get_mut(name).expect("Gauge metric should exist"); @@ -455,9 +460,9 @@ impl MetricKindCollection { /// /// Panics if the metric does not exist and it could not be created. pub fn decrement(&mut self, name: &MetricName, label_set: &LabelSet, time: DurationSinceUnixEpoch) { - let metric = Metric::::without_samples(name.clone()); + let metric = Metric::::new_empty_with_name(name.clone()); - self.ensure_metric_exists(metric); + self.insert_if_absent(metric); let metric = self.metrics.get_mut(name).expect("Gauge metric should exist"); @@ -523,11 +528,15 @@ mod tests { MetricCollection::new( MetricKindCollection::new(vec![Metric::new( metric_name!("http_tracker_core_announce_requests_received_total"), + None, + None, SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set_1.clone())]).unwrap(), )]) .unwrap(), MetricKindCollection::new(vec![Metric::new( metric_name!("udp_tracker_server_performance_avg_announce_processing_time_ns"), + None, + None, SampleCollection::new(vec![Sample::new(Gauge::new(1.0), time, label_set_1.clone())]).unwrap(), )]) .unwrap(), @@ -541,6 +550,8 @@ mod tests { { "type":"counter", "name":"http_tracker_core_announce_requests_received_total", + 
"unit": null, + "description": null, "samples":[ { "value":1, @@ -565,6 +576,8 @@ mod tests { { "type":"gauge", "name":"udp_tracker_server_performance_avg_announce_processing_time_ns", + "unit": null, + "description": null, "samples":[ { "value":1.0, @@ -603,10 +616,20 @@ mod tests { #[test] fn it_should_not_allow_duplicate_names_across_types() { - let counters = - MetricKindCollection::new(vec![Metric::new(metric_name!("test_metric"), SampleCollection::default())]).unwrap(); - let gauges = - MetricKindCollection::new(vec![Metric::new(metric_name!("test_metric"), SampleCollection::default())]).unwrap(); + let counters = MetricKindCollection::new(vec![Metric::new( + metric_name!("test_metric"), + None, + None, + SampleCollection::default(), + )]) + .unwrap(); + let gauges = MetricKindCollection::new(vec![Metric::new( + metric_name!("test_metric"), + None, + None, + SampleCollection::default(), + )]) + .unwrap(); assert!(MetricCollection::new(counters, gauges).is_err()); } @@ -699,6 +722,8 @@ mod tests { let metric_collection = MetricCollection::new( MetricKindCollection::new(vec![Metric::new( metric_name!("http_tracker_core_announce_requests_received_total"), + None, + None, SampleCollection::new(vec![ Sample::new(Counter::new(1), time, label_set_1.clone()), Sample::new(Counter::new(2), time, label_set_2.clone()), @@ -730,11 +755,11 @@ mod tests { let mut counters = MetricKindCollection::default(); let mut gauges = MetricKindCollection::default(); - let counter = Metric::::without_samples(metric_name!("test_counter")); - counters.ensure_metric_exists(counter); + let counter = Metric::::new_empty_with_name(metric_name!("test_counter")); + counters.insert_if_absent(counter); - let gauge = Metric::::without_samples(metric_name!("test_gauge")); - gauges.ensure_metric_exists(gauge); + let gauge = Metric::::new_empty_with_name(metric_name!("test_gauge")); + gauges.insert_if_absent(gauge); let metric_collection = MetricCollection::new(counters, gauges).unwrap(); @@ -760,6 
+785,8 @@ mod tests { let mut metric_collection = MetricCollection::new( MetricKindCollection::new(vec![Metric::new( metric_name!("test_counter"), + None, + None, SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]).unwrap(), )]) .unwrap(), @@ -819,10 +846,14 @@ mod tests { let result = MetricKindCollection::new(vec![ Metric::new( metric_name!("test_counter"), + None, + None, SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]).unwrap(), ), Metric::new( metric_name!("test_counter"), + None, + None, SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]).unwrap(), ), ]); @@ -849,6 +880,8 @@ mod tests { MetricKindCollection::default(), MetricKindCollection::new(vec![Metric::new( metric_name!("test_gauge"), + None, + None, SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]).unwrap(), )]) .unwrap(), @@ -901,10 +934,14 @@ mod tests { let result = MetricKindCollection::new(vec![ Metric::new( metric_name!("test_gauge"), + None, + None, SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]).unwrap(), ), Metric::new( metric_name!("test_gauge"), + None, + None, SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]).unwrap(), ), ]); diff --git a/packages/metrics/src/unit.rs b/packages/metrics/src/unit.rs index f7a528bed..43b42bf79 100644 --- a/packages/metrics/src/unit.rs +++ b/packages/metrics/src/unit.rs @@ -4,7 +4,11 @@ //! The `Unit` enum is used to specify the unit of measurement for metrics. //! //! They were copied from the `metrics` crate, to allow future compatibility. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] + +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] pub enum Unit { Count, Percent, diff --git a/packages/swarm-coordination-registry/src/statistics/mod.rs b/packages/swarm-coordination-registry/src/statistics/mod.rs index cfc252e34..6505a2db2 100644 --- a/packages/swarm-coordination-registry/src/statistics/mod.rs +++ b/packages/swarm-coordination-registry/src/statistics/mod.rs @@ -36,31 +36,31 @@ pub fn describe_metrics() -> Metrics { metrics.metric_collection.describe_counter( &metric_name!(TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of torrents added.")), + Some(MetricDescription::new("The total number of torrents added.")), ); metrics.metric_collection.describe_counter( &metric_name!(TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of torrents removed.")), + Some(MetricDescription::new("The total number of torrents removed.")), ); metrics.metric_collection.describe_gauge( &metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of torrents.")), + Some(MetricDescription::new("The total number of torrents.")), ); metrics.metric_collection.describe_counter( &metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of torrent downloads.")), + Some(MetricDescription::new("The total number of torrent downloads.")), ); metrics.metric_collection.describe_gauge( &metric_name!(TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of inactive torrents.")), + Some(MetricDescription::new("The total number of inactive torrents.")), ); // Peers metrics @@ -68,25 +68,25 @@ pub fn describe_metrics() -> Metrics { 
metrics.metric_collection.describe_counter( &metric_name!(TORRENT_REPOSITORY_PEERS_ADDED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of peers added.")), + Some(MetricDescription::new("The total number of peers added.")), ); metrics.metric_collection.describe_counter( &metric_name!(TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of peers removed.")), + Some(MetricDescription::new("The total number of peers removed.")), ); metrics.metric_collection.describe_counter( &metric_name!(TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of peers updated.")), + Some(MetricDescription::new("The total number of peers updated.")), ); metrics.metric_collection.describe_gauge( &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new( + Some(MetricDescription::new( "The total number of peer connections (one connection per torrent).", )), ); @@ -94,13 +94,13 @@ pub fn describe_metrics() -> Metrics { metrics.metric_collection.describe_gauge( &metric_name!(TORRENT_REPOSITORY_UNIQUE_PEERS_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of unique peers.")), + Some(MetricDescription::new("The total number of unique peers.")), ); metrics.metric_collection.describe_gauge( &metric_name!(TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of inactive peers.")), + Some(MetricDescription::new("The total number of inactive peers.")), ); metrics diff --git a/packages/tracker-core/src/statistics/mod.rs b/packages/tracker-core/src/statistics/mod.rs index ff8187379..fdb8e8fd4 100644 --- a/packages/tracker-core/src/statistics/mod.rs +++ b/packages/tracker-core/src/statistics/mod.rs @@ -21,7 +21,7 @@ pub fn describe_metrics() -> Metrics { metrics.metric_collection.describe_counter( 
&metric_name!(TRACKER_CORE_PERSISTENT_TORRENTS_DOWNLOADS_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("The total number of torrent downloads (persisted).")), + Some(MetricDescription::new("The total number of torrent downloads (persisted).")), ); metrics diff --git a/packages/udp-tracker-core/src/statistics/mod.rs b/packages/udp-tracker-core/src/statistics/mod.rs index 9eb85d7f1..fec76069e 100644 --- a/packages/udp-tracker-core/src/statistics/mod.rs +++ b/packages/udp-tracker-core/src/statistics/mod.rs @@ -17,7 +17,7 @@ pub fn describe_metrics() -> Metrics { metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("Total number of UDP requests received")), + Some(MetricDescription::new("Total number of UDP requests received")), ); metrics diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index 5c30a9abc..a7da2dc63 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -24,49 +24,49 @@ pub fn describe_metrics() -> Metrics { metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("Total number of UDP requests aborted")), + Some(MetricDescription::new("Total number of UDP requests aborted")), ); metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("Total number of UDP requests banned")), + Some(MetricDescription::new("Total number of UDP requests banned")), ); metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("Total number of requests with connection ID errors")), + Some(MetricDescription::new("Total number of requests with connection ID 
errors")), ); metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("Total number of UDP requests received")), + Some(MetricDescription::new("Total number of UDP requests received")), ); metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("Total number of UDP requests accepted")), + Some(MetricDescription::new("Total number of UDP requests accepted")), ); metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("Total number of UDP responses sent")), + Some(MetricDescription::new("Total number of UDP responses sent")), ); metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), Some(Unit::Count), - Some(&MetricDescription::new("Total number of errors processing UDP requests")), + Some(MetricDescription::new("Total number of errors processing UDP requests")), ); metrics.metric_collection.describe_gauge( &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), Some(Unit::Nanoseconds), - Some(&MetricDescription::new( + Some(MetricDescription::new( "Average time to process a UDP connect request in nanoseconds", )), ); From 842739ff95d33f1b47afc9b71459b3f8671ed175 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 6 Jun 2025 11:18:02 +0100 Subject: [PATCH 690/802] feat: [#1514] add HELP and TYPE to prometehous metric export --- packages/metrics/src/metric/mod.rs | 44 +++++++++++++++++++++-- packages/metrics/src/metric_collection.rs | 4 +++ 2 files changed, 45 insertions(+), 3 deletions(-) diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index eff2c7a5f..08f7dd485 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -103,18 +103,56 @@ impl Metric { } } -impl 
PrometheusSerializable for Metric { +impl PrometheusSerializable for Metric { fn to_prometheus(&self) -> String { let samples: Vec = self .sample_collection .iter() .map(|(label_set, sample)| { - format!( + let help = if let Some(description) = &self.opt_description { + format!("# HELP {description}\n") + } else { + String::new() + }; + + let kind = format!("# TYPE {} counter\n", self.name.to_prometheus()); + + let metric = format!( "{}{} {}", self.name.to_prometheus(), label_set.to_prometheus(), sample.value().to_prometheus() - ) + ); + + format!("{help}{kind}{metric}") + }) + .collect(); + samples.join("\n") + } +} + +impl PrometheusSerializable for Metric { + fn to_prometheus(&self) -> String { + let samples: Vec = self + .sample_collection + .iter() + .map(|(label_set, sample)| { + let help = if let Some(description) = &self.opt_description { + format!("# HELP {description}\n") + } else { + String::new() + }; + + let kind = format!("# TYPE {} gauge\n", self.name.to_prometheus()); + + let metric = format!( + "{}{} {}", + self.name.to_prometheus(), + label_set.to_prometheus(), + sample.value().to_prometheus() + ); + + format!("{help}{kind}{metric}") }) .collect(); samples.join("\n") diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 59c0448af..23b7609f6 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -607,7 +607,9 @@ mod tests { fn prometheus() -> String { format_prometheus_output( r#" + # TYPE http_tracker_core_announce_requests_received_total counter http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 + # TYPE udp_tracker_server_performance_avg_announce_processing_time_ns gauge udp_tracker_server_performance_avg_announce_processing_time_ns{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 "#, ) @@ -739,7 +741,9 @@ mod tests { 
let expected_prometheus_output = format_prometheus_output( r#" + # TYPE http_tracker_core_announce_requests_received_total counter http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7171",server_binding_protocol="http"} 2 + # TYPE http_tracker_core_announce_requests_received_total counter http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 "#, ); From ed1322b7a8be7cf039d01aaab112eefe01e55ba0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 6 Jun 2025 11:25:48 +0100 Subject: [PATCH 691/802] refactor: [#1514] reorganize SampleCollection tests --- packages/metrics/src/sample_collection.rs | 90 +++++++++++++---------- 1 file changed, 53 insertions(+), 37 deletions(-) diff --git a/packages/metrics/src/sample_collection.rs b/packages/metrics/src/sample_collection.rs index e815f26ec..a87aacb63 100644 --- a/packages/metrics/src/sample_collection.rs +++ b/packages/metrics/src/sample_collection.rs @@ -168,10 +168,8 @@ mod tests { use crate::counter::Counter; use crate::label::LabelSet; - use crate::prometheus::PrometheusSerializable; use crate::sample::Sample; use crate::sample_collection::SampleCollection; - use crate::tests::format_prometheus_output; fn sample_update_time() -> DurationSinceUnixEpoch { DurationSinceUnixEpoch::from_secs(1_743_552_000) @@ -242,56 +240,74 @@ mod tests { assert!(!collection.is_empty()); } - #[test] - fn it_should_be_serializable_and_deserializable_for_json_format() { - let sample = Sample::new(Counter::default(), sample_update_time(), LabelSet::default()); - let collection = SampleCollection::new(vec![sample]).unwrap(); + mod json_serialization { + use crate::counter::Counter; + use crate::label::LabelSet; + use crate::sample::Sample; + use crate::sample_collection::tests::sample_update_time; + use crate::sample_collection::SampleCollection; - let serialized = serde_json::to_string(&collection).unwrap(); 
- let deserialized: SampleCollection = serde_json::from_str(&serialized).unwrap(); + #[test] + fn it_should_be_serializable_and_deserializable_for_json_format() { + let sample = Sample::new(Counter::default(), sample_update_time(), LabelSet::default()); + let collection = SampleCollection::new(vec![sample]).unwrap(); - assert_eq!(deserialized, collection); - } + let serialized = serde_json::to_string(&collection).unwrap(); + let deserialized: SampleCollection = serde_json::from_str(&serialized).unwrap(); - #[test] - fn it_should_fail_deserializing_from_json_with_duplicate_label_sets() { - let samples = vec![ - Sample::new(Counter::default(), sample_update_time(), LabelSet::default()), - Sample::new(Counter::default(), sample_update_time(), LabelSet::default()), - ]; + assert_eq!(deserialized, collection); + } - let serialized = serde_json::to_string(&samples).unwrap(); + #[test] + fn it_should_fail_deserializing_from_json_with_duplicate_label_sets() { + let samples = vec![ + Sample::new(Counter::default(), sample_update_time(), LabelSet::default()), + Sample::new(Counter::default(), sample_update_time(), LabelSet::default()), + ]; - let result: Result, _> = serde_json::from_str(&serialized); + let serialized = serde_json::to_string(&samples).unwrap(); - assert!(result.is_err()); + let result: Result, _> = serde_json::from_str(&serialized); + + assert!(result.is_err()); + } } - #[test] - fn it_should_be_exportable_to_prometheus_format_when_empty() { - let sample = Sample::new(Counter::default(), sample_update_time(), LabelSet::default()); - let collection = SampleCollection::new(vec![sample]).unwrap(); + mod prometheus_serialization { + use crate::counter::Counter; + use crate::label::LabelSet; + use crate::prometheus::PrometheusSerializable; + use crate::sample::Sample; + use crate::sample_collection::tests::sample_update_time; + use crate::sample_collection::SampleCollection; + use crate::tests::format_prometheus_output; - let prometheus_output = 
collection.to_prometheus(); + #[test] + fn it_should_be_exportable_to_prometheus_format_when_empty() { + let sample = Sample::new(Counter::default(), sample_update_time(), LabelSet::default()); + let collection = SampleCollection::new(vec![sample]).unwrap(); - assert!(!prometheus_output.is_empty()); - } + let prometheus_output = collection.to_prometheus(); - #[test] - fn it_should_be_exportable_to_prometheus_format() { - let sample = Sample::new( - Counter::new(1), - sample_update_time(), - LabelSet::from(vec![("labe_name_1", "label value value 1")]), - ); + assert!(!prometheus_output.is_empty()); + } - let collection = SampleCollection::new(vec![sample]).unwrap(); + #[test] + fn it_should_be_exportable_to_prometheus_format() { + let sample = Sample::new( + Counter::new(1), + sample_update_time(), + LabelSet::from(vec![("labe_name_1", "label value value 1")]), + ); - let prometheus_output = collection.to_prometheus(); + let collection = SampleCollection::new(vec![sample]).unwrap(); - let expected_prometheus_output = format_prometheus_output("{labe_name_1=\"label value value 1\"} 1"); + let prometheus_output = collection.to_prometheus(); - assert_eq!(prometheus_output, expected_prometheus_output); + let expected_prometheus_output = format_prometheus_output("{labe_name_1=\"label value value 1\"} 1"); + + assert_eq!(prometheus_output, expected_prometheus_output); + } } #[cfg(test)] From a89406daad7c308639635fa234d39b27ec41085b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 6 Jun 2025 12:49:34 +0100 Subject: [PATCH 692/802] refactor: [#1514] remove duplicate code in Metric type --- packages/metrics/src/metric/mod.rs | 117 ++++++++++++++++++++--------- 1 file changed, 83 insertions(+), 34 deletions(-) diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index 08f7dd485..a97621da8 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -103,28 +103,92 @@ impl Metric { } } +/// 
`PrometheusMetricSample` is a wrapper around types that provides methods to +/// convert the metric and its measurement into a Prometheus-compatible format. +/// +/// In Prometheus, a metric is a time series that consists of a name, a set of +/// labels, and a value. The sample value needs data from the `Metric` and +/// `Measurement` structs, as well as the `LabelSet` that defines the labels for +/// the metric. +struct PrometheusMetricSample<'a, T> { + metric: &'a Metric, + measurement: &'a Measurement, + label_set: &'a LabelSet, +} + +enum PrometheusType { + Counter, + Gauge, +} + +impl PrometheusSerializable for PrometheusType { + fn to_prometheus(&self) -> String { + match self { + PrometheusType::Counter => "counter".to_string(), + PrometheusType::Gauge => "gauge".to_string(), + } + } +} + +impl PrometheusMetricSample<'_, T> { + fn to_prometheus(&self, prometheus_type: &PrometheusType) -> String { + format!( + "{}{}{}", + self.help_line(), + self.type_line(prometheus_type), + self.metric_line() + ) + } + + fn help_line(&self) -> String { + if let Some(description) = &self.metric.opt_description { + format!("# HELP {description}\n") + } else { + String::new() + } + } + + fn type_line(&self, kind: &PrometheusType) -> String { + format!("# TYPE {} {}\n", self.metric.name().to_prometheus(), kind.to_prometheus()) + } + + fn metric_line(&self) -> String { + format!( + "{}{} {}", + self.metric.name.to_prometheus(), + self.label_set.to_prometheus(), + self.measurement.value().to_prometheus() + ) + } +} + +impl<'a> PrometheusMetricSample<'a, Counter> { + pub fn new(metric: &'a Metric, measurement: &'a Measurement, label_set: &'a LabelSet) -> Self { + Self { + metric, + measurement, + label_set, + } + } +} + +impl<'a> PrometheusMetricSample<'a, Gauge> { + pub fn new(metric: &'a Metric, measurement: &'a Measurement, label_set: &'a LabelSet) -> Self { + Self { + metric, + measurement, + label_set, + } + } +} + impl PrometheusSerializable for Metric { fn 
to_prometheus(&self) -> String { let samples: Vec = self .sample_collection .iter() - .map(|(label_set, sample)| { - let help = if let Some(description) = &self.opt_description { - format!("# HELP {description}\n") - } else { - String::new() - }; - - let kind = format!("# TYPE {} counter\n", self.name.to_prometheus()); - - let metric = format!( - "{}{} {}", - self.name.to_prometheus(), - label_set.to_prometheus(), - sample.value().to_prometheus() - ); - - format!("{help}{kind}{metric}") + .map(|(label_set, measurement)| { + PrometheusMetricSample::::new(self, measurement, label_set).to_prometheus(&PrometheusType::Counter) }) .collect(); samples.join("\n") @@ -136,23 +200,8 @@ impl PrometheusSerializable for Metric { let samples: Vec = self .sample_collection .iter() - .map(|(label_set, sample)| { - let help = if let Some(description) = &self.opt_description { - format!("# HELP {description}\n") - } else { - String::new() - }; - - let kind = format!("# TYPE {} gauge\n", self.name.to_prometheus()); - - let metric = format!( - "{}{} {}", - self.name.to_prometheus(), - label_set.to_prometheus(), - sample.value().to_prometheus() - ); - - format!("{help}{kind}{metric}") + .map(|(label_set, measurement)| { + PrometheusMetricSample::::new(self, measurement, label_set).to_prometheus(&PrometheusType::Gauge) }) .collect(); samples.join("\n") From 748e6a50e6f18324d2587eddb3fc43f626fb3876 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 6 Jun 2025 15:41:07 +0100 Subject: [PATCH 693/802] test: [#1514] add tests to metrics package --- packages/metrics/src/label/set.rs | 200 +++++++++++++++++++++------- packages/metrics/src/label/value.rs | 59 ++++++++ 2 files changed, 211 insertions(+), 48 deletions(-) diff --git a/packages/metrics/src/label/set.rs b/packages/metrics/src/label/set.rs index 2b6334fc7..1c2c3e27e 100644 --- a/packages/metrics/src/label/set.rs +++ b/packages/metrics/src/label/set.rs @@ -175,6 +175,7 @@ impl PrometheusSerializable for LabelSet { mod tests { use 
std::collections::BTreeMap; + use std::hash::{DefaultHasher, Hash}; use pretty_assertions::assert_eq; @@ -195,54 +196,6 @@ mod tests { ] } - #[test] - fn it_should_allow_instantiation_from_an_array_of_label_pairs() { - let label_set: LabelSet = sample_array_of_label_pairs().into(); - - assert_eq!( - label_set, - LabelSet { - items: BTreeMap::from(sample_array_of_label_pairs()) - } - ); - } - - #[test] - fn it_should_allow_instantiation_from_a_vec_of_label_pairs() { - let label_set: LabelSet = sample_vec_of_label_pairs().into(); - - assert_eq!( - label_set, - LabelSet { - items: BTreeMap::from(sample_array_of_label_pairs()) - } - ); - } - - #[test] - fn it_should_allow_instantiation_from_a_b_tree_map() { - let label_set: LabelSet = BTreeMap::from(sample_array_of_label_pairs()).into(); - - assert_eq!( - label_set, - LabelSet { - items: BTreeMap::from(sample_array_of_label_pairs()) - } - ); - } - - #[test] - fn it_should_allow_instantiation_from_a_label_pair() { - let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); - - assert_eq!( - label_set, - LabelSet { - items: BTreeMap::from([(label_name!("label_name"), LabelValue::new("value"))]) - } - ); - } - #[test] fn it_should_allow_inserting_a_new_label_pair() { let mut label_set = LabelSet::default(); @@ -338,4 +291,155 @@ mod tests { assert_eq!(label_set.to_string(), r#"{label_name="label value"}"#); } + + #[test] + fn it_should_allow_instantiation_from_an_array_of_label_pairs() { + let label_set: LabelSet = sample_array_of_label_pairs().into(); + + assert_eq!( + label_set, + LabelSet { + items: BTreeMap::from(sample_array_of_label_pairs()) + } + ); + } + + #[test] + fn it_should_allow_instantiation_from_a_vec_of_label_pairs() { + let label_set: LabelSet = sample_vec_of_label_pairs().into(); + + assert_eq!( + label_set, + LabelSet { + items: BTreeMap::from(sample_array_of_label_pairs()) + } + ); + } + + #[test] + fn it_should_allow_instantiation_from_a_b_tree_map() { + let label_set: 
LabelSet = BTreeMap::from(sample_array_of_label_pairs()).into(); + + assert_eq!( + label_set, + LabelSet { + items: BTreeMap::from(sample_array_of_label_pairs()) + } + ); + } + + #[test] + fn it_should_allow_instantiation_from_a_label_pair() { + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + assert_eq!( + label_set, + LabelSet { + items: BTreeMap::from([(label_name!("label_name"), LabelValue::new("value"))]) + } + ); + } + + #[test] + fn it_should_allow_instantiation_from_vec_of_str_tuples() { + let label_set: LabelSet = vec![("foo", "bar"), ("baz", "qux")].into(); + + let mut expected = BTreeMap::new(); + expected.insert(LabelName::new("foo"), LabelValue::new("bar")); + expected.insert(LabelName::new("baz"), LabelValue::new("qux")); + + assert_eq!(label_set, LabelSet { items: expected }); + } + + #[test] + fn it_should_allow_instantiation_from_vec_of_string_tuples() { + let label_set: LabelSet = vec![("foo".to_string(), "bar".to_string()), ("baz".to_string(), "qux".to_string())].into(); + + let mut expected = BTreeMap::new(); + expected.insert(LabelName::new("foo"), LabelValue::new("bar")); + expected.insert(LabelName::new("baz"), LabelValue::new("qux")); + + assert_eq!(label_set, LabelSet { items: expected }); + } + + #[test] + fn it_should_allow_instantiation_from_vec_of_serialized_label() { + use super::SerializedLabel; + let label_set: LabelSet = vec![ + SerializedLabel { + name: LabelName::new("foo"), + value: LabelValue::new("bar"), + }, + SerializedLabel { + name: LabelName::new("baz"), + value: LabelValue::new("qux"), + }, + ] + .into(); + + let mut expected = BTreeMap::new(); + expected.insert(LabelName::new("foo"), LabelValue::new("bar")); + expected.insert(LabelName::new("baz"), LabelValue::new("qux")); + + assert_eq!(label_set, LabelSet { items: expected }); + } + + #[test] + fn it_should_allow_instantiation_from_array_of_string_tuples() { + let arr: [(String, String); 2] = [("foo".to_string(), 
"bar".to_string()), ("baz".to_string(), "qux".to_string())]; + let label_set: LabelSet = arr.into(); + + let mut expected = BTreeMap::new(); + + expected.insert(LabelName::new("foo"), LabelValue::new("bar")); + expected.insert(LabelName::new("baz"), LabelValue::new("qux")); + + assert_eq!(label_set, LabelSet { items: expected }); + } + + #[test] + fn it_should_allow_instantiation_from_array_of_str_tuples() { + let arr: [(&str, &str); 2] = [("foo", "bar"), ("baz", "qux")]; + let label_set: LabelSet = arr.into(); + + let mut expected = BTreeMap::new(); + + expected.insert(LabelName::new("foo"), LabelValue::new("bar")); + expected.insert(LabelName::new("baz"), LabelValue::new("qux")); + + assert_eq!(label_set, LabelSet { items: expected }); + } + + #[test] + fn it_should_be_comparable() { + let a: LabelSet = (label_name!("x"), LabelValue::new("1")).into(); + let b: LabelSet = (label_name!("x"), LabelValue::new("1")).into(); + let c: LabelSet = (label_name!("y"), LabelValue::new("2")).into(); + + assert_eq!(a, b); + assert_ne!(a, c); + } + + #[test] + fn it_should_be_allow_ordering() { + let a: LabelSet = (label_name!("x"), LabelValue::new("1")).into(); + let b: LabelSet = (label_name!("y"), LabelValue::new("2")).into(); + + assert!(a < b); + } + + #[test] + fn it_should_be_hashable() { + let a: LabelSet = (label_name!("x"), LabelValue::new("1")).into(); + + let mut hasher = DefaultHasher::new(); + + a.hash(&mut hasher); + } + + #[test] + fn it_should_implement_clone() { + let a: LabelSet = (label_name!("x"), LabelValue::new("1")).into(); + let _unused = a.clone(); + } } diff --git a/packages/metrics/src/label/value.rs b/packages/metrics/src/label/value.rs index ffdbce333..4f25844a8 100644 --- a/packages/metrics/src/label/value.rs +++ b/packages/metrics/src/label/value.rs @@ -33,6 +33,9 @@ impl From for LabelValue { #[cfg(test)] mod tests { + use std::collections::hash_map::DefaultHasher; + use std::hash::Hash; + use crate::label::value::LabelValue; use 
crate::prometheus::PrometheusSerializable; @@ -41,4 +44,60 @@ mod tests { let label_value = LabelValue::new("value"); assert_eq!(label_value.to_prometheus(), "value"); } + + #[test] + fn it_could_be_initialized_from_str() { + let lv = LabelValue::new("abc"); + assert_eq!(lv.0, "abc"); + } + + #[test] + fn it_should_allow_to_create_an_ignored_label_value() { + let lv = LabelValue::ignore(); + assert_eq!(lv.0, ""); + } + + #[test] + fn it_should_be_converted_from_string() { + let s = String::from("foo"); + let lv: LabelValue = s.clone().into(); + assert_eq!(lv.0, s); + } + + #[test] + fn it_should_be_comparable() { + let a = LabelValue::new("x"); + let b = LabelValue::new("x"); + let c = LabelValue::new("y"); + + assert_eq!(a, b); + assert_ne!(a, c); + } + + #[test] + fn it_should_be_allow_ordering() { + let a = LabelValue::new("x"); + let b = LabelValue::new("y"); + + assert!(a < b); + } + + #[test] + fn it_should_be_hashable() { + let a = LabelValue::new("x"); + let mut hasher = DefaultHasher::new(); + a.hash(&mut hasher); + } + + #[test] + fn it_should_implement_clone() { + let a = LabelValue::new("x"); + let _unused = a.clone(); + } + + #[test] + fn it_should_implement_display() { + let a = LabelValue::new("x"); + assert_eq!(format!("{a}"), "x"); + } } From 642d7742ea44dfd65db0ce840dc33053c0ce53dd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 6 Jun 2025 16:15:11 +0100 Subject: [PATCH 694/802] fix: [#1514] HELP line in Prometheus export must contain metric name Format for each metric sample: {label_set} Exmaple: ``` udp_tracker_server_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="6868",server_binding_protocol="udp"} 36661 ``` See https://prometheus.io/docs/instrumenting/exposition_formats/#comments-help-text-and-type-information --- packages/metrics/src/metric/description.rs | 13 +++++++++++++ packages/metrics/src/metric/mod.rs | 12 +++++++++++- packages/metrics/src/metric_collection.rs | 10 ++++++---- 3 files changed, 30 
insertions(+), 5 deletions(-) diff --git a/packages/metrics/src/metric/description.rs b/packages/metrics/src/metric/description.rs index 8a50dee90..6a0ca3432 100644 --- a/packages/metrics/src/metric/description.rs +++ b/packages/metrics/src/metric/description.rs @@ -1,6 +1,8 @@ use derive_more::Display; use serde::{Deserialize, Serialize}; +use crate::prometheus::PrometheusSerializable; + #[derive(Debug, Display, Clone, Eq, PartialEq, Default, Deserialize, Serialize, Hash, Ord, PartialOrd)] pub struct MetricDescription(String); @@ -11,6 +13,11 @@ impl MetricDescription { } } +impl PrometheusSerializable for MetricDescription { + fn to_prometheus(&self) -> String { + self.0.clone() + } +} #[cfg(test)] mod tests { use super::*; @@ -21,6 +28,12 @@ mod tests { assert_eq!(metric.0, "Metric description"); } + #[test] + fn it_serializes_to_prometheus() { + let label_value = MetricDescription::new("name"); + assert_eq!(label_value.to_prometheus(), "name"); + } + #[test] fn it_should_be_displayed() { let metric = MetricDescription::new("Metric description"); diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index a97621da8..f3278d98c 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -133,6 +133,10 @@ impl PrometheusSerializable for PrometheusType { impl PrometheusMetricSample<'_, T> { fn to_prometheus(&self, prometheus_type: &PrometheusType) -> String { format!( + // Format: + // # HELP + // # TYPE + // {label_set} "{}{}{}", self.help_line(), self.type_line(prometheus_type), @@ -142,7 +146,12 @@ impl PrometheusMetricSample<'_, T> { fn help_line(&self) -> String { if let Some(description) = &self.metric.opt_description { - format!("# HELP {description}\n") + format!( + // Format: # HELP + "# HELP {} {}\n", + self.metric.name().to_prometheus(), + description.to_prometheus() + ) } else { String::new() } @@ -154,6 +163,7 @@ impl PrometheusMetricSample<'_, T> { fn metric_line(&self) -> String { format!( 
+ // Format: {label_set} "{}{} {}", self.metric.name.to_prometheus(), self.label_set.to_prometheus(), diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 23b7609f6..122895478 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -529,14 +529,14 @@ mod tests { MetricKindCollection::new(vec![Metric::new( metric_name!("http_tracker_core_announce_requests_received_total"), None, - None, + Some(MetricDescription::new("The number of announce requests received.")), SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set_1.clone())]).unwrap(), )]) .unwrap(), MetricKindCollection::new(vec![Metric::new( metric_name!("udp_tracker_server_performance_avg_announce_processing_time_ns"), None, - None, + Some(MetricDescription::new("The average announce processing time in nanoseconds.")), SampleCollection::new(vec![Sample::new(Gauge::new(1.0), time, label_set_1.clone())]).unwrap(), )]) .unwrap(), @@ -551,7 +551,7 @@ mod tests { "type":"counter", "name":"http_tracker_core_announce_requests_received_total", "unit": null, - "description": null, + "description": "The number of announce requests received.", "samples":[ { "value":1, @@ -577,7 +577,7 @@ mod tests { "type":"gauge", "name":"udp_tracker_server_performance_avg_announce_processing_time_ns", "unit": null, - "description": null, + "description": "The average announce processing time in nanoseconds.", "samples":[ { "value":1.0, @@ -607,8 +607,10 @@ mod tests { fn prometheus() -> String { format_prometheus_output( r#" + # HELP http_tracker_core_announce_requests_received_total The number of announce requests received. 
# TYPE http_tracker_core_announce_requests_received_total counter http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 + # HELP udp_tracker_server_performance_avg_announce_processing_time_ns The average announce processing time in nanoseconds. # TYPE udp_tracker_server_performance_avg_announce_processing_time_ns gauge udp_tracker_server_performance_avg_announce_processing_time_ns{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 "#, From 376f242166725f682c4b80502535b27b88fcb52c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 6 Jun 2025 16:31:07 +0100 Subject: [PATCH 695/802] test: [#1514] add tests to metrics package --- packages/metrics/src/metric/mod.rs | 51 +++++++++++++++++++++++++----- 1 file changed, 43 insertions(+), 8 deletions(-) diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index f3278d98c..6f254023f 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -286,14 +286,25 @@ mod tests { #[test] fn it_should_allow_incrementing_a_sample() { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); - let name = metric_name!("test_metric"); - let label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); + let samples = SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]).unwrap(); + let mut metric = Metric::::new(name.clone(), None, None, samples); - let samples = SampleCollection::new(vec![Sample::new(Counter::new(1), time, label_set.clone())]).unwrap(); + metric.increment(&label_set, time); - let metric = Metric::::new(name.clone(), None, None, samples); + assert_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 1); + } + + #[test] + fn it_should_allow_setting_to_an_absolute_value() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let name = 
metric_name!("test_metric"); + let label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); + let samples = SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]).unwrap(); + let mut metric = Metric::::new(name.clone(), None, None, samples); + + metric.absolute(&label_set, 1, time); assert_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 1); } @@ -318,16 +329,40 @@ mod tests { } #[test] - fn it_should_allow_setting_a_sample() { + fn it_should_allow_incrementing_a_sample() { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); - let name = metric_name!("test_metric"); - let label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); + let samples = SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]).unwrap(); + let mut metric = Metric::::new(name.clone(), None, None, samples); + metric.increment(&label_set, time); + + assert_relative_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 1.0); + } + + #[test] + fn it_should_allow_decrement_a_sample() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let name = metric_name!("test_metric"); + let label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); let samples = SampleCollection::new(vec![Sample::new(Gauge::new(1.0), time, label_set.clone())]).unwrap(); + let mut metric = Metric::::new(name.clone(), None, None, samples); - let metric = Metric::::new(name.clone(), None, None, samples); + metric.decrement(&label_set, time); + + assert_relative_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 0.0); + } + + #[test] + fn it_should_allow_setting_a_sample() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let name = metric_name!("test_metric"); + let label_set: LabelSet = [(label_name!("server_binding_protocol"), LabelValue::new("http"))].into(); + let samples = 
SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]).unwrap(); + let mut metric = Metric::::new(name.clone(), None, None, samples); + + metric.set(&label_set, 1.0, time); assert_relative_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 1.0); } From 507b48035daac72b6ae5c22394fc7198fe3fee02 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 6 Jun 2025 17:01:43 +0100 Subject: [PATCH 696/802] fix: [#1514] bug. Don't allow merge metric collections with the same metrin name It was not possible to merge counters or gauges if the metric names was duplicate but possible when the metric name was duplciate across types. For example, the target collection (the one is mutated) contains a counter with a name that is being used in the source collection (the wan we get metric from) for a gauge metric. --- packages/metrics/src/metric_collection.rs | 82 +++++++++++++++++++++++ 1 file changed, 82 insertions(+) diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index 122895478..c7dfbba7a 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -50,11 +50,33 @@ impl MetricCollection { /// /// Returns an error if a metric name already exists in the current collection. pub fn merge(&mut self, other: &Self) -> Result<(), Error> { + self.check_cross_type_collision(other)?; self.counters.merge(&other.counters)?; self.gauges.merge(&other.gauges)?; Ok(()) } + /// Returns a set of all metric names in this collection. + fn collect_names(&self) -> HashSet { + self.counters.names().chain(self.gauges.names()).cloned().collect() + } + + /// Checks for name collisions between this collection and another one. 
+ fn check_cross_type_collision(&self, other: &Self) -> Result<(), Error> { + let self_names: HashSet<_> = self.collect_names(); + let other_names: HashSet<_> = other.collect_names(); + + let cross_type_collisions = self_names.intersection(&other_names).next(); + + if let Some(name) = cross_type_collisions { + return Err(Error::MetricNameCollisionInMerge { + metric_name: (*name).clone(), + }); + } + + Ok(()) + } + // Counter-specific methods pub fn describe_counter(&mut self, name: &MetricName, opt_unit: Option, opt_description: Option) { @@ -774,6 +796,66 @@ mod tests { assert_eq!(prometheus_output, ""); } + #[test] + fn it_should_allow_merging_metric_collections() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection1 = MetricCollection::default(); + collection1 + .increase_counter(&metric_name!("test_counter"), &label_set, time) + .unwrap(); + + let mut collection2 = MetricCollection::default(); + collection2 + .set_gauge(&metric_name!("test_gauge"), &label_set, 1.0, time) + .unwrap(); + + collection1.merge(&collection2).unwrap(); + + assert!(collection1.contains_counter(&metric_name!("test_counter"))); + assert!(collection1.contains_gauge(&metric_name!("test_gauge"))); + } + + #[test] + fn it_should_not_allow_merging_metric_collections_with_name_collisions_for_the_same_metric_types() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection1 = MetricCollection::default(); + collection1 + .increase_counter(&metric_name!("test_metric"), &label_set, time) + .unwrap(); + + let mut collection2 = MetricCollection::default(); + collection2 + .increase_counter(&metric_name!("test_metric"), &label_set, time) + .unwrap(); + let result = collection1.merge(&collection2); + + assert!(result.is_err()); + } + + #[test] + fn 
it_should_not_allow_merging_metric_collections_with_name_collisions_for_different_metric_types() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection1 = MetricCollection::default(); + collection1 + .increase_counter(&metric_name!("test_metric"), &label_set, time) + .unwrap(); + + let mut collection2 = MetricCollection::default(); + collection2 + .set_gauge(&metric_name!("test_metric"), &label_set, 1.0, time) + .unwrap(); + + let result = collection1.merge(&collection2); + + assert!(result.is_err()); + } + mod for_counters { use pretty_assertions::assert_eq; From bb2392dda0f2f7339544a3227a2d1adca008f156 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 6 Jun 2025 17:16:48 +0100 Subject: [PATCH 697/802] test: [#1514] add tests to metrics package --- packages/metrics/src/metric_collection.rs | 233 ++++++++++++++++++---- 1 file changed, 194 insertions(+), 39 deletions(-) diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index c7dfbba7a..c53d02bcf 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -374,17 +374,18 @@ impl MetricKindCollection { /// /// Returns an error if a metric name already exists in the current collection. 
pub fn merge(&mut self, other: &Self) -> Result<(), Error> { - // Check for name collisions - for metric_name in other.metrics.keys() { - if self.metrics.contains_key(metric_name) { - return Err(Error::MetricNameCollisionInMerge { - metric_name: metric_name.clone(), - }); - } - } + self.check_for_name_collision(other)?; for (metric_name, metric) in &other.metrics { - if self.metrics.insert(metric_name.clone(), metric.clone()).is_some() { + self.metrics.insert(metric_name.clone(), metric.clone()); + } + + Ok(()) + } + + fn check_for_name_collision(&self, other: &Self) -> Result<(), Error> { + for metric_name in other.metrics.keys() { + if self.metrics.contains_key(metric_name) { return Err(Error::MetricNameCollisionInMerge { metric_name: metric_name.clone(), }); @@ -856,6 +857,38 @@ mod tests { assert!(result.is_err()); } + fn collection_with_one_counter(metric_name: &MetricName, label_set: &LabelSet, counter: Counter) -> MetricCollection { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + + MetricCollection::new( + MetricKindCollection::new(vec![Metric::new( + metric_name.clone(), + None, + None, + SampleCollection::new(vec![Sample::new(counter, time, label_set.clone())]).unwrap(), + )]) + .unwrap(), + MetricKindCollection::default(), + ) + .unwrap() + } + + fn collection_with_one_gauge(metric_name: &MetricName, label_set: &LabelSet, gauge: Gauge) -> MetricCollection { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + + MetricCollection::new( + MetricKindCollection::default(), + MetricKindCollection::new(vec![Metric::new( + metric_name.clone(), + None, + None, + SampleCollection::new(vec![Sample::new(gauge, time, label_set.clone())]).unwrap(), + )]) + .unwrap(), + ) + .unwrap() + } + mod for_counters { use pretty_assertions::assert_eq; @@ -866,32 +899,54 @@ mod tests { use crate::sample_collection::SampleCollection; #[test] - fn it_should_increase_a_preexistent_counter() { + fn it_should_allow_setting_to_an_absolute_value() { let time 
= DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_counter"); let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); - let mut metric_collection = MetricCollection::new( - MetricKindCollection::new(vec![Metric::new( - metric_name!("test_counter"), - None, - None, - SampleCollection::new(vec![Sample::new(Counter::new(0), time, label_set.clone())]).unwrap(), - )]) - .unwrap(), - MetricKindCollection::default(), - ) - .unwrap(); + let mut collection = collection_with_one_counter(&metric_name, &label_set, Counter::new(0)); - metric_collection - .increase_counter(&metric_name!("test_counter"), &label_set, time) + collection + .set_counter(&metric_name!("test_counter"), &label_set, 1, time) .unwrap(); - metric_collection + + assert_eq!( + collection.get_counter_value(&metric_name!("test_counter"), &label_set), + Some(Counter::new(1)) + ); + } + + #[test] + fn it_should_fail_setting_to_an_absolute_value_if_a_gauge_with_the_same_name_exists() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_counter"); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection = collection_with_one_gauge(&metric_name, &label_set, Gauge::new(0.0)); + + let result = collection.set_counter(&metric_name!("test_counter"), &label_set, 1, time); + + assert!( + result.is_err() + && matches!(result, Err(Error::MetricNameCollisionAdding { metric_name }) if metric_name == metric_name!("test_counter")) + ); + } + + #[test] + fn it_should_increase_a_preexistent_counter() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_counter"); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection = collection_with_one_counter(&metric_name, &label_set, Counter::new(0)); + + collection 
.increase_counter(&metric_name!("test_counter"), &label_set, time) .unwrap(); assert_eq!( - metric_collection.get_counter_value(&metric_name!("test_counter"), &label_set), - Some(Counter::new(2)) + collection.get_counter_value(&metric_name!("test_counter"), &label_set), + Some(Counter::new(1)) ); } @@ -962,30 +1017,89 @@ mod tests { #[test] fn it_should_set_a_preexistent_gauge() { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_gauge"); let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); - let mut metric_collection = MetricCollection::new( - MetricKindCollection::default(), - MetricKindCollection::new(vec![Metric::new( - metric_name!("test_gauge"), - None, - None, - SampleCollection::new(vec![Sample::new(Gauge::new(0.0), time, label_set.clone())]).unwrap(), - )]) - .unwrap(), - ) - .unwrap(); + let mut collection = collection_with_one_gauge(&metric_name, &label_set, Gauge::new(0.0)); - metric_collection + collection .set_gauge(&metric_name!("test_gauge"), &label_set, 1.0, time) .unwrap(); assert_eq!( - metric_collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), + collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), Some(Gauge::new(1.0)) ); } + #[test] + fn it_should_allow_incrementing_a_gauge() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_gauge"); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection = collection_with_one_gauge(&metric_name, &label_set, Gauge::new(0.0)); + + collection + .increment_gauge(&metric_name!("test_gauge"), &label_set, time) + .unwrap(); + + assert_eq!( + collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), + Some(Gauge::new(1.0)) + ); + } + + #[test] + fn it_should_fail_incrementing_a_gauge_if_it_exists_a_counter_with_the_same_name() { + let time = 
DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_gauge"); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection = collection_with_one_counter(&metric_name, &label_set, Counter::new(0)); + + let result = collection.increment_gauge(&metric_name!("test_gauge"), &label_set, time); + + assert!( + result.is_err() + && matches!(result, Err(Error::MetricNameCollisionAdding { metric_name }) if metric_name == metric_name!("test_gauge")) + ); + } + + #[test] + fn it_should_allow_decrementing_a_gauge() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_gauge"); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection = collection_with_one_gauge(&metric_name, &label_set, Gauge::new(1.0)); + + collection + .decrement_gauge(&metric_name!("test_gauge"), &label_set, time) + .unwrap(); + + assert_eq!( + collection.get_gauge_value(&metric_name!("test_gauge"), &label_set), + Some(Gauge::new(0.0)) + ); + } + + #[test] + fn it_should_fail_decrementing_a_gauge_if_it_exists_a_counter_with_the_same_name() { + let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); + let metric_name = metric_name!("test_gauge"); + let label_set: LabelSet = (label_name!("label_name"), LabelValue::new("value")).into(); + + let mut collection = collection_with_one_counter(&metric_name, &label_set, Counter::new(0)); + + let result = collection.decrement_gauge(&metric_name!("test_gauge"), &label_set, time); + + assert!( + result.is_err() + && matches!(result, Err(Error::MetricNameCollisionAdding { metric_name }) if metric_name == metric_name!("test_gauge")) + ); + } + #[test] fn it_should_automatically_create_a_gauge_when_setting_if_it_does_not_exist() { let time = DurationSinceUnixEpoch::from_secs(1_743_552_000); @@ -1037,4 +1151,45 @@ mod tests { assert!(result.is_err()); } } + + mod 
metric_kind_collection { + + use crate::counter::Counter; + use crate::gauge::Gauge; + use crate::metric::Metric; + use crate::metric_collection::{Error, MetricKindCollection}; + use crate::metric_name; + + #[test] + fn it_should_not_allow_merging_counter_metric_collections_with_name_collisions() { + let mut collection1 = MetricKindCollection::::default(); + collection1.insert(Metric::::new_empty_with_name(metric_name!("test_metric"))); + + let mut collection2 = MetricKindCollection::::default(); + collection2.insert(Metric::::new_empty_with_name(metric_name!("test_metric"))); + + let result = collection1.merge(&collection2); + + assert!( + result.is_err() + && matches!(result, Err(Error::MetricNameCollisionInMerge { metric_name }) if metric_name == metric_name!("test_metric")) + ); + } + + #[test] + fn it_should_not_allow_merging_gauge_metric_collections_with_name_collisions() { + let mut collection1 = MetricKindCollection::::default(); + collection1.insert(Metric::::new_empty_with_name(metric_name!("test_metric"))); + + let mut collection2 = MetricKindCollection::::default(); + collection2.insert(Metric::::new_empty_with_name(metric_name!("test_metric"))); + + let result = collection1.merge(&collection2); + + assert!( + result.is_err() + && matches!(result, Err(Error::MetricNameCollisionInMerge { metric_name }) if metric_name == metric_name!("test_metric")) + ); + } + } } From 45bc807c366ee1dd4522ac9cfdb00f45d8eeb606 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jun 2025 12:59:55 +0100 Subject: [PATCH 698/802] refactor: [#1534] rename TORRENT_REPOSITORY_LOG_TARGET to SWARM_COORDINATION_REGISTRY_LOG_TARGET --- packages/swarm-coordination-registry/src/lib.rs | 2 +- .../src/statistics/event/listener.rs | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/packages/swarm-coordination-registry/src/lib.rs b/packages/swarm-coordination-registry/src/lib.rs index fc7996817..eb2721a0c 100644 --- 
a/packages/swarm-coordination-registry/src/lib.rs +++ b/packages/swarm-coordination-registry/src/lib.rs @@ -22,7 +22,7 @@ pub(crate) type CurrentClock = clock::Working; #[allow(dead_code)] pub(crate) type CurrentClock = clock::Stopped; -pub const TORRENT_REPOSITORY_LOG_TARGET: &str = "TORRENT_REPOSITORY"; +pub const SWARM_COORDINATION_REGISTRY_LOG_TARGET: &str = "SWARM_COORDINATION_REGISTRY"; #[cfg(test)] pub(crate) mod tests { diff --git a/packages/swarm-coordination-registry/src/statistics/event/listener.rs b/packages/swarm-coordination-registry/src/statistics/event/listener.rs index f3b534332..9ff707818 100644 --- a/packages/swarm-coordination-registry/src/statistics/event/listener.rs +++ b/packages/swarm-coordination-registry/src/statistics/event/listener.rs @@ -7,18 +7,18 @@ use torrust_tracker_events::receiver::RecvError; use super::handler::handle_event; use crate::event::receiver::Receiver; use crate::statistics::repository::Repository; -use crate::{CurrentClock, TORRENT_REPOSITORY_LOG_TARGET}; +use crate::{CurrentClock, SWARM_COORDINATION_REGISTRY_LOG_TARGET}; #[must_use] pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { let stats_repository = repository.clone(); - tracing::info!(target: TORRENT_REPOSITORY_LOG_TARGET, "Starting torrent repository event listener"); + tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Starting torrent repository event listener"); tokio::spawn(async move { dispatch_events(receiver, stats_repository).await; - tracing::info!(target: TORRENT_REPOSITORY_LOG_TARGET, "Torrent repository listener finished"); + tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Torrent repository listener finished"); }) } @@ -32,7 +32,7 @@ async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc { - tracing::info!(target: TORRENT_REPOSITORY_LOG_TARGET, "Received Ctrl+C, shutting down torrent repository event listener."); + tracing::info!(target: 
SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Received Ctrl+C, shutting down torrent repository event listener."); break; } @@ -42,11 +42,11 @@ async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc { match e { RecvError::Closed => { - tracing::info!(target: TORRENT_REPOSITORY_LOG_TARGET, "Torrent repository event receiver closed."); + tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Torrent repository event receiver closed."); break; } RecvError::Lagged(n) => { - tracing::warn!(target: TORRENT_REPOSITORY_LOG_TARGET, "Torrent repository event receiver lagged by {} events.", n); + tracing::warn!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Torrent repository event receiver lagged by {} events.", n); } } } From c67f27a7f071708cf4739fd7ae3cbff3e946464f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jun 2025 13:05:03 +0100 Subject: [PATCH 699/802] refactor: [#1534] Rename torrent_repository_ prefix to swarm_coordination_registry_ --- .../statistics/activity_metrics_updater.rs | 6 +- .../src/statistics/event/handler.rs | 88 ++++++++++++------- .../src/statistics/mod.rs | 44 +++++----- 3 files changed, 83 insertions(+), 55 deletions(-) diff --git a/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs b/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs index 016e230ec..cf814e810 100644 --- a/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs +++ b/packages/swarm-coordination-registry/src/statistics/activity_metrics_updater.rs @@ -10,7 +10,7 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use tracing::instrument; use super::repository::Repository; -use crate::statistics::{TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL, TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL}; +use crate::statistics::{SWARM_COORDINATION_REGISTRY_PEERS_INACTIVE_TOTAL, SWARM_COORDINATION_REGISTRY_TORRENTS_INACTIVE_TOTAL}; use crate::{CurrentClock, Registry}; #[must_use] @@ -81,7 
+81,7 @@ async fn update_inactive_peers_total(stats_repository: &Arc, inactiv let _unused = stats_repository .set_gauge( - &metric_name!(TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_INACTIVE_TOTAL), &LabelSet::default(), inactive_peers_total, CurrentClock::now(), @@ -95,7 +95,7 @@ async fn update_inactive_torrents_total(stats_repository: &Arc, inac let _unused = stats_repository .set_gauge( - &metric_name!(TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_INACTIVE_TOTAL), &LabelSet::default(), inactive_torrents_total, CurrentClock::now(), diff --git a/packages/swarm-coordination-registry/src/statistics/event/handler.rs b/packages/swarm-coordination-registry/src/statistics/event/handler.rs index f8d350a80..17b012086 100644 --- a/packages/swarm-coordination-registry/src/statistics/event/handler.rs +++ b/packages/swarm-coordination-registry/src/statistics/event/handler.rs @@ -8,11 +8,13 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; use crate::statistics::repository::Repository; use crate::statistics::{ - TORRENT_REPOSITORY_PEERS_ADDED_TOTAL, TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL, TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL, - TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL, TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL, - TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL, TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL, + SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL, SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL, + SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL, SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL, + SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL, SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL, + SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL, SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL, }; +#[allow(clippy::too_many_lines)] pub async fn handle_event(event: Event, stats_repository: &Arc, now: 
DurationSinceUnixEpoch) { match event { // Torrent events @@ -20,12 +22,16 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: tracing::debug!(info_hash = ?info_hash, "Torrent added",); let _unused = stats_repository - .increment_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) + .increment_gauge( + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL), + &LabelSet::default(), + now, + ) .await; let _unused = stats_repository .increment_counter( - &metric_name!(TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL), &LabelSet::default(), now, ) @@ -35,12 +41,16 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: tracing::debug!(info_hash = ?info_hash, "Torrent removed",); let _unused = stats_repository - .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), &LabelSet::default(), now) + .decrement_gauge( + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL), + &LabelSet::default(), + now, + ) .await; let _unused = stats_repository .increment_counter( - &metric_name!(TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL), &LabelSet::default(), now, ) @@ -54,11 +64,15 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: let label_set = label_set_for_peer(&peer); let _unused = stats_repository - .increment_gauge(&metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), &label_set, now) + .increment_gauge( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL), + &label_set, + now, + ) .await; let _unused = stats_repository - .increment_counter(&metric_name!(TORRENT_REPOSITORY_PEERS_ADDED_TOTAL), &label_set, now) + .increment_counter(&metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL), &label_set, now) .await; } Event::PeerRemoved { info_hash, peer } => { @@ -67,11 +81,19 @@ pub async fn handle_event(event: Event, 
stats_repository: &Arc, now: let label_set = label_set_for_peer(&peer); let _unused = stats_repository - .decrement_gauge(&metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), &label_set, now) + .decrement_gauge( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL), + &label_set, + now, + ) .await; let _unused = stats_repository - .increment_counter(&metric_name!(TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL), &label_set, now) + .increment_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL), + &label_set, + now, + ) .await; } Event::PeerUpdated { @@ -84,7 +106,7 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: if old_peer.role() != new_peer.role() { let _unused = stats_repository .increment_gauge( - &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL), &label_set_for_peer(&new_peer), now, ) @@ -92,7 +114,7 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: let _unused = stats_repository .decrement_gauge( - &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL), &label_set_for_peer(&old_peer), now, ) @@ -102,7 +124,11 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: let label_set = label_set_for_peer(&new_peer); let _unused = stats_repository - .increment_counter(&metric_name!(TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL), &label_set, now) + .increment_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL), + &label_set, + now, + ) .await; } Event::PeerDownloadCompleted { info_hash, peer } => { @@ -110,7 +136,7 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: let _unused = stats_repository .increment_counter( - &metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL), &label_set_for_peer(&peer), now, ) @@ -217,7 +243,8 
@@ mod tests { use crate::statistics::event::handler::tests::{expect_counter_metric_to_be, expect_gauge_metric_to_be}; use crate::statistics::repository::Repository; use crate::statistics::{ - TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL, TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL, TORRENT_REPOSITORY_TORRENTS_TOTAL, + SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL, SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL, + SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL, }; use crate::tests::{sample_info_hash, sample_peer}; use crate::CurrentClock; @@ -240,7 +267,7 @@ mod tests { expect_gauge_metric_to_be( &stats_repository, - &metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL), &LabelSet::default(), 1.0, ) @@ -252,7 +279,7 @@ mod tests { clock::Stopped::local_set_to_unix_epoch(); let stats_repository = Arc::new(Repository::new()); - let metric_name = metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL); + let metric_name = metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL); let label_set = LabelSet::default(); // Increment the gauge first to simulate a torrent being added. 
@@ -291,7 +318,7 @@ mod tests { expect_counter_metric_to_be( &stats_repository, - &metric_name!(TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL), &LabelSet::default(), 1, ) @@ -315,7 +342,7 @@ mod tests { expect_counter_metric_to_be( &stats_repository, - &metric_name!(TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL), &LabelSet::default(), 1, ) @@ -335,7 +362,8 @@ mod tests { use crate::statistics::event::handler::{handle_event, label_set_for_peer}; use crate::statistics::repository::Repository; use crate::statistics::{ - TORRENT_REPOSITORY_PEERS_ADDED_TOTAL, TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL, TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL, + SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL, SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL, + SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL, }; use crate::tests::{sample_info_hash, sample_peer}; use crate::CurrentClock; @@ -357,7 +385,7 @@ mod tests { expect_gauge_metric_to_be, get_gauge_metric, make_opposite_role_peer, make_peer, }; use crate::statistics::repository::Repository; - use crate::statistics::TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL; + use crate::statistics::SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL; use crate::tests::sample_info_hash; use crate::CurrentClock; @@ -373,7 +401,7 @@ mod tests { let peer = make_peer(role); let stats_repository = Arc::new(Repository::new()); - let metric_name = metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL); + let metric_name = metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL); let label_set = (label_name!("peer_role"), LabelValue::new(&role.to_string())).into(); handle_event( @@ -402,7 +430,7 @@ mod tests { let stats_repository = Arc::new(Repository::new()); - let metric_name = metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL); + let metric_name = metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL); let 
label_set = (label_name!("peer_role"), LabelValue::new(&role.to_string())).into(); // Increment the gauge first to simulate a peer being added. @@ -438,7 +466,7 @@ mod tests { let old_peer = make_peer(old_role); let new_peer = make_opposite_role_peer(&old_peer); - let metric_name = metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL); + let metric_name = metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL); let old_role_label_set = (label_name!("peer_role"), LabelValue::new(&old_peer.role().to_string())).into(); let new_role_label_set = (label_name!("peer_role"), LabelValue::new(&new_peer.role().to_string())).into(); @@ -497,7 +525,7 @@ mod tests { expect_counter_metric_to_be( &stats_repository, - &metric_name!(TORRENT_REPOSITORY_PEERS_ADDED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL), &label_set_for_peer(&peer), 1, ) @@ -524,7 +552,7 @@ mod tests { expect_counter_metric_to_be( &stats_repository, - &metric_name!(TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL), &label_set_for_peer(&peer), 1, ) @@ -552,7 +580,7 @@ mod tests { expect_counter_metric_to_be( &stats_repository, - &metric_name!(TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL), &label_set_for_peer(&new_peer), 1, ) @@ -574,7 +602,7 @@ mod tests { use crate::statistics::event::handler::handle_event; use crate::statistics::event::handler::tests::{expect_counter_metric_to_be, make_peer}; use crate::statistics::repository::Repository; - use crate::statistics::TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL; + use crate::statistics::SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL; use crate::tests::sample_info_hash; use crate::CurrentClock; @@ -590,7 +618,7 @@ mod tests { let peer = make_peer(role); let stats_repository = Arc::new(Repository::new()); - let metric_name = metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL); + let metric_name = 
metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL); let label_set = (label_name!("peer_role"), LabelValue::new(&role.to_string())).into(); handle_event( diff --git a/packages/swarm-coordination-registry/src/statistics/mod.rs b/packages/swarm-coordination-registry/src/statistics/mod.rs index 6505a2db2..5b9b7f376 100644 --- a/packages/swarm-coordination-registry/src/statistics/mod.rs +++ b/packages/swarm-coordination-registry/src/statistics/mod.rs @@ -10,22 +10,22 @@ use torrust_tracker_metrics::unit::Unit; // Torrent metrics -const TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL: &str = "torrent_repository_torrents_added_total"; -const TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL: &str = "torrent_repository_torrents_removed_total"; +const SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL: &str = "swarm_coordination_registry_torrents_added_total"; +const SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL: &str = "swarm_coordination_registry_torrents_removed_total"; -const TORRENT_REPOSITORY_TORRENTS_TOTAL: &str = "torrent_repository_torrents_total"; -const TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL: &str = "torrent_repository_torrents_downloads_total"; -const TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL: &str = "torrent_repository_torrents_inactive_total"; +const SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL: &str = "swarm_coordination_registry_torrents_total"; +const SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL: &str = "swarm_coordination_registry_torrents_downloads_total"; +const SWARM_COORDINATION_REGISTRY_TORRENTS_INACTIVE_TOTAL: &str = "swarm_coordination_registry_torrents_inactive_total"; // Peers metrics -const TORRENT_REPOSITORY_PEERS_ADDED_TOTAL: &str = "torrent_repository_peers_added_total"; -const TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL: &str = "torrent_repository_peers_removed_total"; -const TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL: &str = "torrent_repository_peers_updated_total"; +const SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL: &str = 
"swarm_coordination_registry_peers_added_total"; +const SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL: &str = "swarm_coordination_registry_peers_removed_total"; +const SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL: &str = "swarm_coordination_registry_peers_updated_total"; -const TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL: &str = "torrent_repository_peer_connections_total"; -const TORRENT_REPOSITORY_UNIQUE_PEERS_TOTAL: &str = "torrent_repository_unique_peers_total"; // todo: not implemented yet -const TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL: &str = "torrent_repository_peers_inactive_total"; +const SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL: &str = "swarm_coordination_registry_peer_connections_total"; +const SWARM_COORDINATION_REGISTRY_UNIQUE_PEERS_TOTAL: &str = "swarm_coordination_registry_unique_peers_total"; // todo: not implemented yet +const SWARM_COORDINATION_REGISTRY_PEERS_INACTIVE_TOTAL: &str = "swarm_coordination_registry_peers_inactive_total"; #[must_use] pub fn describe_metrics() -> Metrics { @@ -34,31 +34,31 @@ pub fn describe_metrics() -> Metrics { // Torrent metrics metrics.metric_collection.describe_counter( - &metric_name!(TORRENT_REPOSITORY_TORRENTS_ADDED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL), Some(Unit::Count), Some(MetricDescription::new("The total number of torrents added.")), ); metrics.metric_collection.describe_counter( - &metric_name!(TORRENT_REPOSITORY_TORRENTS_REMOVED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL), Some(Unit::Count), Some(MetricDescription::new("The total number of torrents removed.")), ); metrics.metric_collection.describe_gauge( - &metric_name!(TORRENT_REPOSITORY_TORRENTS_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL), Some(Unit::Count), Some(MetricDescription::new("The total number of torrents.")), ); metrics.metric_collection.describe_counter( - &metric_name!(TORRENT_REPOSITORY_TORRENTS_DOWNLOADS_TOTAL), + 
&metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL), Some(Unit::Count), Some(MetricDescription::new("The total number of torrent downloads.")), ); metrics.metric_collection.describe_gauge( - &metric_name!(TORRENT_REPOSITORY_TORRENTS_INACTIVE_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_INACTIVE_TOTAL), Some(Unit::Count), Some(MetricDescription::new("The total number of inactive torrents.")), ); @@ -66,25 +66,25 @@ pub fn describe_metrics() -> Metrics { // Peers metrics metrics.metric_collection.describe_counter( - &metric_name!(TORRENT_REPOSITORY_PEERS_ADDED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL), Some(Unit::Count), Some(MetricDescription::new("The total number of peers added.")), ); metrics.metric_collection.describe_counter( - &metric_name!(TORRENT_REPOSITORY_PEERS_REMOVED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL), Some(Unit::Count), Some(MetricDescription::new("The total number of peers removed.")), ); metrics.metric_collection.describe_counter( - &metric_name!(TORRENT_REPOSITORY_PEERS_UPDATED_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL), Some(Unit::Count), Some(MetricDescription::new("The total number of peers updated.")), ); metrics.metric_collection.describe_gauge( - &metric_name!(TORRENT_REPOSITORY_PEER_CONNECTIONS_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL), Some(Unit::Count), Some(MetricDescription::new( "The total number of peer connections (one connection per torrent).", @@ -92,13 +92,13 @@ pub fn describe_metrics() -> Metrics { ); metrics.metric_collection.describe_gauge( - &metric_name!(TORRENT_REPOSITORY_UNIQUE_PEERS_TOTAL), + &metric_name!(SWARM_COORDINATION_REGISTRY_UNIQUE_PEERS_TOTAL), Some(Unit::Count), Some(MetricDescription::new("The total number of unique peers.")), ); metrics.metric_collection.describe_gauge( - &metric_name!(TORRENT_REPOSITORY_PEERS_INACTIVE_TOTAL), + 
&metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_INACTIVE_TOTAL), Some(Unit::Count), Some(MetricDescription::new("The total number of inactive peers.")), ); From c26315aea7c837ff0c523b5979600aa8f00d93bf Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jun 2025 13:06:36 +0100 Subject: [PATCH 700/802] refactor: [#1534] Rename TorrentRepositoryContainer type to SwarmCoordinationRegistryContainer --- packages/axum-http-tracker-server/src/environment.rs | 4 ++-- packages/axum-http-tracker-server/src/server.rs | 4 ++-- packages/axum-rest-tracker-api-server/src/environment.rs | 4 ++-- packages/http-tracker-core/src/container.rs | 4 ++-- packages/rest-tracker-api-core/src/container.rs | 8 ++++---- packages/rest-tracker-api-core/src/statistics/services.rs | 4 ++-- packages/swarm-coordination-registry/src/container.rs | 4 ++-- packages/tracker-core/src/container.rs | 7 +++++-- packages/tracker-core/tests/common/test_env.rs | 6 +++--- packages/udp-tracker-core/src/container.rs | 4 ++-- packages/udp-tracker-server/src/environment.rs | 4 ++-- src/container.rs | 6 +++--- 12 files changed, 31 insertions(+), 28 deletions(-) diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index 54c6b7767..ccc54b9cc 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -10,7 +10,7 @@ use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; -use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use crate::server::{HttpServer, Launcher, Running, Stopped}; @@ -144,7 +144,7 @@ impl EnvContainer { .expect("missing HTTP tracker configuration"); let http_tracker_config = 
Arc::new(http_tracker_config[0].clone()); - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( configuration.core.tracker_usage_statistics.into(), )); diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index b8ece8086..99ba4be51 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -259,7 +259,7 @@ mod tests { use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; - use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; + use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use torrust_tracker_test_helpers::configuration::ephemeral_public; use crate::server::{HttpServer, Launcher}; @@ -290,7 +290,7 @@ mod tests { let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); } - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( configuration.core.tracker_usage_statistics.into(), )); diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index 6be4cc53c..fc6ee112e 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -12,7 +12,7 @@ use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; use torrust_tracker_primitives::peer; -use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; +use 
torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; use crate::server::{ApiServer, Launcher, Running, Stopped}; @@ -173,7 +173,7 @@ impl EnvContainer { .clone(), ); - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); diff --git a/packages/http-tracker-core/src/container.rs b/packages/http-tracker-core/src/container.rs index 35f75e1fe..f573740a7 100644 --- a/packages/http-tracker-core/src/container.rs +++ b/packages/http-tracker-core/src/container.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use bittorrent_tracker_core::container::TrackerCoreContainer; use torrust_tracker_configuration::{Core, HttpTracker}; -use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use crate::event::bus::EventBus; use crate::event::sender::Broadcaster; @@ -27,7 +27,7 @@ pub struct HttpTrackerCoreContainer { impl HttpTrackerCoreContainer { #[must_use] pub fn initialize(core_config: &Arc, http_tracker_config: &Arc) -> Arc { - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); diff --git a/packages/rest-tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs index f76c2ece3..238e76801 100644 --- a/packages/rest-tracker-api-core/src/container.rs +++ b/packages/rest-tracker-api-core/src/container.rs @@ -7,14 +7,14 @@ use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, 
HttpApi, HttpTracker, UdpTracker}; -use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; pub struct TrackerHttpApiCoreContainer { pub http_api_config: Arc, // Torrent repository - pub torrent_repository_container: Arc, + pub torrent_repository_container: Arc, // Tracker core pub tracker_core_container: Arc, @@ -36,7 +36,7 @@ impl TrackerHttpApiCoreContainer { udp_tracker_config: &Arc, http_api_config: &Arc, ) -> Arc { - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); @@ -65,7 +65,7 @@ impl TrackerHttpApiCoreContainer { #[must_use] pub fn initialize_from( - torrent_repository_container: &Arc, + torrent_repository_container: &Arc, tracker_core_container: &Arc, http_tracker_core_container: &Arc, udp_tracker_core_container: &Arc, diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 56536a02f..1467517d9 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -165,7 +165,7 @@ mod tests { use tokio::sync::RwLock; use torrust_tracker_configuration::Configuration; use torrust_tracker_events::bus::SenderStatus; - use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; + use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use torrust_tracker_test_helpers::configuration; use crate::statistics::metrics::{ProtocolMetrics, TorrentsMetrics}; @@ -180,7 +180,7 @@ mod tests { let config = tracker_configuration(); let core_config = Arc::new(config.core.clone()); - let 
torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize(SenderStatus::Enabled)); + let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize(SenderStatus::Enabled)); let tracker_core_container = TrackerCoreContainer::initialize_from(&core_config, &torrent_repository_container.clone()); diff --git a/packages/swarm-coordination-registry/src/container.rs b/packages/swarm-coordination-registry/src/container.rs index 1b56b3d4b..1a243f967 100644 --- a/packages/swarm-coordination-registry/src/container.rs +++ b/packages/swarm-coordination-registry/src/container.rs @@ -8,14 +8,14 @@ use crate::event::{self}; use crate::statistics::repository::Repository; use crate::{statistics, Registry}; -pub struct TorrentRepositoryContainer { +pub struct SwarmCoordinationRegistryContainer { pub swarms: Arc, pub event_bus: Arc, pub stats_event_sender: event::sender::Sender, pub stats_repository: Arc, } -impl TorrentRepositoryContainer { +impl SwarmCoordinationRegistryContainer { #[must_use] pub fn initialize(sender_status: SenderStatus) -> Self { // Torrent repository stats diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index 949761553..8d776a3e6 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use torrust_tracker_configuration::Core; -use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use crate::announce_handler::AnnounceHandler; use crate::authentication::handler::KeysHandler; @@ -38,7 +38,10 @@ pub struct TrackerCoreContainer { impl TrackerCoreContainer { #[must_use] - pub fn initialize_from(core_config: &Arc, torrent_repository_container: &Arc) -> Self { + pub fn initialize_from( + core_config: &Arc, + torrent_repository_container: &Arc, + ) -> Self { let database = 
initialize_database(core_config); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(core_config, &in_memory_whitelist.clone())); diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs index 64bdcaad8..0c1ea8524 100644 --- a/packages/tracker-core/tests/common/test_env.rs +++ b/packages/tracker-core/tests/common/test_env.rs @@ -14,10 +14,10 @@ use torrust_tracker_primitives::core::{AnnounceData, ScrapeData}; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_primitives::DurationSinceUnixEpoch; -use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; pub struct TestEnv { - pub torrent_repository_container: Arc, + pub torrent_repository_container: Arc, pub tracker_core_container: Arc, } @@ -33,7 +33,7 @@ impl TestEnv { pub fn new(core_config: Core) -> Self { let core_config = Arc::new(core_config); - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index c4be395fc..a6e45268f 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use bittorrent_tracker_core::container::TrackerCoreContainer; use tokio::sync::RwLock; use torrust_tracker_configuration::{Core, UdpTracker}; -use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use 
crate::event::bus::EventBus; use crate::event::sender::Broadcaster; @@ -32,7 +32,7 @@ pub struct UdpTrackerCoreContainer { impl UdpTrackerCoreContainer { #[must_use] pub fn initialize(core_config: &Arc, udp_tracker_config: &Arc) -> Arc { - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 268259f1b..d12a1b011 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -8,7 +8,7 @@ use tokio::task::JoinHandle; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration, DEFAULT_TIMEOUT}; use torrust_tracker_primitives::peer; -use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use crate::container::UdpTrackerServerContainer; use crate::server::spawner::Spawner; @@ -175,7 +175,7 @@ impl EnvContainer { let udp_tracker_configurations = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); let udp_tracker_config = Arc::new(udp_tracker_configurations[0].clone()); - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); diff --git a/src/container.rs b/src/container.rs index bb5873fb2..0f73bda6b 100644 --- a/src/container.rs +++ b/src/container.rs @@ -9,7 +9,7 @@ use bittorrent_udp_tracker_core::{self}; use torrust_rest_tracker_api_core::container::TrackerHttpApiCoreContainer; use torrust_server_lib::registar::Registar; use 
torrust_tracker_configuration::{Configuration, HttpApi}; -use torrust_tracker_swarm_coordination_registry::container::TorrentRepositoryContainer; +use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use torrust_udp_tracker_server::container::UdpTrackerServerContainer; use tracing::instrument; @@ -30,7 +30,7 @@ pub struct AppContainer { pub registar: Arc, // Torrent Repository - pub torrent_repository_container: Arc, + pub torrent_repository_container: Arc, // Core pub tracker_core_container: Arc, @@ -60,7 +60,7 @@ impl AppContainer { // Torrent Repository - let torrent_repository_container = Arc::new(TorrentRepositoryContainer::initialize( + let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); From b09e79c5983952ece3f94e6c689f62737bf1fd86 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jun 2025 13:11:56 +0100 Subject: [PATCH 701/802] refactor: [#1534] Rename torrent_repository_container to swarm_coordination_registry_container --- .../src/environment.rs | 4 ++-- .../axum-http-tracker-server/src/server.rs | 4 ++-- .../src/environment.rs | 6 ++--- .../src/v1/context/stats/routes.rs | 5 ++++- packages/http-tracker-core/src/container.rs | 4 ++-- .../rest-tracker-api-core/src/container.rs | 12 +++++----- .../src/statistics/services.rs | 6 +++-- packages/tracker-core/src/container.rs | 6 +++-- .../tracker-core/tests/common/test_env.rs | 22 +++++++++++-------- packages/udp-tracker-core/src/container.rs | 4 ++-- .../udp-tracker-server/src/environment.rs | 4 ++-- .../jobs/activity_metrics_updater.rs | 4 ++-- src/bootstrap/jobs/torrent_repository.rs | 4 ++-- src/bootstrap/jobs/tracker_core.rs | 2 +- src/container.rs | 10 ++++----- 15 files changed, 54 insertions(+), 43 deletions(-) diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index ccc54b9cc..6e58c2cac 100644 --- 
a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -144,13 +144,13 @@ impl EnvContainer { .expect("missing HTTP tracker configuration"); let http_tracker_config = Arc::new(http_tracker_config[0].clone()); - let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( configuration.core.tracker_usage_statistics.into(), )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( &core_config, - &torrent_repository_container, + &swarm_coordination_registry_container, )); let http_tracker_container = diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 99ba4be51..1775a3d72 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -290,13 +290,13 @@ mod tests { let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); } - let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( configuration.core.tracker_usage_statistics.into(), )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( &core_config, - &torrent_repository_container, + &swarm_coordination_registry_container, )); let announce_service = Arc::new(AnnounceService::new( diff --git a/packages/axum-rest-tracker-api-server/src/environment.rs b/packages/axum-rest-tracker-api-server/src/environment.rs index fc6ee112e..cddb45277 100644 --- a/packages/axum-rest-tracker-api-server/src/environment.rs +++ b/packages/axum-rest-tracker-api-server/src/environment.rs @@ -173,13 +173,13 @@ impl EnvContainer { .clone(), ); - let torrent_repository_container = 
Arc::new(SwarmCoordinationRegistryContainer::initialize( + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( &core_config, - &torrent_repository_container, + &swarm_coordination_registry_container, )); let http_tracker_core_container = @@ -191,7 +191,7 @@ impl EnvContainer { let udp_tracker_server_container = UdpTrackerServerContainer::initialize(&core_config); let tracker_http_api_core_container = TrackerHttpApiCoreContainer::initialize_from( - &torrent_repository_container, + &swarm_coordination_registry_container, &tracker_core_container, &http_tracker_core_container, &udp_tracker_core_container, diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs index a573b764a..c2a1466e0 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs @@ -30,7 +30,10 @@ pub fn add(prefix: &str, router: Router, http_api_container: &Arc, http_tracker_config: &Arc) -> Arc { - let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( core_config, - &torrent_repository_container, + &swarm_coordination_registry_container, )); Self::initialize_from_tracker_core(&tracker_core_container, http_tracker_config) diff --git a/packages/rest-tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs index 238e76801..93655b2ba 100644 --- a/packages/rest-tracker-api-core/src/container.rs +++ b/packages/rest-tracker-api-core/src/container.rs @@ 
-14,7 +14,7 @@ pub struct TrackerHttpApiCoreContainer { pub http_api_config: Arc, // Torrent repository - pub torrent_repository_container: Arc, + pub swarm_coordination_registry_container: Arc, // Tracker core pub tracker_core_container: Arc, @@ -36,13 +36,13 @@ impl TrackerHttpApiCoreContainer { udp_tracker_config: &Arc, http_api_config: &Arc, ) -> Arc { - let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( core_config, - &torrent_repository_container, + &swarm_coordination_registry_container, )); let http_tracker_core_container = @@ -54,7 +54,7 @@ impl TrackerHttpApiCoreContainer { let udp_tracker_server_container = UdpTrackerServerContainer::initialize(core_config); Self::initialize_from( - &torrent_repository_container, + &swarm_coordination_registry_container, &tracker_core_container, &http_tracker_core_container, &udp_tracker_core_container, @@ -65,7 +65,7 @@ impl TrackerHttpApiCoreContainer { #[must_use] pub fn initialize_from( - torrent_repository_container: &Arc, + swarm_coordination_registry_container: &Arc, tracker_core_container: &Arc, http_tracker_core_container: &Arc, udp_tracker_core_container: &Arc, @@ -76,7 +76,7 @@ impl TrackerHttpApiCoreContainer { http_api_config: http_api_config.clone(), // Torrent repository - torrent_repository_container: torrent_repository_container.clone(), + swarm_coordination_registry_container: swarm_coordination_registry_container.clone(), // Tracker core tracker_core_container: tracker_core_container.clone(), diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 1467517d9..6474df0d7 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ 
b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -180,9 +180,11 @@ mod tests { let config = tracker_configuration(); let core_config = Arc::new(config.core.clone()); - let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize(SenderStatus::Enabled)); + let swarm_coordination_registry_container = + Arc::new(SwarmCoordinationRegistryContainer::initialize(SenderStatus::Enabled)); - let tracker_core_container = TrackerCoreContainer::initialize_from(&core_config, &torrent_repository_container.clone()); + let tracker_core_container = + TrackerCoreContainer::initialize_from(&core_config, &swarm_coordination_registry_container.clone()); let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); diff --git a/packages/tracker-core/src/container.rs b/packages/tracker-core/src/container.rs index 8d776a3e6..93b8efd7e 100644 --- a/packages/tracker-core/src/container.rs +++ b/packages/tracker-core/src/container.rs @@ -40,7 +40,7 @@ impl TrackerCoreContainer { #[must_use] pub fn initialize_from( core_config: &Arc, - torrent_repository_container: &Arc, + swarm_coordination_registry_container: &Arc, ) -> Self { let database = initialize_database(core_config); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); @@ -53,7 +53,9 @@ impl TrackerCoreContainer { &db_key_repository.clone(), &in_memory_key_repository.clone(), )); - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new(torrent_repository_container.swarms.clone())); + let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::new( + swarm_coordination_registry_container.swarms.clone(), + )); let db_downloads_metric_repository = Arc::new(DatabaseDownloadsMetricRepository::new(&database)); let torrents_manager = Arc::new(TorrentsManager::new( diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs index 0c1ea8524..d3bc9652a 100644 --- 
a/packages/tracker-core/tests/common/test_env.rs +++ b/packages/tracker-core/tests/common/test_env.rs @@ -17,7 +17,7 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; pub struct TestEnv { - pub torrent_repository_container: Arc, + pub swarm_coordination_registry_container: Arc, pub tracker_core_container: Arc, } @@ -33,17 +33,17 @@ impl TestEnv { pub fn new(core_config: Core) -> Self { let core_config = Arc::new(core_config); - let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( &core_config, - &torrent_repository_container, + &swarm_coordination_registry_container, )); Self { - torrent_repository_container, + swarm_coordination_registry_container, tracker_core_container, } } @@ -68,13 +68,13 @@ impl TestEnv { let mut jobs = vec![]; let job = torrust_tracker_swarm_coordination_registry::statistics::event::listener::run_event_listener( - self.torrent_repository_container.event_bus.receiver(), - &self.torrent_repository_container.stats_repository, + self.swarm_coordination_registry_container.event_bus.receiver(), + &self.swarm_coordination_registry_container.stats_repository, ); jobs.push(job); let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( - self.torrent_repository_container.event_bus.receiver(), + self.swarm_coordination_registry_container.event_bus.receiver(), &self.tracker_core_container.stats_repository, &self.tracker_core_container.db_downloads_metric_repository, self.tracker_core_container @@ -147,7 +147,7 @@ impl TestEnv { } pub async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { - self.torrent_repository_container + 
self.swarm_coordination_registry_container .swarms .get_swarm_metadata(info_hash) .await @@ -155,7 +155,11 @@ impl TestEnv { } pub async fn remove_swarm(&self, info_hash: &InfoHash) { - self.torrent_repository_container.swarms.remove(info_hash).await.unwrap(); + self.swarm_coordination_registry_container + .swarms + .remove(info_hash) + .await + .unwrap(); } pub async fn get_counter_value(&self, metric_name: &str) -> u64 { diff --git a/packages/udp-tracker-core/src/container.rs b/packages/udp-tracker-core/src/container.rs index a6e45268f..1d8b1d71c 100644 --- a/packages/udp-tracker-core/src/container.rs +++ b/packages/udp-tracker-core/src/container.rs @@ -32,13 +32,13 @@ pub struct UdpTrackerCoreContainer { impl UdpTrackerCoreContainer { #[must_use] pub fn initialize(core_config: &Arc, udp_tracker_config: &Arc) -> Arc { - let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( core_config, - &torrent_repository_container, + &swarm_coordination_registry_container, )); Self::initialize_from_tracker_core(&tracker_core_container, udp_tracker_config) diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index d12a1b011..f48b3a7c1 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -175,13 +175,13 @@ impl EnvContainer { let udp_tracker_configurations = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); let udp_tracker_config = Arc::new(udp_tracker_configurations[0].clone()); - let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + let swarm_coordination_registry_container = 
Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( &core_config, - &torrent_repository_container, + &swarm_coordination_registry_container, )); let udp_tracker_core_container = diff --git a/src/bootstrap/jobs/activity_metrics_updater.rs b/src/bootstrap/jobs/activity_metrics_updater.rs index 9813fed65..9bbdc3f9b 100644 --- a/src/bootstrap/jobs/activity_metrics_updater.rs +++ b/src/bootstrap/jobs/activity_metrics_updater.rs @@ -12,8 +12,8 @@ use crate::CurrentClock; #[must_use] pub fn start_job(config: &Configuration, app_container: &Arc) -> JoinHandle<()> { torrust_tracker_swarm_coordination_registry::statistics::activity_metrics_updater::start_job( - &app_container.torrent_repository_container.swarms.clone(), - &app_container.torrent_repository_container.stats_repository.clone(), + &app_container.swarm_coordination_registry_container.swarms.clone(), + &app_container.swarm_coordination_registry_container.stats_repository.clone(), peer_inactivity_cutoff_timestamp(config.core.tracker_policy.max_peer_timeout), ) } diff --git a/src/bootstrap/jobs/torrent_repository.rs b/src/bootstrap/jobs/torrent_repository.rs index c64917ea6..44ffdf53b 100644 --- a/src/bootstrap/jobs/torrent_repository.rs +++ b/src/bootstrap/jobs/torrent_repository.rs @@ -8,8 +8,8 @@ use crate::container::AppContainer; pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { if config.core.tracker_usage_statistics { let job = torrust_tracker_swarm_coordination_registry::statistics::event::listener::run_event_listener( - app_container.torrent_repository_container.event_bus.receiver(), - &app_container.torrent_repository_container.stats_repository, + app_container.swarm_coordination_registry_container.event_bus.receiver(), + &app_container.swarm_coordination_registry_container.stats_repository, ); Some(job) diff --git 
a/src/bootstrap/jobs/tracker_core.rs b/src/bootstrap/jobs/tracker_core.rs index fd5cacbda..f2fc25ef3 100644 --- a/src/bootstrap/jobs/tracker_core.rs +++ b/src/bootstrap/jobs/tracker_core.rs @@ -8,7 +8,7 @@ use crate::container::AppContainer; pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { if config.core.tracker_usage_statistics || config.core.tracker_policy.persistent_torrent_completed_stat { let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( - app_container.torrent_repository_container.event_bus.receiver(), + app_container.swarm_coordination_registry_container.event_bus.receiver(), &app_container.tracker_core_container.stats_repository, &app_container.tracker_core_container.db_downloads_metric_repository, app_container diff --git a/src/container.rs b/src/container.rs index 0f73bda6b..461a5b36a 100644 --- a/src/container.rs +++ b/src/container.rs @@ -30,7 +30,7 @@ pub struct AppContainer { pub registar: Arc, // Torrent Repository - pub torrent_repository_container: Arc, + pub swarm_coordination_registry_container: Arc, // Core pub tracker_core_container: Arc, @@ -60,7 +60,7 @@ impl AppContainer { // Torrent Repository - let torrent_repository_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( + let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), )); @@ -68,7 +68,7 @@ impl AppContainer { let tracker_core_container = Arc::new(TrackerCoreContainer::initialize_from( &core_config, - &torrent_repository_container, + &swarm_coordination_registry_container, )); // HTTP @@ -98,7 +98,7 @@ impl AppContainer { registar, // Torrent Repository - torrent_repository_container, + swarm_coordination_registry_container, // Core tracker_core_container, @@ -146,7 +146,7 @@ impl AppContainer { TrackerHttpApiCoreContainer { http_api_config: http_api_config.clone(), - torrent_repository_container: 
self.torrent_repository_container.clone(), + swarm_coordination_registry_container: self.swarm_coordination_registry_container.clone(), tracker_core_container: self.tracker_core_container.clone(), From 8da42e4333d015ff5927da10807f7c67fa399ece Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jun 2025 13:13:48 +0100 Subject: [PATCH 702/802] refactor: [#1534] Rename torrent_repository_event_listener to swarm_coordination_registry_event_listener --- src/app.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/app.rs b/src/app.rs index ccc2e8bcb..5050c1dd1 100644 --- a/src/app.rs +++ b/src/app.rs @@ -71,7 +71,7 @@ async fn load_data_from_database(config: &Configuration, app_container: &Arc) -> JobManager { let mut job_manager = JobManager::new(); - start_torrent_repository_event_listener(config, app_container, &mut job_manager); + start_swarm_coordination_registry_event_listener(config, app_container, &mut job_manager); start_tracker_core_event_listener(config, app_container, &mut job_manager); start_http_core_event_listener(config, app_container, &mut job_manager); start_udp_core_event_listener(config, app_container, &mut job_manager); @@ -132,13 +132,13 @@ async fn load_torrent_metrics(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager, ) { job_manager.push_opt( - "torrent_repository_event_listener", + "swarm_coordination_registry_event_listener", jobs::torrent_repository::start_event_listener(config, app_container), ); } From b2feb7b3150f0314cace37b7f08a926a2eb63298 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jun 2025 13:15:40 +0100 Subject: [PATCH 703/802] docs: [#1534] Update comments after rename --- packages/rest-tracker-api-core/src/container.rs | 4 ++-- packages/swarm-coordination-registry/src/container.rs | 2 +- packages/tracker-core/src/statistics/persisted/downloads.rs | 2 +- src/container.rs | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git 
a/packages/rest-tracker-api-core/src/container.rs b/packages/rest-tracker-api-core/src/container.rs index 93655b2ba..bcc5a0186 100644 --- a/packages/rest-tracker-api-core/src/container.rs +++ b/packages/rest-tracker-api-core/src/container.rs @@ -13,7 +13,7 @@ use torrust_udp_tracker_server::container::UdpTrackerServerContainer; pub struct TrackerHttpApiCoreContainer { pub http_api_config: Arc, - // Torrent repository + // Swarm Coordination Registry Container pub swarm_coordination_registry_container: Arc, // Tracker core @@ -75,7 +75,7 @@ impl TrackerHttpApiCoreContainer { Arc::new(TrackerHttpApiCoreContainer { http_api_config: http_api_config.clone(), - // Torrent repository + // Swarm Coordination Registry Container swarm_coordination_registry_container: swarm_coordination_registry_container.clone(), // Tracker core diff --git a/packages/swarm-coordination-registry/src/container.rs b/packages/swarm-coordination-registry/src/container.rs index 1a243f967..718e3ee52 100644 --- a/packages/swarm-coordination-registry/src/container.rs +++ b/packages/swarm-coordination-registry/src/container.rs @@ -18,7 +18,7 @@ pub struct SwarmCoordinationRegistryContainer { impl SwarmCoordinationRegistryContainer { #[must_use] pub fn initialize(sender_status: SenderStatus) -> Self { - // Torrent repository stats + // Swarm Coordination Registry Container stats let broadcaster = Broadcaster::default(); let stats_repository = Arc::new(Repository::new()); diff --git a/packages/tracker-core/src/statistics/persisted/downloads.rs b/packages/tracker-core/src/statistics/persisted/downloads.rs index 4d3bdf9a3..6248bdc73 100644 --- a/packages/tracker-core/src/statistics/persisted/downloads.rs +++ b/packages/tracker-core/src/statistics/persisted/downloads.rs @@ -7,7 +7,7 @@ use torrust_tracker_primitives::{NumberOfDownloads, NumberOfDownloadsBTreeMap}; use crate::databases::error::Error; use crate::databases::Database; -/// Torrent repository implementation that persists torrent metrics in a
database. +/// It persists torrent metrics in a database. /// /// This repository persists only a subset of the torrent data: the torrent /// metrics, specifically the number of downloads (or completed counts) for each diff --git a/src/container.rs b/src/container.rs index 461a5b36a..7112a54e8 100644 --- a/src/container.rs +++ b/src/container.rs @@ -29,7 +29,7 @@ pub struct AppContainer { // Registar pub registar: Arc, - // Torrent Repository + // Swarm Coordination Registry Container pub swarm_coordination_registry_container: Arc, // Core @@ -58,7 +58,7 @@ impl AppContainer { let registar = Arc::new(Registar::default()); - // Torrent Repository + // Swarm Coordination Registry Container let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( core_config.tracker_usage_statistics.into(), @@ -97,7 +97,7 @@ impl AppContainer { // Registar registar, - // Torrent Repository + // Swarm Coordination Registry Container swarm_coordination_registry_container, // Core From 7be03663946dcbcf2f1ee28d62b1c8d30741cd42 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 9 Jun 2025 13:42:39 +0100 Subject: [PATCH 704/802] feat: [#1534] add new metric to count peers reverting state from complete to any other state The metric is: ``` swarm_coordination_registry_peers_completed_state_reverted_total 1 ``` --- .../src/statistics/event/handler.rs | 27 +++++++++++++++---- .../src/statistics/mod.rs | 10 +++++++ 2 files changed, 32 insertions(+), 5 deletions(-) diff --git a/packages/swarm-coordination-registry/src/statistics/event/handler.rs b/packages/swarm-coordination-registry/src/statistics/event/handler.rs index 17b012086..1d3f8f32c 100644 --- a/packages/swarm-coordination-registry/src/statistics/event/handler.rs +++ b/packages/swarm-coordination-registry/src/statistics/event/handler.rs @@ -8,10 +8,11 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; use crate::statistics::repository::Repository; use 
crate::statistics::{ - SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL, SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL, - SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL, SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL, - SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL, SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL, - SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL, SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL, + SWARM_COORDINATION_REGISTRY_PEERS_ADDED_TOTAL, SWARM_COORDINATION_REGISTRY_PEERS_COMPLETED_STATE_REVERTED_TOTAL, + SWARM_COORDINATION_REGISTRY_PEERS_REMOVED_TOTAL, SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL, + SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL, SWARM_COORDINATION_REGISTRY_TORRENTS_ADDED_TOTAL, + SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL, SWARM_COORDINATION_REGISTRY_TORRENTS_REMOVED_TOTAL, + SWARM_COORDINATION_REGISTRY_TORRENTS_TOTAL, }; #[allow(clippy::too_many_lines)] @@ -103,6 +104,8 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: } => { tracing::debug!(info_hash = ?info_hash, old_peer = ?old_peer, new_peer = ?new_peer, "Peer updated", ); + // If the peer's role has changed, we need to adjust the number of + // connections if old_peer.role() != new_peer.role() { let _unused = stats_repository .increment_gauge( @@ -121,6 +124,20 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: .await; } + // If the peer reverted from a completed state to any other state, + // we need to increment the counter for reverted completed. + if old_peer.is_completed() && !new_peer.is_completed() { + let _unused = stats_repository + .increment_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_COMPLETED_STATE_REVERTED_TOTAL), + &LabelSet::default(), + now, + ) + .await; + } + + // Regardless of the role change, we still need to increment the + // counter for updated peers. 
let label_set = label_set_for_peer(&new_peer); let _unused = stats_repository @@ -134,7 +151,7 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: Event::PeerDownloadCompleted { info_hash, peer } => { tracing::debug!(info_hash = ?info_hash, peer = ?peer, "Peer download completed", ); - let _unused = stats_repository + let _unused: Result<(), torrust_tracker_metrics::metric_collection::Error> = stats_repository .increment_counter( &metric_name!(SWARM_COORDINATION_REGISTRY_TORRENTS_DOWNLOADS_TOTAL), &label_set_for_peer(&peer), diff --git a/packages/swarm-coordination-registry/src/statistics/mod.rs b/packages/swarm-coordination-registry/src/statistics/mod.rs index 5b9b7f376..a4bf4c018 100644 --- a/packages/swarm-coordination-registry/src/statistics/mod.rs +++ b/packages/swarm-coordination-registry/src/statistics/mod.rs @@ -26,6 +26,8 @@ const SWARM_COORDINATION_REGISTRY_PEERS_UPDATED_TOTAL: &str = "swarm_coordinatio const SWARM_COORDINATION_REGISTRY_PEER_CONNECTIONS_TOTAL: &str = "swarm_coordination_registry_peer_connections_total"; const SWARM_COORDINATION_REGISTRY_UNIQUE_PEERS_TOTAL: &str = "swarm_coordination_registry_unique_peers_total"; // todo: not implemented yet const SWARM_COORDINATION_REGISTRY_PEERS_INACTIVE_TOTAL: &str = "swarm_coordination_registry_peers_inactive_total"; +const SWARM_COORDINATION_REGISTRY_PEERS_COMPLETED_STATE_REVERTED_TOTAL: &str = + "swarm_coordination_registry_peers_completed_state_reverted_total"; #[must_use] pub fn describe_metrics() -> Metrics { @@ -103,5 +105,13 @@ pub fn describe_metrics() -> Metrics { Some(MetricDescription::new("The total number of inactive peers.")), ); + metrics.metric_collection.describe_counter( + &metric_name!(SWARM_COORDINATION_REGISTRY_PEERS_COMPLETED_STATE_REVERTED_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new( + "The total number of peers whose completed state was reverted.", + )), + ); + metrics } From d81e59e2e11787ea99fa123091154a524c85f8eb Mon Sep 17 00:00:00 2001 
From: Jose Celano Date: Mon, 9 Jun 2025 17:27:03 +0100 Subject: [PATCH 705/802] fix: [#1565] ban service should work with stats disabled --- .../src/banning/event/handler.rs | 19 ++++++ .../src/banning/event/listener.rs | 58 +++++++++++++++++++ .../src/banning/event/mod.rs | 2 + .../udp-tracker-server/src/banning/mod.rs | 1 + .../udp-tracker-server/src/environment.rs | 51 ++++++++-------- packages/udp-tracker-server/src/lib.rs | 1 + .../src/statistics/event/handler/error.rs | 15 ----- .../src/statistics/event/handler/mod.rs | 13 +---- .../event/handler/request_aborted.rs | 6 -- .../event/handler/request_accepted.rs | 14 ----- .../event/handler/request_banned.rs | 6 -- .../event/handler/request_received.rs | 4 -- .../statistics/event/handler/response_sent.rs | 6 -- .../src/statistics/event/listener.rs | 15 ++--- src/app.rs | 20 +++++-- src/bootstrap/jobs/udp_tracker_server.rs | 11 +++- 16 files changed, 137 insertions(+), 105 deletions(-) create mode 100644 packages/udp-tracker-server/src/banning/event/handler.rs create mode 100644 packages/udp-tracker-server/src/banning/event/listener.rs create mode 100644 packages/udp-tracker-server/src/banning/event/mod.rs create mode 100644 packages/udp-tracker-server/src/banning/mod.rs diff --git a/packages/udp-tracker-server/src/banning/event/handler.rs b/packages/udp-tracker-server/src/banning/event/handler.rs new file mode 100644 index 000000000..2d77d0979 --- /dev/null +++ b/packages/udp-tracker-server/src/banning/event/handler.rs @@ -0,0 +1,19 @@ +use std::sync::Arc; + +use bittorrent_udp_tracker_core::services::banning::BanService; +use tokio::sync::RwLock; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::event::{ErrorKind, Event}; + +pub async fn handle_event(event: Event, ban_service: &Arc>, _now: DurationSinceUnixEpoch) { + if let Event::UdpError { + context, + kind: _, + error: ErrorKind::ConnectionCookie(_msg), + } = event + { + let mut ban_service = ban_service.write().await; + 
ban_service.increase_counter(&context.client_socket_addr().ip()); + } +} diff --git a/packages/udp-tracker-server/src/banning/event/listener.rs b/packages/udp-tracker-server/src/banning/event/listener.rs new file mode 100644 index 000000000..ee1a4366f --- /dev/null +++ b/packages/udp-tracker-server/src/banning/event/listener.rs @@ -0,0 +1,58 @@ +use std::sync::Arc; + +use bittorrent_udp_tracker_core::services::banning::BanService; +use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; +use tokio::sync::RwLock; +use tokio::task::JoinHandle; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_events::receiver::RecvError; + +use super::handler::handle_event; +use crate::event::receiver::Receiver; +use crate::CurrentClock; + +#[must_use] +pub fn run_event_listener(receiver: Receiver, ban_service: &Arc>) -> JoinHandle<()> { + let ban_service_clone = ban_service.clone(); + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker server event listener (banning)"); + + tokio::spawn(async move { + dispatch_events(receiver, ban_service_clone).await; + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker server event listener (banning) finished"); + }) +} + +async fn dispatch_events(mut receiver: Receiver, ban_service: Arc>) { + let shutdown_signal = tokio::signal::ctrl_c(); + tokio::pin!(shutdown_signal); + + loop { + tokio::select! 
{ + biased; + + _ = &mut shutdown_signal => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Received Ctrl+C, shutting down UDP tracker server event listener (banning)"); + break; + } + + result = receiver.recv() => { + match result { + Ok(event) => handle_event(event, &ban_service, CurrentClock::now()).await, + Err(e) => { + match e { + RecvError::Closed => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp server receiver (banning) closed."); + break; + } + RecvError::Lagged(n) => { + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp server receiver (banning) lagged by {} events.", n); + } + } + } + } + } + } + } +} diff --git a/packages/udp-tracker-server/src/banning/event/mod.rs b/packages/udp-tracker-server/src/banning/event/mod.rs new file mode 100644 index 000000000..dae683398 --- /dev/null +++ b/packages/udp-tracker-server/src/banning/event/mod.rs @@ -0,0 +1,2 @@ +pub mod handler; +pub mod listener; diff --git a/packages/udp-tracker-server/src/banning/mod.rs b/packages/udp-tracker-server/src/banning/mod.rs new file mode 100644 index 000000000..53f112654 --- /dev/null +++ b/packages/udp-tracker-server/src/banning/mod.rs @@ -0,0 +1 @@ +pub mod event; diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index f48b3a7c1..6c03cc75f 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -1,13 +1,11 @@ use std::net::SocketAddr; use std::sync::Arc; -use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use tokio::task::JoinHandle; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration, DEFAULT_TIMEOUT}; -use torrust_tracker_primitives::peer; use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; use 
crate::container::UdpTrackerServerContainer; @@ -25,22 +23,8 @@ where pub registar: Registar, pub server: Server, pub udp_core_event_listener_job: Option>, - pub udp_server_event_listener_job: Option>, -} - -impl Environment -where - S: std::fmt::Debug + std::fmt::Display, -{ - /// Add a torrent to the tracker - #[allow(dead_code)] - pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { - self.container - .tracker_core_container - .in_memory_torrent_repository - .handle_announcement(info_hash, peer, None) - .await; - } + pub udp_server_stats_event_listener_job: Option>, + pub udp_server_banning_event_listener_job: Option>, } impl Environment { @@ -60,7 +44,8 @@ impl Environment { registar: Registar::default(), server, udp_core_event_listener_job: None, - udp_server_event_listener_job: None, + udp_server_stats_event_listener_job: None, + udp_server_banning_event_listener_job: None, } } @@ -78,10 +63,15 @@ impl Environment { &self.container.udp_tracker_core_container.stats_repository, )); - // Start the UDP tracker server event listener - let udp_server_event_listener_job = Some(crate::statistics::event::listener::run_event_listener( + // Start the UDP tracker server event listener (statistics) + let udp_server_stats_event_listener_job = Some(crate::statistics::event::listener::run_event_listener( self.container.udp_tracker_server_container.event_bus.receiver(), &self.container.udp_tracker_server_container.stats_repository, + )); + + // Start the UDP tracker server event listener (banning) + let udp_server_banning_event_listener_job = Some(crate::banning::event::listener::run_event_listener( + self.container.udp_tracker_server_container.event_bus.receiver(), &self.container.udp_tracker_core_container.ban_service, )); @@ -102,7 +92,8 @@ impl Environment { registar: self.registar.clone(), server, udp_core_event_listener_job, - udp_server_event_listener_job, + udp_server_stats_event_listener_job, + udp_server_banning_event_listener_job, } } } @@ 
-131,11 +122,18 @@ impl Environment { udp_core_event_listener_job.abort(); } - // Stop the UDP tracker server event listener - if let Some(udp_server_event_listener_job) = self.udp_server_event_listener_job { + // Stop the UDP tracker server event listener (statistics) + if let Some(udp_server_stats_event_listener_job) = self.udp_server_stats_event_listener_job { + // todo: send a message to the event listener to stop and wait for + // it to finish + udp_server_stats_event_listener_job.abort(); + } + + // Stop the UDP tracker server event listener (banning) + if let Some(udp_server_banning_event_listener_job) = self.udp_server_banning_event_listener_job { // todo: send a message to the event listener to stop and wait for // it to finish - udp_server_event_listener_job.abort(); + udp_server_banning_event_listener_job.abort(); } // Stop the UDP tracker server @@ -149,7 +147,8 @@ impl Environment { registar: Registar::default(), server, udp_core_event_listener_job: None, - udp_server_event_listener_job: None, + udp_server_stats_event_listener_job: None, + udp_server_banning_event_listener_job: None, } } diff --git a/packages/udp-tracker-server/src/lib.rs b/packages/udp-tracker-server/src/lib.rs index 996c41917..58a3830e1 100644 --- a/packages/udp-tracker-server/src/lib.rs +++ b/packages/udp-tracker-server/src/lib.rs @@ -634,6 +634,7 @@ //! documentation by [Arvid Norberg](https://github.com/arvidn) was very //! supportive in the development of this documentation. Some descriptions were //! taken from the [libtorrent](https://www.rasterbar.com/products/libtorrent/udp_tracker_protocol.html). 
+pub mod banning; pub mod container; pub mod environment; pub mod error; diff --git a/packages/udp-tracker-server/src/statistics/event/handler/error.rs b/packages/udp-tracker-server/src/statistics/event/handler/error.rs index 7327386a3..7bde032fe 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/error.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/error.rs @@ -1,8 +1,4 @@ -use std::sync::Arc; - use aquatic_udp_protocol::PeerClient; -use bittorrent_udp_tracker_core::services::banning::BanService; -use tokio::sync::RwLock; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::{label_name, metric_name}; use torrust_tracker_primitives::DurationSinceUnixEpoch; @@ -16,16 +12,9 @@ pub async fn handle_event( opt_udp_request_kind: Option, error_kind: ErrorKind, repository: &Repository, - ban_service: &Arc>, now: DurationSinceUnixEpoch, ) { - if let ErrorKind::ConnectionCookie(_msg) = error_kind.clone() { - let mut ban_service = ban_service.write().await; - ban_service.increase_counter(&connection_context.client_socket_addr().ip()); - } - update_global_fixed_metrics(&connection_context, repository).await; - update_extendable_metrics(&connection_context, opt_udp_request_kind, error_kind, repository, now).await; } @@ -126,9 +115,7 @@ fn extract_name_and_version(peer_client: &PeerClient) -> (String, String) { #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; - use bittorrent_udp_tracker_core::services::banning::BanService; use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; @@ -141,7 +128,6 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_errors_counter_when_it_receives_a_udp4_error_event() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpError { @@ -157,7 +143,6 @@ mod tests { error: 
ErrorKind::RequestParse("Invalid request format".to_string()), }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; diff --git a/packages/udp-tracker-server/src/statistics/event/handler/mod.rs b/packages/udp-tracker-server/src/statistics/event/handler/mod.rs index c8ac864a3..9e7f5cd47 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/mod.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/mod.rs @@ -5,21 +5,12 @@ mod request_banned; mod request_received; mod response_sent; -use std::sync::Arc; - -use bittorrent_udp_tracker_core::services::banning::BanService; -use tokio::sync::RwLock; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::Event; use crate::statistics::repository::Repository; -pub async fn handle_event( - event: Event, - stats_repository: &Repository, - ban_service: &Arc>, - now: DurationSinceUnixEpoch, -) { +pub async fn handle_event(event: Event, stats_repository: &Repository, now: DurationSinceUnixEpoch) { match event { Event::UdpRequestAborted { context } => { request_aborted::handle_event(context, stats_repository, now).await; @@ -41,7 +32,7 @@ pub async fn handle_event( response_sent::handle_event(context, kind, req_processing_time, stats_repository, now).await; } Event::UdpError { context, kind, error } => { - error::handle_event(context, kind, error, stats_repository, ban_service, now).await; + error::handle_event(context, kind, error, stats_repository, now).await; } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs index 270ec2a45..fc701df75 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs @@ -27,9 +27,7 @@ pub async fn handle_event(context: ConnectionContext, stats_repository: &Reposit #[cfg(test)] mod tests { use std::net::{IpAddr, 
Ipv4Addr, SocketAddr}; - use std::sync::Arc; - use bittorrent_udp_tracker_core::services::banning::BanService; use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; @@ -41,7 +39,6 @@ mod tests { #[tokio::test] async fn should_increase_the_number_of_aborted_requests_when_it_receives_a_udp_request_aborted_event() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAborted { @@ -55,7 +52,6 @@ mod tests { ), }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; @@ -68,7 +64,6 @@ mod tests { #[tokio::test] async fn should_increase_the_udp_abort_counter_when_it_receives_a_udp_abort_event() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAborted { @@ -82,7 +77,6 @@ mod tests { ), }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs index 0007a18b0..b296f8ec9 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs @@ -55,9 +55,7 @@ pub async fn handle_event( #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use std::sync::Arc; - use bittorrent_udp_tracker_core::services::banning::BanService; use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; @@ -70,7 +68,6 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_connect_requests_counter_when_it_receives_a_udp4_request_event_of_connect_kind() { let stats_repository = Repository::new(); - let ban_service = 
Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -85,7 +82,6 @@ mod tests { kind: crate::event::UdpRequestKind::Connect, }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; @@ -98,7 +94,6 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_announce_requests_counter_when_it_receives_a_udp4_request_event_of_announce_kind() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -115,7 +110,6 @@ mod tests { }, }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; @@ -128,7 +122,6 @@ mod tests { #[tokio::test] async fn should_increase_the_udp4_scrape_requests_counter_when_it_receives_a_udp4_request_event_of_scrape_kind() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -143,7 +136,6 @@ mod tests { kind: crate::event::UdpRequestKind::Scrape, }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; @@ -156,7 +148,6 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_connect_requests_counter_when_it_receives_a_udp6_request_event_of_connect_kind() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -171,7 +162,6 @@ mod tests { kind: crate::event::UdpRequestKind::Connect, }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; @@ -184,7 +174,6 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_announce_requests_counter_when_it_receives_a_udp6_request_event_of_announce_kind() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -201,7 +190,6 @@ mod tests { }, }, &stats_repository, - 
&ban_service, CurrentClock::now(), ) .await; @@ -214,7 +202,6 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_scrape_requests_counter_when_it_receives_a_udp6_request_event_of_scrape_kind() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestAccepted { @@ -229,7 +216,6 @@ mod tests { kind: crate::event::UdpRequestKind::Scrape, }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs index 74641574a..ce6e179a3 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs @@ -27,9 +27,7 @@ pub async fn handle_event(context: ConnectionContext, stats_repository: &Reposit #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; - use bittorrent_udp_tracker_core::services::banning::BanService; use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; @@ -41,7 +39,6 @@ mod tests { #[tokio::test] async fn should_increase_the_number_of_banned_requests_when_it_receives_a_udp_request_banned_event() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestBanned { @@ -55,7 +52,6 @@ mod tests { ), }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; @@ -68,7 +64,6 @@ mod tests { #[tokio::test] async fn should_increase_the_udp_ban_counter_when_it_receives_a_udp_banned_event() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestBanned { @@ -82,7 +77,6 @@ mod tests { ), }, &stats_repository, 
- &ban_service, CurrentClock::now(), ) .await; diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs index 8333258c2..89f306f6a 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs @@ -34,9 +34,7 @@ pub async fn handle_event(context: ConnectionContext, stats_repository: &Reposit #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::sync::Arc; - use bittorrent_udp_tracker_core::services::banning::BanService; use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; @@ -48,7 +46,6 @@ mod tests { #[tokio::test] async fn should_increase_the_number_of_incoming_requests_when_it_receives_a_udp4_incoming_request_event() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpRequestReceived { @@ -62,7 +59,6 @@ mod tests { ), }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs index 0038ac5f9..4e167a10e 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -107,9 +107,7 @@ pub async fn handle_event( #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use std::sync::Arc; - use bittorrent_udp_tracker_core::services::banning::BanService; use torrust_tracker_clock::clock::Time; use torrust_tracker_primitives::service_binding::{Protocol, ServiceBinding}; @@ -122,7 +120,6 @@ mod tests { #[tokio::test] async fn 
should_increase_the_udp4_responses_counter_when_it_receives_a_udp4_response_event() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpResponseSent { @@ -142,7 +139,6 @@ mod tests { req_processing_time: std::time::Duration::from_secs(1), }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; @@ -155,7 +151,6 @@ mod tests { #[tokio::test] async fn should_increase_the_udp6_response_counter_when_it_receives_a_udp6_response_event() { let stats_repository = Repository::new(); - let ban_service = Arc::new(tokio::sync::RwLock::new(BanService::new(1))); handle_event( Event::UdpResponseSent { @@ -175,7 +170,6 @@ mod tests { req_processing_time: std::time::Duration::from_secs(1), }, &stats_repository, - &ban_service, CurrentClock::now(), ) .await; diff --git a/packages/udp-tracker-server/src/statistics/event/listener.rs b/packages/udp-tracker-server/src/statistics/event/listener.rs index e6c9a85ce..ae659c15e 100644 --- a/packages/udp-tracker-server/src/statistics/event/listener.rs +++ b/packages/udp-tracker-server/src/statistics/event/listener.rs @@ -1,8 +1,6 @@ use std::sync::Arc; -use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; -use tokio::sync::RwLock; use tokio::task::JoinHandle; use torrust_tracker_clock::clock::Time; use torrust_tracker_events::receiver::RecvError; @@ -13,24 +11,19 @@ use crate::statistics::repository::Repository; use crate::CurrentClock; #[must_use] -pub fn run_event_listener( - receiver: Receiver, - repository: &Arc, - ban_service: &Arc>, -) -> JoinHandle<()> { +pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { let repository_clone = repository.clone(); - let ban_service_clone = ban_service.clone(); tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker server event listener"); tokio::spawn(async move { - 
dispatch_events(receiver, repository_clone, ban_service_clone).await; + dispatch_events(receiver, repository_clone).await; tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker server event listener finished"); }) } -async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc, ban_service: Arc>) { +async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc) { let shutdown_signal = tokio::signal::ctrl_c(); tokio::pin!(shutdown_signal); @@ -45,7 +38,7 @@ async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc { match result { - Ok(event) => handle_event(event, &stats_repository, &ban_service, CurrentClock::now()).await, + Ok(event) => handle_event(event, &stats_repository, CurrentClock::now()).await, Err(e) => { match e { RecvError::Closed => { diff --git a/src/app.rs b/src/app.rs index 5050c1dd1..58d758d7f 100644 --- a/src/app.rs +++ b/src/app.rs @@ -75,7 +75,8 @@ async fn start_jobs(config: &Configuration, app_container: &Arc) - start_tracker_core_event_listener(config, app_container, &mut job_manager); start_http_core_event_listener(config, app_container, &mut job_manager); start_udp_core_event_listener(config, app_container, &mut job_manager); - start_udp_server_event_listener(config, app_container, &mut job_manager); + start_udp_server_stats_event_listener(config, app_container, &mut job_manager); + start_udp_server_banning_event_listener(app_container, &mut job_manager); start_the_udp_instances(config, app_container, &mut job_manager).await; start_the_http_instances(config, app_container, &mut job_manager).await; @@ -164,10 +165,21 @@ fn start_udp_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { +fn start_udp_server_stats_event_listener( + config: &Configuration, + app_container: &Arc, + job_manager: &mut JobManager, +) { job_manager.push_opt( - "udp_server_event_listener", - jobs::udp_tracker_server::start_event_listener(config, app_container), + 
"udp_server_stats_event_listener", + jobs::udp_tracker_server::start_stats_event_listener(config, app_container), + ); +} + +fn start_udp_server_banning_event_listener(app_container: &Arc, job_manager: &mut JobManager) { + job_manager.push( + "udp_server_banning_event_listener", + jobs::udp_tracker_server::start_banning_event_listener(app_container), ); } diff --git a/src/bootstrap/jobs/udp_tracker_server.rs b/src/bootstrap/jobs/udp_tracker_server.rs index 8a4c2a273..0910fdaf5 100644 --- a/src/bootstrap/jobs/udp_tracker_server.rs +++ b/src/bootstrap/jobs/udp_tracker_server.rs @@ -5,12 +5,11 @@ use torrust_tracker_configuration::Configuration; use crate::container::AppContainer; -pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { +pub fn start_stats_event_listener(config: &Configuration, app_container: &Arc) -> Option> { if config.core.tracker_usage_statistics { let job = torrust_udp_tracker_server::statistics::event::listener::run_event_listener( app_container.udp_tracker_server_container.event_bus.receiver(), &app_container.udp_tracker_server_container.stats_repository, - &app_container.udp_tracker_core_services.ban_service, ); Some(job) } else { @@ -18,3 +17,11 @@ pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> JoinHandle<()> { + torrust_udp_tracker_server::banning::event::listener::run_event_listener( + app_container.udp_tracker_server_container.event_bus.receiver(), + &app_container.udp_tracker_core_services.ban_service, + ) +} From f7b80ed937fd98e6c31f47e87fd005990bbf25a4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 10 Jun 2025 13:50:26 +0100 Subject: [PATCH 706/802] feat: [#1570] add new metric for banned IPs total --- .../src/banning/event/handler.rs | 30 ++++++++++++++++++- .../src/banning/event/listener.rs | 14 ++++++--- .../udp-tracker-server/src/environment.rs | 1 + .../udp-tracker-server/src/statistics/mod.rs | 7 +++++ src/bootstrap/jobs/udp_tracker_server.rs | 1 + 5 files changed, 
48 insertions(+), 5 deletions(-) diff --git a/packages/udp-tracker-server/src/banning/event/handler.rs b/packages/udp-tracker-server/src/banning/event/handler.rs index 2d77d0979..4876323a8 100644 --- a/packages/udp-tracker-server/src/banning/event/handler.rs +++ b/packages/udp-tracker-server/src/banning/event/handler.rs @@ -2,11 +2,20 @@ use std::sync::Arc; use bittorrent_udp_tracker_core::services::banning::BanService; use tokio::sync::RwLock; +use torrust_tracker_metrics::label::LabelSet; +use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::{ErrorKind, Event}; +use crate::statistics::repository::Repository; +use crate::statistics::UDP_TRACKER_SERVER_IPS_BANNED_TOTAL; -pub async fn handle_event(event: Event, ban_service: &Arc>, _now: DurationSinceUnixEpoch) { +pub async fn handle_event( + event: Event, + ban_service: &Arc>, + repository: &Repository, + now: DurationSinceUnixEpoch, +) { if let Event::UdpError { context, kind: _, @@ -14,6 +23,25 @@ pub async fn handle_event(event: Event, ban_service: &Arc>, _ } = event { let mut ban_service = ban_service.write().await; + ban_service.increase_counter(&context.client_socket_addr().ip()); + + update_metric_for_banned_ips_total(repository, ban_service.get_banned_ips_total(), now).await; + } +} + +#[allow(clippy::cast_precision_loss)] +async fn update_metric_for_banned_ips_total(repository: &Repository, ips_banned_total: usize, now: DurationSinceUnixEpoch) { + match repository + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), + &LabelSet::default(), + ips_banned_total as f64, + now, + ) + .await + { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increase the counter: {}", err), } } diff --git a/packages/udp-tracker-server/src/banning/event/listener.rs b/packages/udp-tracker-server/src/banning/event/listener.rs index ee1a4366f..fee3395fa 100644 --- a/packages/udp-tracker-server/src/banning/event/listener.rs +++ 
b/packages/udp-tracker-server/src/banning/event/listener.rs @@ -9,22 +9,28 @@ use torrust_tracker_events::receiver::RecvError; use super::handler::handle_event; use crate::event::receiver::Receiver; +use crate::statistics::repository::Repository; use crate::CurrentClock; #[must_use] -pub fn run_event_listener(receiver: Receiver, ban_service: &Arc>) -> JoinHandle<()> { +pub fn run_event_listener( + receiver: Receiver, + ban_service: &Arc>, + repository: &Arc, +) -> JoinHandle<()> { let ban_service_clone = ban_service.clone(); + let repository_clone = repository.clone(); tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker server event listener (banning)"); tokio::spawn(async move { - dispatch_events(receiver, ban_service_clone).await; + dispatch_events(receiver, ban_service_clone, repository_clone).await; tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker server event listener (banning) finished"); }) } -async fn dispatch_events(mut receiver: Receiver, ban_service: Arc>) { +async fn dispatch_events(mut receiver: Receiver, ban_service: Arc>, repository: Arc) { let shutdown_signal = tokio::signal::ctrl_c(); tokio::pin!(shutdown_signal); @@ -39,7 +45,7 @@ async fn dispatch_events(mut receiver: Receiver, ban_service: Arc { match result { - Ok(event) => handle_event(event, &ban_service, CurrentClock::now()).await, + Ok(event) => handle_event(event, &ban_service, &repository, CurrentClock::now()).await, Err(e) => { match e { RecvError::Closed => { diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 6c03cc75f..61b1cba63 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -73,6 +73,7 @@ impl Environment { let udp_server_banning_event_listener_job = Some(crate::banning::event::listener::run_event_listener( self.container.udp_tracker_server_container.event_bus.receiver(), &self.container.udp_tracker_core_container.ban_service, 
+ &self.container.udp_tracker_server_container.stats_repository, )); // Start the UDP tracker server diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index a7da2dc63..ebb3df0bf 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -10,6 +10,7 @@ use torrust_tracker_metrics::unit::Unit; const UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL: &str = "udp_tracker_server_requests_aborted_total"; const UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL: &str = "udp_tracker_server_requests_banned_total"; +pub(crate) const UDP_TRACKER_SERVER_IPS_BANNED_TOTAL: &str = "udp_tracker_server_ips_banned_total"; const UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL: &str = "udp_tracker_server_connection_id_errors_total"; const UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL: &str = "udp_tracker_server_requests_received_total"; const UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL: &str = "udp_tracker_server_requests_accepted_total"; @@ -33,6 +34,12 @@ pub fn describe_metrics() -> Metrics { Some(MetricDescription::new("Total number of UDP requests banned")), ); + metrics.metric_collection.describe_gauge( + &metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new("Total number of IPs banned from UDP requests")), + ); + metrics.metric_collection.describe_counter( &metric_name!(UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL), Some(Unit::Count), diff --git a/src/bootstrap/jobs/udp_tracker_server.rs b/src/bootstrap/jobs/udp_tracker_server.rs index 0910fdaf5..3e8a7aaa8 100644 --- a/src/bootstrap/jobs/udp_tracker_server.rs +++ b/src/bootstrap/jobs/udp_tracker_server.rs @@ -23,5 +23,6 @@ pub fn start_banning_event_listener(app_container: &Arc) -> JoinHa torrust_udp_tracker_server::banning::event::listener::run_event_listener( app_container.udp_tracker_server_container.event_bus.receiver(), 
&app_container.udp_tracker_core_services.ban_service, + &app_container.udp_tracker_server_container.stats_repository, ) } From 12d69179a8c8a2d240d7860ffe2e84afc4082d62 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 10 Jun 2025 15:42:55 +0100 Subject: [PATCH 707/802] feat: [#1571] increase broadcaster channel capacity to 65536 --- packages/events/src/broadcaster.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/events/src/broadcaster.rs b/packages/events/src/broadcaster.rs index d0a511cd4..79c83df8a 100644 --- a/packages/events/src/broadcaster.rs +++ b/packages/events/src/broadcaster.rs @@ -5,7 +5,7 @@ use tokio::sync::broadcast::{self}; use crate::receiver::{Receiver, RecvError}; use crate::sender::{SendError, Sender}; -const CHANNEL_CAPACITY: usize = 32768; +const CHANNEL_CAPACITY: usize = 65536; /// An event sender and receiver implementation using a broadcast channel. #[derive(Clone, Debug)] From 02433cbe809d72e066b1bd3c3461e4349a201c67 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 10 Jun 2025 18:56:26 +0100 Subject: [PATCH 708/802] fix: [#1569] Prometheus txt export format. 
Only one HELP and TYPE header per metric Current format: ``` # HELP udp_tracker_server_connection_id_errors_total Total number of requests with connection ID errors # TYPE udp_tracker_server_connection_id_errors_total counter udp_tracker_server_connection_id_errors_total{client_software_name="Other (BC)",client_software_version="0087"} 4 # HELP udp_tracker_server_connection_id_errors_total Total number of requests with connection ID errors # TYPE udp_tracker_server_connection_id_errors_total counter udp_tracker_server_connection_id_errors_total{client_software_name="Other (FD66)",client_software_version=""} 1 # HELP udp_tracker_server_connection_id_errors_total Total number of requests with connection ID errors # TYPE udp_tracker_server_connection_id_errors_total counter udp_tracker_server_connection_id_errors_total{client_software_name="Other (SP)",client_software_version="3605"} 631 # HELP udp_tracker_server_connection_id_errors_total Total number of requests with connection ID errors # TYPE udp_tracker_server_connection_id_errors_total counter udp_tracker_server_connection_id_errors_total{client_software_name="Other (TIX0325)",client_software_version=""} 14 # HELP udp_tracker_server_connection_id_errors_total Total number of requests with connection ID errors # TYPE udp_tracker_server_connection_id_errors_total counter udp_tracker_server_connection_id_errors_total{client_software_name="Other (BC)",client_software_version="0202"} 6754 # HELP udp_tracker_server_connection_id_errors_total Total number of requests with connection ID errors # TYPE udp_tracker_server_connection_id_errors_total counter udp_tracker_server_connection_id_errors_total{client_software_name="Other (XF)",client_software_version="9400"} 1 # HELP udp_tracker_server_connection_id_errors_total Total number of requests with connection ID errors # TYPE udp_tracker_server_connection_id_errors_total counter udp_tracker_server_connection_id_errors_total{client_software_name="Other 
(BC)",client_software_version="0090"} 7 # HELP udp_tracker_server_connection_id_errors_total Total number of requests with connection ID errors # TYPE udp_tracker_server_connection_id_errors_total counter udp_tracker_server_connection_id_errors_total{client_software_name="Transmission",client_software_version="2.32"} 1 # HELP udp_tracker_server_connection_id_errors_total Total number of requests with connection ID errors # TYPE udp_tracker_server_connection_id_errors_total counter udp_tracker_server_connection_id_errors_total{client_software_name="Other (61-b39e)",client_software_version=""} 1 ``` Expected format: ``` # HELP udp_tracker_server_connection_id_errors_total Total number of requests with connection ID errors # TYPE udp_tracker_server_connection_id_errors_total counter udp_tracker_server_connection_id_errors_total{client_software_name="Other (BC)",client_software_version="0087"} 4 udp_tracker_server_connection_id_errors_total{client_software_name="Other (FD66)",client_software_version=""} 1 udp_tracker_server_connection_id_errors_total{client_software_name="Other (SP)",client_software_version="3605"} 631 udp_tracker_server_connection_id_errors_total{client_software_name="Other (TIX0325)",client_software_version=""} 14 udp_tracker_server_connection_id_errors_total{client_software_name="Other (BC)",client_software_version="0202"} 6754 udp_tracker_server_connection_id_errors_total{client_software_name="Other (XF)",client_software_version="9400"} 1 udp_tracker_server_connection_id_errors_total{client_software_name="Other (BC)",client_software_version="0090"} 7 udp_tracker_server_connection_id_errors_total{client_software_name="Transmission",client_software_version="2.32"} 1 udp_tracker_server_connection_id_errors_total{client_software_name="Other (61-b39e)",client_software_version=""} 1 ``` A line break after each metric has also been added to improve readability. 
--- packages/metrics/src/label/set.rs | 8 ++ packages/metrics/src/lib.rs | 6 +- packages/metrics/src/metric/mod.rs | 102 ++++++---------------- packages/metrics/src/metric_collection.rs | 31 ++++--- packages/metrics/src/sample.rs | 6 +- packages/metrics/src/sample_collection.rs | 6 +- 6 files changed, 64 insertions(+), 95 deletions(-) diff --git a/packages/metrics/src/label/set.rs b/packages/metrics/src/label/set.rs index 1c2c3e27e..cab457f42 100644 --- a/packages/metrics/src/label/set.rs +++ b/packages/metrics/src/label/set.rs @@ -16,6 +16,10 @@ impl LabelSet { pub fn upsert(&mut self, key: LabelName, value: LabelValue) { self.items.insert(key, value); } + + pub fn is_empty(&self) -> bool { + self.items.is_empty() + } } impl Display for LabelSet { @@ -157,6 +161,10 @@ impl<'de> Deserialize<'de> for LabelSet { impl PrometheusSerializable for LabelSet { fn to_prometheus(&self) -> String { + if self.is_empty() { + return String::new(); + } + let items = self.items.iter().fold(String::new(), |mut output, label_pair| { if !output.is_empty() { output.push(','); diff --git a/packages/metrics/src/lib.rs b/packages/metrics/src/lib.rs index 95d70bf6c..997cd3c8c 100644 --- a/packages/metrics/src/lib.rs +++ b/packages/metrics/src/lib.rs @@ -12,12 +12,12 @@ pub const METRICS_TARGET: &str = "METRICS"; #[cfg(test)] mod tests { - /// It removes leading and trailing whitespace from each line, and empty lines. + /// It removes leading and trailing whitespace from each line. 
pub fn format_prometheus_output(output: &str) -> String { output .lines() - .map(str::trim) - .filter(|line| !line.is_empty()) + .map(str::trim_start) + .map(str::trim_end) .collect::>() .join("\n") } diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index 6f254023f..df743c519 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -103,19 +103,6 @@ impl Metric { } } -/// `PrometheusMetricSample` is a wrapper around types that provides methods to -/// convert the metric and its measurement into a Prometheus-compatible format. -/// -/// In Prometheus, a metric is a time series that consists of a name, a set of -/// labels, and a value. The sample value needs data from the `Metric` and -/// `Measurement` structs, as well as the `LabelSet` that defines the labels for -/// the metric. -struct PrometheusMetricSample<'a, T> { - metric: &'a Metric, - measurement: &'a Measurement, - label_set: &'a LabelSet, -} - enum PrometheusType { Counter, Gauge, @@ -130,91 +117,58 @@ impl PrometheusSerializable for PrometheusType { } } -impl PrometheusMetricSample<'_, T> { - fn to_prometheus(&self, prometheus_type: &PrometheusType) -> String { - format!( - // Format: - // # HELP - // # TYPE - // {label_set} - "{}{}{}", - self.help_line(), - self.type_line(prometheus_type), - self.metric_line() - ) - } - - fn help_line(&self) -> String { - if let Some(description) = &self.metric.opt_description { - format!( - // Format: # HELP - "# HELP {} {}\n", - self.metric.name().to_prometheus(), - description.to_prometheus() - ) +impl Metric { + #[must_use] + fn prometheus_help_line(&self) -> String { + if let Some(description) = &self.opt_description { + format!("# HELP {} {}", self.name.to_prometheus(), description.to_prometheus()) } else { String::new() } } - fn type_line(&self, kind: &PrometheusType) -> String { - format!("# TYPE {} {}\n", self.metric.name().to_prometheus(), kind.to_prometheus()) + #[must_use] + fn 
prometheus_type_line(&self, prometheus_type: &PrometheusType) -> String { + format!("# TYPE {} {}", self.name.to_prometheus(), prometheus_type.to_prometheus()) } - fn metric_line(&self) -> String { + #[must_use] + fn prometheus_sample_line(&self, label_set: &LabelSet, measurement: &Measurement) -> String { format!( - // Format: {label_set} "{}{} {}", - self.metric.name.to_prometheus(), - self.label_set.to_prometheus(), - self.measurement.value().to_prometheus() + self.name.to_prometheus(), + label_set.to_prometheus(), + measurement.to_prometheus() ) } -} -impl<'a> PrometheusMetricSample<'a, Counter> { - pub fn new(metric: &'a Metric, measurement: &'a Measurement, label_set: &'a LabelSet) -> Self { - Self { - metric, - measurement, - label_set, - } + #[must_use] + fn prometheus_samples(&self) -> String { + self.sample_collection + .iter() + .map(|(label_set, measurement)| self.prometheus_sample_line(label_set, measurement)) + .collect::>() + .join("\n") } -} -impl<'a> PrometheusMetricSample<'a, Gauge> { - pub fn new(metric: &'a Metric, measurement: &'a Measurement, label_set: &'a LabelSet) -> Self { - Self { - metric, - measurement, - label_set, - } + fn to_prometheus(&self, prometheus_type: &PrometheusType) -> String { + let help_line = self.prometheus_help_line(); + let type_line = self.prometheus_type_line(prometheus_type); + let samples = self.prometheus_samples(); + + format!("{help_line}\n{type_line}\n{samples}") } } impl PrometheusSerializable for Metric { fn to_prometheus(&self) -> String { - let samples: Vec = self - .sample_collection - .iter() - .map(|(label_set, measurement)| { - PrometheusMetricSample::::new(self, measurement, label_set).to_prometheus(&PrometheusType::Counter) - }) - .collect(); - samples.join("\n") + self.to_prometheus(&PrometheusType::Counter) } } impl PrometheusSerializable for Metric { fn to_prometheus(&self) -> String { - let samples: Vec = self - .sample_collection - .iter() - .map(|(label_set, measurement)| { - 
PrometheusMetricSample::::new(self, measurement, label_set).to_prometheus(&PrometheusType::Gauge) - }) - .collect(); - samples.join("\n") + self.to_prometheus(&PrometheusType::Gauge) } } diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection.rs index c53d02bcf..ff932caae 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection.rs @@ -322,7 +322,7 @@ impl PrometheusSerializable for MetricCollection { .map(Metric::::to_prometheus), ) .collect::>() - .join("\n") + .join("\n\n") } } @@ -629,14 +629,14 @@ mod tests { fn prometheus() -> String { format_prometheus_output( - r#" - # HELP http_tracker_core_announce_requests_received_total The number of announce requests received. - # TYPE http_tracker_core_announce_requests_received_total counter - http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 - # HELP udp_tracker_server_performance_avg_announce_processing_time_ns The average announce processing time in nanoseconds. - # TYPE udp_tracker_server_performance_avg_announce_processing_time_ns gauge - udp_tracker_server_performance_avg_announce_processing_time_ns{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 - "#, + r#"# HELP http_tracker_core_announce_requests_received_total The number of announce requests received. +# TYPE http_tracker_core_announce_requests_received_total counter +http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 + +# HELP udp_tracker_server_performance_avg_announce_processing_time_ns The average announce processing time in nanoseconds. 
+# TYPE udp_tracker_server_performance_avg_announce_processing_time_ns gauge +udp_tracker_server_performance_avg_announce_processing_time_ns{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 +"#, ) } } @@ -750,7 +750,7 @@ mod tests { MetricKindCollection::new(vec![Metric::new( metric_name!("http_tracker_core_announce_requests_received_total"), None, - None, + Some(MetricDescription::new("The number of announce requests received.")), SampleCollection::new(vec![ Sample::new(Counter::new(1), time, label_set_1.clone()), Sample::new(Counter::new(2), time, label_set_2.clone()), @@ -765,12 +765,11 @@ mod tests { let prometheus_output = metric_collection.to_prometheus(); let expected_prometheus_output = format_prometheus_output( - r#" - # TYPE http_tracker_core_announce_requests_received_total counter - http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7171",server_binding_protocol="http"} 2 - # TYPE http_tracker_core_announce_requests_received_total counter - http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 - "#, + r#"# HELP http_tracker_core_announce_requests_received_total The number of announce requests received. +# TYPE http_tracker_core_announce_requests_received_total counter +http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7070",server_binding_protocol="http"} 1 +http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",server_binding_port="7171",server_binding_protocol="http"} 2 +"#, ); // code-review: samples are not serialized in the same order as they are created. 
diff --git a/packages/metrics/src/sample.rs b/packages/metrics/src/sample.rs index ad4dff00e..b9cd6c312 100644 --- a/packages/metrics/src/sample.rs +++ b/packages/metrics/src/sample.rs @@ -50,7 +50,11 @@ impl Sample { impl PrometheusSerializable for Sample { fn to_prometheus(&self) -> String { - format!("{} {}", self.label_set.to_prometheus(), self.measurement.to_prometheus()) + if self.label_set.is_empty() { + format!(" {}", self.measurement.to_prometheus()) + } else { + format!("{} {}", self.label_set.to_prometheus(), self.measurement.to_prometheus()) + } } } diff --git a/packages/metrics/src/sample_collection.rs b/packages/metrics/src/sample_collection.rs index a87aacb63..ef88b27dd 100644 --- a/packages/metrics/src/sample_collection.rs +++ b/packages/metrics/src/sample_collection.rs @@ -155,7 +155,11 @@ impl PrometheusSerializable for SampleCollection { let mut output = String::new(); for (label_set, sample_data) in &self.samples { - let _ = write!(output, "{} {}", label_set.to_prometheus(), sample_data.to_prometheus()); + if label_set.is_empty() { + let _ = write!(output, "{}", sample_data.to_prometheus()); + } else { + let _ = write!(output, "{} {}", label_set.to_prometheus(), sample_data.to_prometheus()); + } } output From 9b254ce7082899a6995760f7403fb6d7efbad324 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 10 Jun 2025 21:40:12 +0100 Subject: [PATCH 709/802] chore(deps): update dependencies ``` cargo update Updating crates.io index Locking 41 packages to latest compatible versions Updating adler2 v2.0.0 -> v2.0.1 Updating anstream v0.6.18 -> v0.6.19 Updating anstyle v1.0.10 -> v1.0.11 Updating anstyle-parse v0.2.6 -> v0.2.7 Updating anstyle-query v1.1.2 -> v1.1.3 Updating anstyle-wincon v3.0.8 -> v3.0.9 Updating async-compression v0.4.23 -> v0.4.24 Updating bindgen v0.71.1 -> v0.72.0 Updating bumpalo v3.17.0 -> v3.18.1 Updating bytemuck v1.23.0 -> v1.23.1 Updating camino v1.1.9 -> v1.1.10 Updating cc v1.2.25 -> v1.2.26 Updating cfg-if v1.0.0 -> 
v1.0.1 Updating clap v4.5.39 -> v4.5.40 Updating clap_builder v4.5.39 -> v4.5.40 Updating clap_derive v4.5.32 -> v4.5.40 Updating clap_lex v0.7.4 -> v0.7.5 Updating colorchoice v1.0.3 -> v1.0.4 Updating flate2 v1.1.1 -> v1.1.2 Updating fs-err v3.1.0 -> v3.1.1 Updating hashbrown v0.15.3 -> v0.15.4 Updating hyper-rustls v0.27.6 -> v0.27.7 Updating hyper-util v0.1.13 -> v0.1.14 Updating miniz_oxide v0.8.8 -> v0.8.9 Updating portable-atomic v1.11.0 -> v1.11.1 Updating reqwest v0.12.18 -> v0.12.20 Updating rustc-demangle v0.1.24 -> v0.1.25 Updating serde_spanned v0.6.8 -> v0.6.9 Updating smallvec v1.15.0 -> v1.15.1 Updating syn v2.0.101 -> v2.0.102 Updating toml v0.8.22 -> v0.8.23 Updating toml_datetime v0.6.9 -> v0.6.11 Updating toml_edit v0.22.26 -> v0.22.27 Updating toml_write v0.1.1 -> v0.1.2 Updating tower-http v0.6.5 -> v0.6.6 Updating tracing-attributes v0.1.28 -> v0.1.29 Updating tracing-core v0.1.33 -> v0.1.34 Updating unicode-width v0.2.0 -> v0.2.1 Updating wasi v0.11.0+wasi-snapshot-preview1 -> v0.11.1+wasi-snapshot-preview1 Updating windows-registry v0.4.0 -> v0.5.2 Removing windows-strings v0.3.1 Updating winnow v0.7.10 -> v0.7.11 ``` --- Cargo.lock | 277 +++++++++++++++++++++++++---------------------------- 1 file changed, 133 insertions(+), 144 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index feb749d3f..269f7a3a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "adler2" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "ahash" @@ -81,9 +81,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.18" +version = "0.6.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933" dependencies = [ "anstyle", "anstyle-parse", @@ -96,33 +96,33 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" +checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" [[package]] name = "anstyle-parse" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9" dependencies = [ "windows-sys 0.59.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.8" +version = "3.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6680de5231bd6ee4c6191b8a1325daa282b415391ec9d3a37bd34f2060dc73fa" +checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882" dependencies = [ "anstyle", "once_cell_polyfill", @@ -217,9 +217,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.23" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b37fc50485c4f3f736a4fb14199f6d5f5ba008d7f28fe710306c92780f004c07" +checksum = "d615619615a650c571269c00dca41db04b9210037fa76ed8239f70404ab56985" dependencies = [ "brotli", "flate2", @@ -332,7 +332,7 @@ checksum = 
"e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -455,7 +455,7 @@ checksum = "604fde5e028fea851ce1d8570bbdc034bec850d157f7569d10f347d06808c05c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -537,9 +537,9 @@ checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] name = "bindgen" -version = "0.71.1" +version = "0.72.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" +checksum = "4f72209734318d0b619a5e0f5129918b848c416e122a3c4ce054e03cb87b726f" dependencies = [ "bitflags 2.9.1", "cexpr", @@ -550,7 +550,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -848,7 +848,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -889,9 +889,9 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.17.0" +version = "3.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "793db76d6187cd04dff33004d8e6c9cc4e05cd330500379d2394209271b4aeee" [[package]] name = "bytecheck" @@ -917,9 +917,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.23.0" +version = "1.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9134a6ef01ce4b366b50689c94f82c14bc72bc5d0386829828a2e2752ef7958c" +checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" [[package]] name = "byteorder" @@ -935,9 +935,9 @@ checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "camino" -version = "1.1.9" +version = "1.1.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" +checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" dependencies = [ "serde", ] @@ -959,9 +959,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.25" +version = "1.2.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0fc897dc1e865cc67c0e05a836d9d3f1df3cbe442aa4a9473b18e12624a4951" +checksum = "956a5e21988b87f372569b66183b78babf23ebc2e744b733e4350a752c4dafac" dependencies = [ "jobserver", "libc", @@ -979,9 +979,9 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" [[package]] name = "cfg_aliases" @@ -1052,9 +1052,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.39" +version = "4.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd60e63e9be68e5fb56422e397cf9baddded06dae1d2e523401542383bc72a9f" +checksum = "40b6887a1d8685cebccf115538db5c0efe625ccac9696ad45c409d96566e910f" dependencies = [ "clap_builder", "clap_derive", @@ -1062,9 +1062,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.39" +version = "4.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89cc6392a1f72bbeb820d71f32108f61fdaf18bc526e1d23954168a67759ef51" +checksum = "e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e" dependencies = [ "anstream", "anstyle", @@ -1074,21 +1074,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.32" +version = "4.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" +checksum = 
"d2c7947ae4cc3d851207c1adb5b5e260ff0cca11446b1d6d1423788e442257ce" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] name = "clap_lex" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" +checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" [[package]] name = "cmake" @@ -1101,9 +1101,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" +checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" [[package]] name = "compact_str" @@ -1336,7 +1336,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -1347,7 +1347,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -1391,7 +1391,7 @@ checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", "unicode-xid", ] @@ -1403,7 +1403,7 @@ checksum = "ccfae181bab5ab6c5478b2ccb69e4c68a02f8c3ec72f6616bfec9dbc599d2ee0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -1430,7 +1430,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -1577,9 +1577,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" +checksum = 
"4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" dependencies = [ "crc32fast", "libz-sys", @@ -1677,7 +1677,7 @@ checksum = "e99b8b3c28ae0e84b604c75f721c21dc77afb3706076af5e8216d15fd1deaae3" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -1689,7 +1689,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -1701,14 +1701,14 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] name = "fs-err" -version = "3.1.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f89bda4c2a21204059a977ed3bfe746677dfd137b83c339e702b0ac91d482aa" +checksum = "88d7be93788013f265201256d58f04936a8079ad5dc898743aa20525f503b683" dependencies = [ "autocfg", "tokio", @@ -1789,7 +1789,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -1846,7 +1846,7 @@ checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi 0.11.1+wasi-snapshot-preview1", ] [[package]] @@ -1931,9 +1931,9 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" [[package]] name = "hashbrown" -version = "0.15.3" +version = "0.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3" +checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" dependencies = [ "allocator-api2", "equivalent", @@ -1946,7 +1946,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" dependencies = [ - "hashbrown 0.15.3", + "hashbrown 0.15.4", ] 
[[package]] @@ -2066,9 +2066,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.6" +version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03a01595e11bdcec50946522c32dde3fc6914743000a68b93000965f2f02406d" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ "http", "hyper", @@ -2098,9 +2098,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c293b6b3d21eca78250dc7dbebd6b9210ec5530e038cbfe0661b5c47ab06e8" +checksum = "dc2fdfdbff08affe55bb779f33b053aa1fe5dd5b54c257343c17edfa55711bdb" dependencies = [ "base64 0.22.1", "bytes", @@ -2292,7 +2292,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" dependencies = [ "equivalent", - "hashbrown 0.15.3", + "hashbrown 0.15.4", "serde", ] @@ -2522,7 +2522,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.3", + "hashbrown 0.15.4", ] [[package]] @@ -2564,7 +2564,7 @@ checksum = "db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -2581,9 +2581,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", ] @@ -2595,7 +2595,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi 0.11.1+wasi-snapshot-preview1", "windows-sys 0.59.0", ] @@ -2622,7 +2622,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -2672,7 +2672,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", "termcolor", "thiserror 1.0.69", ] @@ -2877,7 +2877,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -2961,7 +2961,7 @@ dependencies = [ "regex", "regex-syntax", "structmeta", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -2984,7 +2984,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -3115,9 +3115,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" [[package]] name = "portable-atomic-util" @@ -3216,7 +3216,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -3236,7 +3236,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", "version_check", "yansi", ] @@ -3468,9 +3468,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.18" +version = "0.12.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e98ff6b0dbbe4d5a37318f433d4fc82babd21631f194d370409ceb2e40b2f0b5" +checksum = "eabf4c97d9130e2bf606614eb937e86edac8292eaa6f422f995d7e8de1eb1813" dependencies = [ 
"base64 0.22.1", "bytes", @@ -3484,12 +3484,10 @@ dependencies = [ "hyper-rustls", "hyper-tls", "hyper-util", - "ipnet", "js-sys", "log", "mime", "native-tls", - "once_cell", "percent-encoding", "pin-project-lite", "rustls-pki-types", @@ -3588,7 +3586,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.101", + "syn 2.0.102", "unicode-ident", ] @@ -3624,9 +3622,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.24" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f" [[package]] name = "rustc-hash" @@ -3846,7 +3844,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -3893,14 +3891,14 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] name = "serde_spanned" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" dependencies = [ "serde", ] @@ -3944,7 +3942,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -4016,9 +4014,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.15.0" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" [[package]] name = "socket2" @@ -4057,7 +4055,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive", 
- "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -4068,7 +4066,7 @@ checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -4121,9 +4119,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.101" +version = "2.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +checksum = "f6397daf94fa90f058bd0fd88429dd9e5738999cca8d701813c80723add80462" dependencies = [ "proc-macro2", "quote", @@ -4147,7 +4145,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -4268,7 +4266,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c13547615a44dc9c452a8a534638acdf07120d4b6847c8178705da06306a3057" dependencies = [ "unicode-linebreak", - "unicode-width 0.2.0", + "unicode-width 0.2.1", ] [[package]] @@ -4297,7 +4295,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -4308,7 +4306,7 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -4412,7 +4410,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -4476,9 +4474,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.22" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ae329d1f08c4d17a59bed7ff5b5a769d062e64a62d34a3261b219e62cd5aae" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", 
"serde_spanned", @@ -4488,18 +4486,18 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.9" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.22.26" +version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ "indexmap 2.9.0", "serde", @@ -4511,9 +4509,9 @@ dependencies = [ [[package]] name = "toml_write" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb942dfe1d8e29a7ee7fcbde5bd2b9a25fb89aa70caea2eba3bee836ff41076" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "torrust-axum-health-check-api-server" @@ -4943,9 +4941,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc2d9e086a412a451384326f521c8123a99a466b329941a9403696bff9b0da2" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" dependencies = [ "async-compression", "bitflags 2.9.1", @@ -4991,20 +4989,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.28" +version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +checksum = "1b1ffbcf9c6f6b99d386e7444eb608ba646ae452a36b39737deb9663b610f662" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] name = "tracing-core" -version = "0.1.33" +version = "0.1.34" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", "valuable", @@ -5100,9 +5098,9 @@ checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "unicode-width" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" +checksum = "4a1a07cc7db3810833284e8d372ccdc6da29741639ecc70c9ec107df0fa6154c" [[package]] name = "unicode-xid" @@ -5197,9 +5195,9 @@ dependencies = [ [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" @@ -5232,7 +5230,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", "wasm-bindgen-shared", ] @@ -5267,7 +5265,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5332,7 +5330,7 @@ dependencies = [ "windows-interface", "windows-link", "windows-result", - "windows-strings 0.4.2", + "windows-strings", ] [[package]] @@ -5343,7 +5341,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -5354,7 +5352,7 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -5365,13 
+5363,13 @@ checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" [[package]] name = "windows-registry" -version = "0.4.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" +checksum = "b3bab093bdd303a1240bb99b8aba8ea8a69ee19d34c9e2ef9594e708a4878820" dependencies = [ + "windows-link", "windows-result", - "windows-strings 0.3.1", - "windows-targets 0.53.0", + "windows-strings", ] [[package]] @@ -5383,15 +5381,6 @@ dependencies = [ "windows-link", ] -[[package]] -name = "windows-strings" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" -dependencies = [ - "windows-link", -] - [[package]] name = "windows-strings" version = "0.4.2" @@ -5549,9 +5538,9 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06928c8748d81b05c9be96aad92e1b6ff01833332f281e8cfca3be4b35fc9ec" +checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd" dependencies = [ "memchr", ] @@ -5616,7 +5605,7 @@ checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", "synstructure", ] @@ -5647,7 +5636,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -5658,7 +5647,7 @@ checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] @@ -5678,7 +5667,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", "synstructure", ] @@ -5718,7 +5707,7 @@ checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.101", + "syn 2.0.102", ] [[package]] From 7e722c06f17603c9d049692f49eca4e1693b7cf7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 10 Jun 2025 21:57:08 +0100 Subject: [PATCH 710/802] fix: clippy errors --- packages/axum-http-tracker-server/src/server.rs | 6 +++--- packages/axum-http-tracker-server/src/v1/routes.rs | 2 +- packages/axum-rest-tracker-api-server/src/routes.rs | 4 ++-- packages/axum-rest-tracker-api-server/src/server.rs | 4 ++-- .../torrent-repository-benchmarking/tests/repository/mod.rs | 4 +++- packages/udp-tracker-server/src/handlers/mod.rs | 2 +- 6 files changed, 12 insertions(+), 10 deletions(-) diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 1775a3d72..ba0dd8c6e 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -47,7 +47,7 @@ impl Launcher { #[instrument(skip(self, http_tracker_container, tx_start, rx_halt))] fn start( &self, - http_tracker_container: Arc, + http_tracker_container: &Arc, tx_start: Sender, rx_halt: Receiver, ) -> BoxFuture<'static, ()> { @@ -69,7 +69,7 @@ impl Launcher { tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting on: {protocol}://{address}"); - let app = router(http_tracker_container, service_binding.clone()); + let app = router(http_tracker_container, &service_binding); let running = Box::pin(async { match tls { @@ -176,7 +176,7 @@ impl HttpServer { let launcher = self.state.launcher; let task = tokio::spawn(async move { - let server = launcher.start(http_tracker_container, tx_start, rx_halt); + let server = launcher.start(&http_tracker_container, tx_start, rx_halt); server.await; diff --git a/packages/axum-http-tracker-server/src/v1/routes.rs 
b/packages/axum-http-tracker-server/src/v1/routes.rs index 3fe467a0d..df395cd9a 100644 --- a/packages/axum-http-tracker-server/src/v1/routes.rs +++ b/packages/axum-http-tracker-server/src/v1/routes.rs @@ -31,7 +31,7 @@ use crate::HTTP_TRACKER_LOG_TARGET; /// > **NOTICE**: it's added a layer to get the client IP from the connection /// > info. The tracker could use the connection info to get the client IP. #[instrument(skip(http_tracker_container, server_service_binding))] -pub fn router(http_tracker_container: Arc, server_service_binding: ServiceBinding) -> Router { +pub fn router(http_tracker_container: &Arc, server_service_binding: &ServiceBinding) -> Router { let server_socket_addr = server_service_binding.bind_address(); Router::new() diff --git a/packages/axum-rest-tracker-api-server/src/routes.rs b/packages/axum-rest-tracker-api-server/src/routes.rs index c18451c89..78b7818d9 100644 --- a/packages/axum-rest-tracker-api-server/src/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/routes.rs @@ -36,7 +36,7 @@ use crate::API_LOG_TARGET; /// Add all API routes to the router. 
#[instrument(skip(http_api_container, access_tokens))] pub fn router( - http_api_container: Arc, + http_api_container: &Arc, access_tokens: Arc, server_socket_addr: SocketAddr, ) -> Router { @@ -44,7 +44,7 @@ pub fn router( let api_url_prefix = "/api"; - let router = v1::routes::add(api_url_prefix, router, &http_api_container); + let router = v1::routes::add(api_url_prefix, router, http_api_container); let state = State { access_tokens }; diff --git a/packages/axum-rest-tracker-api-server/src/server.rs b/packages/axum-rest-tracker-api-server/src/server.rs index 04c51d8fb..b358345fb 100644 --- a/packages/axum-rest-tracker-api-server/src/server.rs +++ b/packages/axum-rest-tracker-api-server/src/server.rs @@ -140,7 +140,7 @@ impl ApiServer { let task = tokio::spawn(async move { tracing::debug!(target: API_LOG_TARGET, "Starting with launcher in spawned task ..."); - let _task = launcher.start(http_api_container, access_tokens, tx_start, rx_halt).await; + let _task = launcher.start(&http_api_container, access_tokens, tx_start, rx_halt).await; tracing::debug!(target: API_LOG_TARGET, "Started with launcher in spawned task"); @@ -241,7 +241,7 @@ impl Launcher { #[instrument(skip(self, http_api_container, access_tokens, tx_start, rx_halt))] pub fn start( &self, - http_api_container: Arc, + http_api_container: &Arc, access_tokens: Arc, tx_start: Sender, rx_halt: Receiver, diff --git a/packages/torrent-repository-benchmarking/tests/repository/mod.rs b/packages/torrent-repository-benchmarking/tests/repository/mod.rs index e555654ca..c3589ce68 100644 --- a/packages/torrent-repository-benchmarking/tests/repository/mod.rs +++ b/packages/torrent-repository-benchmarking/tests/repository/mod.rs @@ -450,7 +450,9 @@ async fn it_should_import_persistent_torrents( make(&repo, &entries).await; let mut downloaded = repo.get_metrics().await.total_downloaded; - persistent_torrents.iter().for_each(|(_, d)| downloaded += u64::from(*d)); + for d in persistent_torrents.values() { + downloaded 
+= u64::from(*d); + } repo.import_persistent(&persistent_torrents).await; diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 3c8204bf5..43c5bc4d5 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -28,7 +28,7 @@ use crate::event::UdpRequestKind; use crate::CurrentClock; #[derive(Debug, Clone, PartialEq)] -pub(super) struct CookieTimeValues { +pub struct CookieTimeValues { pub(super) issue_time: f64, pub(super) valid_range: Range, } From 64be8472feffb8217cdd7ce505510b4d234d5981 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 11 Jun 2025 14:16:02 +0100 Subject: [PATCH 711/802] feat: [#1446] add aggregate function sum to metric collection It allows sum metric samples matching a given criteria. The criteria is a label set. Sample values are added if they contain all the label name/value pairs specified in the criteria. For example, given these metric's samples in Prometheus export text format: ``` udp_tracker_server_requests_accepted_total{request_kind="scrape",server_binding_address_type="plain",server_binding_ip="0.0.0.0",server_binding_port="6969",server_binding_protocol="udp"} 213118 udp_tracker_server_requests_accepted_total{request_kind="announce",server_binding_address_type="plain",server_binding_ip="0.0.0.0",server_binding_port="6969",server_binding_protocol="udp"} 16460553 udp_tracker_server_requests_accepted_total{request_kind="connect",server_binding_address_type="plain",server_binding_ip="0.0.0.0",server_binding_port="6868",server_binding_protocol="udp"} 617 udp_tracker_server_requests_accepted_total{request_kind="connect",server_binding_address_type="plain",server_binding_ip="0.0.0.0",server_binding_port="6969",server_binding_protocol="udp"} 17148137 ``` And the criteria: it should contain the label `request_kind` with the value `connect`. 
It should return: 617 + 17148137 = 17148754 --- .../src/statistics/metrics.rs | 2 +- packages/metrics/src/aggregate.rs | 28 ++ packages/metrics/src/counter.rs | 18 ++ packages/metrics/src/gauge.rs | 11 + packages/metrics/src/label/set.rs | 21 ++ packages/metrics/src/lib.rs | 1 + packages/metrics/src/metric/aggregate/mod.rs | 1 + packages/metrics/src/metric/aggregate/sum.rs | 283 ++++++++++++++++++ packages/metrics/src/metric/mod.rs | 1 + .../src/metric_collection/aggregate.rs | 112 +++++++ .../mod.rs} | 22 +- .../src/statistics/metrics.rs | 2 +- .../tracker-core/src/statistics/metrics.rs | 2 +- .../src/statistics/metrics.rs | 2 +- .../src/statistics/metrics.rs | 2 +- 15 files changed, 493 insertions(+), 15 deletions(-) create mode 100644 packages/metrics/src/aggregate.rs create mode 100644 packages/metrics/src/metric/aggregate/mod.rs create mode 100644 packages/metrics/src/metric/aggregate/sum.rs create mode 100644 packages/metrics/src/metric_collection/aggregate.rs rename packages/metrics/src/{metric_collection.rs => metric_collection/mod.rs} (98%) diff --git a/packages/http-tracker-core/src/statistics/metrics.rs b/packages/http-tracker-core/src/statistics/metrics.rs index bf053b04e..650194d43 100644 --- a/packages/http-tracker-core/src/statistics/metrics.rs +++ b/packages/http-tracker-core/src/statistics/metrics.rs @@ -33,7 +33,7 @@ impl Metrics { labels: &LabelSet, now: DurationSinceUnixEpoch, ) -> Result<(), Error> { - self.metric_collection.increase_counter(metric_name, labels, now) + self.metric_collection.increment_counter(metric_name, labels, now) } /// # Errors diff --git a/packages/metrics/src/aggregate.rs b/packages/metrics/src/aggregate.rs new file mode 100644 index 000000000..875360cd9 --- /dev/null +++ b/packages/metrics/src/aggregate.rs @@ -0,0 +1,28 @@ +use derive_more::Display; + +#[derive(Debug, Display, Clone, Copy, PartialEq)] +pub struct AggregateValue(f64); + +impl AggregateValue { + #[must_use] + pub fn new(value: f64) -> Self { + Self(value) 
+ } + + #[must_use] + pub fn value(&self) -> f64 { + self.0 + } +} + +impl From for AggregateValue { + fn from(value: f64) -> Self { + Self(value) + } +} + +impl From for f64 { + fn from(value: AggregateValue) -> Self { + value.0 + } +} diff --git a/packages/metrics/src/counter.rs b/packages/metrics/src/counter.rs index ac6d21836..3148ab4c3 100644 --- a/packages/metrics/src/counter.rs +++ b/packages/metrics/src/counter.rs @@ -17,6 +17,11 @@ impl Counter { self.0 } + #[must_use] + pub fn primitive(&self) -> u64 { + self.value() + } + pub fn increment(&mut self, value: u64) { self.0 += value; } @@ -26,12 +31,25 @@ impl Counter { } } +impl From for Counter { + fn from(value: u32) -> Self { + Self(u64::from(value)) + } +} + impl From for Counter { fn from(value: u64) -> Self { Self(value) } } +impl From for Counter { + fn from(value: i32) -> Self { + #[allow(clippy::cast_sign_loss)] + Self(value as u64) + } +} + impl From for u64 { fn from(counter: Counter) -> Self { counter.value() diff --git a/packages/metrics/src/gauge.rs b/packages/metrics/src/gauge.rs index 3f6089955..a2ef8135f 100644 --- a/packages/metrics/src/gauge.rs +++ b/packages/metrics/src/gauge.rs @@ -17,6 +17,11 @@ impl Gauge { self.0 } + #[must_use] + pub fn primitive(&self) -> f64 { + self.value() + } + pub fn set(&mut self, value: f64) { self.0 = value; } @@ -30,6 +35,12 @@ impl Gauge { } } +impl From for Gauge { + fn from(value: f32) -> Self { + Self(f64::from(value)) + } +} + impl From for Gauge { fn from(value: f64) -> Self { Self(value) diff --git a/packages/metrics/src/label/set.rs b/packages/metrics/src/label/set.rs index cab457f42..673f330c1 100644 --- a/packages/metrics/src/label/set.rs +++ b/packages/metrics/src/label/set.rs @@ -1,3 +1,4 @@ +use std::collections::btree_map::Iter; use std::collections::BTreeMap; use std::fmt::Display; @@ -12,6 +13,11 @@ pub struct LabelSet { } impl LabelSet { + #[must_use] + pub fn empty() -> Self { + Self { items: BTreeMap::new() } + } + /// Insert a new label 
pair or update the value of an existing label. pub fn upsert(&mut self, key: LabelName, value: LabelValue) { self.items.insert(key, value); @@ -20,6 +26,21 @@ impl LabelSet { pub fn is_empty(&self) -> bool { self.items.is_empty() } + + pub fn contains_pair(&self, name: &LabelName, value: &LabelValue) -> bool { + match self.items.get(name) { + Some(existing_value) => existing_value == value, + None => false, + } + } + + pub fn matches(&self, criteria: &LabelSet) -> bool { + criteria.iter().all(|(key, value)| self.contains_pair(key, value)) + } + + pub fn iter(&self) -> Iter<'_, LabelName, LabelValue> { + self.items.iter() + } } impl Display for LabelSet { diff --git a/packages/metrics/src/lib.rs b/packages/metrics/src/lib.rs index 997cd3c8c..c53e9dd02 100644 --- a/packages/metrics/src/lib.rs +++ b/packages/metrics/src/lib.rs @@ -1,3 +1,4 @@ +pub mod aggregate; pub mod counter; pub mod gauge; pub mod label; diff --git a/packages/metrics/src/metric/aggregate/mod.rs b/packages/metrics/src/metric/aggregate/mod.rs new file mode 100644 index 000000000..dce785d95 --- /dev/null +++ b/packages/metrics/src/metric/aggregate/mod.rs @@ -0,0 +1 @@ +pub mod sum; diff --git a/packages/metrics/src/metric/aggregate/sum.rs b/packages/metrics/src/metric/aggregate/sum.rs new file mode 100644 index 000000000..f08ea7d55 --- /dev/null +++ b/packages/metrics/src/metric/aggregate/sum.rs @@ -0,0 +1,283 @@ +use crate::aggregate::AggregateValue; +use crate::counter::Counter; +use crate::gauge::Gauge; +use crate::label::LabelSet; +use crate::metric::Metric; + +pub trait Sum { + fn sum(&self, label_set_criteria: &LabelSet) -> AggregateValue; +} + +impl Sum for Metric { + #[allow(clippy::cast_precision_loss)] + fn sum(&self, label_set_criteria: &LabelSet) -> AggregateValue { + let sum: f64 = self + .sample_collection + .iter() + .filter(|(label_set, _measurement)| label_set.matches(label_set_criteria)) + .map(|(_label_set, measurement)| measurement.value().primitive() as f64) + .sum(); + + 
sum.into() + } +} + +impl Sum for Metric { + fn sum(&self, label_set_criteria: &LabelSet) -> AggregateValue { + let sum: f64 = self + .sample_collection + .iter() + .filter(|(label_set, _measurement)| label_set.matches(label_set_criteria)) + .map(|(_label_set, measurement)| measurement.value().primitive()) + .sum(); + + sum.into() + } +} + +#[cfg(test)] +mod tests { + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::aggregate::AggregateValue; + use crate::counter::Counter; + use crate::gauge::Gauge; + use crate::label::LabelSet; + use crate::metric::aggregate::sum::Sum; + use crate::metric::{Metric, MetricName}; + use crate::metric_name; + use crate::sample::Sample; + use crate::sample_collection::SampleCollection; + + struct MetricBuilder { + sample_time: DurationSinceUnixEpoch, + name: MetricName, + samples: Vec>, + } + + impl Default for MetricBuilder { + fn default() -> Self { + Self { + sample_time: DurationSinceUnixEpoch::from_secs(1_743_552_000), + name: metric_name!("test_metric"), + samples: vec![], + } + } + } + + impl MetricBuilder { + fn with_sample(mut self, value: T, label_set: &LabelSet) -> Self { + let sample = Sample::new(value, self.sample_time, label_set.clone()); + self.samples.push(sample); + self + } + + fn build(self) -> Metric { + Metric::new( + self.name, + None, + None, + SampleCollection::new(self.samples).expect("invalid samples"), + ) + } + } + + fn counter_cases() -> Vec<(Metric, LabelSet, AggregateValue)> { + // (metric, label set criteria, expected_aggregate_value) + vec![ + // Metric with one sample without label set + ( + MetricBuilder::default().with_sample(1.into(), &LabelSet::empty()).build(), + LabelSet::empty(), + 1.0.into(), + ), + // Metric with one sample with a label set + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0.into(), + ), + // Metric with two samples, different label sets, sum all + ( + 
MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value")].into()) + .with_sample(2.into(), &[("l2", "l2_value")].into()) + .build(), + LabelSet::empty(), + 3.0.into(), + ), + // Metric with two samples, different label sets, sum one + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value")].into()) + .with_sample(2.into(), &[("l2", "l2_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0.into(), + ), + // Metric with two samples, same label key, different label values, sum by key + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(2.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 3.0.into(), + ), + // Metric with two samples, different label values, sum by subkey + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(2.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .build(), + [("la", "la_value")].into(), + 1.0.into(), + ), + // Edge: Metric with no samples at all + (MetricBuilder::default().build(), LabelSet::empty(), 0.0.into()), + // Edge: Metric with samples but no matching labels + ( + MetricBuilder::default() + .with_sample(5.into(), &[("foo", "bar")].into()) + .build(), + [("not", "present")].into(), + 0.0.into(), + ), + // Edge: Metric with zero value + ( + MetricBuilder::default() + .with_sample(0.into(), &[("l3", "l3_value")].into()) + .build(), + [("l3", "l3_value")].into(), + 0.0.into(), + ), + // Edge: Metric with a very large value + ( + MetricBuilder::default() + .with_sample(u64::MAX.into(), &LabelSet::empty()) + .build(), + LabelSet::empty(), + #[allow(clippy::cast_precision_loss)] + (u64::MAX as f64).into(), + ), + ] + } + + fn gauge_cases() -> Vec<(Metric, LabelSet, AggregateValue)> { + // (metric, label set criteria, expected_aggregate_value) + vec![ + // Metric with one sample without label set + ( + 
MetricBuilder::default().with_sample(1.0.into(), &LabelSet::empty()).build(), + LabelSet::empty(), + 1.0.into(), + ), + // Metric with one sample with a label set + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0.into(), + ), + // Metric with two samples, different label sets, sum all + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value")].into()) + .with_sample(2.0.into(), &[("l2", "l2_value")].into()) + .build(), + LabelSet::empty(), + 3.0.into(), + ), + // Metric with two samples, different label sets, sum one + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value")].into()) + .with_sample(2.0.into(), &[("l2", "l2_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0.into(), + ), + // Metric with two samples, same label key, different label values, sum by key + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(2.0.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 3.0.into(), + ), + // Metric with two samples, different label values, sum by subkey + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(2.0.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .build(), + [("la", "la_value")].into(), + 1.0.into(), + ), + // Edge: Metric with no samples at all + (MetricBuilder::default().build(), LabelSet::empty(), 0.0.into()), + // Edge: Metric with samples but no matching labels + ( + MetricBuilder::default() + .with_sample(5.0.into(), &[("foo", "bar")].into()) + .build(), + [("not", "present")].into(), + 0.0.into(), + ), + // Edge: Metric with zero value + ( + MetricBuilder::default() + .with_sample(0.0.into(), &[("l3", "l3_value")].into()) + .build(), + [("l3", "l3_value")].into(), + 0.0.into(), + ), + // Edge: Metric with 
negative values + ( + MetricBuilder::default() + .with_sample((-2.0).into(), &[("l4", "l4_value")].into()) + .with_sample(3.0.into(), &[("l5", "l5_value")].into()) + .build(), + LabelSet::empty(), + 1.0.into(), + ), + // Edge: Metric with a very large value + ( + MetricBuilder::default() + .with_sample(f64::MAX.into(), &LabelSet::empty()) + .build(), + LabelSet::empty(), + f64::MAX.into(), + ), + ] + } + + #[test] + fn test_counter_cases() { + for (idx, (metric, criteria, expected_value)) in counter_cases().iter().enumerate() { + let sum = metric.sum(criteria); + + assert_eq!( + sum, *expected_value, + "at case {idx}, expected sum to be {expected_value}, got {sum}" + ); + } + } + + #[test] + fn test_gauge_cases() { + for (idx, (metric, criteria, expected_value)) in gauge_cases().iter().enumerate() { + let sum = metric.sum(criteria); + + assert_eq!( + sum, *expected_value, + "at case {idx}, expected sum to be {expected_value}, got {sum}" + ); + } + } +} diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index df743c519..8ee24493a 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -1,3 +1,4 @@ +pub mod aggregate; pub mod description; pub mod name; diff --git a/packages/metrics/src/metric_collection/aggregate.rs b/packages/metrics/src/metric_collection/aggregate.rs new file mode 100644 index 000000000..7fd744d92 --- /dev/null +++ b/packages/metrics/src/metric_collection/aggregate.rs @@ -0,0 +1,112 @@ +use crate::aggregate::AggregateValue; +use crate::counter::Counter; +use crate::gauge::Gauge; +use crate::label::LabelSet; +use crate::metric::aggregate::sum::Sum as MetricSumTrait; +use crate::metric::MetricName; +use crate::metric_collection::{MetricCollection, MetricKindCollection}; + +pub trait Sum { + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option; +} + +impl Sum for MetricCollection { + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) 
-> Option { + if let Some(value) = self.counters.sum(metric_name, label_set_criteria) { + return Some(value); + } + + self.gauges.sum(metric_name, label_set_criteria) + } +} + +impl Sum for MetricKindCollection { + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { + self.metrics.get(metric_name).map(|metric| metric.sum(label_set_criteria)) + } +} + +impl Sum for MetricKindCollection { + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { + self.metrics.get(metric_name).map(|metric| metric.sum(label_set_criteria)) + } +} + +#[cfg(test)] +mod tests { + + mod it_should_allow_summing_all_metric_samples_containing_some_given_labels { + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::label::LabelValue; + use crate::label_name; + use crate::metric_collection::aggregate::Sum; + + #[test] + fn type_counter_with_two_samples() { + use crate::label::LabelSet; + use crate::metric_collection::MetricCollection; + use crate::metric_name; + + let metric_name = metric_name!("test_counter"); + + let mut collection = MetricCollection::default(); + + collection + .increment_counter( + &metric_name!("test_counter"), + &(label_name!("label_1"), LabelValue::new("value_1")).into(), + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + collection + .increment_counter( + &metric_name!("test_counter"), + &(label_name!("label_2"), LabelValue::new("value_2")).into(), + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + assert_eq!(collection.sum(&metric_name, &LabelSet::empty()), Some(2.0.into())); + assert_eq!( + collection.sum(&metric_name, &(label_name!("label_1"), LabelValue::new("value_1")).into()), + Some(1.0.into()) + ); + } + + #[test] + fn type_gauge_with_two_samples() { + use crate::label::LabelSet; + use crate::metric_collection::MetricCollection; + use crate::metric_name; + + let metric_name = metric_name!("test_gauge"); + + let mut collection = MetricCollection::default(); + + 
collection + .increment_gauge( + &metric_name!("test_gauge"), + &(label_name!("label_1"), LabelValue::new("value_1")).into(), + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + collection + .increment_gauge( + &metric_name!("test_gauge"), + &(label_name!("label_2"), LabelValue::new("value_2")).into(), + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + assert_eq!(collection.sum(&metric_name, &LabelSet::empty()), Some(2.0.into())); + assert_eq!( + collection.sum(&metric_name, &(label_name!("label_1"), LabelValue::new("value_1")).into()), + Some(1.0.into()) + ); + } + } +} diff --git a/packages/metrics/src/metric_collection.rs b/packages/metrics/src/metric_collection/mod.rs similarity index 98% rename from packages/metrics/src/metric_collection.rs rename to packages/metrics/src/metric_collection/mod.rs index ff932caae..e183236aa 100644 --- a/packages/metrics/src/metric_collection.rs +++ b/packages/metrics/src/metric_collection/mod.rs @@ -1,3 +1,5 @@ +pub mod aggregate; + use std::collections::{HashMap, HashSet}; use serde::ser::{SerializeSeq, Serializer}; @@ -103,7 +105,7 @@ impl MetricCollection { /// /// Return an error if a metrics of a different type with the same name /// already exists. 
- pub fn increase_counter( + pub fn increment_counter( &mut self, name: &MetricName, label_set: &LabelSet, @@ -669,7 +671,7 @@ udp_tracker_server_performance_avg_announce_processing_time_ns{server_binding_ip // First create a counter collection - .increase_counter(&metric_name!("test_metric"), &label_set, time) + .increment_counter(&metric_name!("test_metric"), &label_set, time) .unwrap(); // Then try to create a gauge with the same name @@ -690,7 +692,7 @@ udp_tracker_server_performance_avg_announce_processing_time_ns{server_binding_ip .unwrap(); // Then try to create a counter with the same name - let result = collection.increase_counter(&metric_name!("test_metric"), &label_set, time); + let result = collection.increment_counter(&metric_name!("test_metric"), &label_set, time); assert!(result.is_err()); } @@ -803,7 +805,7 @@ http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",s let mut collection1 = MetricCollection::default(); collection1 - .increase_counter(&metric_name!("test_counter"), &label_set, time) + .increment_counter(&metric_name!("test_counter"), &label_set, time) .unwrap(); let mut collection2 = MetricCollection::default(); @@ -824,12 +826,12 @@ http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",s let mut collection1 = MetricCollection::default(); collection1 - .increase_counter(&metric_name!("test_metric"), &label_set, time) + .increment_counter(&metric_name!("test_metric"), &label_set, time) .unwrap(); let mut collection2 = MetricCollection::default(); collection2 - .increase_counter(&metric_name!("test_metric"), &label_set, time) + .increment_counter(&metric_name!("test_metric"), &label_set, time) .unwrap(); let result = collection1.merge(&collection2); @@ -843,7 +845,7 @@ http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",s let mut collection1 = MetricCollection::default(); collection1 - .increase_counter(&metric_name!("test_metric"), &label_set, time) + 
.increment_counter(&metric_name!("test_metric"), &label_set, time) .unwrap(); let mut collection2 = MetricCollection::default(); @@ -940,7 +942,7 @@ http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",s let mut collection = collection_with_one_counter(&metric_name, &label_set, Counter::new(0)); collection - .increase_counter(&metric_name!("test_counter"), &label_set, time) + .increment_counter(&metric_name!("test_counter"), &label_set, time) .unwrap(); assert_eq!( @@ -958,10 +960,10 @@ http_tracker_core_announce_requests_received_total{server_binding_ip="0.0.0.0",s MetricCollection::new(MetricKindCollection::default(), MetricKindCollection::default()).unwrap(); metric_collection - .increase_counter(&metric_name!("test_counter"), &label_set, time) + .increment_counter(&metric_name!("test_counter"), &label_set, time) .unwrap(); metric_collection - .increase_counter(&metric_name!("test_counter"), &label_set, time) + .increment_counter(&metric_name!("test_counter"), &label_set, time) .unwrap(); assert_eq!( diff --git a/packages/swarm-coordination-registry/src/statistics/metrics.rs b/packages/swarm-coordination-registry/src/statistics/metrics.rs index f8ab3f9d9..d62a1ba6e 100644 --- a/packages/swarm-coordination-registry/src/statistics/metrics.rs +++ b/packages/swarm-coordination-registry/src/statistics/metrics.rs @@ -21,7 +21,7 @@ impl Metrics { labels: &LabelSet, now: DurationSinceUnixEpoch, ) -> Result<(), Error> { - self.metric_collection.increase_counter(metric_name, labels, now) + self.metric_collection.increment_counter(metric_name, labels, now) } /// # Errors diff --git a/packages/tracker-core/src/statistics/metrics.rs b/packages/tracker-core/src/statistics/metrics.rs index 02cc51499..a5caaf1cf 100644 --- a/packages/tracker-core/src/statistics/metrics.rs +++ b/packages/tracker-core/src/statistics/metrics.rs @@ -21,7 +21,7 @@ impl Metrics { labels: &LabelSet, now: DurationSinceUnixEpoch, ) -> Result<(), Error> { - 
self.metric_collection.increase_counter(metric_name, labels, now) + self.metric_collection.increment_counter(metric_name, labels, now) } /// # Errors diff --git a/packages/udp-tracker-core/src/statistics/metrics.rs b/packages/udp-tracker-core/src/statistics/metrics.rs index 94aa7d08f..e6ff8d5f6 100644 --- a/packages/udp-tracker-core/src/statistics/metrics.rs +++ b/packages/udp-tracker-core/src/statistics/metrics.rs @@ -47,7 +47,7 @@ impl Metrics { labels: &LabelSet, now: DurationSinceUnixEpoch, ) -> Result<(), Error> { - self.metric_collection.increase_counter(metric_name, labels, now) + self.metric_collection.increment_counter(metric_name, labels, now) } /// # Errors diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index 7b18f6418..ac6250872 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -78,7 +78,7 @@ impl Metrics { labels: &LabelSet, now: DurationSinceUnixEpoch, ) -> Result<(), Error> { - self.metric_collection.increase_counter(metric_name, labels, now) + self.metric_collection.increment_counter(metric_name, labels, now) } /// # Errors From 4da4f8351c1e616421bf0e8b5b83b1926fe34cd4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 12 Jun 2025 08:49:47 +0100 Subject: [PATCH 712/802] refactor: [#1446] rename vars --- packages/metrics/src/label/set.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/packages/metrics/src/label/set.rs b/packages/metrics/src/label/set.rs index 673f330c1..542f5d2e6 100644 --- a/packages/metrics/src/label/set.rs +++ b/packages/metrics/src/label/set.rs @@ -19,8 +19,8 @@ impl LabelSet { } /// Insert a new label pair or update the value of an existing label. 
- pub fn upsert(&mut self, key: LabelName, value: LabelValue) { - self.items.insert(key, value); + pub fn upsert(&mut self, name: LabelName, value: LabelValue) { + self.items.insert(name, value); } pub fn is_empty(&self) -> bool { @@ -35,7 +35,7 @@ impl LabelSet { } pub fn matches(&self, criteria: &LabelSet) -> bool { - criteria.iter().all(|(key, value)| self.contains_pair(key, value)) + criteria.iter().all(|(name, value)| self.contains_pair(name, value)) } pub fn iter(&self) -> Iter<'_, LabelName, LabelValue> { @@ -48,7 +48,7 @@ impl Display for LabelSet { let items = self .items .iter() - .map(|(key, value)| format!("{key}=\"{value}\"")) + .map(|(name, value)| format!("{name}=\"{value}\"")) .collect::>() .join(","); @@ -90,8 +90,8 @@ impl From> for LabelSet { fn from(vec: Vec) -> Self { let mut items = BTreeMap::new(); - for (key, value) in vec { - items.insert(key, value); + for (name, value) in vec { + items.insert(name, value); } Self { items } @@ -160,8 +160,8 @@ impl Serialize for LabelSet { { self.items .iter() - .map(|(key, value)| SerializedLabel { - name: key.clone(), + .map(|(name, value)| SerializedLabel { + name: name.clone(), value: value.clone(), }) .collect::>() From 0d134396e53c9fe75becaad8f03072a0e53d3a22 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 12 Jun 2025 09:50:24 +0100 Subject: [PATCH 713/802] test: [#1446] add more tests to metrics package --- packages/metrics/.gitignore | 2 +- packages/metrics/README.md | 22 +++ packages/metrics/cSpell.json | 19 +++ packages/metrics/src/aggregate.rs | 115 ++++++++++++++++ packages/metrics/src/counter.rs | 156 ++++++++++++++++++++++ packages/metrics/src/gauge.rs | 124 +++++++++++++++++ packages/metrics/src/label/set.rs | 112 +++++++++++++++- packages/metrics/src/metric/mod.rs | 29 ++++ packages/metrics/src/sample.rs | 18 +++ packages/metrics/src/sample_collection.rs | 50 +++++++ 10 files changed, 645 insertions(+), 2 deletions(-) create mode 100644 packages/metrics/cSpell.json diff --git 
a/packages/metrics/.gitignore b/packages/metrics/.gitignore index 0b1372e5c..6350e9868 100644 --- a/packages/metrics/.gitignore +++ b/packages/metrics/.gitignore @@ -1 +1 @@ -./.coverage +.coverage diff --git a/packages/metrics/README.md b/packages/metrics/README.md index 627640eec..885d6fa45 100644 --- a/packages/metrics/README.md +++ b/packages/metrics/README.md @@ -6,6 +6,28 @@ A library with the metrics types used by the [Torrust Tracker](https://github.co [Crate documentation](https://docs.rs/torrust-tracker-metrics). +## Testing + +Run coverage report: + +```console +cargo llvm-cov --package torrust-tracker-metrics +``` + +Generate LCOV report with `llvm-cov` (for Visual Studio Code extension): + +```console +mkdir -p ./.coverage +cargo llvm-cov --package torrust-tracker-metrics --lcov --output-path=./.coverage/lcov.info +``` + +Generate HTML report with `llvm-cov`: + +```console +mkdir -p ./.coverage +cargo llvm-cov --package torrust-tracker-metrics --html --output-dir ./.coverage +``` + ## Acknowledgements We copied some parts like units or function names and signatures from the crate [metrics](https://crates.io/crates/metrics) because we wanted to make it compatible as much as possible with it. In the future, we may consider using the `metrics` crate directly instead of maintaining our own version. 
diff --git a/packages/metrics/cSpell.json b/packages/metrics/cSpell.json new file mode 100644 index 000000000..1a2c13d2e --- /dev/null +++ b/packages/metrics/cSpell.json @@ -0,0 +1,19 @@ +{ + "words": [ + "cloneable", + "formatjson", + "Gibibytes", + "Kibibytes", + "Mebibytes", + "ñaca", + "rstest", + "subsec", + "Tebibytes", + "thiserror" + ], + "enableFiletypes": [ + "dockerfile", + "shellscript", + "toml" + ] +} diff --git a/packages/metrics/src/aggregate.rs b/packages/metrics/src/aggregate.rs index 875360cd9..e480be396 100644 --- a/packages/metrics/src/aggregate.rs +++ b/packages/metrics/src/aggregate.rs @@ -26,3 +26,118 @@ impl From for f64 { value.0 } } + +#[cfg(test)] +mod tests { + use approx::assert_relative_eq; + + use super::*; + + #[test] + fn it_should_be_created_with_new() { + let value = AggregateValue::new(42.5); + assert_relative_eq!(value.value(), 42.5); + } + + #[test] + fn it_should_return_the_inner_value() { + let value = AggregateValue::new(123.456); + assert_relative_eq!(value.value(), 123.456); + } + + #[test] + fn it_should_handle_zero_value() { + let value = AggregateValue::new(0.0); + assert_relative_eq!(value.value(), 0.0); + } + + #[test] + fn it_should_handle_negative_values() { + let value = AggregateValue::new(-42.5); + assert_relative_eq!(value.value(), -42.5); + } + + #[test] + fn it_should_handle_infinity() { + let value = AggregateValue::new(f64::INFINITY); + assert_relative_eq!(value.value(), f64::INFINITY); + } + + #[test] + fn it_should_handle_nan() { + let value = AggregateValue::new(f64::NAN); + assert!(value.value().is_nan()); + } + + #[test] + fn it_should_be_created_from_f64() { + let value: AggregateValue = 42.5.into(); + assert_relative_eq!(value.value(), 42.5); + } + + #[test] + fn it_should_convert_to_f64() { + let value = AggregateValue::new(42.5); + let f64_value: f64 = value.into(); + assert_relative_eq!(f64_value, 42.5); + } + + #[test] + fn it_should_be_displayable() { + let value = AggregateValue::new(42.5); + 
assert_eq!(value.to_string(), "42.5"); + } + + #[test] + fn it_should_be_debuggable() { + let value = AggregateValue::new(42.5); + let debug_string = format!("{value:?}"); + assert_eq!(debug_string, "AggregateValue(42.5)"); + } + + #[test] + fn it_should_be_cloneable() { + let value = AggregateValue::new(42.5); + let cloned_value = value; + assert_eq!(value, cloned_value); + } + + #[test] + fn it_should_be_copyable() { + let value = AggregateValue::new(42.5); + let copied_value = value; + assert_eq!(value, copied_value); + } + + #[test] + fn it_should_support_equality_comparison() { + let value1 = AggregateValue::new(42.5); + let value2 = AggregateValue::new(42.5); + let value3 = AggregateValue::new(43.0); + + assert_eq!(value1, value2); + assert_ne!(value1, value3); + } + + #[test] + fn it_should_handle_special_float_values_in_equality() { + let nan1 = AggregateValue::new(f64::NAN); + let nan2 = AggregateValue::new(f64::NAN); + let infinity = AggregateValue::new(f64::INFINITY); + let neg_infinity = AggregateValue::new(f64::NEG_INFINITY); + + // NaN is not equal to itself in IEEE 754 + assert_ne!(nan1, nan2); + assert_eq!(infinity, AggregateValue::new(f64::INFINITY)); + assert_eq!(neg_infinity, AggregateValue::new(f64::NEG_INFINITY)); + assert_ne!(infinity, neg_infinity); + } + + #[test] + fn it_should_handle_conversion_roundtrip() { + let original_value = 42.5; + let aggregate_value = AggregateValue::from(original_value); + let converted_back: f64 = aggregate_value.into(); + assert_relative_eq!(original_value, converted_back); + } +} diff --git a/packages/metrics/src/counter.rs b/packages/metrics/src/counter.rs index 3148ab4c3..0e2002181 100644 --- a/packages/metrics/src/counter.rs +++ b/packages/metrics/src/counter.rs @@ -107,4 +107,160 @@ mod tests { let counter = Counter::new(42); assert_eq!(counter.to_prometheus(), "42"); } + + #[test] + fn it_could_be_converted_from_u32() { + let counter: Counter = 42u32.into(); + assert_eq!(counter.value(), 42); + } + + 
#[test] + fn it_could_be_converted_from_i32() { + let counter: Counter = 42i32.into(); + assert_eq!(counter.value(), 42); + } + + #[test] + fn it_should_return_primitive_value() { + let counter = Counter::new(123); + assert_eq!(counter.primitive(), 123); + } + + #[test] + fn it_should_handle_zero_value() { + let counter = Counter::new(0); + assert_eq!(counter.value(), 0); + assert_eq!(counter.primitive(), 0); + } + + #[test] + fn it_should_handle_large_values() { + let counter = Counter::new(u64::MAX); + assert_eq!(counter.value(), u64::MAX); + } + + #[test] + fn it_should_handle_u32_max_conversion() { + let counter: Counter = u32::MAX.into(); + assert_eq!(counter.value(), u64::from(u32::MAX)); + } + + #[test] + fn it_should_handle_i32_max_conversion() { + let counter: Counter = i32::MAX.into(); + assert_eq!(counter.value(), i32::MAX as u64); + } + + #[test] + fn it_should_handle_negative_i32_conversion() { + let counter: Counter = (-42i32).into(); + #[allow(clippy::cast_sign_loss)] + let expected = (-42i32) as u64; + assert_eq!(counter.value(), expected); + } + + #[test] + fn it_should_handle_i32_min_conversion() { + let counter: Counter = i32::MIN.into(); + #[allow(clippy::cast_sign_loss)] + let expected = i32::MIN as u64; + assert_eq!(counter.value(), expected); + } + + #[test] + fn it_should_handle_large_increments() { + let mut counter = Counter::new(100); + counter.increment(1000); + assert_eq!(counter.value(), 1100); + + counter.increment(u64::MAX - 1100); + assert_eq!(counter.value(), u64::MAX); + } + + #[test] + fn it_should_support_multiple_absolute_operations() { + let mut counter = Counter::new(0); + + counter.absolute(100); + assert_eq!(counter.value(), 100); + + counter.absolute(50); + assert_eq!(counter.value(), 50); + + counter.absolute(0); + assert_eq!(counter.value(), 0); + } + + #[test] + fn it_should_be_displayable() { + let counter = Counter::new(42); + assert_eq!(counter.to_string(), "42"); + + let counter = Counter::new(0); + 
assert_eq!(counter.to_string(), "0"); + } + + #[test] + fn it_should_be_debuggable() { + let counter = Counter::new(42); + let debug_string = format!("{counter:?}"); + assert_eq!(debug_string, "Counter(42)"); + } + + #[test] + fn it_should_be_cloneable() { + let counter = Counter::new(42); + let cloned_counter = counter.clone(); + assert_eq!(counter, cloned_counter); + assert_eq!(counter.value(), cloned_counter.value()); + } + + #[test] + fn it_should_support_equality_comparison() { + let counter1 = Counter::new(42); + let counter2 = Counter::new(42); + let counter3 = Counter::new(43); + + assert_eq!(counter1, counter2); + assert_ne!(counter1, counter3); + } + + #[test] + fn it_should_have_default_value() { + let counter = Counter::default(); + assert_eq!(counter.value(), 0); + } + + #[test] + fn it_should_handle_conversion_roundtrip() { + let original_value = 12345u64; + let counter = Counter::from(original_value); + let converted_back: u64 = counter.into(); + assert_eq!(original_value, converted_back); + } + + #[test] + fn it_should_handle_u32_conversion_roundtrip() { + let original_value = 12345u32; + let counter = Counter::from(original_value); + assert_eq!(counter.value(), u64::from(original_value)); + } + + #[test] + fn it_should_handle_i32_conversion_roundtrip() { + let original_value = 12345i32; + let counter = Counter::from(original_value); + #[allow(clippy::cast_sign_loss)] + let expected = original_value as u64; + assert_eq!(counter.value(), expected); + } + + #[test] + fn it_should_serialize_large_values_to_prometheus() { + let counter = Counter::new(u64::MAX); + assert_eq!(counter.to_prometheus(), u64::MAX.to_string()); + + let counter = Counter::new(0); + assert_eq!(counter.to_prometheus(), "0"); + } } diff --git a/packages/metrics/src/gauge.rs b/packages/metrics/src/gauge.rs index a2ef8135f..d0883715b 100644 --- a/packages/metrics/src/gauge.rs +++ b/packages/metrics/src/gauge.rs @@ -113,4 +113,128 @@ mod tests { let counter = Gauge::new(42.1); 
assert_eq!(counter.to_prometheus(), "42.1"); } + + #[test] + fn it_could_be_converted_from_f32() { + let gauge: Gauge = 42.5f32.into(); + assert_relative_eq!(gauge.value(), 42.5); + } + + #[test] + fn it_should_return_primitive_value() { + let gauge = Gauge::new(123.456); + assert_relative_eq!(gauge.primitive(), 123.456); + } + + #[test] + fn it_should_handle_zero_value() { + let gauge = Gauge::new(0.0); + assert_relative_eq!(gauge.value(), 0.0); + assert_relative_eq!(gauge.primitive(), 0.0); + } + + #[test] + fn it_should_handle_negative_values() { + let gauge = Gauge::new(-42.5); + assert_relative_eq!(gauge.value(), -42.5); + } + + #[test] + fn it_should_handle_large_values() { + let gauge = Gauge::new(f64::MAX); + assert_relative_eq!(gauge.value(), f64::MAX); + } + + #[test] + fn it_should_handle_infinity() { + let gauge = Gauge::new(f64::INFINITY); + assert_relative_eq!(gauge.value(), f64::INFINITY); + } + + #[test] + fn it_should_handle_nan() { + let gauge = Gauge::new(f64::NAN); + assert!(gauge.value().is_nan()); + } + + #[test] + fn it_should_be_displayable() { + let gauge = Gauge::new(42.5); + assert_eq!(gauge.to_string(), "42.5"); + + let gauge = Gauge::new(0.0); + assert_eq!(gauge.to_string(), "0"); + } + + #[test] + fn it_should_be_debuggable() { + let gauge = Gauge::new(42.5); + let debug_string = format!("{gauge:?}"); + assert_eq!(debug_string, "Gauge(42.5)"); + } + + #[test] + fn it_should_be_cloneable() { + let gauge = Gauge::new(42.5); + let cloned_gauge = gauge.clone(); + assert_eq!(gauge, cloned_gauge); + assert_relative_eq!(gauge.value(), cloned_gauge.value()); + } + + #[test] + fn it_should_support_equality_comparison() { + let gauge1 = Gauge::new(42.5); + let gauge2 = Gauge::new(42.5); + let gauge3 = Gauge::new(43.0); + + assert_eq!(gauge1, gauge2); + assert_ne!(gauge1, gauge3); + } + + #[test] + fn it_should_have_default_value() { + let gauge = Gauge::default(); + assert_relative_eq!(gauge.value(), 0.0); + } + + #[test] + fn 
it_should_handle_conversion_roundtrip() { + let original_value = 12345.678; + let gauge = Gauge::from(original_value); + let converted_back: f64 = gauge.into(); + assert_relative_eq!(original_value, converted_back); + } + + #[test] + fn it_should_handle_f32_conversion_roundtrip() { + let original_value = 12345.5f32; + let gauge = Gauge::from(original_value); + assert_relative_eq!(gauge.value(), f64::from(original_value)); + } + + #[test] + fn it_should_handle_multiple_operations() { + let mut gauge = Gauge::new(100.0); + + gauge.increment(50.0); + assert_relative_eq!(gauge.value(), 150.0); + + gauge.decrement(25.0); + assert_relative_eq!(gauge.value(), 125.0); + + gauge.set(200.0); + assert_relative_eq!(gauge.value(), 200.0); + } + + #[test] + fn it_should_serialize_special_values_to_prometheus() { + let gauge = Gauge::new(f64::INFINITY); + assert_eq!(gauge.to_prometheus(), "inf"); + + let gauge = Gauge::new(f64::NEG_INFINITY); + assert_eq!(gauge.to_prometheus(), "-inf"); + + let gauge = Gauge::new(f64::NAN); + assert_eq!(gauge.to_prometheus(), "NaN"); + } } diff --git a/packages/metrics/src/label/set.rs b/packages/metrics/src/label/set.rs index 542f5d2e6..46256e4d5 100644 --- a/packages/metrics/src/label/set.rs +++ b/packages/metrics/src/label/set.rs @@ -297,10 +297,18 @@ mod tests { #[test] fn it_should_allow_serializing_to_prometheus_format() { let label_set = LabelSet::from((label_name!("label_name"), LabelValue::new("label value"))); - assert_eq!(label_set.to_prometheus(), r#"{label_name="label value"}"#); } + #[test] + fn it_should_handle_prometheus_format_with_special_characters() { + let label_set: LabelSet = vec![("label_with_underscores", "value_with_underscores")].into(); + assert_eq!( + label_set.to_prometheus(), + r#"{label_with_underscores="value_with_underscores"}"# + ); + } + #[test] fn it_should_alphabetically_order_labels_in_prometheus_format() { let label_set = LabelSet::from([ @@ -471,4 +479,106 @@ mod tests { let a: LabelSet = 
(label_name!("x"), LabelValue::new("1")).into(); let _unused = a.clone(); } + + #[test] + fn it_should_check_if_empty() { + let empty_set = LabelSet::empty(); + assert!(empty_set.is_empty()); + } + + #[test] + fn it_should_check_if_non_empty() { + let non_empty_set: LabelSet = (label_name!("label"), LabelValue::new("value")).into(); + assert!(!non_empty_set.is_empty()); + } + + #[test] + fn it_should_create_an_empty_label_set() { + let empty_set = LabelSet::empty(); + assert!(empty_set.is_empty()); + } + + #[test] + fn it_should_check_if_contains_specific_label_pair() { + let label_set: LabelSet = vec![("service", "tracker"), ("protocol", "http")].into(); + + // Test existing pair + assert!(label_set.contains_pair(&LabelName::new("service"), &LabelValue::new("tracker"))); + assert!(label_set.contains_pair(&LabelName::new("protocol"), &LabelValue::new("http"))); + + // Test non-existing name + assert!(!label_set.contains_pair(&LabelName::new("missing"), &LabelValue::new("value"))); + + // Test existing name with wrong value + assert!(!label_set.contains_pair(&LabelName::new("service"), &LabelValue::new("wrong"))); + } + + #[test] + fn it_should_match_against_criteria() { + let label_set: LabelSet = vec![("service", "tracker"), ("protocol", "http"), ("version", "v1")].into(); + + // Empty criteria should match any label set + assert!(label_set.matches(&LabelSet::empty())); + + // Single matching criterion + let single_criteria: LabelSet = vec![("service", "tracker")].into(); + assert!(label_set.matches(&single_criteria)); + + // Multiple matching criteria + let multiple_criteria: LabelSet = vec![("service", "tracker"), ("protocol", "http")].into(); + assert!(label_set.matches(&multiple_criteria)); + + // Non-matching criterion + let non_matching: LabelSet = vec![("service", "wrong")].into(); + assert!(!label_set.matches(&non_matching)); + + // Partially matching criteria (one matches, one doesn't) + let partial_matching: LabelSet = vec![("service", "tracker"), 
("missing", "value")].into(); + assert!(!label_set.matches(&partial_matching)); + + // Criteria with label not in original set + let missing_label: LabelSet = vec![("missing_label", "value")].into(); + assert!(!label_set.matches(&missing_label)); + } + + #[test] + fn it_should_allow_iteration_over_label_pairs() { + let label_set: LabelSet = vec![("service", "tracker"), ("protocol", "http")].into(); + + let mut count = 0; + + for (name, value) in label_set.iter() { + count += 1; + // Verify we can access name and value + assert!(!name.to_string().is_empty()); + assert!(!value.to_string().is_empty()); + } + + assert_eq!(count, 2); + } + + #[test] + fn it_should_display_empty_label_set() { + let empty_set = LabelSet::empty(); + assert_eq!(empty_set.to_string(), "{}"); + } + + #[test] + fn it_should_serialize_empty_label_set_to_prometheus_format() { + let empty_set = LabelSet::empty(); + assert_eq!(empty_set.to_prometheus(), ""); + } + + #[test] + fn it_should_maintain_order_in_iteration() { + let label_set: LabelSet = vec![("z_label", "z_value"), ("a_label", "a_value"), ("m_label", "m_value")].into(); + + let mut labels: Vec = vec![]; + for (name, _) in label_set.iter() { + labels.push(name.to_string()); + } + + // Should be in alphabetical order + assert_eq!(labels, vec!["a_label", "m_label", "z_label"]); + } } diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index 8ee24493a..d1aa01b94 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -322,4 +322,33 @@ mod tests { assert_relative_eq!(metric.get_sample_data(&label_set).unwrap().value().value(), 1.0); } } + + mod for_prometheus_serialization { + use super::super::*; + use crate::counter::Counter; + use crate::metric_name; + + #[test] + fn it_should_return_empty_string_for_prometheus_help_line_when_description_is_none() { + let name = metric_name!("test_metric"); + let samples = SampleCollection::::default(); + let metric = Metric::::new(name, 
None, None, samples); + + let help_line = metric.prometheus_help_line(); + + assert_eq!(help_line, String::new()); + } + + #[test] + fn it_should_return_formatted_help_line_for_prometheus_when_description_is_some() { + let name = metric_name!("test_metric"); + let description = MetricDescription::new("This is a test metric description"); + let samples = SampleCollection::::default(); + let metric = Metric::::new(name, None, Some(description), samples); + + let help_line = metric.prometheus_help_line(); + + assert_eq!(help_line, "# HELP test_metric This is a test metric description"); + } + } } diff --git a/packages/metrics/src/sample.rs b/packages/metrics/src/sample.rs index b9cd6c312..63f46b9b8 100644 --- a/packages/metrics/src/sample.rs +++ b/packages/metrics/src/sample.rs @@ -279,6 +279,15 @@ mod tests { assert_eq!(sample.to_prometheus(), r#"{label_name="label_value",method="GET"} 42"#); } + + #[test] + fn it_should_allow_exporting_to_prometheus_format_with_empty_label_set() { + let counter = Counter::new(42); + + let sample = Sample::new(counter, DurationSinceUnixEpoch::default(), LabelSet::default()); + + assert_eq!(sample.to_prometheus(), " 42"); + } } mod for_gauge_type_sample { use torrust_tracker_primitives::DurationSinceUnixEpoch; @@ -347,6 +356,15 @@ mod tests { assert_eq!(sample.to_prometheus(), r#"{label_name="label_value",method="GET"} 42"#); } + + #[test] + fn it_should_allow_exporting_to_prometheus_format_with_empty_label_set() { + let gauge = Gauge::new(42.0); + + let sample = Sample::new(gauge, DurationSinceUnixEpoch::default(), LabelSet::default()); + + assert_eq!(sample.to_prometheus(), " 42"); + } } mod serialization_to_json { diff --git a/packages/metrics/src/sample_collection.rs b/packages/metrics/src/sample_collection.rs index ef88b27dd..e520d7310 100644 --- a/packages/metrics/src/sample_collection.rs +++ b/packages/metrics/src/sample_collection.rs @@ -386,6 +386,56 @@ mod tests { assert_eq!(collection.get(&label2).unwrap().value(), 
&Counter::new(1)); assert_eq!(collection.len(), 2); } + + #[test] + fn it_should_allow_setting_absolute_value_for_a_counter() { + let label_set = LabelSet::default(); + let mut collection = SampleCollection::::default(); + + // Set absolute value for a non-existent label + collection.absolute(&label_set, 42, sample_update_time()); + + // Verify the label exists and has the absolute value + assert!(collection.get(&label_set).is_some()); + let sample = collection.get(&label_set).unwrap(); + assert_eq!(*sample.value(), Counter::new(42)); + } + + #[test] + fn it_should_allow_setting_absolute_value_for_existing_counter() { + let label_set = LabelSet::default(); + let mut collection = SampleCollection::::default(); + + // Initialize the sample with increment + collection.increment(&label_set, sample_update_time()); + + // Verify initial state + let sample = collection.get(&label_set).unwrap(); + assert_eq!(sample.value(), &Counter::new(1)); + + // Set absolute value + collection.absolute(&label_set, 100, sample_update_time()); + let sample = collection.get(&label_set).unwrap(); + assert_eq!(*sample.value(), Counter::new(100)); + } + + #[test] + fn it_should_update_time_when_setting_absolute_value() { + let label_set = LabelSet::default(); + let initial_time = sample_update_time(); + let mut collection = SampleCollection::::default(); + + // Set absolute value with initial time + collection.absolute(&label_set, 50, initial_time); + + // Set absolute value with a new time + let new_time = initial_time.add(DurationSinceUnixEpoch::from_secs(1)); + collection.absolute(&label_set, 75, new_time); + + let sample = collection.get(&label_set).unwrap(); + assert_eq!(sample.recorded_at(), new_time); + assert_eq!(*sample.value(), Counter::new(75)); + } } #[cfg(test)] From 476ece46e7b3b67d01e8fc2031aa0e9faf3578af Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jun 2025 10:52:53 +0100 Subject: [PATCH 714/802] refactor: [#1446] WIP. 
Calculate global metrics from labeled metrics We need to add a new label to make it easier to filter by the server IP family: IPv4 or IPv6. --- Cargo.lock | 1 + .../src/statistics/event/handler.rs | 14 ++- .../http-tracker-core/src/statistics/mod.rs | 2 +- packages/metrics/src/aggregate.rs | 2 +- packages/rest-tracker-api-core/Cargo.toml | 1 + .../src/statistics/services.rs | 117 +++++++++++++++++- .../event/handler/request_accepted.rs | 4 +- .../udp-tracker-server/src/statistics/mod.rs | 2 +- 8 files changed, 135 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 269f7a3a2..6f8215bbf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4668,6 +4668,7 @@ dependencies = [ "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", "torrust-udp-tracker-server", + "tracing", ] [[package]] diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index f5506f6e3..dcb814eef 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -32,7 +32,12 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: .increase_counter(&metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) .await { - Ok(()) => {} + Ok(()) => { + tracing::debug!( + "Successfully increased the counter for HTTP announce requests received: {}", + label_set + ); + } Err(err) => tracing::error!("Failed to increase the counter: {}", err), }; } @@ -57,7 +62,12 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: .increase_counter(&metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &label_set, now) .await { - Ok(()) => {} + Ok(()) => { + tracing::debug!( + "Successfully increased the counter for HTTP scrape requests received: {}", + label_set + ); + } Err(err) => tracing::error!("Failed to increase the counter: {}", err), }; } diff --git 
a/packages/http-tracker-core/src/statistics/mod.rs b/packages/http-tracker-core/src/statistics/mod.rs index 7181632aa..b8ca865fa 100644 --- a/packages/http-tracker-core/src/statistics/mod.rs +++ b/packages/http-tracker-core/src/statistics/mod.rs @@ -8,7 +8,7 @@ use torrust_tracker_metrics::metric::description::MetricDescription; use torrust_tracker_metrics::metric_name; use torrust_tracker_metrics::unit::Unit; -const HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL: &str = "http_tracker_core_requests_received_total"; +pub const HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL: &str = "http_tracker_core_requests_received_total"; #[must_use] pub fn describe_metrics() -> Metrics { diff --git a/packages/metrics/src/aggregate.rs b/packages/metrics/src/aggregate.rs index e480be396..39b760fca 100644 --- a/packages/metrics/src/aggregate.rs +++ b/packages/metrics/src/aggregate.rs @@ -1,6 +1,6 @@ use derive_more::Display; -#[derive(Debug, Display, Clone, Copy, PartialEq)] +#[derive(Debug, Display, Clone, Copy, PartialEq, Default)] pub struct AggregateValue(f64); impl AggregateValue { diff --git a/packages/rest-tracker-api-core/Cargo.toml b/packages/rest-tracker-api-core/Cargo.toml index cc8eda903..d9e396960 100644 --- a/packages/rest-tracker-api-core/Cargo.toml +++ b/packages/rest-tracker-api-core/Cargo.toml @@ -23,6 +23,7 @@ torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } +tracing = "0" [dev-dependencies] torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 6474df0d7..3cfd6653e 100644 --- 
a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -1,11 +1,14 @@ use std::sync::Arc; +use bittorrent_http_tracker_core::statistics::HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; +use torrust_tracker_metrics::metric_collection::aggregate::Sum; use torrust_tracker_metrics::metric_collection::MetricCollection; -use torrust_udp_tracker_server::statistics as udp_server_statistics; +use torrust_tracker_metrics::metric_name; +use torrust_udp_tracker_server::statistics::{self as udp_server_statistics, UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL}; use super::metrics::TorrentsMetrics; use crate::statistics::metrics::ProtocolMetrics; @@ -32,9 +35,38 @@ pub async fn get_metrics( http_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> TrackerMetrics { + let protocol_metrics_from_global_metrics = get_protocol_metrics( + ban_service.clone(), + http_stats_repository.clone(), + udp_server_stats_repository.clone(), + ) + .await; + + let protocol_metrics_from_labeled_metrics = get_protocol_metrics_from_labeled_metrics( + ban_service.clone(), + http_stats_repository.clone(), + udp_server_stats_repository.clone(), + ) + .await; + + // todo: + // We keep both metrics until we deploy to production and we can + // ensure that the protocol metrics from labeled metrics are correct. + // After that we can remove the `get_protocol_metrics` function and + // use only the `get_protocol_metrics_from_labeled_metrics` function. + // And also remove the code in repositories to generate the global metrics. 
+ let protocol_metrics = if protocol_metrics_from_global_metrics == protocol_metrics_from_labeled_metrics { + protocol_metrics_from_labeled_metrics + } else { + // tracing::warn!("The protocol metrics from global metrics and labeled metrics are different"); + // tracing::warn!("Global metrics: {:?}", protocol_metrics_from_global_metrics); + // tracing::warn!("Labeled metrics: {:?}", protocol_metrics_from_labeled_metrics); + protocol_metrics_from_global_metrics + }; + TrackerMetrics { torrents_metrics: get_torrents_metrics(in_memory_torrent_repository, tracker_core_stats_repository).await, - protocol_metrics: get_protocol_metrics(ban_service, http_stats_repository, udp_server_stats_repository).await, + protocol_metrics, } } @@ -99,6 +131,87 @@ async fn get_protocol_metrics( } } +#[allow(deprecated)] +async fn get_protocol_metrics_from_labeled_metrics( + ban_service: Arc>, + http_stats_repository: Arc, + udp_server_stats_repository: Arc, +) -> ProtocolMetrics { + let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); + let http_stats = http_stats_repository.get_stats().await; + let udp_server_stats = udp_server_stats_repository.get_stats().await; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let tcp4_announces_handled = http_stats + .metric_collection + .sum( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("request_kind", "announce")].into(), // todo: add label for `server_binding_ip_family` with value `inet` (inet/inet6) + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp4_announces_handled = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("request_kind", "announce")].into(), // todo: add label for `server_binding_ip_family` with value `inet` (inet/inet6) + ) + .unwrap_or_default() + .value() as u64; + + /* + + todo: + + - Add a label for 
`server_binding_ip_family` with value `inet` (inet/inet6) + to all metrics containing an IP address. This will allow us to distinguish + between IPv4 and IPv6 metrics. + - Continue replacing the other metrics with the labeled metrics. + + */ + + // For backward compatibility we keep the `tcp4_connections_handled` and + // `tcp6_connections_handled` metrics. They don't make sense for the HTTP + // tracker, but we keep them for now. In new major versions we should remove + // them. + + ProtocolMetrics { + // TCPv4 + tcp4_connections_handled: tcp4_announces_handled + http_stats.tcp4_scrapes_handled, + tcp4_announces_handled, + tcp4_scrapes_handled: http_stats.tcp4_scrapes_handled, + // TCPv6 + tcp6_connections_handled: http_stats.tcp6_announces_handled + http_stats.tcp6_scrapes_handled, + tcp6_announces_handled: http_stats.tcp6_announces_handled, + tcp6_scrapes_handled: http_stats.tcp6_scrapes_handled, + // UDP + udp_requests_aborted: udp_server_stats.udp_requests_aborted, + udp_requests_banned: udp_server_stats.udp_requests_banned, + udp_banned_ips_total: udp_banned_ips_total as u64, + udp_avg_connect_processing_time_ns: udp_server_stats.udp_avg_connect_processing_time_ns, + udp_avg_announce_processing_time_ns: udp_server_stats.udp_avg_announce_processing_time_ns, + udp_avg_scrape_processing_time_ns: udp_server_stats.udp_avg_scrape_processing_time_ns, + // UDPv4 + udp4_requests: udp_server_stats.udp4_requests, + udp4_connections_handled: udp_server_stats.udp4_connections_handled, + udp4_announces_handled, + udp4_scrapes_handled: udp_server_stats.udp4_scrapes_handled, + udp4_responses: udp_server_stats.udp4_responses, + udp4_errors_handled: udp_server_stats.udp4_errors_handled, + // UDPv6 + udp6_requests: udp_server_stats.udp6_requests, + udp6_connections_handled: udp_server_stats.udp6_connections_handled, + udp6_announces_handled: udp_server_stats.udp6_announces_handled, + udp6_scrapes_handled: udp_server_stats.udp6_scrapes_handled, + udp6_responses: 
udp_server_stats.udp6_responses, + udp6_errors_handled: udp_server_stats.udp6_errors_handled, + } +} + #[derive(Debug, PartialEq)] pub struct TrackerLabeledMetrics { pub metrics: MetricCollection, diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs index b296f8ec9..37b668227 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs @@ -47,7 +47,9 @@ pub async fn handle_event( .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &label_set, now) .await { - Ok(()) => {} + Ok(()) => { + tracing::debug!("Successfully increased the counter for UDP requests accepted: {}", label_set); + } Err(err) => tracing::error!("Failed to increase the counter: {}", err), }; } diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index ebb3df0bf..3a25fd51d 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -13,7 +13,7 @@ const UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL: &str = "udp_tracker_server_reque pub(crate) const UDP_TRACKER_SERVER_IPS_BANNED_TOTAL: &str = "udp_tracker_server_ips_banned_total"; const UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL: &str = "udp_tracker_server_connection_id_errors_total"; const UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL: &str = "udp_tracker_server_requests_received_total"; -const UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL: &str = "udp_tracker_server_requests_accepted_total"; +pub const UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL: &str = "udp_tracker_server_requests_accepted_total"; const UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL: &str = "udp_tracker_server_responses_sent_total"; const UDP_TRACKER_SERVER_ERRORS_TOTAL: &str = "udp_tracker_server_errors_total"; const 
UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS: &str = "udp_tracker_server_performance_avg_processing_time_ns"; From 1376a7cb20166140c081c2bbf26443043bd1eb77 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jun 2025 11:02:35 +0100 Subject: [PATCH 715/802] refactor: [#1446] rename AddressType to IpType Address might be a socket address. --- packages/http-tracker-core/src/event.rs | 4 ++-- packages/primitives/src/service_binding.rs | 18 +++++++++--------- packages/udp-tracker-core/src/event.rs | 4 ++-- packages/udp-tracker-server/src/event.rs | 4 ++-- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/packages/http-tracker-core/src/event.rs b/packages/http-tracker-core/src/event.rs index cf969b4ff..e3d37569d 100644 --- a/packages/http-tracker-core/src/event.rs +++ b/packages/http-tracker-core/src/event.rs @@ -87,8 +87,8 @@ impl From for LabelSet { LabelValue::new(&connection_context.server.service_binding.bind_address().ip().to_string()), ), ( - label_name!("server_binding_address_type"), - LabelValue::new(&connection_context.server.service_binding.bind_address_type().to_string()), + label_name!("server_binding_address_ip_type"), + LabelValue::new(&connection_context.server.service_binding.bind_address_ip_type().to_string()), ), ( label_name!("server_binding_port"), diff --git a/packages/primitives/src/service_binding.rs b/packages/primitives/src/service_binding.rs index d5055130e..72d5e7f2e 100644 --- a/packages/primitives/src/service_binding.rs +++ b/packages/primitives/src/service_binding.rs @@ -26,7 +26,7 @@ impl fmt::Display for Protocol { } #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] -pub enum AddressType { +pub enum IpType { /// Represents a plain IPv4 or IPv6 address. 
Plain, @@ -38,7 +38,7 @@ pub enum AddressType { V4MappedV6, } -impl fmt::Display for AddressType { +impl fmt::Display for IpType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let addr_type_str = match self { Self::Plain => "plain", @@ -120,12 +120,12 @@ impl ServiceBinding { } #[must_use] - pub fn bind_address_type(&self) -> AddressType { + pub fn bind_address_ip_type(&self) -> IpType { if self.is_v4_mapped_v6() { - return AddressType::V4MappedV6; + return IpType::V4MappedV6; } - AddressType::Plain + IpType::Plain } /// # Panics @@ -169,7 +169,7 @@ mod tests { use rstest::rstest; use url::Url; - use crate::service_binding::{AddressType, Error, Protocol, ServiceBinding}; + use crate::service_binding::{Error, IpType, Protocol, ServiceBinding}; #[rstest] #[case("wildcard_ip", Protocol::UDP, SocketAddr::from_str("0.0.0.0:6969").unwrap())] @@ -203,7 +203,7 @@ mod tests { fn should_return_the_bind_address_plain_type_for_ipv4_ips() { let service_binding = ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("127.0.0.1:6969").unwrap()).unwrap(); - assert_eq!(service_binding.bind_address_type(), AddressType::Plain); + assert_eq!(service_binding.bind_address_ip_type(), IpType::Plain); } #[test] @@ -211,7 +211,7 @@ mod tests { let service_binding = ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("[0:0:0:0:0:0:0:1]:6969").unwrap()).unwrap(); - assert_eq!(service_binding.bind_address_type(), AddressType::Plain); + assert_eq!(service_binding.bind_address_ip_type(), IpType::Plain); } #[test] @@ -219,7 +219,7 @@ mod tests { let service_binding = ServiceBinding::new(Protocol::UDP, SocketAddr::from_str("[::ffff:192.0.2.33]:6969").unwrap()).unwrap(); - assert_eq!(service_binding.bind_address_type(), AddressType::V4MappedV6); + assert_eq!(service_binding.bind_address_ip_type(), IpType::V4MappedV6); } #[test] diff --git a/packages/udp-tracker-core/src/event.rs b/packages/udp-tracker-core/src/event.rs index e9264653e..d354d3e7e 100644 --- 
a/packages/udp-tracker-core/src/event.rs +++ b/packages/udp-tracker-core/src/event.rs @@ -60,8 +60,8 @@ impl From for LabelSet { LabelValue::new(&connection_context.server_service_binding.bind_address().ip().to_string()), ), ( - label_name!("server_binding_address_type"), - LabelValue::new(&connection_context.server_service_binding.bind_address_type().to_string()), + label_name!("server_binding_address_ip_type"), + LabelValue::new(&connection_context.server_service_binding.bind_address_ip_type().to_string()), ), ( label_name!("server_binding_port"), diff --git a/packages/udp-tracker-server/src/event.rs b/packages/udp-tracker-server/src/event.rs index 09fc139cb..c3e736a53 100644 --- a/packages/udp-tracker-server/src/event.rs +++ b/packages/udp-tracker-server/src/event.rs @@ -119,8 +119,8 @@ impl From for LabelSet { LabelValue::new(&connection_context.server_service_binding.bind_address().ip().to_string()), ), ( - label_name!("server_binding_address_type"), - LabelValue::new(&connection_context.server_service_binding.bind_address_type().to_string()), + label_name!("server_binding_address_ip_type"), + LabelValue::new(&connection_context.server_service_binding.bind_address_ip_type().to_string()), ), ( label_name!("server_binding_port"), From 96bae36c5b9bae301f9567bc339a43b7ee80219c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jun 2025 11:19:02 +0100 Subject: [PATCH 716/802] feat: [#1446] add new metric label server_binding_address_ip_type Example: ``` udp_tracker_core_requests_received_total{request_kind="connect",server_binding_address_ip_family="inet",server_binding_address_ip_type="plain",server_binding_ip="0.0.0.0",server_binding_port="6969",server_binding_protocol="udp"} 1 ``` It's needed to easily filter metric samples to calculate aggregate values for a given IP family (IPv4 or IPv6). 
--- packages/http-tracker-core/src/event.rs | 4 ++ packages/primitives/src/service_binding.rs | 43 ++++++++++++++++++++-- packages/udp-tracker-core/src/event.rs | 4 ++ packages/udp-tracker-server/src/event.rs | 4 ++ 4 files changed, 52 insertions(+), 3 deletions(-) diff --git a/packages/http-tracker-core/src/event.rs b/packages/http-tracker-core/src/event.rs index e3d37569d..5af88c927 100644 --- a/packages/http-tracker-core/src/event.rs +++ b/packages/http-tracker-core/src/event.rs @@ -90,6 +90,10 @@ impl From for LabelSet { label_name!("server_binding_address_ip_type"), LabelValue::new(&connection_context.server.service_binding.bind_address_ip_type().to_string()), ), + ( + label_name!("server_binding_address_ip_family"), + LabelValue::new(&connection_context.server.service_binding.bind_address_ip_family().to_string()), + ), ( label_name!("server_binding_port"), LabelValue::new(&connection_context.server.service_binding.bind_address().port().to_string()), diff --git a/packages/primitives/src/service_binding.rs b/packages/primitives/src/service_binding.rs index 72d5e7f2e..74ff58e66 100644 --- a/packages/primitives/src/service_binding.rs +++ b/packages/primitives/src/service_binding.rs @@ -1,5 +1,5 @@ use std::fmt; -use std::net::SocketAddr; +use std::net::{IpAddr, SocketAddr}; use serde::{Deserialize, Serialize}; use url::Url; @@ -40,11 +40,43 @@ pub enum IpType { impl fmt::Display for IpType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let addr_type_str = match self { + let ip_type_str = match self { Self::Plain => "plain", Self::V4MappedV6 => "v4_mapped_v6", }; - write!(f, "{addr_type_str}") + write!(f, "{ip_type_str}") + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] +pub enum IpFamily { + // IPv4 + Inet, + // IPv6 + Inet6, +} + +impl fmt::Display for IpFamily { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let ip_family_str = match self { + Self::Inet => "inet", + Self::Inet6 => "inet6", + }; + 
write!(f, "{ip_family_str}") + } +} + +impl From for IpFamily { + fn from(ip: IpAddr) -> Self { + if ip.is_ipv4() { + return IpFamily::Inet; + } + + if ip.is_ipv6() { + return IpFamily::Inet6; + } + + panic!("Unsupported IP address type: {ip}"); } } @@ -128,6 +160,11 @@ impl ServiceBinding { IpType::Plain } + #[must_use] + pub fn bind_address_ip_family(&self) -> IpFamily { + self.bind_address.ip().into() + } + /// # Panics /// /// It never panics because the URL is always valid. diff --git a/packages/udp-tracker-core/src/event.rs b/packages/udp-tracker-core/src/event.rs index d354d3e7e..761b809d8 100644 --- a/packages/udp-tracker-core/src/event.rs +++ b/packages/udp-tracker-core/src/event.rs @@ -63,6 +63,10 @@ impl From for LabelSet { label_name!("server_binding_address_ip_type"), LabelValue::new(&connection_context.server_service_binding.bind_address_ip_type().to_string()), ), + ( + label_name!("server_binding_address_ip_family"), + LabelValue::new(&connection_context.server_service_binding.bind_address_ip_family().to_string()), + ), ( label_name!("server_binding_port"), LabelValue::new(&connection_context.server_service_binding.bind_address().port().to_string()), diff --git a/packages/udp-tracker-server/src/event.rs b/packages/udp-tracker-server/src/event.rs index c3e736a53..5588a2b33 100644 --- a/packages/udp-tracker-server/src/event.rs +++ b/packages/udp-tracker-server/src/event.rs @@ -122,6 +122,10 @@ impl From for LabelSet { label_name!("server_binding_address_ip_type"), LabelValue::new(&connection_context.server_service_binding.bind_address_ip_type().to_string()), ), + ( + label_name!("server_binding_address_ip_family"), + LabelValue::new(&connection_context.server_service_binding.bind_address_ip_family().to_string()), + ), ( label_name!("server_binding_port"), LabelValue::new(&connection_context.server_service_binding.bind_address().port().to_string()), From 3f5216e382e40f2e65e8ca5d2ce40eb7ba4753aa Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 
Jun 2025 11:21:05 +0100 Subject: [PATCH 717/802] fix: [#1446] clippy error --- packages/rest-tracker-api-core/src/statistics/services.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 3cfd6653e..4ffecb690 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -164,14 +164,14 @@ async fn get_protocol_metrics_from_labeled_metrics( .value() as u64; /* - + todo: - Add a label for `server_binding_ip_family` with value `inet` (inet/inet6) - to all metrics containing an IP address. This will allow us to distinguish + to all metrics containing an IP address. This will allow us to distinguish between IPv4 and IPv6 metrics. - Continue replacing the other metrics with the labeled metrics. - + */ // For backward compatibility we keep the `tcp4_connections_handled` and From dcfb5d5d207b9fad0aceba9aa85c4497923cb33c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 13 Jun 2025 11:51:53 +0100 Subject: [PATCH 718/802] refactor: [#1446] Calculate global metrics from labeled metrics --- .../src/statistics/services.rs | 309 +++++++++++++++--- .../udp-tracker-server/src/statistics/mod.rs | 16 +- 2 files changed, 274 insertions(+), 51 deletions(-) diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 4ffecb690..66bacbb06 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -5,10 +5,16 @@ use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepo use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; +use torrust_tracker_metrics::label::LabelSet; use 
torrust_tracker_metrics::metric_collection::aggregate::Sum; use torrust_tracker_metrics::metric_collection::MetricCollection; use torrust_tracker_metrics::metric_name; -use torrust_udp_tracker_server::statistics::{self as udp_server_statistics, UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL}; +use torrust_udp_tracker_server::statistics::{ + self as udp_server_statistics, UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_IPS_BANNED_TOTAL, + UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL, + UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL, + UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL, UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, +}; use super::metrics::TorrentsMetrics; use crate::statistics::metrics::ProtocolMetrics; @@ -42,12 +48,8 @@ pub async fn get_metrics( ) .await; - let protocol_metrics_from_labeled_metrics = get_protocol_metrics_from_labeled_metrics( - ban_service.clone(), - http_stats_repository.clone(), - udp_server_stats_repository.clone(), - ) - .await; + let protocol_metrics_from_labeled_metrics = + get_protocol_metrics_from_labeled_metrics(http_stats_repository.clone(), udp_server_stats_repository.clone()).await; // todo: // We keep both metrics until we deploy to production and we can @@ -58,9 +60,9 @@ pub async fn get_metrics( let protocol_metrics = if protocol_metrics_from_global_metrics == protocol_metrics_from_labeled_metrics { protocol_metrics_from_labeled_metrics } else { - // tracing::warn!("The protocol metrics from global metrics and labeled metrics are different"); - // tracing::warn!("Global metrics: {:?}", protocol_metrics_from_global_metrics); - // tracing::warn!("Labeled metrics: {:?}", protocol_metrics_from_labeled_metrics); + tracing::warn!("The protocol metrics from global metrics and labeled metrics are different"); + tracing::warn!("Global metrics: {:?}", protocol_metrics_from_global_metrics); + tracing::warn!("Labeled metrics: {:?}", 
protocol_metrics_from_labeled_metrics); protocol_metrics_from_global_metrics }; @@ -132,22 +134,153 @@ async fn get_protocol_metrics( } #[allow(deprecated)] +#[allow(clippy::too_many_lines)] async fn get_protocol_metrics_from_labeled_metrics( - ban_service: Arc>, http_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> ProtocolMetrics { - let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); let http_stats = http_stats_repository.get_stats().await; let udp_server_stats = udp_server_stats_repository.get_stats().await; + /* + + todo: We have to delete the global metrics from Metric types: + + - bittorrent_http_tracker_core::statistics::metrics::Metrics + - bittorrent_udp_tracker_core::statistics::metrics::Metrics + - torrust_udp_tracker_server::statistics::metrics::Metrics + + Internally only the labeled metrics should be used. + + */ + + // TCPv4 + #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] let tcp4_announces_handled = http_stats .metric_collection .sum( &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), - &[("request_kind", "announce")].into(), // todo: add label for `server_binding_ip_family` with value `inet` (inet/inet6) + &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let tcp4_scrapes_handled = http_stats + .metric_collection + .sum( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64; + + // TCPv6 + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let tcp6_announces_handled = http_stats + .metric_collection + .sum( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", 
"announce")].into(), + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let tcp6_scrapes_handled = http_stats + .metric_collection + .sum( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64; + + // UDP + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp_requests_aborted = udp_server_stats + .metric_collection + .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &LabelSet::empty()) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp_requests_banned = udp_server_stats + .metric_collection + .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), &LabelSet::empty()) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp_banned_ips_total = udp_server_stats + .metric_collection + .sum(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &LabelSet::empty()) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp_avg_connect_processing_time_ns = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "connect")].into(), + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp_avg_announce_processing_time_ns = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "announce")].into(), + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let 
udp_avg_scrape_processing_time_ns = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64; + + // UDPv4 + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp4_requests = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet")].into(), + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp4_connections_handled = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "connect")].into(), ) .unwrap_or_default() .value() as u64; @@ -158,21 +291,111 @@ async fn get_protocol_metrics_from_labeled_metrics( .metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), - &[("request_kind", "announce")].into(), // todo: add label for `server_binding_ip_family` with value `inet` (inet/inet6) + &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), ) .unwrap_or_default() .value() as u64; - /* + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp4_scrapes_handled = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp4_responses = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), + &[("server_binding_address_ip_family", "inet")].into(), + ) + .unwrap_or_default() + .value() as u64; + + 
#[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp4_errors_handled = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), + &[("server_binding_address_ip_family", "inet")].into(), + ) + .unwrap_or_default() + .value() as u64; - todo: + // UDPv6 - - Add a label for `server_binding_ip_family` with value `inet` (inet/inet6) - to all metrics containing an IP address. This will allow us to distinguish - between IPv4 and IPv6 metrics. - - Continue replacing the other metrics with the labeled metrics. + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp6_requests = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6")].into(), + ) + .unwrap_or_default() + .value() as u64; - */ + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp6_connections_handled = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")].into(), + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp6_announces_handled = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp6_scrapes_handled = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + 
#[allow(clippy::cast_possible_truncation)] + let udp6_responses = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), + &[("server_binding_address_ip_family", "inet6")].into(), + ) + .unwrap_or_default() + .value() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp6_errors_handled = udp_server_stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), + &[("server_binding_address_ip_family", "inet6")].into(), + ) + .unwrap_or_default() + .value() as u64; // For backward compatibility we keep the `tcp4_connections_handled` and // `tcp6_connections_handled` metrics. They don't make sense for the HTTP @@ -181,34 +404,34 @@ async fn get_protocol_metrics_from_labeled_metrics( ProtocolMetrics { // TCPv4 - tcp4_connections_handled: tcp4_announces_handled + http_stats.tcp4_scrapes_handled, + tcp4_connections_handled: tcp4_announces_handled + tcp4_scrapes_handled, tcp4_announces_handled, - tcp4_scrapes_handled: http_stats.tcp4_scrapes_handled, + tcp4_scrapes_handled, // TCPv6 - tcp6_connections_handled: http_stats.tcp6_announces_handled + http_stats.tcp6_scrapes_handled, - tcp6_announces_handled: http_stats.tcp6_announces_handled, - tcp6_scrapes_handled: http_stats.tcp6_scrapes_handled, + tcp6_connections_handled: tcp6_announces_handled + tcp6_scrapes_handled, + tcp6_announces_handled, + tcp6_scrapes_handled, // UDP - udp_requests_aborted: udp_server_stats.udp_requests_aborted, - udp_requests_banned: udp_server_stats.udp_requests_banned, - udp_banned_ips_total: udp_banned_ips_total as u64, - udp_avg_connect_processing_time_ns: udp_server_stats.udp_avg_connect_processing_time_ns, - udp_avg_announce_processing_time_ns: udp_server_stats.udp_avg_announce_processing_time_ns, - udp_avg_scrape_processing_time_ns: udp_server_stats.udp_avg_scrape_processing_time_ns, + udp_requests_aborted, + udp_requests_banned, + udp_banned_ips_total, + 
udp_avg_connect_processing_time_ns, + udp_avg_announce_processing_time_ns, + udp_avg_scrape_processing_time_ns, // UDPv4 - udp4_requests: udp_server_stats.udp4_requests, - udp4_connections_handled: udp_server_stats.udp4_connections_handled, + udp4_requests, + udp4_connections_handled, udp4_announces_handled, - udp4_scrapes_handled: udp_server_stats.udp4_scrapes_handled, - udp4_responses: udp_server_stats.udp4_responses, - udp4_errors_handled: udp_server_stats.udp4_errors_handled, + udp4_scrapes_handled, + udp4_responses, + udp4_errors_handled, // UDPv6 - udp6_requests: udp_server_stats.udp6_requests, - udp6_connections_handled: udp_server_stats.udp6_connections_handled, - udp6_announces_handled: udp_server_stats.udp6_announces_handled, - udp6_scrapes_handled: udp_server_stats.udp6_scrapes_handled, - udp6_responses: udp_server_stats.udp6_responses, - udp6_errors_handled: udp_server_stats.udp6_errors_handled, + udp6_requests, + udp6_connections_handled, + udp6_announces_handled, + udp6_scrapes_handled, + udp6_responses, + udp6_errors_handled, } } diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index 3a25fd51d..b42a73f27 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -8,15 +8,15 @@ use torrust_tracker_metrics::metric::description::MetricDescription; use torrust_tracker_metrics::metric_name; use torrust_tracker_metrics::unit::Unit; -const UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL: &str = "udp_tracker_server_requests_aborted_total"; -const UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL: &str = "udp_tracker_server_requests_banned_total"; -pub(crate) const UDP_TRACKER_SERVER_IPS_BANNED_TOTAL: &str = "udp_tracker_server_ips_banned_total"; -const UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL: &str = "udp_tracker_server_connection_id_errors_total"; -const UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL: &str = 
"udp_tracker_server_requests_received_total"; +pub const UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL: &str = "udp_tracker_server_requests_aborted_total"; +pub const UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL: &str = "udp_tracker_server_requests_banned_total"; +pub const UDP_TRACKER_SERVER_IPS_BANNED_TOTAL: &str = "udp_tracker_server_ips_banned_total"; +pub const UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL: &str = "udp_tracker_server_connection_id_errors_total"; +pub const UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL: &str = "udp_tracker_server_requests_received_total"; pub const UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL: &str = "udp_tracker_server_requests_accepted_total"; -const UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL: &str = "udp_tracker_server_responses_sent_total"; -const UDP_TRACKER_SERVER_ERRORS_TOTAL: &str = "udp_tracker_server_errors_total"; -const UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS: &str = "udp_tracker_server_performance_avg_processing_time_ns"; +pub const UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL: &str = "udp_tracker_server_responses_sent_total"; +pub const UDP_TRACKER_SERVER_ERRORS_TOTAL: &str = "udp_tracker_server_errors_total"; +pub const UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS: &str = "udp_tracker_server_performance_avg_processing_time_ns"; #[must_use] pub fn describe_metrics() -> Metrics { From 15b802526a8741b021db5968af1b3502ad5a5986 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 16 Jun 2025 11:31:27 +0100 Subject: [PATCH 719/802] docs: [#1579] add tracker demo section to README --- README.md | 22 +++++++++++++++++- .../torrust-tracker-grafana-dashboard.png | Bin 0 -> 259670 bytes 2 files changed, 21 insertions(+), 1 deletion(-) create mode 100644 docs/media/demo/torrust-tracker-grafana-dashboard.png diff --git a/README.md b/README.md index 33fc4a028..bb102355b 100644 --- a/README.md +++ b/README.md @@ -19,6 +19,24 @@ - [x] Support [newTrackon][newtrackon] checks. - [x] Persistent `SQLite3` or `MySQL` Databases. 
+## Tracker Demo + +Experience the **Torrust Tracker** in action with our comprehensive demo environment! The [Torrust Demo][torrust-demo] repository provides a complete setup showcasing the tracker's capabilities in a real-world scenario. + +The demo takes full advantage of the tracker's powerful metrics system and seamless integration with [Prometheus][prometheus]. This allows you to monitor tracker performance, peer statistics, and system health in real-time. You can build sophisticated Grafana dashboards to visualize all aspects of your tracker's operation. + +![Sample Grafana Dashboard](./docs/media/demo/torrust-tracker-grafana-dashboard.png) + +**Demo Features:** + +- Complete Docker Compose setup. +- Pre-configured Prometheus metrics collection. +- Sample Grafana dashboards for monitoring. +- Real-time tracker statistics and performance metrics. +- Easy deployment for testing and evaluation. + +Visit the [Torrust Demo repository][torrust-demo] to get started with your own tracker instance and explore the monitoring capabilities. + ## Roadmap Core: @@ -49,7 +67,7 @@ Utils: Others: -- [ ] Support for Windows. +- [ ] Intensive testing for Windows. - [ ] Docker images for other architectures. 
@@ -274,3 +292,5 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [Naim A.]: https://github.com/naim94a/udpt [greatest-ape]: https://github.com/greatest-ape/aquatic [Power2All]: https://github.com/power2all +[torrust-demo]: https://github.com/torrust/torrust-demo +[prometheus]: https://prometheus.io/ diff --git a/docs/media/demo/torrust-tracker-grafana-dashboard.png b/docs/media/demo/torrust-tracker-grafana-dashboard.png new file mode 100644 index 0000000000000000000000000000000000000000..090932a8c47b5fba171bab7887f7368e70b94e7c GIT binary patch literal 259670 zcmeFYcTiK|*De|xic(ahC{3jbA~hf-ph)imq4(ZVdPhY-y3#vH3rQeI2_-Z&0@8bc zP$JR^ozUCee!qLpcjlhI?#!M0*V!`+WM_xH-}SEYtmj#Y($Y|*y!qfJ2n3>3R+7^O zfo^nzKvz3&kO60~tykQDUza?!6=gxFKBg7m#Z_zNS6Y`SL6@=}C51qshahFSm%4u0 zYt#M~x_b>5+x<2|Nosag>Z!`IvZ6ofUKl>5i?({r!&hzvb)&_M%Om+}Rp{ZhH9Wd8 zV=B%_S9#+!x=QVPDlrQx#XCtVwy$xwAF_XrV`tBB5zd%DP|TNIR1=y|lAWmt-Pp^{ z@N5ybhG@1BmQJKf3}8qo@I?Rf_XZI3q|JM@&HlLsVuUEW#6lQb( zf4zX>K0C1f|2`~!KQ#PjivM}MQgFzp@risTC@6SSFpGcQ4|sBVH6DB34k<9JO|Eid^0GEj*40l`Ti*4 z5(spKT#y0;Vu<}@2?Bk$xaUH_<~ zfd4;M@yF^_74X0Q`-q_U|J)8_oxt}0_rV|}8@RypT0GMqTb?Oqji|DQ@IadMH(SsO zNU?R@6qx?TfI5|Wy=!r*sVUq%)Hh1>o@wb1gA|`>AxPzSIHinXk^PLXdH%^|&^rqo zLcO+(aXPQ_yWQff)a4DXx)?6NWDIY5sBF#OCN1_i7|{sj?A+TNjjzKw>>aQ3xUOMO z6x2rx6w@4y25(NEEi0t#6N9LM&*)mgoCevtMXFtjH_kgHX!jMFzDOymvKp7wh|v=_ z2pcwi*FhjsQ&lYZZ3G>pMmuJy_j7j^jCpe-MbfQcF~mDuZ%SMh^#ZANMR$$Fag4=) zjijb02(N&B2`=&=*p-{sX%NRMhD&^J&5L`woE>``wT8rrp260~-z7>ZQn%I^_6KLR%5eOv zP8nu?-q0FtiNouD_jv}yCK#zhBki5FXby&2YcqS$uZ`69vestAHU~xJ9&X4nPt=?4 zciTUiY&7iN2vo`S=S8nM)!|wf25t4#lWoUt1}_c9A-XdQkHpMij&q*(CO^ZNjmI4J zkXe&|9}?g6hAuny9yFJwxGSz^HdKpnc9ox97!j|m7*?MLV<$X$A!a5lAF)tGBiX33m61*JwT}5N$Gt zd*&Ur9N%RguU5St%{ZSlUr#~?yy?7bY9)jk<(Ma1pyA%Q+A$ljLkDA4%2;~7A>q8CMX;^xiaToX*mP7S-K%-g zT+(PXu-2h}7AvvHTmrYA^IA?(B+X-7CEj!u#u8aa!s^tt=5kw?(Y=B5%FVK3!TU5G zDXzou)^2V6CS{SExkwknC0CP1GyktN-s>q19v}6H-&5o8hV3Z8Ap^puH?-8fy(25g 
zIU~f=yvId)Grn*fZ0sE`{a6*=j)hL_raDg0mps9E#h6Wgd*uz+ci0CDgPT0EYMD`~ zVDE@UlP}0MOT7iamSz)&{OkN4ya7L%dLQH!QcS*e4PK`l`$zW9E?PJ zW082hyKv=ddWV@}yY-0{Raj;rcw;UPSM)_(*x}FjW_>Ocr z$v_qlSI`XYsR;O3kG8_J&)It3b?9(N*U!w6ANXFq1?UkTmo5=){@2c7Fo9(tyNr%` zoKz)BZvbA5Hp~P+lVv@1ZdB=S=bR`^8TzK0H6zWVgQMt87nN->&8mSt{uDSK_cu$0 zvhn>9kYmXbEvy*eFxT{-%vIfmLl}I#xxJ6m&mKdS-Hp**8Pg%XyW<$cBf@bwU8qfb z35WY=gz&cP!U?}qn@$##1Eyk~hx^-kEnp8$i`V+|Wlkx(B(_y!BBQfvgGbtPTgBBF zt;vJM3g5*b7VTL)bom++f^iUjSFbA@IE@t+h&z4ArDDX4A(V(HsvTqNe-m#^_M<+$pop*BH$}pM_3WA15{4q7j39 zW_Q$ZNtoM8c`yDpZ%Sh6EztM2PJOs*{}hZC>g#Ym1Gu`ThEt%VKnJMEVG9h?* zR}$Rs0iY3-gPOi~&sjEdyIW&DPef72#BJSgN1AWa%Y?-cy}sp|nP!ylspBto-5eK9 z>zqvBZhos9@b`MPhy<514!fbTQE6)2XEN7x*uFL*vS<&)TXI0^)8S>koPM50h_?3i zqzAK|U40&1I*0V@+)^}5ekwH=)6E{5{iEr+k)ro%nD=qIUBo2zgeh;9&N)5PSCfHK zEk^$AhW+br04ttdvrn>;LP%iri*w-~4S#J$k2-}a;o>UQlJoUM`R0JJqgQ2WUR`ec ztQfG*M+Gk91r>9|l090|F!^aDL_>L-%Kv6v=;1_=+XCy8)FltaKbu#9g&W|;?>tyf zRJo`$RdniODw{wupQwl&>@eAO7)>VH2C+b)y1(65vBvPkOku@dz2b$UZcELfG*&_> zf8K#?UE*KqSb8bwH6E!z8O>rIMh@ik6XeU-$L7WRr)Re7>0Xg^(#HjMEv-5y2kRrL zVq%ET>Z%}+Nz-b)bN8-l%>qZH2^*(V7fC$xB#KzGY=)dVs2HFM5q!qU z7r0Wg+aJg?A6?bOB4$##sIu;NfUPCls0!|?&^E486WH#}u*tyaa3CbP40wXw<3!iG zM^7blM>p*@M{$>BmrC?zi@X+I>K|BZ5GM5`q&m5> zxB{(5ed*B@52PN-NaZ#bUr-$I7X{B0?SeDQuX?dqN3+wq<0|q9K{F=tO>VTgB&7_e z82i2$M~X#riD>tym)cFyI{g>Rs(1*|uD9h>TRq(~%dW54Y%;e=zgv`fvz;D*-p{C* z%cqO#&R*po7LO0>t?0A(2b;ndt+w#-Rz>3=Hv9jw+LI#1P1HEtbyL6V?ZjoiY{65k zGEMcX!lfw`KjBc>b`r89LdScdR9v>A1LiyHL6#|F=BrG^AKa>GExrCj)(`XS{(_XJ zdt?=tIy7^%d`SDkbOy?GOLz6{@B16L+=E)@j>bv!+&5!Aq#_hHrC|9*3L2SW&LLh< z#xw1^2x;7%uQY=CBIGvOZ>D5>)Sb9!*d}OjS^nG7E*)=!>na>$#X2dp-isD4sFFC% z#3h^D9VA9z`|oaPO}+t(XEsxuziZ{M#!2sdHIEwI(wdSJ4u57i4cN7;#(M82pXn*b z^x23{!+Jxe5N=}K`Cl{8{dU3%QteiqYmknlb{;ldh?NoM|9i`dr4VHhE}x>Etiyx% z=M2h89 zT-3KQ$Z%NxUr6xzUKwRlh4y5wIb3~3RU~k*I@1HIu`X`#SaV0Wmx-vmV9ZN8l}t(0 z#8=PSuAl#AHag6)jh?PC^)~bramVO{6ZMw!+E|`cHimt3BE_kCDW2-3;9;veo}RXR z(`D+7D(pD~vQC{XepgAqYPvqtg2;7#XKY}KM=wTww`0udY^Nn^X7VV45C|cHcc~OFP9Qjd|h$z 
znQfUzy|z^)6~nbjRO*c@qUzNyqtPd6j;ivv)3!0tr7u>By#p!3#gM=l@M4TA+9AINmSW#}UJndztyU_QY8~6#yol9{lLVCyJ@=x|@_l>eM-1JV_{f%_d?S5ML%w ztXZ%q)?Hcwg6&{(5-JfK-*O&Stb+e$c=U({e32?1J$;beQXLd`bGO!M@(WQU6lcYU zjs+SpOUyjNaOs!K%U@y>t`{>N7qYvt^g&qFH-`z zyK35r%btXJIhBiHA>YfgX|D&ZP1OqSJ-Ma+=B9|YCyZzrn}Wr@3?BV91y|$N%H1DF zAL=7#BUOg}yJ)d>>eNDcH_eM6Vp9lz;Swi&nMumx`bfzV&T%hd{elm=)Ms2sn;>3u z*mo_n@l64H-HzC^;pnv5PzDBXMeE#|aW8|yipE#f>u$6yjSBo_AulJpV$B zen>m>+7~gXDwViv_?^jxfZ6HV{I=jf`~iFG@Z)0u5o|BV83hn-uKPK{Q}y@>1P7S#M$ReUXFTONIX1kQsP84zNd#qlq$}^2x3T=@!j)vSsz-&e} z%1v8M3+c^Z6$D8=`D|3zd(5_Px~d0)ToIR-Rx_jScbZQz|^;rF@DK=8f>xry-70@FiAb&h9#T z=Zz*w5%olvMRmy8ypF42qNR&Ri^itqFP#Fu|H=W}8CE$cy+Rttl@%Ud-sXY1mCKQw zQ?=UNDAY(rYoi=vG=}gKpg}08qN1MB(%thq%zV%Kk4yU+!2qxs6bioplEgybCTGN# zqR+Pk09xh04~rN7J7pJ2oqsZguU-}HH610){r9#1n6{U;|1TN9|Bd-lN(IW%|2)2b ze)<1D`SyPv0f1RT|Hlgd#|r<6!~fkE?%@A#ZGaRPb?3E8r4cQo6Es#_w*{vM1qVVW zrpm6wc}r|KE^j`%4EkfA2vO-w6%g;GlaDl-cio0}jg=wnDx@8}kmJK6OV;)5kG0{r zH^BRCA0rSg#qzYhW+`G~E9>~-;E+)GMc&+%S>NSbUlz60qd#eXXe)SLEuZ@N_P;v~ zu8sa^mtb~%EPWYtl}os%BwL`->J?u!B_*`9vuL=*4!B5n-f?K-7=O<4_nPG`L1z!S z1V+0imJUKo8ZlPF35s?gh_(C!$ zS(@&?vtLMi3&5%GlU5=23KDx+JtkOBMvc6ox`fTmZAcGi5-Qrm$*fG(P%kU{E1i7i zs|7blJ*a1OeBc9;5j5l6C|IQhhhrgBGz-r91mjk;Ge=AAIZuX7u)`4zh6}wc%{uz! zGJ4q~oo2ammFa32j%S8*Y>&Y`cS|gBZ`usYJjCj*v0ck0YYxGvg(6YQqy3ORI=JC$ z4ckaX?%oHe9)T<)qv9RAgm^G``7FZdOvV>|KHU9Y%kPt6UX=0xT5wyX@*^*f38Ca! zx@sJR*zx!7A`Z$pBS{{%Q9R@yK%m5ep09YRfaro=I_z!&9qZ_3R}9(u^Mc;50b&P* z5at>Gb5s5DYe#_rdznke*85l0YZV!DC$){?70lC$bUtj(k8QO~GjbimOPM<50cfnbeugv=xlmQOAB<{kZm2I#+o4 z2nHeKih?f>LCF4|zH^k2d!y?$fXpkYZv5Rn}|@zKVrs$dW9K zH1cMw-PAs`#!V6|;iRFG;dZ)pikuL!fZ2E(XxeHx@OAj#? 
z^52r+{#=AC$UY`6KRMQ=pBXj-Hptxqc^M9Sr(o^9MMr=PH9FQj_K zqT0wbiu5i@NI@Zda*r5WiJ459SqRJp;}!=Zz9e}Y(_h}Youupun7QtClDDf@P~9da zh+zuYd+*>fWNYLgY)*;l+nnFSXrai}mgFAk#&g}kni&R&yl3OL?>?vm39A}udyhQM zT>z}kD?$zf&#dDO&b&u#Ozjd@2N_xb52UZ+x|+i6`K-FL$Q?7Z7r93M@}k$x=Al%_ z$*%942qV%-*)~Q@yn2krHo_3Qcl=xVC0AEAFKg4Qy6>+p}&a z)YYpgM94+1j#J<<;15BE4#CnWA|a0NSJ_fEr$brq;KG55YmykUn*X&4J!eQep2dS4X(Q3mvuwX%8)PuYjRUF{P0U{CbeTOp%>$0Bd^uPx<`nS(dnM)MgS1Z`Kh=uX# zEYk!fz}xz5<8%V9^PweZ>~0RutrVU#om~N0{E5CHNIWo{lXMwmC}SB|8a11#KYU{o<+lFgCrg#Bd)`${LNlG$!uPbAQLXtI=D)!_R% z4hXi~UM5)hQQ-W#Y~>&4g1@B0t-(vIil0 zS~CU;DX^eh&sTnEOCb1|p!UdAv znpwtG7;cV^E&J(&{gmxdsg5AB?K@xz!7qcNeMZMt(ZjYmp$2GZ_0cPwAGWhfPkC~B zl*idkJ7cU603T6wNKQq$PB-a`Jx*mA<=)rw)%xzFZ_M{V7F&l#vQ7zf*T}k`o1EQS z@*cgK^(xPMzZdl^a-wYM@-GVe<~fv}f3@Pm3@3--Pm%UTExUI%CdU8P5I<(WBhjn0QsW8kO&mqS4F-J!^+y4mp4o0vO*)F00imV2 z*Kd!vzumdipC%eu)7YXQuO1hD3+*6RZ`yE4bW6?m*XryDQ*i5+*1!SlKc_fmmR08r z78BR`7XjU98)=f7T}PCV$A`V+m91o?%EO3mILws zV0WG-Ly3J?%m+-J@JInr6!%ZejE#)bVcG*wU(= zMsF|%9O|1lQ^yM+XCPKa?PFflaZ!o6oJ@=l6%h2ZM|Kyuba89jlABc@SF%vE5t4WS z2x1_QpfchatXnPJ8Z58X_;tGgq$9=of(X9(-AFhXHnaFEXX$L*D8FXMjDS>aw`V|^9I>JDp8=UyvQ_*AMe?%V`6HJS)^Nja68iB4%1ukm2>B*0i%$%fVR${#{1)kE-x=?}Q)dW~b5M z4(j`QeB!u~e0RDvAk^-Xs%u_{7`6gf%30ku$_WqYiJNj9NT`OR zj|2CfG$(pSnNNmQ0FJ#7ZG~0lg7X<4@=JyT>n=iTRo`mY3d{Wo{yrC4x^88Hx=B?dKFkU=|E@d6w z>y=A^8Jq%@NDZW5qudAeJoq-z_^V-^9}+L=Q7&|E!q~R-!IbAwjnmr;0O$6vbQEyr)u5M9|7-Jft<;YaC@CLdmBWHlU(!qbVlIBF zLaX)7rH)(n#TfX_f(EpZ{R^8oAhS{$?><%T9D-5#v>cA+^>0@n6MYX$oc+GbWsIh@ z1uXx%6O6j<`W0yQjsxDc{RF3Q9KU>*GXhk^@~3EzMd(DDjTc4w))G^(Ej)IJT3Y^W3ko#Y8A+2v7Mfz7pwU0^e zzx+<9fDQqT6aEdx1XJZB6~nC!k)iES5oSDsjeHX>H6mTfl;xht0 zVZLy2S!(>)GOKcs)g(S{Krirm(FEw!vD0d^j~*R+y?dyb=;np70pG|D(YiOaban<$ z3TUYMwRzZ4m>tnN>t7M&^`O=Rb8z@xgA3EwvQg7d^bN90eoH0=`tw&%lE-YAf+v;_ zVrha$R}>W70#g;{fZJyJ=sTX>(=UY*)OPYdA9~t4e!o_asuz{$a~G>)hiC^CVXxJC zx|Wv@vGQmfGgAZLQ>5`N7&~Jnm_b_byO^I}#OXl{eVQ}@2)``zh%?D8>L4%D!O3+n z_+_mC-q5_zJncSzS5GvF0GZ6~zFShkekcL;fq7&KIoh?F6Ua{-iyImjimqyPfhIz^At;EyqSLaNtlE9w}z6(7Ph&Y 
zoc0fD&&WU~K||%DSj5ANQq=N4(-rsTzUv=N@ZC0?_|lRancwlQ8o}XgN-*!gu6#;=mDKWSr*+Gp!OjtB?zPKLcdEZ;$a}jhw zo|&^Nn;128*%h^Io)=qZ;PvV@(epnwnj~H7OB{|oFT4=;V<6R%i$)T##g)bKGE@a5 zR{>Ov`j%vEYo&(f^&_pIjS=!@0!lrR-IVZh=(k~v$w~-06Og%VO@aKvT5kZPAJR@?k@ehz=0oBhQ1UznOC3N-Y(^cc#yVw1!ThX6KD_~f+sf*H;#?Y ziV<@~_#YDSa$!5I9AtW5Mio!SC!;GO?=~tgQ4Yp>DQJJ$>x|D_@R#8xfz`_#QIKFe|`-ZdU<-wRa*&eJ- z#C@5uG4HCV#^$jrzxdvx2*w!6i>*PWi>vH#*K$4sfKkNuZd87y!_t}B7aL*s1j9aN zTB}nY!+*3N#!s0i!;a}L;>OLDwl9M>^U0!`hWdGC3lLhz61TI{+>!)56n1^ZLS?o#KhTcA3!L^AAAI>DY!J z%TK&TlZK|IQ@*rAy~p%Y5JMLFzKXd|B)_hZxzYe++lsf5;HB^K{8GQ>56V)DLPw9e zbbJuEnpB->zeTQf=@-GC~FL?@S1PhrhO?%Ey9k zTLjzN7<{Z1W32F2_dcvUNiL%2jc)XEhx1u3?G4*z5>SD^(%&=+VO=Ef_nzBbd*;L5 z{ii^8X?KO#v2Xg>Nyt8!BP5+5_nlm`F!vcH>Z++dN>E7;4seKj&+N$D%cP50Pl7^X zRP`0auW)w9qQy^DdZD6$dk2}Q1L3K~4I}El?)CGGMt-OdgyH#$AG3;m6 zj4!uD4;)){kHLm@L87`BPYAx$*ewf2}D*0u)WyYX#V6p-4N^M zH8N$6uwW)Q-Q2bOzn8SWuuJ_ayFTo38o%lMhL|v&K5}DzHmONN zFF3RI<%`u6oEk;PBZNS22309O7g^RYjAnc&QD|k4JJc{@ZG39kMZR0{<|O{4rb}|X zhvv64tee94dQVh5@!Q=u!0w%#_;PQHt{8VvUp4LIGN>+yOR_a9UpwwQ4sAf{j;`%Z z?gh&i>DVQ~`;D8gfR4xTh^lA$LATg{J(MiQ1zszDzXGDK$Sa9UY?!{R995^Bkak@6qRF8Biw!tegnJcBo@x=?4W4q> zPpd#f4`}ZnJlS-gbMHWEm%fBLY z9v7Sh^-?nYR!aRLzaq;CWZ$9WJ2!>HvwhbtoOnnTZW=WV_w8@1t)n()SQmiUKTWi4 zA3ybl=72{x%s1B8eQ2#rzUuKQF_f(AGXNYB(C#Cqh=X@DoA*UW9D|`5R6dm=)6b2I zTplgiOK-ZBLN*V;%ExkEO^a=q!XuB7Pvrd7y(K(KOoxw$VVLCQp`lfo&iNZF zGojB4FWb1;M=D-bOZBfnDTL=L6z9;qFtiymjdDDecU-zOC88KhnE$L_-i*t;2NL); z^&rjVkjXq`$D!^nL`qJ|Oe166$?GxCOusO*eC_WzIS4W{H7qzYbwT=lopSni zOI=9g&pM@U$|Vpfb$>mTW95xbNB`vD?D;NlVod`ku+b|9qAme+DzV>od6BUXPayB4j5E(!qAj~)GT#T_yD~4dq?gnZ+wzOL z2HSHvKAHUXPoLgHBeuaO-VYB=Ogsk4$<-6-ew}pi0a$l^!%o}To~-K>`{~VGs`1I? 
zvOCH4_3N%s+_Eq}WSF)rRDz$E(ypL=JU|qZ`)fOe=3Of(9yI>~hTD^v)U(C;eY; zL8ymE@C9i+(cX%Gk3#!3D;0?fFx85yuQTRV$*~l9qy75#uA6KwN4*!hc%$ysM5ruO zr<7Q~WfE2K_(e*KjdOZ5#IypD?R5K!>d= z(@m=EvNE++0bfN-icz1$y+6SdmwX^?5ywlTDPn=ickG6p#gn{}ggg1gu}Y}kuUx|I ztAU*v5gZ>TW5kv_4MIxLKmX|rCIFu(m$Cm?wg)q~GK6`ZbXg-in<)2gpK=|!eRb~G z;9^=a@g8G7VcrnH-6Q>~r>YzwGUBRP3YvrK`$kB= z(=YU#RjWQaA|OV$=5$Cdi+-*T!17{}Ndc~EyE&bi7FybV6P<+~C(m3KuogDq6C^GC z4Xh_YJfu{U6G?YG;SQ!(36_;8r6=%_}C*yXt>3rpqCB>(O21fGU#T_rpgFXM)tb zr|_hRj!o50oP7?8iqQB_TAtuxd&qoHQ@ebm!xdod7jeLJW0$(R@Gs?9@eFFcURXX~ zP(#Q;{C+GiQ+v5mdCBK|9PwQYZHnzXmcv?JQjWoZCo)=Oat$)A*Q;|+p5F<}YpSu^R z)1P`Y#EQ2JXiJZ9qdlWOIeErkw-o>w9Ka2Hjeso&1KWBBNR}8#F1KeX9T)6`nP6&; zY$aLL8MrgLp`XlH4K8IX0|~@W$a#A^3_o$5<1-&;Qp*DieSLv)&P$-@L_7P$*#azY zO7SJ%+1$|(7DXYHz$EQ^#1K0p`@z0ksB}jhX&x96P$t{{%1-OwcyS_HR}0Y8^J6nl zp41R`EkW;M%&Wj^+jhtjiGn?Ez!l^&WOTDfDgv}7idq*fyRsn1X0tCLiK44bb6)`3 zO_Lek-;N<2uXqG&ZR&;SG%I|Z5c$DUOthxCf=UXi*!qk($S;-caJX}(; zg_m_b8bl?`{z*}&ur@Z>OZluy(Mai0luZuMMP2ylBYzLGyF14WVO=miEDLG#Z0KuT z0DoLgq-q|U-K^R7%4kVSemhesDB8H8q&8d;rWB!4Yp_>6%XTG~+UL&Z0Ze0gwCnCP zLt2r0{SalCGn+HAi$tuNldv1`xCIh849`?MhQIV~q{&q{TfKZfNXVO05-?Ya9E@p#3uliftHK0~p(D{BRpm8eXP~4M!ca!hO^5#)K8fu$Z zv;b#2Lwr#K6yRuxL@Dvm$QWI}Wl@(X{XtM`k@egch;zLhZDXsq_H%w*K>h`E(5@TR z-j3@_&g704^FK_ZwHt;g-#0P~`nA=PNJ{I{o}N=VaOOw< zPC?!9EDbU9P-_8k#3=$2tQzilo7d;tG%n&cjT2?=>?O_tHfjS=LWv^SJG@Ews-i&c zzB?cRf$1;LO;+?SWNJ5M22Ye5uXr;lH2#dc7nr5HxX0c305==>Ptc7a31T=xHPbqT zCRoaels>+#S|Qp;aeJmMGvRuF90xYLG?x3J9KpCM%lx&~%ODCjpK3!*`}r;_EuM3D z)m+XqiRWtVq520RZjMfZAtCt7?CzuzU;x9t0@nJW;~1R30Gqz_%yR~P8HS72V0FnEe>3^U?7_0u)^zA9>e zO@;e<#N~p3e8xq|q_eJee`Di4EKTq*^^ikfF^u_YFqx~r8ks)0s|7NKIyRb4t`oLM z9Eis_1wZ>Yjj~IaxOc}2$H$x;B8+(InF_Q41VGnFxW@{9I;=nWcbKO5nvw1|!kemp z;{x(tpM$h#)&>zfk6Y*Ll9cPXikx;!yGh}P_u{DPc!BN>m?I1eS3U>ilO2|nuQ7FjG$q!CFhb8^#9_8q3z zcif9d?s&d4H3e!h#LQ@**m*Clpo9~RKx_Ic5gyn#A&Gm#3;*Eo)ix`k72o>}5#>Jr z$egs7PeaBpKfUS~P+vbX?$o3;nA9co19Q87+jFlSh}Fx%*VQ3vkA;ka)*Lj*LGL^P zno5`GEIM^>n)VDr?TG@o7(fz1vZG+j`3X 
z_#w=~peJoR^wU4L?E0jd;<8@PHxJO+kQBk%;S-b6kIohk3#upv@+LiJ@T3yq^{vyy1Vc(Cyyk^ zFZx=BU)=69hAsU1_%??!uj;K2Jc9=S2XCaE3{xELbC0?QXw&vqcS*3W#CMy)Wi>BY zy?;5Ge8g^=0Mj%wN}U^|^Hv@RYFsOZ0G1$P#oHGLVhm|0t}N@ARm{L8%U=OQaw7&6 zmZ<`9wG&_enxf5n`K!|1k^j_zr^uFtw24L_Vy@G{|~h4K@MB zSde8Cg+djIwlkb(HHW2SA;Dz%n?Uhesf(!t)DhJfDr)%Ksld~Gpb~W5xTO!s;=sU7 zbhG;L>({yaB8OZE71z$1{$%=+_~$4CT&|L|C(G?ftgXX$7l9?mlFR;9d2(p9bby^ zXkA*q56lq2ggn3#`8A=A)k_!E2Mh1xOc$DE_!DhaYoEq-?8G+p(DmD7DF3!XzhL>8 zIxt4pRk~1l?Q)ZB{>z#d6J|8rPo$W*Rg+%eW#9kQ5-R3@{|)?J!>CsDJtw%S z#y00>%u~adv-1I_{=L!WAqc^@m^Yz+Gw(oRe%XYuki8j1*gQGg+82(b+`So)e&hSM zM)J7K1W;zv5k-Bdy+Rnd5jb-di>t%0 zg*fRZ9)_Q{g5E=|bB)s5b~BA`hIFfV)_wS(^N9@K{h;niRqTfk>>E=}<=?67*{coo zWPX2CPAvRdlFDALZ>(d@og$gWn{8kFsbZHaLL-$u_{P(x3vb4bk`9meA;|M)?C3kj ztSg66mv;SU<~OA?jJ^}v`wcVcK`jt)wEi{8H(|csx3?mH#246P6ff8|9g$v6)h*nV zdm7;CwEes1`MI4JC~xzL&4vbjgF7>dEXZ{ddQC1PyqKv)MD!DXT#ne&mQj$OZ9enn zL=F18-CvrVW`=H?E-RXne65=wC}@1d9* zr4*0jxE8KT-8zQ3(=YbWFUXSgRH1ss+nbq?Q^K0K2r!wO!;xEOGsJJB-zASne^5<9 zOpNaOCN*9Py}SDC@W7nIZVQ{SbLiG>@%BmN1+MhOpSb$1{Og^K2fK-XRExEHS{VcIj277rw;U0hQ@D017s-vV?WaCHLn?6f9}n&u@}^ z_x}EX4+@z;k8V@eRNTJD{V6=4D7yNdh<~$z(r@|;S{%#9#%kN#?lAjby}Sxq%PY23 z=ve!^p1fUk8~6vLdd5L*d$WM)mF>S-lKX$kVbpW*a9MDOuN-efUiA+K2%|Cdbyv4h@7Zci^M;QiyBV7ghj6@lL zf`VMObPdlQ(o-ls1r02EB#HV7w^!T|w-vR|UQX|_+xp$}ail4M%T6HK?3Z1R$kd)$ z>m$SVr*P8u`A;H~hhO1eE?4#Bme`HpMl9f;eod(X-<;88{&g|YMCm^DZDJ?LoJBI> z{FZ#}=fZHS&KBC?oJ11wSt(A?O z!)faa)6|a{A3m5db9&5f8OKrxryADv1>2xy6aO-CTfuDQIW$)u;8E%uWF`~KSY!Op z`k8mPo4V)P&YAVCH!EQ(JU3Eh|Kma7(iN?1;hfj5h09LSwovf9PaAA)c7tZbx1PqQ zb9dc;WSM9_i#W(M+(`#tGJFYMn==|eV(Pk9y+jmzBy1>O_`|Qd;a*pQLw*i8^_9eI zK`WA;RmxW_z(i#HWnZ4$m37+AX6}NX!`liZnlsmqsu7{_oyl+l2ex7jO0_a6!#s}M@<&BhcKkbut{Rk6< zMTFjG{BwLT>RHuE)UVujyCHWbWsa?{)g4<#VGr~h?vc~pTYHh$nW;ljpa+$x&We28 z&CP8fX9@^4jn8-w4}gT|=jGE>=9vd3e^@6n@PE;C&0%%E|39pjZ5t=swr$&9SjKWK z+gi13+qUiHTHadyp1z;!cdqqU*R^xb^W69Q#ruguDwekl+<3NEa-6>IPTVx?(9Da1 z!$>$4b{U)1@6HWB$-B-#*vk-1(p9JYeOJPM`ll`N%j=HwMMM$tbLfqO(_PjBR+iEtco7hVqO__gC 
zX9iyG&gk7Jp3_45CJtg0$>OFUS5oD$3^Li3w zz`<>6O}MWH0axUC=H(QE??>!|Tvmv>WOny#Vt+)MPyn)#Op$_8U)#ZZr(3HcrI006mCw6j@U?! z2eY?5*D&wm46LqyNdxn45HEipRxds^bFwE4)6rP>QC1MjPB%PlHU+lnWA(^S?x#iS z_-FF@w0jW7P>RRI?OU1s}L2Qd4jm-&74WH^~OyWjr&;Y zEg=F;#)fPxvcXA5oX|+Jru+oSiC43^(y_sG8R9KIegA>>yt*sn>w81tjfXe4u{&fD ztB^ip!2*0UXVxIL=!$4Fj!`-=o4VTM0XM3lmK{3>y62Y3->h#Y`rk?47W?V_yO{N6 zEgtKaIVDn6XyCdm(Bi3w?hU-s_Se{v7$!~VvxWgIz?rgyyGX>qz_=JWB4RS^Fj4F4 z2?+2e==qsHF4CkQp7T7d-Wdoyw8;Pw@rOCCk^|ut47P_m;tw^8repfgxxn2agq+zo zV(frstYjqUu6980<`k4>WNfpwb>0{j&H8x(d?w4-EovwzP+<_tw(lR5T_%b_IvN^` zkq*A*-R0)mz4yn2yOA-dlmz+qK>hhK^~_(axI+5J{qj6dED)aqo4mG)f-j5Bq$1c)KK_Z zZ&vE6Ly(>`7|w;=qM)K&8!TuXI4cmcpUd51+=QaInj>5*x%zh}PfvTD z2d8&d9@F$u?gU*Lm><3Y(BqUdl(EqwW^3$Nsjg7-E*$L?F&d4F*o=IZ zIhv)|m*?Z0cf;9~Uh+)j#E02B^80gWZK>y73{bEY)aVUAmX-MULsh?xsKOq?fCOhK z(V@&UCh#(qAuH>;143PUM>%C^2EnvR2f@Ck;7CNMC_5u@fc@$4C{$bl?*dk$5n_(E z=ZXo}Bt2yN*08KE*v+JoLMor=EP_FhPpt*LV`8*jhEr9riHCadAi_D@Dr&8JQ0x_&Mz!Zn8$v&P4 z|NK{(Ht4c5^)H;7_(X|TWlgf#*D^X50bU8N#)>+>_vy-}w-tNIOZ73)KU@mUT ztrC+N3L)v#BC_K9ysy5vOQnMUUiNbmx}$%y7~ya}bp`oAUj8u8upJxdtSp(Dt^bXW zKJa`S!%-O13y_@7+BVD@gv#oQm$S1QzPIMW!2(lf&^wAN$8KJ;frGRtPnzN$fc?#( zG(|(*x1<;N#Qk!5ocS{+QnlN$!hQm-6F}}HzbnlelsBbhG9CD7;ha_-m#^FuFBAkN zv6q$@t^)8TD0u72s?d%f%DMJ-VnsIdy>K30nYCgqJOYA3X40uVnXfV+q<^8$R2iTv+eow?!sAmwR!;9s#b9t?u-}2$^X#z$6va*sw74OiGO!Lx zgcWApMz*(>q5@y2;`t3Tk75;e>;ObM;cw!?~X0z=HkY>fM|yqksOPmxl&B zwE>@qPry(Ob5)f&DgkD)SdENW1Ja zvSKV5UhOh>IaJCaIht>_)!eo^sG#Jnr_DtJt{zNZF@k!=B;!%pFTU_VtyOMa|gai$&23kp{z8oCK}q zg9@`o?OnP9F(@GQdq&L56Hj#kz1J3OhdSBs7zp=62PRCCZ9AXg$|v9Q{Im9e5K82B zYPYqm9my|$?>Ro&qQk%dfy*wSQ6d+OvckdHeGL+%((0_0W^2YYx=m8hXt$W+s zSwv$`j{B#Rus>aMYz05c%EjJP*!*KyopcBWk)vsKVO9U;T8!#*6#3YXooNK$FniM;VY06%)w? z7O&aPnns{`k=V%q(;)!-qZ`AQh*DA3{@dzqM#=&SCfI0$|M7F)veRQjx7}lN{xhKC z;ML`|K_tx?fTAtt4XY%o`5veBRh`)-D6pQTdhn6Q&C&K$Y_8An{s$ckW?t88C)z!B zm}aM?=L-f5wd-Gql#a5!Va? 
z^9Cy|jX75ad(|Wh^W`4U%}Okmh2XBcG5h#$iAVEO|3(Jx z!}IZD1)^DRxjl4CHh)CHm#_Fu7Pr1e`NC^3ffo1-p-sp|+AXSgNI$Skp2j0q;_{{@ zIH=%En-HQBEiyX|=_$sUuz%*~E91TM2*vJAng(Gmc~GfU^1mCNrEO z;-Ft&O{Jo!@Rk=;nlV1PCEuS<6Ii7dma17tbYfJ&m6G^y~p z#G4q5#7ekqYu?yV$|B5HORM0IsmE8`JiQ`=K)6RTybF@JSlrbE-P|;7oL_fM)vIdb zlkUbIqPncwA|?U{M^%^4UeBxp@0Y-?_4T@GWw?7Sg*Cb2>a2Q(D-iqh1IsKWiW;<8 zVz(k_Sbn06G$^`N#uBlldv@j`QG`5`x#|QVdTLHhq%>I#s+2&;tO0m{zK7>U2JbLRLUzVV&j1|s4kHlcghkW&yMF93)nH{_~5dR z;cS{QLYcBObTaVq8rpM(Gv{0yybRQTD0TuaTL&bT=S)$)u^^Sz`Ern2%@4Gf6wEot zmZiQ9P3o37*(UC;8z`*$Vc>=7P z3v)!zy;(_%;{_mpimy4LkQ-TmlwA)LzWsMvR~R&vF+}0d$`$%v(BSmv=J@h?XpTBV05qGJxL4Y{P%i zu+=L-Frt|f<>d`qa~7$a0>pQj@@mHw%3p3JB}0FFZ++lBGTXfV$I3T)x9)M?!29P0 zW#qTPK=zx2KJ@3~!P@(4j+Nzt2@)0#(fBdV55Sr|d1yF1L!MLwFgs1L@%}|u<}d}= zP90&2vaylSe8rf6s#<3AAK3|0@^~m*&ZoHttSv!t`ANJzecn*=G4Y_PKKLxio0TQA z;o?BEu(T|U>u#-L8ttmTUX*zuxQ`AyApjV%q|xQr&65Kp<#q^4tYphNzl%o4(y6v~ z>43KQ9-DGuZz8*AhLA}3qow9U>m%Vw129=q{aI_tF0Gap9fgCV z82j)X{wkdEaBx`68B_UB8F}^o2U~=CH%NqYM}HR%yq?y`Qv(hv4#d3tHjW|m3Oyax zgS|Lo!=nfk1A5%aw@Z_jAOr7zqjhr|{<*nNx$T3fmk+N1&%17exW?g*S)5NiU0vqI ze)Bj_v96*+(CyafH{1iP)(;V+n=?M~m(yV=V@O>^f`EjHu#y@Fy!Uj70{CU-_rC#2 zsOI@PQb$wQovI37GP!j4!9$;r6q!B+us3u zk%1*qpsA_VY2pa#Cnwm=D0^IAjWO~kQmP9_?K*|CPLhpE`y^l$W5^z8+ZF`UF926F zq6M=jKK`V#rko{NWrm)KPqVf*%8csMD!w1MYQp7LL$j|BZi&UDC7>c@xH(}yB+3)i zvZ505e#f{*gzKR;vH3A9)QT~xr@x*4m;Dtvj-#fM|$%wuEzV!QR|0?T_Ax|6}NN8^vI@N9{T4r03V`kzCon zlI=HhGs+976zaJ32uuAE7`oi!9lHB6{8_1t&%kE?I`5EAsLL(OOb)Mt zlGMZu7C7RuB~0eT9b3C026~eSA;jK>{R-atBCIXabbC3>u? 
zV+HKNh*NMKwTT}X+k`Xmm?7=54la?N^A-YMjd zG=P;zEtHqVDlglU--h|#cxfv(ga_uO;)b%*eqpO`)5|TCo;^U0b%-s*;`@_?^q{(Y zXvd)q0!R_|sw(Jk(91mcm^htAsCi?NuRW3T7`DR?9i>_&A<8~%N<@$|()*urvB|N4{A3MF)ve$j5zad`F^>&C|tkey- zuSI=3#ma(!mjnn|00@6wpAB_i`Z(Lq#IE)n*!mjZac*8XiFHiG_I-f^%r2Ab$WrHx zjDc3S8*ZX>fT3>FFoniHPL zBC4ldi8qp5ri{+J#pV4OMCm0kW|E9bZ-?2|iu2EGX0jST;85Y+8;TETN2w=HQ^7u4 zQadwe#5XIUm|0N%Am7Ha2}acTqXl66{_ru_)oI)2P4-_eQ)nkXH)&mtX*eQI8&29!r6X956xFO9TI z?_Yx|o-W92gVxjkQlk4uRGa%j0ycY&;07B0_8k&zd;dXUwEz+2$epCv z8Z8p&KU`O5;=Q^*DZXB-h09Xu3Uz+%D9AbJSFJAQmFEC9mf&ZBvQG$pF7L#{(6=4X znvgv!>tRMzoS1{&qNj&H;bVDr9-%KE9W83C8GOBY&*gnb*Nhi2Sq?f5$JK*zaA|*TNkn^b~)bd zBDPHy{&T)RNZC2H>DQ-U6vWd$HlJ@pyz|PV*wvTBK_b=0Z}7Jd8dQiK%hi)i|71(4!F~0}TC~Pq#r%ME>OY|oW?u1;Kyf&K5=x;to7mM; zBli+<+w=6X+px@*W=M|7`jCz1ZT+=%7Yk^%QE+zn9yG5+veQKiOC()S;t_-MwHxFX zL1Sasu6JMmzY8#c(M1Om0hXNKD4@w6flQ+Jdxb(0DgK#(L?pg&Zpq(NHCNcUF?;B1 zK2MA%r?z?KNY}4LE6J4wLthv}TC#cVW#%MzbDwqeuO}vM&*s$i(pn-|e?89~6qIvc z5z$tI$eCM`2dYYQ`^3@=qVvc8*;iH#Xi(J$LBvhQ9Q-@^u9Hef1n9Mi^3uA1k)<1a zZ7l~6E*y`(tdD2`cvl3W+=MhojZrbDtYYdxu-<}D9w4x#lOdLfa=`+H=?FzYiyn$^ zZX1uPTIGs9kS0u+>xdhbCLpiyFm!G%l6?`tH5Hc5XWiM^U&)To%W#dpqcO4 zeEJmP-g1DK6ufnEaKda8^s0>b*NcPyGq~-owjC~c;_2rkx97*jG82>EOapv3 zug99cw!aXd=f^ZcH|yk~>mC`!LkPp%Vy#%8Uw^fwoF65k0Xw7UF!zT9`>bO+T?1=% zd9z}>S3%c~R_bW@sR(6|vW$$rXmBmm!J#KjXqL7VvW~;3bb_QUe-Yi^Oz%HFlM)xA z=$}IA#i-)+y_zL$&ooC=`)Iu-JN-9>Jcw#obWSFZ7Xsuo-Do{@m?Pa!exxE6n=(9H zxEa0DztP}`nmH8I|0`|xzWrN;f7=u(^6ERCJ{#aBhrhiv=OA4T_4|w?V1&d#6OLL9 z{&{;6$8b*9i`dCwBZd&jOb$nB31l+!EUF!Chd*;U47pSJEPIw7{22Fq! 
zyD|v-hDl=NB+JOK_wvmW1q@i*m3mycd(OHt_#xuJ@_IhWz3unNLoefDlA|lXk2#Vj8eqlb zpR_09!buqI?(ng?f z&y1yl_6fl{TX`a|ui^Hs+|<%Xf3T!Z<+oKM^?7r(ZxN~agwEOHUDt_l{806>_do-_ z%Xm_lyYjs(js# z6xU*Ha<2^vV$xN=arY*!Nj)JUfk7?s#_o4S&ZSdz))SasWjzMK7@)ti%$l8TC5T)t zjeA;=xcBVrCd@hggjp?nu1e_D^z)w{Z}*buG;=&?zFRM0nakuusO$KO{Pc3?5dgR~u^& z;%!qcc!CCM=D4M^(JR6BN?plrKt)w~p8|u(<|3LnS=zvyI)s!eEc^1Zsh$|R2RnO8 zW!~o3H}NGsVfamfFL)~)O*q>F*z7I)d{Zm?z&9mOVA%ZvioDCfXx}9Iuhe76UsFEY zEo}^30P3*H8lR&{p^J@Xd|-mN)^j}oe8b|6bK(6hE|~OHWmcPG6fRMYN8j%kw#0i@ z9CGd;K-ht-3&;}(uA7?o)(-grd6=MQDETa^FrcO<9R?l97A@m@=xZZk87lWVm_~+l zLSOx~WYuGS*@v}z5 zJT6cT2>BIRdQ%PfE2TYC%KZv)0POj*^998r8}9WafEc z7K@f6L!2?ZMgd{3doCzyj8cp3UQ-4P<+h3J!gltnRH;@X9FPF!>ZhxH!vPv7&`Wn1 zt=ilr<4PdzxKn*-Cti1yolTb}n}m~4w6S*}&~y~?TUDj@;@Hapvd;=VPy9cY@ZZk1 zU|u%EICCYnwa529W<%vSqi4I|ECQ{RNa%G`M2~oTm}QVDm8PjC3gS0yyE6Mi9f}Ka zdrH1swN6whD4mG!@3Nw6%(RuC^;Z@}ZM$hdUjZpfYMA6}cVvou*>-G}-XuJQE`W(% z6Xjy6cYJMeS_^KoKO4VgR?!7Ybo#5Y_KP=vsr336O7=30b~Xv-Jo8J(_q|_`0z)O6fld7bq)blXA?~jRJ#$P80>1Ydg(G4EEC^a{!>-Gg8tHdge@AT1D}4& z2`MOAt`OovkBOF(1ng$$nWxib!17zvYMb}@nR8W3$uIxv5yF$WgE<|dgtA1OiQ}xO z9)Qp<=q^4OzK?I)Vge4ZVZa$~d8SI+Z)C|6eMRyH11~a`5WZ>Rkr8=%GVom>qOGnK z-o}?@`YOoJdy=N2CdF#uTw~wre6LN639RZgtBo5 ziW&~QWZX~!a1c!7#Lyw!AFQh@MqX91)Yj;yOVqrqZ9m0L%9GfkyX*rg?lsR}0>LBK z14;&bX9YF3W}nPim1_|@KLNnTO&_yhNtxIcIm6DOB#JcDV0@sjg_PWs+dDYD7V%0JBzyF8%0jfqP+fb=jl*TB2LnyGg^o%_+HD!9ZdYPeK(?u2yNFH zgLg&hZ4 z%sSy7p;bⅇ-?nC!6Cry5iA(BzS7DUw1EBFmNRpx{a&&2vzaYatU0GyJh1M+`^Ce zq)8Cz<3dA=N0l`DVC?e@Dfwbet@FVZ#ct*jxZf=cVQP6}c$H2rSYx{9Ae5_FcPg0DJ;HDFmUs1`h$& zbDuVrWJB@$=bGD4hO`Z|mRD~Rq`Dw-oRl*BwTysSF+H=gR3FBhnI!UG&Kx)(GTk9* zOVk>^hsMQ!nzh7r%E-Ctc?CyZjAfNVG^7ZYpf{RQ#DEx(-`p?NVL zcjSI;>%dq^cHu}eWfQ*G85}HX0YJ^Lo)YmekW>zCY;?$LNKf7rt-<=*|Dp(67!D0r zt*hSp`>bo1wTB{ASw_g8kS=ontoFgr)JKHkwMt(uMsa=R(jfP%A2v8CU8-1VB;ja&9GVrsl0#nYz|HQ*sXoRHVYrwQ3Q)r8mq-h4KK;oUq&JkH za8)zYC3zPh)J+eG%g3WLHO1}E*v69&bB4Y#HNE6e6aG3(fUm))j`!EMir$s1SBn7U zTP52m1yHksyWUz1ar7*@zy1iog2qsNU7y*pC@RxF8kj)rY+k|A!IG&n_|ALF{#I!Q 
zzrBtn$q*EiX<#@E6lC|hpLOVEIA2rMmQstlbzwKzcmtE7VSTOdJR0?!Gf_V9C66lX z_UZO6eR{2e2HnGF_SCj4^Ql2K|FC|Dd%5!)hfd``47zr}<}|pyf>=wK52V1S_G}k8 z?1EGNXbS+{K~Nyf&;BcDpm3%drlQ=+KD*O|9&##C9)g*M8DQ)YOfOWf?8ludyX#J! z*9R1$yuRiW_H3ci+h%bm>J0YCfBz`kNHQh(Q8aYM{5?3i={YQr&!2XNqE%VNY%EZ!ahlU{N$v|Jsh_ z2Pj)Y!uT_ULXTdE&O$JE`X;kgr1G!8=N%WraQGN~O@KASiUP6?%?9F{gbWv7H`7l_ z43n8}o`98YT9BJK_T``8+k7fwYxO7CReNG1yV;S4qBR7-?cqfAl z4yeqc{sKH{K4>NB*u73{g1^PDo3BrqY^h?Y0H}I7+hU*wYednIbnBW<0)CFf{_)`8 z%T9g|HN}$naEFDIzWrxZ1B|rtpUk?V%g9#4K!2F!RfObQ)<66kQ6sy#O-Qh8dCI!2 zg@~+NABX|mV6ylXCEkV<^fthd@qT6}${-q{gT!}f=s_gJdJHMJC;OmGK&s43PJOG!BT;`Az5A>=4O}z? zsH+clPN;6L-IBlpPyRw!S9p>t8^xuzdk?bVW*h4F3iE1UCN?nnU1?FiXT4P$`rd4^ zl<(w>TQd9g`TU8#bpwV}^s+ZLfO@k)3Jy3HR`aw7^i)901eL64HWAoFcX6*P+i0 z5@DOBrAK^gVDz`V6%+@bqM%NRjMVst93Td_vc&+q0o-1Ir!+2i?ABCBxzdKSMbl>M z$?rXzetg5#D;o+_i&hW@7!$JE84u9s+*5W%BF5(poe(hm(k^hbvNB+F(*GGq%($q- zT$p*nPxY~^u+_Lz_gVb!Up}(|Y>rZf+B;nbvj=cPQ-_H++zvvU0t~$!cgao%$5&tU z9Vr#p=Vv2&&N>kQA!e8R7?}dBo35aw^h?>iS{>=n;=jk^hst_jgxzmMd%l&&a97u z3vI!t<&?ezi%-43^ITcL;$Xgd6l#w>@@#YqG`$E); z=C`8y4Qb@F1E|s@0#+=5r#o8R5Q@eyJxZRs^dSM4B;i7Kh{&XgM26v;t(iBR;X2yj z0(=8haXR3%&>T_qY4GGHYBc6w#i~!xA>ooQ&xQENPp99?;{o2ofIv!7;6Rh!USbB# ztuQ+Z89Fn!SyN9RDGn@#&yQoblV}&AEP~YUBRu9vwv54I+s^aBk*uj2=1m1Mk#en{ zFY1hWnfX+b;~%2YR&Z_&CsWj>9~DKK3l610LFCw@%!Os|A*U>=xvZ`t3gM!Kfbk|V zNSC*BsEt`eqs{h+1UfwgsCW)`^$U;~6cbQVW21TajGAWvTjrku-wJ^am8$5!IRs$- z%e6_zOM@)_tw`1GEElRGN~1c#XR+%~;8o=VuUbYF44nuisxH*WT@&rHOW{MMrE|7k<2p-XI=4)L^rn#xp!dihB%6v zm^_WrR92>88s!WCPNL@#NK%{l0^H^^4jgZ`qbrN%h515xyH+eC3?6AO6ptFP{Z5}UHwtDbO;61fGMYY`+=1rgP}I#w z**uLsq&`znJ$)1n(YKloC-=J`HgU)=8%&I3+gPrzgOo3WyobpzjrqBkI@cYD3h0YH zS~SmEs#W5Asd9`4(I`*)ST9fCx)~*Xt0LrR)Lotk{|VyG&>u7GQ5>gAltku007UrX zCMpYup^B#iv2JY?Oj4Q}qhi=sV^{G66O5k_f158o{-A;2s@dt%Xwcyw;!jdw@BeEC zv%+;@ql?$eoNDxlC0}bO*G5^_7McEDU$0YZt0ae=>k`p@;@h=1l{F>~sg%Z*`j>S{ zQXN5-l~PC|Oq~mpj+tFqxtIK@Lx*$$j$1|+;3M-u)fd_CqIqlnyo%yPfKYdU`^Jx>`m0-*$-;WC+2AikUPJ|Q?cM3nL!jX?8_ 
zKmLb=S%aK|H7kiqm2`<9)J)PG6##C?5gzv^!j4ga&7IvqbUM{#6mZ$i;fxt1cq*iF z*&gbcl!%g|AdgmCJH!(@34*$T%*LLI6|6*

N#W8OnjQf$?#Wcy}JV5c&0xN-Wgx z6Yg z@^URF{r)_|lp@twFs;C2{J$dZzA40xZCxsF9`2v?&+oti6bodxkDrFtzo*Ka# zqbTWS)}2sDT1pvoL=R>R8%BR4O?b|Vk%gpmQ~J`4xDL*H)E=v!vY08)BTDlVLey;g zktyfL7+qxs^FASsR| zf5wW!3=};Q(J(0NHZy4n_r<{)^I$azz4dcxhrWC!(t?o`Zz_)_y8peyPU$kHhP)r2 z_)7u`#GRwOxKAXJNL)*uQC0g0oB@-g>H#{t6O7Q691(7GQ0kasfgYSF9^%OyYD#GF zTSonkGf^oBv>8MfBndLyKdJxlROxe3den+(g#CO5&M`(3!htV(#d`NdV}eC@Z4ms` z8()oBLDq6$c-h+Zz0(CQ{~t>fT<2lifRGB0LGTG(b#$qT#N&rS5S@8IHD1jYmb~lT z#EA+S{ckX>sZb?BFfS|E-rVQhqHf!8T?ihdkylP+>Jw>}7@V20zbcPJmtCn{j#oa= z`Xr?+Jx;m{XiAinKyml7_A(-x``mVlD^gvhyvH^QF=i`Ev7Wn4YBNe#f z068no3h@=!aO{c%pCMg#tfmG}iKc#S?nsg{V6OPVCg@a|^shs-(*eNnM7x7xZcNxamQ9=qEcsy;e( zehD>LgK;{55N@@)ue|3z(}}3)7mKZ-f!YU>A-dEf?RePPh@luu6ns1Ws9M2rsUg_mU-OcnPrTNmXq-ebzff*>f22BVG}X!EEh%kt_e_gMAtJ;{EhqP%vpIzY4V<f3h_HAMnZ&9*;}r(c&IY_Z`S$N{aN6wSK;{CK0Fl1VDZ? z!FO@~Cui~``ITVZ#u5T%?#RYY1x%(F*jbCqrUM2u*$1yI(ePd=Y3$ ziicD)_C20|w?~&_iS<90tGcN^8o4;hZs|4r3o0tKqCBS2q>pmHPbM&m${yqiSB9e^ z=m1mnwczMp*mf!y77ry%Wx{YpeG;s@G$Ck?Kh%lVOr7lO(nR_Sw!3aJ8t@Jf{uq`~ zBz+F7XBX^I)(@(=1 z*p^*8oEIGgeD=0tTc8fc$Ob4Y1*pS2OZ#(W$=vUM?8{#SuHHcT|EX_z7@sg-(i&x4 z$#|2PWRojA-nJ+dazK0IN{R%1uI6I3qNf%vDpPi**yn+DXOWeqvj6`sKyEH`qTK3g zg7g9OOtz!?Wt03-{h^pDv6wBdP{1~0@ZrR{F-z;rifoIhssDmHOrGNlkIfcwwt1#t zIfdJ=6AHH2QGZSINv3H9CELgs9@yMh4(4SlQHbDm_u)&gA_}55b1K{Y8g)`Qnjt ze`V^O5J5!uo)mNHA;%Mj)g)#H2W_q|KgfaO?86~}2ibx@9rH*179M6{m&66V~ zt|Hll;sdZ6SI_rbXe**7HBJi?mCAgOxmHFsVh<`U^i4C8#>qY6IdYV{ERW!Ec0%nB zz@%KlS$BTcVKy4qi_z)||0f~oMJtfZ%#*$}^i0iC$^7r8B&&A>i#`O2!2r$jc}y`J z)x$+dcCHMgv2Xm*OO9f{(Ob1Q_XCrjU34YWf98?`(q!xq{U-yS@c`FUt+5xtYuD-Y z{>H=-6A>@1z5~FY zzC%$yT6sS>FKp@?>U8KEPqV}KcrOS=;kGxX!_>l*5gOb)t(bxeG- zGizg9B3~Nc7J@1xJXOUBc2R#by-hbku`Qc3j1z$dPX05OMpU3kY&4yNO9`-d@bvmaw?v|x!s!U#3W=U87ll0wtOx$MU%Q56%|*$zmgfiCZs7fAMjbxTA|aswC$)< zt`xm`nhT+hN2yXDt--Mbn?8-o@2xO~b^=chpsHC(mHE){%MhP%B`Z*hT4d4Am?b%7 zL@_042WV)xk>g5Le%gt_BQ@lLT;OIQ_`6iWBn)#8+7uK<)efn3Dj&%r87d4|MZrTh z)&|ZtTZM57|A4#kX}DI+zI705`iqnM(817%PYC-%3p9%dI-s70FbT_jy7L+(+W 
zl{m!8Z^x#r*DG}HjfTmlHm#!LlN+uIty%YhY=@qU3onKYp%eMJLd+ai4#jW4q zhA7#k+w>=OGAfIl3{~0 z8BjP>(#V4KOAlfPJ^?AbiYuzVX%e9(`J!iu_S}3-w)xRD$3&6y4jtbWc6>n3eul(v zuOjyz)w7D1TTU7&)EQ1za~XkFkZiV7Z2ObCF08}D^HKnl#87yW(c$6e@)zSvuPe{# zws*iC1X`R4Bujd{cgg}ZO1gy!B>^Fa@e{vXJ(1m!@>G;xU5!#PXbI4-$WqzGEK-ONyYB6y z<4RZ-O=%?GS2YF?R}D^ULP2X|ErG7bwZ%C-^Fa6H$7$ly=Ooy2NOoa9@rGo$^f8Zi z%AngPizcs0;Ix{otqNMYhc@$2gAHvx8HthCK_$4pCl~?{k8ExNDpykC1h0aw+dsRK=6A{2@GQHb46Nr*ou5m*ATCFG5SlVUv?>oyn}04bnAKT z6(b|LH+8=yD4ikN)GFBYoSg&r>YHms3`dB{ZMv3p(TFER_I0kNaRGJ(j|a=5cDP+i zzs3&a;WCel{}Fo#VBS9Jnpg!zLQKDe+VxS`F%7CFHV)Z->xNe89+B14ADebK+O&A& z(^MDYjx||-$x(gPZi5a>Aqvkv`qY*ztLUn~7o9S)hdOb89UfD!l~i{sg3BQ;_^&hB4CqS~7{bNOZJ;uOS=Q zSEkmx{{-4`801M0-6HZ}&CN_^=N(LK-^qq7COkv$N6)PS z4gA^!vbo!+ElzD_LNb4RQ-6@Q`FHpy{pLI3-It3*w146dS>LuNT1&0cz(ea7K zyD=Q$H~$Zi*q#}Bqw3Id!ECi&b_;zPmqiuUrcv#sqGEkAKl>vkfUbPtHtz14qVotX zMuAD&o9k$N<(43d%%_VElLOA5Bqim*!r1PDxC8x(M4k~dW zR<_ZK2``rCkp5D8?G@GCfSG?oFMZ>J-|48m&Es0MDxtH7uZ&=7DsH{tM$ZO~TqE>* z;@`R8-<5xSs}8A*8Rx>v4Y_P{uzE&DQ`PRHSZ5wEMDr~(JOSPj&Qm z9MtF>l?YZNmeo$AoMg9Xchs4e{S@r;%d_4LwN{EXL$`AtJF!x`LdBv)&abB=YJ@@$ zoReo}-K(e&`eAD~{$O0iUe_tYT|Jd%piZ+GgmSUbZIzOP0<~G#0rudp1|ymM5Qy1< za)rsYH@=nC`VECVSuQA{D3#17k7kJC-h-_oDyH4 z2iyB!9D#a`k3mVb+v53mtNCX3kX0Sy#89q#G6wRiB;bsCmhO13oXH2e( z`sKaJnjJ24bs;cs|3uqRh*go^GT%E0y1YJn;bh9RGn3a`Z9B9BU1pa93%=it*{xZu zoB=&YPliY)`1w@_C8l+%3Z?{w3Y3_+7n;uuvZ*S-pmK%_#wem5OfMe*EWjI31w|Jx zaMmnXl(x2mhfY8F#KY93_{Y~l__jgFleSe6*D<3I7ZZ8OD)9h@`Y|NM`G}1KTd7+r zJeUOdxkb;EyVtKWDlgX=VsQJLMW!-Qs-L*83i68BdoRJ7>0A^Tsz(LckVr$ag!3z( zHhFl?nD6&tFZ~*eXq;(*3|?T~Ofzb%0ev3Mczq{cg*n>&K7O%$u$!#tJ9V{^wxCJ@ zQ~QK2DKHTe2kn1WC2jANv5-@H6eJcz8C0Z92TX69hi_3)-YWSE_96ZLYws?E{*+xo zHPR$_)PnSK9-`s6S2im8Kqyb*@_k{g2~bK$ln|%<3pEBewVd z2z%?OD5Gz0d_WNiQE3!Vq(NGgM!HjGU`T1{l1@=N9T+;KV}@oJVF;BTO1eY3J0$%* zeDA&Qd*8o)&swbI!kNQ!_SwBZdmr*#HDFead7IFzdWZ=y$N)GH;&vUgyX-&&GEc}L zUjo>7wnQ?I$p}5I`@ByL`uV1+i%5WjL~0xj;;C& zP5t-lB+g1;XsI@Yb@JD9)aA2ptf1mj72459wl4j#vClViS|&Wf*d!((A0TRUce~O$ 
z55~zB!lXV<&+R%EqN4SRpZXiOTnW8!k^A=)2mpHi`4@mK z|4$@+?c}_daH?;SFhuV9C5Hmm-Xuo3$&kRucBZTca~>HI+9cO8;N`M7qwIg;6f$8h zOI(!c>!tlKLp&S(@6cg*TT?GFvjs+}s;{{;B#IxGuuAsLxS~#o^(h*tEo_S)klPqz z-Ye)z;?Mj|6iMQDDp)fR&q_XX868vZM5~tuPPxc08HdqTH6_U2{kGp!mA17+3a{TH zbQe2Gb)g`xZ{sEDo?<6r5kN_%8q%S?$%-4BSOojgDlX_5^V;{ExnvGlOSh;I@p9__ z2+wLOx|WKtbhXEU*aFhXLwNrT3W7K%y)S^D9$+CivBS zaCdC3h{nAX-v=43v2>eaBc8FGq8e%v>jcvLvi#T@4v(^e1`DCHaFs{Pjl)PLPW&nHP;f!SBoxGiyO z6TSspoy@EjPRp$8;MN4?UCDmT`b>(I)J>_%5G5&kstQ@6RC15=Xvx%^S$VYk-Q~?m zdT#k-;{Q`Y~9>ufOk0kDP@l5c0PuK=Pr&bjR&; z8`@0IAn0ta2=+o+5k?$_OEstxTPm}0J)IMCQ+dqGXS9cy#u4}PcxNE0VKo%<<`Cmtn=wimh%>I~v(Y61{FJ@-VF`FL}qtBtEf}DXh zH*fkxA}v=Fp-z)uTna676GC1|Z6ordB{XbeJXd;3E!HtVOmH;$bFWi3vS zw4~x?B6W#hv7!8;IzGZ~qB+}m6E)V=Pti>CN6H4Zc2f288G92ECs88hw1h4H$uhks zl=?Er(F!zZQ0g)J1FcK!{HiGCL|`5Tx0>F@)FWqQ?^XgL77tIduRq?x8(vaer7?5b zGk8z@eoVM;=>@I6At?~pOiAl z1^1WjT8lk@+^~BG{C9yEYn=}b&^y0jmERmULxOevQ)Tq#`I8TrwuxqoEEn!qnDN$7(ZLHqr=rKQ~C@ zpw+(mE+olSxN&q2#c~_|VpOPaeTn}L$;S-`U?=08Kv{MUh0s+$%P=IMr>e;g%6GIi#@F&zsp<2$195ke+R&aa8;gnt4T#@@`tYW{$>=sA9h zK`7(=o);D+`7yO-QrfBo42X;OcPfc6ajgz!tgt=0hT|0dZDaV2<3Um3MORk+r4;C> z{oMWK*l?x=evvUTYej08b&4r$9)|B~))AJ9r~w^ONaV--u&5A&ie#EG| z{@ZwBpk!!VO=ct-+Aycx6%`E_}0ig z{7cqlf_pwG6wi9+9<#pmVRJjozmY+Dh4^$tSq}rq6Qzqe*~dW|Pgp%R`1|RAuziMk ze(hwMyTF71=!5k5^P2!^NmS@siu0tM!4SpNkNG1T7IAAv{B4n*l^aRTt%cJoUvMJUMeWu*Ubw?d34b|F@kqS60>5bXPc=20WT)MrcMlV#12PHAIbsF{YAzh^f0!5MGfO(coPB9;JXh)-;zZ3r2$zVrRi?Yk_W-!uJS4@fr(tO|bPK3nILX{E%6 zb%V{4ozNA`?BxteEUs*`%%dI9$J9BwKeVL3ZP)8CKa9jiP@2p)D@>)aZ9B#r1BD-@ z!dds%Y`^YH$J3Sad*ji2gK6@b%5?afMntt#DdNcx+Dr6^`RYM;H#1b_v^f!9dakPy z^zyr$Bb4-8m!#Iusx$)36FsHBGTq+BVd4VDIlB%cp2AsB3q*2Kl%Kx(@;CzJJ_~>i zCpQta&P&{zo=RGM@ewK~YlgAXQJ|i>B(LleCjTH^3zQ5F^is54^Cjy~u4a+Rc--Zs zPr!UE*>+W*EAaNr2j)Ooe-mN6BkhCaN%ymJ{em_Aix}4?R@$eH+6%~sLE?R09*<%y z%zA*v@WsB5xH_UJW!RKu=jK@hP$&B@n6;Blb8Er>kOt8($j6%jhutTGr+!Sf%#A^|?sTerkA8c1uJ*S0C6k=lFJexw^e;?vy26?_j@pru{ zjm_3Ys{S6L9e3wC>Duw6%2BrZZ7Xwx%Y9C7IJpmPFUqChQ7$S(ytEIeD7>93s7#KN 
z@%@gGr&zkjtF<0>di|=FSGvP6-M;q}wGM2AqB-%ugx8$MR3}r=DG%;<2B)R}W3!ft zgkNa^AR2Z*mm3dJa{Bd1t=eGWv+XWO%*U=s_1?a2cvTvY14HEK`_XuO1#plCl8)V+q?7tf&nNR+?t#TW}3%72^s1}L??`uA+Hr!%&%;nC4 z`VW*%7;3^uXz}QZU}(chQ<6V%DsA6B&TaifZGQz|9}?E( z2)SanEh%Qo2$!L~7oVBhBEThfV%ARa5u?$~DhRnUGdzB5v+87z5MNmkog67bh*z}R zk@Q=~&r8-&XrT40i9g|)?VlzI4&C>xOF=k#Zfd>6~LP1xK{Ml42EpJh?772m_v&M;c8(|c`02EqnlG1$7i&4LPFgCcYuO{l8i}_Uw4QLED8@%y6&OZk-gYAziWkM9L zNKlL!=}7h@5ZkC)KY0K5O{l%w@8PUU+J_gSmVe>2?N zqW9m6Me&@UZy0$rw!j;1LX93E^YH}F3#~~Je-VNRCml`ieTDAFdW{4Q0 z=s5}B8@5vjobYkRbJ1(TC4u~gefuL;utMK_E{wJBhvR0P{RPHi0b{$K$9WP0x}&Z* zzMR#0;4vH`?lMH0=2n4Z+fVs4mGXnxXF{s5HcwM5tuMYad00P8G_b_*87ulnUkh*s z_xV`Eoc|o7`}&533uti@`}6d)8ejEfLs5~*{w7$PBQPPkSzNCGKaf2VkCRAB@#IMO zRdHKx(s^X|b##h(gDc_q3w4?0P}4yTmRkuZz5ZmG$cg_U{Gp4|N6&)dsYOu9Pl=TU z9#MJUzi64*uj$*57u@~H_gHvFQ9n1P)|g%f`8AqboWFN>B%%2P3_0KY#Ta0sBmH@_ z^#jU1eZn-at%{<0>8+X(K6?aZYPn{a8IGsZ60${%lPv4%jis?8m0dlO%3_|clmbQx z;C>kqG@q=lVY)x~8X%kz|2=L4@2m^!KKznFhWU)2G1v9gqiWd>?&jp=n;7k7CfkD$ zkyx46tNN^(W8$Vp9aZmLBmD>snUG**%qbR6*+Vu!`#-hq*geaRsB{na*lq9DI)pr=RaeBAZ%{;lfy zDIYHD)wX5A&_>PPc02b9XwV1djS#)OCH*bi<2_NHe551I@Uy?_rQrS5pR$Sv39EI^ z*(V`|0K(XYJ1E<}Di3kX^Y8nBqDwdTj4uGTSehHeF+-Cy?YC(ib`Pjw_7eA^R6cxb zem2w3gUG*|m-}X|sh07NrXx|I*=ri|^oO>(Yyjn%uuy_mCY&8iEn!|PnSn(#v*8(b z+}2CM6d{y2rLN&OZ!C84+#f7)EVPOEI1jL?UB69=mO`ynF~)w=f4(7QbX2m0Y*x59 zGl>5tRJ~cfl|*xZJ`gZnNZKLUP`*w2TEmmX~oT7wFj7 zyPxzkXr9)uV=T7UDgWG%%=4s=qdHM|-vSIg{`$EQ@{wJL1i)zomQjQm>PhvfP{rCzbeDpz~)!lY^8bM>*{wL;__LlKf&Uhh05<@ zt8_?7lM}mAeNWuUb!qt-;ZdHY6HWZuhkuxc>Me8$AAjaA@(BEcU3VjRkhllw<viJtPhvj*4|p6j1*p@=1z@zN+tw1sLWH!(-Tl*$LrAE%glrb%v70)S;ukXLxIVB zB-@%)A7AJy_F2B;vccmh3%$1w9%MHc-m|DDCy%=g#X1RNq14L($%Zc|-X{F& z8*p%WfXd-alvz%!hcIe3n>bWuC0yk-KONb-#l)u*YY`WiQFCoSx6%U_J^BVS&izS} zRZ1)aXL+*CmK8l-Hay}aMJ^_^7RW&$9Y|L(R~e!6v7+|LgXidqjWf5@CN)gI>lq$5 zPNJh8vL_`{4Q; zj%zx%{s{7?4=_^TW%7krb~uJaOmnC^aNQ=#g{(nNHRS~FffH{Y+s1mI8%;TfFSV7f z6;WR)gy+NPHiKsj6B{Xb`d)l+;{9@+FxK_gsveA`X(xHwpj<){vG=(qAkl7sT{#J= z_*(=6lyT@@{2n7S1F*5Jgbb@zfzuq8DL^xZF+dCb&(qN* 
zB9B*-T^<8Ed;hCSz2p_+9Cy76fYb%(@2(BM$9CgDu|a2gt?Yy{F zgwG*epMV5vQ&_tqkNommzK39ndu(MeZDKRW)RU+umjG6cmQm`16bkWzEQh*O%8;KK zyb^BrR8tAJxY_HcRw4}5158dW4Gk}U{{XwI{8Nd#otKvXwdT1z^V=*AF!5EKW}HT0Qf-f>Tu$aPe8pOXRY>$>Jh6YkBBsL_ySZlHfEBDKg^659lldAq7+5Qz$Plz{)R zycGk8`gD;k&YAa}L)kl}wY3z7N_;ET+M93MyHd#Io}`{=W3t~V;vh!Vw=1PBCctpb zg(}|+kb7WG$8oi-vJWs=YlW!fNjt71vYp|659L!5*0rv>w?E{1aNrsU;zYazfafs^ z&f9wI=l9iTG-AR~<9!h&8_IMawe#ri;!d`pHBUa5Q+A540nycqRnK45nU|h!iq0cv z8EzHl%4oaRA^nG<%@50`lE}`~lMG4rBXU63etWXq_ZFZw?+%R)R2z_3yA#VxOS}+% zDJIy9xumFnfhL^4%0t0KtvMm}@b{`x&2nb5RQIn`uXTo<2N}8$nH}E zn;&uEJ76&W45g|MZs@k3{CMBSD?prCbTVIakh=$O}|0h?$1`M6vyZ~#LZ<54{Vuv1g^cfUh29vKBB8{KS2;yf1|foPWU56Gwr0)x|kU8>caz;`1b<9`r^1;IBh zhMrvr&lk|ia_ovIDy1k&uBkq$(u`xAbp*ilgKxVY$8D?7SSjn{&TuIRR?eO+s6LrBvSt{!I{Ho~nDba2Hstc|E8*r3AyRtOn^}5eT-CrWjVF~NC zE5PE1ei8adUtBZiWeQN4<@w$&QwrvOJDn|?A!D>uZ--1Ed_?YS$CV+^u;b+Xqx+LH z@=Fj_|B{+wtSn17W<KT{+s0#}xLdpK68h5_I_ zFEA2cN#{S_Bjxt`#R?m-roD0TY=sAt%J)nbi0FOUWa6{5`Z_!hreq`x*OAv{Tz(Oh#e#yF1-&W#8 zRLM{zrSqrK+|f8dTUxk86>sc5ei{8HkqvlV;BW=RuLEk*8^3yiEL zCL9AnQH zeB@U=AV-S@rXx(NP%XFtzT9rljSBRB{4EqZ>n@3X z!=2jA?)sGylAUV&#GsDMqu?m}AR|-zA?|uNiKvM3_*0;W#l*>U?bPKl*)A;@7hB&`vdHi@&Y|RJ2dH6P&iqNl{gOa;0~Y~8ENzRi z(j^y)>5MChjF&LCMc-2WH)!^lqZsEsD%Q@f2-IXg+f(ufnO$;PW7?u=JSBEQtl5h| zbhFPz)N>ohzn7X|CXfb_vwIxUy}%_3RLpIFt-MWg0nHc$mkgJK$G|4c&p~YIHkI()fIXc~pu!%3Z_0RXLQ>VU=?;_)L;tDo=;Y8U)*JDS zSR2}Gv2y)#)M#I$Bv2J4rOdFF-}$oQ*JBuAv5Dz!8W?5!X;AhklDFNA#^Gz^#Z!56 zHYU9+cjmZosfZl5c@wep4xq9s+#GE^1a!Svl6@26bIcylFs>+O$^k+vLli21Z5o&_ z3HK}hHsuy=B>?o9^-hSj)dsJ8T?gn$Zn0o-5U>N-NkmQvg ztkXgz8zY{)iR%;b7oMPF0A-4Y*lbD+qa>|TKS6j1^62| zUtSOp5NMFd0uXVUZe%haugRk3x<%72U20j)Rv*zs3=Vg`lgr$t&qD>dApkU=>4Je zmq|ErEJqXv6k%(=n$H2WCLeDebz8>@A2aoK*ghrWWUYp9CQan6)ow(6q$I`AqMr2h z-f$zo6uuk~sGmIsCnJ%+s$E_B2gMh-$}ISWP0d!&U0B&FxOwR4wJP2q(v?Hbq40&O zlnEmyzvCw02Dm7o5+#M*+w)zWunRH)55pn7kXpXW>!)sq?>}WH7zc$Fm6#XrxE*@D zmx)^0@4JQ=M2TN<`6rd>9P)9#5FR}CV1#ko^b)vblTCXL*#^ATnHiIDk4mcMAC878&Vb>QQf!$iW^FrvVsclaS+1&2MU-(^GUT#QLEj( 
z$1^bzM~BA*I9B=pxHx--V~(R(dRd|-gbpuFeIFo3>w0zM1W`dh%*^8997~YXuaj%~ zsgi{Cj|?oRR94bnz8mX7RCauN@!Sobl6s>50lf5-pQ+tKm%*yH`?I`sFLr`W{Ra+6 zF4lnp1y69Xl84QoU92D0z)IU>zHZ1sd7}^gE7({$Wv)Fgd1A@9oD`4 zB_ee`SD;%diRXTbxYa~Ij+%ogdsSD`0#~GcH|rS7BB3!@orl7U-R`J6UotP z$xUq2>4E$=VMc~m9(I(iz1$MGOtzQy15@+y@t8M0m4VPxOIZ;8>J3JhaqML1%a3iP&We12eR6I}BCr=6E}u5BVmSwaLA(UqaVv_M^YIcb|g zkqptXA^XyIy04jzs0T2pO@tJlq>@lVMtE6?*UvA7t5h7NQ$_M)==!|#@*PI1DZ{W{JbGPYVa1V%jc8s`9r(7A+450N! z+v%CxiF`>1?axod396C?CVSC81X^Zu8*+<_H}}>mgtc_hrK%o;hu-Reel!bP6`irW z?EBQrZZ7FupSVEdAH;8lI5=6B*4*d4Si@``{xw%uwIrznbCuAqi3|+#%2dTH z*W;|-gXj0vVuZ9TYU)9tN_^kDjS%Bj)djniwt4y$ZlXMGMXOlCy0R=L#W4+*GSw#& z=D8hMJSWa|qV@xpoZ=m0e+#gt`G zQe0py0${sWxwd6yo{tBVIT<)9SfUW&`6?a1h~Gukx_`= zt`=$XS7c+Qka zvW&I{d3BSn_WETBSVAd@Js6m0lRC|;M1yGk%?(_O*oaaZXX$*YphI2hdT1x8=_yMX_QpU943@w62DD(ne;_{JbEZK{Dz(@Zgw1K;5kYUb+n zuTI3+9cJLOWooYsR6MFqn`(Vb2eF=0L(39WvN_S<*f|3GVt1Fd!|SK8*-eq|nX0B3 zPrNdY1y7ftiWX_oMM5!2Qv*J`g5}fjQj{vQb0||A1qQsXJ+;=RObUm4c)PvONceyc z>iP;_DlIsY<$2aa&=C@2C$g%i6#QXnC9i~1U&{cRVv6E-HsE>IJ$I zrv+*4+iKOx-aESN#h&%p4u!$%8-A5LW@RyQny7@dP-|W6r8UyT+6sSYhL@iweb?^J zta$aYzChzU0?Tt zQA&ZhMj5zka6YZlj6S*c;OfKXvaGx5O~oI415C4Irx909Qk|LSi!M@uRQ8A;y{Bh& zX@54EFH`2Ih{*wY*=iJ7AiG0w!HcC^dw__@i*YqqK`bT!Yon|KgL*;v_rAHsR^cl z6?lTLXTVRK+6DqmB#Xn|AQRaIr*W}bd{V%{4DXoP@8m@$>@UmqWCN&d)xyB?$$`k! 
zs3M53Wsx=Nssf%6I864CMZIxHtLlfX3WGxBVH4ni*!g5n+L;LFZhmEiE5Ymg8@4_Vw$2PqRNsEw9Y+(61Gq$oDc;KBa>D{&q^K)uRPau-;i{xx0ohD=3h>gj=tEbw>C68JO=Y zGM@9Bt~&jqCq77XS+8DGe};1J2QA_t3VzBAnUYlE_y1&29pLyr;3o_f;sQaqryKpY z4uKEo0Y3gdp(Eegm&bynn#T0>sY|;s#T#p zJVxQ@(@ zU&XSevqL2xqo#g!E89I%Nctyu+3XNu5#o*OPyu#hqvp(Z7$&PQtn8r3%Cd~km-nrE z&%q`zO`@S*OOCH+!@LoF5Ju6T5>5d6-XJF4v%c7VSEb@{mi^6(BYJ(6=h?KAp+i`| z>`}KnoCG)!z|N$$RyBMZ&_Y6D1dp=*+11!`X?0?6>Mqb6A9{|Dd$WM+?KG|7JHG4D zMCwtch6$D#3|bvy{M}>R>_1xH35tmc96#)x;g0x14H%QUC@Qji{uL(Z!;wB@(0z@D ze89mb=}~>dDYgOIAQSODw&jXN9^birUBk@w?X~GK`Vm_q)*4#y&G@O`@QPQ3s>C-7 z5x~28e1j@3#|x`&=RGZsWu<7;*7h1=jw#mfVUODYfqsY~BKTj&s#?75rpiiOk6$J- zz?0`hwyjxB(*TW<)*DnW^h-yyqgTNWNXT>k%PbSX6CLs>d-h7R1M zn9J;_wfM>*g`c%Ozv59sv(Zp@Thx>XL(=VblL|8z$rtuoqrd2w(vPY#{-Rr80vn^^@jZ)Njwj@ojeaoz}1 znZ?!fBccv_`vpWweFig%huirdBl9_2y7>WSl%LK&2AcuzuE_vsTbP4}G;-yX`HmkV zMO~3CmMZQ3P+!%Mn?8Tbd>KTzihp-e|7KiSE*%aC0^}p9l_X=r0PT_|xre3uP(Sty zV~ZyU^sTZlLTu+_lylJ4eL$65%sdB9t|6)7gKg{(JvhIt_m9wYZ~>ezRvcdd(;`E< z_%d5Et#muUGDi{6S?`Xb0R^1TKSz?o!RCRbbV>yLzKyMEe#2#1-&>uav>UY}#qJi!6ldLa#MTl1`UF~lG8<>9cpsw?K zS?@TWSK{jy#V>9q#rIiyCa`$c7+>NAI(Pgz3cYzbtJl3;WatMKMB6Q~1 z3}clcV*kauOsUyXRs4Z__M=hp;$ciZ7Ud+vW0+tjnjIL^AU^>JB$NS%2v`(~;%|)_ z5&p~XwXLw{M5x*KB41XgEi)c8$?m>@aYNNLLC9)wXH98Kc1y{;Dyb##Cxc+^wojdE ziYY^`s`*WUR2g|bo19Uw<)6y_v^ar_x-JJf9-4!t zT2GyJf>Eo(-klmTZPk9?wUvn=5*GvRJUU@LCj!=XcJ0;56{-Z5Z2*h8z5Sm1wwr&$ zfT`dbQ;RDm7GseTq;2z$h2|azlz8_s%jea_6ws$%YthV>XH0E2mfON%4)gXE_8>NO zdx93#8ysN8{up6pa(u|@mL4@e!c{d%Nlj@GgL;B!z$k<=QK#?c%Z9KTX#+w8c`iMrQoL`i2w*%HC{(%*+Fw1%>%tvrUPxol09XPwB>$anSxV;9Bit$=X@d>3a(^ zw2xbPP49z1NLN|nJ0NEXh%1pEb=dauo6xUJR3?)DdM0Q8tn8E#QgC#Q_Vty2z5@dB zG72)wtqo-B`Wf!x@bVFZKy1q4rT|B{+%5r%%)9E~)#mQz2R873p0`R%o9nk*`oB*| z2ykcA|9XTTz-~|I|2zw?6qEkLZVjzH(<%}aSIevN5)XhQ zYD`Sjk;{(L_A~S=k7Eo4)|Vsr&kkb$YX^z*)bdznXh%_SwwYNjJTdXZ_)p+NDO#n8 z04nA03eCc!R)9Hjzfc3OLaxxp3kbas)`^&LWhk_}uMfU(`)e8{r3e7|_FvQ>H3Q^U z6CgA2wMDiTzUV}iYJg<__t6}R2W~F>Ur&+L|NDWAd;c#jou4rE{I8FLe%So~Ef9*y 
z!w-TX0-8cyc~;8(FK=9qHMdL81-U55p5~c%#u|?9{7pO?6dbQjWP6 zRxE>2FV!iDB%z%ta^LzlQqe=19R4Gx@4-)>)eS1a^Wlf6fYv8rF?sfr)oHxDcdo0< zpb;4AQOtOm?a=oU`hepbd&#)d-_1s>D5BjDci;03uDPoXG)KlpjAe;qXfM!sL9pEK z?Yyv&d>4VHv;IzHHMP-iVY?&e>ql1e&F@}FRE7S%3yKU~q-_1TIUy(Ba0Y3;7vsLz z=Hf8ZXzaGu*KFI#sbDv`s2}5dBp&0tHA)J9xwNrSGH&9plI?#`6##A6FSY-ycL$_g zw@BWqn*CaVf)j#uy&N*3vcaw;5MIp@4R;k8Cl(bI-OM$#56V;_tklxC4ssY@`L!{M z(0gp(e*G>eLnB8w30lADnJF5m-b`^dlCLDtbb%G>JKym2f8(Yf+POnAS#k&eti(RG z+IFrHPEY-}QmiL&@R-KL+Pg1|o|fay8?+dDXJw`AZ0=l7@Q&Sv^E$(Iy+=WsV7+`R zeW0?t?f7U>w!a1X45~L5=vmFQQBNhW2+PfVLE*9Z{RQKIh^t;hmANjQ%K*X&x!(Ql zV)y{@&oC#JPGqaWuA+qlXHDGmfsS`J;Z&uIL()PIB~s9COxh&iBw`l*0MwPrXilAe zx+BWIX_)WF`M2avxdni)MCzN2dS@2hdG(az?r14wT(D1nJ>9A0BOp?aC)u3G>+z-y8-?i=*h@xA9MGE1 zdN;pHpb^slau{;hfWd6FuXV2Pitdfa(ixycEs74l(}~vE_G`>P{de;>k7uMtA^Rnc z`Y_8ln^19+bR4brtjLMm?5TMiiSGNDv?u?@?54ikWV&2dc4lku8KZ4Cs@p|emn+5R z3R`JK{N4_<{B3D`<^`X1mZ|*JI+FUBO=cD_6T7dWn- z3W@GIt}vGQDMhE4#QuDd%ni^3 z-@M!`h==#;L~Ff!?#0kac6@jE{X^m7c;?y0mQ6N02geY-Q&pFd*)bQuxx%=LjfJ{)rZ4j1Z&FuYk2q_-IV88Vc(Vj6IOA^Q=o|bk6AD_ z`H0Wa&P^uXabs8`=7}I!R`z?H<8blh^i0^UgTimC_aci>+MZ%a0eH??&?sG~kE(zhxfICsVY9wtLOu7p7w$=>SfIErZ8Y9uoWMjT`xX#`<{MOxJOx@wugGxem6s z%b`!2jOau?AZ2OpAFuvumCaYmyR8j;Wh%(e|FAdr2xaeWbo%Udf**vMJoy20Kh zG-x{ub59v&^-t&>jGT_F%p{o$g7L`wc%|H;3VQp8qnEm)P^*>YmUu^$SH-|`B^nh5 z;GE!!{%lEWu@2*dZU=uS-J)0{of<2L5PIQ!)E30<0CwKtLD^?gE~m1(u~m|-1mggg zBAtr*$I!rm47gcu{gjgVxW#qn_;B6&`1j|yQoxK(RkMyK_zyFk}+w#80o_C+&v| zVwN-QchecGIG5$xhx|l@wiTG&?F8NA=vFnJ{|Wa_nOS~C6+g^3?UA{bX2k`F zOTPHO?V+=p!mZxaK;~;I9$7g#4nX^;U7iwMSAR6k>OAc&w{ve&|L3dx`0<|(hp1Ja z@!$|igmwWIkm)*+>pk`|{p+w*p1<8*s(|Ke*qg&2ZS>}BZ}0f9w0e>z=o?B(Miw)! z*I-}uWW=fX-pSLAh(DhN49CARJ$(2uhWZ9sZZjsmf`yO&`Tbe{um`uy2(7Puv+6xH z^C-XB{{rE%R2Ug8hL~j#JsBNg?+}uf_SpRN>EZ1gc-Qd{@R&UYIXjle(*LM;(-LvN z$j8OSb@MkA-g8w}SAR_s2^*=Bu@)?zR}`>PzuwCYS2-Q1*fde2r>CF1r&Z-rb+J0l zgK`^xt)#s(c zbnee1?zGETQEi+icqwYs{8C(S(Hl@IP}oPc8cLt{3$1{&phL#mgvi9yZOmbwX@;G? 
zvBFe59dXL%&$6s_`Zq_6#?;r_tmn0!Y*n}43ilI1d*p%nd_tIljX5&U#79P{Tj}De zrk*=k zYmMtYZ};`zP`k0RA>^ch4%+dKzt{oM()H=iu!#)4w{%2A#Kq>820SAJ`Sam}zJ2s; z;QQ=Sa9KH_1VuI7XPQr+P)te;z8;p}#VrQP?VXO;_ZYJeC2&DP@}}*)uj`g}0mE18 zedGfuuHtu{+)#RJVTpQI+D5O9+~zb9IoR<;o<^x%@^1Y@UNTQlPjL~8l(BMyGRbkI zrn4r#`2G*Zj_ZF<4>BBgMBm~++#LsuNl9s-VAIv2o%{=DV;5M{BAJqp*S`5wOJIe> zwSDf)?92(a_zhrC@0bUYWE5y_`gG~ch_v<}Uqr6bHC|`hyd%BVr;kuA?J8?=Y4LbQATL@tvT@Zr+71dv*i4~KZjJBeXiWwE zvkMc}5OH4_CZ)a#o7A;=mX>fi(2siI+UjVvkH_BN{e!+!2|xrk2lLQVc%CCu?ueMT z9iyY*hZ4N9_z!o+(@==$JHG0TJ@90v_V)G_;jCu#v97i@%s60$MovCUZL59zg?n>V zdh_2+GNVSX11$B-E5~2d%&e?-fZe?KgE_Yay!o;lw~zyU1ms^%cEF7kEHN#u6d(Tq zALP?n<2fal$qhoHiBz%75M`L{cZKpf*+!_Pk0WF-PgmA-we}j{jCD0X5Oq-nykE%1 zP{FO=$ZZ&&oKM@cvv$_>C6s2_4!8N<>U?Ut-u3xTQpsWM~^nVdx=1l6> zQQ8X9HBL})nZCP{jV0l*8*mVb0}?}H4uMAJ25p!_+Uxl2c$%cPAK#8n9|E1N{!iwfBGaY1!VSQwp zQGL)d@Tv>nNc2yqNf}9|P8DoN$x=QJHQX$Z1%mk0sPniLGP=R^Z|FksZqj?^YsJ1&R&zGI;(mp z4LZhem7u+&o#O(_S7AG93-HAh@pKEa=gBqJEwY8*Q@g~Bd3Gi*4x4=TH9g%Rt8;!P zc7F1pVzDAIksR=3*)g$z|mLK&f-%GSgaWt(UFsv z@A{{F7%lz?kXgc9-Zj&5%;`NY==T?`4m{0QwWs+8wHdR9Z&%Gd)}A`SvQ6ZgE^ma& zpy^}u>|$5Zp%0&yjE$rCIHo(=+c!~fjQNc_hld5m&3&y_8hg3{2ip4AzdqM_z>LX* z8wBliOH4m^G}QY#0rC03cM`$HjU0K>-YS577LUw=_|DD|u* zA;QSpsMX-BV$#7u$+wB`Da2@T-hER>nb(4XU{MdpQ7RLEvzhs`q?=NGRGNg>L7ATe z<0=+sa=J%!p!0R{yk<^L7&(pi*msBF!jsLJ4HiM08P#b|dpOgz?`k3tP%sz3DPyDJ zz~`zo1se~G2l;Hw_fJlS+oPz!0+mU&_r|m>MCz_HcwPHDO*&YooRD~*OtJ0SK~bgoan+BloaD=DVS9S#8H6f^V%CeerdD&vT6T{GwovKY>KXW_E3VTfM2fw$$_*_s8^nIv;~EQ%F1?jye>ncH1toOrbAYL_w;a%SCPxA zs@7KV4B3sBCopY{TSf`Y7|#_2W33Z@J^-kltCT9yugs4C_% zE{twCzk5#w22@a%X;>!(B&tr{CJ!xs6R=iC_@F>aZLDpqAWeP>2yp_SIZ`G45YMaz zG;hsUp<1QS**0RlBYK>y6~AY6RM4bTNa^BCS-V)x&=7es>k;2lp%CSa73Qx;vlRit=8SVc(T=O%umT>q@4W@!T|PsMH6Mal!EQ&7Kr-V{Z}W4qfp z*>IL6b?ZD!pKgy-&j<49i?2}J+PulOj&>Rs_WyQfN2Yaa=)WoH3MfXoH7rJFxNw}hoBykpwLqi1G2q)`jZ=xjqxz8&q-#KJ_>ibd*QqJzUz?(7whc|x1Xrb(lC z#eso}+*==LK8dH#`SKZO(*MWYTSjHscG02_C@KOXA`PN+r*x@wOE*Y&cPmJjbc3{j zba#VvOM`TG_r7l5@B8-t&K`T5amM+xpD}pHJMf9?zOJ?AnscsoKN!zl@c|$6=k4P= 
zMyl^3vQwq{_JbM^PS1b$PuE#WH+DK7$xpYp1MhDrofHT4O20Q|d=3~U1IaTL=B}#a zk(n!*$YG*=2CE-E3NcpQ-c#bx zgz}iTw{MPTH{W)#)kmFB>3LUMXs${b=A}oyv@@akcX~2doT557$*rL`=I|N&w725` zAaM4!XKNBMX>O)2swyr>u9rXQ$fm2St22@GX7YvBRj+)kV-G7KAzihr@7GGwE4pZR zP-oL~;rl0aqh97e3dpZ!=a*I3WOQ_7 zU4eJ390Le?wH(LxDdGXKrQB?Fu|3<9Df3y>`!j8A#J%o&7C5@$eWUETxw$U3>c216 zehm-VSyGX9QA5E>KIZ_nvx$)iX#4>lc_UP`UlbQ0Gy;{6GS9&&wXcC`fb~?&}m!GnF6o47+q3c;)T}=fZ zGY?af)JK2%9RjJbAbK?-DH(bTXH$0>Iileo z!d42Z+ZiHWj#V}cq@YY}TQ(*wdLklXjnVzc=;ryR;L!D+jg1YV!Z^p_iDq*+!fZDH&PM!~}h>du8RMtg35{shUR7sM55MFQ`)O+LhRE zn5bk63kz|kb=I;}D_{tZ?T2*@HK0!^@HDYcHdr!;zTo$f!K?%C%b z?l(89Mr8it&b!8ZSn>qM&ej{PA9GXsF`3hT9GG&$mAjtQR8+ET>I;gEjUEYHmZ7q; zOp1b=bNNMH-dsO)I>bQVApRy_ejI$3ejPkJd7ALE?Nc_b0V}nc%H8qpHMNtL_9`0* z_{*bh>GALHi9>LB6d~QgcmMSV#1Ut*lNAf@+I~y=0X&*DFJ^Q zNlI-Td|{?iay5uid%pMhM$>f4TLj z3&Nw{^0~5EAo}mj2t5z}@M<)=sQ?t{lTEn*?o?z^QL3B!t)1Ee9AvBXtFelSZO6`4 zh@&>}Xs56B-^)atTnwR2SGWpSs`O7;1Ni~*nAP4`mg}zgKu)Y&McTsA_ybQ2EUX;R z*x8+K;D2y6mo<{bQ(-=%Ra+x$SyA&wxhs@lxbgOW!*6O7BA6$grk2}-EYxG*b1IDH z#az#BCW6Yf+d2co!sG;MJ>nGwy97+zIx7GeN4BhoeELI^KPa|uurz(MsEI9K*Ka^Yl~E=9kiA-#&3-p6Pa)s52a6Cd z?9$NEU$x3219%VPp!YMroV`PCVq?R5RUbpA_LH~4B3fPDZsnKu>Aqu5p8VNF5@Azp zlw*?K;Kj<-Z*!K5v@}jv)(=&v)oLJ1uG}w+BK)NxbA5x7jry|As(xg(n3Y$ny48=h&a}je zPsC+`KOI3Tg4|rlIIF3vgNPf{<%7}^s0`JLtzW?S4AnPzuZ{4Z)RfQ1{0=!WRx@87 zT$g*aCAe8?$@*?{16msk452xP?OniQUrILc;~3f-I` z>ty<9LI}#el0j1A(KQs@lV)dNB^vc7^9gHfcd;1#AwD`f8W|#gw0g}s(uT^D4Fsor zf$HaHguFlKlItL8;m40E*5jJeeG`d$bbwylXpbj4a(TRdav2+0u@Z}*@@fIZm#8do3<$XG z-pTP^Q?xz&;L+aEVNj#2*Gn-+P|DoJ8-93b?Xp(0SPLwQv9#NeE$3rePA8)#$V>{t zyq`$e1P!FW2HcdP=5L@}qJaz+3|iOU3^N)>1y-rf2QNUHHfjuox`Av2jt{d}6v%{z zyUt$R(<3|xl|+qs7#G|o2pMqHh3 z78Po^P=M}+L4S(&LyCBfT07Cl&-O-73ug2v_pj&9N8jpPlu>C+Lno3BoJp2xrrp^J z4a-ox0^Ww}&EYp~kcAK8A`6vlttoZ`O4Y^PiPM$haNM&Cxrh129 zja|`NZ{BcWC}(8vHD1H*PL)xM|C#%z!}SQM9+ey0hWt{=B#Y;^y$gq{By}m zGhprWz7Dt$AcE>*HdKn=I^1B-xt>2T8PBVrQp!t5ClA}MTJRi>(89z%#=Yd|)UO0q 
z7Ucdw@eEwv^i#awMyIKs8=X=b%-nkt7dYmV)ljEYm9b>_3djK6!|ptEVj2Ghaxg}XYPN`?1%JMWZ442vao)v{+c}3CKu?^+qs6XCKlTspZ@v& zruG=vf0oVt98y#mSp5mC6I_<=nG*|c9D^xY2LBpjcVP_~|2B=f)E>1C!7dtG3EO#;QZW=Kw9zDW5h(bNK z%L|aF!o)fG6~0?lgj@sxYiPdqP2zrTc}`A>tM=D9twhj>4ir3(xDO8To+e93G&^*vv zKVKEMpCCBTy*GE*j@sm*+rRJT2zZd|xLn@KyIH{Ja`>wvhJ3*o*Gs&pdjuG)1MR;ja9AY@eHbwR*9n*JBLAO9fBt{Z z4*lQ%25oc0|C$^6%pr-(Hk!fjo2%GA9T*Zae&fSuyqI1CnkaznZL3u&)Yz=r9sGf7 zIG$Hd<|15eM*HWL?60E#HFFi1OYRpo20T67s|)aFc6sWY5c9ZyPCPJeWkt`O`Br>+0l3|=3il>Z*PH~R0-f%K zg%(f8dqTSYJqz&6q^QA_|EYV@Kd%);`OjLE`nm{5*euE81K8aO?Z8Ra#0=PkW}|!P~Qb$J6!$Kj6N^t=fYQ zc&?5hA1MjR%e4dXawh$rV!qw4?#5a>($_kk(^%J|9(3v=R(aUp;wC&MW6wJ7%IVK? zZj0%6gKkxBqHgr!OZ6{4s6$5of=9ij9z{gdY7d65+;6%ct1|36BK&?LzY*T1qPhRUt-dFQ zHlqy9kl?^VEL$v^y5zrCif({*JdJui(m0t4EtBvp%e{mzh)_uwK+)d^g3`c@n(l&= z@U9nE517LO(<2Z+-<`V-BgvKCpE&`p=<4b!t>=B7i_N4+N2kDcdpEjs{XZjv@46M2 zsQ)$L7A25#&yxj-OJZ-ri660=P)&q5E#F`wsp!?w_Tby2-1XC2$K#c~wd_dpwv#P9 zdP>H_2yfwMf0yz{`QO8GFXf=A4Ge0}!L!EdYHTpKlKo8>8bgH!QHLQs`k#;g_tC=t z`{Ch#n~prsKTP)~bdv`>Pzi{M3e)pv4d*RRARoHgz`lxRXuZa3e<{p6H0P9>LFED*@}I{$#?DhP75PzpmanX8iTFjJz>dy&4--_S)(7TJKu^HsE-OdA(G3B` zSuf7|p5ObGLI~~`m$fPFgriFmp!q~@EgIw^<0m`zx|zTv-3X{sV9rK}fWt-^1X8@lg!3aNoJbEaWT_(;vNS@^=HXx3Hf*lyAtKjKrN4=rS;G@i;%p|J}MM zvr1@@FhPwuDY$8t`_CDE+QqJ=ePB$&bg!PHv&nnLVF-*2MPvd&{}K-`qZ#*00+VwB zZ}LqAXvC)z3-gnT+HbQC&AYn>uL}taQ*(2xqu^y~sOPHXfbQ(P_H2{wgu{5Dh8l2F z&OGpxB%1;>7DoX#nrQPbval=yD_YPfcxYu26PS8gsJ+7 zr4E+gx_Gr#0hR@r;}qgOO(&D|kOtU7Dhir7GP$y^T-UJ%mD$5 z+tJn0J5#6bJ8=kF?m$t42FpgP&qHWN7Fh2QnMbSc(1dGp7;o$@_Ez~b`CR3$sMjY2 zN}8I{y+34E#Ue7LbXCP7q%1h{zp12A5z}h~N-27OujO=P?|h@%N6)9lG+LlTsQM3t z6YdC`f+8^FbKwLcmh6)4a5TO_W{Z_mL{|0DMJK)E7Pv^!kt4*!F?acIn8E6#;549r z1_!h9>&@R%`jG@28I*#h+oq3D&2xuX$Q7#OK4+G16;}2X;Wf<~Pnd&US`ohTBC<9_ zA4~V-2VqCNFQ)&pb}cir9HBDJwCyE#dDh%=hF3M1j33WO*oc-{e!DwEUk>C<=dN&= z&(_guZ><~0^0J1;=+oJxyYtM1w-(u8AS)(H(U|P@cVFPNh(-*2O;v=9#g)VuFy`iT zfuk{SHj!6Z>}G^LqCd^H{zS*om@!GX3^WyNJ<;)CCv*4{upat7KG-W|DYJ 
zc)zDTvv^8-a)>cDO;wYsvYlSn3bYtbPw1R5n{MbkmmM;7($}Ly%q@MX-d8qGMSmfa-tGTH4*_D0%!{&J)4di<`V+vY5)Wu@tPOGd35Jf#tE_9mBKCG#dqe8EMD zg5&KucpS==_)Xfyn-ciJ<#j)j10jAe0(|S5;V=!WPYSZ#OOY| z{Hrt&+*I-Qe<#8Eanv|Be=@d4vg&$bD8a0i<3c?~8=uS{1Qr)i7crQ=d@b^)X zxPW<&m$H0|$67e<8)9l(@7dXB3e^A^0aubCZ7$!0VQdtO91^^en zq5(kt*W{HW0g6kJAl`ND)z&8a6ivbf!1(;rweCgY3eS7Zkko^`Ld!WtXhI4yLxn@} z6}z4#kJs3;xpV5Lw-UDkMIDZN^ov3AZsmb6Djp1$v$RkxyLZlZ$gD~kKMuw42lPz`8~6B&jf` z&&;w=kZjTn!POsGE*X+cO-4XF69~_IC@JMI$n|~g0d7xIKq`RPbSU9K?HDt+`&Lgi zn(_kNzG~&@II-Yq3C)d9w%s5Qz1Uk)^GA4OyaVo)0oFF8-w>t}@GbNozQ@0Y!; zMZxDb0o#&QfnS{bz!8rildscq1Z-ONK#=+RcBM%%fU6aaZq;b8NuRwo*h+#_Pb_~h zUk_kzI4`;qlZqOi2~j~+REf%!#W$7~#4M#r`m>Ha)$P{04ILyCq}CWpebXkWZp1M0 zSC0RdPXf5$(+#d6_kxVP60z=28g0l$&gJLwr>@|`g0cPM9)lk`@nJ%(*t&yNf>%K-DtlA+{Pu{0WBQpt4cwOTzGP+F!&0qC6|& z(Y77~TgOqhN-tt;9c;eGR}~{352~zwI}cd;xX!x=$?;mndU(`gZu;>-GO;B3C_+V# z)KuOu)25oKEX%6CPOM=wt{Q$H$d&l61KcgU?_lpaL2pj^~!_Rl3QO%PG~wHwE$s0FlYs`2zc=ui!?iB5-g6Hi@3gVgLik zz-xm8k7cI?iCh(W=zpF8ao&6h(YBJ;4j>Bh00jVhj9Ja;ekE9IZ8*w#@RDSzGw+3# z3fbZ;DS$@NIbnpUEcv)y}TWDdK8grxmZ!^@CaxoZ7oPM zK@vnE;><9I(Kx?gLUOVok-SnSt0$TytN)TPDb?Z4#!5`t(|C+Lpklg-v&ws}U2(#C zLo6GPBBHi+>Hn#<`IZ3pUI(4jV72h8;qFHDfov^S^SQpEI_zeWJ&RT?^sP;XFF))} zc5rO~LH|Ru3pZlKB?R;LH8|9Pf`xWEsGxZHRnm;nSm3Go#6x(+8V%l*`u`}!T0~-U zHEG`!1>@!zFL3_Jry=lSdZm?YYN* zP!bDf*R(|txAk7rIo_KJDdQgxJJzQXF4h9fPr5E7Iq_OJn86)SKTmd}0wZ56_~uqa zi`=sRnFgR$MAjEc;ZibgldS})8_e*2e+Q}`W6!&503ABvtp?(8n}Al_k~zF71O^Y@ zYJCICS%~8pU3CUf3NP`;-fPsI49z#(i$s7~rQvF}h0!oR&muqo91e`&U{^Er<@?)$S4O9ez;BEt13yfJ?J9B<>L z+4RMqC0N($QnjH+6I>2Ml#bDp@-^A%YUHfo(GjE+8&lKQw0|Thp3f}4ps;4B;F?6) zg_~gzOKztmk@`m3D>`&0HKIO1sIx0lswe(?eq?^+{FvKWXS=kboJ8ET=(O*eVHBI13x;PAuMxO(aSt#MnsgJG^9TFv~2F}(kv%8|E-gwc3ogHS{7=s zSx;2%#cx{2Pev4_gn_Y|Ui!T~2hHC3)+@izX3xDJm+VJ+)L%_^B3DHD`}>zlN+I;d z?PfE|VQpzR%F)LZ(0y$)Ct&o)99}V~LnJuWcygsrqoxsuF_;LRKGJ5!wrSz0DsE2R z=YS5L+-D~Ter;#it7fSKemd*-O1cO;b;J0Q)-ch~&{iuCE}go`!)ix4f7|yMC=YIg zMbg^dMNep=4pyRC=jp4quPUi~VEiufd^F>Dg91C~E0bQ-YIGp*)(7v)M5CDGC9^7l 
zLlzz`NWJMI=O6JgEbgfG1^cXGbzt&_#g>ADmpCturu4hfbx8q%DB9Ve!Im~Y zPkcO0CzK}FD`$1v(+Qn<=rgvs$W(j7p>H~xTS{pkjrQ-+gDfZN%P9( zqzwjByv3VrI6O`hg@x7E6ZSti4qo@{Ys~Fy&(}N0F=&6_OcW6i5RfUQlo1|y*lWjj z+PYY2Qu!-lfKAZpd2s$DV%gl+k%SNBV-BH4-cIuY$gKhX{x(y2GkJ&SP3LJ_zqd>7 zE)5SYCl-P)kFkQQ=L)KRwx+d(FH|ubPFpLTns84PW3nyS@PxkN@#RvSD^Mp);k=mN zUkC`It923}uxKcrJbJy!pzQn4C5aTNcSym~cJ>ch(;nUi6cCEsX)C9D>9t{yT6d)wgDl)3VD57fHIYqws+1|I;nN#18Js)2SMP;X{HcYg{;o4Wj>ojn<`N?| zIxXW^+Q<}j9LaN~Bz1A~%Dz}R`wcd?(T}kf5VNwb4idE>_Mk_Ww{8iF`)-|#yaLS6 zmF#NTF|0adg4us`j=76FW+hTv6SrowsyzGoT*ypGrVbi-f?>yv$7WXS@??pwQ`zb% z+5`cg2Xb2*Q??`*%1H4;SRCu_+az{t_-j<;4sOkqDdg?olIz$PZH@9+*K!&=(x=7r zqczsxamx8xrujFwq{Cnc2Td z$XDyC->t<+{5bW|_q&#mj6zG?`5Pjl9#Y}3R~?qAxk?_X%!YIWR!IS<_v;Tb< zmn);komPfZbq|+ppQoHotg8Aq-JA~g>_2{n-`v~`!=zRJdVf8y^(SG!H6!fyLrr7; zHP5W=go#8P101@ua)??S|=sH-fm~ zW~G}=Y2nqD4UKZ~^CFAcbajUog!^FUW6VDw-F+QD5gx@R`MIa=Ai!R(q=tQgvz&E~ z(iZ*j$>_Oul+&(D=VCRR?c2d6zEP*C?3 z@kXq6NwRv^bCq4ewF_{OS@z3?ijD5;1!zt88+U%ir}X<2Do%-PjsDwB9_UvNyY1n` z!UvWUp6K*V({TaWFVN724aH2K4$IEOZyNj{%kk1LCnBmlrAwTz25F&s;dH(rb4agG zL3Q)OqCV{@?*fdq;f`s5dznW4D!RAWW?YhSpB#ZynI=a_-C&^XHTKQe$6W8hhTlGS z1=HEm5E<57tcHmEy&-WVqJff{c8&o)${NO!nHQk3Q&dwe2xhV%uiYuJO-y+Q$A~l>FoCa1e+h?$4@N76mf|m=|1m z-~h@;{eCHwl=1kMsXpbi#o&zG;Xdjt*HWn=-tVO;-wGTf6tDxUUIFnI?^qec4C1W- z&Ll)d`SplS7e#erEjn20 zw)XjZMjE}$?l&mlvh1|G1_Q!#$uJ;9j;9PbI7y!Gv&DNrk+T;k2pF9call)l2C zOeP$^OSVE!SjS6+h9*NddETvgb2OZ~oQ-GM(b>s%I9KQS`QBqa%qiE}{lob}T@fM3 zeEy58K)k~h%ao2NO1Y2cy{Qxvmd@LfW>ZIxXFV5ixlfk;4%Z#I?4mi@<29_}$wp+D zKm7ywJ=&78lzcoGBmVZD4?(G2(4fyf&f>|b~c_;(wZ!esq8n{N~7??BiD z<~?PadkGdhH+l3s5&A)#JM+Ps2&vvl4t1y>9e#Dr>mo&vv_Z$-U{q+)Q9ABZONS!} zqDMer+NQcd>@|<~Q;JA@3u>@C3Or=Yzn+8Zy3X z@eYf0O(i>egz~omN+Bv(S~yzt@0XqZ!RuCPGOBV<%H|#_m7w>J7fgcEg%mT;Q`_f= z93vWzh86p;n|$4kfC;RDC(n$6|ImGNhQPSJnUd5P$j^Xty{nDhr$XyyaW%0pS@(rx{aOz?K zyw^PqYfR#xkU^cO=yhrUO6uY&q7FjOKT-Lm2NxY2#2zl~lbW0oHX@kD!yY{~D~Y#{ zdNY5OOg`FY^~_FWv>;NT1RJ#(gwNi$ZvdRAQfkCs(IsX9B)!8Jz9V 
ztXQeN0d=13)IhH-hl1}@Wg=8T=uowcN&)9!{7X6P=rm)v-$0NXU!19cmZPO2*y>%`e5&Hm{ic1_E7=WWXgj*)ByN{odS05FTe{QV2E4L&=TMZOkBSH&S zkXLyAH06?J8}O$1Q?5ejh%efedloCxOVf}eq(iGUkGlVTYPlt>-ukjpC@i9h`D!)S zUI1SV<5b8PiI$T8*}BwUB8&Yo!8%T{W`G7DJ``$c%2+jXE- zhQIns-fKlunO0Oln8o6=VY)4BYrpLezBvL11c`5$yGZAX1Y7e)`{&7xc0_%;hD)%n z6NzW|zEd%=n+DkM-huyopb`;I$zyJBW!aJ^Jd-KDm@;&SE{_&fIn`9S;XkZ@Z^>c; zHJCkB3Zz0UNslP!tRm^Js;o+MKy3hJh_^_wSW!2?DI#x1reZAq3I|r!i2oG-3n@wq zlMw0+hH(i;BQZEE32kFjqFq4kzj`glTQhO#Gv&KFDJE_>wD(#;X_omIWK7RsPMx7p zRIFL%G?51#w@zGeSXTSPQqw3aY$UR+&6U z%qyaqaS{i8Jaj>~6pq^cb?@V!;IO4&X8frD3O({IJW z`lq@Sb_BH4_M{@@^5I4?*p6JCSvnB?gA1a<(3BasT! z`G<8msa?KUnr+f1+YGu_-PY}w>z;6fMW!Eai&DBjA(i<}S$IvLW821R9=anKo1n=! zh>?vNh?^VE_J1}|dsUwTV|$7#Qcv>(#Iteg%z7x$l|&<46--QP2}wvXaGz5;HED81 z++!Uo8#YN&gV(kc<92Cu-$B6>A^rQtl|!vZjEVmhz|-2Ltk~}d>GX>MTK6xiJl8St zxjc}mn@$N}&9O}$lIQO>jurlNN`nRU*hi!2|0+`Uha3Eqov6dkfkT77pGm3Goe2k(hLJ?G zZmI`A*r1?i$|DvI{WA;{=x;}^Vn7w&vNn9)Hn_(EE(=Ul;Jv)_@!A8Jo=T_2`K)ha zBLR+u{+Wp0gJ^&w*}tFb7s-2F_il?t^*;vm+}Ug`ixJ?e2liZEJ}=bmlqGCqpku*hy&vVLxA(XH%st~8sL~+X40?>75LkIr?O~yj)wt;YIHJiL6MYQFghUUid4az7`B$%^9;~Zn%m{fU@ z{1LD8_0R2qeNYw*5CiN@yuOi}4O1C%h($BAKPg7gkoRu9#Z%~?O#fjDfG{ZBP6DEr zPZnC2=u%>4uMH9%sCOAtk0buRWM;w%b&W5NJx}D^wks|F5Z<##47nfo87Lbv6ceKv zrkJ*+RNWR&nZrJ}Jr32;PDA47LWPH|C9Ss)g-TloGNYCq4K_EPrAe@|Q6Dtq(h;Am z;~ej@Uu{~i?$h8P(Z_qI+)w-La`7WQ_WUuQhi}N;g%^Z&H7a6&krZ%Smnox_f4~%gO&R(wATRhMfQ1{j2Pkh;V#@BTx=|BIIWaueXlu`>`P(6bFXtCvkU&WQaaO1 z8SOdL?LxzTN}-#3>x#M*RYumbuqo`Ufd~R`USq+#4Bl`l$upssR{`-_M#E)u@TfK- zF7ZJ&LJraXP*mm&B`dz!{!Wl{gW{kX{Vm5UK(ZhNmQnaZhQ|_N3`!O?ri8tY)u5HE zdmi``zn3gf4yBi@n&FUjlQW}O;78C$bw=nTgAhLXvK-TX`u0pm6rqa&9F1$EssqYi z-W_kB0TO;FbFqm&3onlo%=PMAuhv+Fl5>N7gV1-m>@cBP>ZqIjCHqfxApv4g0st*z zycg>GYpjS8${ls%pjpNm{T$;cBVolkm!s`L0=7qINE%Ogy zzD7V#(0&@G_L!wX3(>{4wBksP0xQvZK|KH6jRvHhQSPpMjlvb+JbNw>00nGxUhD>iDVD0I1s9W&`CAZ)sGPD6dI}U!yQh{nLl%Dgw_Pl`GVkO z=xnPC0M}+*H|Vf8IdXZ_MSvVZ>)H0Vn!oEVC5Gqs^5EEnVK5e@&>Y4UW|c+1`1k>Y z&Gf4sv8%Xcb2($lv=K5i_Q+j4eZX)ixQrMz2Us}pXoxTM^qcWsF+~1)Hw~(pqg>H% 
zNfNL>@XcnF%Pn`{|A-3wCFM)n^Kll`=K=wDtb|X;_PW!KGj|rtMxzD5-=(mm*oSA> z=Ije9*GlB@=wuLx5mE+$qiztzfirw1UOs&eqt(5AqP!}XeQ#$Zt@-#Oqa_mC*PVsk zv0~(DAQtR(H&hFUl>#=50{sQbP@x%xuCEZr3dq+#rw^lAESXp`ZCCrdMDao3??jX-qcAwKSuoEO8LdebDlgMtjYpu9hT zOic=1^^SqB=&rCoae@dOgfzsDngFLx6&z5*d3_WQYV1n|j8C0j#(x15EmtCQen1Ei zKcIFf8jD`%djksVPjTD6auVc_bOshy=MSRcardGGZ(bAH|zvj}@X6BHYN!`hbK-GnUi~M$;{dnU>Q3 zS?4TC4$KE&V?@EU7%OS=s6KXzL`Zk8u_U7{v%%1oaSS@-{K#6E*ygR6Q*P&woHoyk zmKHn@2pQsqp2ZkSR4w_O7X6ub-3zL&KS&RPT$eWSCQl61+52U*^Md-bM{l{yAimw& z&sH#&ehCv2GDCzeom6P{J>W6|LgI?tJrCq z@@WhuYH`*7#Vs;=k+*U)P*%h#o~7=KR1P5Il;V}JaK4Ta!8^1L+IGOp0t^N&G2Fq; zH}H1l_Re!YXew9f4^%-q>G>cMJ`O^5Thz@wc!vw?aq=C@aw+Z`q1W;fr=#pxmL6LK z-*137nl0JIru(pB)}@yUpA(;hA(dk^v@TC+xyHAm`Z8wQ`ecJ~wq(m<^$M{0Pr!!f zKKNrYdwt7gJuHZ|+*72PaV7_Qj#HulJn9}_rl^VN6|>s81G@x3P-hW=^Am*-Xa6^R z{F%}~(El~LOPGK8e?n>Sf5yaU(%j)xs>`BO_2H-+$hp$b=<`?s2K`T)nx?-9FPdZT zaqC@~y2aWq6bg)+F#WJ+ja| z=cm7D)Hy6=b)K@%R4^>Du4AZwMnJ+`JZ01D0DPOiX{nUS&h8G;e7695ny{HT>=s$; zWe&y#!;J_JE0u>pH(R?1;Dp*g{CPtlU; zrS#w!ATRA#vyHyeTXKE?l_Io#rQ(@%CEvaw4zDBXWatqEJG>$lqkvoPO`vml_h5O( z3XM0lP@(htcTa2swfVU98lBL@)OB!$nXG;@fz6@Pk&UTOq5?3UqrVd2pZ>_Bob9|U zJ~?#Zx_X`-)ELZjTNk}RE}arQqo}wXF6G5vzKxnyR0DnOn-u8k z_Oy!px{luq3%R$%mewbuJF>Zhf4(KwH3L1gb+&g;`GOCU= zA68?fdi#0av9s16;Ch^&9E05|qMVQf4HJ5{h5#it3z@q)Pc8FDYCyHWOznvA`1b`-r>st`YDx#Erb zt^S0=3L6PY1AE$;p`Uh)uknBF2K;R^WddPs;}4vS{H(W7h!QO7O8|eYRU9dVlv`wA z8<8YBo`iFV487eTU@u#CTRnEn z6ru?adz0+5(fO2b36-zw7XC=z-wLlDiFPcPLI#tcy>FB9Z0^vyTrymSZWVIXPf~_>;AHyP@c}| zV%;_==u-RB7iq%7CeObR<|MHo-btM~(zp?3hq}jsL4D52Fg-iWL}T@ewUE4=5-dnR zMb5B)Or43sWcD`eM>pP$5^Rwm`6PfTO_GZ2EHcL=k;yaO%OIKPfP1$r%-4oQ zUm)kD`SpsNunS0uyc^d_ZQAMk6IR9fI}})RXDr*FrF8n`h$V4G-CB&Q1qZCk#+lrQ zU2iSj&?Yn-A7G~3Izu}8-r<(*@F+h7V44{C7Vq8k=UkLtcZ-?Zr;t>?T2;l#{;s9i zLsm=zxtt-wg(0aFT>j2n4f;b?7+ff5ZmgB5cs;&HPp5L#IAt8+ zNk6(JL=N-dVISf3M{t58Bwl}ny5n#pc@^yz$+I9TDx$twZ8ZuW_-h?w);yM z>{A&B!>X;^!B7GxcT*u^RZ%TcPLhonS)v3;tVrFG{^!eTxT2?@;-Ti76yMyJL`&7& zv2e5b1HZ3(b>mukHr@4o)%Hmbw?XgDB;)zwgEJ$rLU?t%}lB)av_27N?Ca)aAi_ 
z2dJep=J7P@A@EKBGXUL1WT!{bE)9Sr;aK2+ileB7EH~|w=}Y|H5hEm$E*4iAUvUS95Vs6Fkznu`9AYiHux;?oWI@s(TI_w zN>hD3kbqnTM2J!5ftiOMO@_#&Sq-xO zByhYI(Gc4jo|amDIJ8(vRi}93Km--gngYQ3{`y7|wc^6k(62Jd7?r9%ZPJ{)wU}68 z6et#f!hG4ZO9!uh=0s(>x~3Kb3wRf4g=QZ-BDz`p@7id zE2Cr1c3D-qIC*2D3V#mmGS3sE`J&xk7Q+2IE~>jMWI-RT)(|%NmUX=5>HDM0iyY1E=O+)%T)Wxx(ycNCfIZV4aP!powv3G^WivNVZP6v0yEZ0#?maJ8Vif?KZ-yDF;~ z90$1F7QN_oMz`s82a;;ot(P7;?B8L-tZv$^Va;C=OOVKr#b3bldmmh%;CwR_*b#@W zvExo2^_q!g0Eu1zv=5o)wLNizw)EpDsscXaypGz6UX24+BBEIEvvWVi;}tZ@l)A^i|7tJ{Xu50e-BBD!j;G zyiRA^HctEIUOXaS5vUlT0{1C1&=O%&x`n=cqR%8s9@HOo*2R4}>HMvg)sTV0XNhB6 zr^OqU`)g28jLt}=DCmGQHOkuHaHrhE!9ax#dhB^2r>-s^oGT~Yf}+)gLy+v*%6Df? z(nbs5I54xS`I8vFx4wuyVC$fygF&YrjTvC_V|R$tzh4y8YDI6lBel36pk)U)*D88! zbM>5_&)X8--JSFPR6j+jzfxu1tG=a6@~C|PW3bI8a8xMjv_&zsKBk>m-Wb-c)*YWi z>(Z?JiHyo!jeG8KFQslb+);g$KaqLM@pL#cwk8iG4^TB9hUlrkh@?l60F2m-)h(et z@l0qe)98=_c6^=Y7Y%>m^m%7Bss6e|F~Y2W}Y$PwuNn6Bi@y_7 z{xBFA0dC0%;|neT$hSYwV`Sz6cf8jB7$ywCux#*8d#OB6P1u8sxryNk@HaBD;&ug)d3~ z#;#OO0_TnEGIeCK6QJ%dA}AZfbJaYwy2OA80IHQo%V|h8`aqvg<{9M;4_Rpt1ivs` z+E;9e4kM3l(>`VB^(ywWUym^|54ZOoWH>nV0#`FQdLD!HN$Dof`{`)_>eabE8@!Tmpj;F~945=rcc0HeIRj^SW`fKOO`U6&svij!l@YS5n$62?1?0JHiR zzgGUwEDbV(M)g|V(A;sOjXRi%h-0&SMJ5sV&FVU0Zjl{VsKHP6OAzAhcTe_o%Z?Rx z;1Hv!TVF;yEe%%~xgFYbA5M<4pC{e;Z_Cjl8%cXQA`cCjAD0tSlE25EB>yCSZ+hFda zj{YSJ?_VCCDl{vF`%LG`j+#s=>0yvQ?z30i-_;!f8;7=OR5~%-s%|XBZB42vGTm|7 z;%~9{Lau}a10!o90o$At_1(dmDbSA?PW!h_OLgZrut+jV`0q`!-%Orp-AH$}*8ig6 z&veW{a1e#Y^kA30!UMykk>dip$P_LlUOW+q7vPG(vIb%NbPF=b<)Wrr~zWKEAH(yd%GhpD0fKyI- zexfkIJA!%)0(o|n`1PNx+i@n#NU0`ERQi5s?Qd>_=Y1~>Q`GB!~w^Ic}4*_OVY z??f4=hV|#l|IBL!S4-%<5&a*Oy>(Po-xoc45kwGFy30#A7w|kxsSoX>6kKj?F5}H$QGO)p`*;JJ`>^Odd83=(${o0|Gd_;a7ry za()1QGN&a%958vgikYU>M8R1QzZRr_weism`;WN`I-AkYQ7y(oNRV2c`Dp!-Uv=xg zi#@T_^zRwY{Kuc2FS{RHk(NWytUtR7+M1v5#Xukn!Tx=wZWL$!-v}XU9|J{EQJX-b zlp3n}Uux{|2)zm#^|iH(+w~e)%Er{C{ixYdt5%a|!%oY$ar8ZKq%GCn9<5A6UQEc(7sFs#dnY74YnQ z?mG!Cof1sw=@>VV8+0o*<=t5ah5m#y`OyF zkM?cG8Nvz3?1n!Yjd}a~OQ2rA1I|J|$d`Z-%K~a<>hv4B8?Rq-R$q&N9pndlPGRv& 
zZ&Uco9If|NfPpe_5Lzl^9D*NRW~#Pp>Qy zc<*#`jrIQ zZZ5JOCH#I3+))CfpF^(q4c`OL+^{itH1R|bq<_0qBTzl%M66qeCK#9i;vtdUMs$0>zeAV zX}760U(V`Jc0DTs%4c$=83~)wcOq~dPko+JSFC96c;!&qnl5z#WmI4PDPPL@`t_+$ z!t{XlPFFPl6{q_YW}c~re`|_a^hGHTv^ju~IMzf_9z;=MxDF`~!!Z$yr2XGh6tl{7 zB~wE{?}Cs}Zw4ebVnR%cuJ_;1yPKTu?{4dgo8J7qZk-s52F1vV$-QHz0VqcJ^7wib z(h;7O`a$n07NrzHd;~XecY8fHbMk=g#sYhUxefxpq*Z{It5P?ovUA-gmaUXG&28A< zQsB`@xb|XceoghVZtQ%q^_)uO#Veef^sV#blS{>fIDQ{La?Qk|bsk}1N7xl2>` z6`6q~O&NWy>+*pj6o<9nnya36iputJ3UR+?pL7RGyG2x19~V1x$m^=%&6EC zbLt+S|9JrdGATbz8^b_F_&2%yzQWkM$uViiHy4yaAjn=^n|fLTQiR2bVqn32%LtdP zyitwuckWxE6DGrqM$dn>CI!1^p_}ZBy-P4wOO=S5SrDV}GK*s)erYbnWRM)?%6T^#2py zoaZb#Bv)p#2iqF{8F0eAqHyNhjSMvVxO5Q*j|6b3Injar$;tYm<7_~*9o+lyYSR(0 zg0|MPOrV+tp)DyD1gm*-Y_>P&U3lG%%dwtvxLNS%aAT#lt)|r* zQmFfwOS6O95XfZ*P7&$WFa6j{$mc97Q?O1-?$gEoj~7R5&#TvaDHt2m1*RNf#=`pW zHmfaZV_NgZ|HEi^HafC@Y|Jw@q5 zo)l(MA|)CKDi}x_rpoezr(mID8@gud^J=&|S)a3VRAJ_&&rB{OKHnn5yxjzBeKvoO zLFgUASR8&qap+;KnX4Cni%6y6V0$;XEq9>{U!Ig7+?)FHmNl(u7&ThFFj#bg*9})a zc~!X_TE1k;{3++IznA;x_$nOo)V%cNQ&xhl?}IGvTy?U_(cLw5NYa%sM=mJGI~gXR zGjN>}6_FJKL$1o&quZ?UGH^zN;)(IQum8u2NBvGbjt24x&}GA>xeF6a!)wKrN?KDL z>AszT;({sf6QKGN?^L9K@q|aW_S-&!sMXQ}ygPU_r~_7-wrXH`H09r>hn%@)C3LyAm!Vsn*y*Ch9KGwE~5ZCX>F0z9xxFthz zMBD$GDCVLkp6EC*kW1;O0j08a48h4l--eW*yZNOyBf<*3M=gv&I97+p zkRbhk7n=JFs(j_;dL-xyJ<#5v!ovLl@@RHpahqCW1i0NuDXSVR9CQ>cj8mDh`Jbs% zm+uZ9o`b%Rzw5O4S5F{tTAd#;I$f)Ez;H3moF0=IY1MnNlwu8GAf4?Pn-+Xy-7Bylt1a(B{{sR9+1(^(iSUD_h&NdUmD4V`JnxdNaB%6C zgz7bne*o<|oNhcfrq`}9%ATf@>W9YfqFm{7p9i0JQ4yA&_zuM;8XPpNlV_cdgA)z< z#qzo?s@}$MF4O00i0&98McYoGR9Rp=gywPe{Cvj{NW3@q%GjROn_Tn-o5IQlFBNkN z+z&g*mB9t2tzY@|Lg9H(oza{L$^fkGvz|n6g~W39J0_{j7##~XLoO$<&9OJ1a96LR z0?5W@r*0^0j8MxOg?&0?oZ|uJxAH6Uv*jWY^@gg+)PJh$`Zlzgmm}QUKUpTTUiDj{J`+l+CNuwsiZ)ILE^dMTCQ2sKII@8Q#0RMq!jid<23NCJi7!*4ugYNC*~lav zgP7KCj<^}>bQ_-%Go`;k_-R96?!n-60__Azu8B;w>CdNww#Y*Tx{6R7!josr;ud2_ zuUWhi6>_eq)5jrl;D4RRqF<4@fG`14wX%wJA@`kL0#65sR8UKfqmfNV=%s{^Ezn;F zF4zzqUyey~ZRObS89->@74j$@7GBCTT7Jj_!ZtG!lR*T97<^Hhlr|GnxAM4Z(1z(# 
zH?w@aO7_%6yTiaHkwXT+4fDY$Hn=Qf zZ2C(dIu#{po0Lk>k(3=+BKDg1JHTe1;O0OGiwq_q{yG0s$YT66_{pEtT+FEy%IpSATXDH5diW4RY09-$0$L@ee19M|q`;-CI%Z~;zb z8MEF3y^1$Us9&E$vyA|J0ml>$ggasmim#?W?F{}`3O zF`{~hM81~-95gOpgIy$|L;epiYmw`zVYjkapOx2m-P(C;(pA zO~V1w*l}FUiFk;Mc$m;{X{!=y4#me0;koF9!T6Te{mQaQ6rf9qX0d-;fi zh%&&k@Gg=kJm%C-aQtlOy}<$6CziGf0Me}lMPZlm%pWPxcz{hD#h{t1T?B2G<9#1J zb19%h^*!SOGf+gNE|c{d`9U8(qkN9~OVw=aT5jH6ZXHI}0TU1+2Cg&LJrIv&7SUM7 zsYhpv2&1$cpWafcjRV5Yrwvg;+B7omNnmXMdId5@Ut9$EIMWZsAZNkf&$%`tKbc&y zs6&ZpoA5ljM#U4_e-=yWLTQsb@`NHPfa72{xz0%T+N`2M6tE?!}48EE04h$Nz80MFn=^{AhrnCT_LWoukh z`n_1BpPo6mQbr4y)(^$N#gFA0j4;G6ThA(6xaTNANScThmSO_IOZ@>TfPqgfPD6R)K%Pq7*?1q}F|fh}VGKY3Z4LL{s{r$_ z+OGk}g7DLr9E>3Y`VIUSbAuL|9jQRO4I_F~6x*-;?3cliho;2hpv>bhKpQ0;o08(L zm)f@9>5dCOc_d3azGz^LJJop|#}z}2@GF56-fl)bo_^pa?em9K0{U#$`0`(0A;}8H!zc7|BF6eo|x0=+v%Ke8BF%>v#y5?E?{AZnP0V0 zkwdfRR+&5=pgkl8X^-&{a<0KbeN!3Dt~k2*J;#u`IH3%src}h9CN@DCk8;3TNT==}%gA871R&>D?wq^yOAHZ31$+I3_Py`(Ia&oS|j< zGqp3YKgNhaX46uv$2Zp0i!fF*$iD`NZv{=&brHap8znsDos4H74I%!#9@Z*RD2(A8 zrZxQ^!LA`{!qi>@9N`V5wFUk=cqM77C;OWT4jvhLptn<@#`wA4&}Vx9$SY=)a!_+& zaCg_#H>e8SC5X-;>{vdqGjRkdzwC5^Z9)zhZ{x-T{nVk$Qf_2blRHlF^+axS#{GHyu3)2GjTiXUQ|L8kDm#dA{I2{8Obv zFV&ta#g!FTe~n8X#SuIf+>@|xs4@7}+0yVsy>^*@ha`anY&cg%68MVnIILuWOd*|y8IIeveEoQQiUQO=EoUP7 z>OJAGzCwWEY*><$&U$|?rx_+#gumU}cQITEFtsiIsa%Rln{uP_6FQidF^C3CWoXgr z2IvCBkra2nrRQ-3#I`4Mf&VT`#=7Qj`!=x_?I0sDXy=gwrg8B}PY!s)3fQw?4Ny2d zKHtCDnN{chUYR_H-Pqnq*pAp>zT)>48Y+EgeC3;w-t~GHocaa%w7*Tgg1%*NVd7x_ zg>L^U$Ui--7ThOTq^}&n>%~YI(@aSn2>li*+CreecJi}zOrxIxkAj1`hbPDZ(Kz7*0o4=@@=6l@zG-L_<+QvS;<;anCo{lqoR_LerNzfsL9Od* zYywOW0@%MKY-~qzX$RPxA&Tang9Q~W3GiL-5$Aoiu8|J36Np}v8-Grc#tCi)>C2km zvnj-HK$lt4? 
zZ5x9i1{GpIVi&z9CUYl6taY`V?l$mJfs!6%EYN5bPZz^Uu@uh)?34)1@C}UK6`-4n zt^=^`Z{T4t+oe@N1yI4Q+w829yWWLeh-rdDg<}x(s%TfrtXF)vR}s+HWfleB-cxT- z%G@!ugN8M{P2dpgT)bZhODt&#)-`?ALteWWGNB4y^;}kWqoBode}B+bzU}gRmw%}X z-E4ATtF9L~D>|&AD3x*^vQ|YQx?TC5D6u~fJ4?!E=Xq5_xkl;YUjx}vbeVU|*IS;q z>NLOO`P$`W8+3aEvN9@Jc++Vs1W1~>xnPI}Hti_D{TP&&gM`*Jb~2IVQu)o5 zeUf3kuX504U%aqEkFxHV$k_4^j)pNzj9~5K4M;s+%wLMJJPakGdbvBRP-dA)Z0<7Z>JpZ*OTVNf>9V-0P+%p3a zH*O}8ocW_P3lD8o8&5{Mz-A=fiOgijF{p)$c7gw1wp!9E1ADZ!{HrM=JN_LRZDu5Z zUdjpv;EvTD;VevVe|olqnnIj=6XEBG;OpP{Gwsk@gEot8N$`B*nROp`#{+U~^?Wei zjmR_56CVgmC=eGxTg$1^Ul$uW#`Ml_mRDl{mE5)3AJ6G~mX_Qb@5iMyH51GugYT~* zJbUvpzsd`kt%i9H-}!Zb`^X$l9g0Nb(bjy^BP_DlfVUE&E8o|DkxnvSvBFDCXj;Cxna_1zBqv(yvAGER-^ITO5WROe%I$7{soFBT{@t# z3YFb7q8S*&7c7_l+nI7FRZnW;lN%BwafH2Zho}4FIyx z{^2)h;slLNQBFs>@^!~T=h>&b1UvJD2G!?YqHVf^Wj(~v+^z$R1%K93osc0)mqtE{ ztkxPQFDjEMVcJmH!8kQI)?zRmWd}I8A&`jST>T)`A7Z7#Pq7+#-sA%y0t$L2+G6hU zP(1EysYhzIf_9+Q7=#Z(tE0J?6HB*z(wBHK$p?Bh4II#@uXSbMpdwtd@s9y)kr(R; zvc&>gf7YYlNFNBCx(O<~aI$Z!uNY9iVDy=H ziEh8H7R4R`9Wdi&*d2(=0tPir!RaC?C)s264Ox|{6lk`x^Q};fuhFy3rEnoBQAYHl0RGn8Ze4u%%K86LxY)g!=OMoY z)>H%b)4?z63+`n6{!ZpY4i-R+YP0m|m;RU4ORu zu;&Y=3;ex1d%;I`&vLgmq)J30@$gknqw5TPw{G5@Xj3{Izq< z;!?TUh9yqDEbESQ$IZ?weX;TF502OB4;pPVx>N(Ms3C1i+e%Ly*3;b z#s4jIts%IlN4G2Z^!l$+1#JM?@D4$QeE)zVQ_2 zk+rW67lgQI(%fg~)MK_QLf+-G^5I&^f$1IOG=Nu28DZncjYF5I-Hi2GANhpVS(Vnc?F(Og!qx~^;El0DYKMsaa5e_Z>#cD3eub`ODcI*Z?uNm~@ zsw0~R#m7VYad2?}d0Fu-1{JtX)JsOlUxL4C)3bwac8 zqQ)Z6IShWXKrC!~%d3;ky315%*Qpd0gqs=+mv{$UmhIEb&L|K^zf0PSThW`lR=#CI zF@F?w5M~RP zV^-#ah3mj>&;3gzFgXAP!eHWqj_~jyKP&5LUTnoBG=9TbcC*Dx?8FOdXzhzjNGwRg z<09B-+_PbNO&XG**I(HWbpC(q&n;Qi`1W(aMe{$~G^BsG0-VY0RY1L29Dg2x?f)KA zkcgIHNrFMrlyEHLHo8eG`|w0fBAiP8wRM|oV2D)`@Nv&Sl9KYN3cL?6(l!YPrU#zo zMfdFh42Zy+4Xmk~)v!9BOY4j2fxlkBH3oEVxOxoZqv+h6O~^Ru3ATtW7KIYvQHAy* z84YmVq1fVeh_Nk-s@^!9I<9qJsky$;P#XuULo8Yfva6Qi%9;*|U!(b*aWf{-f^fb7 z5NQ#)af-Pm*a#3m0T9x8O0QY@TD~E@4;BZqA%De4GJJDFa?$($InL+*=@$t#1QewV 
z;F~~yxJ;TMHs}j~qndlCYV|-;Nf2%My)zHgU6Jp8ia*g!$8&v-;=(jo_5hRq`D~4_|<)eP;cP}LuYC=jg>uYhq=@o1KY%r z7cZJdL4(dH7n**j6YiEI>tfhayEG_%6x!6AE}2kKan;!_K^x!9#Q^Hx^W{9+i>}6J zl9E}Yw$pxu4ZaJy|FmY{xas_Wjyt}f12k4X>>Ktqj#IBa*%VMob0q+{39@o;vxyO5 zivUnO#tP%P9ITzjs*+|$VDRe0JrqO(ch@P%+?H{b1mBlg1w$VF0!KZ&I-g@nIsphz zv{h*Y5t>UvU!s0_l_{R+Hv9& zB&ua0^`VEbQv0#>VILgwFL5JWBH2cXcC!Bl8nG8yM)H2jNeZqoOFFP83Vwg&~OsgfH?pfq=SI6vd;OCTubvHs6WLC zqRs^tU)_`D(QW5O;U1!K@!NX_T|zCH9HnY+u6Z1=Fgw5uUcxdSSyMO(@;{;p1$62g zA-ua*Q}MHs=NO}*{8P&o7b;tzx+^+-BSQq6t$addkv28|Gs3zAL)%waw~~8Z)r!A- zvkPH!{MGUZ40w2errXvx4& zfIu@l{(RzehMoFaDkyT5Qs;pl?(+u;<70aVM@h@Jv4>?jEf}Dp23ky?vmVi53t}n3 zW$QNrv?5N?QgTfOnT6J#(JU9Z(VqI){NLv2>BFvAMGbp-t({^ zLGr=)AuzM2?G9OzGbY}auT$`S=2r)E)8dHRBDS*vpU9a{fw$W1EkHo_XsIMLy_&iN zAmr81^-vlwvZRn`_gQ|x%nm$rlx7fiTcrCB9mpT|0_K2N0X)+}Tzd;bb?#FKY$xJ!``p;P3DRdBGN`H(3BnyFW*CDuc;7WPtnK0x9ZPOck*!M+D zONL6gkABW;kf#gk0bliLE7x;0s%cPTTmrbWL%q^P3X(V@kLdja2SCr!(pCU82L?$7 zciR7Y6Sp@?lVh8J@1JKgRb!e&Oxw;0!cb?Pdxz<&o>R0g+Jm^XFhoPzJ8s9Lz^M&7> zH^pS$W)plP^En;{{!N6p;CQV8i-<%@veWs{<2S+ImJmYY$kdiY)}drkWXe0$X8}hO zl#J&Og9ctXx81iaJ1AucV;e5aM$%^nNJJF86kI(;k(uA59=AoJR%EvSo-eHbdm+@FdNjSF7w}8c9V-!B>Gh^S*wXrmdGRX0|YIE5C-}b}Uoccs+0G1-rD`*cQqg{DfayqZl1p`RT{s^zpmDTdLS$ zNaky>5ZDOKch@wS#c~C|vmbX+Bzei!Dz<#?61O&+HYc_}9YOdp7feb?9dvJ5zZ8wp zDG)ceW+Nf2o$aL=!42qs$Ul}OZ{$LnqZlggoYZ5OUhHPas-ah8lt>8;_iw4j(pr8Y zp{-3vz{BPgBB&j*iC_PmoZokw);CpJvbo=y3|d?(pc$BHhRbv`)1{pTEk5aA;VKy; zIC?=HOO!K7s|X*1QtZrDD?0c}OW|#gybvPgc2189qquu`w#%)>h(Q|iEB(WL%>mCQ;USNE0xu}+23;Ei6eV#2{0*`oj|{4_TLb> z1kR0N*WYC?OVeU;36XXut%uGqoUEyTOwO@>PjxXeUSy{IiG}TL@!MC=ffRS>>~*lx zn;z!%$Fx~~CSO&MOUCHBDWE9>ni{1#pCY3h{(R>@`cw{Y$uj$qdQD3X_m4>Mr-44M zIVD}_y*R{GJDp=zi&0wvA>Zrp?|Q{wW?#m!CzSrM>WSXvO4(0l8$a4F>@2Bu%JuQr zn(3g~YD`}lAGo-Q{;YS%^H+&XOtAlh%G}((x{6nNsYrfzXOB_iycnAJmf6Loq1kVv z!o1UUm64M?~ZuqR^m+M3aQf7-5oPKyM*Gl4Ue-!$AZ7j%~@Vv-GMA2_4DUr z0r&X2mfhihXRc0lFKl`}SOTx#8kt<+<~<8$Si5Y1BBsDdNE$Wiw|1Ezbvc~e7|&16 z?bbYbZ>NukE)}Op)$qAdRp%Mc>GS7LnFqZZzBuyE8k)oT6n9V4*?OC07Y7r!q*l4T 
zd1K9%KG>XKO`1zdCnRn0S$(on=ug|j@wyQ5Q1sXx7vKfbMYJ3DAT3ud1^4r}^G_J-Fp$D>EEP*a=j_nha? zx6sBe_mbI{B_^7Vn!@_|=BEm!#lEsvfeYQAEOgIRAZ@^#Uil}h9)SuWRIIPbdZNk0 zoLhc9g~+Ln1_ca$8Xw(TY^4+t5$PMMXziPz&(CRIao@bFISaJx_<&BtWA|}9OdM}Q z3YG3Mzpj{St`W@paFwWjV8`Gb$0k=lZnC&}^dP^EgXZjx#a@+s3OMd+L5r+KeO!1stpn?)Kl!=cQj)r+2iaUE0wiz2}qP#6`eo!U66xx@V)jnIn zw(Xd(*owv8REOu=GC#k67xGLb6wGH_`OY6eKX3D0FZLtH_>$`uHhP8LQ-tevW4r1( zrz>8mzYgRq!TMa!|w3j z<_(vOs=^0BZTuer>tdhZM~gZ2F1&t}`};BT?r3~$>Yj`5UCs+N`_Rst?;`O1j?}8d z?(ni;O0{I^t!<837c%6DXeRK4<>Eg&8%uw%~d-3c~OFhQH(6}@t z=%#3b!`}xxLr*LC-}o0<0`t+@dA;j}Ef)$dX@TqR0?gX;2>kmIIA>)X1MX;7F(Eh$ z%9>WGN5#V3+7BskU8)7291XZ7=St$O`8*&qjN)nUjx?7fg~P>GGa&?zvzh6T&Tt;MX)T^c-YV=FnWS zuwOqTx-`R((HvW~KQGnUIT4h~kk-7OrB%Ljny*7HNE1cL*SW4a9@v3%84u5Eupfe? zT2~DfDrP5>KH3-l>_TQaQMQ+UF*u95GjnO`S7j;0_j&;)ziImbG9qHj75a=a6x_~Z z$i+(d-{d$?MmP2rl+0?Lm4U&CAL9+g!>K!f(($)vTqrD)mXLt5Bq7%wg_l%Xbh|m; zUNJ-3j%rPNpG+@ySP>t|l`eY(w)%GmKB@_IDeqvWnB{b@_AFnff48g@nt)>^U#p@;^e^q%8LQbwKc|<=gv7KoL zO2CoQA)gW)o^$XsUh!}}@w>)l|~;-7aIY+ao(37r-%{&lBx7i+cF*3!P5=jJp4I4At_8N-r1%W)EinH7x2s#UWE(y0DNF^ zYFtXs80WSFZz3T&u?eeDUM0aD2eCy1PZ!_CDHxXhxzK@e?Wa_uUPk;!{&3x-6z^@v zNaCDsxPY~EvFSh0zB^l-XE+Jpr`mE{ zpEYuNYP~oyZ0Rb`@RsnI3yl$#n%%TKoXJt-c(Y;VS6!*@5cN`OOxEa2tZGa6Q6vJYtP|zfJH+? zv)mSZJMRDJ=}s>qOYk`Gob9cwqd$r4wtC-fC26An)S&qHN71)Nt^2IuD(&IiQa98f zt04*eN@5fd8-9*T4gHqvC?}`RDB8{-R**CEoGFi}Ci#c&n%?ME0<-VwBXH{?! 
zf``sJ`T8aRnq9ck^d}O&xysxfZ*Agp>D_d4bj-H@yVjYqjE(K&;yAO~v4fs9b#CF_ zW0VHr1`E)*I~jsvt{7Uf0x_P`zflf?)-5Uw`^)eMXIn z{U!~A>N>376gi4A5(8+p@xCHgCIET#Xb)m!dvPD1Wj?n(dRTYaLyQ2Mwt>xP1SJpq z`=ckn?yx59pmXU&;G?5clke%q$OUz~VfDsPN}6Uq0jI`nx9UNK?y?Z*XFCIf)!Zm$ z3MYR=syyu&>;B|L)Hzzx8tQQ3V#IasSIE;jc4I}Vp>bQ@wWg9AofX$h6R9P}+Md=TYx5+pWAM+F~XJJ-!P?Zj}Injg7`4#zgcBqzVI6(ehe?cGK$`nGgRgeOQ*oiuIGJd zmgZ%An;coE+#J2Ck_@2t6}8^F623Tly1uHUdnTxp^AbMEKDa|S*_p_s%EjHh0JhQpML;)#~XtN5g00goMqy}Df|8oKBX zR{r6xzF7FoL2R?~#Q)Fm+u>1)=L^b5x9?IYngzv8Jt${Ze_F71$K>Plqore72mvI& zEjg*Q6;Yq7`h4{f>w^_>xN+J?;1LP;v)F5Jb!8?QNHrRvPi__MbC-ZVCiOm|Q0L&( z4`mp!e@l|PUJ&w@QtCluE8%6=Xg{uAc**13m!?=>lbWz&aQlV!K>Yj<=Vn#s_dRda zzWMD<31c{7y*;(qc(ZQtc0SDOY?JU{KDP7X^tkl_gbWOh22e%{Wbnf&Qf3b80WIOX zuFi|SMrugVyl=zdjdMo(S8k`?SLH2H=AEHgTOkE`ubnS8BpqiCkSKiLpl4i89$#GB zvVz2+trr~+lTcOSAkf9HOLN4lz5M*A1TR*Pr5Ms@NvM7s18oeaHmlaiKh7O zpA?>+)_BHadv|(05B56dk_o`tAr!wku)7&SW%r%Q0i@NAfjO6bjWz&)06GP=h7ihC zOOoO7sVn*0bd8ciP{|b0pJ1JC!2>+XYnLFkenr1Km9 z*1vm;C}Mu|4nCNbg@&7eAc&}u40*ymXtJWor5Y7I&xucLlv>!hSHEV#7~C!H*!MOw9+p#6fq&)CTBd3U%$C=@)Ned4^wzWY4GzS*BOR2EXuIjwjlW z`x?#BJ3psjvOpqHF9b&%Mnj-US&f{AM`&tA5dUOTv$y>Vzfh^%q)pz;{gnN?qdY=+ z!p~_Xzdz)JAgMEegN~=O_Q0j-$6Q?u#DoXK%iB`E-i}AnT!}JKuq|rLWfeW|NtVomHx>L4XblwO3WOAVo~)%Kqb0 zcEK_`AO1o((6YkCNPr${-M@bL7i_rCgx}>^JO0)R)e=3Udpm%pr{w?Kn+|1; zDdsP$G>Ufw7sRgoH_7L8y6j||AE|i`+DAGr!A?~r^@a+2r|S~+g4(qHbOX3HmPI)4 zC+#qN3~6+mi~de}Gh9fG>+VLrRiCDit@gvuL|-QBnqM_Bw=O$*0Tq>Wuq~AY-+fyC zmR8hM!W__l%dPjj9nJ3hZL{YjE{jcS7gQBm_%A z2XbUqbIZhESJ(<(CiTd9#B_sP8~g<21f;K(A6cD+0k_tEuV&tdPBK?o_VOnuWFSHM zh6FHP?`GzBYvOwBT%eXrmv(i*ZLdzr6j~|t{@EBA1iG3yAYlqW6tFnWX?lC4R^?3e z9jHoWQmZ+H^tAP)rDwbhj8tPo`njZ5zvPU#hUBq#F*%C+9fso<4WT{pXGV&x=!(2Q zP%oYp&&3MngaDeQB8CAwBwgJ3slXxMb7|Uc<_sqLx@%!VFiOby1jMV0FE7xR-e$-_=?0~&6J6BZvXx5|J?uiJ>gpzUuQf6bm?^f6oD8b1Ulln;TzFl9 z(p~T1hY^|J8$j14@jV4onL7Cw9Unn9PEDz<=Z;4$taw^D3zJtTS^0B9kRG68{0Ae&>WlS zkwHx&OpbAcu(pjyOWqcsOfnlqQcy(Lyr@_vi#!Q-hZ{Z>=;yMQz?Rd^U=y)lpo4O^zLzR4NQI4{qx2N>;;W3`Uu#vT8t* 
zjvS+v)vll)C?YwrshzY0sXQe}T8dL%%chFtH4!MC0OtK^MDr}fOv}eS@7n>_hOatE zQ)O#T)mpV7JKo>*89}rNm)GnSEcGB|a28{pWxV{1wb~>$Hm=~dBpG9V@Tc8tH)>J7 z*4TIW6twx1319xCo@qE`HZpairYQ0vtJ^0uz%97Z9s727LsZXZ*_vG}~58LmdzWjX{u?*B}SkfBdF!LuqRd^FM zTDA3QuarhrG31rloyRuq17ULd%iKs6t#Hpb4O zjWoFAuxJbx0Ia~{*Qjv#k!oBfXAUL9mc}U=3kBqFsp)ddZn)4^0(M36RqlPJBkiv+ zK*KeP#px7qzatmm2BGSOI}MhtsSvWi^7Y^X8!s~WgB0h|o74Uv zm4XoZ9%u~!cY^#Y0SZU36F;*Y|9xRh8!oi%u$N>(dbw&*T3_Hu(UTw5`M$M@5)8aZ za%#0w5S?mnCCLn!0wbeVh8I_gGx02Mh;a{xqjzs>CTzcVG}RK)t@INeRJV*l{Y< z`7EE~M&e(zgDjhi&v2yUXtb*;6{}JmcAS?U!C<%@B@zs=`=lKYko`v-la-XX#n*t# z1k`0I&q5g@A-v$evmjDs4NvfUVxYuVlpWW&{@8~1MWv69mINNNL$|N;g8O_5%vhNA z3i5H!8L-qG^+bwg^~ETJz-~klkV{4|h!l1WUc(cdO=k;KAp?jO4_ z44v#udtqHws_YG@1%YlFF2-u+AUY$pQ4a?+_)Crxq8QvEJW}MUGhl;2>vd^Z;K?`# zbE&fy-^BBX*N#MWMH&;YOfc#H31MqT0swL4CczaBv{>2nT!Tuq%a6`0#GMh^rO#L7 zvkQfi3AtAH%MV$#=zOk`l~)VO3JM*@o(mBZs)~}sN7{OgKFCvL3qgj8&W|8XN7sZ0 zr_+Xqt1GH2QCkL9wwYnQgxUgHndrRE_<0CqZjoN6$HL}v?LMYA#6K{K-tlWehvjGF z*)hjsim@8{IQT1QI}L$vKVk7;c{i-JyS}{1y0YY0Kem+Msl~$3boVQ_78al9k7;Yn zq0rh@(^-qRCFT+6AdJh6s>V|ksA#u0!1ce}fkS)aV4A0*{oH>`091gA&9>4-KCTkW zn;!~g7b7s7geMEunM$ia9St!LqRU_2>#Y=j_!76vvVw=qTW50|1sgI$%aGA;V=~dfIXyNpuVXLpG?!#|xt4;}mai?{K~{ zNDEg7$d_pO=s9xF4hEHIYL9OZ)t6QZTBCpp=9?myInhf+sXz-HPDWh=(7ww8NY|j( zsUD|_30TZl-K)7?_wWlHnte}RPs#M+@U6x2@w46EVI>h5^9z9|8|r|dEI`!T6bJ4A zFBKN_?5~5aX6#sv)m@DT^?mbd3#%^XVspo}sQ&Dr&LPgx6gvvZ3$ETpC-8nnqr3hm zqi=CEV~x+~`!lEV{jQU6qoVP~nMdlbJ<|^-L zLEEZPb69g*no3oXavGv2M9*h0og~Z42uXx zMt`#$Ym`j6|RKf}5gK@N7=e{o-@$}QjUHsk_vTRULYI4z?MA&y3 zk{)U(_oTqmyE9#M!}i9`B}x>ZZi}j|V!!U0Vt=7eD>e>u*j^TlD0JA0y1Cscx+-Zr zYqwy_adj=}Tz@B*9g*a|pr$oN|CkSfNo9{dU_#_F1)qkp(%F9#{LC)(Azm1Ug_M}q zB;CFmhsk*C^+i0c?#C(s#m*i2uB6=HR+!XeSLJjS?xv5@457ol}YxGRNkeiZ)W zMhi|Tz9&0AC1jj8X2AP(bVMbZmS+HmC5)?2Z}Pnuo6jZ34Y;Mp2}p-ap^ZUh{YLS0 zJv_QA{^te2mpjabQxIERg4y+Q zcCpW}3RNOC@!eU&R|qP>#MQXlf2F-fx7#+h?m8P@>ncW&v_-O4l+?I)gPa%kdO`Mb zJo|Lh>i#;AEI^WLNQ=Rl#xP1BZ1_xwuA7&v=BjNS#xRG-)}rx`nxQM&>BWx?B2x6$ zHc=!>lap+*Yo|RGZdo1;+?mgHl6gurieGF1fsk7 
z80CoRrG#vHvos8CExoEiIy=Fy^?1OI=nIbeP(|`iJo^#NlfxIuAG1l+;vlE=_W`jn zm2w|AHiiq+E1yP17?Q_vBkSW^b5kbi-qz zYd-LZ%lDmKKTTj{-At}jN4G8ewg{>X^ttRE(PfD2mTF`ok&tR|M!^}?9TxL>`l9Eu z#PdHhQxbP+M<^_=@0RBlB47*#@O_0?%t96I7e;fRYHzV{36(C$1ulsqzIBklHO$+m zI&7xVQFzETG8{20oWw}j>U(($jSjCN>Zw;qFSux5tdqzi7X8yjt8Q%b<-&y8)=vb? zd{A%t$Zizz)?!J4M*A5$s<*b6o3hCh9W+^dz1TGBS>}Gz%)K>YDIuEmBqa-q{ukJC zckchBBb@8y=VPLRQ@jcM@|Me+BF_nSkBmM-pCItRtp#t*&ESyXJ4pSyk&UVrt$mRb zhEGv5SRo+e2x#HECVs??EM}N8k;0vticmuEqR9I+Xhp*`JSe{Auv(Fd)5O(Igfkfop` zIcX+Z$&N}s_G}XSP0_b4`XezrScfosrh~u>!FxF`KPMoV6hS&sFke2T5mImqW?r!+z*5~)W&vVXu zzQ4{t-?#VWwJzCf$C_)7G4FAYdyL8DzEd@iLUq)T_c=K2FLX@Sq9-TlP;ioJ@C%^9 z>&guTqtv8Z;t^K#Fej(I82H9YCBB1xsO?Rcrh__!tI4CyIeKE?y5r|HGj3 zb0iTeB|z~c9|kcvz9FFTUwC_SxayQT`6vvXlIEOhc)@^D$Po!S}@}_bz@!c>!XTo&l7r`sZ$-P3w@U?c_R#T zlf*d!Uh1|ezURZHjJvKEHR!77xAOF-ccoR-1Ywz+_7wJkdr!oNgdRH7B@Sg4Xm}m} zpR(X@Aa;nrs}}JBYbNYh?UTqYsb|l#rbsfkU-hYZe`bKrE`0mr4!yNLH97L z5tW~)J3ief4a6J9c5!gj<3We`0fdqu(h(>(OTB~9zATgJ^Gx#TFt+L__S~4?A zTZ;B7$r$#HKCNlKK#8mRX^ac|$E4u*J_n(KC;#TNLWa&o$M&}L7-Srl;M^SUCyjiH zH!y@9Xb*x5h%8k@C%w})QTU1l`&1DKZ>8gS@F0WlW~mZ8fk^1xc7pg%eY$=#*IE4a z&OnZr0>O>9d-E)un9uI(t9dO>+ni>E*|9H8T9uoPqn@A?wi{C>6Fm*o&jV|Y z`(m`Z>(-xDJv66eGj%1c7I#&bJ(o-Mm~yxg0DK?`5jh`L$faqr0yWYG3<3T>1(k92 zg(vj`%~DKTDU_O=f@l$0KT|8w@!(7YuKcZ5y=b;D?z)m>K~ATCRMZhvG@#)a#)t^d zfm951>=P-u{H*5cFGtF$J^u<3K+gi$NR!taMOW{1g?!7WvS!^93B`{B zE*}9xOc0}!zkRP|M?GjGCm%vl$YAse^iY=)1%#p=Kw#uYlJ4JFFKeV>F}zdQ6;1%3 z?iVjW8>E>Uc;${lQls;7qlWs04;sto8h0F(SLmfZBbK-6VxqNIBFI1y;AQ{lLrNUa zW$*S8B)e|^uYA$|Lc;7(f8yQWTSuqs`wx?{#sTMTB=>XlRPr#mt|iEkQ88z#@qu;m z06^I=#@#s-D7MaW7pQ$$Y85Q`6Wu?T8SvUd zbot!ils1) z`KTcWjQu2_M#hgkB=#H?gz&}762N7?>u1W~P& zckffelfgy(v{X=2@B!rPj~miZKkh+swlB2-%e@~PU z&s}psOgv0)?G~pU%~U~CKzE>~BLbpMo5>>t4DfevY(Kx$M>Vc@8VL^Fov$cX#B?cL z$pbO|6c#pCNztL;EAK|lSe-(#EWzahlJvvxk>YoQFVvV2cqiZrzyk58SX3wzOQM~77n7X*2g+l8Rmv5<``**1;(_aGV z6tG$Y_HgJ^rHjDv>c+Nf&p!hqZvOwKgrx=iH&oKRYNpUEu)3kHB;jjCgwZ=o%ZIh~ zxi+Z^MeEw_d_}bnzyFRS_v;Mo^w$pufuD}GglOZXWjM|9r_W%pGB%dRfoYmfm(i!M 
zI~8=uJ{nrX084p%1o0wy~F7i~}U?}F0b(aF=FbvWwTK-7?-bzUT;&b@9B*6rn ztTvWoK5u_`7(GIM6y$npTO`YcN^j#fRY6nw^I@^qE7QV11dK0z3YdkdF+Xp}yXEK! zYJVz-HtBA`99e$64>TOKmL6>o@hB%#FI!3dar?I}Wj5Xi@m z%gpvWT&~A`_K;VSfq1R%%xD7)*)7SRwpoB;FTuK-R=`4A3xPEzG9j5_9MD!s31zzh z;uXk#f8vp^z|{h}r{`0m^$|jpQkqQ8o9EcZGn6+E<1QGZf!=dhFRX!cVHaH(kX|sQ zA@GlpSmSKUgl|5dS9WF3TgLxCAmPadIs~gx&wpHgbtdR@KQhOJ-6R4?i>|W zz!WgQL$Y z1EfUzcHA)Ihsm&PcDkOG;^+5<@AP|vM!Te8Js8Qg2~O&%r9`!(1HNxncveu$P1%iq z5~siijtJq#Rd^`1DhC4OC>Rmzrhj%wO=57}U_m40gTkc5n-xrrrk&B+6;k)wXNK%K z9*xAHOuHTI6@P7}=c9(+XWZEqBUBr<*h@cLWHdbYW5+>fRpjC%c=CCU7~iRIwA!Mr^^V;wiqY;v6C&dJJZRL%R2 z59Yv9;jrj4fRBflRU0y#d|*;cm>w(4+%Nwj$# z_8>sMVMPpKHvYQpi{hq4-|OO{!nDY^PoJe zNZ=*`ul!XF8t~=&*88XtcL!$?MlhT}VMzpDgRva*Zs#QZ)mC*c8x!p1A938B4SyvS zs_Hy0#Z>|-Qjy=n<@MG#nTg_!rU^acMIb{iVpGaj;qa)^GR%l!QH3sOnLrap4G2FV zcTUGsT0_)O;s?NS*G`?JJGElm0Z``Ols?K8{<3r2n5Vqh8|?p3T_N)+%|p3s@v0Rd6iYTpOO| z6z~jhy#z}_$c8X*Ss7Kb&7djCkvzy=mq=Y9jW%H4>a6D0e~zGj1ssH0dEY14UY<{o zowV&Jb9ye`w6O^vzIw0m=!Ez)%#O8z)O)YySS!I@jSwghTc$>ditJAjVX`C0f;Z-T z>s3eNQu8IQ%IW&K<724Z(w2Fw1n#!wmH;?^(lcjV4wQHGN}@p zsN1nyy7%UFvu*I(kXJ69>nc|>x)Y`xH)Sh^GaTFf?qP+F`T|qD0?Mv}K*$4vmP=At z1))Dot3SA;i2^*RbVC??Loas($@**n4QVktK)48be<*gtSF6(8pJA;#7e5T1>jpD& z&(ywtQj{Gn+gkSVi{oV!9p6RIb$CpS53kWYDnv!K5?)!;CH7>!`OA4qVIk&pg&BHx z16tV>tWCN+bC-G@{ZObxS(MR?&`|#mPGF@Wu<@<@Gi%`@l%ruTFAx!^S&Wfl7@jl9 z%RDjf>Y`OSj^??!9mX@AZ$PKtR$AyiL5mKn-|QezIlud)a-A2r?`J~~Pn8b)5(q~>*b@<9s}K-&3a2B>Yr@8khEPSCgCZR*j}9c~8Ks;Q+Kg4|v2(ZP z!tatD4l^c3>r3aC`}x>#))Nv#YOWj}I13aL)>h>-EuH~y4Wt{A_xnTVy1=>TQGh*7 zgt3IOFQw#RGsHiY2v9Qq;y~Hkx6hg>4cpH<)sjGb2mY00ytg!X-gKMO^TM!-s{SyvI7mZ}+xF6QJD(1B7dxT}kk#vs!!Q)uM(qMN5 zpWkt*?p=*AW6<{XbwPqjg@F33Q7P0~kwYYI+GzHRm)9mVh4AxP=s@vFb$ij|4i2r6 zf#Kp_iBk--JNen(Y!E2C@f5tG<#>Qu-j_W5KPwG(E$1)TsFu8rJ^Xol z%ulOAus*TBOq;Egohl#lL0va&9&vvRVP5oGw(PaFCQ*|76nJ7Wy0KUkiwTQywQsCy zTip)Zh8@3oaD4~KHsI4>nYX|0o^IZqC9Z4v4uHKzk0eD{11eplKLgGJ7#f&lbMg8i ztmLE;KM9CA0Y%;yB=dwNQ;P?EI1_hiRXJx+Z>KpSIeNK&@_ea1I_UwVx8asFrqA%^ 
zs+#gwzW^pogAXLPhw~OSUlS4nFIRD4#Y%%iL%!8Eq)l!%_zSTaK@iBLLYB_e2{~W! zPx!wb^%+`{ENY9->PHv(^vP4JjQ#-O0KjDUP4l-i3x{u<4F5h5Z&y$`8_S)zsSLZG zy)Pw_94afuayfSjt-sDwia9@P(SKR(?uo_K!;C>2AqEc-4OQmm8(UltZVI2Dd<*D} zLcwbb4drMn-lP@I(2T#?NH9PWDXFrGm$)>2Yy~ypFBQH<=Gutp*`ZcvQ7Y_ZL6H)EC?J~@7 zp52=ZiHcG|7XC}bA6Mj%OaPPdGu{lfi;Ihw&3m+usub&TSGeEeHcS?i^vt`HMC0F4 zFmwXzpzfNc4frk%ybP%xG*vtTR#C%W{lr5{HK?)SKHB{}D^UQ?saoNnzqUM(4}^Ki&f?VfuqdS4!enD#xQ=M-B&To>vfqKVBS}v!UDwq9gpxb`ok9XX%b&+#?sO&w; zi*n_sRVHWklKTR{?KOQWgDGD@_ybeV0{4-TPT&wm9ry%2S6NqamiPDlF=NA%+8W>k z=C4liW=={820zKQ<3`^BnntFE`L+WaYsnMKuRK0#HQi+1~ctU)OF$(ma8c80vKxTaE9y6oX|fPqQ8l zJy+oC-u)zc#zfhr9yEkMkWU)~m!A9|5j{KGKxJb`MJ~)I6ax>RGWyX~Hu5~{mJc{Y z5dk1Che>RMiDUi0;GQpMC?|E6-B+Ux-(|h6xrf!&wBzqq-S)>1ff|jl`+01D9v?!| z@enLqQH9pS{bh1>JO8!_XT3QsC z7~f3gnn7IhH!S)EQ>qPk#EfeU;)$?kZ@;G>M>YnD0e$X7LDeN?5(p%ofYCzC57H-) z4OO6U-dtU`8ZD3JzM*Znd$f6%gAbZ$p$^cTx#C6ehXO~&+{v7yqtIM9LFE5uU}EZMY9 z(d6e#QU?-Jr9!9=AQ!t*Qiq~;FR-z(b&82CjddUp*tF-=@IW1Fs8ri*eM+7R`K25N zP#1Y2gV$82kYRu;& zpNvJ%8^xvW7NLltdQh%VorO2-zV|np)JOMBNb%9Ouh`YT2Iu?^pfKX9Pn&fOW|uF3U4nFKL~axidwY>-T4E87<-tP|?7m}w3Km{I_a0?ueM7^g zi0_8!Sw>*%=9^mE2lJYK}`JAv-4wj7JOWd@ZN~%Euge1Qs4=2WuA2> zHO5`(Kx`J2%>;hN$oytj#$tG2R?$wra`B}F89?YZ{ncEbUpv}dnm{0F#YkkF9zMs` z?*3pLn}K@<$Q8-M?Vbs;6J_}OcZcqJzXb&1o5UZJ;Fc8W8o_qydU67t>0tT?&|AY( zy(MmKvO;^%$%3XK-Z6+g(!cTjuKrlyx)BIGVTfGF35T^KQ>Sx%R{okdWb8Kcm)>AJ ziIQR#kiuouya;vygC+dZ5@19a^j~9nK^1R{@iYreV6AzA%zD5AlV{K-F+2x+w8Qd2 z_;Zy;cM`~l_qy15ND)(mbH-V?qr`i*>ezP4>)DeVsR^dd)R~rcoN|! 
zqu^e{_=u`LAcDvp{!zp_zG(F)=zPNK^_{_HcXWKY#A`F=e$WlJu^FUExBMyhWU)qh zefu~5*S;^%$ZJ;KhJ8(u$Hc95s&+2IYz~(+Y4VDbGYa`U76VE}F>e8v`4q1)X_=6V=^* z@t2LgIoesU0S}`FOZ~6KVpXHfbj3-a$f@S?{k;6g?y8EyQm~krSoJ_0U~ezFzcpMG z_8bB1P9zu>t>r$qdz)N&i&NRx03`AMQ-Ke8>H$bFs5GW4()Gy-hXUFRlv~2W3lUWq zX#EMj2k5-Ls9DnDZL}T7Ui>u#@Pq;V`Rc@b*Xagqp(MP4yymq;pialKM~02XTA!T5 z_2e1E1=SmPFkQ7f188D;a~=2bbvbj@>UY&&T*nN-f_miY(8pLJ&vJ*x3g=c29|%E9W?=8D|K&lB3&Vf^`Jgdp{sYGUUR?j~3wH zKmPyDr~m)s6Wd%%K;_npncByv|78?hp((6fp-<>s^AY~FH2ryr&U;zBVaY@UCTO($ zH+3T+O@n=8YwxEgGSmla zNPqFyt^xpM zb|>~|CM<2$%$2bK;eWujv_+DCE5&b83<>sXT&Tbfl)l$%jL2Oqi1As=vXYW-4B8`%RZ1Tj2qp)R z00f4B`JU@xLIY@L(>XX?oS&z@ctGfCx#+gBz1>=2rhaYKIlN79z}GO;KU|#XFc

H-X&IZ(dbiJ{hMAWhnoBbDL;AJN? zxP{+RcSz?0F37p}@Ag5x{K&@`fqGE~aIkUK~a#!$D+6lO8D)4y_Jg5WYl4T~mH)baV;v zv>AvEUTpQ#nN%-?HOqmOd2<Uj%EQV2w!O zo_Km_KO-X8fOmh>lsf`Wp zpq{~zckd<}LFHQOL4|9Em_;ERK>cN7}%r_DWy=-)8rh1Nbp7 zX&*qoeoJjHB13Cslgc^BnsV-q=LI{|T0eq(`1SSK#o{Ht?o1UM1Y(q!(fYmc1Y>&s zD2g;THWuP}a)P!|=}J3W3A()?!Y z;^nBnL)A}qA!UsH7FJefRdp7Z3~O2gMiL=ei;PN=Mdh=c2kpkPOuN2=WB1oXCTfKyjm22a$85<7Hf6<8m^!b_&bmW;DGjbbb+ZK zA?Q6eU?By=ioMD#3J$odEaoYnKm2PTVTXT>F7u(0ag{?8PTz(;2p%yiNw})W(y(`# zIMTBJK>sm0!PgdrSPpYmn0S0|1G2vJ1p*hbv_>#4A;?L`f})hWT=55Da}j#h?p(8F&J6#C!jW^e5X*%ocUVv^ktV7K0w|61 zVfu_~zL4-rv)x%Ej5|2_36?)o^1nwCg|UNqC71;`)-YiD0oKqo;ASO_na!VB{PhAy zu)`nM-~*=g1l~}T0S4&|=BfWfziDi-P_NZ%&TWu4TJBpN9o%k&btUSV%j*!q*J6kY zj3`xk@iv#FGV1GBntGLdJbG+wQJ$GX((u9kc4D(Sl5&?|L5KG>arIc@xed!amyOX}$9gR_4u``>FMs_e$lC+eJFH4?6`rUOUE zQ!1CW4*SUl%iV}zWX{Df*?5!vPt1BloZw8-#!B2=y^XHJ<<3FSO=RhunhNq2a83W} zV{*+uHw?Y3eGw@0$8Ey=C4B22@97o9g2t_fmdeoayjxczmAHeZTel9IFj)N;(LWRI zA2UvAhEi3C*p~DrSvUx#N%$(cW{e?mR5a_(&nNzzX z1T6%Ft-4lWyd22g!bfC#kj*=i2w9-9&fI!9zfMs4xE+$Q*7O%?iGKYcGKQRp8sq8=@Q6cdI-=-M`~Tv)GS!*|1VC%t*oD zj*do2!GZG<2lU+e_7|1CZ#rU)bU>+z)pRd!v7s3WWa0hp$Dghj5@2xP9Lb)`O{ZQo z{h5i8V0tnCUDYoDG$Q_A{9x3e&di&nfAA21MSnmN3P}fZvB{uu$e2Og@2bOP&<+Oq z^zMYsbi$IaYyoRUT7V4DYd0xpGOi=GM?onkYpNHUr<}`3f){W+mFBP8pC5y^SH(egWi@rGrWM5~E09G+`tj4OzyEj2C-*Z_}$xtQ;Ju zz1kx7!@g3MOwJ;IeYtNVE7t`q!LXoVvbUev;*1rKA+E2+L+L>ZW-b$wn^=YEWVORR zW8)o1+%{(cLB*<81$-i}edo|nz%hP5t8*`Ao%88dVkLJ&tcio)ndZ-T)re&y%q}A`%CoBw8uOqa`Sp9y~4LNgqta1M_q$7k3XOX{8;5PW`G=8u_54Gv$ z($xH($}n@v(8Qz%zaB}x`ki84zFC#aK8D~u>$hBGrfoHh&iA#uySoH~`}8y2RmWSS zQfGVftv&(EX#DbaFT@lx%Xq=>%L1h==si>Ao5FpBr$y!EhS&Z1(uo{!$h)ukokzH? 
zFn78_;#01>W&{Gw)v-A@hm97_WQOZUqjc_g$MaaFZ{GR_5E;Z&Dfk9xcQXeJ*Y+}Z zKRuLkqHw2;ZX+{@yvV3KvZtw}aNobOfv(0}q@m&5N9rwwSxeu@`#SMP>R<~>v7%9U z^|Qi)WgTnE1Lj%gV}@eVP;)+vhacO6Pt;uXL1es=hQZ&4)DS7o+8|sA|pM zu78by`Xu#JR2*LZbmhR)smg8Li^;JLUr<-6x1`O6){`lh80In=h-YeI>0{PDGQr~U zHdj>B{n`MYJ!c%zwh+=2dV7+6oc*@JI-y&&(wT^WO<#9*%dDydd|MMae+ZOJJ4mrERSv z>t?o!R@sw+PcAnODvbyd+3r}mNn*9K_py0uvz8Z3dP?-*+5`R7ED!qSZZry>8GXTe zMDh9?X;x8bF5c=Q!_hUz`Mv26dlTaYipldHiBGavdCK-%Jl(a2s`W4tPl(_>r_zalyeMK@SZ#o4>Q+pbDaKG^s`i&YjuXcL~ngerM@yx-RWqx&*Jb(kxtp@ z{xknWB&7D7x+?LAsemgxqWWWN%EgPq#0M<(cXMlJ$Ez=1ai2dbrpwarV$~l=@=j@O zc*&q%YmOUTsNp|)rrR<4+Hr>qo zeyqV1l&4!S+a@O`Q|*?&VQr0m{I%SP8OwF)0O?QS@5_|AZ~LUqVz5*F<2v_J&*Sb! zmBT!An&s&HxO0Af|J36SLd#0NsF{MmUp&Dt5}!vLZ_|reg3x0sR74KEN$y+31<$O1J%?*SS{}WxsxTyqvh4`&l2l}+GlIf;#M zJlRMYaw8c&hWv0U9Rq_+A|@tgzcban!##|Gd=OT0t7I0(?6LRA`E;|l{@MY~_-!Ey zF1?b@!B?hV;r#f!Q}#<~E$vlK+dRi>11lq&IIoQNM@rZG-{zzfO;=dxjwNJ^Zg1|a zv;ZG4QE!BV+pTPP5<^yPxoBFp?h_S-Yo=!RLv2*;0_mdE^zsdl-OlTtv5>wbvdsSS zRFS}8V2G?n{VkWrMltM0gbnWQ^8KInIx&a$gP1O>gg?68x=s}9p`r#84#7>YO~p)w-(D#u3AlVxQ7V)bGM}sI0;#do7o5RDl1QR=Tw|#&ul3>J?k))61GK&B zl%cf+p`^U}W&Cc}XjC*@{_vNm-=4WqjTNW|BsU78Z?VID*epTFQ?2}6G#{ME)Dh|i zCW1s19qwd_;re9|*yQ$ir)4lHzq4^kUq?Zw$#^0B0@-8FRbH)ST%0pRnkE`GiC67s6Dy5;FnW@NoYLrCSv)7chnAXCm&8{T05APWEy0sj%_ zUEIr1kNa7pAs1{M1E>L1VI7X9_3yJT1uYFN;w$VwF+7;B2t@xp*U-UfF+RfJKOG}Z z5ES~>n4GZU>#$-2Fhw);t_z+pYD8|27naygz5(;?QyFRLx@?dX%y3puK{Khb{)_{o4 z;P0zDJKbFoV(6W;a!GaXbJZeb)W;qOtDDF0RSWzuStm`G*!{J~KOM{Nk~6g4$P%86 z+QI5u?6$wA6pGiU7D4Tx#i-fvE2dcR=;H9X=XhrZ5t-ZjNcBqdBImwGzB9He-XOceF_Io(7*Fi)u?sHrKis|9;hvnUOBJ9VR>a~ z`S==hN9Ul)LWZg1Z%v=aZbP&?G8?T&Q}g%H0%*XoAO8!3l;_p9Wt!O+A0Obx zP{4P0U81f_k11jo2(y#~M?~P5P21|Xv{{{OjA9WHsplw`n{MRJR9lz2u43~zm=oTR|)g3jc_$SVt^_c=fR1pxPLLwbIjXHM!sqSWp%AN*8?P^`5^ zwM2h)I>lSsmk61^|Jk!=wwrUlX`L9ceAI*-7Y{irtk%)k$N@6=M31hc5&aH2Tg+Zz8Vdefzs!14!=~T&Q zi$>B=AxS@|w#QmQ)~{eQ!5zYl;Xh_e@Y@S6m|>2hS2UVF>nq$xAjR54n8zf+)tRWa%dKS5KX}`^4X$G0EI)o{raTe! 
z1*DvC5n0*pjwGJ6VY%tFxUk=N3f9Y4z9$45L%Gk{3}rHft_voneEMGy%wFp5OgvcW zQT7BzO^BYBIc2)zj8@cHv+s%hM6jLbdtii!%?^W89Fe*2hEO*QM~ zHTLfBv_|$|xo4`;!VyD#xvT;v-%;|7C(IEcM(U)d0sn*}NSWK)n~={bh=@e*4%pV25;CMJIr6khy$XHNT1VLvhn(A9dfRS{z+;9P zxegT)m99xsN_{sy+2`~Tk-{I$aBY2Elt&pKU+n*N#H!POli179bBcOu8 z#3a`^i-S$kJSL8P`(WJ1>V#}0qRt6*@Thd~HU@ZR`J}FGC5z_(r23wVx-$wpc%V1# zlMuffs{2T9*q0;;R={fORdSMP2P9+%jsC%E6O%iw`h?H7jV(h%EN{Rq*lzJdc*JIU z5e-bXbIa9VHf|()b2N+IW=+-zco8nAEiRJUvXzrq95B(bm~VFiTz8sn8Ie7LdF;&k z`qVk0Tu*H!Uns24#@ZNx-gnGdpm2QRu+?`LxE9Z-s4kAZnewO~;v}iWBqYV9rD-nX zS`4aXzA5ve!IpV1W;r`%>RsYJAs*KH0|qMgm)#=Pu~ohli$UYz21oCEB@YK4edUv3 z4H`Q?K3l(7Le-dc>s)beXlQtdUYP)iiMF0JMKs2MyuA_e_32FnkxeHM87`)cYl4LE0G@4VSg_4 zpaE>-^=>Sm&uofwwLNH^&+W>Gq_$!SK(o$JvS(6j433Tht>fe4rnd?p63c#PusN~G z27blm`GR{O?h6Uaw}$1`zC*QGgYZh;2`cise#F)7Gfi0%;{P2108U-=S! z0W;1ri8Dm-Rrh zU~8oi65Q8wZ+J&1CjwFoc9RjEt`Y=aJr?sDD7@DF?~hyTHk}7!yF|Xn*4LqZBY9eW zN2~Ouw7(x)WS)UV$f!?CZjD0DoNSd)rQDeoykGA;DUoGLIefJ{3JDW4OeNX{01^A0 z8Tcl!Opwo@wlt(*Cl;O0)*D^9-!$ywE*8?;^e6I~Rn5LF=-`6dzWrCBwceHuLt!H2WUz>~!7z;7- zEtT@6brb_#>qxPGkIiMs<9At-eR9t|R`*AH(AO8PQ_*lFfuEo<)&2+N(6kDR5p)n6 zKzr);!Oo(=f1YTtSL+13nNuQlB6WI4J16*=Z%VK!C2<}v40J0>!Eren3xcMkDN21c zzAhRUFyA4pRBOl&+=3aG04iQzifBm96T0_4rABfhsvawHDI_Am)?v{|S;0pu@y!p= zRmR|u5cJ6s10!Jh#f%Q8akvSMmUzhZjpv)}o=&KSR=~hYNglm$WAvFnXVPx?(DXy= zm9bdc`anIc#&kD1%0F6wD&)tfRj_#s=e@VIyBm_0h69ly?}{{)=SFms%&X}{mb zjfA3QBX}pw>sgz=5zf>|n0R(-T=Rnnvpzh`= z;%N+cxOwyd$+m>xA*SNKFkhdtTOs$_v(Y2+oM3rp<;@3ALDHlFv>sA{8nR~#d>)50 z2NoV4f_!}Z;>nC|!SKM$?d+^5nVFxyacl8S_8!zRjpsCz5Jz@&bSC`H%U$y}TNigr zByRrqWLLZKWd{Jl2?+_8TRl2hW9KKX$35mNyJQy*Om5DY6lPPlRZ&*!ZCz<~uBX`e zM#|Q!y?ubrAC>Pz_mb>keJ|f zb;80?QQ_$1M{0p@G>&YB;!zOe46uUh;gX2YI4QroXKs*5@L2%mcm#Jx!RPDq{pGE1 zV;fhQXNrzOjU=x@bZgj~@B}o|6?7`K&~ss8UU!?LudA;o*HB)2==YH5jsz$TQ~a#N z#gUbi)(W5L!NZ5lKjaf8<2-TVCyz8LzuNm3wJZG(X^`k4KF%aVXyx^M-Rmyh7()C zmqnRO@4V@t=Xa(I{%XT92ok{l5PXF#yyL8nh}{|5+DO)Cv3%FrEOgyh#ue$eUW%N? 
z{X(g1@J3~@GaYvsjyZOx8{^%s9rR4} zgp%jHL14rycx%D+y7~3QfzDj*v_S|_@@HR>;|9QrPzcwAmGrET*c zJ4%9;B8Ag3vP4UubM8(?6}(eLefoomiAh!0#B>(q4bvy}T752VZX$Dkg5O4gMfE3} ztb})o464?!$#ufDB*T@NYO+0SBv)Ks9`rUjn1FS0aZ{%%TP9v09PFTa)TcZ@Z1GAC zV7tQu$8N!GWwy=JnYhK(u88p^XAuWdzGKI3Ks!v-T11OXqda;9()Y9PbKiM~3D6mC zxNN-`0Zqibyg9ViXaP;p{-mI(<&hBqiE#2SliOXydgHIauA<~V zcL&nDxd-11RfMxblNHjOK?>_8=6gJ&*~z)rzF%nSdx=Wq>6uaoYLFE0mpD#yj6M%6 z()M49s5YUY`ePp~9jkFw7S&N!VMjrdHMRs?9s;&^(h37_6*}Ql8_7D`&PdcJDm^&8 zk=$DWW0&SyyVm&TFOr2VJ&`&~fGK9}t^EP8?t_0!<4_8mv8MUfSKjw?TZE3TTJo=8 zyY~{w0uR8PX036TW)qLR5^#A_Q?^OYAQjI+B9|o0A68;C@cP&=e{;B!K8%#Vt-G79 z`=y2smCU6#W1@4B#l3lfM)i|iYc|7P(+dZ{C?6akyBV(EJ{4ScPlOu8&k9%!@UuOy zi+u;EURHx%;ZDZ>s&qN^8F2E zS)vsmnQr$?DK!;2*>e*Uq7wbC7NC08b(=fgnhblP&S81@+bL*!ab-msc(&)kqBz9F zJ|QwA7(vD1zn|e2wuA&n(kbT9Jg1ac(j`eI;%&Hqcc1%yYW;;%XgxLpU>NBu6QvDi z2On8^JdT~2Wid<`@dXwx`IR)?NSjJimDXS8v(+LXA1PCS1AQzWINqFQow4Qmc&~Y9 zdt2`Up=EqL8ib+00V@YWBui^+!<`txYz-FMm_kr?xA2uNO2^H@f=R31S>(Dx)6z~x z4CIN>hKnvQE-Wt&-~i=DtWHD(k~s?f^>2Kfqv%w9a}+=60=ot0Rk^Ny>q=@Wbk)+e zun2>NY?Y$}1JQt88_GCd2VwF8pvyMo8xI%#zIYB2-L#5f0wUwl>(9-$`*=J|i@q}3 zKi@W#JnMxKA1EHOo!z~zHjA-5m>gzWazSfm;!$+y03x64v3ruvHZWJ$C)QTT7ibs= z{ECPM-s6Gg$;Pvt-Q8@}dJG7e>(?;VVSIw_;1@RQZT*_l#zT4RAAfUwV-quv<1{mH zEX+P`9kDkvGmYUkd)6N4ZaVQjxsmMgDY5YgP;0w!ep~6#HB=Z+W0@>nO;_wW!;3Q6YOq@Y7p{>0 z0{{*RsqYDEfC)N1O=$jbMeu1$C8mZGc-NHg|fR^ z`QehgKwD(>_KTZhPU;=#7qeWE<1yDE7v{&c#~4r5$B26q_}N=Fc-FWwt#l^_woCoGeZkSD9dKAt`cLc|(78!Fu=#>~3+rUnLH-|=uC)a_kVs{~m zvrJliQdhPF;>iM6Iyq^b2O~(r~Gi|du=NX}w!*3-cX}a<@Q9(S^{2Zj-?*_Et zRLVZE10HQDdEDly?_{+JJ;<5qZZ4Il+sSk?HQwr%Raz+!V^p zRLhJR5svWncjwhN=g+13lRR+50iW3!ny7x^bh4kP*6?{@;lmnJjKLW!K0I1QO&5>- zCW5LoR3)z<8baFN6P+Ri-??(d=D<&Y<8-}P_!&zuba8)_9+nJ%Tx0As1FCP7o^yF+ zMc-Om1n-Hyvx>UHDrk~W!6JGNZ-c#EPY(9QV+s$e=N>Ab962?X4ks6({kks{)cw+K z{t+6Q{>o3CNSng8BTAF52wqIFP|}y+YS~6W$-A9ks4KegK=(n<%?SYxWMeKrOvKEc z=&w1aW`T9|?-M8Kze7EmR*?5Hr3FEs=bt}H$4rb+*ERik>Wh9Kh}|w#Y&)bjRkqi5 z-*NNUjQVGc;bTuqsrBLQWA~F?Hky-*U^1cef_l|DOLiyg^+i9ddBRY+i{tCL+uECY 
zXIhVtE~}%5^hz(>w++gT=X_yA=kl-prLsiui=0c;ud$DXZO%Bbx=yPqE=pJ$BHaK-wm)*Z?{!mM#P ztyJnv5{UPrc|u!^;JZvBUvxl3hs~1yj7$C9I7Oz5h_v1Q7!f8W=66Q*$NSTyAfu;T zsHJn+U3t&oNCb9Kyh594a2j6W>1MWwW|cm{9(I!;Seu2REfy-4S$bVb0xIWj<__9c z3bcUDhIF)JbJ!XHjfmO9WywIp;s2Ay#`bNq)t212=6a_874Fv8@~(xO!&C$H6-ZY7 zl+Wi7YS_H*-})Lu;iGrPF*!9+;64n0EMsO@Op|3pr9*caMo zdXuHTJ(tf3b8Z%QXprCOR{C4%lV$rOznSY3D2Tu7Lm|@+Z=;3v4okiO_r1W2!eug^ zpME%k0Fv&@o5P(J8Nm$?1j0&o+Jo>dGV=Z(^4>Bi&Td;5Z9*J~;}*f)2@WAZaEIU_ zSn%L35gy9IZ5OM<(*1$PMUa7KUM+H0?M&f2%`zf-qvSJ9OwP4_#;oO8(YJY&8` z#f}?dr5ht>wOQJM4V3}3tN{81kxFA}N7iXgIBAi+B$py$cI2L$8#f87bU9=LM1j47 zBbrYOg-O4qA*oq(EnHHZ6lqSq)(}69Jg=`39T)A*6p6B)qBsxwIpSX*7Nj6|-Gjmg z>%1?NdN^MxfjD}?7`yw`hFI+{80Lw%#G!YDoM#~X{QXBghZZkd9v>1hjYaO8z^bH~?gQB})to!ZCoxj}!@-JRww`Q_0n=IOiVKp0=Ja@pz2g(9b% zb=~X9R@Lq8**a(62YCNB#KlQD&##gHdHDbN|8BVulYd4>mj56{`1RvL%WC5U6Tr%! zhkVZawOOHWNMXU00_B@zy5=tt#m)N*5IO9c;J`6Ns>HbuXc3ssAe|Lov%Sx?O!HcI zv^X#*-w=-s^faO@EN6NDyXXX|-dSOh7}`ppLn?g!?Q!cPXZybkdO++Gl=!z6=wII% zq?=xZl(YQXg9LY#273Mf{UPf4|A~(jE{@gv`0*p;Vh40<$PqlqfjxQu(IXRE*xg85 zz6#L>NfHU8oF`JL)X>t0j`{|F5=}F%`#r#xO~RkL!5A&4*pD9hhA%M}t^KW(dhs0{ zX_Bvj|4{#g>Ej@%4Z_i%m{;)ryP@FKt&f!_-v4IO|Lf*2?XCI!tqnzl9NWZXARejW znyO1kcvH8pp~{6(>qkdP$AX6h22*~ac5J*(r>HKqc*X7N9?9rkn z-Pw5N?=I)7CDeNx)3JRiZB)A;n3Dxnl62#3#dMV@O-#iJXdO7f)$lA}+z#so-=2gVOT_YiU^P}2cHr5m-Q9L*^gvqz+>poVaKkZNQIa9bwJ!(+ zHNCllMTZ)p_KM5(0n5EPcWX|p7l$^Y%~R0zsPBv3o;q$}0!2zweLt!JIreQVnuh!> z;X>U}NVVf}wQKqfq%6Uha;dLcMTjwzOrxbn(q17GB}ub7>x@Efhn+739F~DRYW`&h z-|VLe2;>-TJBW3=!#-sV=jI!C+up{ck?1CJX$@s&e@J$IQJaOM1N5R!V&QgF0$XqJXi%+r%D=Zfmm$Zayw^kclAfLl< zsBat%r3%fmf(jipGS#VXxWkI|sitR~TN0Reb@-7FB9TUAC_Ya@yH@oa_yAl88d6ck_g75mBqO8>q8L^m&Ii5 z8X)zM^H$&OcE;;5#<~`X{v3pykF}=GI%4@gmF95CX2_LL{9LK~4t%L@dsM7!Qr*&z zIwy>FC%}_I7obvSK?_nNmjG5@H=pnh=CjjJ6ArDqp`>Cr%PsdGeDZR>f1GnmisR5* zw>|6=Sf7bgeX{nviov=m6 zcprMqNtH9yV?hvA@MHoz2tg)&zU!hu-xR|B^5sSJl)mS!%eWP0az?#bg0|4Cz4#TH zhpy8coScltCr^zO@?z6D+Z(*@?#$OWHimwh5hgjwvp?RsmL?saJkYE+TT|p)82W{T 
zbZdX@32^=rG2C%-q?75gHMXDKAC5MPW4FYvp$3sl1Nt4`YeS`cdqca@ZkmoNBuiqH zX#t(M&w8~d7P;4egN@^ML3a;MdT3=puXEm1g4EW5J}M1x9_Kw-MosR+#UDRipoqeD zb8|zu67j?({O;)$pJ)rwSzM<)D{^H87YQhk@O)b&KB#0$w?;Zb4Bckd*3kH;s*er# zRk&`uy#5TtbK(15g4$!F=OAg&ADnJSbc7Mc)zt7Tv@0%7e>#0?9fA_U?ncI2%L{^| zn?Rdm`r$c?flF$M;~^i;)zSt5W41n2YvOLXNU0_$&M=CJg z4>l@$@-N7p&o>YtK5e_s-w$iL^VDNd3v-<}^~x2k}_|m&_KIOya`%8WMe`zSQ*g zO1ps+jJCGoLhV@3xg9EvM5>xe#b(xL+d$EGCdWwkx)Abi1sge=RH^8O&V z$pa$2h!KMbpwq#*YU}6-%E;V9SS@wG**s(KC->9~m~9lFU5yH(R;DWefmnUW{Q}eO zC`C2o0Dy@S$q3Rbzx+)EWHoCsDceRtLw8cMj_Z1F2b`1Wq#wj$Ydo_YnGp}jdM`j|gGDssW%dwYRe9wY>2 z)0M5D$}G1&zH;xREE*(8Y0ed5x}8COAkxf#yD)pxv^_rWF-WKdXMPDeyuT!(R%)td z4hRn?hf`xns9V0HgdXHjdn>HpetCF*aH}#9P6}8B2YEEk$?G0dap1Yio6zNNUwH=7)OPN4Cx7pHmrFrU%GcZD^5_%mHBU-NKxxFIvym7W@7^Gv0T`!g;j z#)I`wQc!)cx}T9;JvqRY7z6fZpujjhZXHlykc-G7r7q*PY}HeTK0ia~wIU~-?DF4j zt6K(N_5A0#083r%#B-@QrGd-_W}dEGLB6=mj8+E(}^iM)rGOa}@ zt=b(#R}2rP_A#WRzI0X+b6<4un(ZmoUcQ_S0F6GgM8wVl6-wgtOAh<@#7as^Knakx zIa+j*Ywh8_g!~MU5EaOQL`$yvfSuzLTT<^{hSEaY+EYa!N)&0j}4 zKIBC;p+_D$wj3r)$3Gd`xFugEB?wf|k<~AA^*UF-E<&fC{Ga3nX3{2u*(%`)36FuvWP$8foMp6ufVQ!5iaW)XBb9lU$>>U9Q()p+$PTMsi0$KC?L zGW)E69nUYX`tHZ$l2yZ&g*V6D)RG|DS&#wPs%y-~joD`GDzG)U+<(*{0mm6)+*c8Z zur=kfI@~l>%2j_6Ml*`jWBz5P(To9r zcoCS?SVacYQV>*hWuw2Z2M-w9@2`d?PNl|mH??gH6gc^|vkcd!#+N38oM;_Lad1}) z3!pp3`-36nFNG8s0WWri;jB4NrA`~8HHcK_O=6`MMv+uKxSeE?RI$FI38QJmUgDq5poVix#=+7rY1GTUew>*O}SZZPGgI4q^HyZY-uUUm$b;zx2@u4(_1w?ZsA_9yo%_(EecpV|6!?@FlQ~;QN2k7cXn@6#v$=IeKVbMws>jB`n?xxO52JPAB z2!BGpj`>?fhR`6uY#`03%B}-_Zb*jZ3M%R-L^QOeobT@fXMx@5%)i^@OQ_v)2h^)b zzf2+>7ZXBBIOSM+b8c;^y(Qig<=(w}{PCKo@aGH{Jmq)YjTDW7__PH0BSl`4WIzX- zgJ&e0%N5NyqJfVmYP2iR2eJ z>~5<`j6Ym#@-qe{@D;edt?f5@a}=Y&^!BI{YMt2Dw{uVmbwuEvNfI z+_{tLQbp$(Xk$E_*&PPlX@xedn6v$6>(v3XA9XhlWmDvA1IrUc=xcM9^ zLv(ltVnRB)T%Z^@+((zJoI}teOygr?2_e(Yp3e6KeVWDI$dkUx)cXGRhtl-Ypai=4 z=Ex}!lG&jlHgHIze=2%rp51WRTh=bN0YMZGtQqe@ZeB~pc?s-D_$0)Qi^=x58ZIrj zUSqD|JdKifml3Dct=-+-?_KrVA~=k$y%il5dl?jU6^ZZkeqExkenx{vtn~5ICk~4p 
z3MjxaH`t&b$`i6=kpPdb3VU$=Af|jzEg#3R`@*YXvdHcR;g}7i95hGOj-o%Wc%bjf zVNv{f0)Ye6m`rV@AV<)(NAS8}72Gg-BlGwIG^=E&Qx^f|Tb+SmZ9Nrm6$_^mpB%$D za$Tn2-5TuuV}h}keEa>{fn?L=vH0hWhGHIVI*p2Z+e_`(=+hGZl1^{i+fOBxoVI5< zE-o%SDe0WgU!?gJmX+Oj!H3os_zWBH2%o~(febkjF1;5L6aa6L&~686BTCK2Tc%v9 zK+>D~KJoiNhDN;hv|sYHd-sPPdMf4Y*P(L6;ZKCxLOi=}YP*Rb`r|P_-h1~Mie|8mOK>b?<5DZfY&38&?*r2RTwSmJwjR&B5)b)oriOr>dJ zmxFG>v!F~`yVT_}TMLkBxtLW^BK z&x}+*Q8V*yD2o zDXokKDPj-xDvIRaZE0>TP04Dabwj-j?xqOZp#y(?_6T)%=cm); zG>Ay!;M5ijcG(^LbF)1*CZ-k4$ZoD_uvRKrY(DPMhM6@xxAqAZCMXqHvhg9z1U#$r zE8IZ#G+z)_Fk1q>bo~90AEZ1D9zILu5aVdoidn18^i2H}k6P&*cJtA?g9_^>YV5qi z^EBnk-`^j~_Br|?70E-x=x|$U3}xRLDLrJE`DDsOWV~ ztf<#nu>LCivr8hJEAm1=m)61pR>*Bks<+!GP&JO`9TN$?oQ(y=Qa1x)p}h{*tW@Bf z?7n7}dPU_-wA4MmW0IW#e-vR)?K{&_)Za^c8^U|tdtB~4BG9#rEsLa`eS!4$+t7T> zhf8*L3O4BmSyY9~DH&4s6&T=+tI5uuV+|g?X^3oA#6B?k(fC2NDbhwmG3iMyN5`j) z?FXTNUOWi+9Hjl&TDL6!zW5!&3+$~WP3w2Vp|Ih0_EsJ?U9#bVBBd*itMk}r>E_Lw z2`=S^@juTDt|hL$ER$Q_frPtAXDRGv-qA^ogRt;L_0OiELcSGAzy_TTH=+OUDN-&y zmulllukh)3sVHSZ_|n1q_<+Y@C!*WQW_3;x#+=(xZq5qNN(Z$(Om#>p(YT8Tt*>J)rZV4YI`#Or+S6s ztuM#h0eyF>dTfs(mKh$&0~8!I@VeOs6~uA#pST^MlK-q73HX80Kzcp!TjhWk?SM<_ zEfUcJ6k_Dm5vPDbTN?@y9sPR0EMwK3CF&Mvg~7R)8}_^eB^c@t2I=Iqv>PBiNK+7H z7jsefgo^mjpBdE0dupviLy1*$Exm$}Y>P@z7|LH}&j=tFlMdwI~ zQD{O8ltwAccRt> ztqcI4@v?$~pE<=`fCl*9^Phd1O>UYwkb$w!=dwtWo4aEFhoN|QbAdX%B|Z<4n`?-% zW!zu+%w~^gez*#vU`5X2HSe05bFNmbf<$C_VGqs8r|8JMA9{nE3<5a|bYI2azq#>P zhsFt6c>@Rhjnr!Bbs4$D2;8}o(^7of z5Wt5UuJ()D=;UeX{kAne>I=WWd5@K>!q&k8C5UkNrs+*cZyZZhu{MaU;@Jh3LqrkM zbKC8&|602Z4!V&xm)WUr8#8^6w9%n$V`j7{Erd$tx`6>Uv^>0!k!jEjfC)}88QlRM zI|Z7!z^s9d;)}SksNHg)F>h>Z^X^m|!goaVG6WeLEJkL7O=|@79K6_yL;Z7jsAE=c z=nna0ysaq@vY=ARRz)fgA$ekk%!|yakQe$!K;t0`#a zO7~WH+*8`If|&Hg0AnT4#xJ0BTHU)f-x(bbeKEgxLVaSm>^z~&mW2}=01R3f+3wN) zhD{e&|Lv#X@yzLE0e}LTiaDa7a*8}ULCrq;1I;c{vAjRY{i1`RU#{7oFcsPeLg*C3 zqS^DTz^Y;NS}#2;Ly_&7N;#UsQ+Icu{f!U+%U>rL#_l~cPH0#ZFFkfbN8R2?hgdCX zmjtcn_TJ88J?{{O40RBXnp=TwU%YtnR5t+L1tJILzO?oqK`Ch8 
zC&L%V9+KHau{iV0JKZ7VMRxdOG;`6N?wF33x-bK{JMYHlH+Q){>7cH#f6=^Mb{q5K z?VH*&VVaV=WK&q5#PUMIhS{>Te2gm+zZ;f^mWM=!MJ85|>S>4WZ;iocnOxb?Dr+RP=V?foLD1uIv$!1;cPv zNzPAuR76`qA%iEQ;oz!(qMdaJ$>{j_S}S>+(!ezN{_wVrrKP2us%n(;c!~1cSv?2) zfnJ|jEy^8U^AepUB^!qvuPNfoz*3XRbWe=U-nG2_9(J^kCC-fjW3^U3nr=B>XE)FX zW9^1yL#gDVn_bYR-ea}4ww_q5Jl)^mk&(7rPL_-w#y&cZ8FzKLoP08FOB*RoxqQCw zS_JnYc2KaJT*v?(qaE%8osycm3b|&GS!w8nbn}w+Y=cEgvifjUQWN#gvUbj?VJP?u z>qCt=jxkgHeY-tTp7D$#_*DmOQF0KjvWuHjR=(t4Az4pF%!y+Ec2}oj>|uXf|#}lPM+GQ4xJZMHJ?Zy z@sf(dJv_7L+z9)D|LQ!?$ywMqk3jb%Ij=M~_t%5se_*$}R+kQ?dMP{>Z^SL#tZKt{ zqor?pt1a}U-yQTGu$D}7S{#_iak(Q>{^WOk!LjQRt!g#D-L!`C1*+%jKt^4O!jlO)Q^)tHi@ z&bzVK;TESXCl~ZlUxHEV9fuR0(v};8YLc*TZ^8$2@gUdHD)FMF>v}$Vb-Wiwwp%WE z49k*@O-&9v;L!H;DavnPWM# zw6xqflbO&asTuk=+{8=3D zaEijlp(d@edPoDP>?kgC&naT9^6uV}HKo;2so23JC-aSwl9C_YOA`mD)8=&pT+f;# z%M4qc#RRs@H>+_ol}a)z?Of#L^sj8s2TaRb!Q9`_A-6K4;@vhw6l**G$s`v$dx-D}6$-+{fXK&3d<*ln_+GRCKvS6}CfJ!wDY% zdC=->mVYvsyOG)+{;(sm*M?_p#fRf^aH)uQusfPPWT!W=7)S{5%d@m_2N{b6H@n_w zF*sppw38ndVP>%uOTt-n3#86s~bG`HjOgBz_NU=AYr%I7!(NMNpV%}mm?S>#_ z5so>6fBOwOs%WPC5}gaHdE&>qsMrntjlx{9P^u{6>Q`zV0le!OTT5meQt?+x&V0g%XV7u=`ty>(UH3 zZIpmbS4Qtj3 z5QVCB+I|)LvO*x)@_vx9QxBgkB!W+dK**TMTwuc=iTJ|t?EDT5IB;ztB4MT4nW|Ts zuOR;A5ps*9{a;=p68aviz?=X35qiCq>Azng5a}$h!r;6A`QT3xC`A9~EBFT$Mv&gW zzLqikMuYkKA6uMrA(|3A3|dG}R+{}h*-3lA)chzg4C{Np*xo|E@Hd$Hq8 zl{(CNjMvaJTOq+SZ_gu@fDufL?N7AtiAJwm6cfHR#Bb=e{gPsOuX)Eq zH}Z-?p5_jA8P%0peV(@lg#UO}Dk!h0$db-O8Qa^HULCHhxO@N;j<5V`mHJCnW66P62`A4f9v5HgX)iDWmiR>mCrsf zCdP=0az(7vlzZkhWQ^~)*tbcy1pe^8*RNvNde!~4HTor0x-tAhHHe|Ff?{GdSl+%) zh?*Q%Pm>ZcZfqkR+imZfy?Num7An7oupX>hnH8aUZzl4#S90TQwVZYr*$*DjEmYQm zo4I~`V|h0+o{E-MQZaKveJEY_p%4*|iEB(mL^>KaUAkd#YK2)=FOOfbNn^n3+^`0# z@lYT2<*;H>GkX7WLH$5wNr39qv{=x<{(hX8SY>AIxzqCaRxQ#s+|G9X=MHj@G0{{> z)vP(WjdS9U$M!B+{1$e}`x*AJ0%PM3d1z5=B6M^m`}$kM9o_rOQzCgaeXQe-DD~$y zXL)?4E+a4=$C--o!MwaWIeB?jz15c~X=w|Un#03RR7^YHem%RBsZxYB8pgrJMNPS4 zRJw%TDr#wK+i9a_@b>K^T!&Xd@bvoN;J`Z|K;GFo=6h1o{EV!K_qQOw>kf`X@{W#` 
zcRbJO>QH!0|M>sYPf!s@lebqhTgJdC?U~Z4t`z25%`EddGn|%t-ym|tFB!)h+HO=rxf8BHBn0-&F+L1eWtCc-Fdma zWa$$AK%K~i`94G;Mx&{051?(RDU14i(+o6Xc?mU=b*h~?1a?0f=n>@^@N`CUXaxp( zVY*OJHCMJ?|3pWsuHdZh9C)Cja{pgFOqqAeTz~iKv4NIcET`Sd#PRFSNG{;Ryp~?) zK?YUxXQF)v|H$*LPWP*z^$qj>^!qvkZEFIgq#A=|XiLUGE|8vM{cTAxbwxEZ8q$SI zVPCv#u%c*N^*8x4Gaez4rRpy_v%}`!*i24Dm%5s%=G^S0qRT4>l%ku*SGlm#|f*J&Wh}BujPW-S3)n>GJADA4fjU0jV8xLXt&HsDT_>B$n`1iZ%hkL zyB?N6LX-rdc=Y!q>?zUJ(tP>c%gIZ*;6C1B?vnB?S4n3#@6Fn(T&ON`m*QPSC8Js7 zb?2Wz5-aq~^F~|NLLG_dPpz(@B6@Dai>+}FE#t#D!X3A)G7`i6s0GJ?C5sZ{(_|2c z?GGi)j2)__LxU%6A7d@7en)FaYYWSYhW+phMD_pK=XNr7_b$gZ zA=A3hsHmt7hq~Ge%lGNID4(VVs`K;>dw!G+CS3549)m&o87bR!a30+aSj=4|n2+AL zgbsakHuHMjsrMyDcn1euW3@UyZ@Zi3E2Vs}Xb1O*4~+A2gVc@dU%l)r&0oLt~F_sO-+3z6uug&*PC~h>{s}<)y=}K2b8TUEAyL<1Td0sHRn<$I=QV32m>az7u=D9_ISgL=)3`oP4OH=0+-#c_Z5 zw{O~&G!rx8ocL7 z?O%rdKSY$S>M6BPUdFw%mr#?D*lf?%?*6oNrl?i za>xTzy#9Jl``*}@Vj)>VG(pq7&?G)wVm>5G8dLtZ_h4dT($a;OV>Dg&)6YyD8Yx^{ zT&O%J)SGFcW?St$Z{*gh(p~vZ&^U2F)#`+FE5lCOR zBylu;v!NjPZNo_FO(Gwu($PPC8SmXT6jWe}uNBMXrgZBT`?Pp-TD+pFY9+X&Xj+w2 z*YoRQY&eCa(?lGWue!N+UnB-RGsc|u#K3EH`z_<~c~e%EXqN}P-z8I^gmAGP=^@KR|$p&S@I0q zsPe5R!C9-ut0CP*JauOKN;VtYK~U~PY9Mpt&PDFtC26g0vO&7h;CCDU%D>&CXq(AH zR)a-nAz`8}whG-K6NY8EUqb)8VXGVcL}44s$SNLKbfIIjROvpwBwemy7Ed87pTk%x22*m5IzSWwA0!w-+q zVYsutmezLPM68r|UymeJP-7Y(5J$*Z%6+*m?)dwI7whWe6EU$i*TdubyAErz@oZ6y zR+%qe94?Py$Fa~G+nQU|oOYc$79AP~B|{C`;`ZYn;muP)AGaeZo7IUFFJhw{#yJtp zm7^(K4yl3ygT2Y`=DOwvdHC#d*5@ZBPfZ^p+zTJq`Mi7(OataWD@^8;&?8#P)~r$o zz-$l-MannJT*yS|`**39ysgKkV|&4G-%2gncMfKKwS+LdQ4z&zMA`Z+L}&7&Ok>&n zy{hj|Tp>NphJ-M+y0JTK&}lbw!Z@Q)bq2fC!dNJBBK!78E==R%e8hncliR`eE>l3f zXaWjCL5WQ=%)-`I3A|vYT#7dq-D@+~i)K=^eYog>d;vk8KR*QF(9LJh1m~ulM08Ne zjeZXEEuZWgM`3e|lS@6yXHGD($bj7y=hur&H$+#*7BF^=KQ7Vrr1g`n@sGGx zR1SrCi4ex}xk&|CPrF$v2ki-v8IZXE)N<6`;*wjeI1% zMdLT-i1i%%T_$J$lO8p4TLb@HsZDR$cI@b=tiI=!TofeFCBTKY-!mXFypE7D9AQye zgvK<-T&q*=aC(i4PhoVIPlokGE`3>)Lm9vF;6J-8bHpy6HXU~xf^8a2?8f0{R=NPW zCi5l1{-J0MySDX*@vDf0WQ)L?2!w)yua&om&*`QzWScBTd*hYP$3bI!y|NJe`rWXv 
zu8ox#k5Zk8^#wJq?H&9F2PLxwl=RoS zBwAWn$QJ3-K62RmLXvrO1wlpBM3;X}9`l{C*?VS1_g3l;jS7lQ_Y7#>tEoKHFO_IB z8P??_60y5sb7yC;RfCbUf?C*r^UmtMkcnit@j}HSAq8=#I^meb%(?$A+robI zakLD_u*}UZk&4)Py&x@~56##4!fYgaxIaq7Rmt^e)gA>wk%z2~e+WmIK-P+hD_W-~ z4TwN(3?I9Q$7FT&*utnufV^DpT7Byr&D3-$q1xW)UVu4om)t@)xB&M;_X^yQYebzI zMJ2CPG4nMDuhh^Qnd@?TNn*Z9j=n`3`^gy@Mv{*@l5xc|+g>LzG()U&5Jpzu9wEmm zB{u#`k;t!Jz#Y^ETd6eLUmIE`n9r)`6Q<^+t9^N3AZE)*TBK|GNkN7cr{CC$%S$|y zRc>gGR2!C6U?5cLKWJ$p?MKSXnrOET0jw1{lHbSy!4iaRsZ0U3(J z`jtWUD!i_mjZug`Q*CnaRd7E!BlG#?O_?`B5rywvqaWx?Y{auk-=QrP1CJbng#-pd zrOG^FcQnje8+@XxqI2$|ArL=|uKAghJaCm|aYdb`*bs!-MGSScmRD4Tz81QtAVF@| zxZK2%ac*x8-_aPx08@|>T=n+R1A?$6i(=;j!?^Z#Jo!^{V!k{}n+b5F-T*N0d+4-k zydqUBeFR^iv{O~CyK?0DSqnV0`t7KIz=&Xh6-F}k;qRM}lEEs&DtSqjEo{oOJE?!v zrUPq^77{7F?~cAH63rVhqA{;42zN=OeJJcqWN`r^NyskH^{sCtg-^tz$$8uCjNXPS z7$YmXEgVgj6Oq|AKb+Yz(3LKjV3%v^CoxW8EMc0WT05f9tQ=M|2WD19q8@F@XPd&a zFYbep&CC1oz(_V7jR_=^8xmu*6=lb>dOp`FD1M2G5dUkK7Gat+vt}eA#Xm1l z06G^@0DOv&&s9P`RZ8L4Ld?>eU47aCC?oL(%+rDIMPi?-HTo3}4gwE35$@g@31||& zrpRi(FxixK4l^W#x90kCUkWtTkLgajxlv<4Y<3bnSl(I=&eRy!s7JlRYIVRO^zyUe zp9F0@Ci~COg-s!9G@csEX}z3$>mh1rP|$Y^9rD%n3LF3{OBzq5z1iDZJ4;PHcCx5~ zLn*hDt-j@P$R?cQn43n16pcx!Xp8L_PV~yB1;}b#58HmXNkz;@A3NeAOsi$|6s8KM zeX%=g*68bXMA%b6z1~uFiTv&LUD?i=1w%pD4 zjMZ?*o@ej(GrS(RFHka5QLVoGfE@MC*C8k>60&u8uP=z!dtUY|7od3a;3tg~F*=@P zNE!7dXI%L6X823n$h}%7+8B#8%>Cbg`)|9MN^=yJZF2`_lCZ4E_;;C4T- zgGfvv#<=30d5hV#&WNW-*r4}|_6H+KLSi`yb0CVo>!#=O;h;juaP%Ng5DU3+jdY8N z_|wk(k(S8*N!H~4;II{Zpuk|>Fu&V{gF?^%peP%c=gPa4U%!5>d_VIsip?;S@6Nh% zk`tccVz%Mpg7(Y}7EAjCIBWO2uP?e!fVScfTbr95XGA@JyQDGh(ZW@_GNq1@w?bUx z{jQKSW;?owGDAN8+mY)lMyUFb_p7ky5NgQnCbNZ$(@#%K+9m6}igm8Y;30i^MBvR& zZ8UqsHH-0CN;zbJm;(Sp(d-CW@rk}b{;|dR35G)R933HVP=y&R{W2qK>1UEh?_>14 zE+pofUDK5>^0I)R^%T6elR^;{=CX)YU0|-d4S8P>LYu;opd<%jRM;Mn~?WG zVgTPUzp&2e`tVJJ9$^kDqWvlqQAGQONl0pBf3t^sI7=%paj_$cUR1Txxd4eIw7IX4IEk>l-d-ebk&J_B>F9$6i|Zb8@Dom0i)(2MYn8Vu~r> zXf1Zrm8}*0yzI;Us-5Djun8Iyo!j^@yt=9z$wwhC0nMyQ_*EXI! 
zEBQTs3|PdM%IYckEq}bc51*VCPAgH)Z>){&uZuGL1I*R$ic9_Sg#m+zMMCg;Kueee zDx$nKh%42qy~EbZP9ESO3nDz3T#1V#7uIyQ08RS~QpvO54>1S^)Y#3E=U;CPtkmuI zQOC~Dokuv<%%qih7zyuFsdU<>i7|z}GhRc{4x> zE9!|-_^x+0zD)+xrRcY~sGR!5wT+1qNdUwKq}PU*AzZMU552%=%MA^2&~i$nDbma2;%r6V(ONv54L^3e6yi5VCg z6c@&&G1BmeX*;Pt>let`JwH=r)XgH8BD$f7rme3W_gyq)hfvm!-p zUqxkT?seSX7`K-s+>;p4r326P-Y6St5>X`Tk+~493#iAyz<|0|KR8rYvk3*bUiq3z zjE-(Q=geTJ=-v%)cA!;3P|7_1=>s&dyr`&hq3PVM>9m21)-*j9vaHZyRPX1HQupzP zGLcl~ux0Pmx5?X4j6y+3^7DTEmBIC?MNS(|RXFRWH%`lTj0qh=o`MI8_jHY9^oras z(#A#&nY|1O=*5jv$~jWmUmfaiO@Fw!4`6zxMN-pkG`@t8khQT2<7i~izd>yOdX4=N zccM-XFn%ob_F*Kg00k{}#N+{1RH5{z$o|32il_fY1(bXwnHhF$z<~P&FR)7$SksYN zP5g4kcw;$p=D@+hndvmlUZOpx%4%qt7x%|SY|T}bRuq{~@W%5>yPa((|N3?J-Jb-+ z#wqPU3_+36zTEzLuLyaP6a^`%kom#7aV?!kkPYAg$QM)#f7nR zw`Yl&PZ7!(g_O2VJwH6w-iv8BRz4{Si2>-U9w-)-Keu?}=5NYcnHEUq{9KCJz%6xK zhi~8?$bK-K@kb_g^b&B$ls{iSt)4ha0}`q@eZd0->$Bs#1eriwh>4507f2!~D2J3h zP~LELTRRHaO{s`HB=-x3ho(b?Pp+ouKx71S=A1=1IMQ_b#m*knvdP@94^x-tq{&u? zU7Q9_wBuh$F27!j4V-&~?<71;dm?;bdr)cdObFUPiyGB#xIfVLL6ZdJwNzK~7oGce zpRYVgcggG0fE6D=su+xnGLbZ?9yOMD5a5K%;LHcol-)3LF%w#ny!LxTZY>U*PaYI; zZ{=nkHc5uXYABkH7YJny)6nOX$!Bbc~~s!99giQ>%qA~VnnJFNj1m&AZp>#1)Hs0k#aEGlGKRjU&@kSD%7m{@Vh?S~lY z?aQ4EhBF$<7An>2>gwVc!^HV3+C{CEb~Q9m0B1o$$+VLXIuJ}^;xq^gLLwrq^-2DZ zo{-{W?CfQy?d@3;qMvHGqB>%2$6n$8Nh9j1J$sEpPClGIeFd#Y-9?Rm(a!(Hh-jF-Vrps45I)#YvTFxLNOf0_pOc|ULi$!0#dN|=ZvmE_}mdNg0&baH)kSX6%Mb1YuYl%|ROO^4`>4`2t zKK0dj9Xl9yyJg+t2b>-XW0%8Tpi}q-_S2Kqyj4Z=3&G#Yr1;1JY~;mn?!aQVVE!Jr z-_^J5xTrT|BBbPY$8n4J&h~B)6zbr4RjHaIN|+=C*cTA|vZLH8O;Z+YSf#4C^}83YgLG(nqDew-izfevlWFZy0y7GtHb$_9daw$?V(GzZ+b{73L9~FsefrDfG*c;kPl#)%aP!Q9^zMFY*4~YFQuRKwx zQo#i25I$Jbr)A)fn!&a~@e;>Q%iW#-SOs=WTREMMvluntAOTdEbXe2JKs1OO)?gw+ zZ_YhHq>rFm2y;|&%^x|Le_^H~dG1@cr5CcYTb9j0sutPxWXuUApA`tTv#QRAC&P6_ zVI(;!4n-;azei>`A2__ad$FcX$m94L()`;KO6#LIE^IE|Cwr@TKTECvu$0dm#gy36 z47L5!nqy%;+Ea^Iv*c8x?Yn3;n`fg>&9V7cAn_~u5%x-=7y;AK7Sz|^>*4MVB_`th z*dQ4xn(`uYeu;W=SJu8k04Oz?akh>5^A?>G=OLEZuN*l7sMBGWKgw8FlD30L{+oVL 
z2qrr;dimpBEVskc;ngaH3=T;>0-xSYOX=+;PpCb!k!KhJ}bf7neA7wEVd zje0l!=kiXR?jAynpp^mP{&9Y2k-W^CnS|gGA7Cj)14BjX-S`n*v9q(l2LWz$EzT#k zR7Q==gNkx09YRM;9Y2SDKR`DW-e+X0x1)3|kU1y8CjdueYG11o=5b5EL!sdPKMOBQ zAf->%G==8^DR$!c015Kc>Qzp4i)}E17hH_1YQ0}ynSmj zskoTMcpA^0>1kdCUJyWLLzN}7z8Pr8ssp zdIhN(F{qi>O$x&B?BhjG;7`4Z_cvC|%11t#eIn3@UKn>H)v6KxR0)05XrIm86jOgp zddOBgwb`L_>vJBrzSJy=Of9I9e!NBQR`)i~?Yr)U9Z)jku^S<|MlsQfTsXTlL*U%g z@y`dZ64!b>BoYTyII^e!VGyGz}oL7P=u@f?|b zx;Fm}`D@k2BslymA`y*?+h_TkK~mbwNGBM7#fD=|IrDUo(%XFmQ&PxR%QaM+WW3Zm z`^z>pWkc?}FT^q&6RI{%$>YDIAXic~t96Qg;4Bd;3=X9O(@LWK&LxgUw%fQws4`wj zMjC*j-llvhjn=uZoy?Ao92W4yvp0T8ga+)4c>(2`Y_C(nDT1O47#IHs><<#Dmp>cR;&4Z02?9Y!sVorK_u}4`i#LfehxU zsH4IB^TwvQUOS)Kz?#?^vx!%|pt2Jqxy~DkbH;#&+s#JT3Eum=8_7vPjP)JUR z0&KFy?%nKGY+6m@$D&BQLbY~)L=B%x{6HtuF^H8+MGZ|)LsNF+bhmU_fV6ajw6t`CO1E@3Y^1yETibKaTmSRk zali51JH{R3!VnN)yMNDqe(PCl&biiPt6s0tHWE{6Wo9a+GM^8x50n1~3Ysy~NDPNu zI2s+=+6UO$VR!_Qh!?t00RByiX95u}IAtmiN%!*SGsYuxkCjY$If-^7|AdWPS=n6i zVEz9>FP!!C^Z>n7T;Z^oguL9_5f8n9k3Gn-H6b=Erzih*?iHJf2M#QVm$ejayX3u=Wz!!n&)U2(7sIY zWBUwgAVj1r#*oKhH!WU3ZsZ$K{YTKs6gFFm($N;{qov7Dz6(Hi*TDC-o98JG$%j_Y zER{;i6K5jP0U8AS^jq5Klf!ThD@LH0AS&a>*{9XsgS>NCLo>40KzY9-*5C{lt(BGT zJL;Zk?|DMl_y|E)@K-)p!9cs{LnTEVv0(BCv*E~G+h2k?KjZtC7T_78u4cBdAYX z+2{^0AY=+82%IG4pA-lfXlG7oXL9!3`B5n&ikX&zD^AC)Yv!Swh4Di;u_qnq?iReAw@8HRtO{DHvj_mwa6|5m4pO4BEl1t)& z+|R@zSQNQ7;dh)PF}AJOMabk6CN9p;tR&Tm+}+uvCT})uKlbI239{6> zilC#aePa>2cL%}wNZbUX6uF$We?~zaYC~+LQXzc@)1E|U6MX0r(FhJqh=?7#RY2Jr zSz=%uSg(Yp9MyTf_wbDmrAZ~biQ9f5RbwQXYIJ0<;*6$N@LmDc3Y-DqI+eZ^d0h&w zX>1^L{eq@)3Gwi7FjUK|Tm{D>b?t#@)efR#)&*j!HVTCot zkrCf#c?j;`sX%{z2A$P)?BDv@hkI|{xRLhlou^Ur)QxC1TW#7Z`cDowU*H8=VFR%v z=?4zL56ufSXgYbwnsdY?A*{i6m4#2-DvVJh_gLc}n*Yi6LV}!KgngM)yDi-0spDym zw|kmRHjBHjwKKMImaTk=oi?yW_+L`kB=qP+FC*}}qau4%D$T>B;?7jN)02vvyRbOS zcLLQ0t7ijvcR+)V%K8AcuQ)$bFdo^_SRE>V`VYAz@3zhKzp)Q6lk@*bE_wYL9is`( zn~VUEO^V67&4yLsXcOkajU}G95ZQ<;tG^|rkhbt<3|@y{Si!Biw&FT#CX-+ZWwEl_ zVmbvY)5?M(T>5a`C=H99kUR@?dHc_~Q2s;DfNX9K`ZZ(wAIZ+-)q7%trg>Kh*YbcR 
zQNHEnFBVW4fcRwHrt%07+vK&{Npa{NYO@{hN|RcfbaRYb%p@ussp#%0FeZ`peO)^2YdlW?>8v zQyDO3X~;JN0$I%JY=loatz&kzFBij9Z+-se!DpPsUo{P)% zysq@fZS#UEITal(jy3{u`EadPe12vo^Pb4vBHQDx%ej-4itg&tqTR@jsEcw21_l5z zb(93$;T8qWY_rDr!}3%YMhcR7hkOAmhg zGC9xn$>7E$EnP(w6ALR6nB-LdIf&GV7q7x{;v}E`Nxjmo){bYsUPZQY;2qv70U}^^ z#9XD&&ckh-0Ena#?fiN8l_izHkl~j}s}Oz-7RC4W6@t=WcV#!TN=4uk zQ=aYM#_NZ!TZ{F4gRY=U-A%DTzfR8aB;!15L;DZxqGT7S{rP8~SVg79JGN+qj25}* z8+3JbKl@}p3x*!G+(C`iv_LA!kZ&)4l&r=STgk13S!D9^wDzZ#P+Aa_(nwqV38=T( zltaC=8Xc) zx75x-47Ajfn%-EuAD~^}XA3IC=fKz&0mLdA6{gR}zr3`w`13C5B=pLl^fAG6i^|eA z`kjipb6Sw;w&t7vfrR6c6KJNmDk$htn$K_El~eXscN7`}@lLkT7~&=#V=w~obXUL2 zEmO5L-t+pt;=3#RhCfy7`VS8ki(KZh{~NR0QI(is7Gt;cBQbjt zZ4|kcdXkH{U5Q#UQFMy-P*<`J_N;ov`niwafrR>Jsp2>usq6;$4po+j>&Q>IT;i<~ z&6?%;{&=YVT8)W<_pPd|m|zeMLk%w=iqRF#EpJdi}sX-0o z=18gFlR*9v&Ou!w6~a(Pyr=`6qE?F3YL4j1cE6zOn=B=^KE=|9521%|1m^PQu5Tn^RWDbkQOn&e&pXn z%TEPr{wtnJ1EA2wkl#S!BBt_4l!XdayZxcj+FinfPBm#A5%*7+sV1Po9;wnxrpS1n zOf_O{rf!-S=-y{fJ&I#c!-kpg3OWf%nk8X79>c7 z77_f)xO$O=)wIj|LVpyO9h}gOSSyGxUeG>n?`Ui~3y3EHo8p7KV^qVthV2A^cX^liz7urx6`_M zXuADN-@aIfoo3up@N@*`i7wMhqP6vqt z#O!V_f&v2ZuXq@AG~5R8nCb3C#A|C`7$A<2!T(V~jF7pJnwr`Ih;6l?RZ*^bTa`fJ z=0ZZ>+T`kP+k`qB1rbnR_eM?NNT8)%%mmqSXsqdrj_6|DXPwy^YQWRz>FWNp)_$CO zob3C0^>>lcJK-hIHD=3}-?8v>9%vq!4Lm2p&5hWbenwZDUlg{dqhd0doh;_dtP&B@ zN+{BMcx%%&)FV#!nu4tQHPd*8-a--bamRG8quotKXqOC5>Fax8>vm`aFS6vnfRkHh z_8;LMuv)K9&VPejJhe;?@=1;kjQCXOd?i>sFw}Z^_{8@rE)!$~cN3rDKw3zDE#^~O zhbW+uQy_ss5z0BFArf(y{|waaT)IF&we=R}N+kq@Q@}Jl1r&({A(B#5gYi5SgP?O^ z_^mh7g_5VY%)FFFgvw-{|eC2JkG&k&K% za96GhAX!puexndGg^CA_q7hUYt)^#|gwH(0Ur*0KZu|Me7Kx=n*rd`23>lDF;hVBDp@E80OhN($K|^=& zLr~Z)tyL~BMxFDe=hd7&4lV*T*;T+b{_J2@6xDq>^#~0mlIP}zHt*D>;p9Lm;W!yM z4<=TI6-@?mXgW$SAw2Lw-NJK8MPuwM4oU#LM~Y%Vpj0k!U9kLvIZ-J(!_J8?t^3^| zH1xFcOU8WU;zCttDO51S8U!^nWp@>B=DhYvg;(_AsJ8WGNNDUlj0C?D@WIB5y~ZwG z2kN%hRj#|cw=QG4iqMCf^|ge!FV;?W4no&0h2F%o)8QT3rQWRCD}&c$=DO>S3|lX4 zo0OU#E63Xp_B+o~wRPrZ!btn1ev|Ns9$qxWawN`GsNnGrkIV;_Xq0Qc=Jw+1fffV7 
zS;tiX>DZ0JHqIWQkdFFYTaxl!7j{a0#-*AXnLK(?inh=T;?xQhREkhIG`twv{9W`L zY@7!{v7q*A;c-2BQ4@#$XS8r+I=7o1Jc2(BR)z=}ZB%9H^>CeU^zJxa-a6wzdqQ;0 zASg&dh*sKp<|Zm_ZyKjliFbG$n30L;<3-Dw#c)r#QTW>Em5jURT9ELCiD)%GRAwau z;U8r~Fok-s4SW+3djAiMF%$G`H8!cr^i4xY&c2I;D9urTnLXB)JkVFZA&#xMD2>$^ zp4{{V!8I@4sK*6*jbv~Y%JPwwSwt7n5He0+o1F;a=>AV`LLmE>2P2h^0nbmMy`O{j z;eOIg=#=@CCKW@G%vu;xR()zl%%Sb4;>iCwB_#_k)5UBuKecGWr{S7hqSRec=yHTA zSYC#gN-is9CiWdiJyEfVcI3=#frUE?{+y2*!_hCG+JqH|ROJctDxU$7-(mc3!FpRE zqjMMp@8Q><%KUgr1+A5cQ0)Jz?|MJNHzBn;QarfA{ix!1`$zAtT={H4?%om0AsIXe z?K@al{TwfU#2EL?Y54y}m=i)RO>^|(MMe#^cq6TZF{~xxGmXS*WC2y|GdAj^P~<+j z+>n68(iZkmw5C_9ziyNZ!%~d~RPH{3 zzRasa@S@NZon+R9?nQtyTjtgOrDzM%FiDev+#h+zk10f|w5siusZIBgRy}cK9&;&`2QR`EJV(W1l;Oj@2ZmdIs5a!)9p-L zRH$eAP)ZqB_L`UkE%ui2bZD_{Z;s6%L>M;3H>{M^N^DJLK5<6wjydk2D_EVRY32CU z+Db>r^cXgBIhZ`l*ywjeP%x_3D1mt1I(z%GGztGwudvX>UI+!WlZD^mrXA%`%%#dy z=uq*PaR1LzD;;kgwKYI_J2&v1wstN8aH9@9Z2fJ+zg=VMn-2fXABRF%X44F%~<$ z1?L9qHM5y?kSO$(~AT0e^Z)G;gOhv3UQdu|#xY0~^Yf;a}f^^?$NT{PhiO*@(5(`29P7{o;_{ z8pHqa_b~FrZ;Rag`Fvwr;Qb5HRek>AINLt+0l86SkyR>Y9{=adpU1P)O9}%_7prf5J`EOGHTdJJdi?tVrb4TRyvE|AGQ1-JBR!mb0U5riho+(V=}7)K zGsUhjex#Gfm}@y>H9OfZgdnc*^FRKhKmXUQyFVTj#g8yK_PMnr@4?TbzS71Z7AZ-z zDqT`1)(O>KS*Jn}oA|;en#lOAvU^4y>--)36 zb<>XI55>e2CW0Pa^n9MS6LaZ`z}&tg`%ne+^T0s?E?V)g7FVv1lJgY>K2P?dR8auy zG@=JD2@kiB3rGITCmXX|C&!bNb;o%04!!w8_IzXf4`2?~`SBh-%vY2vc}rWbrY>Hs z;Yl`r6wIh6Z0ODhu=`bvIs5^Eqt1W*Q$Q(y$mWAl;obdjC#{2X%(uC?}O4Um&(co2MjQ-r&^A*phy7wG(D@kV-NQ_B&JzO$ zJ~TfDirRPJp?MMY&L zyXjBG>=PRz^$Pd)Fq3oEPm@0!9`*GUXq4Dp?6n2)>!YeoPD@L2GJdCeFd0vvYw8=i z+`Af09F-{@#Ww$0?$}Jf^T@yJ%FgY(ci-BP+ZSbKM_9E-Wv=sDaLv7=>WJDI4cu87 z5Bh2NvwxNyUYX&MT(SA3omKu1@>z=Y&ebaY#C1(#}xdCD2@b+qQSusE2zO>v|Q zmCltNaZo+iv1D$WT$gT*aXnV9Gzz|6twX|Rbx4B*0^|41s( zNVD?|3*~X`OSSLS$lu_v+1N;f>wTj@mtTg>M5L- zhn^f_pcy&mzCAor01r$s=&;_4#ok{V*8^LF7kLU8B2AbNN8j*+8GMs|FZ!uowZtlZTMrK&ICU&9aZ_(^?UR4DHC?%lh@>YgnZ zewR4}th4LV?uA2)`bs?HCay?J1T#5Av9iiaxwVec=Fk>KeN}cd?M#Lna=$ig2wy9U zX!;-fmbMO#oLFssy|6hsjr{mOHmvmF(_ryGHmrC##o-?t)($o-zSn{)g0`ji9{Z!f 
z3$xXGQ(6e~T-J-bo#UgXlldO5=zEsy6Qn%0KO0}W)e*AWU4r0TWZGN5!Of*;|Mu-n z1Rmi5f=dzmHgfOPn>X0A)#N)gzuVh)g??7?DM-ofh%V7y9SPf6;eRK*?)vrT^U0n{ z3j>6(8^?W7y=sPEOIK_-;JHh3KJ)Dz0uj!j+8nHhfwrwPsP-#r+HCb%m6%O7fBdF- z!E%2>UB4s#%Ez%SS+P=zwZ@x!EEPsghp zQ;!gfJ;SqW$Zx|o0{4xM0;c)p%(QbE1kaCYHSdTP|L5wBf;*alC^rO2x|Z}RZT@-u zW!lmd%y|OQYxCHnGWhw7^TBg+0{Nu#VWFvGTU$;U>kQi)T8PfPvU=wZju4VJfz$_! zz0{E$2B4`;FLOP{XE%_X&)LGmEDiOgXQBDsVBtWoysV7)ij&OC-j%OVrs>53-SXF9(hf5*y@F<;!k0rc>u2BROQjMxfwT zEXVrlWBYQOeLHyE;Sna*=g))qoNoFp&sqp0)YQj61Ozk+eqGO4l`IyOZNu>v{}l&Y zk}*SG!F>w{3!@&PV|b`sc6v4(9`plp=CIGiTV>pWQe@7)T-UoXD$67)FpoR>Ttig3 zm616QqN3k=2Z@7|eq$Sb<#h3&27QY^$>8hA(U^t_jcd2*c&>dY-+l3z%Ums4se5py zS|QueDp!!HZ69gtLqMtct$y3K0zNMx3@-n#M0?mivXtufz(b65TLqw8E;*jvb!ggH2N zSPgP(V3hjHy;!5FeK*pH^G{?L`0aNX82l>j6Wly4R7`kv%*Xdq`(CT^Dr()`m>P61Mq00k@25sFC9J+jCGNf{UX#wb4X$RyKU^(z$E5QLw zIZspjAvILcR7knLBvd?D`mo+ER8cnGx~*+7Rf7Q+yxoD+SsX7EjN)gXRnMS<$)|er z2>^O)WK$%IxcSS7D?j-8eX65Yx3aZO z?iV2sp$(yrDK$mB>`!Rh$bI2K+;2QPy(YRIf65K#Yz>zMU+Ix*o0>Y8xrzu3W28~~ z!UD5SicChmVWy}h7sK#u8}emTO;CHHQfeyFi>(?g;Ocb7?H-<3cZ`dJ11aG}u*T6k zdpn%1TnU@^%XMOw*s=0$%esA@$@7it=V$gUGU4{AnXgPI@wm9`T{`2DF;>YAJ3qVG z^9mUwvzDn>d7c*vtH;K&foF_BT36)z+FC)xy;?$XOG}p78WOYT%FJ?=4Q%r1T}N0~ zrkWC(8jd9ju?3T~>ZpoLCNKPTx?~E#U`?V;f(t#ia zl$6xY@s16h!UnQC`L^Baa7(?YAhANaam{0o(yGOw8hZ7LQueAns~D`ltWgBI*DF&f zXEJ)JF0f{$*B%~;2rN&&J7;Pt@-63NRR~w2R^?>Q)!?jttWHE_mOv1u6)ZwTwFr?k z9TRm9Wwf=0YyC%T@HL~UO%Y(0^?d#x;Q``lcwa{_=0&5IziO>2K8+Oe9#qkz1eTSM z_{-k=!=}!kL7ib+R?t)Q---eF3kB2P)dNER&zBhgUwlY!7(nY+bXEEz6vH^u(0%bd}GM zz4*FyqMt@}PqK~^yxHbvOX8kIbP7BTq@*;;m$)Schl+W^#q@bbE zZjCPi{UeAQ*=JQS_n|9MBFdUp)=njyU4%&(lcb|nZ`rYOt+MOba_%y#>k?nx)4Bni zcxrIld>qlqFQ=nCJBg^r`V9{{yX)Cxr>ymiu#EcqG{q>m+(@s9&6* z)AWYpi&!I1IT?#mEVQ6PO~<%DyQy^YM0eZTtOL#m1;){+F=SjR$yV=vNJw#l^oVWJ zWO_nxzAM*}E6)REJ#WWWmf)N4hfpsv9R>(1g=%R?{mr*pT3AHmaPuMtp!ievo6T0S%=Jdesnv@0t-1s6zMO5WPc{gpf&G%^WD^B5 zDu{Cf*&`BAL_xyi&0UD!oguWJUQ%r}OAKWfvZs~+4fXoqcWQp?Yq7x`Ei&uv{_=xz 
zuNF*Js1_q13kewxm2tvgU)CyoBBDp7Os<%k=;-KjCG8|&qQjdZ@1|u4FJ)MDR=(|M z+CstlT@T;q!<9=m!=*~;PrL^mITX#o8yG4qcmDOdAcFcO4*CcCfzm#qaT|Q7(Yjc- zh9d4B7o_6DIjhyHnSJccfdKn4U6`CfTQm9Pi_G`$k72+g6$3-xhEuAKJdNxFqr@q_ zndp`{&vj1Da{s_CqqB*RRRY&AuCwnvQc_YXXvNtM)uNzB?ouX*bKTA0JrH{cb#%DPos* zOIT1b)!G&$2=$r$^fT4*9OC$AN(a2ZYG3=a@8!Cr_rAgaU@NQ6C!5-NL>i;zCL|nL zgJO2WUA}eO0tj&qw{;=n`-XldX%gU=n9w5UM)lx9BotjUN z!<(=9%C}k9+y%P}Y}j=Qn!0|?|r8Q*B^hu&mt3ihl@XIFj6&` z#lOBoxE(phYH=tw`VkpT=3?N{D{L9gQ_nc5iY`Yc2wF>~Pe7ax5I^ z%ho{W;^HbXpR8Zu?!AhQE&l3Ubd}_*;UPS!`i2INu{`9o*2~-XL|i+oCL_8F?M2KF zG|;E2uSkA|w&RQF`*$qjPh7@IF2bEyH&1`gW7T|M5-h}zsy4xs&cy^BN z*=+ejvSQX_e-BocqG1EyfF*yufMo+L_9JdZP3! z+4^{8>GiBj3+FYlaH`wP%&`iXZruQ9s98J%1H1K!&Xr03QUkcHz5RSrl)7ge^xD2H zakO2%jxh^Q5&^Db0`9rWST76K)1~6;CP@aiI!CWV+NA@-^uQXgYo&}uUQ-7!9|>Rm z99pGMEa$O;O=asfGo5jxCoFDv0Ts~g7zumWuIVg!qIZS7*bQbd{f^2v;zrhg$HQw$ zYS>=v%ct&8<-A`(j%_cUCLPxXXpd2ECO!<6j6+Y;NEX1UD&;dITuz^4OnjuFW z$-2xHVrIU`&0wdItyUbmPwa+*SE5)poq6n6q+qu`aGxfxA>s;t{Tgwbs-DBKmIypc z=Y}?gBoerZ(7kp%8O3fXeDvLVlT`_ z#{?{ep_^N4?bh=GT_d*`Hnb0ml@Qo_KDUI#ve@t8)DW}BZWDA)&&-HLNb%EX*4%~e z#n}VzWY5*$=m=}1OMlX)Np|i_R}C>!f$PK z0l*+dr2$1oyH|@%swI^i2*tzC9(nDJ8x9}27S7i7DT%KT-@+k2>LWy1!gX(dq`kaN zkYv+jUZ!uY)6~fbPNrcWng%nVhLTI)8=Y$pf&`e0WjbQ>$rE7#vcg9US`FX z!fsm6SP*^aZkBSi&J6OooGoX#E!y|*bvn7zfdph1PwWqIzy^}idGFR()qIR0Kfox9 z;Be@14L?yowz5*UGq#@v8Pdyn6}luL90ULSX_n_ZwXj`+>2A$nw%F8buN5dWd=tU%Khl)STQ3Q{uw;1 zO%!fZEwsbyO_SC>f5Y#IOZ_nD`_rL9djd(-dY9gT5=r#Em0{5sJIN@11LA}|S8Lve zcz%JpVQySkfz{SPL3i=$igno>V}FNZ$8G;o@WW-+47l|cD(w3Z)P3-(i1x<~J^cwE zg6VxZYv9w=C4XdhKkT!(NFldFx-J$sav##qaCFN4Zgxvn39Cte{qM}P+>T2RP#Fwh;#J#rLQ4aVF&0gMiQt2) z+Z7+e6v*t;IKmF$-tWYi29!dO2(C|ub@-a|kFx6wSh)1$vSS6h9~fk=qqRn4H4j+` zZ!Oo88}3buz@Efm;b&^^=eh&qCZf5_8nTDwj9|yZkWG2ia1P~lQem~HzNzbnu0FPFmxuYgu+QdO&ydMg3Lo?9Q3GnBzM z(i8LCk6Gk;;&;^v;e_jk*%rYgOpTqmR?0l_!6W;~rJkkaz^j-p&(d?v4qewI1k6WS zC$Mw#3)!8-Wo=v|ppt{9&FS|paP$?6-@CFdeZOPP&e*&QmJ4f)oaP!$tGuN{MTZuM zMpv$AerJ0&SSL)vKKf28J|gL~MyIja)gR>?5qT#f=WaqX{Vm@n;-71}y#wD0cKuWR 
z=P{~ISSY%L*M9v+q76NG3^qj6iLjd;QSjIqy{oRi=uhm1fX+c+JGPp>m%m!?j~9Wt z{9n#lpd(l~P=nNQ;0{G? zt=rW?lpu%SpJ=guHZ(fgTfIQdpuX267sx0frcv{DI&tJgIdQdE>d?s^})Pg zR@F^ja$Z}rIv9#C5#S9lHb*}hM?p@_&y0&Dv%N0 zNOT)pzqU>$(w8S=rlz&|;PO$2WF#T$vZ{5kIcdA0DQJ70oX_(8^yw;ZiS=%)9>AgB z`?C~fce!sXSQkc9ayKfGQz{tfC=*O|^|wKyr2|k8RM0F{WTbc=>O8#3wtOyg50fyG z`1sl6z}^m3K48yM%TDKrDHKa;gE_It*@Nw~qus3cUS2^!^&}@a1`Y3YvR%%q78?%q z3HsLGowO=M{@&`Ivqw{Pyb; z(`)0rsMx5XF+5E%*Bl=LM%XGZ^vk&>?@~O749g0?z)GK;`68FhA_n$vfyFr)0>*K) zlG{WCF~%rX$sZxX#V1}qD}tzojGYsiG7oMnmK*m9bUWW}(|5v|B->EQ18Y)0If?D= zF6fE1+%(=4a*u~68mgMDMNS@h#;L4-d3E@>ZqPU4>|2u=4~ui>&XfHF zLOEBR+>@>&Tfs_0Vdk_o)+IC4Z@bs5nQL`Dc(rB|@dc|}0Y{Rhv+l#wPed%Ih#9+V|CWe>wBTZfI!bNiH^$B&z zg5Lue1YSNg-LsOPHO7w^RM;kECW#8&biN1HvO2ah3y*HMM}>H9{&en*fVpzGga@y4 zMme{P65f=RJHum;Cc6T*-u#0KE~(x%9pcFIM%0;fk;S; zV3z9*oLluh^B6P59&(L}&kPK@`j^k4cQ99Y`J>Y0!9lE;LM5S2i^DZ6EHUfJn5?$s zsoTIbNxpb-6ROoT@lxtZH8`sA;52bO*w9MSi(aZE8Pv(WE|+%J3jQ%DARWl7MiwmC ztp+>j3J9=a%s!=0v(P!WkQm$4HO>~at#WVY9BX~R!$Z8XLddy8fA{WpqVqZ>gfB8C zb%6yKH<-OC^5O*r%rQ+h4AKT>=lWN$&}@n3Ao+c}qY0|Y>rLFYM|liKvt168*$sM2 zE*6=cX~?&M9?J9f+wa-@{&>%?0r#xOM8ZT=;cV@_sDy9^V6I>9 zqNpEf2Q&<4&uFE%>)W?CvD}(qC+7veXCJ|ErSUknd^4Kffa&C99WX`hy#uh{dv>@L zW;i*0@8tA!xYQk6Pb{LYt*z~gq}Uv#j%~H`et^7V%b1JUVpZsu6N8`#?FF4DPu>r+ zK2LTs1KY~xu&i^mFRz9lJMFF**pWlKg57l~8rs3RGRQ;QEzeGXU3A)CX#&~M#wK^e zmm4tMP320LbwwSrQ^O_v%%vciMj7n4xU z4zBRAQVU&4PjB4wk@w9|EJXt+9%o}`h-37CcN`oZDpyZJ2?|KOFzS}Zu?Pqx=4a@( z)@g8CJG$PxFx+Q}^&w1FxnpQdDm!pyV7QOZw`1$-dKK=*6xvhYs>beQFUCxp?6}I( z8#f7?u?0`T9$M_jd0`^vx%O?SqD-tt$13fqy9@Fr%Y=6`H_0nsZ6hVE=bieKY$kjg z(|q-Uam*2HWjwNb$=djrV-wsD!>_D!&dhSIZo;_4r@jj$)$=lH&x3AQ)%Z}cb=r_7 zCnx9M)8_~ekrRF_#ONHJo^F7@Hdal#Ud(Gh5PuC_lYyqng_N|fp1k*J zIzFnyR{4=_L=R`8C!DO^-Q7dH52lbvaKU=uW)-hY;4J>&!`0o6m=X#e;**op>v@%x z&KrlHm}ge&CeK?$A55R&>SS89hmn)rs6DAZw(ADMvOu>549+J&G#v?Ct%H*;m3z$v zKq4)ugME4>B3dL$O?oE7}V9S1w($ggMG!vedFUE7Bayje(BUc=E8V zZk&DMq|EBnDQTV>h9yhBoVr*ySZY4;NvWIq5si6*vy(o`jdN4LeSeL#K)=Ov^3+*) 
z^xN1_G~23T^nLTr6z{?^r(HJpT#ZhkJh{PY6eeU(71q216f(X}1pA9+H{WBSgtIit z{2EcI%1G@NhXUPf5}j-&a9~M(x;@~sy}J*#jQM)Y!e<(_o6Emt2or zgMg9;)D7vnOSt8&A+cXUgPL#R!-wDMikmw-0|tE}!1p#%z0#gA{+_8QDns%O2w)2( zA-(=QX!DpayfTKmKdV3G(c1-Yr!r@%BSoRKPN;}uL(5-^j?d&qx($SC#moG8^P}kz zqTa=6>-Z#5(SZs67Fo9X>CeysSeLhRH@n1l0oPY& z=$Hx(+F)th3ihg`zA>$}*;xzP+eXz(A8{O8l+-2F3BEniU}H_d8FkkV4vD}CKWEsI z9Xm8MmvAU^Zfp~*sR>u_0*%EAaqP$9g~S*x3&Jzk)gyU6KJ6oRk-gZRb*`J(_SoQc z`L6oiMKtVqKd&zjW(Dux$3#azez4A8F6H_D{mb(MemEps;C|1~UtL}n*Y9HRHn+$P z$OmxOU*~A2J(7EP>2#v}Sl3jEx2Z$)`oe1^+OU4V4l^6ul2`E!Fg8%y@67mCl$u&3 z=e^DRpvW>`^S#x{x;H}3Q2BR!QEKd2_r<}*x%Jo+lW3+IpO_fFkisXQ^sR@xShfeW z$V1fz=m4qitZ}@?TCHT;bH4|83`Mn&Q=OfZ6p=)zwwlb#+eGX&+6O#zY&c#!(ybkl zo1*BGweAtw$^{L_E(6&ba00Qq#DTAUT`ego`_%N>w^iOv_b(wg9k=FE0j%0t8;t^H zPYb#R%FKGJkM}Jtp9NM{dQB(AkJF>Oow<2p^c5|JG>1@=3kjYFOCRsv=Hv4_cH55> zu(7T(7*VNHnxrT`0sa;$w@0hdwvMN}Y^Gy3Z7Mr~M%!3-kN7!WYEuw^ds|h3d1YqI z=f3K|O00m54?F}aRm;9r)aVKwy8Er>O0Mzj3t0eEZft0Mijt1j&s>*e0N=$Z?oF* z9e1sCvdF$Pw}YX3m2b|pgTxMLBpJE~2Ht$rIyJw%KIqyoyQqpiGxFm%9C}j=sliTB zt(tgS$d9CEkciv(H^EaD4`^QcVs@N%)5yl=FHc^=?SyZ(Y-_GFBBem0e;D89 z4&z_98gQ=ydW3)?&&d7y>W@1QNLe{gR%s$C0r<*!fQX7UVUM&vWztQ5vSu9NcfL3H zq?DaYsdB7=vfJ1rq;{@x(;UK+gfW!PS5A&PN3YC}$ ziH`KQmoZdb)G}z98}!U24h|;+Biya$ns?`vB>k{$=7}q>Q0uNb(tz!hl6Ze3*?ZGOI9kn;|!x zym%haMbnXjZ!@!?dP#?Z9U|#Q$kW>A-6`S`ovqOg3o|p?WwZL(im~brp;N_u-#K%( zhPpiruDZS)rc-pVUgop8MnKm%r)khIhQ#_(SZ6s}&Rc3B3n&h7@sBbF+hm3-D`5I$ zh4tz+$899L%c}D>4*)J{!n-_kt%r2$TyAVi*rWol9+>#P9-u zS9geKP(?3KsafL!@Mav8J9cX$!T`|0y;L_o{%*bT^s&EjqXlF14=`mh+|!~5*~Bxb zrvp|i3qXKS=iRK5b~JHQZuYzJyb*NT-$sZ&dGsRs$kW4X`e)?y=HA+7{Pn9}Yz7s4 z@M(RZG%y{lY1ufMPDVHEuDagfTP<8ub=j~xySTx2eI%6&=(`}G)Rt<;U;D=lP*#is z+B~zW?*G`s2l1my+2rk8GHVcKg{;0bHsu}~Ww#k252ZathH$!8$(CCsV;^cC5VGl7 zvhm^aP*4obI4O@ZAven^AUcc#tpGxLE;bSlMj!EWqzj?M)wicpIs<47wFAm%Qz5 z=EcA}Xs^7S>QLSx+{J9ym___WRWmaD(5NzM<<<^(+zg;Z2`y zjEORv?_&X_Wu_j+Po`Pzv zO48Jn%V^9GSSqymPVDf3?u8X1yVVI{Fy_W_og8yRn{X+t>gEt4LgS3`VOItEEj*kLmrofp>X*4h%hE{P&Nl5?(V)Iw*Q`#W2LusD-Y|T$-j|Oxg 
z1x?>nIc`~PBSR_)Q^gIWLBnCaI)aCr;ADlW^ah?>@x{&`?dSS1x`fq<92Hag^&z*fOd51 z=g;Ob$3hx8Dt&~=lP4_5WoE;3QGyD)wFdB^9;$S=uiAozd;+@5#r|12aF=}Ppr6SK zy2$#`O1i9Yi%;raNa<>KP*Ol*ha5~uzd=UU2_QY7l`|k+=*iH*Mz*WH&&s0TK8w_G za6@^g4-MbALDVvI?%I-vn`X81^A2(@jZZVPIk5* zz%oBw8*@ZaeV}n=Kbeotuw&sU0ojv3Al%#i%@$>@3!EWT=a;^M4G_)8+TYpJI;rK- zBv!_%{UucgVshlv20-eu{XR9AcK}xfNG^~xxA#0X`7Q7RC@dW}&CQ5!5E<=+n<0;w z`1smqtX^h-JhZ#;I9(!+&kGdF{JS+7`DD&j;}tt;P-g>N>i=BF^8)L_$k5jknAgx6 z?iiAo5STRidX3Y<#s=oHwMygGB#SqZ&2?!;x_2J4ES6q<0&qPP3Ii4=zRK+QR24kD z-35Mqs3;*Cf;_Ppd!{PioA!@<+dC06{$Tqp8DRf?YWQh3t^&t_V-6up(au!Z@0*1Ffvo>8cUp%J8_%m zLYiPiCq;DY&~<`gq-62sL3YNbR`wX=i^+pi?{&V8m~3e-D{LQ5F@KR>__{|Lqx5CN z9)Ztr5SJ(+1h{^w6aVHfU5K>67X zkdHw+5XrWy$kezPGe`40=_Y_-tS+YjhaVgtTk#Xw6T8H(@bOjSkgAt&Ba0W=+hyN% z-#c&0pKVeq(bg(xT^x134NsP>QKJjku5x5<*HYY3ihIUR9tsh{U=Ot){DPv zSPGogA6eX#dmPnc0hRC#>1qMv_TNyzF7NJy-j9#c33WlKWVGXhkTK5sPA3~3i?Szg zgZ1}rdsOj9VacUo-Gq!cl&ir#3*LD(w?sg}J5z0U)J`kxehO~zJ|Gg=YKVBZ>3X8p z!&5w&X1;1q$hT9d8+KeEmNR(O2B5u2R$C^{$!wfrEh8a0Vf8=xvorvS& zp;!iIXVkWQ&H0+5N{O4G@;rs2n0jwKrCd4ZUcMr2jz-*3DAVZ`Fw5XTQTmW`2==@X z7We6Uh4p0sAXzrEM%_9>2DQ&LV5eAYI31AVceun0p{%V_ebf?|<)yjdqU9ew8xL}F z)t#X8gT-c{!$H4E_o_G>=FmYWTgLczO7|b>isv`4KX5&^bzds}`gM+9{7@PCp}1p8 zRnGW|7Qi$4o_EP+jw)BnjfJQW+Oavu6>wrgYWW6Cj6b$Cgn_Pp?w}?4_2%JJB8IDf*Y#fWHmGu#*1)6i?;5t9q zmiEa-NVolS0k5Mna~;`b+wZsZB11tr8OtUyl8Cu&ied){L;hUaY=a;z?HrB5*!}cQ zYBokoXbX>BE)!zJcN4Ca@1v+g_Aaszn<(kGc@6;ky&$!C%a z2$KiAl^^dpOF?WQUf%`SO|K)fW9P64BO-XiAuXj9)HQm(gBMus=Px+qeLIF6ukMRX zV@r#-vAl72p)`8(eAUk)C8~hlbm<=)9*WxhZ6TSAmoHx?HV-*Y63;O^N%~@|xjs_u z52A~WRW8Bl`LK5Y+Oi&&MvfK}Ez^r&B%DV?qgJh>>)g#Jl-Fk9CRo4*LSfO9E-j7P z`}3c>K>x)2E@JnV9Z2gn{fJMhuxC^Mb5*tr;_D#y31{eHEY$*edpQ?6do@JPf6z)|Ww=^x>^uk&<_%1C+cz&vcszKl- z<{Q*;3QA3bR02h>r)V0mxI@nY-)}5cZfD=QK@4pd`J{kaU+4ow#7iNQN}?i8IJ zyfd)oqkS5vg>&OZ&u`Q80`UNgKJs)hRd)1-gb`GxBfB6zW-^M2kGAp5I<>aWj11aHHSd{F=Q zzq~#B`yMpIfAqZl<7ZQdRQ|sA>>oez=NJDw4~f+{VQFXe4T$$wdwX454scEXc$?$) z@$my%C*jlAhascfl@Df-Yu?q>XPm0=Bk{G(%`_JPF~3QuYo`jUJ2pOk6%db&4VD

_vY+I3cI=r0L3R;_HQIM3j(aznw)~5UbtOv-@Xk3U2I(D*phDO7p%!2~Ez`3LQ>6K=R+K zxmDk-<7KpAu2W&Zs^f4DHRFlR83PKJ2Ia){yaj+bT}W>beS;=7Zp)X<;^B-? zA}?k9FZSLuDypno8wCR>2u46aKqX0*ELo)ii%gM=B$9I|augAfAV`rjNX|JHRe(s& zIp-uf=kzVQ+wOkf^S$God++&q_ZU)+g4(tB+H1}E%xBIuADuUck?HBrKyu6*V6wvP z&Xm$@bKj;o0qowiHl5{N9{>lBE|MY&V097E#QXERLOZ0 z;=BDoq3$B_q$-Vn7B)BhROunJ>h@Qptl|iMrtd*ifqzWJ{pu!KHLSSF=2G~9l}(3q z?O-c3Nl_K25?^ns?d+2mC&ml?v}5cI`L4U4CCjUSJB-yIMdMx@L4HLVso#3o6v z(XD|MMLFN1cv)Hq*qoT!V5d@G zayQ^E;})lOFP<-$G_Lmj+!DA-VwbcQ*(*78b{IYvq&>!4_%YHwCcu%(4}NnbAo8VO z%N||HJNGI+8HW(_I&6#bI}Y4;h?IA{@7S5K`{@g!aRX4a^Oa}cn?_2uI*>T=>>gaX z`TW$~XfheUZAH3L8zJx0xC2ad`lBs+d5aJn-rqyVZ{VJz);JM6|hJHyDFo4oU4ZYP2Pm9n0ogtMc!g(eM|ny>_WM+;DAL z@(C}Z{2|EJT=9G_s=r}npI84;lkjL6+5MAZtDu7lr+qe$J@Kje`5OipjjJIheV=GJ zv!MK>W4vX-cnk{#`p~sz{NYFv}V9hvWp_ z+dBz`F&f2){OyfdjhgG#xF^G<#x#C*T8>psKZ3>xqPxFYnY9qK;JPjH6j%`Wh2{DU zd+WqS2>PqHeH@QWAI;~g*SNYh%n>|!KGfEKr>47C(wPr|v^IfpWD8fkj{A5-uGd+4 z#5iKftN-Nla3}3*{%VMm087BZfYlgxDk0%uf1lM{dS}xH_Sci4doG3jX`<#@gkMN% z#y;XUs!Mbw3oDbL6|b-?J~xjQYKbc;QYoJ0_Xt{OgjaE~L=lNqw;n!HZLAUso0`Qw zjBi|oJ)Ed-L5w=my4^;+2=KRr+twkWiUVgTUd5ilrgv@c+FF{o?c#Fhh!&u$% z)L-EjTDg1I@f`Tvh~3f7%Z;j)wi7;ODm01kiN6L=V}J>G-dV7nI0IV2o43%?zQmrUQtF~z zRY|}EjR}MmCjwtOwu`C-drGz1W+|ou^EPb3S7W_TN}@zL@2BbdKta;A1?(18EwZBq ze7(dz`lV13(cK^-HE1~3kowR#yL#2#PZ9zA4yoLQK`2V*j(|JFZ)Aspwb0Ir=2D)o zwI9LIzC}uB*5h}@l!+eQq_6Vr zi`NfPGAq&&xvpJ@l0dAelpT*3RFLK<>~~tl-Tfr`PD#aRPXZdkf%25q1$=al|XkQ>qzPU3CI+YbNe* zq;AYwX|u9}E|U(Gg~dOG5T$zR7?yOs9*Lg3vXrB)rA)ES4en%ggAN#4cIjuyw`vN0 zo)kaZX!5c38@3w`>iKfgRiID)sMgdO8M0K6XKpCc7KjNUF);xDpe#nk#nk-EH+4(DX-hsB(Gcxwb z=To;bb9Y8J~2wj4rn_e+nyO{zF!`VOu%e6$`z(KjRZp827wzLg-mE#4p{#k0twI<4>O=nX*7Z(J zQ0TS2$kGHT*IRi&-{?b6N^7B0yEy1G^f@^AKHwMXn(30Xqg{@0WOIy}0N>x{<#*9F zccy>2Q$%x^qHL{cZ$()Z?Q*xmdOmeB%@c0}HJE?XY}96IrhZ2oG$_W z)v4*}X0W+;8@7dDR4e@osL*au#p<`LE1fEtd+mDtDS5(@H&ccjifh!)bYW2oP5JEk zSIo0dGwTcH!LCbx08eMMeHi9v9O~!S9tp5b#WZa+k!f6XPN2L+G!`x>+;8%x_{ZRN zKFFoY_!`Tflbh?1rD=Rsaj+|#?b%3=!Uq0U 
zSYX;KUODS`Vc(V8=*^?`hB6q-yhwVh92NuNg0X$IjS0I{9@#FX(kwDN;NvBV zA9lXJF4(6=M)J|fwc!Y2ePFbA0x#hd?qM-v>%fEhIakdWZQ7TG; z^S~2NoMok!49^;#%MdG&+Qjr^#G#4G47cBjn@*tasb?pFTU=mDOSM9K-Gh_7+U?eM z#3O0~m5|UlnqVEcV+Bbvjxk4nvX6ZR_ZWgXQd~xymnIzNSm=+gIG1`G!NG7?F0;8_ z08hUaG?3i2FVU6O%WN_eWM(8f*mXB8ky8-$u~%mgj+6HGSBf;1)_A4GrKG~B2nY#` zcheX9fk+bdW&3z*OUpi!qEsMma^AJT6kWVs#EG+Mk(49k4O<*8+@l4J+gfILaB%nf zx`M22ZF`q`hnM?{^0>@K5)Oo7SRkM=WblOArjOHslLDXJ`L|qmXBFG!zV5Ly1EZDF z86G$s&beN3bDl2tcMYpgw@8>l0YMJzERrfiDF=2u8z{7U_Nj!|kxkfne;tt~CJ@`f zqp{vtamPIg8)K{~Q;>1iGTOgPBJ7mCh1p1G)=;*{1NQ-cmmO^2Jg?Xg^7H8S4xaIV z*;glyH@ySg!VgTsqBQ2~h+%<)-dbPE$euTvjQ6DYwuRi9aF_GL!g6$!88jGYuAKG3 zV&ip8N2ke!CCBErW^Z!ZaW+gM@Vk}h%i`0 zQ^4D#$@8G8b@*`^)&$#&R03=w@fayZXyqDjdj(-eft;x4++4SUd4Q_#m@7IqkV?Mc z6}9flwss_2oLZ}oEEBBq734aWK-ZX>XxcFetA(fN(0F(9F}bKJ{zs$sunC-1vgF%p zw`F%d&U{BmGjsTuZzw;A2(g)qkX#(pYvxb?D47wd>`EM4*w@ym5teLW)|p5xRe;pd zT^@qEefctw?@xUafe^Q2EYzi2$=0>B4zJz9*|RfF&==g(+aym=Wx!TfIF#SE<}zG~ zcR?$(KJ)YxAuMVZ6t%85L7no3Xu0&pMAGb}Y|~v=^*!H`5SieH2~V7=Yv8ZSuhzPb zRO)#maH#Sa&iMH<+M>MkVzMkpKj^Zm`P!3YYFP2L&MhpYLuOM`0_=ZwsUnJ}y*E|^ z_>ko-75;G+}|L4eSLM= z;)8ZX_`az>ywTjugdYzoO;=Wji;PEps7XaTWZCD5t*y^mqA4ylkricS6V|FvP03F7 z*w)gR4c4P^b1np(_p{MzWevw=TYHn<>F~5%*zS26@8*iz+GMrzq}Hzej!Et0PfQH% zLKECp6-l=6Yd4sUf9l%Ug(ucL?Hz92xTC#UCB{TT5`qG)>aV&(#3xJNNR*O=&zpuJ z?#-^jIv+zMzU1(qJTUCkRTBDo5`@k{< zG~NHG1*m%r@q!Mlr+}q)cbGGYHI07BbbQ90NIdL2?!b-#?73>>gT1{gIe}o$w+h=k zUFQ5}&a;dt%V&~-yKAY~F#FybZ09WZ@H~#Qei2_mwFaBvligtk8HUW!>i(p0)b@se zy2i0!>gEBh(R#z|J|=EwDT#nyi{3RNDU4T~FA*VE3&N9d={ zF-f{@-yeUHR-QW1u216?$MW`5vfhH*bi)nlY?+iTE1WJsU+6wr*F)9xGjpazNg!4K z#^B<|4mD(vvG4(jE=LGDmqBx7en8IdM)iZ<#5@kyQ#aeCQ5lVOlO*P=d>LYs(y;B8 z-5rnc)-!%0iy{ZBb_t&9I-py_6&gfECc~Q=#&ni<>;n41diCNG{5dh{LwGtFle#PV zthfhfgrJLY%}ktjPk@=b)Or#!Kb}~3fNai>RS4Pq+#*s-^x+GF2SW>OZTuOm zI`IRuXV{sF67G1!{OW-R7iFSfh@BKms_vbO-o_{o;!!^eX?+o$`SHFgqO;Cv?)5cVSoC!@+8TsgTrkjZ!jkg||U;dkIhNHx%YDPZIp8h)K%i zaJicuE*xAB71Ia2OuLZ6uog>aIG!WibHcT7K1GtE($wm~mtt 
zl9tK6)baZ!=nM(gicJw=5Po2A%pOG9?6O>5IL}<|4QpvH(47b~UU2l{gT;?NH^Wm+ z=ERmX-rjs{d%*SPI$;WhL}-1ta6{2nz3nhc-ijDeuo=<=XF}9!xUKOvGtm9!H*u>k zV#@;tm)th)S8C7ZwnoWaZ#>QAks!vNajmVT+ zhswY3j!}x_#$_#e$gFsY@ab6_XJD^eg&&CvbsDs0Rv^hAzR;vRE)9x)g}7yHA*=Q2 zgM3RzZh5h!d4~~@$4>7Tv1n)=nJJiykubXeg7Lo}&*CY?@RwNHqZn!~1Xozoc%_#o zP-WZO+qMnOCpbLF$oc4xLd! zgkLtdkq?MO207c&aWOYAKcj@;k#Imy?A-f_vV?m3!f8$RW0tJ#4`jXfV_V%lh^8!d zG2W97m#i_$e@#j6lBF&oO(17Em%+rgr{@}cR_#36v|>kRG>#<_TRG(-W`BK`Jg57B zz?(ekdU0rcc*q?#jrg^W%4Kg(vSTP*3lI60o76MP=26bw*p!@BLZh?X!yZ0(NSsIk zsi3k$+S2m&VfB2qy~k7dECHHQ)Ai)`fCLLj<1?`q9`5#jMYo5dd1d958lJJ4rWLMl zq&^8YXf~^pjzsZgw2l+~kYrT#<+G6tJ3$)#fU8>L^3lw?jw@6TTu*y6n!71{G4K`r z%4vK_B4;w1I?ue0Gk=dbCz>o>L`==N$vrj8Z&ok|_om{mQ4;y4Rk-w;{CB$GWOlYl zCN>U>B>dRG)_nJP#@Kk}uCEK?7xNN1qgLwMMwEFl8tNZp;YZ)da{evHr;IMaq%3PZJ$Yz&c4T)CYtO;) z{qey@@}^g!_#{PU)7VQ}N4KzB6HjntSTpP1PcO?dNW#-PpvjBXoagnJ))He0oTLYI zC_9Og&5%O;AJ3LP$cB!($}nt3ruw#-C*Zp}XZZvewU@fKltw&2olA%aa8w@hjFrpl z$4@Lzc56VM!1v&pDZ3qvDG&RSNA;G?nxTCxe4!HsWet*%xyLKQp;KL=SL0Wi$~^rz z9PK{e3P-xD@{L6VGCb*hdXXwMuPIUbc9F~m zeo6t*_5#$&NXNVqjHh)SCWe5d^0 zmnaOxBgo`Cj@C%szxb4{1TO=bmI)NOqQh?ydyoEHMK9}B$^?Il<-5>eH)!jga1Cl{ z;Z4D&Fk}FoU0m;1RVsW=lGzaJN1tiUPfycT6Ee-)in+QL9J3>7(idmkfSNs+JS&;T<$X-avMY1p3*25QI?eKpCFeunmP3K--c+pi}-nLTNK9Q=;Et^dQ+!$_*gQl>6{ z7!BUisFbUBNzjt)=K<3otbOL;t@>{t@o-Ci>^S1Bvc1$<=cY4m$61p$%Vnkxbmr2` zn#2!T((>$^t*s@Q?0*cxw;YPj0pD>|gq?Bo_Y^jMb`@|wIW0))4*)dwpL!XHL!-VA zx@9p3W0_11g&7y@f(<9nm2tinxp&Ut}b7>F9*?6H&mg`0siSL%x&sPbe z`p0{C;~CT^+=jJTgeezU@w~g_dHB+Z9{c5@BM{8?Kh?|b`jE0ta;9s6M?9*A5Lx02 zGhY7&BGM&#g}2b%2HD6Fyn!1QbPR%$K-sc?`KRy)@^3~xyn?c=VUvJPVR0csUr54^6s+_Y$GUQ%G{xx~*VUF4h<*eHg zR2oZ*71>b|OUUr$@xab9Z)+3gi&W9bfK`R>^Y){sy>4fm;!-5s{WTuK5ANU3(fVFC z{KbB?OOPE5ngpQNkP;^$`m+JkEM`oHTVCM4&k$eniHb!F9VA znl7rqs~Af)DUk+Ko=J$3~i*WzCk%y{upJ^$_aJ(fB&yT97ld zAe5szn(J2fE!tx%E^Y7F-(AAw3a6u0^VLs8Zz`e^eBY(#VN;khr0GnWoW=RxZh2Ek)8l^m(4ri& zAonfi7Z#o^{jfSaHhD10`+HAj@I|taLs9{ZDbe3lu*y|cdW!ejF-M%vQ`Ho6Q=Y2! 
z6kF|Mm{sB54h@b@it%$w{Pn5#Esej2Q1?cj|6)QgYVge>K!g1DpT?uldDOj2e!q`j zKe(@b|FeJaAMd1d-Dv&wyUW*vk;jZc?cuNEy|HILP{a89$i(~qPw?N5K{}y!Byc>6 zsE?#rCHaYjTz%g^!G>gUo%`y3e(r@ zyTMP#<3pKZDyhth1T|D$vM`h;xc;Ss%%vGQN1HxWKFH|1o|=~0Y# z&#G*wv}?paB}oZSmF!z9o6KzvPEEJX;wn_>Fm4@)FP?Mv%U(on9rKqM&s(5e8*D^n zZaK9%Id|Qc$*|9JPuG{i_A;MH4h(mZB@X|4_J9SYCY$n6kNH;Fo1QjexXp|6d@7^E zd;`U@+@f>R-49jkn>|CU_kpeaP0jrrf|#3L%DBLil9iOS$#CSeU;ef}eqz|r_E?pX zP%4^NKEtRPRwkIt#{Bo3C1`qEE)`63s!YarDz(uTp9Gq?h$BwbCbJers}uOv)AR1r zA^@naA3?G}5eivhuUN38yHS`2t-8EP^9*5z;P$fNTp&&jzMA$^dIK2}gUGgDXdmdF zyq7b(He%QHd0J;~VL3Iz6MMHII*(pRXp*;ss00%u-HjfF*Q<3t!=R-lbRFCM38eCD zf3(E9pN&i)DuIlz4)nYS8VwU>0+V&)@H(5LC8kHT%Uhe2 zp{_Uo8nbi^3)EI_M+{FAh$qwnD=I6E#k+ty!5TLSr+#`s{GpV`eb2e6sb>%p&TXFl zTn#Z$4%aIdUzJv%b56ODYOlH>WY%JGyfjxkrBEKh z(I$8(iZf+j% z=+UG3~QBn${+dBhy zM9eTSGE<^Ld5(f*Zpr;z_+p0{e(#L@EDcmj=R#Nl1}UyzM zT;?O&6;b|KGt0}Fm={jal|lEcf=-rhA#zuBc7v*nMJ)%>b#rnsrB*vS1%fF7jRZmX z6=k*QpI8OcDKmGN4^&8{vfIrr@CSp9K6!^CYv6W5ttv1urm$p6Qe7zzPy|Yx+{%PyWryD&izywQ)UyOR6NhwP&9`5Qqk2^ ze_>0Pxt^S ze<7L5PS42rY>=qrrcNkD3BxZlJzkRNHV;Q#sSYI!I|6Y@_8A4s+~5rP7Yc4Uc75q? zKyD#fi42rc{A;#3=hj~6$<0_${F+6=`KTCld4TkkJ<&4y`Sq?if7D~Y`fu98V#aHg zQ!SN1M)1!=p%91&WhEr!_gOVY1&B*Y1cAMnRaMIj0s>M9n@)k85QOz>#N`hG0Z0`D*ijI>>)r|>eTz7jCLO)>ymPvy+NYP}tX!Uip2#P#hA(6fGU-M$1i_VA! 
z>9V~oR?agwra=6FL64~w`6BU~t$kVc-e4z4I_vDGa1v)X(*%D}1JqsvazF>$?Msh@ zJk9dZR#Mc(`XiwJto!AWpy=?hYSi(zAFqAK(~R;!y`)cH_F9TnGgW(EMVFl?e=V6* zXrTUeUl>!oL%Y3LQqkL$Zuq0wlWr1y^1`CaeHp>xZ$5`pnVs<{bOy#IJMK(&joVKq zM{8^|jRWpROd_ zb|1hNB5l0J&C6w(TaB=kyt⁢>xGpYO5g(oSt-CTURyKJ`;SkVo?IykhvJH{4ws) z_91m+c%Dioa6Ov6$A4X~rOpeR7t%sz6A9sI=+se*yqxhMJ`O!tg6-+q?ZuV)nhe-( zEnB3d!S1veuFYJNHUz4z?E!>2z7btb%iN$m3qeF>FR^PDy4~fY=2X2j*LP)vvaFcz zKOiMa8iwnuEH3zs0Bs^>79o29{*>79g3KF@>e)U2K#~}Gkq=)Ctna6_3U-5kbUW(6 zT%ov3MyMCXrFjyGXadl44j@_w(L6KHA1?3(k+)78?ap5B3SS6$w4zrQ{g;2mkLSgO zF#7S&(-D4mFQr%hI%=px`=ue3tT6geY;EGd-Uk0s8E0GzY4Q7))4b%gD=L+zXZcqw z+W$s%IwHCt7V7w~=Pnn~7=uDG-Tup$mWwEqg^AC_Xee+ zxeivUJ>-F%&yv%LLr?*}`1Mt6ek1#dkC0Bf^QI@jp$R{!6>6!i?dC1zv-SoL;wj`Z zu|k6_-5on&Psc+IoNRQ}h-ECATv7C8fAZ^LQHRJRcey}AE%D~SMsHuh}3F+2^r9D>ihRAjt+gj z$v0oY4$ntC})cH2x2N=EsW$1=)o#q8SqxPRNgtf z&s<`1YdXS2?4Khg;^u~?ks{Bk=GZC0ZFO+ylLrYny@@omva4Pkuj+zIR@iyv8kc97 z+i?(;(R`-fa@TpD@nC7vcgoTXAS(6nsyyRVuYqr(Ohlj>;&FanX170dlQa)2q7 z+!DiwAi|g~e!F^pL~Re7Kk6v&62J+OZ^V4hnpT(G^jr|9s5 z>*$a}c`hz?pdcpZ5fFT9DyzubU-t_Ehz52zF*>jXmVOjJ-5XBhK3O?AIlWMz*3*s$ z>rU%q{lGd%^p7W|kdd!d+G5JK6nkXRlqVt~6?G>$gGJ^tBN5xy*3~)6Wzv{y8Ov)6 zrSuL|n)*TKWH!J7;?ovWuHkk)w)vL&VW9s6r1yfC{Kw0AI;oVE?Rj3jXfd==0?5Ob zk(*l#pb=|ZEBSpR!R%`o>PU&ThdXN@nG}nkzWJ#4*OHR0SIULypB)6mPLB`}CkBx5 zu2{OnynFYJ;QoCxGOgq4dRUcVBv4zX3m&GreOm@u;j6|-fUVm`Q{)e#f>~)>f40bu z^!)snj*dsxc6LD^)dK;q;O>K@7j?rcIzZ%OZNHQL)g(9ISKtB+|2cn|eutBxIxbS@ zNVVEz(Ala*x(Pfa)z0%h+~Eo#IIBY1cc4C zwXYC;#TjKFN%=eIR8UcP2FaAGdqqb_+gF5iW$+`kKQS6Vz5{_8!wOGf#eJVB(7_5- zy^=|yUqVA{BoQHTLG%FVf!QpAcrZ073Ydj()7lUNE(V5^5H=ZoCEc5u1pkPPB#=-B z>Q1M)z6~tVe@u48RM2*{^9y;NtDJMmvR|h{wcAyJ!y4hJ?QK5IFfDur*@`;6>vf?G1I}W+B!~`p`v5nL)^W!UK!gJUIv>TQ3$kY#G?4^x!4G@ zEc)BG1EaB_nh!MAQ+Olyu}z3qnx!{d}-jH=1MUY z>{RU4JjzSOYVRvd(fWBX*}0rlzd`rpi6&xqwqb93ce8H}@+uRcY5c}UI>TkE|KL1& zz|}&a;>Lv&kwRjWac)Yp>NL-|m_>^g+wukN0Nyn2%P-K3J3uzyQ!O>^CbdUv*9P|eF*?|kp@KFNMx6SUDY zRT>Ix+JCa`CjWEWE~52|#<^UZ>DR!OQ>Jg`w++^2?b>eAub2Tkn{zglJLKyf7RY5R 
zC9*!8LCo&C`zF4T<7{5l$!YE}0U=@2pMl)>gUJDpt8xH`d}|2A?TabX7f0~^r-=F6 zKY}j|4E3uYSj*)Uok&wJNMEQ-Ufg*Kk5dJuC@ZC-w_wLhVu)nmQ!<24s)4v$08Zdg z>fiAc&|n796s+(NPW+{VEzN$uPR>vRY5FK$}l3HY)|1!I*37WXa1R%h7C znxn-V#5#IZ#>)HMT89tc0}w!KfmIS5C<6!gY(&=D+$<+5`pG5w#CQY!=4fm_$!h~; zMi%Hhk=7jiK_N*L4}>p@3n>^%f$b}qpX=pLU;oM90WmWydE!T63Zx-D(WCvuqA z^e^Yq<#8A+D!{IUZa*t_Y?ZNV6yO)wn@qUr>*K@h8Xwe|VYkLhB48I0#a*rDabFYq z?%js~G9mi^&1q)C3D{HhN4|90f{}R^w5AmuS(x;qDPEUxcFM`fSY8lN>k5o)qj7X_ z7>QLLdzbOzu?|+ilsePtSNVAQom)G?_gVZ)heFy>stH9{n@X{jNIQy)up|QLr z^Xm(8LE(1tNrYVU86v&Tq)TlDE@a2_5VFRS?)8J2koYrauk6O0$Re1KV>o^`f9C4P zF`UR9<1P3fY}x-q*6i<)7K%?Hm<)u30edBvDc@))us*$>QCu7XT^!8RlU?j^AH=G3p{iyfRIgdfITc|l3zJu|i~xJJYUcuh@Fd2KugM!Q zt?w71r{73PglYMOjcnH0BYf?njQY|G5QqQ>qK6$iKY6T~66ap@FMd#M@`ip~TlNrc zm5&^5w2&I^AQHx4*LydGq60dsCD32)m-duUOtA^()ZzT*u3yreP?vJF{@T~0WHa11 zAf70pAduB&c}=}T{V`UyNcv03VqM*-y88S77Zm7NMCtjde1;A)GxMdtigVS^zH()K zFt1wFr1^r7j(YxM6xW_o+;DdBuENR@_U-dbG4drK`*&h4R8Qtx63wzMo9*K2NrnpbwzHBJwtFJl^*F;^!vN5p z&mcIeCtnELF5WmF*vTvG3VtEwdxf;y$p55+#yb)h_V$;Z>AnUiOgvIvsL9}UWe}$- zN$(lArpfCZ2~DUk1Ro6$dH>|}?iWapC66B5lPS}sznK*>)|m#Uq&AmOnkm(F1i4^$ zcP$adfI{?}&}#-&r(<~)d-BaOmycs|QaQEFg>$9<4a7S*&q(tRi-3XAQkVEC>BFE) z#>XEHsEcWrz142zB28yCN-;bHGTOV%kp{T~@0@F@9~J>z-YR|3u-n)l#wdA)ML*o; z#WU>=c3_q?{-(X6`F_F9qLbGzFvi+ijFW7vo@1q((^E6X^h8@ithg&p>9xAIwp~Dnr17fwnN*a@JFilXuYz}QrVa> z^PRgt1MXnav$~9wW!SteL(|Omws9I5v-vqyv36cgw5h|w_k3~4oraLeyxk%B`aoO3 zkL;JWmx%6ihPsU@;|Q7(6<0V+AWB00wN7X3r(=9Z5%UUprB2c-2?K%L%I}75BFR83 z|HqMTI-FK?nF-D*tNxovTWX7Z#G}!b3qi!JR5sZNKGtE2o|~RF;@14{3|f6G4%iY8 zxToY^o?*S*l7Qg*X6RhZKiHoB96R&&n7yuYLc7N8BDcZPNIlzD%dZ9koooR_S>`2K zhJAT1xylU9fH-M$w(p>BM9543^zun!;UWKb_>{Ug-LN%Om74;?S4=^?_oV_;2%SkN z0jAjqhlw|sV2a(fTLbP|FYKl}p5K-+G9Xei5~6=87)!eJa#MT7wZ*s$xEIhUzH}rI zM|L-=Zh%XCp^YD`^0!1EDxTNCQ&yhGw+#YwJ2#RUFkX4FYR>a()Iz+ovZ6_V?;I+B zxKaQDP99#~OW+L}>TPdl=I!osKzalYilO2yHv!UYxw*G$ev#7se?jnk7b>~N|7N&M z!aXrAxLTUtRO{lV0uEJmIL%PP8t;#w1F2|Y&zTdF+n)nNu6P}fmxBp;RUI$QspRq* zm(0s5(i!yhH$B_7o#(0?m 
zx3cSR?bRGyW>Nlmf;S0LBx_Dfmx)?LThWMhNjy(!Lh{YG~HSVx%0BGnHRIE&!q|J zHo@1jHVEH-HjKGo%>t!ndFLK>q>IMC)~I%snTYR>iwc-4Y5qHFvFEE}@wd+>NTTR? zL@j@DDYD%FQDh!%badl($_zA3>x9I{rev^Gr^6Q+5y_xbIuF1s<8JI$T>!JZDVVx~ zDC@Nxm>3zm{3GRe-%Tm(xhBU$p~0N44H5$FMXl&~ zn@^-)a=@WRMG8f!o?I4ZkS8Y973U%t?{{(=%~>&Sv?kz$uBKW~OEXKy+4g12&zj)q zIUH5A{!~?4LfG1e6vCcB_E{Lg+Gkkq$!;P2m~jIb&y+@ejb6V6FTRD4V!Pj;@ExF} z{>Kr%vxT_5X1*~-kVxyzuene`*t!9N^#is&R6~w zN2Pu032Sixla3r!Oe_Fq1I9GRESq_(L=5UxHuQI_Vy^3{v`c5}|Mr%p&J#w@m0aD% zyAbhP67)MUg(Da04}X+zR>f;hK9Sc)S7Mh0hsPsrb5H#O)#-dd=v>l6kHi=_CQOh%Z>U$;}!z6#}Q=yJs@FSr}_Dg}9X{ksdB7I-{h zX|{$q&UgLrVxb|zVr5n;B>i%vE2!-+t`C5)lWCa7vkXS!g0#M4D=lKN{sKXJYCep= z`9|*hcK(>=WFPfpjhIb);aj*L2Bcf zpv4pf2|vu5s~i$SuGV_@gG)=fANJR{*4Qcxn6uvFYryF5W+DsQ+J;{OdS6vn$w*rE zEGM3RUztVGs)Phd>YT{G&i=mTJDB6li(&Y!muCl$%yuJ~DI^*mrR6l}13n-=7RI>K ze443=WBw8t{Ni_iecwy3@*Z>6xIO-170j1(Wi~3Qx(2~;Bj^b$CYbMuKl1N?d_f?H; zwiFMR7;at3QKNVY_?e_nuRpklczP!7Rb4XKItvdVP|ssLP996&^(P$9cQjmmxtz9_`c%d}N4m+=5l1%W$sj0HMBN&z5r?q?cCPUl3*drI z_xL{t($dd)<%>Om``p)nM?C7rZ6JlL6GxV}YI<_JN`Rk#uaV-qlgrCMw#AtLy)b+r z0snobFex=gXa?+~Uh<4q4EBfF{%m=jk!po+2tvu4`4CfQd)5*5Kc{U?ju}Xj0(hBj zzkt>Kyk>>g^FVW+Q|ZPBE^N2_dHzpF<^2tjpOGD9gEQ=MZ)y}V(; z!Abu9w~n|D`a$L2c15cg?xgc5FF659CYbj6WtkB@J$+^k1GenU4B789Kr!7;I})Hh zU`{->N<)7D@8;tdoasbd4vA#j(F3O8oeF|Hu1ES$?9)bjv*NGeH9akV$#h}rBbhn| z)uCl)-`y!~A1Po^Pm_-AYno0XcwyzM8Oy%`EoHYDEm;8~_Dk*c$`)N-9d7e~ zhsI>B?d?He!mDeHg}6p_BHo+S2C!n7>-IAkodw{=(ZPY@ zkC}9O&0n}7p^$$pCk+P&r+6z3PcNYH$8QWYG$T&VOBaMDTi^6DRf|$3^@I8O`PVvF z%nKAAePxYqV{$iqS&nUEFzvjduJ$y4o*b-O=39>9kZl^VK-M;k^+3M}))>h@X zVfCdz`L*wCe;QaaSr^!=8Af!7N_eH7<(iIV4itJ`<^tYB%1C#M1%qU%!$f9l#+}0&V zMhsc~f;Q%Nfs;40Kj~LqFjUqp=iHnb{1zJb@WpR_LPsybrmY@%;W)V;0c#*N`R#u# zO4ORXg2A1DU}4i*6}?@LQPZCZ=K8%~gxAx&#{jR>S*fw{xh6nG@^}~l(6xPhm=7R( z`ttlQhmq?^!JQkP=ACvK>4J=k%?m;4SMszF1lT-uX_HY8Unsg#ETx6?=}N_KAv$7< zz<5K0_c~5_NVay1NEX>%68^1rTF1Ur zq%$dt@T=~+g;5q|OcX|URKE@&_vIjhb@x%9{{8P>rNHX}y)p!*N1f5*q~VGG4z2W*aL0ICMp!?Mzr#=v1FK8^5+ndX zstIE1EHL*FL4!3QKm^b9H;G5;%HSFNC( 
z1j+w_%DpFp#hL)f`Nx&)fe`XCUIgjgJR|gSGO$>hE^^e8k@Xj`rEXOfQMSg}z{@5l z{rMOH@xEc5_|VP|^S!Seopz)k>V%HXI?e6SDwfJ<;*_V7d&=#ygoaSUW4z*m+cF=0 z@#MGP4b`vesL{WxqyFNOn@#XrLz0ROWZ$3uH2Z{xBQ|`3 zHNM(@lAOjLx4xm_6KHfwx_*FGlqmGy}LqJpchdo(C@iXt=0Zn4%gwdAXhT_m$5r0CO3%Qwbn5~}KN+J6WQ1%@OU4cfY?w#Q~VJCTtw#Opu2`F_{t{*q`?3GxY4KjWF`Z*v1mPwUybyJ?$STF~#1v8dRi z#Wds|5YNMN)cZ*Q55ENdUd+$^VC!|ND);{>|4@CILIA?nB;C?ze|`gVfZ(L&~1qq?G@jy3<-J zb-L5Z3cBY128CV$xlB(^W zR*=5$H$kV$YoA4Q39XlVFYe)mjDsl0F~w9@*isSIB7jxTW2fE$0_8^l&0}kGJuHM}LB1ESU}z zb9f`8`LHiWI!P_izF>#!#>Oy;C9u@}k+ZWJmpQDH7Iz`c5(f&KSAM7?`Mf~e>9@$! z52(*gYuUf~>fwu#y{k*5a5UOBQON#Tvw?hiBbsfZ>SD=UV7T;*=-$1Vof!L>IV=5Y z?%?hun&*Z!@Z@AF_M;RZ^{NO=&<|EHGSJYLO{)W?UoNs4C;}1 z@|s#KSlWs$;u*F}MJxAzCJL#G{MEBf{4Pa2ebw0lHlrFk;%+C(vfiy4R^nK$%D~3X zzGF(kX7~cowYNySZ@XXc`cts`=rjgcgKNP^{fKJF9j|7!g*_wVvzZPJ*Y9$dl_@ML z+T7jMAB&I`JlrJP-pHs_#^=EQpsBJ{4@7UwsTv>u8>p-&+YAJdVlII&UtnWQ5MOBocF zLb`06m!BZk)Xu!05y|$QS}Af}u|uVivbC2EKkQ1)ThH!op1>#G)t>4B!}j)hayNQh zhfxb1LFrC`^>zy(v$U^!o1iu1?scZbYcNsBZ*z9a@9Np~YEy{m6&}8umLJ3weph+9 z_@5E9{cUwa1X)?rKKt-^p<>4gPC$X%Z%RsBLg%|?aql9R`%S)@b6C@?7dwt>xsxRT zF+hyIjQdTAbwQ3Jyf!Z`1r_Z+TGV%T#%mzm**7Glq^Jc2<1j8T&WDO{t6^>~&lkyd z&QT}(&$ZQ`adC0Y&CNX+9ZH!qx=DTaZUrHISdhug{hzf3M`_vPctKA!=p&ZcvQjeZ z{xN%4X=x2Ag+`;hnrHKd7N^&JSjWE_sEt%8sB5TWTH0FG8MYxO-@d8quP4L!Ka_n1 zSe0GV?p72;5dLn>Y}0H*YJWK*_{0R+V)>@ zp?S2~)c)6_8ALo;$jL1kUeZ(a*EJ76a@A>KsEyRnQpzGVAjb8R{^Y%;`uCE?43;%$ z`h@vid7qz!MZraBf28Ff#dQu2B$y$6r*Yo1Jm7#Wr2_KV`nHwvy zR`0>bAI56>MieRt44XNZtY;?Vmb$d8HBeVBhs2wbj~dW#E~hD`+-kUj{cH^VmeNj6 zAyVaB1<^2p_NwW7mAs;+d$cGH-xM#-$^P^8CG@Le4tWNN;f;rZYtHS%^PS3V!|QDt zAV#vYvzwZJ;aOC~h8S_)=dWuJLApgqC{@Zyy!At=@Y!8hJ;-i7`dZh%D+iLg9^p2{ z88-A65V9OmzsScIAH^ia=rgaG@*e8;TN!#g z1qib>o_nvAE~lUec;e@UOl#uO<%PK*>axe#D+t z-O%qzzzlAU6?Bb%7N26>pE_|b{F>t)@v!czMPWLe7J7J zlf#QWMhK*%o3k-t>Oa2<>C?(t!e+qgs*(AnM|q*+|L%)&c}fjt;Wj8E~bh zP;ICJGxoKqBRtY(#w-7b)V8Xzv2ZD6&TPHa>*;=Z^qZ(VpR5%(aE7It@e~hF zMCf+J;81iXoYvblks38$quldLWnOdJ^BUYCEWWS$P0V)7rYcK`W5@$J{juslcU>pW 
znwOgyDK%#)ZMz;iZ!OYrRV{`kxoTQlE9nlujpEy5TMsjB(Sw4^`%FqDK@iJuyR~V= z9z9_J<0MQGRoPT&ZdF#7g$=H{AJwy3%EguuvtxUYY{BU4W%0d}{zN74mW1{UHjn-L zRV>aQVN;}~4A%fymK^7!d)>lt5%-bjRoRpk^*c{aGcVOi1!mhQ{Bx7S!s7n#^n5P0 z(yzg9=)U#Qo%sf4r6nd=Xu~D!<9U7Z_giOdQlqqBt52I0t*FQN*pl4#%WogJB1;8O_rFdrVAl=rO#{Px!Gl{%A&w?;3J4`%SxXRRpmI#EiQN+ZNEkJf6YqQBl) zQ10vTZ`3f0MqfGpzJKtwU#~}#&0)cXe`#}$*O7bxTUNVx8VQN~kq!vr%=f@uw2;6X zcG<`cunF3Z?qJeh$NuLE#i;R%eeUu6M>!Tf4r9j?3;N`z76{9OF^I8#BuKQ>#{a{9{rOAe&PS>U>CE>E@yE-KcQw-)2*6?}bM4$j$X`45t~h+4k0!p2oQ*907S}*md-Ae`Y7MMUBo8#>z8! zd0fy}>gefplK`kfaVsG#3m8p z+z@T1fXLREY<%Og@^oZ`PfT3=laDkGR>;;lM&W+1(L>Mcvr0t^+6V!F04Zb`m?JZf zAQmS(52JgC8tX3?jA(`J&zHf5^sVMLHq{zpNzSE>iWN>QqGtkVRa5s9x>uV?uHv@N zFedCgtT=von*ze&^7EWFIk;)CMEhaf*57MLye;`037hra`=M259vmF)RYtI&zh0qF zm0bqS>kF`{0^z7^i|u6qVSpLfX=sW&b)m(AT0N5wx}y_C{G(3@D&nLCPJ_1RJ71(e zyX%cEI%l~4b5iW`<;w$@6*z)|t~RS@?PW|H9AO6sc9&0(4zxQNPhKBxR>#`59q+FV z*Lqld9vB!1g58v!s0(>r$+7An3lJtM)k=Vy8F^rwG5S{dEgAUdhtfv2Lc-z$kbv5a z$e|`Y#4xh`jVfjedJc1_4o%xd)a+a?-@tVdwz0uQpnljq$+gDzJu7sZZ$wM_n8!BV z=yhpgY09h1ps6RII>;;Cmlt7bYKo6GzqP`ZLTGNaT?MHnw5N1;%eEiGUw@(u{&9g! 
zlDsGgLC(z33W%=x7e5v*tWPK7}I$Yh^uX87Fcai z)zd>K&HMkvAeZ3~oPRJ#(jOS)=-uwFtWCf4psJv*?P=(!zl=*i75}JkNtfbOzs!Y6 z+>1t-LwtzT$h9;R{&Fu$mNENJ2+UK$5RI&Qn&0vLZ*zDrhPnj-X$yze{NzBUBc8t% z67U1U{QXl@W6C0#g6m-5Z)4@Kytn3f=g1Cj}8V&6-mPeBG2I4x+;z;d$|bPbG1#lvp!l+9G(Nemz& zf3wAnfD{URm*0nJs-Caf`dZSB8!f$0FA}|nD~phJ@`t=V^=9q1Mho^v-#TjhI%i}y zvcpy(m%h0bdIvV!%{6RAhG(+a7P!P`74&J>SMwc!tu)S2EG^6BRH# zeQUpSBeAHB=6CjU309Vt2RhYde8Xo(gIo2G6LOC`v*dL7k??-6vije#gxZrVShNsR zgSDby;#r*r3}_WHAHUh5!DWc*Bz3g8^s}`8Aw(vM&Q0xxoAXWM-j}YKc89)2imx4Q zk8^ijo%iQbuh+?Kd)=Mqs7_*X&b_x+U_A08UK+QJ;RD&svQ3=ooDE@ClWYBd#L<6D z`Hyd!ZsszqA&-uXT)W{M_RC0td8Sw(*6+-FQ(Fj@J5tqft8Zte;^WkgjJ9(X+R)8E zv(vM}H)L?sB^eu=@4RRLSK(-x(v6`Xi&YS4%D4^brJ~uBUo>{q^Lt!Y6)0_6CD#$V z)mYG=SV~APuPeqrxiYH(Q2t*pF&64Qttj+B8@>L_@dLwA+k2Fh%tm{XV^O=vdwazp z19lD$*rcwM<5t6e=ZwRWErU8^3EQ~)`3S4p%Fx9W)wkC~~Sk?2F#XgFM= zZGrLmIoKMY*+&sd3;-O9zV;U&Lq@TpjH3&;5qiC_R$uKm$bs*^e5Zq(dBtIbZNGqE z;u8#J3eV80DPj&F;e8Ak5dqYe;okEb6^aEuw&Q0(lo%e7ox9uHR9inEZ7=usz69zB zb)z$`xjrkL^my;waO{FQ1W`Y@0x1iOq=$eW(;JQTPW`Su_PP?b=sh2y7uJO(X<5of z8ENX`KeDrg?jD%X?lo7&1Yw5ose&H76<0(^SIyzrCgm&W>SL-Fk+&5&aL)M9UHwDjlEc`knm^euZH;2(})~?T2 zr?h`~V0kU6`hA{NGT$l>&jW4dSPH=Xz{oK&X5#nwBY-6Q7!p!D*c<9}Jhd?6k(7i4 z$zeUeWZsRhQ3oi;!Itb9b(u1;nUpv7hZy;oa~XN(iE1uU5tYT6KJGNF_*lkd%F$Aj zm_~2Y>6^GKQEguk^^qz2e0#dEZT)R}@-aEn?EjS={NH)RyMF`#gFme>RJ>k|9-G+t zida3_eS=8ozYsGx0XqdXSm_T~X6a%mCdo_1EJm`7iYJ*W1Z2h41<_ z=QTGEzXebgt)D<6ArY}y(IjF2|E59z<3TT6K8;uoQ|%9z+Q5q-n{RXsCyh^^uD)PIpMES zAN%&M|-mDy@kQ4p= zQZ%qn|0}l#rUGu5FzH0RP>U)2lH!kp7q|>(?BmWksoMAguOGghU%NKm5U^o#bxj@C z43O$&lLEMSLRxD`o4Fh2BdM8q7T)X($xXYgl#%a!B`Lu*LXHSQ+@o_`0)|ReFJEUj zvb}FD@$XOn<7<;i)|E*}GyK zTydC*vYwMaP`wEJzh9Q=d4)D8Ek2ND0`$`OGpXyjpchR1$(+vA9(!Hs0z?C0_o`*Y z>sS1Gd0~#mT~>xDaDA(D;V%bwuqE-aq~P*!(`XNuvo#>aM7@KhGv;!*e*GV?gGuA^ z-_p?+xJ)t6lDMG2W^USb9H+Y1Q1h_BS~TpNBtEG~J3;>Y5B`&*N?BGX3EFd~)l$){ z=2ccjrNveoS*K;7h>4U;`ADpRO8dGD7$Tj6?5>xYDxM1qFW`9d1i`(PlFWjZS2oW# zBIB2a;#{0jR4tqqW-L~Q&-LR?JVz^^zsW32Ol)8`ZlZqYPMQc%Tr}{s9cu7TB#n2h 
z^X2;1N)sE74UQY_evE3r+}niX)UH8RO3bL%R`kb}BiLWRVPXnMkbL!u0BCokv9bRe z|FiuQbBn_Zz8o0nMQK@r$+SkO4i?h3oK%9j!MIKIj}*RS`1Qu{FuD6>8HKF;s`?$> zczaqMv7q8l=oi)mgU`XV*awqhWFz;F5q)VR_=nUE3>1v3eLt`ENo4mG0s?e!RMe9{ zkizYk)JV+tS2)m#21L&vjiLq)bkH%iGvf`xo7hYxsn3;5f~tj%gQBA9w<#LOLs`N@ zYZ{j4=0p@1(!Z;fz9Z}0oz&S8^dlvVPThdLRA_w%aX3@V*GZ7;D@-B)f%b?>qdj$&D&h!}J}u^%x9#QjlJxdTvd9$zeC_4t++< zJ3lNUGpHh=EL#Q(=rSWisg@*jlzLVIre?fm3VYX<1UXkAS(is%1QINWDp*`#*KM9Q zfD-4%aq?s2kf=GHSpT+u-6ycQG?Za6=bR20fh-pmK50p5U$+0@OojM%@RE#$hHTu* z#Ml^LkTDc0ZHS^&H6+@X)SWd?NJFBdTjpEwtUAmCCMYp3|Ew@Uf$mK>b6iihgL3;U zwy@~Df)Xh7k=Sb7yu^m#$*zu-pvioUUTUt#{ zS(kygsDOq4Sdz)*!0=|n8;bpLytV}yY`4Vq@kGmV92|}m#QB4j(S0iALX)|fX6)}u z$2ap|k6vA1Q|`5k;WBEr9nYGBRQKaYtiv~xDkP?7*JfK45Ia#RC+T>2Cj}*Zq4a zRjsYwTeB@}Rt@wQm`#SFQ6@+031VesrKqs$bi`iY8nM)iI`ea~iEowk0nyWk57+Qg zPft$%C=dRVuS@jfrDw^@OlQdDaYFX6QL~MGs5M9nCE@9X910yB6~tZU4U(n4^5btq zv_^x~zXBICOE0I{+B(Z;9uc@jbG+^@+H`){wQnXlM z2ZCp}Wn{b&&U#r(E=@Tn2HM)b<>lo(S;MDnH?ofd>WS)y}IeQ%=haeO`GxhikdQ$hF0M$Nm=p)U;ix zK1;7`G=8aie>_XCF6}DcnW939lyito5(?5rOZw;jUU{V!^h_7Y&Ns{TCA)$_;o;Y- z`spermT}Y(PsS^s%sSA~zLq@3SkY}|T8`S8YDL_myuUS=C=KStpj8HW>%zdKh0AcU zJ5-<1NVqS%!Bit1wc*zN$zE*h-@ePhfCHS}h{`Ar)OD!hUz?37C~`i@53`i-M3wlc z7oKxjD6zS$OuQ-R?n(5qY-PG2#LzD;3a$=}s}CV-ka&X{F2UBa@Tg2|@iu9cN2grJ z!(R{R6(u--^BF3hR`aqJmTx-p+LJ%hdAX8aCgm#%?8NP)V%XmrIUk@krzzw71U!S z*DFQ`Dx7bG2&orC)EIJB=(US*Jro3CcncNjc_5R3y6>XXgQv|O?%iupw*n_nS_6PE zINr2ocAT680g1k)r6qSvv}OIKr><3o$ZO{>3vy z7Dn_UnJ1K})e)Ciwoqfz5dT?0_2KKtDrO2&wws?(-P!!@1Pk*3PH%2()HSC1*bl?T z4?<8q4k~SHPrfgP$hFFSJ2Lz9jE<8ty2N^|BQL`=?xEJ1$?{GV{^jL9wsqLZyY|mQ z4Ryt(QQ;^4Tn~=!K@WmLtiHaPZM#WW{>sEs!_J$+c;m6$&sBr>d`ev5tQ+~4y^$ow zL$RbTuWCESYoCIy;a#Ud&97f^1iy?_VG!Ujr`Kg1Mn^dYn^b&#$sDRB1-g(^Ld_&7 zGSd8y>-u2*$<#!vqi`4qJ2?>n;iEvL{F1Z3JN!13AQayMi1J#*1WX5i=$Chqjul8) zn0jvi0k3ozBWXUMOho8~1C5%Irj+8e^BWXT5(Mj%WbYA-de9~4$+x)vwz{ip?&$qY`q%z(fO%*Gyph%M-Lz$ zxnU*fCDQ?M&JCdb$$x ziNEcs7peUhU1e~gjU62U73C`e zQLFEvu)X>rF_-2$1C5dmI(kReOkwS>_M1svb|IxNSc&3@o=QtYl_Ybpen)b|s0hlj 
zPg|PVmkP3;^pHLJ5-V97X;6~MNgPM?)iQw>&tFM+J%DbGWc-!suj9mf)Hi&)Ki0!< zqGud?zi_SV(sW>Z&6O^~lWNKB$U9@NTIeiQ&~fA*8e%wNZ-#@O!TZd<(eg?`dOh&6 zBmLVMB|RkCwl7m zPH|(fapg>#3IPg`YB;;G8p*robZ`vfkH*n5Q%F=4oEgVY5%2gGC~Iu%8D_A~lN^0K zX@N~NAoS|mN8|d(?L5ECj6|Jw-s!+3w(QKbY@KK>0NMV)Q~n(@(5yR=V`3KhhaU3|lS((mwdVDjbby|9IVxeh=OO29+MJ-{FyuoqD;5Ja6 zY8tqPp3upH&^%5FLAq^P&0<9;18Vh4CR}2?hz{= zIvq}pXs3@pO;AIE1DYwI`0Ue`l3g%!q|!dljNn`&5jgA^x6=0~sB4Vy*SgMR#)O4! z+U|-9#lfGH?2zlB>}BxP9VSNb6#a@85-&J)wBK~`_$2ST9 zvlyEG6b3*V_Nn!I_?!t4^|DZN3!?}z`1y+zsARyhpc*hfE1rCD!B^C@P}~ZRxzEV@ zq2+CSKS$nwR{uihb2IT1EfYe_i}zgq}7W z$c|db4k73`UFQFnYU$$QvQVFe>gmW?m`v|F!GED2L$CjFf$2X(7L3KeE3hyZ-=F<) z0jOvQLvjnly}+72D~Hn#snky=@u2I*|6r(#Z@#qUKz<8fXI7PQo&cNbUky^SPl1v|u4*0j1$#Dx8YwJ^gWg?oMpo3M2nx7E^F7O-^lbk!Aj?*o zV68h4Iw;Ezuz+0(35~5QNvPDXZ(+KW29gY*=4H;~MRasx^+9%bA*kFgHj_-K=ig^% zt`yMc7Tu3mZOd}BS#M1`FpT8=TMMvC+6dKt|Cqk*=6A)Q#KOcRE3~6zWQ{_WzB2Kr zqJ}#hUkXNbyZf1F;S(!k21g=;8B`^4`ckRb4X zno$E>-q2bwm}_&6z{My`tgp>zpLcMu-_%;be_(BTk;QJ^Zecg;d&g<=fEh$uOwR}; zLMWvcdvmOwq5)n$%Qus|4Vg3o@KB6?DyXNtcG)cTzrx5F3;c&Pyy;JTfxk@=M7Jvw=nWs*rWbG;_^~+l*7cGhVm;8&l+lYtqwa&8jI$ zTJ=6oP}gh>B)vaXNNhY>5#{FgU~ZeAzl__8l5@OZxX{sW7V^QF_1ai&``CI3fn<~A zfO>uywBoi4m%-4vGwuukL=#NlmG{*Pjs>UvAJYrh3c^Ic)0!`K%Ert7 zwSMQ-s*+v1FS^6a0+r0$^9(7PwZ|7yxTnW9agnDTWmtyC!Qm$M+q;AJyKO9#er!l= zU%7VGelKdQs8n^-$}ODX)r%}O_e6{<0`aBPo&1XVuQ}8b&KRc~BrksgD6qXYOo}@g zP#S|`q;Tpa*H7PMYs4vAKaXCaoQEkb%``F8V9DTtu@6dcIU5@qrT+Cm;D~!|ZB4=2SZqV+$^o95(~ALXP$xzn%b33Hs&+pH8=WiT zx^oAQ8Q&jj_?DA3S0@@8WaV$~j^Q=VQ%yfchM%3CQX#p;@lq)69cnr3E=0@^m<$*G z+?o_MU2w2;%wvLL&giIN_spZ3+Cny$DQZB=)3Y>LEK8oB@7=4L=dpQs{#m7|N#pd0 z6p@{uqdogAi`!x48fiz2RB1t6{m));u+-rb3p1H*cuU(*7`$jgi+ z2_PK=luu9_14tQ|-6cfONkhyWpFQh9TM-ORk&1ayp~8cEZgvSW1Lpnn)P%}Z=hA0T zy~%yO+kc@fp6D*uxyPJM0>fWz#!F%6+A$17JV_J7mR+I**XzEt_5?@U&BYq~y2i#! 
zKNIcgO5V8m(AvJ#{q>E>cjY+IQ+;Y}t@=1`K2K{l!l}ScK#Igbyq|Ftwj`x=JUel} zNS6f!0`RCLKO2g%MSHPNSEpcr88vVC6LerE=Z?SFn7F>;?+&*<7f@haD2e!C z*8c%^#R&-yZz&wlnrcj_V6b$G&`~LC^1Lx$9Axo!dtT}=V7799-UCk*lj!6wa1BpQ z1YMzKtT(5e6`c^M07d;XXYc2gF1M(p&uXLd>0plg=rZ{&>l82Q*FfLGsf+<gwuFS%aoCJ@ZK|re{6g0Q({s^h09^}B z<^H4-Cs(m!xU%#ID=U401fF=@;5l+~&hQfU_tp|U-~toI>dW0b0(1^*UIS^bHEp+q=bM~-LcJ-npL6J zkhJ;$v-cBA>G{A@hZ0mE0Q?q35OBgpb93{g@oVu4XNxzMHCJ0jV_*M?34g=)TwgL9GAZ;^Kc4{1!cM;89npf@_LYLJipe9X|7W zpzdr1l96Uh1(dgv&L-DMfjVW9a`88n-qnyDP#hTiDNzBBnf%+g$=M1=0ZWK_hic{*+pJ3~13CU&&FDzgij~3LAjKLIYK#@T5I$7iueGV~yS> zi=CA&?f zhREt!iXAAV6}7ePuUL5A5V(8ovASZ811F;Sq5-5zo8;q_5(LN`b0MmjXe52Uj)Yuf z#@%#iL?bt}OMjwLc#1zhTljUdad=s?# zU@Ip{*J+$*Csux+g+!+2mgEi2k8j#}oiiLoN2H#SUaX1>ilA(R+^mepAM~Y(Kwxid zg)Rc8|31o0supO9CLt81{3D}J8^_y~#wI4$Ro1>|FVDpriHdEw`^7O6Nn=Q>Mf1fi zEQG8GW?vLCnU~{=6@Ku{6jxlxgR}RB!$2-l4KARmt*yVyeT&VB#SW zzk|nEIr7iS_2*+wjs$x)Hf3OT6~~$!dE>4jK-S|6+F+reueJAhL;1nNcGMA{%i;=1 zXqW-E@VK7w!3NQ_S$(R0cld+Z^tBGPJ2UAr)o(|Lhd~@*#_-~+0}+^0i8(g`R!1$FTcGx^pt1j&k-_AsB zFKSxrmuBszj*iLt&5(=Y3v6%Av(bNvh?q!>E6V(hIdfq|NC?uok_jc{+-URer?mBB167 zsU{Yly;0}o6vzKBekAl+evCp=SDy9P3E8u60hK}F)$vT|7H^E+d~S>b8TE>>x#hRI z!90EY<+%RL66r1$mbpatrY83bpa*3TENpCUiz#m4qBI0YWRht&QS~#wwI8oJyKPfq zr}sM{&A2=|`HVu^VbuneT;Y=fBsWjrx$VPq`RB#C((!{VK3n@;6yFc~wsd#<^=5Fp zxp{cBS(B${7d=Cgu)(SF_3mhb9J{11v4~P4s@4A;$6N`CAj8X0b&Rc zo%WGmMA3Q$CB7f>_IE-kTs>;V%`oiUP;(h9L3P8St1OyBS)!6Y)s9Eg=$T6}96pjE zaY(zm>~cmD((D1~WL`?7LaF%V+mM75e{1RuL2|e?{}`7PDLM(qh)j^&N&ouxON`eR z9lBZCJ7xQwO5eFYBbrXYZZzmA;Rk@*wr_G^Hhk!;a0L-lc;Z*-dKY0bEYJeOb#kb0 zYEge8J-;zKZ)9O%kql2bP~eIGC8XAbJ>17iKXTdh-ctHbi_y z>ps&rN(7mtLd`$K#Sz<%DaGv)x|GZ4A0+FUN=nY`4Xg(ep!ESnjxX`g#{!)0jrti9P zfZyHSZGKw#aPb4<=!JLD*YZF$Yw)BM8BhotUhB#JwoK&v zaU1AO$+6h(^dsO2U{L0Swuf8_CqjS1ag5t8l`0iSt@k03YCB!vhyI_e0o!`VokQ|% z)>k4)Z+*}M28ISKXy>>)1Mj>RSnuYKBdsmA0wrKXYWPH36x+d({EHkvl?ccPKOC5# z2%!W{0YD!7aj5_J9chi=Cv;L5_8V?hCAQlqO*xWrxFWs5LiQcj))v#rG(Cq~>B%P> z(}jFP71*4d1uKVETnMmaDZs@%HYe1uW1 
z3#FV#M^|4-ZFWXOXw4WIhwg#-dJVqXA}3-seTWku*bfqv&Jd8dQ*-R&0xjxSyws@b61RKWPB++| z#XYDVGE>O49glxJEFtFC$%a}ryUJnuhBQgke}1IW6LYpTtg)jn_f7xdHXglpvH4re zz3@)hEwMrOm^HhwlfSe5m%DdN;TLW14*R3YwYj%3F(_RuR7`}+JZKT4Aqus%wRF;i zhz}o7?HuJg_7C2zb)FxMCuS)Xo}t*B=WIu35fWlcJ$v>n{+M@7lHPjxidxS=X2a~h zK7@fg0>Zs<<5>{GL`^_(2rbtaS^ zo*m0Uw8De}n$7xPGoCsl2NSV;&|+3)9UW6;M`_;JL}6U9rxzY$Z0ciAAJ2Fy*&s0C z=BA>m#st{6L;rZ=ByE$3dxI)V@>FnY%974R)E_?(nwQ{gh4{nRQd7go#*!z`j){6Hd2KgEzjlG;|9zV6B7n5adBau&O8#O=;+}n z1;DE-+(xr=Jic}3Fyhc!cb#@Fp*14Q4cZhpQSC|7)P13SYy+xGegyoH2M63xZZR19 zeiiYw`RTxLL@ijCir)pDA={A>N&4J%Yfnu6 z`0CY4uKpnxQ1A2F7$*ZZ@68(j$+s;AZJy91*^t)dZkCpCE;l%gLP*$Z`7o?J-Fzc! zB#PF)J@`?8R7_7M;_;2cFjw zKsJ7cbKDUx)iB&Td@?jxjMdT6(ckN`=DZfC9}^M5_2GlUSfl4?!a2wK^!^=o4g8BS z9CsB}-L?irLYjkjRv?;s?22 zlG$D2cpuPFt_D3}6bYnlP~)*TU7!XfiwU$)0k~rCq_<+bZBwa|eqJF8Qk;XMBg>`U zr%-h~aHz*~DxbOmI=ZOCBl?b%FSmIle!ZV$%62&oBnt3hH?w+UNy&e@r)2ruCjywh z5)eRm`7cOEWST^sbPJ=4D(MB?5qOP=dr;W2-8s9=boOThWcODqI*0vboBGhF7yZr( z<5czq^Vn_YGqoQVfN%|UlgMjMdnh6v6EDGg(e3mvDmj?dtP{_>D9FY-FV9W)PDtFVG(_*Pq&1XRZLL zE8hsg__tpR-Gj2!IRNl!A8`N7shtjGQhy5_%5{a`%;NXfDzi`_nfEP^BR)MspK2PH z@$8_zz!+<48?Lo2Wsx6l766(dapdJizKQSUFSYia8&;tHaez8L`vwn48$h==vAei# zxj(w=HsZZ&v1#E{u%MmGns?`c&OHP`fwxf9@q}f#x3}j<#1gJ46knM#a^2A^Gxe0A z(s-sCMfu`pT}E?o+(t{!6Uuope7}uB_am+_F*2sNZf^Cvia)tzY9G*UV0*8*@CSkVLB#Q7N3$lA=>)-QL;xQOaNCbaJou9>UF#t4_;F z<9S2_Izrv-7)z#KT4;iFIGW{Dq%8ah9r-4K$0~0j*#lbSw{IV9Y;Dz0U@$yVRdps* zzQWk6;#OG5b7ix=P<^swIgtf!1~I`@$mG^#M^Z^d%bw*}?GdcTx_OxY#)UJi)Dcfy zJWyj~08!dHJY*&$ghcY3;ax%kQAB{O#$H=YQ2wbA;?^)T$7zrJ^M<410jr!naxb(y zK9Agyt6N)dxQZ7EsqFPgA5YG4Ug2sl#zEY&)TNfEO061I^$}rD%*nZw%8|NJ<4eb~ zTSCaZTVep*%cbH{+eg;pLaxSE_taEEd`a1Bc_67={aEsIwtR1vorgwp+ z<0_NFGgN6N`i82+t_e3ireW6esibbjij!T z=*7|f@w%G2(#?^)PNDRksa-BLQ!7VrA6G*QN5l0pA5jtqGzk*t%P1~Gfk}E&MX5zqXPw> zHr8UCA)&aQVP<_fLt(1SMaCF8za|n$;(l_1q*y`z^n2{~tb+2!hWq)$A;RTP4jX0} z@d;ADe*OBu@N-2VOMXvT*%8xm+h)4GU%*Y?kzkptEbv`T0xyd@u2h1=i(y+Q_ow~O 
zs;LMoA3pI!cii5qqU#+&{+?Mu(i(q=w^n|9X~e1@DJ`S?J)XJ)39?X_6k`6z+-xqSm&4DKrCcyhY-g&tlUl48B!ChVq2pVP|26B%C7H8p64$WY+1|>W4V2v*UYkDd zQF((iU-RxiF2s$88++qMhv(J*`rp2p+uI@2b!L_MCC^+-!h6-|;#K_4-Rll~X&Yw8tSG|;~Dp)j>^GCM|I`dDTcRMX`$m<6vs7m^n&8`2%#s6!6PTd z))B5})2O#e1w67U4=*Eb@oG~}>9y5hOGNAp;2(V)GjA*D(NIt@J~Fs*=G53e<&2R= z^2jviWkGlH)57G^Lu{tl(xfmkd*8o|WnL>%f?nc1q$rWbfjO=g*2RtGjMrRZ&&Nyko)N zanj}D9ld4`7e$u_CvyQeG258|ArV=j+*5{#ddxL;A*OC@&`x(fZO^q6LEU{DbbJh;~O zN8?mE44yrI0?TB-d1J1Rbh<%D;wuFe-M9)>wW9A1Lh-K%(|uydycL0?6TVRdjctFw zf>qI4pI3&)t9U-y{`Pqz-FEN!`!sit0$O2)VCjvwbJ=5qwFpFHNAF#*aj>y$xOpjG zN=v_iFV}Kz%D+G^=h0Ar`I8U9{P)c=XexRg#nF)EKR<{TVqK(5XTJZXubF~8G2qt4 zAW$f@)U~jdd;W|z6q=IvVk4rL#RSn*@H_BZ2w-%s~YW_0-j&_OMRYv`P zjcT+Od<3zlI{zL{F`0D;FII%T)~*&3UE8f)vd79e1OqeTQHo!mTuY!AOK=yHv-6b` zad5Bk!}6fK!t02CD)R8h<*QNm<6QV^|D!QQSf^7%AH%Jwi_*JS(>3_8SKdf9C#&hE z5ebKur5Hmm0iMnZk&nkBhhlyQu~JEZnIP{fTD;2gTjwXH=p;Otm*!yX9xoHSk9ia& zV2(~PJQHVjIkpTJ zcvWLvwklLIR4I%5eh;v7<)i4D-z3+FNiQ}X5`v@HF2mVE6<}USFa6=);j!-F^INAN zEZpmwlZ&=%n2tvnl%q{8%k?w}vMYq!tLe-6eR3+I8P-p3rv* z6ri+-N^PHBi6WBqcJ_1+&UxXp6DGInAn6!p{1tw)<{O7FGYa}i=-AKO3J)BqXRg4< z|0L@3E&S7!+DyHFAo4=xl3+3ytmw;$Cz$xNf!f8hoUTZ7cSXTdIreO!j1)5}w1Ar< z^ZL9Z7Z^;ZOD)0Mm3*q`9Wlh#2yQ2IHjQjI?8h<7e=L>WeP$TQzR;jo_uA4PwVkRx zH&VrPim~-3aV^oxK8XC~dsA^c4fFcA1Izc1VEn!QSfNI$LCodIs69rXg19s^Kc-4K zQY|^ z--;-!kjamKdP{cq)g{C=zpci-89u7O6eBbTD-P`C3E$^;Qt3rawz&|-Vk6fP;7)E8 zvL}wC=zSkHM*U@t<`9B^7wa8ezyk~qalt+-+7%&<-l%MT9%&U(WtF!-Jv5N_e&7H5 zBN{K=rmwj@9X|+Ia}+%59~ewg_hm$1tXpSYocIHZGOhNKDT$uOYbKwJ3ywv9MLyY< zVh9j~2Rz$Ch6SfrQ7-ci{%;@xob^o=WasU0`rbCSh^Yk5SVWo9z|b=OPFgyw6;$7kvZns+P-Tq}`Rm@I3t zv&4cca$6{gUSwJuvgvf=z2UUeKE)?9PwFey>f*x&4>tLeC51{sb5!0|XOXBxDN*fd zk)11jA(1BPqxNt@>l8f(a{y~sgb%^Co7OCsO2DO7rcqCeffIeNOwhGlL$FE1vWlKx zNBQL83zZLIa@^p6@ntRgass43U=9oBqtL1R6?gL64VY^Tm>)ReUo2YUhQG#lmH^L6 zH%vFay@rMOX4H^*+?ex(Kt3kAEi_LP+5Bh{MtmK+0 z^YQzq85V~5y12nfhCXx?sXLs7(#_NsVj5K?xt`@qShDJG!V`P|bgs4C_GF`%>`{{VMCLlA<-g z*Vq8d(9m|nCxrE3g;JcU1j*2Pg)~)!cW^l})I$)11$=kcLR2HPa&()ElP0kzEP4jp 
z36Q6k%A)_{&>?zYBz%YQF0(rGBa#_1nH36NZ>jul%6Ol)t}XjEwn2T&tu5~Rv+g^O z!=fO1bK+>gs2kJ%{9^j?lgc!?0OOxNRZCNL{uTGv{7kXYnE;y4){$7UH>!xLG#x3I z7$4uquPO)=@B0>@{yyRla8%K%ZP{>j=K>2o5H)zr93=A&zDGExYkB>Q);D`qk>}m5 z|Km-?pb-t_4@|ly>U`zkh6g`E0QV~zGD!di;Qvjr9k2?lC;YKusdLoxzcR5dNNsEc zbHmbbmUj}DOclub{WK1b3q5qF%#PWn%u0!6ZD|5RZ$pw1@u@2o8R1tlBg=Q@`V^m6 zkEoaG*MV8fHusrP)WN`en*PZ#N0K_Rtl)#$=VbDg8$^^}(@z!+67&g@b zaY}Yj|1X`U2|@zYQNL90CHQK;NMkoiB#nddc@yFD%&-TJcFy6qR4~6)D=N6j7^&9F zOM!84?%$BpD3K;jcPWT&p zm71$N2ceOS`JS(t$TseJknuU)o7%>E^^)V}6GeqY3!9*Z2QSI4#HXElY)eIoLPi0g zK#P}UIb5sHr0z${_0nqEQp z*L5-H``2O!N9d3~KDN$`KlrVGP8@5V)QqADp`>>F(|ZNlEDjQ9zLf>6Y#eX{5U(q!H=5bL)4$|G)QnF3&l~1G4vC zYtA>v81GzntWZSB%W;bX(xE2Hc)A(rr`=}C;P?e6-YU~DwE0-?U09empanLnp35cN zEXdI_4foSvJmN%CT#J;xb$J4z6tE@U*w{7(PabISI;GCgt%EI4EI9>83h6!9fLMOW zl&RkvybNC{5w6r zU2huL$6~j5(YBfz3`HeYT9yaCa~t96-tI+v@jAePBeqT0)Jn|Jbl>!{tnbf}J!GgtT`wnL*l_mzlXMj4diD)hz;#oV}cQsTQL=9yKG{u+H3uRJY1N5J@boi344yb%h#)R0+u6X~&9lKt3_ajb3*Niv!c9Td>S*K>8VMp|p2ogUtCF+D0*s;MF`>+gOdKf?o~_B>GQ=odeSW?6}Gd_T+m3 zNu}vEaSRbBI(RlOblo8xTb~$Ru3lA*+=R#D((v&St{#twiHc(S3sx%WdmJtCTi5Es zKYg-U5G+D%tPfE&@{`riFmlF>7pcPyMnDrcGc&8Mf?wT615FmtJBEJZeU)`$w-$Os z{tYAnqL*PC-)qT7$p_zU6(-9WkV#C=rt0oQfu^sNMZ&;zIY$xFro@$!+Z7c+m3 zgN_lMda{_y;y!jliatZ|loqu=y7tq{5Xj1|@e^tadn1ysAe5koKI+;&64XzT{;WZF zA$F(5wM<-qhpX1yifq@?t}p+GbliR=SB*LO0FdwAc?sw|y11Vo+-Q1P$b_-It3h~} z1%ST+^oulokFmL_#IrV6r7C#UL0x#{L+F%W){CgbN*Iff+E0k8LAY+y_W5XQ>q+o> z%g7tES{Oq^4xp0k>h8wT(Aep*0RI685g}J#(YS1v-anda*cl|8m_z;uoYnuv$)7zD z_rrMgSta=MZ7TrA_Ci&8Uqfs3+YeX7&-yDJpW}SNgcr&4JgbaR4f|GugTCrH`A*E! 
z7pJuhpRFvE?tmF*fyZ6uvs9sUSC;&$nL(tqHT(9it-N0XKo5%IeHIzfF~t$HX{drg zh?`%qe1|3ev5Y;vNDNvbGskvRExiHWyqKmo(Y29_eu%zh%!7*f&5!gHk zn!}nwf8E)EaQ(T2#x(!bjG-Gq2f)`FU?*r=%!MeDZLAArB+O`L3fklSZrh(i?B^QL zhmy;CQf=%^*ofB-56#X9U8A47iuQqeuiLC)1}Lq{zkrO#o+v6_O6kQV;turMG8dC z0&VY{#X#U#_e=b82{H`iLxKeRc|wWu)r3nLzh-DcUAtI^|tgqv1~14fF1tbf{5Ka#@GuDRzUjY#;Q z=P&kO2}p=hh`WDwynIkY&T?A+f5Bx@u;SBwb*#AVp(Q0K!5mk`t&)|c@^%{6-#racn@cB{bKA(SqI0N>AnMV@VA5X;Os2#Z`8Ti)M1 zNJ0;`R)9hv>rH6mb6n#G%bT&smob$ugY?P@K^P7Ms7~{cP?Chpg?W-hd@!4g!E*kI z7ZjldrJ2D9=xul1;Yz$EjeOGG3Lba}%KJ&ye5NB5g(|=|Vh_mbiJ5_h1^pN#v7nQo z(u}n!bb42!BvF0IuC%4BZWF_&lG7I^>6(`V-miJzBOF8=!&&Kq_H=?7P{-kZ^s9U; z>(RWeUoiKK1JAg$(FV;T01Ldo4CMSUL*}6QJAxg_8l6+!{9vZm*6E z*%`n2j%zE;VU|FSNG1VU?|)pw|92NDLA1(A=4**GR~%Km+q!si@|ze5JRf+e@kIFN z&m&lfZ@VY&-u}Mxm;pw|q$~V`z_B7K#Ze^L%l0P_`|CSSeS)__`BGyYuULujh3*J1 z9^cc2nejMX;|ARCuVQjNyoqVFCp~I96ZUTvPa&*Oku zY_UzWFB0`wAd*Kaa=)l7q+r{V2ha|+DjrK+;O_T23i16gv`D?Gx3WBgZ>FH=$KNhc zr}`W&2M_WNd;?OV<^Q#8+twfPO9XK45|8^sLO&34v`PAGMEp+{jl@Sf_`TZ`I;L$C?Gab$}p!)hg!+t-h#s~A32 zTW4beTy6JKj-Ro0cN*yAdHw;8-6;4fB5LKeE$1Nb#sb7oyLGOk1C?JAloOCYUds8; zyQJE{Is&)}RCD6KuTg;k{%weSV4>hMN)v3O>J0N(rwTzy8Jz6$b*vAy71j${clAGN z>&tA!J0EUd>-vgdi%Tg4!-GaQNv7mWsUF(fkCzlw&L(7*0ADkeiXX*X5eYp+T&%k^ zlq;*wkm$fZiBZ=XJ*#Z&G$Ff-s({M_GvpA{R8muzmACk#160wP3Nw!<|zKR zy*$T2SK`)h(GJG@u2kEg@Ar)~{^9JHndQ;ni3OB=iyOZ&CRe+|e;vDf`)=tH(EIMc zy!W!5|A*amezX%x6T;own$p#JM*DVQ#c(?kK#UP8v2K875&`B1I@+BBlQ~RSPJlkp z576TU@a)N(M&|?A4ZsMXkFT%6z+%xP$f=oJ1on`{B(uy2D=WDStxZ5ti)&ePe2!g& z94^UIv)JW0IzHo*aPFf&d8=S$YkxF}uSLl=XwL04oqcN3UWSpKIcmjWe1fl2wtF0d3v6sdqt;s1(_ zS@v$Kp9~bh?9Nmnx==*N``|!Ox4x>n-1oWqZ(F4XYT@V7_Uydlhz=LX9ItyfU-;4j znjs=dKaHOkp|$bcNXZYsikaj;gHUO#EWb}55_LE*Q?0h59zPM~u ze&tVJ9TSZKkdONt0rZ|;Z_1vIF%Yu>C;-)44)4i2>>E$E-H}`D;stFS7EO{rv{i3J zCufLrgYg=+JJ%dh&M?nup`I|CFQgFs1pJtgz!M2rXH^m9w3e*jlF7K%OLIL1Mh9** zblvu2S-lcp(0*FJZF3Jq(TqTRUUb@Hl*b&O` zW$M|MOzFL7IRC@<#b5D%kC9bb-QGBW;gj2=MFMReH6RAS9|Asnek(uU3mmW#JI^z| 
zseDt0G9{qJB>!6yz_qT3sMxL`$2oegY&mxo}13@(mfGZFI04$ek9H{Ik?G+2q+Ne^Ew-KO}p2;a>Jw0&= zpAa)4>QD{MFS8XAuS9sK?ULWBi_C?1>>|FK-P^BpBBm3kD>yT=&m(jJMe-D)I)eW2 z9}17E8buQJ{Zd#!9z=EP+Tr?d`>@Xa;CPlP)jz9TP16AMR*%Cy5HwLpoVMC|;Z5>@ zCdkoXtN?6?HrE(@?aY(t?bOQl|C>MQDU=p)sgaDMtMLL0A3+e2o>#AqPPvE1)tkg7 znWCKR1N9Tr(8%OsRvnxmD)5b89d)QagRAZ$H-G}VVpJ2il4%@mB%2uQW6`wvzDF4m^)Vh4yZuxo%F;Cgu>o>z83Ze>CSqT{x<@M*n6NhGVPYGS_omup`f+Pm;6IH`7AJ|B8X$z|IP&15XlHEuk4yO+it zPkj25Q58yvW&=XzYKQ~^>8yXd87VSa4PbZWeSICgs2j5Ejn3`o^Q{|Q3|J{hFhPI~ z;(6xiP<``3$gh!F$_ddGQNt2iZYv;yv&edNefk(f$bqePf-L~>HJ~~(6qHr?%4ZHe zLBKbCKfY4MyC;95=3Rg7`&IY|q+Wou&NCJd0e%laVtwGVPzP(eZuFt$0^r(oE?4@! zj^?LFjxxzzY1)>)!D%V9AV8ca(-u#*lF2qQm3aIElg3&jzJH{4ATMZ`TD5E)AzD27 z5nZtnw@A>@n|Jo9O?Yg$%kNTn?pOb1$fJ)v!%V@ONY7XrQpHlxy^as# zEB}1Af`MomVnywIe2gBFj(0iAiil{Me^)pb8#PfR zM>>~LU>wn2%_u4A{P|w@fbos%%t=IqHDurV{Q16f*vVDf_-H9Fyal|tF|A`!OcPmPOx2R zSo#@sRUzM&ev_x$?Ym;Vn=F2M&aF`LPjsMiIhS(JJenpH4AfloNm?7fEErEuK}Rio zFO+(Y9Q|8G)tm4L+cP9g5h!zf*yO1@n=n>gehlbcWD=;tKprnEq!h_zkGCTDiQT{F zAvSWL9tXVNE;K>oYi?T%1gGjDa#{}m6vS3(=UHPMf+Q!)m$FT|lu&R-O$$eOf6(0z z0%+qXIRXG%p16=)urcJ_4Lw8n% z$F4t`Hx6(h2ZYyvfI76Dq=EMid4~a|$0>!MBdD$(Ii1m{e*lcJ#!~zE>nL41_*LB! 
z$A9&XLH+qY>&e4Yo2%oYl2dTHs+rS@zw*Bjr9#Jd;F8;RJ4{kM(vZ-?%dj~l*Ly$S6w>4nDD-1s~pHGTG`b1M-ZD$A$x>h`6of#o-baE9{ba;Q3fZ z$OICET{kn+t|5yG0gi_)kxG$OI^;aO+V;<9w2Dd3@w8~i;(d1_>D zZ|~$LwZiyEEmlNydT3Y99saBk5Tn6t_L=RDXBQUPJUsSKnr%S+h|SR+qHplj%<7`v zYyVKo1U{ZLABvVxk&#w7fB!d-j!$|X#?yONr}r!RF&FKq0qKvt zzz{>$eeo`i7K39`g~56H1TfG+=Fgt};_|I5#5AqubNBb2&O}o;mun)xfoGiBM8~g; z;bYrLF@$}2L8(e(a%ALuKSi=uK^SX92 z>DWZDX4mlU;kM#xG5vD=LS#h7w@ZF%I<srx z*}_HAD{Vqo)|*ZEdQ1jFr@CpwHa2(vX#oWMum#fbAKxU=*?t@ch$@=8EVYNuSYnzzohM z+%scuufWWMBhL@+@Q>Rb^BXW&D-s$wPYvapJdkbe>`ryI`$x8`3h60OaE=4x<10q4 zA|h^ZG?S}xW}2v7QW|^ZZU$cfuJtk%(B}72!+Hs>iFPASMo>QiH3R-gNmy6K%J*Cl z!EGrV0JEHXS4HWE4$=gaM+Vv%z?FmIl7@?_P$G#DBs^%XVjwg3snXxaO(rS%2ylu$ zCkfSEDB{-7K_i6nV<#d2yn60&&m`5B?*cK&F2t5!p=3vUweCTuu-Hrq#8i;&8w82E_YMr1A77)pa9Wy74}R)7Tyo#J{~XXSo>o-a@$$GePVQ$e)KV7HD&~H1 zs(9QHW*AkI0$|R0XvDRO?UbDKrDhcGif(jR7{arPD-7UuPayJ5Jg|?KV|gdU?ATy) zX%*8|H^-397M7IM;#vGjXN^SA_?xT2Qv$*f5fQr<66@DCH&Gy0w{X!|Y%G|W{&iT# zEtdj6{omYeP6WWbTjoawnE==U^hTrx8CfaY*XV}Z&8W&Vy(OZ}8t#5GD(iZGoxi_a zx`)J(GW6LweKoG!H)hhiA%%q-F;XOY$pev9Vv%t$U|x}zlhT=2fr62qt9iEWhN(Aa zncHRyXq=UdR!^a-N$KGA__yb_vv)CMB<~77Xl{IvlU{Gd_@NAPSGCo0Pv8X4M+ux#Pvy2YkgKv9u9I%TRX zS!n(ZKncLaf#@>ZMkWszgTm7qeZTq2-bYc)GIFwHozR^YB*KLQe+r{ao{|QxGUG|N z`~3RQOf>J0jm2^4JLH&U9RU9UAJZ$Up?CPT(62O}n{ni?2FktNb!W88uIE+r(AOi? 
zC9h3VeFCD;rRC-WO)>_0zKDME0Ql#5;S+On5eED(Pu6cUjSoJ%@az{&(^YQw0zhNg zr>*;?Q=XaS5~=Tl6av^)ICBoCmXpjRz+gP4%9l*9t%oN=?L9i_HNZW+EU!Oq@F*od z#_ieCK-T)yXMH&Vg*|J%PVJ9TbmRfZpPn+heEmvT#c8=wzZ$n$WR)oj z)a2hh9b^_rC`>^PDjS`Se|oWAJFC{SSZ%oyQ(9VaxYU(>FJ?XeaD$_yrmMhh7+Op3*FInubPB`V`BfIG`{o(67 z9SaLfJGAub*EV{i>FMu?Y6IX?K_E;WnP_kcguDa3o&c2YZ#<8?Jo#U`v>be1W=Fjw zj5FyD6U!A^wx0)j>aN@U9}ozTXok2S`^D(+u7u34coEev;l{59JmQTB*W*Ge&35V` z@8&t3I&Uey*78*-G&VM7mzSgKwLg%8C=bNV7!*!MX+oZL24}8A3a3z|cKb*2;5PAa zs3ujQ75zpePS$5k1dOSd>Yw9+q+QAR5{05{OwwykWA1zz9h$_}){y2d4z*tS9kvGy z`@#HJ(Y89%S4`)qw<8C28ndtwbxvEHNeVu+=bZ4xxp;n>(<}rSxJtI~lk?qKFp8j| zi5dJIpBs}d0aZ|J3xm3Pr7*8fSmAgK;9G#+044~U#yPa%zeTv&0|sh#t(w{l6>;zi zJO+hqb05QbQpe}Tmy2xklPeyw_p!>*KlnnP1_;0FPrf5@wOo!eib6T<{gh%# zQkcJ#rwVzues^s-gigW#?7??i{yEndwFbAu#v5DZ@hw75OH$C@( zne7;`VM;L8;na2kgBtrEWa}xTsQu~!8mav6C0lzP~)HOTx0|kaMP)1 zS!Lw*^+^-P?5zLdhlOM*U%^4zR_*5diqve=_|0ye=3K4DU?I=2FEukcu~dxuD&B#1c$csyxW!Ut23j!df0fkxWFadvuoVad_jTGTIi>qcdy_lsqc z{Q+)WRY`U>%vhPct;gf3(NIsOUFt#mud4$a$=nfU$Q6qf-8WB*`BKm9{QS*ZqNxou zNC)VRI!}!p7~6Nxw@>;pM7GyE!;kth9|b8oH(C4JADoEt`+H2Tqqbcsz4lT9OuUC< zBVroJtH6PfM-KI7^ONzpyPtwA_h;^0?!r-te%Q~ibtuvXKg+R* zv)`%ZZ#4`Dhaiw3xgKNcy?Q=;kD`vT!cacBa0eNQmr#onc(k>1xHMbtLxLbQr|b;5 z1wJM}&v_WiJJdj+t)01(d)E~on34}t7p1$}ZCkAT)MQApGN^s-6P($F?UEYH6s^TE zK?VYC)i}OZjW&B}-(Xo-SomD?v88ii zz&{CU`BD4eA=={n>x1ZrJHSf#+;5(-G@2(iWI|sPfn{h~l0FGsO;8)j$VlYW#UvZY zq0Y5!kkp!3$UDYs)#8(mzp$z*D&&MvSbs^zL`Arz)z3xO6&X6bM99*r9>}|@8yz70 zz{bH75F|S*-C-BK$$Yl%bphK?CXF0B}ZnD4riQu~s3)WZG(TR)>sEM?VSINbN+EA*n zVZ4QsNZih>APNYCvr^LacK)y9lh=N)A)Q=YVAZEItT_&+_%TBI(*kU3ulZ$UblQHh zJL*%mwAeFVp!7F(y%>;?oLa5?CeH|vx~p66h)$w!>PhQq%1+PKLrvWQ_r5n4@){3D zW&1}XKpJ29>7jED{gt*v)Ytwe|16yM2pyz3EE1Gx)%q(1z$WiD%mQ*U7daZh`9kp- z>Vli!CCm|V8F6?+vul8a&-wNZ7TPj`8;4FvP0cR)ySyDe(8DBpD+CrbE6w@&PybKG zll}XLnm9HUpg$>Zv&G&XZo~6##piTtK|1o#9d2lrJ3Xe$=zO5KFa@veWryuJT3)4k zs1wT|2j`u$cv6ezu}!zbz9%k)ne6M%JjfWEgAcysfueAg2d7nEk&D= z@?#I<2Qrk?gUKCqKl85 z1FV7(AoL10?LLAhhCVv~qJILaGM7ff3JJ0Gu~!j~C}HSiVVV9nFq}?l!Jz3pU3BV) 
zR!K^N54gl6B%o+@Q_m>s=&3Bpp2!I6G_HIp^RXhf2VhqbjEP#NPAlXwgBgxlz)8x6Lv)vi=t ze!t!d{<(8bzg zy*bmz#U<@w4M$!)_XR_ml=QoApg)WuA&F@5^I-!;7K>r2*hTpvBkXeWV+5+5004?fyVnEv=w{);2UB7`Z;XR#G9--{@-P0;(op zUQ|qO)PUXZY%F%AXM7puVg`589-{zx*U}wgw(#ZZ-6Qn=+ ze6KKR!;E4Z=5!+Bq5^@N3~DGvi^b-<);KI`5enE7I>w|2+wcyHI#*(6J{;4S+dq8m z-Sh=MoUa($2UG)rDBJ{WAd0gK(W39qvg7L56Sa6^#;g1!6v{0#Q2qbo!D~_Bo zP@J6@>fyWl1yt>8ln-aC8em9pZUdaNGn5T`Wh6ihZDzq|y$kPmzPsGo0p}$;jKp_3 zLnm@2-KMzYn@td4;eso7Zb?i1&P56`km6|&q3JSnd ziW$WMrvuGwHh-y%PF&OE=K5RcfFfapD;1cM_<|V&sBNB0)>oNu5YuK(z>iz-+csCs z0*@mdVkTe|N5ad$kt?YQ^fIgo`M)L-y%+TX#RaHB=%^EvpVVyFcW_;ohmI*q&0b%B zcL9BYIE8LdIR0`pXy>NqyKU;_ST)wlW@p!fF;@Gmh$uFTBWI0! ze-l(vgA=V5A6u)#XX#MM1qnfS26Zjy6x+?$i#v2Uh+n(lD_&JQ1Ar4aU$A+{ey4Ju zGwK;NnBoOg3u4;{2>=)>tqmU=-iZRe3K5XRt0ep680-`O9dCoFM8ip-A^JOFh?$8E z*QkonL8_>BJZlEVipaCD0oDAMiX1%rJw6^to#USU~MFu;hTd zeqZ|{K2rreK$DA3sTkVjI)3pZ0manMgY`eMu9mCFppU7#C&W?-=&h7zBuuVKV$01p z(wNT<1QST`YT~x((A1toZZefZd7cze50yQPAB{%?@|RrrgXJ_!M4=skslgZ_J}Y?` zEj$>%2sB$QbV#;~V(fJf8J7oi9vBOeP}8R>hZ8Nm!M%~mgKGV(u!CkmsR3F|!awq_7=GJbajXl`WOK1TIPH;-@^qGbjo1d6P>e4HbXjwoz5)y~HQBJX#@km+Cw z!1aPHLI5yyfK49k05flBh+vQf&`28&*YFfjVeZ=LHVO$?1*n?E6E(qF6i1g13C~pC z%2rpOj)!3*e#Uw`I(A_r%5wimVF+ll1C&tH!K9uBKo1qho7i&UvynO#NdZ;pJ`Hxc zXQX~)SB?4e0sqw*%#!aSYMn8-ZcD=CY!5&l=IVD(hTi0Z(Ho{OV@`cISQ8yMm}R@l zjL&mNNfS+OpXVHz8A#>Vx&y|n7Z6)>smZMVYO5>WciX~X!=wt;X?+d3S`Xj*$wLR# z3|p9MD8H6N0sbOV=-J7Vl5w1K^KiQ(f7`PeF!I&f^_Pd;_$BW5@kEzp-t>W|Tz4}F zh%5l4l`aU52d3`l*ejvRT!ZT%O=K8SsZPRHp`Z(y)1~%xHJTh00JXd`P^rTK77{uD zPb&owphdE>7li*tfd0i6RHiU`C_rwODzK|7RehM+K}Te+HN=_j z{0w8BQRn1`ocvj-@r8)$#4T7hI==)0d5KIKz}BDc0GEesuS%qb)o}OX0m|^~tOyPu z4bTA&wm}L&^=>R7t<&a?_KX8l00$%ovoTGE{newWMg#hGlsrm};U`*Ym)N~DAhdsI zX^O3Djh%e~ARylUn@b=av9O^_69l!eqjWlwPRVbmE|p*mjD|{2&b(p<${r61X7F1; zntXaD)qwW7n*@X+Aj_<4FaV%70Q6KMl4q$n8&zpFZx{yXxwW=Z)@Z`aAg&FvdyHbh z5B||P(Mf)P8{WcA{WKv6F#G(OCR3vX68w?P09r5~`uI$15V+b&4*vv%3LZ+PzQ6fK zO^=0@5PWBWkzWv?{a9oAC@MwUZWti(lYa&+dv*ssaI5L0gy1EycL 
z1$8O3va$b-R=TTMqyQMjM6Qpgp+Wlv&)ta`d2+?8u1uWm&GPo;*#maDPeNf}R=!Gr z{|Ob9$;SN-+75ewSc{iW=UuxAfbQ76Dp5QRhPxJz+g8Yw#A+&>*=R9N204)1y$iXA zHB=s{)jyeRLBR#35T_*R;w~_fKK4DADklRV3eYiM=(rilF4SO=)}t>#=L*%a_{vXq z)B=_bOpgpL;3UE@nT!Ix3J`>f0$nIDQOs2H@-Rz-T1!gI;(o6Cmbkx%JveY;IMk~B z6a7f(IKGXZ-U>-p1Y2)mmhp+{qB@lM!+fGHx^%7uQPPURC`TFDxZyy+E~s z=_}A0EuV5hJ}TpP*KqI-<%y{yzZFvEsdIoyBegNu6CqExK*{v3Z6FYE@rJg^9m`q&$& zLYxs^8%rZXvk6oq3n+L9Q#Q5T!Ui%#%waV8L_3>%68PHGK9ythzmempeFDqGh{KEp zoXSIm&M#0Ma(Xzhc{K{u{yMzXmwpF9P@}`w1(Tj|+e^^H*Z0bro-?^d-eK=Dx*h6z zu8>8}-HhN9mc&9JPAM3QnZh=n%Q^rP?Tg)4_Wpwiflm6bU5MP*>UDGcQ%5yB-1e$?7x3sgq~W^%;A)S1TqQo?jwJuCPu>Kwz+X)vsf2m!f|3>lBmJ>Rk7 zHGPfiw{m?Ncp(IF<9wTUMP-|2 zeF|a!GnHwC0gcD4QuWU$eSLotZt%d{Y33PEhRHK||6~U^M(MowX8+U8K}PX}glh(f zaZd!6GkCrS+@(lNJgJmB`22vd~M1rqESLo~zhz?qVP(GUa4eWZ5uT#-iDoj{@1vm&06oBE&qKfNBD?p&7 zvG+LQVU~%CS;{ug9?QS`-XyLdcl%8k9Vl?1wZthqke30~&2_-Vk#Jj4^AW{kbcpH? zx6nG-TscoI>E~bSQkz!ff6d}MIa{F5 z6+m;F{aGUZ3!bc|i9gGKXmGbY){=(TK_{8ywbRu-mN1=c>K`7PFMrN`1-(kY^Yia~ zR8mxI%FtSIGABwl_3O4}SaQpGz~ySfEp^h4{jiWjbWfI>CHOS*#glJnaue>uAxska zHS)P~NPHPF;>=BPKy<8zvXJ=;dH-0-6jdiIFSl>wr4{(|2Rr2pZg(gNW|v9g8jV z8uceHM?&6xdHuBRKryZQH~@NqQ9wiZBNL4o$28sAb$!Ujee+MCBE1HZ_BX-w3OKCL z^y5Z)Wx33G#WY@&LDzBjyRNSv5hZ|#@V*2rbc)5s8J*!$v|v z{`#ySr@q2!aI^h!GQ+4m;9R7b!O5)d^PqmukF!he z*FTLFoq#9qvDtr*_%m)v8jli?hQT@-O0d%v?YiFEc(S4I<-*d!M_{;~hH@`3c3g3I z+uZf-UOBpNlX{Mn@x!LdRV!)Bg$xcZ!|Ch;bH{3cKmJkMgG)V0tSc5v8p^--Vtmez z7o}(Ms_nd~kbgCPD=yqk`76^q>gVJRA|lakXBS8!9y}BTH||8cK8FUMA>g$0|Fi%P z!km8X-9iLWKfRVtV=dTf=d(u6?^?AJu;W)|S3;>RVB{TmTBa>i$E`Bnw>(2?T-8SK zbP{=ZIIE_=GmQIFeeBWGHYRx1j8)Mprn4T+1uQ}AoIT>zKD z-WBmLa9Lg@BlsuWXk2|on2Rc3d*C2Tt@nIq!~3sZMPgiKXg%xSywS(2X?)S{*R1c@ zkT%K?Ps6MbjN;V7zAcd5k_-)hxX3uyWC)0$HXs2|omPcmZzzfrEF`P~bV1uAXPR`^ zORf78zq_#7vxYZ%zk40~l*{zb2gqKcVPJ@{d;%vCCD$jUyQktNfB)V_h9Avutc(EB zS1kAwoi$>c`upr{wgUujf2ne0_;D;`*?fnZe{yU-mO{1NtAm z4`=Xv1qrls-uftNep+`~k3ai=ZR!8+j=t;Z<-KC*!WWE2TRz*i6bk14P}MTgzTa-v zt{Qv|MHYYR>&rUYI6l@mioHcg7>0ZG`1!?JeK%ZWa^q*D|K7&VrU~Dv%`_^KpZ_8{ 
zz<7GbH;pHwyK8@|5>QaCU&>swMn-y3ninnIGELG^7{_n5Sv4I-`Wf3_GS*@aa+q&S zueysBy%t@+d;{?+PLC-S#~CoFZRAt+!zLPX0zVck>w)#R)D1*Q%Fnt?7XYeqDw>^* zA9@!IWJcoB+4yj9a4?s6ZZT@?rZ01f()!LPmfv{t#aMpeCZo}{KHzaYEchRTf~3e|KR-C?nmb7cjtI_-fQM%`KY!T zM<#^(F9TQy);Pk^AncGzfP>jvr$WlW?gnAlF=oQJU>G>Z|av~ zTG#h_@+JB$1eLM8!;4FpBRC3^oZx|v!tg&k{l5cSmkp0f)RJHptb!rnqKbY^f;4yuCAB)+Q9S6k3zwfvciwI2 z_p71J$IrDp7e!y5Qs$l@xRD(rKoajt5kbD)ILLe;_|E%$z1!lL!W|v2KUDftkcP&! z$@MVKnQ+S3l!d27hwn^S8}AC*FYP{#)D@bx_GojGWYh1z-Jq8T^Jzz*^CoOs`#okj zj*p^A2X_R_EM-%ir`%8hCzM|@Ftxwd2{V$|9;bF5mD~Y%f%%&K^f|y24%_21)q9iQDO=x zNzNYggl_XB8YPi;GZzi7Fv6j$W&K0)m%mG36s@#AA=-H;J8;+PXzL_bP`M@AK6z~$ zy;jrY^5De_b|~}IIMw$Fx+0hwnG2sCn&O|j>@5<{QlyS=Dkf|x;!7bFxISqYt&Bf94 zV4x8~&56J%`>eZn`@VqU{n6MYMpku^%+0LoDlgqA+TWM@_3$=mX>LIStf z|3%8%d&VN>An8vWw#!Kx*8?0(N!!ntvZ-CNxrZvb>~AS^A}4lLjeqT)HY@2e7H*Gh)&B^KYBDnazA5(>+6E=)gk*QJ`+P&R3_(EyxW6*C>Xs88M zrC2?y?iGj_qPcRXw@ctHwRJ4)6|k%kPg>Cb9ywWpbi1M2)W{+e z=(O5=EXtwSF8){q^Zm%P*X1t_UlxEHXdh&!DbcBH>irpU!Hpg3FYdw*y2_Hc-l#+J38%Z_gqr$dQbj24u}BHzim$eQJ*})?K>w%{_RXP+ z1cCYsGVq>Z?jo^I(Ub-Txv<rv9b^rouByj%)VpX+$>NbI zvJ!h>m!unHP>D-Ws^u{9>0!TdZ9W(*?XJKI^YX0wow#PXbL(;+0-)zdDySXKL?c{aQGYmjxQ8op{e~8`tbCK^XxN73&zm z)u%V5I2FN#KPNRrPS4uB96}Sy0N@3_tUb)MEs1%KRdY<5h4Q;NsOdNL7=j~cN6i!D z@CtMnH^(cN(IMluVJa$neV|QAAO%P7f-i`Z57sI&XBz=(E<;0tN}<#&`$t^JmjT`z zDLkZ!72sNHF;$aJI?~x5(l3tP$lBjANze}Cl$wi5W`k&zu3U=`BGZ%IZ?l#+=~jOe zvwB)!y|{YTpeA}MNlBgeLaPNya@YU%J-KzV*)*fSo^0{IQfF}f7hWz$TTK|CkuFHV z7wzxuY@>%+1)H8azP#60f^@DRii}W&V?+ z&J>09bl&4eln&g!w-d>j?6k3TTlqh8yCqCc$#1$xRxfqUt$lIR{Ty5|Vd^GPA{8># zM=6$2L$E1##X|>Wb-86i=)g}BN+>##4PHaTcJ>P-a0~2sJ?P$YfgmUT1a48+ zK+(7DHO2j+(luADc-LI82@_H|vlPFC6lOtOd|0?xVO;t5?Q){5Z`~fe>Zd+P>f0OY zB*k;~DYm1GJ+GBg?Ipn4o?ud2{}VWhC>%ZRtSdyY$lZjCI^S}sl*c_Y@RqDG^Q^yl z!o9^ux_BEi^nm-)v|%i8POgw(w{@l)Z?-<`>@uwq%oib95d#r$D2h27z(;mgeK zZbW4swc2%N$;IgAm^A-3Afpi-Asx0c#KzMvPOpSHS~luXh?bLaj$@5#~#$CfFgFlaNp)rqQwAwO!@%*RG*|a!im1K(q!Jo zZ#{{I5+1E4r5cn|wBd+7fi?#t-+CI|*ckis9CgO`Rf1$-f&ACMRz>*9yZPlfW5V!* 
z9I9@M2YKXq)6o;JGT|TD&w-_4|N52kiMU0CGmAv@8IeHR(D;UL82S#!4I2|Ul1vvK zQZsca%dC|nTkenPa41L`(KTY8K;x>g*RWq(+uy0YBy!~4cq2xbNIB_Jw-HzoHu=SjMJqn`HEoT7|*Xyb>TcGRQ$7PKA_m^w_Ue zHfBxaiJQG5!g~YU<@FiFOPqBhD4?>KI8Zl+ugnozxf5;j(@!Hq>z&y?=@4g4&8_aG z3D7QREQD)k1`~w)6AQx7 zs?NsmlyL0ZF8|*_PnDni_Kweraj6zp240|3Kf@Uqh+d_hgYjbVNPX!I5Ghx=<%u4N?2yD3+QO{!CfCURpZ%Nf&rx6ev8hlkI4N zEoe%>aNWeg?`xmq?wi}TknP;?s*+dRchn^n#(*5uTJ z8B^%ZtvAa1H(Qf|B?qo%wANW48fjhd3>GJ+E3z?{07PK9#WktD?^v$+swK27j`SA{ zU97Kb>!XIXsnSHJ1ANojJ*bD?Be|kJO|{;BV%yDyugM1t{(g|c3o%8`op0gIvt0qt z)pG{I8Gm#UKi{XNJdACBLZ6<*KIP5!m>ne|WdI?e_l3^arGo1FX}lUlw>%7AW^XY5^TC;#1yQHhP@rlE!^D?z#J)Z5F7~leD zj$2bpu9*P%7F^xhtMl(Fc#`m|JaRZTAH}Pc=u4chG-;{7py%lZGIgt(@%*#5|9ZPl58_G)7KYI;)6E>3*FTN5Vo)_WMkk zzm-S?tjnDK_^zr8LSP9N)|1?cJYx$D;W+h7b4Ffj>aK^?gHTN$xQn}4LsT@3%~xj> z6O(qq)0RjL{z0mdiRK6%5}~d{x@lwM>-u5%JDhHOpRzaKG_Jo9=x(FPe@@*2N}2=R zTbj*@P_h4qum6C@`VHTQ@tcw;$(|Xv?46OyirdHvAu~zI9vRsxdneg4?(D7XWUt63 zn-D@~+3PuPKHvZE`+uI-^LXVmyu|x`jq^Ot<2cUi8e-?TFAJC%p3-vU17#mSrPe=x z_XL#S0c?|Ob+W0yr^rH(fkvuKmUb8yjz_ zR5+yjTutTT7m*jgPGp_mV?aA66eH14jGVU`y?U0+8v(tlAFv=utVf#2l~DG<-;i>U z-!YYkSFMg;l`IY>ppMN{_rYQh7QRzskh^}TTTo+6t$fNpd9C|&3{h|Tm;8WdlZ|Qk z*+|PGb)#nR2?TiUQXzWQrTce3E8*D75r$EY6a)tZi2izUOWfh$nuy!BA$U)dO4}t-P*i6n#78Lsi{OLQ!OI7wy5FGg%jjdA zD9CpB8O}QLCeW0Jcp*#@bw>nV3;^vL0f1V9hqtadPPrT4_fQ7$o?A~;MASWv`k2v& zHQVb9c`lr0;PjXvbR}d{Z$SeNOiH?xbe4qnv$RA>@BFx#7wvgvK>Zt}fc3CQ%=;*m zVgrvO`y>{$*5E&TgI@9ygjcRSF&NQZ+@9-*DgHQrKtzc-5U5!x0E92*VMxRGb zo?42zbvj>vK@>lGJD|-X=k6vj+weRRZ7>Y&A_U@)|&zx>&(^b3Sai~00Rg;)B zDHd;=U?Uu^2^hha9uxGH1TI#g^vU;1(%0z#{os2@KDCQK`SRvsy^;^$fqG@`e!6pz z3!g>}knee1fPYTIC}XJ1(_?aFu;Iuolwby|K__d6FrBFOavA+br#t7)IwkDKosBML z4sw^8A$cK+p79X|IW;%PJ8zJI2nN6J9Kti4XslhBD+*f?m`p%W5-W8f7u!QDKf_20 zCg9-5aSY-=E$j?gZ-e(?BW@s$&dx6IeU!{S;W@Oy^CO=Z3t`Fjg+94{K-&Et0ccJ!G_^KK-0LwY`| z>#+U8P*+(#ss?)}=&Gh{`#fth?xJ6lErTkLrrXGHgmKXhz25;`5DETl$oSN|xy8OR zD2$M3(~la=h*zY;WGi?A_@xoFWooYm_yz7*qiIae#cg^+Fx(&!P(=H_Zs zrg$X_?fusQ0U7cdab{NG(vXUd*U!$(v=2`Ih#Q#GyK+Ua>iw+)0vomLxsVN7Os%*| 
zL9yA?{U3*oC@TDO6+jB4f+nEn5wH4jZ)3nE zaU&W%`=>T-Y_0n56)y5iH)33C7T?(^UQkJPkK6}YgR&z z3vvQ|;YxzuzLMkc_^fAH5WWTGr|U`4MMrc-Hah<--(8*xOFgupSvP-<>tuPtKHr^8 zh!8n0r&!f7sO@&PoDKW2-9Kf z1%AvDCN-IP5@q_eXJyf<3K6VzU?YIu~t+Wk)@up9svxRKzMwhfk=3zKtVtWUN_ zJa(H5lBHC(aX0!ptT4hy->1lYUi1n*>w3hUOT8|uT!~j$iAf6ojwRh-B!V}kq@#P& zF(<3uhZHeTK}f>C^}o83#Z2Pqr!w5lMoGQFX8)F*>KU*0zoiZ-F^Z#yuibJ55s2kG z6dT3kdm=Vkz7F)$O~#F!(en*OXk5Qe#U3xm8V7f5$qr6$=b15^8A7k%MplU@{zi(j zb*-&^`bz&#WjOItVWlv|TFrDzCw&zbdpS$$j?kTVf=8;7cnFb`5FBd}+t!=WMrNKD z_PfTIL`T=va>#(-Pf-s)_DYL6{z&3NGZnr{VTl!UUvB4;Fh=EVs+)V!IOibiO3BZt9MdlB0~5#3^2;c$IXH4- zaz}q{NX5+A`QnR8TKH2w=YH|TeUAAuq1z~p{mf5GEaCZCzCaHiqt(5N{(r?r@O{6g>J$ZOg@z`CwnB$E4JEKUQ(vQEir`V{i6GaGwhAxc?7Qu0cl@^UF{ce|2zhG<}OFPjb!pYnygb7;|o4y3J_v?E#{F58obr*YFVk|MN>s(Exe? zj=woBNbOAr_M&q=3zG;Fr6&E%)cT&duSJ-@&)egIf~Ym#P23wQk|FFHjn3;9QO@hj zG!gMREx6)uL+^+u22+FyokPSY9f+-R?Xrn$RbYP$exSP1(`b5daF3nVzGzWr{N)ry z@xLEC(-|~2^)WT;WTHFO`~#T0(U5mq(?Ec^Yp)Ny3yb2qp)ZQ27-W}UpO`U}l3GZB zX!8Gf0klOdle8oOS>hP9o*^?fw(h`hm|Bp5Se8Ep83n(%kU_z*%!MZsr zkfZr>7^XNgk%E|NQ@wZFv&@23d}rj~vHSK_aSOI)Lb}|Bx;iqJ)~+S_vlrTB49$RyE)CfH*}stb$Pk9 z2c9foUKE~|X^?fgq^i|`m;9W-49%ZeF^u~KpVz+=r<2l%%D?m4gIaGeAF5G~@?lB- zr^nfj=-DK!KW>ik7T<0rY2BC!oV^<(F|mF<$=dnFs4>__+xT^UnOcQ6c7>|^TEcyN zqoW8EtsdcAFND-QEB#Ijn1sq=yglt%}B0*iQCAUHx0S ziT`WCi*$&Y1q?$L!=(qUd|wBzZ5BfeErLYvFxJoV33tRV9i%+Im;ZppZ|AVYY~$;{ zqQKM^z=AI5eNg2ftx0@_kn0r$1dSj)16_Aok&DG35CB$9p(qCI4xHX{bYX6Pw(EScjnRB0@B(f&7YX~ z%=UvkwRltQ^oAeOTjuRvi9cSD>~XaIveKwsIm08mw|vr+VIvk$Uw_wqbhcW{u=xr+oMVtr2@cbTDKWgpQxxOZ@Mv8s-Uq> z0Xiq7(ens^Nk~!R_?64eF-DeQz~D%cMzJd9zegcDo<{gTtA+P_Humli`*y9lLe%JO z%;M?&4s&fgJg!C`t20$ZjTrWWqp#=FVjfvb-|rqC9K3ydG$Os0fWGqcRoxW=ZTs%$ zy(2EdzxNN?uiT9wrnl8WD==z?Q+2>bN4x@;L+!x_t())7F&EYZJi$ACHVezibf44; z2~2S>S)#LaLMad$eOh$>B&};*X;gszg^Mdj6HuG*E`rFg!R7<_xoU!lEfH?3hS2m#vYL8?Rq{b{FLH|x)%SLsCBybi$q9#g! 
zKDGcdvvcb=JOk2xV0S)o7`p0b@xIN^Yj348&iwZoiVE0cn52<*fB|7b7zfKg`nGz) zavQjvws1zkuiJR+$J%VLArAqDb-|0%_j~_~l&yzw<(fIr&|IC}lI@{GM#DH+136}) zW>w+gEC4ei=%kdUPl@#Z)P!p>9RxA`RNF^zH3zK0&p|c`=@8EaEz8d#U>&pNDg>?n zmKD(X)uunY$6S%o_c};@ClBv$6TY3&ZdY!WVAEp|DtgDCF8;cx@PE z_g4{1T7g=9w35-)tS$0xG8hy4gi~)4#q*E_+qC_(fj)U3eJG54WMjaN@a+2vu!5LX zC)Rb!PyDVS$kNG>8rrx7B&R%e@2tl;e=^PgkiR=+{ov+Nr43!z7V^r?*U*M3q6heMSHv ziWKWVk}Aowj+fH-Q`^Uki7*>Ze*93$*7J4tI502zHy`~(l^Aibbwh|)*ULC19WYL$ zHf_&y-=BY-I{fVL?=iSmfJMXc7=Oz1E|3qCF+%V2#LP;UPratGt{_6JH+ii#SQLI} zB&;>`HXK}&*7{+!du*&{H>cp$V^!gHkgCC%*7JwTm=&r$5FBiWGbp0tR%anjm^vBJ zut09t1ogsnP&$I{LT8brl@mHD%lXD9F;(f(YmNtGeMd|-{iZ@uVJWRRr$a7CmpS;r zqow>5qk|0qHUhqaCN^xITE5`M`Dh*=viA$SIK~kDU?xzoub|Ry!9?)t*{3}=*#a2P zm|)G1GK${OZ6xBct?kw2_8$rz7jSVmcDv2j;gh9W42*{$y#wznzd}f&8T3{onuzST z)L;R|0e}oPI!yo-n1|Jbpl;4_ElYc0?_9{YKkUDre`+r-9~ECJJ4?NnmpL79(2pRf&oXd5HOB)zlhNj-%;u<3%gpCZfUBq(8 zM&i`|lS=jcw>+Ohs~JpIe`6pcgj~1q!f2zF%~}N_?URc*T$Gh+j?f!n%eizs{WcW9bB8j5>sgvfq&Oj(S&j zXh1;%k<29E`shl2876=IAm1?tt3%0Bs#^C$jVLOMD^;C0YB|K+a#w~wDF((X$SHMW zWF_Uq{-vb8=Dgxj(LgKUWiYq`%BaLF2_X&*P{#?V(;#A@*DJ=|Z~_aZN zW*Jv`Mp)l!T2a?=L_nQ@*qZ*crESbJ90nZFkdoFd4h7@CggoduEsN6xjNn_^3qnL` z(}jFPNJ_XEv`X*Xin)~IzqlPhZ?w{o75=<&gO$@t}#18;aTMFL=( zaZjI|`vXG$1f%KvZpPe2)_EHMRUgi}DrAJ6FKlZx!~eq%H(c$D3 z355UKN)|F;-1CqQp%?pL+)+uM3`>SKMC-(BrDw84dbA!=`+8ELh&bw%d38G<{Pdn2 ze_5*`X9q?^3VML7<+(gO+;HNjy6e|=7~ zGFV(pWT=q%U%+^-Rxg3X7sKRN(;tgvZ7G`mW&6RxRu@@5e_uma+dKxDlmpP2kxl6Y zuOJi8X65ejg;;@lPQUjPqX#_>B&6yVZaTQ%+~MNCbL!Uau`rfOK+-vc0s!`lI)2 zQpP+sVhS6cM&Dbb>D9HcDZK{Jm7SKX-7c_QQ1T~2C99hWv!F=qz!U*CFjzkkn7fL4 z6PtTDh-8?+gRWV?#nx@)NQLGzeg<(RJ_`00VEZ0bs~BBo0J(P)giC`2MoWxhxkwI% zNi@YSHy;hFMqM}fwexjX`~h&xeHC(AQ(zNbx=A;M&vV8BWr!N4sy%04}3nG(E2N+UV>cpF?RZO6`@e z9*hn0RiDMGo=$qcM_vCX3yCx*`I+6TgU_kU#?p;JGvK#wj?q727#*EWo9&DN4@C3q zBA*V=o>go4rsDO0yLz)fhy@15C)zP;zN$E0!(DW|H_= z7#VliI$2#W3Rp$?9KG-VnqTIdIDcOlJ=NB5Ei11#kfO=nZJ4?bkN&-gj{mNN>+nRg zT;@W)>qTxio>W+k`9oo~++W@smywc$=Vm*cw|uR6%FR22baOs(=3A)S-8&SS_qn1MqUWCIn<5zxcmL^l0z>*;) 
z&8KGcxTQ;VA%Z(dDX$ln6jWUqCtnE$3bDAsk z#%ABrWbg;OT#;c5#J==U@8cO3+Mmc&rjY`B8{i>z-*|22E_&`Q4}>V$sX-Pqm~ZTK zCm>itkr;I2vTm=ltX9M=kCey-YBV3*R|qvy*hNo(e<)!5G2SFlb|N*yg8F*rF4}K- zn_3Wh@R$A&_?vl66pwrzt3kjzQbw4AlwT7b-9dN@yp6c<`=CVif0o34h-$t*wf?Xp zNfGf1EOqKV9Mm{&$@_G*V1>$79-B94FT$PvZ-UvXZdqCeN<}V!zeIx>OQNZ=njEsC~<3mf6-;!T+!Xe5PnetDnd#4kP_DS8Bk>HWsO+Zn*ffUcZe6u@b^ z&U{sRYBi1=_N#{{!Sjl&DlXfXvKb4PSBrFIXL7%LWa+rDt(Ein^<_riC*$qKJ zcR7ako=B{{&Y1@ga~NhwRKv+}-|rie@ZQz`xstP`?D1XEnlF29&NbOo%`)`#rH7yW z9N~Xl25SH0jUoATz2?K{fJ+DKXV49mgiC>!J9;Q(wW6_{YZ&oaJ&zKg)`K4@2eAgC z0^ez%@cYJfN$(&QELkkMv`|r@!F-=uVVKjx82XJ1qj+LT^PYNmX48e$nTC4J{iG-R zvc`zx6Z1?fueWgJ9I5^zt`|7f635aNID1tzIVm_3Kjtpi)2qMo0 z+_Y=Yu9GLxZAG(sTlab0T&=)r>srV``dHO7`Z_|o{QAAW3nu0PD8{*xs#JYC8ZY~g zamI_k??y9{Jg>8-SIgqZR7w2lXE*}hqD`MMtfSSKMA^6^gVi;-*d&PK=T7pIaW%kn2K&D2(zVjH}ZZH z>QO#>8cYSF$f021xsnEoLJI6A*qE@J0Jh3=ThJYDgd&Dt1fsp2y|7}}-P&(z$f6>j zv(;R#BRM2ZW{)(}jLoE9KM=*o-MGcNJv1^Puw?&s0n#=G2%3xUFW?orE$7GW)*XC) zEV^zbxi}p4E6u=Cf-re*~OPU0wDHy3U|MCiqsAMkHw#68m__DiR zZ(z4P;$zue417QdEdSk!p1$u-Sa0pc+9w^cJ|mkWos z2!y&Ohvj4y-pT&ZqFOk&{xN@^ZYi~dvn6MNe&rjvGBLk$-uU%H`uK(oLD(-Q?@e3J zOXm0xT|qa8WRcIhVX6DzLxKRI`Ih;uUVWoaZEs_Q0;*c%!+qGQAap#|?&ZZXazDp{ zUP@5NmpT*`I5RJJf8&so$)d(%Rb7XB+~PHxsnlv-3HN15gg zL;(DH$oz`06S`RLtICZ_e6spzQ_)raMpwGjqdoN+W>q2MlQ7+P^wmewr`>l$imDtD z?`S8>{?vdmzk>0>rI&^$AuIMQoxk*DBOByi*#0Sg@W3@sA-{ohtMG0FZFt4O45kZ2 zHCq$$>iFe_h!RtNzX4198%f$?!i9=TQil!Bi?NjK!$KF0Qql4sz z%ArR%2+^&7ymo8PSK;bV2#5_DIfgzK66gYDoVL>kESOf-aO+O+3nbSB5V{|;&`#RD8uAS(642)+0y zD0r&z5ZWcCL^Crp>w~y-4K~{n{&G2G7?Li%)nxqLG)2QL@SRlb%_s2NOjg^9VxSH( z{$myQAvt01)4&|Q$ov|lRAQWkx|&APpNIS=zixn^D9Zx6ggL;*!SM4~004xhCBo(> z3jqGAmh7l|6k|OZ1j>sWr`t`C|E0SKJq>3K_r#A!$zg8}?`$MI-;uG^K+ypqTssET z;%wP$JKF$yIhNg2hEO=WZ@;vw z1HNJ?lmIq|re@bil$n(||20wg2dPs)m_EejKnq1^d)+pA|1Af4MU>MUZQP;q$J*h= zzRBb*5(zuBca#pk+w;q5^EF$dbJ8xpr zJEzp#VeUoc>*Ht*<~sh@2vE_Zw;N}JfPe5wC+HVANPg!jKC+J-J%$S@fb;#Z(50|b zxcBdQ5!#fD?pTHSg1;yQ<37*U%+UQo#pT2Qn__BPIWDiDdp@(HKbeEwj0i-!AjP$R 
zIJ9!$9P>GkbKt%A^Z)3D(2pw7<$oq(k_Fov)%frjFl_2D;1)_;Z%R-Dk$%(pgAwAGFHar}t}irWR8^^3OMaehpLwtJ%-a;@ z&#TtkVt8lj>8B_^6{BaPfZtO7vY8Y#4!qC#cGUR_9RuWCiCmwH&AB97MWCgD(#P8s3FRSs9`fO$R_)J{ z4O-}O#zKIG52_hd#|&!e7ps(ml)>lJHGBB(O0N?Bp5Y*PwyZn$t{(5TD^k*J`#dhV zV(6xJ}uM-R?S8tg<+LWN$N#?u)#g$CLz|te1;J~+owxTkGmC{O`s*v#H$Mae)d3n${EPHRRR~%Up2EL50!dN8-2#+P1+Rof&vTDiAvR+#?KWMYo6N)uE4~t&t!{Oj;ynQcQr%IZ{*Fw zfBnq=TUS4wA0$2*n2&%dR_A1at|LqzCJ>BZal!RUak) zdV`JwUc2u2Xz{Zq01USS zg;Ur_fqcoWQPnxT{Qz>g&&>ZwtY`Lygj`1@fog-Eto5=t95q}=f5uukNVz8}UUV9cgwLIkF2E z3J?hBQ2l?EZy`lMJ=ehUP~xJoeKl!G!1*Z#4EC^EA>q7je##({z6;{$l^6AbWPxdg z#PB3w^}DJJ-vu}w1Kgnz1+of6P6|OvN4JbiP}}v{UFFiAbxREJz^eO?1YMgd7Vf9>Jqv=c{E&X&KR(lF zqLs+Lem`KJSIfVH=#(YuGHwOf3nNYejR1Uw|Awh(h}`V+yxYN9*FT?WRzodCwzAvT z6oEs7#j!u}aOn4*R`!~E(2!1ou;7x=aq~-_#~nP(Kyo7(G--)7jZdwoW3#doHlNzk z`MTPkojwTZqiW}Cw>fh+fs{N~7DuFsX%N6T0SVaxhYX-kbn)B=tf5}6za{$ZY|(f@ zUjO3-7??pFWyjId$*_m=IDVhQSc>$1?p!2NHURSoh#MH&GNv_0C|3xT8$otksuv#$ z6s&v`O6U=Sm*vsCLUf{}<_!45e!5^$miO-5aEaE%e7_@;U0~oJaNcv}lP7YmJ(tQU<+X zFU6UiPE`LCT@{z*Rk1=i40Oa^}5sNiP=(~ zXiuBvpL3QXVA3NhO^OR2XL#*v=)Qmr947`f7vFkRH3c^50)8wE!YSjvXFetHsrkNg zx->p2fLsB{1T)(Dqy(EfIUhU3_Yq-*2qZT5j1a^H@aPA4)huPBn4}c*SL`s09N11M z?!e#b57?-{li{}oixk}9v2xd2^K4#SEgJ>J&`&}tiYyj?N90&QirMY7f?i%V7U;o3 z@O@+?hCm4u4~FJ6hvYL`DAo`!Tld`|ehguLm$O^R$8-l(jOiAJ&S&-C9NN1Iw!`|f zS}CcO%H~+DDHA@v=>(@Y`>1h2yVB>c_ek5pAe=9zf*%b%EtXlE=dWUyHc)+oE+v*+ zee=&xZ@ASMKei0RkOsQB*#6|e>!}uZg;vR)99VE%+ijd8pJqJ~BZMWQHbl7_p3Rh7 zy<4`6u{Hwx(r3RxpmIjx`C~{=vqp?i-?54+DQmrll^o<~4ZSuP_}sxW7u%gIb-%g{ zcrn=j=|h{W2a$+j*?c{4?GucN4F&@}`e*z%?cWMA?o^Z>_s0I&1x1~*Gg7Od#xIIb zm1&`G1Vd>i%T=KrQr?pn3_7;6-1y0%@xxBv0-F)V9lrDbG{B35`jquMk;+< z0WG6a^ftJp+amu9%&|HS5s*OOp`U%&oh`~3kdtQDc>8q#pN}&SQ%sbVqEx3WD`HoZ zg$OGd4spQ<6Jw<=DQ)Zd*9El>IHwc@W-8)-Z#OS@|HZhYLG{q8(KJToBGt0xpyBu~ zN%16XarKlGXxM1c>s1o`&d?%0k#hN`)s6!sSe1Mv;O96~p0*c*_xaJ~EU3RdI$zhM(MBV8eTVh*t$<*bF~=`W9mwKN>1R z(_4wZCMMFSPz7*`Bc}bWP;|H5tYa%y7#Qq>YOx@s(p@!Zyb^_Lc4{45lkGW 
zDToweIY5DS`;(1JpUcRUvvTW-1}XMlVEhlFtxauO&rq)xXa_*;y+(uXyoS??5r%;1 zr^JL_3pm5Vp#3EIt6R8DPR$AKF*^0VV5wAsuxXQGltL{zhrN1FZ+=QYKiT(}p19b? za4T;*Uv)j3^=-Cu6F-b@mJfDR5A?3_;3QVD$E@P2C^$Y)Ji2h{hcdNbIk_Uw`$wmz z5V7v0b(VUF(T9tVvaqtUCMPE9ix*Z;(Lv|$^%B#q{+dP(93I&j|HK*Lc8Ml#?$t}$ z={Sf<*Zsz{{ho(DGCxEke3dyZ`>f2TjAYF0=(TLrj25dbDMQSp3u=&WghJg~K9(`| z+c3vEUp=szP1>-lwN?;i=PNbcoia&l3z&WKiFPL-VCG&m&%E8+cdxBi?;!%rn#DG% zc{vo@3tJC_>NCiM1y{S?Hf(j|d`icUjtWl9vQnLCUZM-%F?ju^dvk|>bB>eZ7G-os z>?Kn7hBJ1Ku_$Xa`nP8bwW{YL$Ii0g9Pl!16SC$)jpwQ!kq9((wU z0tL%QX8!egb@Ne9-?>)3>tp?%X^yBfJlT5rI<7g4XmCmP=FWBMm(L$}lIRpfYtXp% zRIr|l3jV-H*{`ZQzF_aB7<{l^NGM=;a=$foF?$xiu7CHvh=ve5jPU4J0RN(B}&J!XqZ!FFWCp9KP5E3-i;f z38RNti()EP-s<>YEOm_<<5@o;D(DC;%Chts<1jyOS!VAVNt*3f)v>pMqkc+=^@~+D z@O|+1rhhNMt4X_xv3l9ee^suk{F*h@(>O#+DuDA$H*52Kh0_YxpYdT@Tl7hMj!sd+ z2RcoyYVYehOG=c4Iz4I&=JnUH`GteeAs&~Dq}eJl^gBGT-AMdf&lWJXyd%zY)_a`7l&H?XCq(k1;GjB46~ z$A%m1l4|V}M;_*ipk?~~R&;b4KhK)uL3b6Ezb&m?zl3eV2mT4+u)fAdl^ z(Kg2`_TP`#Qa|E)CL}_gnZ+fTM>FZ&K^q&Iy?sMud+*K_>MaAdSOFL9Apu>E{{M-p zsgI3<@8{2^bkb0&k@Q!&e5IRuK|19p+7lfXUFJ|bof6l4Q=^N17hk#cp?riz?1;tSE3kN)mjp589`NOiBFu?SBq<=0}ic_w7PL+(H z1`PVZ!!I0jW??*M1dFWM7*+iRvcWw2yQSZ9Jt0>+Daz`90Na0i(|%k3WluHZbARjPWZ!WYdc| z$)Vn4kmQ5$8G9%s^YthqD~V+Bwv0vAn;R={b9{YtZ-$D-XD=~oGHw=BXsBl3Ai`~_ zLoIVe(+65qYIxfkJ!HQ+4!6qMK{s5}sm23DN|5;HFd3I-)Z#gXUI}qcU;Jd)X@`E^ zC>j>VcQ%x;MJ^pQ+S;ij0&r__fP~G2-^MIl7bN%OdyuD%>~Cuj6n8 znmWA0zPxu%{go;m-PJ;(KtJgc#vJ`Ws1>b;yEQK&&aK?Z>pRG6m$~wLKIwgu-dAtE zNo?OE-@X}fvmk zX|+oSpcRH}vj)DM+(uIGvfseuHnq!b9Uhr4sPL_3e;do9`i>9L_qi#H)Mm9yCLRk5tMoA!6g`Dab3yrSZF56PrMUy-J#`B{F*oaX`2{5hh`(F5JM zud;j;nuLAur_UpKSj)z;)}$&cOe*0`ccf;%wizyhlxN+8Y3_(y zWkXqcV*Jr!qtynS6;S4|*tnLF8K!7Nxsh7wuCv$l+4|AiTb80g*-J#WBvS#xv1ICG zti9&1ND{0!Rg>JYttwinT+->0XlI(PjY%>GC$!y;NVbqt3b{Z;$rr{GS;Z_((WxK> z{!M(I%~t_06`6WpsF#S69vJg&Itq$8(uRH!NaMY=JC-a{kx^1XSW1xS6T*_&A9z4t zjeG5y95#=@iN&S$t~$D==zEh-=|b5KvC@BARZ z5?D=CKi?8*KE+l}A@I+tQmte=w8*KyBho0Qa5+l#ZEB_SQQlvLw+ae0b;nt8J~T4E 
z={p`Zf9C~}F`PV|+iFpK$NP|VyeP8zh2kfomC!3C=?5BNfy_j-CJ2^FuD-`o~*3U{GGM8 z`#T+<2joBAvv#5icZAi=BmF<4WzIjpy-f}ILCz|`Chh8=4#G#Y?3=XraaiRhhwk({ z+($_MFKASbgWv$4adoj47*Zi_(FQ7DM`&0E`ejsE=eZy`(aQt#gmsTYtZN==BvUOt zqw-@!dMN}S6q)&UE{HtT9rvfmbrX#iqU=AUBV0ZMYJ#iZUe%|)681uTP!qr4dZ#Bk z>FvUKB=t21`}=6Smlps`QJFXJoiB3v&EnK}=P@Q>kr7?d6tCV9!Nz;(lPUIn!2lI@y}B8UW`Th$;Vrs*=&>He@!{r`e)XKrN6JXlAPGh zG;p#Q*AY)1jA+nzKPApDuFf!L(e-q>I2vcJR3$%aZ|J&q$HrRp_s+k&{$HvT(+SRV8cs--M*wtLBR)S}Arr6|pvMv#WMl`hS{U=lr5w+rIWA zAn8s3n+wzua*BGt!@cHOJ%|djCUR(B)aj^{48l@-@>k)G3Hi>w);UOsHIt%^JA3V~ z+nc-Ic1v$ER5^Pd=zYc+>RytCN1dwchr_%QA>A$R-57Q%ElL0QB7)??18P`NM;9J6 zyU}=$ki8|sqC?PicWge+9Uh7A-PgVZ4(YPg| zFSRnr!B&RovBKR2hKxmgrmC?2rdM=){Po5H$E%I$Yr#iqhxS)v^w;o5ZyAMWRmD_< zb^Yk(JJ!eH>YO%M=ye+6pB%l_yH>_ZQ)YxI*^oh>RU$2j%gX8pJ*5vX@TL7=ojJEA z88bLf_)}8PRiW%P61{6-rYN^qa*VQ(9Y!5oa8Zna-HX}EHZte4naA-HmTC4JCqCJ% zJx2rRhw#+O&F7H9eQmV*b!ahX{NHiIflE>Udp`i(T)FMrW0Utpp9>B;e#|bQfoxmk8jQbSrf-C^K|7`~L|}FN zN-lvHq5V~cSu7@qe(&!KD{o@RLe76IE-OZi$MPGm*Bx1?-SD`8c)U-RX{k?Am746*8 zxhM$=lDG>Gbn5~Q_R52p%+8UmmPZ7dK3x<{zngILZI6{{&hqxHc_A2->L=)bdh6Cx zS5>~;gwpAp)nebxsnYnn1J;gSyZU1(O)l~E74H28C9x!AmnUHt}L9YG3pzAA5@414Qr$anVY21^uCBI%zin@CF(@ zFVJOo7wxCWc-opV%!Zhw=+gvVyAPWSbGF#}g0AeO`1<^_3beQXVNKe2P|%mrHx!6@ zH4_e5)$ML5R3E7bAypXUkLFF@HSNSOt_C{h%@@Fgd~Bl_MyO0xH6jX9P~ zEU}THz`a&8yCk{dGMYjrY908kFL;8ZS4nFzKjH}EVW zgb%8nRQuw_H}4eZR#5?lEikwPKY$j4u;dPIH%?N>?3%L;i6vTvQ~bv-DiD8Q+3B!X z8+rU!;Y)6QfX?f7nVP9K;@8oSqZq1L8Bp0C;4KccN%emag;F^Vb(|QFR8J?_AlNQX zI#$(PfKf0N@}_GQg|GLRUr7JkKcT}d>{pVpleYk4DVBN;#N{^2hoP247@Z=AzT zXsIhiKk>Z)v8Crw^%lH;XgEQ#UL0+HsL6pROZ#``nsx;JLs{234wnu_H9Y0R_Yal+ zxzH&#{e4?8xfvALXD^qs@Nk2cgQNW_LF?=2a#g#b7YIo&F9~wquT+KARnoUlDi@Ml zHrL!qru5xOJoi6iZoP3%(PaLvTI(qB4#qF=o?E)DuU@@mdY4b<%-dr^hp9lk`KOE= z+ryn*uLf<|_L@K6ehbs1sAy;`gFYW`phmp0OYz=JjECtBM`OPHb!?3%2^q^72wNO` zDXWhYe=aXOIr|gI43;`;JfMct-Y^>~;GNH|s{Z@0F)_J@Uu90KL*(5AiC0iJW>W0(4bEIHuzQlWTV7S-^f z(QG@TXVu=lyZ3%KJmZH@D=g5e=PPLNGe?N}1~2UaP%&A0MqcFC>f 
z!w^%T5cPY;1n29~Cak$we!3r(fMJ4Wv@v7beg_c8R+h@uEUCK;oA=C#-kCwMKMI`* zj*^x!)%bS-1ZeSo%u3K1C-naI*H~$7c0l!;v`HyJ9J|gUiq5m3aFyysb3V%M+QR!` zcNV2veH8sF7ECn00fBEErnf;1&fs#_bPy&6L+ zz1F;OZQdynZ~bKh1Jna=uu8|HwhAa2UMdgf`IK|L!<9cQJ>px+_tcXK%pnMzJXZZ> zmD(iCstDJMLR@99Mq+ErdJJXZnF9`{V+?qTT%`#L^A(ztt|}CgxdrKS{T2(=mVtVY zxyWnIj}K#;@BFDu^QJnRivxr;c;b-qUG^3t7Qj*!4c-V9XSsyLt2v{zJ_INvy`yVaOaq&(e@34Fn!7TPo`1v)sqX{0mGr+!Sou zGZWT4Bk##~rt>o;XkTmtH5S?{ithsNosEKNM$SfW zjS8Y*J>Z_rwcfj*N647fw+MPiWgjuKQ32O$WLn(BTq1~}eX$VRY<|zs%&Ncn^)ck8 z!A%+S*);a{sGQ#Ot~^)sliz6$EK7t@30$~CZ78Q3e-Ju9wk9~+y9K^PdzKHuxb#^K zz7+cB8~M+9KKe`Iez$Y!mL^2=~w0lU;R>m1xvfl;O||v`(CldUBRV z%K{PcSw&1>k<_+1%p1Q{)eAZZfML_|NGM)ZYPqKx7_t8^>c09f%B}5wz@S@NKtMve zK~h3gLOP^jq+7aCx<$Grq`Rd<80nIb4(aZO?;btpJ?D8p&-({_=Lghrn3?voS;i`FNP%{cl+829YEAMwnnJdplsO-sW4jTs>r8Z@v{XpfX|l==*&p9Zfkb; z!j@IH{`LY~$z{jzsx7oVO<>z(U=^GSdAwt9IvXq+ibp=@kuL1`;3KcuT6{x zf#I+%Cub9)~Ufu*#ZuJf3h>H1Ay3T8q;s=9$@3JY~-im)_4xj;9R!qA*8u zO}1&qwm@QK^ZYJW*agcvAKKkw$vfyp2a{XDpK674GIZY1@~Va1+#pPs>ZKl0f8c=Z z*^J(=tg2cRsA28AS>fIPj>Rk0nm9czINfZCd~}MX*U^aeGLAXv5MRq} z+g-5ElUen4UN|~d$B(qB>Cq%F*joVrYnrZh4)Jq6o!Nfxs9CUlQTag1P=cyO9)PMz z64OJZ&q;2$_bU+Vz7|V4gAozO>_$F7t`R2)kK-9A#;J7`5^3n_Ipg=^~j9Zj7P<853?%>@SHa8h}`N zq!0Y7fL6!WU>SR(t0*$JyG_X}s}Qbi+oRb}^HVWiqLLZa={7{oFm2%>-g1OCfU+s1 zna#Q(P^Sh7AtugOoG1hZKX+X^;$&Y)cJc{cPFwGvucj@XYNo+Zrcinr8c85F*J`U! zufwE?`Vy2GSpD8NRti{*F4J?>*n%G$$Kx}BMc~B?)D(*%Y3`w`y3QoJjVqf&O(?%D z4o|xD6z2u|61q9A^il%@aV3O@WHaMT@*YppbMFvGwB7p(M9RN<_NA8AGDY*GXRW?3 zgC6-HF93<{5V;oQmFLqO#lMxl1&ui=0m#eNFq0ZGyd~NX!hjfiayFtoPG&nu7jv(! 
z8SwcaFGqEL0Xjgl>sqed1e|1x32DDPu`spOl_*JTH+@R?L0E#TF0}HM5qlp0^cY!C zhN5w{)*zi$o`yDUD#*ls*ABZdLFKG&r3QS*(Jjx>q8zrkTa0Y8Wu&tu9@>?F zSB+V21JV`OhS=rv<i*I& zO#iTxgsY(vBj$8AVt+~7nuDdW5;;KlhZ~dEnz#@unzqLsG}P>8UTb&O8^9Ue4{4)N z{FM5Wy%Z2Mu$t&nZap{aYxmmF*o}JQP3FMydLVg6cVMl6O>`pL5+t%CUny&X6-W)@ zY*eA2lg2}0iTs2mR6c@m)5d@H79kE`beyWZ2b-E3IA^wM_BfWX4?s$dV%YH2c&+^6 zO&!aWO3*6c#Js<3YU$F3a;-%LHx!V$&gss+rw5$*^x?$YH0(&^oqGZVE&+8^lqGz> zJjkfrIZ1X{sot@#7sVGRQOVxDpU?l%?Av26j@L&4a8~{j8KWmF(OJ$rWD{>x+u+*y z2$LnKo1?3@Ug)F39Q;NC#nY3~@LoagC**sEleKD;IDmV+x-UYj^Ns+Zp5a@deKI)g znSBHmVz6x78#H9N?+WoFBGh*)K=+kBr$yxYl`r67CV4Eorp%d_+v)sQ!nD?4mAUpe zZ{9f13#*Z4S5(loSe-J-nN}RnZ&|1Xouc5;wipR4*f!1RxBH?zeJQQ?+0NGMHu z_4vp=ypro4ZE_#(AKS8lm^Vc)9*#oBJyh%;YNu=xc!v8{cOC>f)hFx47yvH3RZk{F z+V6qSs|7rpAX+a=m;^VGi_LdERp&}fTm)ca{)of_2odQi=4$P`FR3stLX{7mDSwVt zzL|U%G?y{?v7tyM>oW62M>;+ai1~A0YiBQfcBuRtqOgo+9r%o zDIPrF?5?n-w98xhM@nDBjIwc`;<-Q`@DW%&MB@$jBZakvK^KCS7COspmegx)nWk}{ z5;W!@EbS{zoVl~O${CBL#Ffe-v|NjYL;rb^WViZ5{*4w(w8Dux%`jp=Ou?&}0SE+E zxbEy8gSVL`G`0Jydio`ChzT+m!C}e9dsWUr^OAma{?tWb_1mqik4xmm#D2wNyluv@ z_S4sA=klw5H??J_TsG1%MlVZLK3O)Go}MmUy-NG+5n${VZ0z`JF@I_x?j|jIDuRyU z>aJVbdTYV$_fAVR*V2Wbc6(OB;@@U_@{KKq$u}iF5DJlMzh1_& z4FP^4SNQcWyj$8v{1JOUzQA?`&1PPeRNTP;9TwuiOC5N%w%nE%TefX_W!=$X*MImm zxz-Dp46MPI2ZhAMQV|-d-1zd`d3lr!;w>k_`PpQ{5Nv98gV>^CDp??e^&;X138Kg4 z5e%1MbJED}K%!;R5Td#QM{?vYX9xmjSQ@b_8U4{Ns14OI4D z<|Ek%BC+42`LJV1kGrUT+2bX+b$DNp*fY6Y{O;#$9FC<2dMDS_A`Q(L3oqQp(q6o# z2*^2Ip&!&LqP7CUg~$Gk2`6YF#Ps#d@|WZx1Mfi5`eD5$%;}g@>eh@3K;7QRYA5=- zBh85VV~x*cHghj>_S!J_HGSzbI6Vm}3i{%t7j;AIn!#1cH%#=sUe`P{ppw69Br)~r zEeegVIFO_&3V4T-*qLUdhN#147QI39e)Pc;o%=24!*q=T&@d?!2v?M5KLj#8*Me0A zpU33m=hC7H1m3Ih!PWd-OwIIaB-Vzh#_E#fH)xM17qVXlK8j|L3kvs0@$?ZYvymNT z$}3WP)SZkT;i10ecGJoDCY4Zvv=t;@Ne@Cm3;ZHnma5SdR9yzejzL+$x?P#8ifEI< z1zoL=MO?DenMv0X>4djAsHsWG+59$%h-eMw)a*f5UNO*F6w>5Z(cj&R)9tqNqGzg4kDiCg?|$l6xZ>>!}y`Gx&27zc-P z{-Tv4Lx7I~l6amLhe_X%XAn5iBVBr zvN9e{qezb%E&;2QX@X6ls@MUAOZ}tZP!o#Bo#ERtfhJr@>`SQdqV3S^IlFrf?F?=0 
znVM~36mXOl5pk{M@7E-5(jM(&+o+dkGcJFC3W^z+NXhS*^?<6n#{Z6LC`nWlJqIId zrT49a;7Je7@nKn(r!b(Xd@YJ_!=Ay1f+qyeu|g@)h7ekn<>~ojO3pMpUx@Yuw6-YL z1$dadJkswLwlt~cKO$$8hnkRbCp*4v{jSLx^epSj6CdFf&KW)ZNZa5d@vuWx)xk`s z4dSbPgNn1+Y7;yJ#i&_^ZMVfT)LKo`4gvZ*-ySg?UXYd;0BU=&^>Tc1z{?qbHQ5f+ ztXdG9v+?j@OXg;QJRrYQ-HaHE>&!68U5=2}HU;iM_)hW=L~c8bm^3fQ1dpc(*{r7d zepvi`T;z6|%3JDox_LI#PFi+OCjxt4a|(s^c!i(H{_Zb-x0nZ*_r3!aF5_Llxy+3ycA@jdr?8`(-hvBRbqKd=Kg7E|GqxXGi((wD3_YkS#Gx{2S>;NZ9ubxC}o23@?e zi!R**Ej==f= z+X}`7AGnoo6&wCMD7PB#m9v==E^OF!LtwoeyYT>%r^$P@S^kzu zC)dkyVhX-h#qUSZkg1dpk2M6Y6bF+4Aq^}~$N*}V$V_4jMkptsi~%W08Rm!F0+@9@ zBU=h#j)<)`7$N-p759)eBentbgtS-U6Z|NPB5Pki#XK#!&tv!NxvS>c~hQTilZQ zaofWimfaal&;b=W0Y(cj5$FX%(I$4lKGV@EIfqJYP%eBc^HRUgN~*;nu=#Ynk;0xy zVz1Q-9L&--8vaB%Db-nEVj|qMPK>BPn|T53cN-?2D@8^1c4@ad@KSpAwQ&{>Y!kGf zrEikqM4B;iQpW<$0mPfPh77$)XGmWW~9WxDoC?_PmnU09%ncJ;|tAzF%B9OZn-QR--WlEPN zk_v%0%@d|Z-mC8hqCMEp>!#qIY5~pKe?|N-m?>I-WM#!(vGAi3rjLa{beE2bDdv$8 zLYbvPHR9VgZn%SafXWDPE>&?YZWt=FW5}I1;^;kr3~)l*|6&*6&GHUt zu*jMHA!;w7Q>o}o+Hqkex-)lt-r%U#e^$-`{YIxyQe{u)src&Hu$8b0MQaf zFy@_N3wwJ|8isrq^6>*jq_PV#T_s+=!+=ejh=FDf$e!rgLC=zM^B1q6%L3q3n{N zh>HR_6Sxt$soF`CR3`>&1=@FAG7x*m(k2w9u&=Q&p9^Gzyarsp=-EZyz<5!-|P%9(FH zanPA#vnYC$@e1~pSMvfKi1$xNo%+1fJ3dqwk<}igQoT(RLb&6SZq7naSEmEpNR1C@ z+?+doI`DH84e0i}qhu&`MN9WNJ{Bq5(^{!ZOt5i0HrTy5_2}f-b?2@NPkz=~X?q|>wYp0!BZh)%x%Tgx#=*S+x zi!*y&Ug4Q>3>szmmAtIO>+Ph2%S>lm1axMCh$(`N8bnV|?%WIxss-!A_<#1}V4A5Y|#zoFZcJzcQV#U4CO zFmE7(8xP!iOCo-669w;nsiXoYi=@;TAN75n`|RJE^$bW{znLuHA#2O}NRI8%8Xd&Dbp)I&$_~Afn-};k=tB>Zi7`L{ zDE}dqtjMR&j%0-wg{;&+vKV!W4n~Cfu)(&}(J`~2UceN_q0(U>Nc|=!w}pxY`ol>y z-BGAG`YG|*uSW6Gc;+>WYxJc7m6qX$t}`T6<|#gReB^VHgeFHHJTToiO`>VA2fOSe3c4OXbh~ zrqpKPa@MWQuo@ujf3)j*BMR$W738$Zv*mcOA!=frU*@3_^}+!kKN`pl=%#5p>Pq2& z*+8<00}w+PxV%{|SMI@;h%L37YTn5RX2aL_!>a=nXuhL2RQ|Ag>NT}k;T8WrGvcut ze#Tq9A5MO}I||?anHka8DcM{^y}=*xWhMM#!1NX_N>Y6D$KIsR_5`=_3AoQFyhgo5 zf;P7raPSVHPt*uKju7-OFp8t3h6={A*5A2Inv>9-%e!m6J_t&nzrtM1M;^R%ps8mO zZ~$*KaOFx^<92vAV4%;Nd3^H9jtOt@lYh+Y2vVE<7PrLoCnNe$*MiDMZ$^7|SwK|| 
zmX&b6MWeMYhGm)S&gq^CpNuy~FgehIF8U=DxPiSvoggpDSM0MPAGl{nn+Yvx&B0J8 zjn!y%u5mxi?pA8Eax}Qirv)od>O^wZQGJR>tQ@nqJj7F}JkVdRP=6(eYYR*_u996Lh}SxH`uw zSxmO;7wwCM;WosyLo4_`D;m?fa=HhiY=Gtrz(_<@_s>6Q5FZ^}jv3j$OeZ>jT-CQI z5zAjLALsP(9t5BYHgXcr4lEqsj{(l0ZIfSq0vPUljum#_7cg)h<7|Q_6n@KmK%?~R z@p+zSlq*UL@W%%Z%Zbwk&?SD%$vcQ17G8s&4(0~Q8Vq&UWuhOE5~Zaa+y*NH&Sq$- zo;H#G-Yydk-lFx`p4b2%M@)#pZQrPK{O@Y$_=*nf_qh42M>Dt9{3+yfb2CLq+-#_t z&J1PBEx<3fhrGow478PnCZwI#t|&UZsyqe#8a}vh9k-Df;b&jJuX{zBzm>l1Q&0s1 z3e_KDd4n{~{g$3~y7avhccM-86RixYaOC}HA*~PVyo4E5`)t6)7;*A4viuF0mH?Wm z&_}8%;zR^RoH+PvThb9@&_L)uNX#xg@B;DCd|Gq5PW24m>QRrCqtt20)xo=i&#}ng zfeVmTv^{pJ_8dE2nR~lG)kA4 z{4u!M}BoXkb=17(peiOcVvK{M`Vc&2fBpU*u;_5t4)FMOGt$z!R)DWh9}Y6rmQfCueiCwqEJ^`gmarO8D}GD$ zn`8w`&Jh4!@)yf?b6=1qKa==^ea<66Z8Kjo{E{j#AHK9APiW{RVLlabcq|W*cY&D= zv{c2~j`|Ow4Gf_(i_c=yN<*8?Br-DOKj({NWB8u_V*a#od7OG zlEO;`YjGX;M&U-=N)6;Mm^kP_@g%o2@+VC;of%_MDSirvx%{`5sSS+H=sA7yg6@&?@Ql`MHj$WaSnjb}2^G>CNN7`PZdV&jDmC1!feUkdam4 zZbjoZdh;(d{in13A2^EUo?(V*{pE@V!=WGrTtVr;Zv~WYbq)B+jwJ;hb{wS1VT9Tb z^k6jt-0l0d7{K3%V6LGopnw1qm3-|ZLU4sZwm<=m4$PNn&N z@&|aW5S+w>Y)0Q~dOE$31e>mc;J|LObYO6r&^`5R(UYaW5qtW+4KD~gf4JL(`XmIz z4>`gE7j^grDMd4G=iHn;aam4y33v{mVEO`{E6a>$_B86Z<(psshm-zApC4$j|M;am zPDkJVoTC^1_cS(Ods14-AaHiRS*~4_C{_t|EvtKx{}Rf^*oHdcYi?vKqA}SiX$e26 zG5VqngZx{f2~vzqi~#*RDxyb-s9<%>gXkbtA_jSUY{+*-#xK*TK-xdN1rc;b#yxUCwf>k*R|AYM$~zWnj4u;HCg-=cRm zJ#QX^)8Js2IA_K?$(saJ$+G-Mm8IF!#=6x>DT>ZiijEZ{nTt4#6kqNGjVCU)iNHPq z@s7N9krr2YEatpW*V73$fF}{NF((BGM zX-$3*(?GW;E2omB{{wBI>YJdw$%uqmIks87mO*X*1R2Q4I1)P4C-Ct&=1JnkdOI2 zgKHi9rGKg_h$yNcM;Tc;GN)^6nRYmr^GOC6U|zLu<6#Rvd#xJ}r~iZ!;l z2V}rKw;8bdhQ1jp#%29`D^=_aOW6is`LZo7n}L$Txy_le)Q{WgYk1r75*_FYMbsLi zet%jdnGsn8Z^{4zM7V$fVz;puXLLLJ+s7Issk;iI(yz%|KGpab#1st${oZUFj13r_ zZyfOt=(RqE011n}B%c1BAtG~qB?Boi@IWB&6?i?tX(T$xLc+m3)-p`XUR|KcvFw&riA|+mJG}!*lJ$seMdfk2Of}dlQ&c@PhAy zdkxywTIYTj_Ph%!hDhL(((lN`fddt|(7$(;PQKYj$zb9q?&+R25ewMt21t`0mOq?5kt-^vw4wuY9Feh-xi%to_ zKf!US({LWCz$;MAajSFspu}IRX~kG>@vHGIFQz5|MaHNMeYys>3to{qC@lkq*DA5; 
zk<5wshL}9nOCUoRo`@;fm6mCv9s>~@T5VlrWsb$j*3Ki7iTBThmQbK#=m#plf?kC6 zaPd@g{o*gGeCKZIYt9GbWYDNSee`V<5b7vSY?fAY1t?<=Vp`LcAvO>F# zr$G&HzfA@#5fHf8zmnJgBv$=GbszNw-9HUCJksc`0MCij>|TD~CkM%gB@W0d1gskV z#%b5go%8m|)hs6x=GeQcui8Mz4jgiU8r(rr++YD0{2sr<&V^)H|zMIrRtWZGf zl0;Mg^Ryhka1k2=XUJp>z<5yA>}9ft3@tKvz2Ud%@5f7d`UkbA#VWZ!4~Q<1?zh#+ z{BErmYM7-eZSN2RIu`MF8!MKQ;}6{U4{s+hL4yn#boh1mZ^!$1oZGAfa4Jx06L`bs zfC3jSFPe~pYJRC`%A|w<<8;31G^J&c2JOr3 zf3cBtUi`6ex^= z?fJ2S<_s4seEtBc{i=QaPn4vN^;1E8(6>O;*gCBo0}MgB0x9Y{tAQpqsH$ED1hILw zOXOF_$7=ft`$Vsyp>u{TU$d~|uE)8!tS+e`^IH`IlCG}&I}PAfyJK zsg^5GZ?mgiAYZY}euRa7S8E3@>kt}gy`N%?eU;-tvD>zrS`u}0+fJd|HV)R<(2O*D z!>Luw0KA_+jis5B;OY^az_~iKY95cwQ>5fn3Q7)==|la$<-q^=pMO-s|NS%sd)W;uJ4)FfcKnrOQJg zq1kfm9+S&wM-@+(GQkGg!{0Md1WVAKl}Q!N zN}Wv(wcBJs2&=+a+wb;zNt!%PNWcxz^UQ{ufaUCdXIM|mhol=)XlK^K%;^mZWTI&) zqV{-fIsRSOm)iNOkiBi|neaGW)6jp^pg+Na21*`BFEEU?wk5tx8?lTXj5{8mg>PS9 z8RBY~U~@IVEM2zcMnYl;tQ~>Rkn`tX zA&}pYn!2#30={EQWQlL(pOrbkZx{HHF=#}a_G~@gvlE04su@8xR=^Ke^(c2ffwJc?kieg@%yu zzE<{mi{|j0sVQo2sss1YMRmM2Dg4V~{%@e8QNMzt#qEY!S6{z3m>3f@`w$Ea3FUu| zQ$|*$?Fcn|!j#U_l8xP-u}?b;I1PBj39%=}BUWi@$Wi6YPw*$;k1&$|92EF-L%Hk>Z5)^ZoI^<;x&AYK{@b1Opa1iJ=3Al^=jIn! z|7&=Z2I7OBCXU?n$Gf-QV@n9#pC$i2C;$2(ReI%)5q`5OA{w`#1*RJgO_GgHJ0vsf zKPMgh%i{ctP5b?C@FR@szg^USy?Q8KBf`QE}YBOHEm=x!O#zIkCt9sg_5w;8W z?x|N_5!i#`zD%Rr;!h#6Q#NfkAQA@9WY`P_$p^j5dk~BKVo}?D`}JGAYCQFZx9M{Z z+Fmmlwd?oEXl414(ugA#x^%5Yr+`K6T$+FtHeTF}0)=JkpQffbZzRw`o!b2$B|<6T z_K~@p3zw@yUFM0aGgnBY$Sf}952*V^WsmLYsVbF-7cRd*#Nt=NjlB}5IpX0u#bsw4 zi(3`?2kLLm9~t#0hczkX?CrSi>Pc_Qv9qzwSEMQn>p;>e=pwgB)5D3onye)bZK=)# zj3D9yH{UcrUv84tb)20XOnJoW*6(;$lDiKqRmxwg<+97swp#U!#Z@mA7FGzxicFjc zd&)bc!A#*z>WxZ)?Xik(=YUrkF#vThgI2M>nrsmQh_iH~BJxasK^f9AWxe|Xs0B6v z5kT@PbS0onzjkV`t(a=}n-#zUE0DEN9+i)m1mSMnTkMTqy?F1iw!SXfq22Rkg!<`DK;1yp^;3!dIX7@s*OPPc*=?J$PrYCQkIF9(X6w}! 
zLe!!c3oH8D7TUMrBWo&@9{#H9F$t7X3p5hIgED$1JA?PzSEjgh*gxcdC_2-~1RbI#*=33dq{(4K?cTh+bWa-1&Zbqj%m zp9_=|v6F>-3=?zn4ks(!(Ca?y#YhUPXdU(Cm6e4X74vx$Z?e(QALK4IMPc*Ih} zj+p)?d3-lI61%bC3o?rDpHdYV&@Jhq=(K zmS26b@>3n(Nw4PXA_1FOmnhr0Yp12IIg8d_Mq~X2*_yLLy3q*2XM3<$k1r*2p^^++ zq(kn>$T#7{Era1A`wzBmE|iv3BRno!M;YgB`1Xcg1bB?$ZSXy`=#7#soho>^SZ!tAKO36X6Cb9yc_<|H14&u-zqkx#$`u2&73HxAY)g0_&g%jRWx)SD0z8^wZI^$M*#l5_mVe1P`p0Zhbr{l|UUO3~u^_T-UJG|=0Ei-GoyRZZ6 z8J;YMFKvg&*caTLmUHLib#-+%yTJl`0V3Dr@LdtKoJHRcxGG+AOO`^jrDFW1n%sD2 zuV8bwnjMN@s$x0Kd^LDk!m0?OJnK)C-*pSs1&L>H=1Ro*fS0>A=YS_!cssH8VRxNm z+djM<>JvG?7fvP+EU>E)J|}egVCSnFe&uAB@FsB=Eac@$NaT&@<6&6f$9z}_tM;{F zVWB&+z)gAI($UNC%k-T?WoOLD!&@Dyt84zj-$70}xX<$Gvi!e;8C`ynA8r$Gal*>5 z;gDO~srZ?bQygqP??}g?a^7A3Xs_a6ZE-aa#JYM$$gXH+K*_+1v@EQitegn(H$Szm z4%9>fM$JU+4n<0HOzD~&uQ4FruM%ICs`q)a3jBvk+hoDd!O^+!bJa5h?m=&!yAXUm z@mf!HeH7xo+00J?ZxLoaWY>ge{Q#`sE50&#QeIxZWCv}3U?yN^cc{nVv2t3j=4nfy zsw#{OArjyr6LHysO?*A%jxhYOA*fyyJ24xmk6HyH-zuCW&BL*Gce&Z6r%2sq$?Fb8 zIpIyN*SZsMb&0f38g_6F+z>p)vt!p6!-ULVY+8LAD;ikZi^SSblN0VdWEBi6(K$Sx znETF#Va(J(LH)b3bPy6QmUy|{Gj6)Vd~~+NObk-i!0jYr!(l(d)%LNPURk`tVMyQE zn?Bl5vP}jMhOCmk`&8d=l2U~+$8|2buZ~tlT5qj)o7xlWF8I8?D4Fh2GAYE)X^=@x;0o8sG1jLTG{Rj$y;dG((U9w-^{;*6zIK`a2oMaM(myS2T{B;&(xd& zrS`tBr>U0$OQ~+}b_&KRYwgbkJ7tO9lk)mN-lkn3Vzbg+eDgO+(f?JZ|NY&k*P1B^ z$m1QI&(bzW^Ps#}F5f81Y|9vy#hIGE9Zbis4QFRDz8S_#P5n}4Jr|C?-PP*tppSXi-rV`_a{Ab?=?NOCIjP0ZY6J>l ziG_=>M|ZUwLp#F~5x+k@_7k>^oY{SG)D>c%tpOs(-Ca;Jf-ICyjy%tuMcL}$>oP10 zg>!!E7t@^TuS8Ne(u~(5jlL648DM_Zg4`!J*&LR~DT7tYyLPlRH2PiT_mJycQA%_g zzcpRg%U9-Ji53cat7xaRM(?QDAqr!0JPeN^C5Pc)_&}Nb zJKW^k4Q>Tj*)O^{z-IKKS{6*R4IV{jTWMZ984@U15bux>(n6*pHm4J@t%*7c@qkeO zurLhB`Q~#5mD~?veyDdKQZWo+mslTEhMR965v8Uch)ykbz(01RBr`K31Dk{dbs)m0 zu2XD_FXds1LcOXG{Eg5&b^Xl%C0@;}kwwp-T8IuoMsa!XCe?G4ZRcp~SMNqys8NPf zpk^sqpT=*zPrNS2#PaarLp@W|pc@Y)=<112JclU(>I3@R39$1~`QE|BeaIdNkTZ(O zf-}vO$>aT%ZS#&2^k729;&PeU?Plu?lc5YYPR^cTyC0*gzx-T@5g=%2Xz1izxFI4< z!7t9a{JN9au^E>-f@7cUUDp$_TRe2Mn29LMq+8gJ3V10cb@wv~;m__FX9#Cteo 
z*9ag~+74(oOD(!cq7Rsg)D>Cle-L^*KOLguk=R71j=&I)#d}uUk6jQF_P%($;O#59 z=<^lLt*K^GZ?kjk5Y{q5URs0Ij6ypYIiKT}+_%mSBNI79Ir5&;#t~R(YQx)IUzWYeGqd@XF5gD@y z?OF3>Q%N7T3j)Ls6<@ObzHFJn=qGg3gyBZB*Db4}{_SP&+L!n;^ld^rN}aLH)VI=B z1qpO7A>a6(@>BD$NG?}k4Jp`^#vy4<)XOJ7b;FJGVWAQfME7vTXo**dVlzK^kd>A7 z*{+5sc=zdqG!AJ&qN+?U65oUX;zw2iLOi^pgVTvxqK*=~P`?S=)Os6|GIn^w|cd%t{XB!0qohibX-oMaGdl5WD4Wp*vCPunCef#t2 zx-=6@Z{OpI8ZSa}5tl$iMuK$HsFipjbK#4l&EV+h2Qq2w?Ba<<8kNuIgb!$DR{E2I zDl3)alce>GjU%uqGK9BR#UJ$c(R}3RW0;M+n!Lw>Ma2@^g`{_v)M%ootp6qP$2Y@Pp7`fAiO`yD##nCo4C+ z?|EIJN=q|_l5ykM+S=CI>|0dlu|oFWWyo1%jVK#u7U&unebPKM>6@iFeCCGZXLTd=(Q> zt_$f_mWBmAhOoNYacw$JfcEuR16P6k?I=;8c8P$K_0mzoQaqa{31nXTOrQ`PAN#EH zAQJC_2q$v4Y}lH8rBC}ql_%K02qEAa=V0R=WZUq0!jmKRZyUW6wd#fHRYcGia$+?s zLXn>m^PS_-)@GZ(W1c=1Grap<=NheygZ!a8VV|c(5Ths|4l(}wrbeVY5RmL3zZ%>= zA>GafgvqklTg=_7Ke2f6lVdLZ_4Y;Z`;KHq-8DSI_43W@z$O>{?qfe5gXI-a1}aTY z!L}h{p-07mdvF2|u{~;;y%`NnD&IVF9cS>B)`>Ugr z5?X3{sPXNa`}dEjPi6{M?z)q|<$K7;D;Q2qav!FS9&-BmO~m!#R^OUhY*2Xk2TLn~ zn1QWBzDFxi{YIx<>%zIUfcg1)WDJiGhs|sGDk<&`McZxz1E`*PL1Ju5N+XpX6l$Pv z9QR^*xBBZg$K9iGTF2z6YHQ!y^v1y8;5M+CDvtd&u9@rj(9`RvoN)b~Y7FPg6AW_U zx`;bSrx~h!uKt341GEH$@VoArNztDw;Fp_Azcty2UtY--eOa6_#&wOg{hT+gdZN;h zSrU^X!k|Oy5i>Kst?hoPTHTw<8YgcVTJg8a5@X59sptuW=ZE;uz#J~^w$AJHSXY{D zx?jeTnwS`TxGD!7Pu)iLq6nAO9BUorW(_8IQtYIWEw3BcVQpr{z{Fg{=CN{D(DjHF z9_Zi8+H=)vBKt4d{QiO-lKip`_w}yeF-oOs{5G5AX4w9#&T~{k@eb6OQsamfByNL! 
zn%LD&i7l;RhxKW)_jD0?_8T2Upvv}Q+ol%=SR=`#@?&x&pl815wtn^}IraJ6R9yQi7#RE$5##!@$Grtma9mHoKC!}M|xP(anm3-uOE*Dp?{z`bk{n# zYpW{xdea`Ad=g2mM7y(>xVd$BSZeiB82ZIphs$i@L0(=SpW`BH*!usGX%mkpL>(-*7P>v;8XDWZUr}IkFy!TD86cW*2=<%3@Rj) z6eF(Jw0FP6c=}PrNGV6*B7a!#WVdk@-5J;qheSI1vj!R?qcH06j-+Vsqmj$=W~ zQ@&WgNdHD|M*dfJ{P6_wl`!;Q5U&m*J~j48EurIbB>#1!CzT1aFn={oYbF*&cMN6 z_;_GEqvv>Iu!-??ig3DvYK^*k(DGMy909^-I`f?`*_*vSR;_ev|Rf`DrSG&~@bHQP`YB}km7bS0G zw_Hb$ihV>P&ZyTmXe1<>V3Ws;3-j}VAoZ@N#Fo`iulrsQ8Fokep&O|;+j4`NZM%8m z055*aJu1UbJ(Skqc$eGkM_ZpJug%efJ|H(2&Vs0&_pI6Sm5XC3i?%%FGUSNak8k|c ziCHx&`FJd}!U(|0P{(qzr5=}(k7;NYX6o4*N4hbOpYKe6jZCB!2T8zWHlOkW>*&1< zZF{4ell^OvKY{4jOff{qD%046ogvrA>evV1{ZPkhXVsGn?uz#LoLnjtQ^=?NBsQKFqVEW&4 zr|$jB)t_-gr)L=%88#O(kNh#oF^uVqoyT*-E{;i-T13w;6Qtc@I(pZeTk9ps)HSDC za@=&a>fFh@x?Tae*d9NGr?4Y%E*=yT*H6vOeXr5=B%rbJPM!V6dqI~i{N1)=ky^r8 zZnJcq6n~p&e*LxiA%Mq;OXeCb%vMW+25qyye9<5D)+ge!;Y&`@R>mpj=qsmcv4OA9SDE*?gv%oNkmAu`ysQ@>$DKWd#F@SJJTjVujrgzouAAuxwUxl z*4gjBf6=8#uUJduda~x@gK*i@9|ZOpaY@&ayi#QqKUBrQzqOn!#{`{K=jZfB2yz@R z6ik7Lv@)@o(Jl?-`=5ZGrx+IL__;hg^jjQc&Pk&Qst01BDSGDg2JGpLRLQ zRu`jFc(S2?cYp}j0U2&;x-TE&!tqvritBu9^TFv&qLda96)P?-uJT6kN)ZVa3k&)6 z{fqgdpU;MF3fQshby#(UVG$yg66f#m7gl1ie2~JQ*&iV_cpfrL%|;;~vy06&`K1&w z*Ey`CfS^z=SqZy;oHXBh=iD5#TpFIE-EXc$Kq|sG-sJXIW_W&RRJ5?Z;D>~DgYdCp z=ap|a^2f#tLIcAF#g2R;UPq31%iZ^!PQ{40-yy!HzaAFKnTZ0EM8EAUFIT04rLeGY zC~F8SS3aHNb!xxP>0<*!iL~19JEznazx?vX3B`Q7Wrb_5;<@ziC#NJM1nSyBt;Ovr z9mBhA(}UpCgwmdQ^fDPB7C!(vTgJvPt*p-3-VVN$@Ct0}`55=tWHC6)MPP8~2qPUY z1sExoMHA9e`CnVVK~^6CEP8r}x@4EQzKA^!e#CXO9Vbj42EHh}RqtC++7S&5GRj;w zU6Tcm^Yqsye4kjw7?b$&MirA zgZI~mguSgJx2g2Q2gM4cjUJcnt)jv6m#1vTI|G3sA(xA*Op`t>X@d*j(awP~R zKh}^YR;r(acsMJ1f>C_DHijlk!t3Mb_w;qDFhcWaPt(~sYL|tM2?KREf8}nl z!SfL4PD4WQKxGUslv1_R?czxE+BHlc8ifY0pNUmHIK;##&$wCaU-c+&prxNNE9bJo z0nsNK!Tn!d)Sx>JP|At8^Ihlo-XopY_F)*>8|cvAjiO=V26}qkggUZzNn|_h4us%_ zU2R!l<8MrjoDx!Cu2ZO9hj}O9NKAbq=L>&T6&<{b_HMrk-i5OS0KdS46U>#ik#^~( zy51u5GlyBGj(RURiisrLaJ0l~2^_lfpg9?LqnQLPebZ21|GmC==VzUn`)s^%kA96x 
zU!7bs33^_!Y0kl?M|WI-FHU=BTSo`}lP6EkHrFUS%sI-Hz{I$*zEpZ)zcEPm_1&_V zy$sh_vF3Zc_b(8B6}sW3b0EHLzVVYMt|=>P&BeJDKQ@~MgWYi+vRZ0!F4n>N{oSI_ zE7@KCwGs12$tfxO2g}k-I{a9t6yNFMR-!M&{+bi9IELBkx>*IPqwvVdoO?#mVLaI# zrj|Dxf7l}y`cMe@Rw!yUY2tS;iFfRVl5Y3o0pW&=8S1sH!^Sb$=WC7p)2G{8TkP-7 z)Rpsl_u9t+VY*F1{@HZTb@q_^P3^ z9aTL3FLVj?cKT?2H~>NB?SG0}9#3_lzn>Xe9d;-W9cuRfeFD-yl51e#cX_@)+aUh? z$#>|l{~X-$FF`Hip-JEGCpdwDKn(iy1sfnZ$nCQStg2Mfr-qi6nGO>s=3S>oL@k?5m8^)8SWN6z$iyqSklO_BG3JmV4CtKUH?wlylriU%$-m zD>J!4^~tH%pLy*Z&GhAX_hLmZCwyjs-T`-}Utz=7=Q1MCUA56wmPuzVV5Lvv4x}?Y z)fY6LMhwgz%7ya8=2z99&0mXvP|41FuTCDgf2^uBSM1l z8Q;s2I=24&sS;2L4j3^pGj_&$ZjV`bvJg*sKlerWjjAcgx=h`S^X z5+fJWg4^pONSTsXSdAV?dOrg9Au^2j%Y#|QJ;UY*I^KTjMcu=a0EHQO56icr0u7>f zQg{w$A1y2_I4tI7a)eGOCNLo1S7Rwa#7cU5eO`1i1c-w(l&Z?3%t^GD@A%%z-eQUr#F zcV@o0We~P=bQG16VgNTe_iT4HAxe0YXe33zi{yBdNB-(kYLI5)=ZdIli8cDCro6Lc z$ElHaH10-ouSgFM79~@PK(3*x-*KVo8KY9Hg#lRQ)@ZO352QplVE+41dtj)0THeu7 zdR~hDIYILYz{zcqnF0F%Q2dUFnE3erSKV1gMfJY@egHv{5@~T5K^g?CxGYCu{*8ioczknYZN&+mWE^ZcI^ug*Fz&b(kP7dSI(?|WbO_5FT7 z*WUg%BTN}4fsEjQ8gIO{8!HY6i|)tGAR>0a$jGqVp^Gur-icIMEVWhKPIX(^J@uUYoI?r`b)xBN)&K) zF<f{YStwspJRucVzXP{<~=`mpu{0Y)wVlSgY1PkgtDFl_+OuvrAhb}$1$g4Wr4doIkzrF7dfO%W0|!a+zlj@0_XdmuSX ze#2Zbx?#NaAQb>mDx}+L|LRoc+1Fh9w%#Iq{j1SiXzOfi8a6lQ{@TLkLqsyF^=9GK zN0iyD5RzYED@b}f}L7AGAevQ3Qv0*i(zi;q+ za3y9~B&$^p^E*vMP*pN6frDILS*g%09;lw9>>d=<*baPj)*5PYH}=IvQ}wTkE}ouW zTDPWA6}Cs*L7{eO7KRy;QXORmSeIB_TYqt#oj}c_#+M&3c{; ztiOQ3KrGc%p?3t-s7FFVEHdXt_0>P(p8IflT01p*Lop%NL&XH^qXkhFOQD&HzfD(% znkbM@3}^<@Bsx}}Z0w$-8bJP3rtVz1QoLl2x_-F9vGlhlUeJ zv)=KeFylen@QC{iqJtT)9zPU%;E&zR*#;aVV&_wT(YS*Kv&qlZtE1I?;M%=)VpOIo zQ{^~Dc(zZ`a(yL&J@8DEst)m3(dAMujI;wt`EuaVNz2Tew$-OPyScS4${{8ioS4R) z4rPwk`GzXsI$xN#-}%ABTV}j??vaxhy7;L^s(!!kMhwgz*g*j!T{a!*a-&1Cs`W$$ z?wjeU#nxb+iSo4(FO2=MxApFO9ic=Tf^MG8ro#~l?d|6D42-n2v{FNBLt61R_5IR+ zncPxG^bDpKP?LDhC)@M74TzlA_!|Z*gr_@F;VTlfqL)x|&g3Yt>18SY&hk86HSS9k zfof=IKzLCUrKMi(g|LybA+Q9n2PXUaRP5@MRaK?4vPM{T4uP0jTq&_z9V{(cs1~6t 
zCj$1J$&a00()4B5-q5$dO>llvyXypSWEKtgxg@HD0EL+;eb#dac@wozfnWi1g-mKN z>f$KS8`O?%oNfpN3}(W0&=(-~e#6qA0yEvlKW}|+;I4;TJa_y>OOt9p50sJ@R^pE- zSgSYVf{o;(fYU2r1mk#~9}?HtPy2&Uu*pXU zHEIZ`-q5Dw=g_t=2FXQdiXL9MTZgnE(pSn(B;!fh(FZI^ACc?=BD&EzprG|PiqIQu ze?K9}mgL=3@H-=6g?oa*iwc@5iaC*#JhpU=%CVgX6a(c>HykO=&hpXy>(y*kHCR(q z_Y?|9v`Z#}j2ddQjHU@scYkAzml^|aJ04lI&xDEZe3j7V7PS1P z#c^|{8v9pr^|0>vsEzH$@I?(@Axhd@i#`&1K@v*1gG$I%hv}t)T`x@>{&6>og!9eh zANf2&BeJU%;5D?O@;H0qo`mM9!OT$G4q-J6Vlc~J33-|@<5X`Ev-J}Qus7sv^;yBU z4l$U!6eKMt`k`+G zCr)ETqt}K0l2>O}m)YUO4`#hqlA~Be#M0(w;dj_sN~1kY|{P)676(K`$z1no;DR>t6g zRlm0|uNGql9E|&*UdZU`=8s?H^0a%tSnPtmPCWOl5O;5=@f>p7;)}q_GtM3ytWA8N zhUx$HkGa>J-z70Q1nr;AhyHMOAG>B1$HRNpcjkaYGt(UU151Hf{H=@2nD^EZTs(`t zA%ZEfy@&fn0qJbRnd#(yFy+$;iu)h1AMg)46cjCXgqaN>Q&(rpsKGfJL>f`3B=FuP zVT_UVoKhJu=75m5!iFwL& zTB`c@sqd;aFkA2+&XD*VJIai7N)%o;A(}ex2sXmt7jL^RPIkB~M`cTFn!Jzh2ZkJW zf|3AizsxMmp(8_#*Si_f<<-9eO&*_{{f0GXDlNe@@;t<1uh6D$AP#<4)f&m}`7bt7 z7DIgWKWm`LT+Z(~!Lz)ddK~7xBpN*_`=I`i7qr}CMP6vVoxp2>uj~^mh2jw7&4p1@ zgxjG4wc-K`HL}V3)5NerjH{9^OpS6flxu!B>vDZbrIIE{a(qk}<}f>pXFB|aOJ77_ zt+>SDQOwW5JU;=$W@36uk|ze2LF;37rt60{CdVmjr+cAs0XA$T%C%1EQi86>q~6Ed z0n>i-ooWuk)*s*L^KE2($)jLYq4{7Vp(a^fILG{|IA|3admz2~07R{=-NZ{3aFq7p z`ZU=<+D+(CHY3E}+WxDE>oL8pR$0#J+raY(Vz;9#U)-nf;1q1yn1Qz=+CzTXaXEkD z1X}@aL7>)#kWCM4d1+^4{x&eKbl88QDs>cOWgEN7iLcN_-QHnqFw=;lczz+Y-^gHT zDEs#A(OQ`p$*;aD4%8WZWW{Cf`6HKsa%;myrYQ1TJJuNb6iv$6ibGy$8qSOqfv+|IT`qD`C9U^g(x3E86IV zzc#MB=SOzMM$PyAo$-ryjNj+vkhRVQXX?yzoc_HwQb`siDMGO;>>y?zMeAl*xSoYQ znOc-Xoogfx3gN{T@mQ(-?l`a#gs;$pr@)pfaz4jNcovkr(h@^u+`PQFh{N`btuM9d zo=DAh*C1{7%edtcJ||vWzy$pHLk|{;%HCv~0+G5(sPN)5FweOrjS>&Y^)P6Ph z?}6)JRQ^>_5J>mvjxU2G@jIYh5#1(BArK>Li8fMz3O=iVZm?c8`bHwL>VL29H9xg! zbpr;_SVCM4cLt(V8h%TziEoDj?JIEfdku}`mGvO@SKQl*sSe6Lu2|6}y9%m4-^@R? 
zC^;|wUft)hwy~GDx5<^fLPWK)zr(9gnpqzix@hL%vs^bS zIg!DpqOvULJv$kD`(jYl#POj+FsuHw_SI+-6C0b@wd*(}95-{TW4}&$c|AeQN*FkI z{>Kg?nE-Ei(3`}VG*y3y+!_Q+!FtchdbSX{K3bZ`^*r-b$I8-@P|}s!i8lH*NStU- zuf-16Py_&aS@c`VD{Ravcr`132bk4nFbV0LC<%uiPO7QXJ+nOK>(aUYgncndbXse2 z{_z9)IzwD$V$bZ?o3y+(MxSgk_<5@|9r0%G#_)g!lUx7P`orh&VmA={B_t->h;@N2 zl$X#Hr0m%sKS3)tv>pW}+@Oe4mBxRnv)q7z&!_CT3zhT^$WHLXEO)Ic0=~-am?cV> z=QMerjcuJbg4l*yZTO3m=~;&KLSpnFqGO-*9!#zNV!&wTy9?Rd?b#m@$vV#wjg*5; zjwo`@sm_PNo$q*f(r!U+6K%=7d2|1gXj@KMIc(825&tO#rP^IIQF3zKzgt%s&NefP zXEGKR0fe<{h|bfsB!MLJygmBBQg+QORVl62KiGCYY^gPBm!E=?MYfmrCgTI98R+RB zJt06>vhHrhzGR7Kf6ct9?guPHK|d@k_>@;wwKu#q@jT(cA4n4mPBsur{Sd#LA3PEO9Op7BUrRw3K7;y&P9GWVNi>RhwskMvhpMhL-*xkKa^ zs%fR#0^0SN?_LDkprIr_qrt_+&FV^L)8Wu6c!7m%5~k(mz8_y^2;A)0-z IFlXE z{yLxa$U$1_2@n%CUYuaL%2=-&q#h`tTmjdNU){LTTKm>YR$l%QH#a5;Q$%Y6AaOvp znFclx&1Axaqh9mNlTBPmhEfd7d2d)~rrw zIKq17RN)KDfl&w!Ge190b|*gYoEEaPB{J;we(n5p^Y93Ea5Mmt$?no8rc>he-|dpH z1N{aB139PR2eP6#<{G`_TGGr+C*lX^ogzX+{3^hGqi z8_$|v*GCnMR7YhL%xae|EzdWHD`|E@Afn3Nw5+Vkc3tYsI0Ul_p4W>Ka>560^{T!+~-XY=l z9JV4jj>hU-^T>)SS7Y*|xrixXcpy3f&(>=Lt^bgdBvEYD<#@3qpm2Z;>@Zd!Vw=LK z;BTKX#BAp4jsEiGeaW;ymPK!kGpG7Jr#WPgmfy2^WKhkP`%f#d^!DZ}nND{mNq8L( z#Q+V+((?@o8UXG?hm(sP>6p%601aHjpWg76fOIo$%FD`H&p362y^Kyt2?G5M1_bOI z?Z1*ygH=DH!mfV-;Z3@1J{$6q74BQ1vt?svHJaE8%BJ6SU3Px9akEzhNaG$et^jQ+ z9*H*0rPgl|5vdI!8-*RN@Sj&?`l9P*rnvTKC=v7W8G++mBOyHH{|FJK9xxjA-u*Xk zqkHg_T`3Y>L0QB&zim{~S8!=Mn8D=2NhzQL#1BGAgy%u1BHJ5A3Ga$Iq!Kx1)>GbK z-L2m|aqWz8gBtV1?wLx~){C8(?d>VLPc3p*E-67sD@u@696%>iIP3!j>%H$$`UeY;k$m`|#5(_6Nmcvo zEx<9jd{-%D{n(kc2E!&)?;119=;$7kr9XqFsMo5(c{P2?QnN>umah4T!>|fQiEpR{URqpqcdHRGjgB+}oHc}y zihzPb2OS0))RqBwj9fLcpIN&~5c3l2Wlg*?ljY!e4L+>_qAZ*%aX`=T3X=f1=Q(vdS`)MNB5~8 zHX>ET$WB~DNVurIqezDnomzSAPhfmPne;60Y$faLvcA+Wa~%oZcPAH?$lS2Ev0>o}Zjcts+ovCF?-}inv zH=MBt;vXaPp!2+ajaN?|Rj-;_3cvMytL6=qsQb3Ed{ojMK&iF1wLPjRmmCdMw5B5s z4B39%-kz^of0v-xU=Q4QmGq;Mtdy1y7D#hbht={xTX#i!Xn9z4z}gzH~ed zntBHyI_^zlY6q7LCzz~(&r+N~x-~pDCCd+naWC=%*0s-2g^W($nT5EinVHM}s0%P< 
zcDbNbrl#V|$m|)y_tmt(vJFGEU`W0&rDK2cglBesxVC?!rio)qTDmoFNmWH9C?|OP z1h&q5)zdfKswaCe0uVd736U`!-jEP*^VoyDtB|G(w=3@H zE()`|Cy=#a^Fjw5+d};(4gXLHus|EY@cBL+VIIc5Ja@Y#R!ie1URPiuN2lw*leddXS(Z_zV7 z;_MyI&!uKv4*InUy(wd}3Tvgg|x+yF}19m`g`!2dWTs%<~kZ()BK58^-XXHe+Ua6dN#BFn2uX|h_dum%9gdV6|Ord^-h zk_kN_Q<9P}fRn4Zbe3f8FUP#I(ua?@8RJE=+L|aFB@TmX$xa(aw-t;trCC%a2~AnVc9-=9-$4sRoIlWjH3_wrEbJoZjbnloaaU>Hkjen$ zQSfVh%)cp#Z-BlF<4E-dydTSp5w~>R7a-~HHs_Xml_$X)(~SA&V7aDRCS_Pe7KF9j zjhcsc!`djS;;4*qN4v%8N=`J1_!6;yJdDbN*;kM9Npo4BiU%b3d^~6@J#*ek?<;AE zR?V0A`pQWuXCE~uhj@4h7N#J2?E!jw5t#a*=cb(?%vVbx&3Zz}J#u?!bk5uY&M*@x z$Gq4x+jK9EQ!gB}^rySD%H?Q@7G$2=oa2<5=Wqe~c)uYJDb0C)zw1j|f$j8FZ;5iN z9go9(==;tHgu0i4tMKy}#{(b~d*jm?U)R@L?<}b$@8bc2s&xO#6%dWUz%b2Y@7CSH zaI@$}Hckj_Obuv{*J-{D3JzWj3ejP;eH^W*jx=PLX>`9DX-Dw_z>Q#})6RBeY#RPg z*sIxUrQ{oTYgg}ch3K>U)I4MS$I&9SsjJ^V+{--p70VWH{U6sbuzF;M=sJcf6^t*r zt!zV@H@+F00h%qZ(NC0e&U=R(WEh&wW2FKjU ztFzv}%rCy?FYvScWa&PtFe%WfC01tT>%+y}3&TwM^vNF_j%?#}DPXGyYOTNba*fY; z1wCWac!l)EH4qW$I0nNKv>Y6S;$Fte_fC!-(Cf474#1Z9Ojpt#)%-{&>YetJ zY18#TdgV1W?_}xD5ix1R3;+uPfl@Ov%L_p6U~Z1_7&(T=eDlCerBzc zUy3kXdM|?>U^iD61afnYj-Wt5C3qb|mYA+FAsu!JqcZTTgc1mEOyGc9pNPn!=-5dbggU69hoctGkG%xDmFkm*hNo zH#i`heeK|YmA(ykS=;xDeamDOS66py8~bkI%zI!6kvh#a5}#_zAMmeqUNFx#K-bDl z?myN-Q58mI%sJY9{*qG6Obw1j2AbM#?&#|JF|;@GHrRA<5Ps|SZ5HnfLVuv^&>SFO zLGKdc_^QG{CuFd;l?aTj5F%(eI2G{ABK)1!`QOTeaw&I2fo98XlcwWRgh??Ia>pQZ zj~UrE*fJ9+B|Prdzdgc+=<4gz2?;4JqlBl>Qbjho@3{7`N@~hsnZvsRM^!<%mWSW(xPJKtyt9x{?>V2B8m#-?jT4wRG|EvlO%oY~At{|AFAc z{{B*C%@q+q8j|G=;Dq_<-ClR7&;3_qik20iDFE7{a`Tv7$-0kpQ2aQv_q$SO<4B49 z+6!%74VuUIsnuQ#2vqngo*ZvcgV_~4xo}&AT+9R_c%Srie@UqYMa?-Ls#`bW-bdV> zwD%PZSJADmTa=VRM^^r~R3q)R9Q40iF9)Za&xR~^xk1o)A^7&16p6!OOuk$*J2;IKhU;BQ7rDc#hPrI=noL?)WCZ{XevY z+69r>T{pKNeMw^2U`X1GZA>fVL&#Ogv`+G=4P!_!uS!z2{6W<+~Hn+AaET-jW z8oi+YzH6chiL?xSWR-rYAvxhXODn(Zz?=m;`IEFzMAPj|T`r=_i>H*JpBvvWDOZ2^ zP|SnU!^7j${n+=rV8g*Iv35XD+bjp}5Cv(5%nobRnENqMrHTe*= zZ|KlYRJiiYA;)$y%NeD)Ty_V=!7R(`JvxaAT?3PmxTDp%lK902>AlP+v!kP0Xr~XI 
zMRXnt-66p101YO@{Np=tO-t{YGE|H^vF-o)8R$x(y43zN+Z$~8eCS~LVqU@TezIeK~s;f0Bpr>RC5m<1x2^YRk%-k4^geI zpSi9dy0D*{ah#t(!5~@K!tvI|sn-g92iG>pqyVbf^2-2KZr?!%)-FLq4NMxeSsX@4 z$Xe}G{I#0W^Rar~>*#C@=knSvCs%S>o$VPxFzUx|ub+MD1H3>EBk`!%UVeZy67V}2 zMK$lt+!qz#T6a&Z2b@|}8faVid3ZG4%+X!K-DNhD#T!ItJ{J~tue|oXFbbn}1qA)g;X);*J@M)bF+dWz zfN9DIo4Xy&^`lx_M_$UdthuT;Fz<`wP_+{znz%=#UR3tL6<;q9H=UoF{#%g}&U78u z{(FZD9B^;WT1Rtks$3Q7sGlTOrc3ouMHQ76*5({f_Ip0(>i_8&hjgNQo^~sxDT_QS zC26x<|KkcGqt54}YnnCfJzj59@lv1>v($K%x-^NoGD zLSSkEJ#3%ds?8@c(Ktl-N|~g-K6q%$%|F4W2=w{45zo(w*)hqT-K<()-%HIKTi&%) zy&9&cVNv4*BOYg!!&HEJ03l(yMq67H)PM))5{z4x+YKBKOj&7f zfXOTU8td$Ry%%q|y$?yt{E=0~Jxd^1F}-fgS)HjqvdTHAvs6jOQ&Urm9oPo^0goSe zfqaTrdY&X5?gzsw5a32vH$fy!Lw?d8g(@Nl-3t9nW# zT$oM#z@QGdSLYI}c^*}*pKBj~Cs|a_A+hdJz(Th~_*ulKkw*|5g6*86G2;-+uqCDX zLlX#A8N1oK$L3s!iHy8oG+}?HKiqn&mJDowcZi63Kg021d{5h~xVWbLo;$K%g1}&b z!khWBPK7VZy?QsQuKZigi!*kXy#wP7Q}&O)&jE7!z@o=D9Od*%iq%~fT-=NPH))*S z__^(rrFz=?`>cWSoW|y-zJM3YUo#TvbpnNqI)MDw)B-R;AP~SF@qh#_plyv8AnND7G4`;n%ESF(r zvALaH=;hhnNA$AQshl7{00tD7y{v@j(9)60`8V)@^EsnXjBY_Dw|z4 z_eE^1WCSu>e{Q-`%mTJX%v-=9q-GYB% zwA^y_s&QO5m|fEUy#2rZFem;0g{l4Lnd<+B2*3Zog^8)z(#LQ=5K0Mt`yVf=+Nx14 zut$bDQjnd|5WZkir1y=+0KbOA3kq!BhxwL`)L2HUI(}mp|1b9x`sdTLTME!mIruNH zFJ}MK2}OU@GG)qty|WhjzX;(l_5S-U8qt5K5&7fi|8ldh|0i#G31hzh{cho;#1sU) Nl;l)pOQl~2{5PVGW#j+= literal 0 HcmV?d00001 From 4cb56482ae0f866c1bc688d14bf45057e329cda5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 16 Jun 2025 11:55:48 +0100 Subject: [PATCH 720/802] fix: [#1579] clippy errors --- .../src/console/clients/checker/checks/udp.rs | 8 +++---- .../src/console/clients/udp/checker.rs | 2 +- .../src/v1/handlers/announce.rs | 10 ++++----- .../src/v1/handlers/scrape.rs | 10 ++++----- .../tests/server/v1/contract.rs | 2 +- .../src/v2_0_0/health_check_api.rs | 2 +- .../configuration/src/v2_0_0/http_tracker.rs | 2 +- packages/configuration/src/v2_0_0/mod.rs | 5 +---- packages/configuration/src/v2_0_0/network.rs | 2 +- .../configuration/src/v2_0_0/tracker_api.rs | 2 +- 
.../configuration/src/v2_0_0/udp_tracker.rs | 2 +- .../http-tracker-core/benches/helpers/sync.rs | 2 +- packages/http-tracker-core/src/event.rs | 6 ++--- .../src/services/announce.rs | 16 +++++++------- .../http-tracker-core/src/services/scrape.rs | 22 +++++++++---------- .../src/statistics/event/handler.rs | 8 +++---- packages/primitives/src/peer.rs | 6 ++--- packages/primitives/src/service_binding.rs | 2 +- .../src/swarm/coordinator.rs | 8 +++---- packages/test-helpers/src/configuration.rs | 10 ++++----- .../benches/helpers/utils.rs | 2 +- .../src/entry/peer_list.rs | 2 +- .../tests/entry/mod.rs | 2 +- .../udp-tracker-core/benches/helpers/utils.rs | 2 +- packages/udp-tracker-core/src/services/mod.rs | 4 ++-- .../src/handlers/announce.rs | 6 ++--- .../udp-tracker-server/src/handlers/mod.rs | 4 ++-- .../tests/server/contract.rs | 2 +- 28 files changed, 73 insertions(+), 78 deletions(-) diff --git a/console/tracker-client/src/console/clients/checker/checks/udp.rs b/console/tracker-client/src/console/clients/checker/checks/udp.rs index b4edb2e2c..20394d55a 100644 --- a/console/tracker-client/src/console/clients/checker/checks/udp.rs +++ b/console/tracker-client/src/console/clients/checker/checks/udp.rs @@ -117,8 +117,8 @@ mod tests { let socket_addr = resolve_socket_addr(&Url::parse("udp://localhost:8080").unwrap()); assert!( - socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) - || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080) + || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 8080) ); } @@ -127,8 +127,8 @@ mod tests { let socket_addr = resolve_socket_addr(&Url::parse("udp://localhost:8080").unwrap()); assert!( - socket_addr == SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) - || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + socket_addr == 
SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080) + || socket_addr == SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 8080) ); } } diff --git a/console/tracker-client/src/console/clients/udp/checker.rs b/console/tracker-client/src/console/clients/udp/checker.rs index bf6b49782..ded5c107e 100644 --- a/console/tracker-client/src/console/clients/udp/checker.rs +++ b/console/tracker-client/src/console/clients/udp/checker.rs @@ -116,7 +116,7 @@ impl Client { bytes_uploaded: NumberOfBytes(0i64.into()), bytes_left: NumberOfBytes(0i64.into()), event: AnnounceEvent::Started.into(), - ip_address: Ipv4Addr::new(0, 0, 0, 0).into(), + ip_address: Ipv4Addr::UNSPECIFIED.into(), key: PeerKey::new(0i32), peers_wanted: NumberOfPeers(1i32.into()), port: Port::new(port), diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index 16ff83f81..e21a485cf 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -234,7 +234,7 @@ mod tests { async fn it_should_fail_when_the_authentication_key_is_missing() { let http_core_tracker_services = initialize_private_tracker(); - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let maybe_key = None; @@ -265,7 +265,7 @@ mod tests { let unregistered_key = authentication::Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let maybe_key = Some(unregistered_key); @@ -308,7 +308,7 @@ mod tests 
{ let announce_request = sample_announce_request(); - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let response = handle_announce( @@ -356,7 +356,7 @@ mod tests { connection_info_socket_address: None, }; - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let response = handle_announce( @@ -401,7 +401,7 @@ mod tests { connection_info_socket_address: None, }; - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let response = handle_announce( diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index 8decfe95c..b48d6e036 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -192,7 +192,7 @@ mod tests { #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_missing() { - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let (core_tracker_services, core_http_tracker_services) = initialize_private_tracker(); @@ -224,7 +224,7 @@ mod tests { #[tokio::test] async fn 
it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_invalid() { - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let (core_tracker_services, core_http_tracker_services) = initialize_private_tracker(); @@ -272,7 +272,7 @@ mod tests { let scrape_request = sample_scrape_request(); - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = ScrapeService::new( @@ -314,7 +314,7 @@ mod tests { connection_info_socket_address: None, }; - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = ScrapeService::new( @@ -361,7 +361,7 @@ mod tests { connection_info_socket_address: None, }; - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = ScrapeService::new( diff --git a/packages/axum-http-tracker-server/tests/server/v1/contract.rs b/packages/axum-http-tracker-server/tests/server/v1/contract.rs index d9ac2e1e1..dd80e6b59 100644 --- a/packages/axum-http-tracker-server/tests/server/v1/contract.rs +++ b/packages/axum-http-tracker-server/tests/server/v1/contract.rs @@ -748,7 +748,7 @@ mod for_all_config_modes { Client::new(*env.bind_address()) .announce( 
&QueryBuilder::default() - .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) + .with_peer_addr(&IpAddr::V6(Ipv6Addr::LOCALHOST)) .query(), ) .await; diff --git a/packages/configuration/src/v2_0_0/health_check_api.rs b/packages/configuration/src/v2_0_0/health_check_api.rs index 61178fa80..368f26c42 100644 --- a/packages/configuration/src/v2_0_0/health_check_api.rs +++ b/packages/configuration/src/v2_0_0/health_check_api.rs @@ -25,6 +25,6 @@ impl Default for HealthCheckApi { impl HealthCheckApi { fn default_bind_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 1313) + SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 1313) } } diff --git a/packages/configuration/src/v2_0_0/http_tracker.rs b/packages/configuration/src/v2_0_0/http_tracker.rs index b3b21bda8..ae00257d8 100644 --- a/packages/configuration/src/v2_0_0/http_tracker.rs +++ b/packages/configuration/src/v2_0_0/http_tracker.rs @@ -37,7 +37,7 @@ impl Default for HttpTracker { impl HttpTracker { fn default_bind_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 7070) + SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 7070) } fn default_tsl_config() -> Option { diff --git a/packages/configuration/src/v2_0_0/mod.rs b/packages/configuration/src/v2_0_0/mod.rs index fd742d8d2..8391ba0e1 100644 --- a/packages/configuration/src/v2_0_0/mod.rs +++ b/packages/configuration/src/v2_0_0/mod.rs @@ -492,10 +492,7 @@ mod tests { fn configuration_should_contain_the_external_ip() { let configuration = Configuration::default(); - assert_eq!( - configuration.core.net.external_ip, - Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) - ); + assert_eq!(configuration.core.net.external_ip, Some(IpAddr::V4(Ipv4Addr::UNSPECIFIED))); } #[test] diff --git a/packages/configuration/src/v2_0_0/network.rs b/packages/configuration/src/v2_0_0/network.rs index 8e53d419c..7a4668727 100644 --- a/packages/configuration/src/v2_0_0/network.rs +++ 
b/packages/configuration/src/v2_0_0/network.rs @@ -32,7 +32,7 @@ impl Default for Network { impl Network { #[allow(clippy::unnecessary_wraps)] fn default_external_ip() -> Option { - Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) + Some(IpAddr::V4(Ipv4Addr::UNSPECIFIED)) } fn default_on_reverse_proxy() -> bool { diff --git a/packages/configuration/src/v2_0_0/tracker_api.rs b/packages/configuration/src/v2_0_0/tracker_api.rs index 2da21758b..9433c8c8c 100644 --- a/packages/configuration/src/v2_0_0/tracker_api.rs +++ b/packages/configuration/src/v2_0_0/tracker_api.rs @@ -43,7 +43,7 @@ impl Default for HttpApi { impl HttpApi { fn default_bind_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 1212) + SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 1212) } #[allow(clippy::unnecessary_wraps)] diff --git a/packages/configuration/src/v2_0_0/udp_tracker.rs b/packages/configuration/src/v2_0_0/udp_tracker.rs index 9918bc1fa..133018e86 100644 --- a/packages/configuration/src/v2_0_0/udp_tracker.rs +++ b/packages/configuration/src/v2_0_0/udp_tracker.rs @@ -33,7 +33,7 @@ impl Default for UdpTracker { impl UdpTracker { fn default_bind_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 6969) + SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 6969) } fn default_cookie_lifetime() -> Duration { diff --git a/packages/http-tracker-core/benches/helpers/sync.rs b/packages/http-tracker-core/benches/helpers/sync.rs index e0f022108..dbf0dac83 100644 --- a/packages/http-tracker-core/benches/helpers/sync.rs +++ b/packages/http-tracker-core/benches/helpers/sync.rs @@ -22,7 +22,7 @@ pub async fn return_announce_data_once(samples: u64) -> Duration { core_http_tracker_services.http_stats_event_sender.clone(), ); - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = 
ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let start = Instant::now(); diff --git a/packages/http-tracker-core/src/event.rs b/packages/http-tracker-core/src/event.rs index 5af88c927..2a4734bfd 100644 --- a/packages/http-tracker-core/src/event.rs +++ b/packages/http-tracker-core/src/event.rs @@ -174,13 +174,13 @@ pub mod test { use crate::event::{ConnectionContext, Event}; - let remote_client_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + let remote_client_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); let info_hash = sample_info_hash(); let event1 = Event::TcpAnnounce { connection: ConnectionContext::new( RemoteClientAddr::new(ResolvedIp::FromSocketAddr(remote_client_ip), Some(8080)), - ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), ), info_hash, announcement: Peer::default(), @@ -192,7 +192,7 @@ pub mod test { ResolvedIp::FromSocketAddr(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2))), Some(8080), ), - ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), ), info_hash, announcement: Peer::default(), diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 23d589bce..8d12da713 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -349,7 +349,7 @@ mod tests { let (announce_request, client_ip_sources) = sample_announce_request_for_peer(peer); - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, 
server_socket_addr).unwrap(); let announce_service = AnnounceService::new( @@ -380,7 +380,7 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_4_announce_event_when_the_peer_uses_ipv4() { - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let peer = sample_peer_using_ipv4(); let remote_client_ip = IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)); @@ -442,7 +442,7 @@ mod tests { } fn peer_with_the_ipv4_loopback_ip() -> peer::Peer { - let loopback_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + let loopback_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); let mut peer = sample_peer(); peer.peer_addr = SocketAddr::new(loopback_ip, 8080); peer @@ -453,10 +453,10 @@ mod tests { { // Tracker changes the peer IP to the tracker external IP when the peer is using the loopback IP. - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let peer = peer_with_the_ipv4_loopback_ip(); - let remote_client_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); + let remote_client_ip = IpAddr::V4(Ipv4Addr::LOCALHOST); let server_service_binding_clone = server_service_binding.clone(); let peer_copy = peer; @@ -466,7 +466,7 @@ mod tests { .expect_send() .with(predicate::function(move |event| { let mut announced_peer = peer_copy; - announced_peer.peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); + announced_peer.peer_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080); let mut peer_announcement = peer; peer_announcement.peer_addr = SocketAddr::new( @@ -514,7 +514,7 @@ mod tests { #[tokio::test] async fn 
it_should_send_the_tcp_6_announce_event_when_the_peer_uses_ipv6_even_if_the_tracker_changes_the_peer_ip_to_ipv4() { - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let peer = sample_peer_using_ipv6(); let remote_client_ip = IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)); @@ -550,7 +550,7 @@ mod tests { core_http_tracker_services.http_stats_event_sender.clone(), ); - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let _announce_data = announce_service diff --git a/packages/http-tracker-core/src/services/scrape.rs b/packages/http-tracker-core/src/services/scrape.rs index 1445ffcfe..4587bc90a 100644 --- a/packages/http-tracker-core/src/services/scrape.rs +++ b/packages/http-tracker-core/src/services/scrape.rs @@ -304,7 +304,7 @@ mod tests { connection_info_socket_address: Some(SocketAddr::new(original_peer_ip, 8080)), }; - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = Arc::new(ScrapeService::new( @@ -345,8 +345,7 @@ mod tests { ResolvedIp::FromSocketAddr(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1))), Some(8080), ), - ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)) - .unwrap(), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), ), })) .times(1) @@ -366,7 
+365,7 @@ mod tests { connection_info_socket_address: Some(SocketAddr::new(peer_ip, 8080)), }; - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = Arc::new(ScrapeService::new( @@ -384,7 +383,7 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let config = configuration::ephemeral(); @@ -420,7 +419,7 @@ mod tests { connection_info_socket_address: Some(SocketAddr::new(peer_ip, 8080)), }; - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = Arc::new(ScrapeService::new( @@ -495,7 +494,7 @@ mod tests { connection_info_socket_address: Some(SocketAddr::new(original_peer_ip, 8080)), }; - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = Arc::new(ScrapeService::new( @@ -530,8 +529,7 @@ mod tests { ResolvedIp::FromSocketAddr(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1))), Some(8080), ), - ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)) - .unwrap(), + ServiceBinding::new(Protocol::HTTP, 
SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), ), })) .times(1) @@ -549,7 +547,7 @@ mod tests { connection_info_socket_address: Some(SocketAddr::new(peer_ip, 8080)), }; - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = Arc::new(ScrapeService::new( @@ -567,7 +565,7 @@ mod tests { #[tokio::test] async fn it_should_send_the_tcp_6_scrape_event_when_the_peer_uses_ipv6() { - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let config = configuration::ephemeral(); @@ -603,7 +601,7 @@ mod tests { connection_info_socket_address: Some(SocketAddr::new(peer_ip, 8080)), }; - let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070); + let server_socket_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070); let server_service_binding = ServiceBinding::new(Protocol::HTTP, server_socket_addr).unwrap(); let scrape_service = Arc::new(ScrapeService::new( diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index dcb814eef..78ef24e02 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -101,7 +101,7 @@ mod tests { Event::TcpAnnounce { connection: ConnectionContext::new( RemoteClientAddr::new(ResolvedIp::FromSocketAddr(remote_client_ip), Some(8080)), - ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), + ServiceBinding::new(Protocol::HTTP, 
SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), ), info_hash: sample_info_hash(), announcement: peer, @@ -127,7 +127,7 @@ mod tests { ResolvedIp::FromSocketAddr(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2))), Some(8080), ), - ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), ), }, &stats_repository, @@ -150,7 +150,7 @@ mod tests { Event::TcpAnnounce { connection: ConnectionContext::new( RemoteClientAddr::new(ResolvedIp::FromSocketAddr(remote_client_ip), Some(8080)), - ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), ), info_hash: sample_info_hash(), announcement: peer, @@ -178,7 +178,7 @@ mod tests { ))), Some(8080), ), - ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), ), }, &stats_repository, diff --git a/packages/primitives/src/peer.rs b/packages/primitives/src/peer.rs index c271ee5d6..ef47f28f8 100644 --- a/packages/primitives/src/peer.rs +++ b/packages/primitives/src/peer.rs @@ -194,7 +194,7 @@ impl Ord for Peer { impl PartialOrd for Peer { fn partial_cmp(&self, other: &Self) -> Option { - Some(self.peer_id.cmp(&other.peer_id)) + Some(self.cmp(other)) } } @@ -517,7 +517,7 @@ pub mod fixture { pub fn seeder() -> Self { let peer = Peer { peer_id: PeerId(*b"-qB00000000000000001"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), downloaded: NumberOfBytes::new(0), @@ -621,7 +621,7 
@@ pub mod fixture { fn default() -> Self { Self { peer_id: PeerId(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080), updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), uploaded: NumberOfBytes::new(0), downloaded: NumberOfBytes::new(0), diff --git a/packages/primitives/src/service_binding.rs b/packages/primitives/src/service_binding.rs index 74ff58e66..c1ec308c8 100644 --- a/packages/primitives/src/service_binding.rs +++ b/packages/primitives/src/service_binding.rs @@ -115,7 +115,7 @@ pub enum Error { /// use std::net::{IpAddr, Ipv4Addr, SocketAddr}; /// use torrust_tracker_primitives::service_binding::{ServiceBinding, Protocol}; /// -/// let service_binding = ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7070)).unwrap(); +/// let service_binding = ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(); /// /// assert_eq!(service_binding.url().to_string(), "http://127.0.0.1:7070/".to_string()); /// ``` diff --git a/packages/swarm-coordination-registry/src/swarm/coordinator.rs b/packages/swarm-coordination-registry/src/swarm/coordinator.rs index 1ddf3e60b..f4e94c62c 100644 --- a/packages/swarm-coordination-registry/src/swarm/coordinator.rs +++ b/packages/swarm-coordination-registry/src/swarm/coordinator.rs @@ -438,7 +438,7 @@ mod tests { let peer1 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 6969)) .build(); swarm.upsert_peer(peer1.into()).await; @@ -605,7 +605,7 @@ mod tests { let mut swarm = Coordinator::new(&sample_info_hash(), 0, None); let peer1 = PeerBuilder::default() - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + 
.with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 6969)) .build(); swarm.upsert_peer(peer1.into()).await; @@ -626,13 +626,13 @@ mod tests { let peer1 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 6969)) .build(); swarm.upsert_peer(peer1.into()).await; let peer2 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000002")) - .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 6969)) .build(); swarm.upsert_peer(peer2.into()).await; diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index 986981b1f..ffe3af3b2 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -40,7 +40,7 @@ pub fn ephemeral() -> Configuration { // Ephemeral socket address for API let api_port = 0u16; let mut http_api = HttpApi { - bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), api_port), + bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), api_port), ..Default::default() }; http_api.add_token("admin", "MyAccessToken"); @@ -48,12 +48,12 @@ pub fn ephemeral() -> Configuration { // Ephemeral socket address for Health Check API let health_check_api_port = 0u16; - config.health_check_api.bind_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), health_check_api_port); + config.health_check_api.bind_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), health_check_api_port); // Ephemeral socket address for UDP tracker let udp_port = 0u16; config.udp_trackers = Some(vec![UdpTracker { - bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), udp_port), + bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), udp_port), 
cookie_lifetime: Duration::from_secs(120), tracker_usage_statistics: true, }]); @@ -61,7 +61,7 @@ pub fn ephemeral() -> Configuration { // Ephemeral socket address for HTTP tracker let http_port = 0u16; config.http_trackers = Some(vec![HttpTracker { - bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), http_port), + bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), http_port), tsl_config: None, tracker_usage_statistics: true, }]); @@ -156,7 +156,7 @@ pub fn ephemeral_with_external_ip(ip: IpAddr) -> Configuration { pub fn ephemeral_ipv6() -> Configuration { let mut cfg = ephemeral(); - let ipv6 = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), 0); + let ipv6 = SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0); if let Some(ref mut http_api) = cfg.http_api { http_api.bind_address.clone_from(&ipv6); diff --git a/packages/torrent-repository-benchmarking/benches/helpers/utils.rs b/packages/torrent-repository-benchmarking/benches/helpers/utils.rs index 51b09ec0f..16ba0bf7f 100644 --- a/packages/torrent-repository-benchmarking/benches/helpers/utils.rs +++ b/packages/torrent-repository-benchmarking/benches/helpers/utils.rs @@ -9,7 +9,7 @@ use zerocopy::I64; pub const DEFAULT_PEER: Peer = Peer { peer_id: PeerId([0; 20]), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080), updated: DurationSinceUnixEpoch::from_secs(0), uploaded: NumberOfBytes(I64::ZERO), downloaded: NumberOfBytes(I64::ZERO), diff --git a/packages/torrent-repository-benchmarking/src/entry/peer_list.rs b/packages/torrent-repository-benchmarking/src/entry/peer_list.rs index 33270cf27..54a560994 100644 --- a/packages/torrent-repository-benchmarking/src/entry/peer_list.rs +++ b/packages/torrent-repository-benchmarking/src/entry/peer_list.rs @@ -195,7 +195,7 @@ mod tests { let peer1 = PeerBuilder::default() .with_peer_id(&PeerId(*b"-qB00000000000000001")) - 
.with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 6969)) .build(); peer_list.upsert(peer1.into()); diff --git a/packages/torrent-repository-benchmarking/tests/entry/mod.rs b/packages/torrent-repository-benchmarking/tests/entry/mod.rs index b46c05415..5cbb3b19c 100644 --- a/packages/torrent-repository-benchmarking/tests/entry/mod.rs +++ b/packages/torrent-repository-benchmarking/tests/entry/mod.rs @@ -368,7 +368,7 @@ async fn it_should_get_peers_excluding_the_client_socket( let peers = torrent.get_peers(None).await; let mut peer = **peers.first().expect("there should be a peer"); - let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081); + let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8081); // for this test, we should not already use this socket. assert_ne!(peer.peer_addr, socket); diff --git a/packages/udp-tracker-core/benches/helpers/utils.rs b/packages/udp-tracker-core/benches/helpers/utils.rs index f04805001..1423d4bcd 100644 --- a/packages/udp-tracker-core/benches/helpers/utils.rs +++ b/packages/udp-tracker-core/benches/helpers/utils.rs @@ -10,7 +10,7 @@ pub(crate) fn sample_ipv4_remote_addr() -> SocketAddr { } pub(crate) fn sample_ipv4_socket_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080) } pub(crate) fn sample_issue_time() -> f64 { diff --git a/packages/udp-tracker-core/src/services/mod.rs b/packages/udp-tracker-core/src/services/mod.rs index 64e357b1c..56882e68f 100644 --- a/packages/udp-tracker-core/src/services/mod.rs +++ b/packages/udp-tracker-core/src/services/mod.rs @@ -32,11 +32,11 @@ pub(crate) mod tests { } pub(crate) fn sample_ipv4_socket_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080) } fn sample_ipv6_socket_address() 
-> SocketAddr { - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 8080) } pub(crate) fn sample_issue_time() -> f64 { diff --git a/packages/udp-tracker-server/src/handlers/announce.rs b/packages/udp-tracker-server/src/handlers/announce.rs index 901a1434a..ea19611ce 100644 --- a/packages/udp-tracker-server/src/handlers/announce.rs +++ b/packages/udp-tracker-server/src/handlers/announce.rs @@ -491,7 +491,7 @@ pub(crate) mod tests { let (core_tracker_services, core_udp_tracker_services, server_udp_tracker_services) = initialize_core_tracker_services_for_public_tracker(); - let client_ip = Ipv4Addr::new(127, 0, 0, 1); + let client_ip = Ipv4Addr::LOCALHOST; let client_port = 8080; let info_hash = AquaticInfoHash([0u8; 20]); let peer_id = AquaticPeerId([255u8; 20]); @@ -869,8 +869,8 @@ pub(crate) mod tests { async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let config = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); - let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); - let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); + let loopback_ipv4 = Ipv4Addr::LOCALHOST; + let loopback_ipv6 = Ipv6Addr::LOCALHOST; let client_ip_v4 = loopback_ipv4; let client_ip_v6 = loopback_ipv6; diff --git a/packages/udp-tracker-server/src/handlers/mod.rs b/packages/udp-tracker-server/src/handlers/mod.rs index 43c5bc4d5..add576a89 100644 --- a/packages/udp-tracker-server/src/handlers/mod.rs +++ b/packages/udp-tracker-server/src/handlers/mod.rs @@ -340,11 +340,11 @@ pub(crate) mod tests { } pub(crate) fn sample_ipv4_socket_address() -> SocketAddr { - SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080) + SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 8080) } fn sample_ipv6_socket_address() -> SocketAddr { - SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) + 
SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 8080) } pub(crate) fn sample_issue_time() -> f64 { diff --git a/packages/udp-tracker-server/tests/server/contract.rs b/packages/udp-tracker-server/tests/server/contract.rs index 04ad0f39d..0d9540289 100644 --- a/packages/udp-tracker-server/tests/server/contract.rs +++ b/packages/udp-tracker-server/tests/server/contract.rs @@ -167,7 +167,7 @@ mod receiving_an_announce_request { bytes_uploaded: NumberOfBytes(0i64.into()), bytes_left: NumberOfBytes(0i64.into()), event: AnnounceEvent::Started.into(), - ip_address: Ipv4Addr::new(0, 0, 0, 0).into(), + ip_address: Ipv4Addr::UNSPECIFIED.into(), key: PeerKey::new(0i32), peers_wanted: NumberOfPeers(1i32.into()), port: Port(port.into()), From 42850f3031ea4cf0d4c99f780ffcba402da369c3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 16 Jun 2025 16:10:36 +0100 Subject: [PATCH 721/802] refactor: [#1581] extract methods --- .../src/statistics/metrics.rs | 62 +++++++++++++++++++ .../src/statistics/services.rs | 47 ++------------ 2 files changed, 66 insertions(+), 43 deletions(-) diff --git a/packages/http-tracker-core/src/statistics/metrics.rs b/packages/http-tracker-core/src/statistics/metrics.rs index 650194d43..5e6d70831 100644 --- a/packages/http-tracker-core/src/statistics/metrics.rs +++ b/packages/http-tracker-core/src/statistics/metrics.rs @@ -1,9 +1,13 @@ use serde::Serialize; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::aggregate::Sum; use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; +use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; +use crate::statistics::HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; + /// Metrics collected by the tracker. 
#[derive(Debug, Clone, PartialEq, Default, Serialize)] pub struct Metrics { @@ -49,3 +53,61 @@ impl Metrics { self.metric_collection.set_gauge(metric_name, labels, value, now) } } + +impl Metrics { + /// Total number of TCP (HTTP tracker) `announce` requests from IPv4 peers. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn tcp4_announces_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() + .value() as u64 + } + + /// Total number of TCP (HTTP tracker) `scrape` requests from IPv4 peers. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn tcp4_scrapes_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64 + } + + /// Total number of TCP (HTTP tracker) `announce` requests from IPv6 peers. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn tcp6_announces_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() + .value() as u64 + } + + /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. 
+ #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn tcp6_scrapes_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64 + } +} diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 66bacbb06..77c04fef2 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -1,6 +1,5 @@ use std::sync::Arc; -use bittorrent_http_tracker_core::statistics::HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self}; @@ -156,51 +155,13 @@ async fn get_protocol_metrics_from_labeled_metrics( // TCPv4 - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let tcp4_announces_handled = http_stats - .metric_collection - .sum( - &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), - &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let tcp4_scrapes_handled = http_stats - .metric_collection - .sum( - &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), - &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), - ) - .unwrap_or_default() - .value() as u64; + let tcp4_announces_handled = http_stats.tcp4_announces_handled(); + let tcp4_scrapes_handled = http_stats.tcp4_scrapes_handled(); // TCPv6 - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let 
tcp6_announces_handled = http_stats - .metric_collection - .sum( - &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), - &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let tcp6_scrapes_handled = http_stats - .metric_collection - .sum( - &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), - &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), - ) - .unwrap_or_default() - .value() as u64; + let tcp6_announces_handled = http_stats.tcp6_announces_handled(); + let tcp6_scrapes_handled = http_stats.tcp6_scrapes_handled(); // UDP From 44c184816bb57559fa6b396e734197a459f87ec5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 16 Jun 2025 16:19:41 +0100 Subject: [PATCH 722/802] refactor: [#1581] remove non-labeled metrics in http-tracker-core pkg We only used labeled metric internally, althougth the APi exposes global aggregate metrics (without labels. They are calculated from the labeled metrics. 
--- .../tests/server/v1/contract.rs | 10 +++--- .../src/statistics/event/handler.rs | 35 +++---------------- .../src/statistics/metrics.rs | 12 ------- .../src/statistics/repository.rs | 24 ------------- .../src/statistics/services.rs | 7 ---- .../src/statistics/services.rs | 12 +++---- 6 files changed, 15 insertions(+), 85 deletions(-) diff --git a/packages/axum-http-tracker-server/tests/server/v1/contract.rs b/packages/axum-http-tracker-server/tests/server/v1/contract.rs index dd80e6b59..85792f922 100644 --- a/packages/axum-http-tracker-server/tests/server/v1/contract.rs +++ b/packages/axum-http-tracker-server/tests/server/v1/contract.rs @@ -704,7 +704,7 @@ mod for_all_config_modes { let stats = env.container.http_tracker_core_container.stats_repository.get_stats().await; - assert_eq!(stats.tcp4_announces_handled, 1); + assert_eq!(stats.tcp4_announces_handled(), 1); drop(stats); @@ -730,7 +730,7 @@ mod for_all_config_modes { let stats = env.container.http_tracker_core_container.stats_repository.get_stats().await; - assert_eq!(stats.tcp6_announces_handled, 1); + assert_eq!(stats.tcp6_announces_handled(), 1); drop(stats); @@ -755,7 +755,7 @@ mod for_all_config_modes { let stats = env.container.http_tracker_core_container.stats_repository.get_stats().await; - assert_eq!(stats.tcp6_announces_handled, 0); + assert_eq!(stats.tcp6_announces_handled(), 0); drop(stats); @@ -1149,7 +1149,7 @@ mod for_all_config_modes { let stats = env.container.http_tracker_core_container.stats_repository.get_stats().await; - assert_eq!(stats.tcp4_scrapes_handled, 1); + assert_eq!(stats.tcp4_scrapes_handled(), 1); drop(stats); @@ -1181,7 +1181,7 @@ mod for_all_config_modes { let stats = env.container.http_tracker_core_container.stats_repository.get_stats().await; - assert_eq!(stats.tcp6_scrapes_handled, 1); + assert_eq!(stats.tcp6_scrapes_handled(), 1); drop(stats); diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs 
b/packages/http-tracker-core/src/statistics/event/handler.rs index 78ef24e02..a1d8d5fc2 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -1,4 +1,3 @@ -use std::net::IpAddr; use std::sync::Arc; use torrust_tracker_metrics::label::{LabelSet, LabelValue}; @@ -12,19 +11,6 @@ use crate::statistics::HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; pub async fn handle_event(event: Event, stats_repository: &Arc, now: DurationSinceUnixEpoch) { match event { Event::TcpAnnounce { connection, .. } => { - // Global fixed metrics - - match connection.client_ip_addr() { - IpAddr::V4(_) => { - stats_repository.increase_tcp4_announces().await; - } - IpAddr::V6(_) => { - stats_repository.increase_tcp6_announces().await; - } - } - - // Extendable metrics - let mut label_set = LabelSet::from(connection); label_set.upsert(label_name!("request_kind"), LabelValue::new("announce")); @@ -42,19 +28,6 @@ pub async fn handle_event(event: Event, stats_repository: &Arc, now: }; } Event::TcpScrape { connection } => { - // Global fixed metrics - - match connection.client_ip_addr() { - IpAddr::V4(_) => { - stats_repository.increase_tcp4_scrapes().await; - } - IpAddr::V6(_) => { - stats_repository.increase_tcp6_scrapes().await; - } - } - - // Extendable metrics - let mut label_set = LabelSet::from(connection); label_set.upsert(label_name!("request_kind"), LabelValue::new("scrape")); @@ -113,7 +86,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.tcp4_announces_handled, 1); + assert_eq!(stats.tcp4_announces_handled(), 1); } #[tokio::test] @@ -137,7 +110,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.tcp4_scrapes_handled, 1); + assert_eq!(stats.tcp4_scrapes_handled(), 1); } #[tokio::test] @@ -162,7 +135,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.tcp6_announces_handled, 1); + 
assert_eq!(stats.tcp6_announces_handled(), 1); } #[tokio::test] @@ -188,6 +161,6 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.tcp6_scrapes_handled, 1); + assert_eq!(stats.tcp6_scrapes_handled(), 1); } } diff --git a/packages/http-tracker-core/src/statistics/metrics.rs b/packages/http-tracker-core/src/statistics/metrics.rs index 5e6d70831..05acea937 100644 --- a/packages/http-tracker-core/src/statistics/metrics.rs +++ b/packages/http-tracker-core/src/statistics/metrics.rs @@ -11,18 +11,6 @@ use crate::statistics::HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; /// Metrics collected by the tracker. #[derive(Debug, Clone, PartialEq, Default, Serialize)] pub struct Metrics { - /// Total number of TCP (HTTP tracker) `announce` requests from IPv4 peers. - pub tcp4_announces_handled: u64, - - /// Total number of TCP (HTTP tracker) `scrape` requests from IPv4 peers. - pub tcp4_scrapes_handled: u64, - - /// Total number of TCP (HTTP tracker) `announce` requests from IPv6 peers. - pub tcp6_announces_handled: u64, - - /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. - pub tcp6_scrapes_handled: u64, - /// A collection of metrics. 
pub metric_collection: MetricCollection, } diff --git a/packages/http-tracker-core/src/statistics/repository.rs b/packages/http-tracker-core/src/statistics/repository.rs index d5e718821..ea027f5c6 100644 --- a/packages/http-tracker-core/src/statistics/repository.rs +++ b/packages/http-tracker-core/src/statistics/repository.rs @@ -33,30 +33,6 @@ impl Repository { self.stats.read().await } - pub async fn increase_tcp4_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp4_announces_handled += 1; - drop(stats_lock); - } - - pub async fn increase_tcp4_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp4_scrapes_handled += 1; - drop(stats_lock); - } - - pub async fn increase_tcp6_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp6_announces_handled += 1; - drop(stats_lock); - } - - pub async fn increase_tcp6_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.tcp6_scrapes_handled += 1; - drop(stats_lock); - } - /// # Errors /// /// This function will return an error if the metric collection fails to diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs index dbc096030..b53d6f12e 100644 --- a/packages/http-tracker-core/src/statistics/services.rs +++ b/packages/http-tracker-core/src/statistics/services.rs @@ -53,13 +53,6 @@ pub async fn get_metrics( TrackerMetrics { torrents_metrics, protocol_metrics: Metrics { - // TCPv4 - tcp4_announces_handled: stats.tcp4_announces_handled, - tcp4_scrapes_handled: stats.tcp4_scrapes_handled, - // TCPv6 - tcp6_announces_handled: stats.tcp6_announces_handled, - tcp6_scrapes_handled: stats.tcp6_scrapes_handled, - // Samples metric_collection: stats.metric_collection.clone(), }, } diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 77c04fef2..60c4a8ebd 100644 --- 
a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -101,13 +101,13 @@ async fn get_protocol_metrics( ProtocolMetrics { // TCPv4 - tcp4_connections_handled: http_stats.tcp4_announces_handled + http_stats.tcp4_scrapes_handled, - tcp4_announces_handled: http_stats.tcp4_announces_handled, - tcp4_scrapes_handled: http_stats.tcp4_scrapes_handled, + tcp4_connections_handled: http_stats.tcp4_announces_handled() + http_stats.tcp4_scrapes_handled(), + tcp4_announces_handled: http_stats.tcp4_announces_handled(), + tcp4_scrapes_handled: http_stats.tcp4_scrapes_handled(), // TCPv6 - tcp6_connections_handled: http_stats.tcp6_announces_handled + http_stats.tcp6_scrapes_handled, - tcp6_announces_handled: http_stats.tcp6_announces_handled, - tcp6_scrapes_handled: http_stats.tcp6_scrapes_handled, + tcp6_connections_handled: http_stats.tcp6_announces_handled() + http_stats.tcp6_scrapes_handled(), + tcp6_announces_handled: http_stats.tcp6_announces_handled(), + tcp6_scrapes_handled: http_stats.tcp6_scrapes_handled(), // UDP udp_requests_aborted: udp_server_stats.udp_requests_aborted, udp_requests_banned: udp_server_stats.udp_requests_banned, From a5c5a890a5af81ce1e01a978759ee42432d8490e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 16 Jun 2025 16:24:13 +0100 Subject: [PATCH 723/802] refactor: [#1581] remove unused code --- .../http-tracker-core/src/statistics/mod.rs | 1 - .../src/statistics/services.rs | 110 ------------------ 2 files changed, 111 deletions(-) delete mode 100644 packages/http-tracker-core/src/statistics/services.rs diff --git a/packages/http-tracker-core/src/statistics/mod.rs b/packages/http-tracker-core/src/statistics/mod.rs index b8ca865fa..3ae355471 100644 --- a/packages/http-tracker-core/src/statistics/mod.rs +++ b/packages/http-tracker-core/src/statistics/mod.rs @@ -1,7 +1,6 @@ pub mod event; pub mod metrics; pub mod repository; -pub mod services; use metrics::Metrics; use 
torrust_tracker_metrics::metric::description::MetricDescription; diff --git a/packages/http-tracker-core/src/statistics/services.rs b/packages/http-tracker-core/src/statistics/services.rs deleted file mode 100644 index b53d6f12e..000000000 --- a/packages/http-tracker-core/src/statistics/services.rs +++ /dev/null @@ -1,110 +0,0 @@ -//! Statistics services. -//! -//! It includes: -//! -//! - A [`factory`](crate::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. -//! - A [`get_metrics`] service to get the tracker [`metrics`](crate::statistics::metrics::Metrics). -//! -//! Tracker metrics are collected using a Publisher-Subscribe pattern. -//! -//! The factory function builds two structs: -//! -//! - An statistics event [`Sender`](torrust_tracker_events::sender::Sender) -//! - An statistics [`Repository`] -//! -//! ```text -//! let (stats_event_sender, stats_repository) = factory(tracker_usage_statistics); -//! ``` -//! -//! The statistics repository is responsible for storing the metrics in memory. -//! The statistics event sender allows sending events related to metrics. -//! There is an event listener that is receiving all the events and processing them with an event handler. -//! Then, the event handler updates the metrics depending on the received event. -use std::sync::Arc; - -use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; - -use crate::statistics::metrics::Metrics; -use crate::statistics::repository::Repository; - -/// All the metrics collected by the tracker. -#[derive(Debug, PartialEq)] -pub struct TrackerMetrics { - /// Domain level metrics. - /// - /// General metrics for all torrents (number of seeders, leechers, etcetera) - pub torrents_metrics: AggregateActiveSwarmMetadata, - - /// Application level metrics. Usage statistics/metrics. 
- /// - /// Metrics about how the tracker is been used (number of number of http scrape requests, etcetera) - pub protocol_metrics: Metrics, -} - -/// It returns all the [`TrackerMetrics`] -pub async fn get_metrics( - in_memory_torrent_repository: Arc, - stats_repository: Arc, -) -> TrackerMetrics { - let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; - let stats = stats_repository.get_stats().await; - - TrackerMetrics { - torrents_metrics, - protocol_metrics: Metrics { - metric_collection: stats.metric_collection.clone(), - }, - } -} - -#[cfg(test)] -mod tests { - use std::sync::Arc; - - use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; - use bittorrent_tracker_core::{self}; - use torrust_tracker_configuration::Configuration; - use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; - use torrust_tracker_test_helpers::configuration; - - use crate::event::bus::EventBus; - use crate::event::sender::Broadcaster; - use crate::statistics::describe_metrics; - use crate::statistics::event::listener::run_event_listener; - use crate::statistics::repository::Repository; - use crate::statistics::services::{get_metrics, TrackerMetrics}; - - pub fn tracker_configuration() -> Configuration { - configuration::ephemeral() - } - - #[tokio::test] - async fn the_statistics_service_should_return_the_tracker_metrics() { - let config = tracker_configuration(); - - let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - - // HTTP core stats - let http_core_broadcaster = Broadcaster::default(); - let http_stats_repository = Arc::new(Repository::new()); - let http_stats_event_bus = Arc::new(EventBus::new( - config.core.tracker_usage_statistics.into(), - http_core_broadcaster.clone(), - )); - - if config.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); - } - - let tracker_metrics = 
get_metrics(in_memory_torrent_repository.clone(), http_stats_repository).await; - - assert_eq!( - tracker_metrics, - TrackerMetrics { - torrents_metrics: AggregateActiveSwarmMetadata::default(), - protocol_metrics: describe_metrics(), - } - ); - } -} From 0284bef1eaf87dfa0884baca89f869672574d8f6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 16 Jun 2025 16:40:44 +0100 Subject: [PATCH 724/802] refactor: [#1581] remove non-labeled metrics in udp-tracker-core pkg --- .../src/statistics/event/handler.rs | 51 +------- .../src/statistics/metrics.rs | 116 ++++++++++++++---- .../src/statistics/repository.rs | 36 ------ .../src/statistics/services.rs | 9 -- 4 files changed, 96 insertions(+), 116 deletions(-) diff --git a/packages/udp-tracker-core/src/statistics/event/handler.rs b/packages/udp-tracker-core/src/statistics/event/handler.rs index 039b6b0d5..e5d2b87a7 100644 --- a/packages/udp-tracker-core/src/statistics/event/handler.rs +++ b/packages/udp-tracker-core/src/statistics/event/handler.rs @@ -12,19 +12,6 @@ use crate::statistics::UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; pub async fn handle_event(event: Event, stats_repository: &Repository, now: DurationSinceUnixEpoch) { match event { Event::UdpConnect { connection: context } => { - // Global fixed metrics - - match context.client_socket_addr.ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_connections().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_connections().await; - } - } - - // Extendable metrics - let mut label_set = LabelSet::from(context); label_set.upsert(label_name!("request_kind"), LabelValue::new("connect")); @@ -37,19 +24,6 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura }; } Event::UdpAnnounce { connection: context, .. 
} => { - // Global fixed metrics - - match context.client_socket_addr.ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_announces().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_announces().await; - } - } - - // Extendable metrics - let mut label_set = LabelSet::from(context); label_set.upsert(label_name!("request_kind"), LabelValue::new("announce")); @@ -62,19 +36,6 @@ pub async fn handle_event(event: Event, stats_repository: &Repository, now: Dura }; } Event::UdpScrape { connection: context } => { - // Global fixed metrics - - match context.client_socket_addr.ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_scrapes().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_scrapes().await; - } - } - - // Extendable metrics - let mut label_set = LabelSet::from(context); label_set.upsert(label_name!("request_kind"), LabelValue::new("scrape")); @@ -127,7 +88,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_connections_handled, 1); + assert_eq!(stats.udp4_connections_handled(), 1); } #[tokio::test] @@ -154,7 +115,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_announces_handled, 1); + assert_eq!(stats.udp4_announces_handled(), 1); } #[tokio::test] @@ -179,7 +140,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_scrapes_handled, 1); + assert_eq!(stats.udp4_scrapes_handled(), 1); } #[tokio::test] @@ -204,7 +165,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_connections_handled, 1); + assert_eq!(stats.udp6_connections_handled(), 1); } #[tokio::test] @@ -231,7 +192,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_announces_handled, 1); + assert_eq!(stats.udp6_announces_handled(), 1); } #[tokio::test] @@ -256,6 +217,6 @@ mod tests { let stats = stats_repository.get_stats().await; - 
assert_eq!(stats.udp6_scrapes_handled, 1); + assert_eq!(stats.udp6_scrapes_handled(), 1); } } diff --git a/packages/udp-tracker-core/src/statistics/metrics.rs b/packages/udp-tracker-core/src/statistics/metrics.rs index e6ff8d5f6..57838c66f 100644 --- a/packages/udp-tracker-core/src/statistics/metrics.rs +++ b/packages/udp-tracker-core/src/statistics/metrics.rs @@ -1,37 +1,15 @@ use serde::Serialize; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::aggregate::Sum; use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; +use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; -/// Metrics collected by the tracker. -/// -/// - Number of connections handled -/// - Number of `announce` requests handled -/// - Number of `scrape` request handled -/// -/// These metrics are collected for each connection type: UDP and HTTP -/// and also for each IP version used by the peers: IPv4 and IPv6. +use crate::statistics::UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL; + #[derive(Debug, PartialEq, Default, Serialize)] pub struct Metrics { - /// Total number of UDP (UDP tracker) connections from IPv4 peers. - pub udp4_connections_handled: u64, - - /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. - pub udp4_announces_handled: u64, - - /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. - pub udp4_scrapes_handled: u64, - - /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. - pub udp6_connections_handled: u64, - - /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. - pub udp6_announces_handled: u64, - - /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. - pub udp6_scrapes_handled: u64, - /// A collection of metrics. 
pub metric_collection: MetricCollection, } @@ -64,3 +42,89 @@ impl Metrics { self.metric_collection.set_gauge(metric_name, labels, value, now) } } + +impl Metrics { + /// Total number of UDP (UDP tracker) connections from IPv4 peers. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_connections_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "connect")].into(), + ) + .unwrap_or_default() + .value() as u64 + } + + /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_announces_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() + .value() as u64 + } + + /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_scrapes_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64 + } + + /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. 
+ #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_connections_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")].into(), + ) + .unwrap_or_default() + .value() as u64 + } + + /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_announces_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() + .value() as u64 + } + + /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_scrapes_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64 + } +} diff --git a/packages/udp-tracker-core/src/statistics/repository.rs b/packages/udp-tracker-core/src/statistics/repository.rs index c68fa14f7..ceee0e369 100644 --- a/packages/udp-tracker-core/src/statistics/repository.rs +++ b/packages/udp-tracker-core/src/statistics/repository.rs @@ -33,42 +33,6 @@ impl Repository { self.stats.read().await } - pub async fn increase_udp4_connections(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_connections_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp4_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_announces_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp4_scrapes(&self) { - 
let mut stats_lock = self.stats.write().await; - stats_lock.udp4_scrapes_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_connections(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_connections_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_announces_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_scrapes_handled += 1; - drop(stats_lock); - } - /// # Errors /// /// This function will return an error if the metric collection fails to diff --git a/packages/udp-tracker-core/src/statistics/services.rs b/packages/udp-tracker-core/src/statistics/services.rs index 24d25a25c..18a80bad1 100644 --- a/packages/udp-tracker-core/src/statistics/services.rs +++ b/packages/udp-tracker-core/src/statistics/services.rs @@ -69,15 +69,6 @@ pub async fn get_metrics( TrackerMetrics { torrents_metrics, protocol_metrics: Metrics { - // UDPv4 - udp4_connections_handled: stats.udp4_connections_handled, - udp4_announces_handled: stats.udp4_announces_handled, - udp4_scrapes_handled: stats.udp4_scrapes_handled, - // UDPv6 - udp6_connections_handled: stats.udp6_connections_handled, - udp6_announces_handled: stats.udp6_announces_handled, - udp6_scrapes_handled: stats.udp6_scrapes_handled, - // Extendable metrics metric_collection: stats.metric_collection.clone(), }, } From f008a0a618cbbf221c6442fb32623b23157bb403 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 16 Jun 2025 16:50:26 +0100 Subject: [PATCH 725/802] fix: test for request counters in http-tracker-core The IP family related to the counter (inet or inet6) depends on the server binding IP. If the server is listening on an inet6 IP, then inet6 related counters should be increased.
--- packages/http-tracker-core/src/statistics/event/handler.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/http-tracker-core/src/statistics/event/handler.rs b/packages/http-tracker-core/src/statistics/event/handler.rs index a1d8d5fc2..37c7a26b5 100644 --- a/packages/http-tracker-core/src/statistics/event/handler.rs +++ b/packages/http-tracker-core/src/statistics/event/handler.rs @@ -123,7 +123,7 @@ mod tests { Event::TcpAnnounce { connection: ConnectionContext::new( RemoteClientAddr::new(ResolvedIp::FromSocketAddr(remote_client_ip), Some(8080)), - ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 7070)).unwrap(), ), info_hash: sample_info_hash(), announcement: peer, @@ -151,7 +151,7 @@ mod tests { ))), Some(8080), ), - ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 7070)).unwrap(), + ServiceBinding::new(Protocol::HTTP, SocketAddr::new(IpAddr::V6(Ipv6Addr::LOCALHOST), 7070)).unwrap(), ), }, &stats_repository, From 6183eba1a2bf42e7198350f2b205e76a682d9d52 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 16 Jun 2025 17:19:53 +0100 Subject: [PATCH 726/802] refactor: [#1581] remove non-labeled metrics in udp-tracker-server pkg --- .../src/statistics/services.rs | 248 +++------------ .../src/statistics/event/handler/error.rs | 14 +- .../event/handler/request_aborted.rs | 8 +- .../event/handler/request_accepted.rs | 41 +-- .../event/handler/request_banned.rs | 8 +- .../event/handler/request_received.rs | 13 +- .../statistics/event/handler/response_sent.rs | 14 +- .../src/statistics/metrics.rs | 294 +++++++++++++++--- .../src/statistics/repository.rs | 158 ++-------- .../src/statistics/services.rs | 37 +-- .../tests/server/contract.rs | 4 +- 11 files changed, 334 insertions(+), 505 deletions(-) diff --git 
a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 60c4a8ebd..e30febf00 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -4,16 +4,8 @@ use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepo use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::{self}; use tokio::sync::RwLock; -use torrust_tracker_metrics::label::LabelSet; -use torrust_tracker_metrics::metric_collection::aggregate::Sum; use torrust_tracker_metrics::metric_collection::MetricCollection; -use torrust_tracker_metrics::metric_name; -use torrust_udp_tracker_server::statistics::{ - self as udp_server_statistics, UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_IPS_BANNED_TOTAL, - UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL, - UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL, - UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL, UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, -}; +use torrust_udp_tracker_server::statistics::{self as udp_server_statistics}; use super::metrics::TorrentsMetrics; use crate::statistics::metrics::ProtocolMetrics; @@ -109,26 +101,26 @@ async fn get_protocol_metrics( tcp6_announces_handled: http_stats.tcp6_announces_handled(), tcp6_scrapes_handled: http_stats.tcp6_scrapes_handled(), // UDP - udp_requests_aborted: udp_server_stats.udp_requests_aborted, - udp_requests_banned: udp_server_stats.udp_requests_banned, + udp_requests_aborted: udp_server_stats.udp_requests_aborted(), + udp_requests_banned: udp_server_stats.udp_requests_banned(), udp_banned_ips_total: udp_banned_ips_total as u64, - udp_avg_connect_processing_time_ns: udp_server_stats.udp_avg_connect_processing_time_ns, - udp_avg_announce_processing_time_ns: udp_server_stats.udp_avg_announce_processing_time_ns, - 
udp_avg_scrape_processing_time_ns: udp_server_stats.udp_avg_scrape_processing_time_ns, + udp_avg_connect_processing_time_ns: udp_server_stats.udp_avg_connect_processing_time_ns(), + udp_avg_announce_processing_time_ns: udp_server_stats.udp_avg_announce_processing_time_ns(), + udp_avg_scrape_processing_time_ns: udp_server_stats.udp_avg_scrape_processing_time_ns(), // UDPv4 - udp4_requests: udp_server_stats.udp4_requests, - udp4_connections_handled: udp_server_stats.udp4_connections_handled, - udp4_announces_handled: udp_server_stats.udp4_announces_handled, - udp4_scrapes_handled: udp_server_stats.udp4_scrapes_handled, - udp4_responses: udp_server_stats.udp4_responses, - udp4_errors_handled: udp_server_stats.udp4_errors_handled, + udp4_requests: udp_server_stats.udp4_requests(), + udp4_connections_handled: udp_server_stats.udp4_connections_handled(), + udp4_announces_handled: udp_server_stats.udp4_announces_handled(), + udp4_scrapes_handled: udp_server_stats.udp4_scrapes_handled(), + udp4_responses: udp_server_stats.udp4_responses(), + udp4_errors_handled: udp_server_stats.udp4_errors_handled(), // UDPv6 - udp6_requests: udp_server_stats.udp6_requests, - udp6_connections_handled: udp_server_stats.udp6_connections_handled, - udp6_announces_handled: udp_server_stats.udp6_announces_handled, - udp6_scrapes_handled: udp_server_stats.udp6_scrapes_handled, - udp6_responses: udp_server_stats.udp6_responses, - udp6_errors_handled: udp_server_stats.udp6_errors_handled, + udp6_requests: udp_server_stats.udp6_requests(), + udp6_connections_handled: udp_server_stats.udp6_connections_handled(), + udp6_announces_handled: udp_server_stats.udp6_announces_handled(), + udp6_scrapes_handled: udp_server_stats.udp6_scrapes_handled(), + udp6_responses: udp_server_stats.udp6_responses(), + udp6_errors_handled: udp_server_stats.udp6_errors_handled(), } } @@ -165,198 +157,30 @@ async fn get_protocol_metrics_from_labeled_metrics( // UDP - #[allow(clippy::cast_sign_loss)] - 
#[allow(clippy::cast_possible_truncation)] - let udp_requests_aborted = udp_server_stats - .metric_collection - .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &LabelSet::empty()) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp_requests_banned = udp_server_stats - .metric_collection - .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), &LabelSet::empty()) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp_banned_ips_total = udp_server_stats - .metric_collection - .sum(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &LabelSet::empty()) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp_avg_connect_processing_time_ns = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &[("request_kind", "connect")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp_avg_announce_processing_time_ns = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &[("request_kind", "announce")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp_avg_scrape_processing_time_ns = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &[("request_kind", "scrape")].into(), - ) - .unwrap_or_default() - .value() as u64; + let udp_requests_aborted = udp_server_stats.udp_requests_aborted(); + let udp_requests_banned = udp_server_stats.udp_requests_banned(); + let udp_banned_ips_total = udp_server_stats.udp_banned_ips_total(); + let 
udp_avg_connect_processing_time_ns = udp_server_stats.udp_avg_connect_processing_time_ns(); + let udp_avg_announce_processing_time_ns = udp_server_stats.udp_avg_announce_processing_time_ns(); + let udp_avg_scrape_processing_time_ns = udp_server_stats.udp_avg_scrape_processing_time_ns(); // UDPv4 - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp4_requests = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), - &[("server_binding_address_ip_family", "inet")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp4_connections_handled = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), - &[("server_binding_address_ip_family", "inet"), ("request_kind", "connect")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp4_announces_handled = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), - &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp4_scrapes_handled = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), - &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp4_responses = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), - &[("server_binding_address_ip_family", "inet")].into(), - ) - .unwrap_or_default() - .value() as u64; - - 
#[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp4_errors_handled = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), - &[("server_binding_address_ip_family", "inet")].into(), - ) - .unwrap_or_default() - .value() as u64; + let udp4_requests = udp_server_stats.udp4_requests(); + let udp4_connections_handled = udp_server_stats.udp4_connections_handled(); + let udp4_announces_handled = udp_server_stats.udp4_announces_handled(); + let udp4_scrapes_handled = udp_server_stats.udp4_scrapes_handled(); + let udp4_responses = udp_server_stats.udp4_responses(); + let udp4_errors_handled = udp_server_stats.udp4_errors_handled(); // UDPv6 - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp6_requests = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), - &[("server_binding_address_ip_family", "inet6")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp6_connections_handled = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), - &[("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp6_announces_handled = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), - &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp6_scrapes_handled = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), - 
&[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp6_responses = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), - &[("server_binding_address_ip_family", "inet6")].into(), - ) - .unwrap_or_default() - .value() as u64; - - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let udp6_errors_handled = udp_server_stats - .metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), - &[("server_binding_address_ip_family", "inet6")].into(), - ) - .unwrap_or_default() - .value() as u64; + let udp6_requests = udp_server_stats.udp6_requests(); + let udp6_connections_handled = udp_server_stats.udp6_connections_handled(); + let udp6_announces_handled = udp_server_stats.udp6_announces_handled(); + let udp6_scrapes_handled = udp_server_stats.udp6_scrapes_handled(); + let udp6_responses = udp_server_stats.udp6_responses(); + let udp6_errors_handled = udp_server_stats.udp6_errors_handled(); // For backward compatibility we keep the `tcp4_connections_handled` and // `tcp6_connections_handled` metrics. 
They don't make sense for the HTTP diff --git a/packages/udp-tracker-server/src/statistics/event/handler/error.rs b/packages/udp-tracker-server/src/statistics/event/handler/error.rs index 7bde032fe..d83a0584d 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/error.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/error.rs @@ -14,21 +14,9 @@ pub async fn handle_event( repository: &Repository, now: DurationSinceUnixEpoch, ) { - update_global_fixed_metrics(&connection_context, repository).await; update_extendable_metrics(&connection_context, opt_udp_request_kind, error_kind, repository, now).await; } -async fn update_global_fixed_metrics(connection_context: &ConnectionContext, repository: &Repository) { - match connection_context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - repository.increase_udp4_errors().await; - } - std::net::IpAddr::V6(_) => { - repository.increase_udp6_errors().await; - } - } -} - async fn update_extendable_metrics( connection_context: &ConnectionContext, opt_udp_request_kind: Option, @@ -149,6 +137,6 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_errors_handled, 1); + assert_eq!(stats.udp4_errors_handled(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs index fc701df75..19e410d5e 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs @@ -7,10 +7,6 @@ use crate::statistics::repository::Repository; use crate::statistics::UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL; pub async fn handle_event(context: ConnectionContext, stats_repository: &Repository, now: DurationSinceUnixEpoch) { - // Global fixed metrics - stats_repository.increase_udp_requests_aborted().await; - - // Extendable metrics match stats_repository 
.increase_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), @@ -58,7 +54,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_aborted, 1); + assert_eq!(stats.udp_requests_aborted(), 1); } #[tokio::test] @@ -81,6 +77,6 @@ mod tests { ) .await; let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_aborted, 1); + assert_eq!(stats.udp_requests_aborted(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs index 37b668227..af92636df 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs @@ -12,35 +12,6 @@ pub async fn handle_event( stats_repository: &Repository, now: DurationSinceUnixEpoch, ) { - // Global fixed metrics - match kind { - UdpRequestKind::Connect => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_connections().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_connections().await; - } - }, - UdpRequestKind::Announce { .. 
} => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_announces().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_announces().await; - } - }, - UdpRequestKind::Scrape => match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_scrapes().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_scrapes().await; - } - }, - } - - // Extendable metrics let mut label_set = LabelSet::from(context); label_set.upsert(label_name!("request_kind"), LabelValue::new(&kind.to_string())); match stats_repository @@ -90,7 +61,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_connections_handled, 1); + assert_eq!(stats.udp4_connections_handled(), 1); } #[tokio::test] @@ -118,7 +89,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_announces_handled, 1); + assert_eq!(stats.udp4_announces_handled(), 1); } #[tokio::test] @@ -144,7 +115,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_scrapes_handled, 1); + assert_eq!(stats.udp4_scrapes_handled(), 1); } #[tokio::test] @@ -170,7 +141,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_connections_handled, 1); + assert_eq!(stats.udp6_connections_handled(), 1); } #[tokio::test] @@ -198,7 +169,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_announces_handled, 1); + assert_eq!(stats.udp6_announces_handled(), 1); } #[tokio::test] @@ -224,6 +195,6 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_scrapes_handled, 1); + assert_eq!(stats.udp6_scrapes_handled(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs index ce6e179a3..8badfa137 100644 --- 
a/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs @@ -7,10 +7,6 @@ use crate::statistics::repository::Repository; use crate::statistics::UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL; pub async fn handle_event(context: ConnectionContext, stats_repository: &Repository, now: DurationSinceUnixEpoch) { - // Global fixed metrics - stats_repository.increase_udp_requests_banned().await; - - // Extendable metrics match stats_repository .increase_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), @@ -58,7 +54,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_banned, 1); + assert_eq!(stats.udp_requests_banned(), 1); } #[tokio::test] @@ -81,6 +77,6 @@ mod tests { ) .await; let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_banned, 1); + assert_eq!(stats.udp_requests_banned(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs index 89f306f6a..eced5a215 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs @@ -7,17 +7,6 @@ use crate::statistics::repository::Repository; use crate::statistics::UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL; pub async fn handle_event(context: ConnectionContext, stats_repository: &Repository, now: DurationSinceUnixEpoch) { - // Global fixed metrics - match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_requests().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_requests().await; - } - } - - // Extendable metrics match stats_repository .increase_counter( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), @@ -65,6 +54,6 @@ mod tests { 
let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_requests, 1); + assert_eq!(stats.udp4_requests(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs index 4e167a10e..7e05e483b 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -13,16 +13,6 @@ pub async fn handle_event( stats_repository: &Repository, now: DurationSinceUnixEpoch, ) { - // Global fixed metrics - match context.client_socket_addr().ip() { - std::net::IpAddr::V4(_) => { - stats_repository.increase_udp4_responses().await; - } - std::net::IpAddr::V6(_) => { - stats_repository.increase_udp6_responses().await; - } - } - let (result_label_value, kind_label_value) = match kind { UdpResponseKind::Ok { req_kind } => match req_kind { UdpRequestKind::Connect => { @@ -145,7 +135,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_responses, 1); + assert_eq!(stats.udp4_responses(), 1); } #[tokio::test] @@ -176,6 +166,6 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_responses, 1); + assert_eq!(stats.udp6_responses(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index ac6250872..8eba248d2 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -1,96 +1,296 @@ use serde::Serialize; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::aggregate::Sum; use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; +use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; +use 
crate::statistics::{ + UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_IPS_BANNED_TOTAL, UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, + UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL, + UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL, + UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, +}; + /// Metrics collected by the UDP tracker server. #[derive(Debug, PartialEq, Default, Serialize)] pub struct Metrics { + /// A collection of metrics. + pub metric_collection: MetricCollection, +} + +impl Metrics { + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn increase_counter( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.increment_counter(metric_name, labels, now) + } + + /// # Errors + /// + /// Returns an error if the metric does not exist and it cannot be created. + pub fn set_gauge( + &mut self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { + self.metric_collection.set_gauge(metric_name, labels, value, now) + } +} + +impl Metrics { // UDP /// Total number of UDP (UDP tracker) requests aborted. - pub udp_requests_aborted: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_requests_aborted(&self) -> u64 { + self.metric_collection + .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &LabelSet::empty()) + .unwrap_or_default() + .value() as u64 + } /// Total number of UDP (UDP tracker) requests banned. 
- pub udp_requests_banned: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_requests_banned(&self) -> u64 { + self.metric_collection + .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), &LabelSet::empty()) + .unwrap_or_default() + .value() as u64 + } /// Total number of banned IPs. - pub udp_banned_ips_total: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_banned_ips_total(&self) -> u64 { + self.metric_collection + .sum(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &LabelSet::empty()) + .unwrap_or_default() + .value() as u64 + } /// Average rounded time spent processing UDP connect requests. - pub udp_avg_connect_processing_time_ns: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_avg_connect_processing_time_ns(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "connect")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Average rounded time spent processing UDP announce requests. - pub udp_avg_announce_processing_time_ns: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_avg_announce_processing_time_ns(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "announce")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Average rounded time spent processing UDP scrape requests. 
- pub udp_avg_scrape_processing_time_ns: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_avg_scrape_processing_time_ns(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64 + } // UDPv4 /// Total number of UDP (UDP tracker) requests from IPv4 peers. - pub udp4_requests: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_requests(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Total number of UDP (UDP tracker) connections from IPv4 peers. - pub udp4_connections_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_connections_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "connect")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. - pub udp4_announces_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_announces_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. 
- pub udp4_scrapes_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_scrapes_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Total number of UDP (UDP tracker) responses from IPv4 peers. - pub udp4_responses: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_responses(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), + &[("server_binding_address_ip_family", "inet")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Total number of UDP (UDP tracker) `error` requests from IPv4 peers. - pub udp4_errors_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp4_errors_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), + &[("server_binding_address_ip_family", "inet")].into(), + ) + .unwrap_or_default() + .value() as u64 + } // UDPv6 /// Total number of UDP (UDP tracker) requests from IPv6 peers. - pub udp6_requests: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_requests(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), + &[("server_binding_address_ip_family", "inet6")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. 
- pub udp6_connections_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_connections_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. - pub udp6_announces_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_announces_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. - pub udp6_scrapes_handled: u64, + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_scrapes_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), + ) + .unwrap_or_default() + .value() as u64 + } /// Total number of UDP (UDP tracker) responses from IPv6 peers. - pub udp6_responses: u64, - - /// Total number of UDP (UDP tracker) `error` requests from IPv6 peers. - pub udp6_errors_handled: u64, - - /// A collection of metrics. - pub metric_collection: MetricCollection, -} - -impl Metrics { - /// # Errors - /// - /// Returns an error if the metric does not exist and it cannot be created. 
- pub fn increase_counter( - &mut self, - metric_name: &MetricName, - labels: &LabelSet, - now: DurationSinceUnixEpoch, - ) -> Result<(), Error> { - self.metric_collection.increment_counter(metric_name, labels, now) + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_responses(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), + &[("server_binding_address_ip_family", "inet6")].into(), + ) + .unwrap_or_default() + .value() as u64 } - /// # Errors - /// - /// Returns an error if the metric does not exist and it cannot be created. - pub fn set_gauge( - &mut self, - metric_name: &MetricName, - labels: &LabelSet, - value: f64, - now: DurationSinceUnixEpoch, - ) -> Result<(), Error> { - self.metric_collection.set_gauge(metric_name, labels, value, now) + /// Total number of UDP (UDP tracker) `error` requests from IPv6 peers. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp6_errors_handled(&self) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), + &[("server_binding_address_ip_family", "inet6")].into(), + ) + .unwrap_or_default() + .value() as u64 } } diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index 1a1db89c7..1851b78a8 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -34,70 +34,59 @@ impl Repository { self.stats.read().await } - pub async fn increase_udp_requests_aborted(&self) { + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// increase the counter. 
+ pub async fn increase_counter( + &self, + metric_name: &MetricName, + labels: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { let mut stats_lock = self.stats.write().await; - stats_lock.udp_requests_aborted += 1; - drop(stats_lock); - } - pub async fn increase_udp_requests_banned(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp_requests_banned += 1; - drop(stats_lock); - } + let result = stats_lock.increase_counter(metric_name, labels, now); - pub async fn increase_udp4_requests(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_requests += 1; drop(stats_lock); - } - pub async fn increase_udp4_connections(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_connections_handled += 1; - drop(stats_lock); + result } - pub async fn increase_udp4_announces(&self) { + /// # Errors + /// + /// This function will return an error if the metric collection fails to + /// set the gauge. + pub async fn set_gauge( + &self, + metric_name: &MetricName, + labels: &LabelSet, + value: f64, + now: DurationSinceUnixEpoch, + ) -> Result<(), Error> { let mut stats_lock = self.stats.write().await; - stats_lock.udp4_announces_handled += 1; - drop(stats_lock); - } - pub async fn increase_udp4_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_scrapes_handled += 1; - drop(stats_lock); - } + let result = stats_lock.set_gauge(metric_name, labels, value, now); - pub async fn increase_udp4_responses(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_responses += 1; drop(stats_lock); - } - pub async fn increase_udp4_errors(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp4_errors_handled += 1; - drop(stats_lock); + result } #[allow(clippy::cast_precision_loss)] #[allow(clippy::cast_possible_truncation)] #[allow(clippy::cast_sign_loss)] pub async fn recalculate_udp_avg_connect_processing_time_ns(&self, 
req_processing_time: Duration) -> f64 { - let mut stats_lock = self.stats.write().await; + let stats_lock = self.stats.write().await; let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_connections_handled = (stats_lock.udp4_connections_handled + stats_lock.udp6_connections_handled) as f64; + let udp_connections_handled = (stats_lock.udp4_connections_handled() + stats_lock.udp6_connections_handled()) as f64; - let previous_avg = stats_lock.udp_avg_connect_processing_time_ns; + let previous_avg = stats_lock.udp_avg_connect_processing_time_ns(); // Moving average: https://en.wikipedia.org/wiki/Moving_average let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_connections_handled; - stats_lock.udp_avg_connect_processing_time_ns = new_avg.ceil() as u64; - drop(stats_lock); new_avg @@ -107,19 +96,17 @@ impl Repository { #[allow(clippy::cast_possible_truncation)] #[allow(clippy::cast_sign_loss)] pub async fn recalculate_udp_avg_announce_processing_time_ns(&self, req_processing_time: Duration) -> f64 { - let mut stats_lock = self.stats.write().await; + let stats_lock = self.stats.write().await; let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_announces_handled = (stats_lock.udp4_announces_handled + stats_lock.udp6_announces_handled) as f64; + let udp_announces_handled = (stats_lock.udp4_announces_handled() + stats_lock.udp6_announces_handled()) as f64; - let previous_avg = stats_lock.udp_avg_announce_processing_time_ns; + let previous_avg = stats_lock.udp_avg_announce_processing_time_ns(); // Moving average: https://en.wikipedia.org/wiki/Moving_average let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_announces_handled; - stats_lock.udp_avg_announce_processing_time_ns = new_avg.ceil() as u64; - drop(stats_lock); new_avg @@ -129,95 +116,18 @@ impl Repository { #[allow(clippy::cast_possible_truncation)] #[allow(clippy::cast_sign_loss)] pub async fn 
recalculate_udp_avg_scrape_processing_time_ns(&self, req_processing_time: Duration) -> f64 { - let mut stats_lock = self.stats.write().await; + let stats_lock = self.stats.write().await; let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_scrapes_handled = (stats_lock.udp4_scrapes_handled + stats_lock.udp6_scrapes_handled) as f64; + let udp_scrapes_handled = (stats_lock.udp4_scrapes_handled() + stats_lock.udp6_scrapes_handled()) as f64; - let previous_avg = stats_lock.udp_avg_scrape_processing_time_ns; + let previous_avg = stats_lock.udp_avg_scrape_processing_time_ns(); // Moving average: https://en.wikipedia.org/wiki/Moving_average let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_scrapes_handled; - stats_lock.udp_avg_scrape_processing_time_ns = new_avg.ceil() as u64; - drop(stats_lock); new_avg } - - pub async fn increase_udp6_requests(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_requests += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_connections(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_connections_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_announces(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_announces_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_scrapes(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_scrapes_handled += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_responses(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_responses += 1; - drop(stats_lock); - } - - pub async fn increase_udp6_errors(&self) { - let mut stats_lock = self.stats.write().await; - stats_lock.udp6_errors_handled += 1; - drop(stats_lock); - } - - /// # Errors - /// - /// This function will return an error if the metric collection fails to - /// increase the counter. 
- pub async fn increase_counter( - &self, - metric_name: &MetricName, - labels: &LabelSet, - now: DurationSinceUnixEpoch, - ) -> Result<(), Error> { - let mut stats_lock = self.stats.write().await; - - let result = stats_lock.increase_counter(metric_name, labels, now); - - drop(stats_lock); - - result - } - - /// # Errors - /// - /// This function will return an error if the metric collection fails to - /// increase the counter. - pub async fn set_gauge( - &self, - metric_name: &MetricName, - labels: &LabelSet, - value: f64, - now: DurationSinceUnixEpoch, - ) -> Result<(), Error> { - let mut stats_lock = self.stats.write().await; - - let result = stats_lock.set_gauge(metric_name, labels, value, now); - - drop(stats_lock); - - result - } } diff --git a/packages/udp-tracker-server/src/statistics/services.rs b/packages/udp-tracker-server/src/statistics/services.rs index e6e5a28f3..0eac01270 100644 --- a/packages/udp-tracker-server/src/statistics/services.rs +++ b/packages/udp-tracker-server/src/statistics/services.rs @@ -39,8 +39,6 @@ use std::sync::Arc; use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; -use bittorrent_udp_tracker_core::services::banning::BanService; -use tokio::sync::RwLock; use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use crate::statistics::metrics::Metrics; @@ -63,38 +61,14 @@ pub struct TrackerMetrics { /// It returns all the [`TrackerMetrics`] pub async fn get_metrics( in_memory_torrent_repository: Arc, - ban_service: Arc>, stats_repository: Arc, ) -> TrackerMetrics { let torrents_metrics = in_memory_torrent_repository.get_aggregate_swarm_metadata().await; let stats = stats_repository.get_stats().await; - let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); TrackerMetrics { torrents_metrics, protocol_metrics: Metrics { - // UDP - udp_requests_aborted: stats.udp_requests_aborted, - udp_requests_banned: stats.udp_requests_banned, - udp_banned_ips_total: 
udp_banned_ips_total as u64, - udp_avg_connect_processing_time_ns: stats.udp_avg_connect_processing_time_ns, - udp_avg_announce_processing_time_ns: stats.udp_avg_announce_processing_time_ns, - udp_avg_scrape_processing_time_ns: stats.udp_avg_scrape_processing_time_ns, - // UDPv4 - udp4_requests: stats.udp4_requests, - udp4_connections_handled: stats.udp4_connections_handled, - udp4_announces_handled: stats.udp4_announces_handled, - udp4_scrapes_handled: stats.udp4_scrapes_handled, - udp4_responses: stats.udp4_responses, - udp4_errors_handled: stats.udp4_errors_handled, - // UDPv6 - udp6_requests: stats.udp6_requests, - udp6_connections_handled: stats.udp6_connections_handled, - udp6_announces_handled: stats.udp6_announces_handled, - udp6_scrapes_handled: stats.udp6_scrapes_handled, - udp6_responses: stats.udp6_responses, - udp6_errors_handled: stats.udp6_errors_handled, - // Extendable metrics metric_collection: stats.metric_collection.clone(), }, } @@ -106,9 +80,6 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::{self}; - use bittorrent_udp_tracker_core::services::banning::BanService; - use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; - use tokio::sync::RwLock; use torrust_tracker_primitives::swarm_metadata::AggregateActiveSwarmMetadata; use crate::statistics::describe_metrics; @@ -118,16 +89,10 @@ mod tests { #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); let stats_repository = Arc::new(Repository::new()); - let tracker_metrics = get_metrics( - in_memory_torrent_repository.clone(), - ban_service.clone(), - stats_repository.clone(), - ) - .await; + let tracker_metrics = get_metrics(in_memory_torrent_repository.clone(), stats_repository.clone()).await; 
assert_eq!( tracker_metrics, diff --git a/packages/udp-tracker-server/tests/server/contract.rs b/packages/udp-tracker-server/tests/server/contract.rs index 0d9540289..2745f3407 100644 --- a/packages/udp-tracker-server/tests/server/contract.rs +++ b/packages/udp-tracker-server/tests/server/contract.rs @@ -273,7 +273,7 @@ mod receiving_an_announce_request { .stats_repository .get_stats() .await - .udp_requests_banned; + .udp_requests_banned(); // This should return a timeout error match client.send(announce_request.into()).await { @@ -289,7 +289,7 @@ mod receiving_an_announce_request { .stats_repository .get_stats() .await - .udp_requests_banned; + .udp_requests_banned(); let udp_banned_ips_total_after = ban_service.read().await.get_banned_ips_total(); // UDP counter for banned requests should be increased by 1 From a5524825452e82a37c25462fd101d6d2023a36bb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 16 Jun 2025 17:28:22 +0100 Subject: [PATCH 727/802] refactor: [#1581] finished. Global metrics in API loaded from labeled metrics --- Cargo.lock | 1 - .../src/v1/context/stats/handlers.rs | 10 +-- .../src/v1/context/stats/routes.rs | 1 - packages/rest-tracker-api-core/Cargo.toml | 1 - .../src/statistics/services.rs | 81 +------------------ 5 files changed, 4 insertions(+), 90 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6f8215bbf..269f7a3a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4668,7 +4668,6 @@ dependencies = [ "torrust-tracker-swarm-coordination-registry", "torrust-tracker-test-helpers", "torrust-udp-tracker-server", - "tracing", ] [[package]] diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs index b907b861a..1b1f670a0 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/handlers.rs @@ -41,21 +41,13 @@ pub struct QueryParams { pub async fn 
get_stats_handler( State(state): State<( Arc, - Arc>, Arc, Arc, Arc, )>, params: Query, ) -> Response { - let metrics = get_metrics( - state.0.clone(), - state.1.clone(), - state.2.clone(), - state.3.clone(), - state.4.clone(), - ) - .await; + let metrics = get_metrics(state.0.clone(), state.1.clone(), state.2.clone(), state.3.clone()).await; match params.0.format { Some(format) => match format { diff --git a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs index c2a1466e0..2bf3776fd 100644 --- a/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs +++ b/packages/axum-rest-tracker-api-server/src/v1/context/stats/routes.rs @@ -18,7 +18,6 @@ pub fn add(prefix: &str, router: Router, http_api_container: &Arc, - ban_service: Arc>, tracker_core_stats_repository: Arc, http_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> TrackerMetrics { - let protocol_metrics_from_global_metrics = get_protocol_metrics( - ban_service.clone(), - http_stats_repository.clone(), - udp_server_stats_repository.clone(), - ) - .await; - - let protocol_metrics_from_labeled_metrics = - get_protocol_metrics_from_labeled_metrics(http_stats_repository.clone(), udp_server_stats_repository.clone()).await; - - // todo: - // We keep both metrics until we deploy to production and we can - // ensure that the protocol metrics from labeled metrics are correct. - // After that we can remove the `get_protocol_metrics` function and - // use only the `get_protocol_metrics_from_labeled_metrics` function. - // And also remove the code in repositories to generate the global metrics. 
- let protocol_metrics = if protocol_metrics_from_global_metrics == protocol_metrics_from_labeled_metrics { - protocol_metrics_from_labeled_metrics - } else { - tracing::warn!("The protocol metrics from global metrics and labeled metrics are different"); - tracing::warn!("Global metrics: {:?}", protocol_metrics_from_global_metrics); - tracing::warn!("Labeled metrics: {:?}", protocol_metrics_from_labeled_metrics); - protocol_metrics_from_global_metrics - }; - TrackerMetrics { torrents_metrics: get_torrents_metrics(in_memory_torrent_repository, tracker_core_stats_repository).await, - protocol_metrics, + protocol_metrics: get_protocol_metrics(http_stats_repository.clone(), udp_server_stats_repository.clone()).await, } } @@ -76,57 +50,9 @@ async fn get_torrents_metrics( torrents_metrics } -#[allow(deprecated)] -async fn get_protocol_metrics( - ban_service: Arc>, - http_stats_repository: Arc, - udp_server_stats_repository: Arc, -) -> ProtocolMetrics { - let udp_banned_ips_total = ban_service.read().await.get_banned_ips_total(); - let http_stats = http_stats_repository.get_stats().await; - let udp_server_stats = udp_server_stats_repository.get_stats().await; - - // For backward compatibility we keep the `tcp4_connections_handled` and - // `tcp6_connections_handled` metrics. They don't make sense for the HTTP - // tracker, but we keep them for now. In new major versions we should remove - // them. 
- - ProtocolMetrics { - // TCPv4 - tcp4_connections_handled: http_stats.tcp4_announces_handled() + http_stats.tcp4_scrapes_handled(), - tcp4_announces_handled: http_stats.tcp4_announces_handled(), - tcp4_scrapes_handled: http_stats.tcp4_scrapes_handled(), - // TCPv6 - tcp6_connections_handled: http_stats.tcp6_announces_handled() + http_stats.tcp6_scrapes_handled(), - tcp6_announces_handled: http_stats.tcp6_announces_handled(), - tcp6_scrapes_handled: http_stats.tcp6_scrapes_handled(), - // UDP - udp_requests_aborted: udp_server_stats.udp_requests_aborted(), - udp_requests_banned: udp_server_stats.udp_requests_banned(), - udp_banned_ips_total: udp_banned_ips_total as u64, - udp_avg_connect_processing_time_ns: udp_server_stats.udp_avg_connect_processing_time_ns(), - udp_avg_announce_processing_time_ns: udp_server_stats.udp_avg_announce_processing_time_ns(), - udp_avg_scrape_processing_time_ns: udp_server_stats.udp_avg_scrape_processing_time_ns(), - // UDPv4 - udp4_requests: udp_server_stats.udp4_requests(), - udp4_connections_handled: udp_server_stats.udp4_connections_handled(), - udp4_announces_handled: udp_server_stats.udp4_announces_handled(), - udp4_scrapes_handled: udp_server_stats.udp4_scrapes_handled(), - udp4_responses: udp_server_stats.udp4_responses(), - udp4_errors_handled: udp_server_stats.udp4_errors_handled(), - // UDPv6 - udp6_requests: udp_server_stats.udp6_requests(), - udp6_connections_handled: udp_server_stats.udp6_connections_handled(), - udp6_announces_handled: udp_server_stats.udp6_announces_handled(), - udp6_scrapes_handled: udp_server_stats.udp6_scrapes_handled(), - udp6_responses: udp_server_stats.udp6_responses(), - udp6_errors_handled: udp_server_stats.udp6_errors_handled(), - } -} - #[allow(deprecated)] #[allow(clippy::too_many_lines)] -async fn get_protocol_metrics_from_labeled_metrics( +async fn get_protocol_metrics( http_stats_repository: Arc, udp_server_stats_repository: Arc, ) -> ProtocolMetrics { @@ -307,7 +233,7 @@ mod tests { let 
tracker_core_container = TrackerCoreContainer::initialize_from(&core_config, &swarm_coordination_registry_container.clone()); - let ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); + let _ban_service = Arc::new(RwLock::new(BanService::new(MAX_CONNECTION_ID_ERRORS_PER_IP))); // HTTP core stats let http_core_broadcaster = Broadcaster::default(); @@ -326,7 +252,6 @@ mod tests { let tracker_metrics = get_metrics( tracker_core_container.in_memory_torrent_repository.clone(), - ban_service.clone(), tracker_core_container.stats_repository.clone(), http_stats_repository.clone(), udp_server_stats_repository.clone(), From 0d9f88337a5117fba523e7f2cb84a70d46af9444 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 17 Jun 2025 08:06:25 +0100 Subject: [PATCH 728/802] refactor(metrics): [#1580] convert Sum trait to use associated types for mathematically correct return types - Replace AggregateValue return type with associated Output type in Sum trait - Counter metrics now return u64 (preserving integer precision) - Gauge metrics now return f64 (avoiding unnecessary wrapper type) - Update all test cases to expect primitive types instead of AggregateValue - Convert primitive results to AggregateValue at collection level for backward compatibility - Use proper floating-point comparison in gauge tests with epsilon tolerance This change ensures each aggregate function returns the mathematically appropriate type while maintaining API compatibility for metric collections. 
--- packages/metrics/src/metric/aggregate/sum.rs | 81 +++++++++---------- .../src/metric_collection/aggregate.rs | 11 ++- 2 files changed, 47 insertions(+), 45 deletions(-) diff --git a/packages/metrics/src/metric/aggregate/sum.rs b/packages/metrics/src/metric/aggregate/sum.rs index f08ea7d55..30c2819b7 100644 --- a/packages/metrics/src/metric/aggregate/sum.rs +++ b/packages/metrics/src/metric/aggregate/sum.rs @@ -1,37 +1,34 @@ -use crate::aggregate::AggregateValue; use crate::counter::Counter; use crate::gauge::Gauge; use crate::label::LabelSet; use crate::metric::Metric; pub trait Sum { - fn sum(&self, label_set_criteria: &LabelSet) -> AggregateValue; + type Output; + fn sum(&self, label_set_criteria: &LabelSet) -> Self::Output; } impl Sum for Metric { - #[allow(clippy::cast_precision_loss)] - fn sum(&self, label_set_criteria: &LabelSet) -> AggregateValue { - let sum: f64 = self - .sample_collection + type Output = u64; + + fn sum(&self, label_set_criteria: &LabelSet) -> Self::Output { + self.sample_collection .iter() .filter(|(label_set, _measurement)| label_set.matches(label_set_criteria)) - .map(|(_label_set, measurement)| measurement.value().primitive() as f64) - .sum(); - - sum.into() + .map(|(_label_set, measurement)| measurement.value().primitive()) + .sum() } } impl Sum for Metric { - fn sum(&self, label_set_criteria: &LabelSet) -> AggregateValue { - let sum: f64 = self - .sample_collection + type Output = f64; + + fn sum(&self, label_set_criteria: &LabelSet) -> Self::Output { + self.sample_collection .iter() .filter(|(label_set, _measurement)| label_set.matches(label_set_criteria)) .map(|(_label_set, measurement)| measurement.value().primitive()) - .sum(); - - sum.into() + .sum() } } @@ -40,7 +37,6 @@ mod tests { use torrust_tracker_primitives::DurationSinceUnixEpoch; - use crate::aggregate::AggregateValue; use crate::counter::Counter; use crate::gauge::Gauge; use crate::label::LabelSet; @@ -83,14 +79,14 @@ mod tests { } } - fn counter_cases() -> 
Vec<(Metric, LabelSet, AggregateValue)> { + fn counter_cases() -> Vec<(Metric, LabelSet, u64)> { // (metric, label set criteria, expected_aggregate_value) vec![ // Metric with one sample without label set ( MetricBuilder::default().with_sample(1.into(), &LabelSet::empty()).build(), LabelSet::empty(), - 1.0.into(), + 1, ), // Metric with one sample with a label set ( @@ -98,7 +94,7 @@ mod tests { .with_sample(1.into(), &[("l1", "l1_value")].into()) .build(), [("l1", "l1_value")].into(), - 1.0.into(), + 1, ), // Metric with two samples, different label sets, sum all ( @@ -107,7 +103,7 @@ mod tests { .with_sample(2.into(), &[("l2", "l2_value")].into()) .build(), LabelSet::empty(), - 3.0.into(), + 3, ), // Metric with two samples, different label sets, sum one ( @@ -116,7 +112,7 @@ mod tests { .with_sample(2.into(), &[("l2", "l2_value")].into()) .build(), [("l1", "l1_value")].into(), - 1.0.into(), + 1, ), // Metric with two samples, same label key, different label values, sum by key ( @@ -125,7 +121,7 @@ mod tests { .with_sample(2.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) .build(), [("l1", "l1_value")].into(), - 3.0.into(), + 3, ), // Metric with two samples, different label values, sum by subkey ( @@ -134,17 +130,17 @@ mod tests { .with_sample(2.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) .build(), [("la", "la_value")].into(), - 1.0.into(), + 1, ), // Edge: Metric with no samples at all - (MetricBuilder::default().build(), LabelSet::empty(), 0.0.into()), + (MetricBuilder::default().build(), LabelSet::empty(), 0), // Edge: Metric with samples but no matching labels ( MetricBuilder::default() .with_sample(5.into(), &[("foo", "bar")].into()) .build(), [("not", "present")].into(), - 0.0.into(), + 0, ), // Edge: Metric with zero value ( @@ -152,7 +148,7 @@ mod tests { .with_sample(0.into(), &[("l3", "l3_value")].into()) .build(), [("l3", "l3_value")].into(), - 0.0.into(), + 0, ), // Edge: Metric with a very large value ( @@ -160,20 +156,19 @@ 
mod tests { .with_sample(u64::MAX.into(), &LabelSet::empty()) .build(), LabelSet::empty(), - #[allow(clippy::cast_precision_loss)] - (u64::MAX as f64).into(), + u64::MAX, ), ] } - fn gauge_cases() -> Vec<(Metric, LabelSet, AggregateValue)> { + fn gauge_cases() -> Vec<(Metric, LabelSet, f64)> { // (metric, label set criteria, expected_aggregate_value) vec![ // Metric with one sample without label set ( MetricBuilder::default().with_sample(1.0.into(), &LabelSet::empty()).build(), LabelSet::empty(), - 1.0.into(), + 1.0, ), // Metric with one sample with a label set ( @@ -181,7 +176,7 @@ mod tests { .with_sample(1.0.into(), &[("l1", "l1_value")].into()) .build(), [("l1", "l1_value")].into(), - 1.0.into(), + 1.0, ), // Metric with two samples, different label sets, sum all ( @@ -190,7 +185,7 @@ mod tests { .with_sample(2.0.into(), &[("l2", "l2_value")].into()) .build(), LabelSet::empty(), - 3.0.into(), + 3.0, ), // Metric with two samples, different label sets, sum one ( @@ -199,7 +194,7 @@ mod tests { .with_sample(2.0.into(), &[("l2", "l2_value")].into()) .build(), [("l1", "l1_value")].into(), - 1.0.into(), + 1.0, ), // Metric with two samples, same label key, different label values, sum by key ( @@ -208,7 +203,7 @@ mod tests { .with_sample(2.0.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) .build(), [("l1", "l1_value")].into(), - 3.0.into(), + 3.0, ), // Metric with two samples, different label values, sum by subkey ( @@ -217,17 +212,17 @@ mod tests { .with_sample(2.0.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) .build(), [("la", "la_value")].into(), - 1.0.into(), + 1.0, ), // Edge: Metric with no samples at all - (MetricBuilder::default().build(), LabelSet::empty(), 0.0.into()), + (MetricBuilder::default().build(), LabelSet::empty(), 0.0), // Edge: Metric with samples but no matching labels ( MetricBuilder::default() .with_sample(5.0.into(), &[("foo", "bar")].into()) .build(), [("not", "present")].into(), - 0.0.into(), + 0.0, ), // Edge: 
Metric with zero value ( @@ -235,7 +230,7 @@ mod tests { .with_sample(0.0.into(), &[("l3", "l3_value")].into()) .build(), [("l3", "l3_value")].into(), - 0.0.into(), + 0.0, ), // Edge: Metric with negative values ( @@ -244,7 +239,7 @@ mod tests { .with_sample(3.0.into(), &[("l5", "l5_value")].into()) .build(), LabelSet::empty(), - 1.0.into(), + 1.0, ), // Edge: Metric with a very large value ( @@ -252,7 +247,7 @@ mod tests { .with_sample(f64::MAX.into(), &LabelSet::empty()) .build(), LabelSet::empty(), - f64::MAX.into(), + f64::MAX, ), ] } @@ -274,8 +269,8 @@ mod tests { for (idx, (metric, criteria, expected_value)) in gauge_cases().iter().enumerate() { let sum = metric.sum(criteria); - assert_eq!( - sum, *expected_value, + assert!( + (sum - expected_value).abs() <= f64::EPSILON, "at case {idx}, expected sum to be {expected_value}, got {sum}" ); } diff --git a/packages/metrics/src/metric_collection/aggregate.rs b/packages/metrics/src/metric_collection/aggregate.rs index 7fd744d92..a1afa30da 100644 --- a/packages/metrics/src/metric_collection/aggregate.rs +++ b/packages/metrics/src/metric_collection/aggregate.rs @@ -22,13 +22,20 @@ impl Sum for MetricCollection { impl Sum for MetricKindCollection { fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { - self.metrics.get(metric_name).map(|metric| metric.sum(label_set_criteria)) + self.metrics.get(metric_name).map(|metric| { + let sum: u64 = metric.sum(label_set_criteria); + #[allow(clippy::cast_precision_loss)] + AggregateValue::new(sum as f64) + }) } } impl Sum for MetricKindCollection { fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { - self.metrics.get(metric_name).map(|metric| metric.sum(label_set_criteria)) + self.metrics.get(metric_name).map(|metric| { + let sum: f64 = metric.sum(label_set_criteria); + AggregateValue::new(sum) + }) } } From db6b491edc2ecf219a88fa6b85c9e6de100520e1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 17 Jun 2025 
08:14:12 +0100 Subject: [PATCH 729/802] refactor(metrics): [#1580] add associated types to collection-level Sum trait - Convert collection Sum trait from fixed return type to associated Output type - MetricKindCollection now returns Option preserving integer precision - MetricKindCollection now returns Option for direct float access - MetricCollection maintains Option for backward compatibility - Simplify implementation by directly delegating to metric-level sum methods - Remove intermediate conversions in metric kind collections This completes the associated types pattern across both metric-level and collection-level Sum traits, allowing each implementation to return the most mathematically appropriate type while maintaining API compatibility. --- .../src/metric_collection/aggregate.rs | 35 +++++++++++-------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/packages/metrics/src/metric_collection/aggregate.rs b/packages/metrics/src/metric_collection/aggregate.rs index a1afa30da..8bda278d4 100644 --- a/packages/metrics/src/metric_collection/aggregate.rs +++ b/packages/metrics/src/metric_collection/aggregate.rs @@ -7,35 +7,40 @@ use crate::metric::MetricName; use crate::metric_collection::{MetricCollection, MetricKindCollection}; pub trait Sum { - fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option; + type Output; + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Self::Output; } impl Sum for MetricCollection { - fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { + type Output = Option; + + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Self::Output { if let Some(value) = self.counters.sum(metric_name, label_set_criteria) { - return Some(value); + #[allow(clippy::cast_precision_loss)] + return Some(AggregateValue::new(value as f64)); + } + + if let Some(value) = self.gauges.sum(metric_name, label_set_criteria) { + return 
Some(AggregateValue::new(value)); } - self.gauges.sum(metric_name, label_set_criteria) + None } } impl Sum for MetricKindCollection { - fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { - self.metrics.get(metric_name).map(|metric| { - let sum: u64 = metric.sum(label_set_criteria); - #[allow(clippy::cast_precision_loss)] - AggregateValue::new(sum as f64) - }) + type Output = Option; + + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Self::Output { + self.metrics.get(metric_name).map(|metric| metric.sum(label_set_criteria)) } } impl Sum for MetricKindCollection { - fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { - self.metrics.get(metric_name).map(|metric| { - let sum: f64 = metric.sum(label_set_criteria); - AggregateValue::new(sum) - }) + type Output = Option; + + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Self::Output { + self.metrics.get(metric_name).map(|metric| metric.sum(label_set_criteria)) } } From 00ac210a90258afc2e5ee06368bb90e9a045731d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 17 Jun 2025 08:40:03 +0100 Subject: [PATCH 730/802] refactor(metrics): [#1580] remove AggregateValue wrapper, return primitive types from aggregates - Remove AggregateValue struct and its entire module from metrics package - Simplify Sum trait in metric collections to return Option directly - Update MetricKindCollection implementations to cast counter values to f64 - Remove AggregateValue dependencies from http-tracker-core, udp-tracker-core, and udp-tracker-server - Eliminate unnecessary wrapper overhead in aggregate operations - Maintain backward compatibility by converting all aggregate results to f64 This change completes the metrics package refactoring by removing the generic AggregateValue wrapper that added no value when aggregate functions can return mathematically appropriate primitive types directly. 
--- .../src/statistics/metrics.rs | 12 +- packages/metrics/src/aggregate.rs | 143 ------------------ packages/metrics/src/lib.rs | 1 - .../src/metric_collection/aggregate.rs | 34 ++--- .../src/statistics/metrics.rs | 18 +-- .../src/statistics/metrics.rs | 54 +++---- 6 files changed, 42 insertions(+), 220 deletions(-) delete mode 100644 packages/metrics/src/aggregate.rs diff --git a/packages/http-tracker-core/src/statistics/metrics.rs b/packages/http-tracker-core/src/statistics/metrics.rs index 05acea937..6aede8359 100644 --- a/packages/http-tracker-core/src/statistics/metrics.rs +++ b/packages/http-tracker-core/src/statistics/metrics.rs @@ -53,8 +53,7 @@ impl Metrics { &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of TCP (HTTP tracker) `scrape` requests from IPv4 peers. @@ -67,8 +66,7 @@ impl Metrics { &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of TCP (HTTP tracker) `announce` requests from IPv6 peers. @@ -81,8 +79,7 @@ impl Metrics { &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of TCP (HTTP tracker) `scrape` requests from IPv6 peers. 
@@ -95,7 +92,6 @@ impl Metrics { &metric_name!(HTTP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } } diff --git a/packages/metrics/src/aggregate.rs b/packages/metrics/src/aggregate.rs deleted file mode 100644 index 39b760fca..000000000 --- a/packages/metrics/src/aggregate.rs +++ /dev/null @@ -1,143 +0,0 @@ -use derive_more::Display; - -#[derive(Debug, Display, Clone, Copy, PartialEq, Default)] -pub struct AggregateValue(f64); - -impl AggregateValue { - #[must_use] - pub fn new(value: f64) -> Self { - Self(value) - } - - #[must_use] - pub fn value(&self) -> f64 { - self.0 - } -} - -impl From for AggregateValue { - fn from(value: f64) -> Self { - Self(value) - } -} - -impl From for f64 { - fn from(value: AggregateValue) -> Self { - value.0 - } -} - -#[cfg(test)] -mod tests { - use approx::assert_relative_eq; - - use super::*; - - #[test] - fn it_should_be_created_with_new() { - let value = AggregateValue::new(42.5); - assert_relative_eq!(value.value(), 42.5); - } - - #[test] - fn it_should_return_the_inner_value() { - let value = AggregateValue::new(123.456); - assert_relative_eq!(value.value(), 123.456); - } - - #[test] - fn it_should_handle_zero_value() { - let value = AggregateValue::new(0.0); - assert_relative_eq!(value.value(), 0.0); - } - - #[test] - fn it_should_handle_negative_values() { - let value = AggregateValue::new(-42.5); - assert_relative_eq!(value.value(), -42.5); - } - - #[test] - fn it_should_handle_infinity() { - let value = AggregateValue::new(f64::INFINITY); - assert_relative_eq!(value.value(), f64::INFINITY); - } - - #[test] - fn it_should_handle_nan() { - let value = AggregateValue::new(f64::NAN); - assert!(value.value().is_nan()); - } - - #[test] - fn it_should_be_created_from_f64() { - let value: AggregateValue = 42.5.into(); - assert_relative_eq!(value.value(), 42.5); - } - - #[test] - fn 
it_should_convert_to_f64() { - let value = AggregateValue::new(42.5); - let f64_value: f64 = value.into(); - assert_relative_eq!(f64_value, 42.5); - } - - #[test] - fn it_should_be_displayable() { - let value = AggregateValue::new(42.5); - assert_eq!(value.to_string(), "42.5"); - } - - #[test] - fn it_should_be_debuggable() { - let value = AggregateValue::new(42.5); - let debug_string = format!("{value:?}"); - assert_eq!(debug_string, "AggregateValue(42.5)"); - } - - #[test] - fn it_should_be_cloneable() { - let value = AggregateValue::new(42.5); - let cloned_value = value; - assert_eq!(value, cloned_value); - } - - #[test] - fn it_should_be_copyable() { - let value = AggregateValue::new(42.5); - let copied_value = value; - assert_eq!(value, copied_value); - } - - #[test] - fn it_should_support_equality_comparison() { - let value1 = AggregateValue::new(42.5); - let value2 = AggregateValue::new(42.5); - let value3 = AggregateValue::new(43.0); - - assert_eq!(value1, value2); - assert_ne!(value1, value3); - } - - #[test] - fn it_should_handle_special_float_values_in_equality() { - let nan1 = AggregateValue::new(f64::NAN); - let nan2 = AggregateValue::new(f64::NAN); - let infinity = AggregateValue::new(f64::INFINITY); - let neg_infinity = AggregateValue::new(f64::NEG_INFINITY); - - // NaN is not equal to itself in IEEE 754 - assert_ne!(nan1, nan2); - assert_eq!(infinity, AggregateValue::new(f64::INFINITY)); - assert_eq!(neg_infinity, AggregateValue::new(f64::NEG_INFINITY)); - assert_ne!(infinity, neg_infinity); - } - - #[test] - fn it_should_handle_conversion_roundtrip() { - let original_value = 42.5; - let aggregate_value = AggregateValue::from(original_value); - let converted_back: f64 = aggregate_value.into(); - assert_relative_eq!(original_value, converted_back); - } -} diff --git a/packages/metrics/src/lib.rs b/packages/metrics/src/lib.rs index c53e9dd02..997cd3c8c 100644 --- a/packages/metrics/src/lib.rs +++ b/packages/metrics/src/lib.rs @@ -1,4 +1,3 @@ -pub mod 
aggregate; pub mod counter; pub mod gauge; pub mod label; diff --git a/packages/metrics/src/metric_collection/aggregate.rs b/packages/metrics/src/metric_collection/aggregate.rs index 8bda278d4..62b2ca498 100644 --- a/packages/metrics/src/metric_collection/aggregate.rs +++ b/packages/metrics/src/metric_collection/aggregate.rs @@ -1,4 +1,3 @@ -use crate::aggregate::AggregateValue; use crate::counter::Counter; use crate::gauge::Gauge; use crate::label::LabelSet; @@ -7,21 +6,17 @@ use crate::metric::MetricName; use crate::metric_collection::{MetricCollection, MetricKindCollection}; pub trait Sum { - type Output; - fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Self::Output; + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option; } impl Sum for MetricCollection { - type Output = Option; - - fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Self::Output { + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { if let Some(value) = self.counters.sum(metric_name, label_set_criteria) { - #[allow(clippy::cast_precision_loss)] - return Some(AggregateValue::new(value as f64)); + return Some(value); } if let Some(value) = self.gauges.sum(metric_name, label_set_criteria) { - return Some(AggregateValue::new(value)); + return Some(value); } None @@ -29,17 +24,16 @@ impl Sum for MetricCollection { } impl Sum for MetricKindCollection { - type Output = Option; - - fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Self::Output { - self.metrics.get(metric_name).map(|metric| metric.sum(label_set_criteria)) + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { + #[allow(clippy::cast_precision_loss)] + self.metrics + .get(metric_name) + .map(|metric| metric.sum(label_set_criteria) as f64) } } impl Sum for MetricKindCollection { - type Output = Option; - - fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> 
Self::Output { + fn sum(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { self.metrics.get(metric_name).map(|metric| metric.sum(label_set_criteria)) } } @@ -81,10 +75,10 @@ mod tests { ) .unwrap(); - assert_eq!(collection.sum(&metric_name, &LabelSet::empty()), Some(2.0.into())); + assert_eq!(collection.sum(&metric_name, &LabelSet::empty()), Some(2.0)); assert_eq!( collection.sum(&metric_name, &(label_name!("label_1"), LabelValue::new("value_1")).into()), - Some(1.0.into()) + Some(1.0) ); } @@ -114,10 +108,10 @@ mod tests { ) .unwrap(); - assert_eq!(collection.sum(&metric_name, &LabelSet::empty()), Some(2.0.into())); + assert_eq!(collection.sum(&metric_name, &LabelSet::empty()), Some(2.0)); assert_eq!( collection.sum(&metric_name, &(label_name!("label_1"), LabelValue::new("value_1")).into()), - Some(1.0.into()) + Some(1.0) ); } } diff --git a/packages/udp-tracker-core/src/statistics/metrics.rs b/packages/udp-tracker-core/src/statistics/metrics.rs index 57838c66f..db83c1c1d 100644 --- a/packages/udp-tracker-core/src/statistics/metrics.rs +++ b/packages/udp-tracker-core/src/statistics/metrics.rs @@ -54,8 +54,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet"), ("request_kind", "connect")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. @@ -68,8 +67,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. 
@@ -82,8 +80,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. @@ -96,8 +93,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. @@ -110,8 +106,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. @@ -124,7 +119,6 @@ impl Metrics { &metric_name!(UDP_TRACKER_CORE_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } } diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index 8eba248d2..d3f273665 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -56,8 +56,7 @@ impl Metrics { pub fn udp_requests_aborted(&self) -> u64 { self.metric_collection .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &LabelSet::empty()) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) requests banned. 
@@ -67,8 +66,7 @@ impl Metrics { pub fn udp_requests_banned(&self) -> u64 { self.metric_collection .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), &LabelSet::empty()) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of banned IPs. @@ -78,8 +76,7 @@ impl Metrics { pub fn udp_banned_ips_total(&self) -> u64 { self.metric_collection .sum(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &LabelSet::empty()) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Average rounded time spent processing UDP connect requests. @@ -92,8 +89,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), &[("request_kind", "connect")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Average rounded time spent processing UDP announce requests. @@ -106,8 +102,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), &[("request_kind", "announce")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Average rounded time spent processing UDP scrape requests. @@ -120,8 +115,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), &[("request_kind", "scrape")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } // UDPv4 @@ -135,8 +129,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) connections from IPv4 peers. 
@@ -149,8 +142,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &[("server_binding_address_ip_family", "inet"), ("request_kind", "connect")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `announce` requests from IPv4 peers. @@ -163,8 +155,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &[("server_binding_address_ip_family", "inet"), ("request_kind", "announce")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `scrape` requests from IPv4 peers. @@ -177,8 +168,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &[("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) responses from IPv4 peers. @@ -191,8 +181,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &[("server_binding_address_ip_family", "inet")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `error` requests from IPv4 peers. @@ -205,8 +194,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &[("server_binding_address_ip_family", "inet")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } // UDPv6 @@ -220,8 +208,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &[("server_binding_address_ip_family", "inet6")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `connection` requests from IPv6 peers. 
@@ -234,8 +221,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &[("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `announce` requests from IPv6 peers. @@ -248,8 +234,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &[("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `scrape` requests from IPv6 peers. @@ -262,8 +247,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &[("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) responses from IPv6 peers. @@ -276,8 +260,7 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &[("server_binding_address_ip_family", "inet6")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } /// Total number of UDP (UDP tracker) `error` requests from IPv6 peers. 
@@ -290,7 +273,6 @@ impl Metrics { &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &[("server_binding_address_ip_family", "inet6")].into(), ) - .unwrap_or_default() - .value() as u64 + .unwrap_or_default() as u64 } } From dfd950d715f253ff4740b518564f62ec35977bdb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 17 Jun 2025 08:55:50 +0100 Subject: [PATCH 731/802] refactor(metrics): [#1580] reorganize metric collection aggregates into submodules - Move metric_collection/aggregate.rs to aggregate/sum.rs submodule - Create proper module structure for aggregate operations - Update import paths in http-tracker-core, udp-tracker-core, and udp-tracker-server - Change imports from `aggregate::Sum` to `aggregate::sum::Sum` - Maintain the same Sum trait functionality with cleaner module organization This reorganization prepares for potential future aggregate operations beyond just sum while keeping the existing Sum trait API intact. --- packages/http-tracker-core/src/statistics/metrics.rs | 2 +- packages/metrics/src/metric_collection/aggregate/mod.rs | 1 + .../src/metric_collection/{aggregate.rs => aggregate/sum.rs} | 2 +- packages/udp-tracker-core/src/statistics/metrics.rs | 2 +- packages/udp-tracker-server/src/statistics/metrics.rs | 2 +- 5 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 packages/metrics/src/metric_collection/aggregate/mod.rs rename packages/metrics/src/metric_collection/{aggregate.rs => aggregate/sum.rs} (98%) diff --git a/packages/http-tracker-core/src/statistics/metrics.rs b/packages/http-tracker-core/src/statistics/metrics.rs index 6aede8359..00d09b803 100644 --- a/packages/http-tracker-core/src/statistics/metrics.rs +++ b/packages/http-tracker-core/src/statistics/metrics.rs @@ -1,7 +1,7 @@ use serde::Serialize; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; -use torrust_tracker_metrics::metric_collection::aggregate::Sum; +use 
torrust_tracker_metrics::metric_collection::aggregate::sum::Sum; use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; diff --git a/packages/metrics/src/metric_collection/aggregate/mod.rs b/packages/metrics/src/metric_collection/aggregate/mod.rs new file mode 100644 index 000000000..dce785d95 --- /dev/null +++ b/packages/metrics/src/metric_collection/aggregate/mod.rs @@ -0,0 +1 @@ +pub mod sum; diff --git a/packages/metrics/src/metric_collection/aggregate.rs b/packages/metrics/src/metric_collection/aggregate/sum.rs similarity index 98% rename from packages/metrics/src/metric_collection/aggregate.rs rename to packages/metrics/src/metric_collection/aggregate/sum.rs index 62b2ca498..3285fa8f1 100644 --- a/packages/metrics/src/metric_collection/aggregate.rs +++ b/packages/metrics/src/metric_collection/aggregate/sum.rs @@ -47,7 +47,7 @@ mod tests { use crate::label::LabelValue; use crate::label_name; - use crate::metric_collection::aggregate::Sum; + use crate::metric_collection::aggregate::sum::Sum; #[test] fn type_counter_with_two_samples() { diff --git a/packages/udp-tracker-core/src/statistics/metrics.rs b/packages/udp-tracker-core/src/statistics/metrics.rs index db83c1c1d..98906a596 100644 --- a/packages/udp-tracker-core/src/statistics/metrics.rs +++ b/packages/udp-tracker-core/src/statistics/metrics.rs @@ -1,7 +1,7 @@ use serde::Serialize; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; -use torrust_tracker_metrics::metric_collection::aggregate::Sum; +use torrust_tracker_metrics::metric_collection::aggregate::sum::Sum; use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs 
b/packages/udp-tracker-server/src/statistics/metrics.rs index d3f273665..c50966bc6 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -1,7 +1,7 @@ use serde::Serialize; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; -use torrust_tracker_metrics::metric_collection::aggregate::Sum; +use torrust_tracker_metrics::metric_collection::aggregate::sum::Sum; use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; From 7df7d367d8da85122e0423e3521065ec602ee748 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 17 Jun 2025 09:06:19 +0100 Subject: [PATCH 732/802] docs(metrics): enhance README with comprehensive documentation and examples - Add detailed overview and key features section - Include quick start guide with practical usage examples - Document architecture with core components and type system - Add comprehensive development guide with building, testing, and coverage - Include performance considerations and compatibility notes - Add contributing guidelines and related projects - Transform from basic description to full developer documentation - Update cSpell.json with new technical terms (println, serde) This provides much better onboarding for developers and users of the metrics library. --- packages/metrics/README.md | 185 +++++++++++++++++++++++++++++++++-- packages/metrics/cSpell.json | 2 + 2 files changed, 177 insertions(+), 10 deletions(-) diff --git a/packages/metrics/README.md b/packages/metrics/README.md index 885d6fa45..9f3883fba 100644 --- a/packages/metrics/README.md +++ b/packages/metrics/README.md @@ -1,37 +1,202 @@ # Torrust Tracker Metrics -A library with the metrics types used by the [Torrust Tracker](https://github.com/torrust/torrust-tracker) packages. 
+A comprehensive metrics library providing type-safe metric collection, aggregation, and Prometheus export functionality for the [Torrust Tracker](https://github.com/torrust/torrust-tracker) ecosystem. + +## Overview + +This library offers a robust metrics system designed specifically for tracking and monitoring BitTorrent tracker performance. It provides type-safe metric collection with support for labels, time-series data, and multiple export formats including Prometheus. + +## Key Features + +- **Type-Safe Metrics**: Strongly typed `Counter` and `Gauge` metrics with compile-time guarantees +- **Label Support**: Rich labeling system for multi-dimensional metrics +- **Time-Series Data**: Built-in support for timestamped samples +- **Prometheus Export**: Native Prometheus format serialization +- **Aggregation Functions**: Sum operations with mathematically appropriate return types +- **JSON Serialization**: Full serde support for all metric types +- **Memory Efficient**: Optimized data structures for high-performance scenarios + +## Quick Start + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +torrust-tracker-metrics = "3.0.0" +``` + +### Basic Usage + +```rust +use torrust_tracker_metrics::{ + metric_collection::MetricCollection, + label::{LabelSet, LabelValue}, + metric_name, label_name, +}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +// Create a metric collection +let mut metrics = MetricCollection::default(); + +// Define labels +let labels: LabelSet = [ + (label_name!("server"), LabelValue::new("tracker-01")), + (label_name!("protocol"), LabelValue::new("http")), +].into(); + +// Record metrics +let time = DurationSinceUnixEpoch::from_secs(1234567890); +metrics.increment_counter( + &metric_name!("requests_total"), + &labels, + time, +)?; + +metrics.set_gauge( + &metric_name!("active_connections"), + &labels, + 42.0, + time, +)?; + +// Export to Prometheus format +let prometheus_output = metrics.to_prometheus(); +println!("{}", 
prometheus_output); +``` + +### Metric Aggregation + +```rust +use torrust_tracker_metrics::metric_collection::aggregate::Sum; + +// Sum all counter values matching specific labels +let total_requests = metrics.sum( + &metric_name!("requests_total"), + &[("server", "tracker-01")].into(), +); + +println!("Total requests: {:?}", total_requests); +``` + +## Architecture + +### Core Components + +- **`Counter`**: Monotonically increasing integer values (u64) +- **`Gauge`**: Arbitrary floating-point values that can increase or decrease (f64) +- **`Metric`**: Generic metric container with metadata (name, description, unit) +- **`MetricCollection`**: Type-safe collection managing both counters and gauges +- **`LabelSet`**: Key-value pairs for metric dimensionality +- **`Sample`**: Timestamped metric values with associated labels + +### Type System + +The library uses Rust's type system to ensure metric safety: + +```rust +// Counter operations return u64 +let counter_sum: Option = counter_collection.sum(&name, &labels); + +// Gauge operations return f64 +let gauge_sum: Option = gauge_collection.sum(&name, &labels); + +// Mixed collections convert to f64 for compatibility +let mixed_sum: Option = metric_collection.sum(&name, &labels); +``` + +### Module Structure + +```output +src/ +├── counter.rs # Counter metric type +├── gauge.rs # Gauge metric type +├── metric/ # Generic metric container +│ ├── mod.rs +│ ├── name.rs # Metric naming +│ ├── description.rs # Metric descriptions +│ └── aggregate/ # Metric-level aggregations +├── metric_collection/ # Collection management +│ ├── mod.rs +│ └── aggregate/ # Collection-level aggregations +├── label/ # Label system +│ ├── name.rs # Label names +│ ├── value.rs # Label values +│ └── set.rs # Label collections +├── sample.rs # Timestamped values +├── sample_collection.rs # Sample management +├── prometheus.rs # Prometheus export +└── unit.rs # Measurement units +``` ## Documentation -[Crate 
documentation](https://docs.rs/torrust-tracker-metrics). +- [Crate documentation](https://docs.rs/torrust-tracker-metrics) +- [API Reference](https://docs.rs/torrust-tracker-metrics/latest/torrust_tracker_metrics/) + +## Development -## Testing +### Code Coverage -Run coverage report: +Run basic coverage report: ```console cargo llvm-cov --package torrust-tracker-metrics ``` -Generate LCOV report with `llvm-cov` (for Visual Studio Code extension): +Generate LCOV report (for IDE integration): ```console mkdir -p ./.coverage -cargo llvm-cov --package torrust-tracker-metrics --lcov --output-path=./.coverage/lcov.info +cargo llvm-cov --package torrust-tracker-metrics --lcov --output-path=./.coverage/lcov.info ``` -Generate HTML report with `llvm-cov`: +Generate detailed HTML coverage report: + +Generate detailed HTML coverage report: ```console mkdir -p ./.coverage -cargo llvm-cov --package torrust-tracker-metrics --html --output-dir ./.coverage +cargo llvm-cov --package torrust-tracker-metrics --html --output-dir ./.coverage ``` +Open the coverage report in your browser: + +```console +open ./.coverage/index.html # macOS +xdg-open ./.coverage/index.html # Linux +``` + +## Performance Considerations + +- **Memory Usage**: Metrics are stored in-memory with efficient HashMap-based collections +- **Label Cardinality**: Be mindful of label combinations as they create separate time series +- **Aggregation**: Sum operations are optimized for both single-type and mixed collections + +## Compatibility + +This library is designed to be compatible with the standard Rust [metrics](https://crates.io/crates/metrics) crate ecosystem where possible. + +## Contributing + +We welcome contributions! Please see the main [Torrust Tracker repository](https://github.com/torrust/torrust-tracker) for contribution guidelines. 
+ +### Reporting Issues + +- [Bug Reports](https://github.com/torrust/torrust-tracker/issues/new?template=bug_report.md) +- [Feature Requests](https://github.com/torrust/torrust-tracker/issues/new?template=feature_request.md) + ## Acknowledgements -We copied some parts like units or function names and signatures from the crate [metrics](https://crates.io/crates/metrics) because we wanted to make it compatible as much as possible with it. In the future, we may consider using the `metrics` crate directly instead of maintaining our own version. +This library draws inspiration from the Rust [metrics](https://crates.io/crates/metrics) crate, incorporating compatible APIs and naming conventions where possible. We may consider migrating to the standard metrics crate in future versions while maintaining our specialized functionality. + +Special thanks to the Rust metrics ecosystem contributors for establishing excellent patterns for metrics collection and export. ## License -The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). +This project is licensed under the [GNU AFFERO GENERAL PUBLIC LICENSE v3.0](./LICENSE). 
+ +## Related Projects + +- [Torrust Tracker](https://github.com/torrust/torrust-tracker) - The main BitTorrent tracker +- [metrics](https://crates.io/crates/metrics) - Standard Rust metrics facade +- [prometheus](https://crates.io/crates/prometheus) - Prometheus client library diff --git a/packages/metrics/cSpell.json b/packages/metrics/cSpell.json index 1a2c13d2e..f04cce9e3 100644 --- a/packages/metrics/cSpell.json +++ b/packages/metrics/cSpell.json @@ -6,7 +6,9 @@ "Kibibytes", "Mebibytes", "ñaca", + "println", "rstest", + "serde", "subsec", "Tebibytes", "thiserror" From d2e75e3f78f367e5f2829bbe5adfedc549fb24f5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 17 Jun 2025 10:57:11 +0100 Subject: [PATCH 733/802] refactor: [#1405] graceful shutdown for listeners Event listeners listen for the cancellation request instead of directly for the CTRL+C signal. This will allow implementing centralized policies for shutdown and alternative conditions. --- Cargo.lock | 8 +++++ Cargo.toml | 1 + packages/axum-http-tracker-server/Cargo.toml | 1 + .../src/environment.rs | 6 ++++ .../axum-http-tracker-server/src/server.rs | 5 +++- .../src/v1/handlers/announce.rs | 6 +++- .../src/v1/handlers/scrape.rs | 5 +++- packages/events/src/shutdown.rs | 0 packages/http-tracker-core/Cargo.toml | 1 + .../http-tracker-core/benches/helpers/util.rs | 5 +++- .../src/services/announce.rs | 5 +++- .../src/statistics/event/listener.rs | 23 +++++++------- packages/rest-tracker-api-core/Cargo.toml | 1 + .../src/statistics/services.rs | 5 +++- .../swarm-coordination-registry/Cargo.toml | 1 + .../src/statistics/event/listener.rs | 27 +++++++++-------- packages/tracker-core/Cargo.toml | 1 + .../src/statistics/event/listener.rs | 20 ++++++------- .../tracker-core/tests/common/test_env.rs | 5 ++++ packages/udp-tracker-core/Cargo.toml | 1 + .../src/statistics/event/listener.rs | 22 +++++++------- packages/udp-tracker-server/Cargo.toml | 1 + .../src/banning/event/listener.rs | 22 ++++++++------
.../udp-tracker-server/src/environment.rs | 9 ++++++ .../src/statistics/event/listener.rs | 22 +++++++------- src/app.rs | 12 ++++---- src/bootstrap/jobs/http_tracker_core.rs | 8 ++++- src/bootstrap/jobs/manager.rs | 30 +++++++++++++++++-- src/bootstrap/jobs/torrent_repository.rs | 8 ++++- src/bootstrap/jobs/tracker_core.rs | 8 ++++- src/bootstrap/jobs/udp_tracker_core.rs | 8 ++++- src/bootstrap/jobs/udp_tracker_server.rs | 11 +++++-- src/main.rs | 2 ++ 33 files changed, 206 insertions(+), 84 deletions(-) create mode 100644 packages/events/src/shutdown.rs diff --git a/Cargo.lock b/Cargo.lock index 269f7a3a2..b523c8b60 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -587,6 +587,7 @@ dependencies = [ "serde_json", "thiserror 2.0.12", "tokio", + "tokio-util", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-events", @@ -673,6 +674,7 @@ dependencies = [ "testcontainers", "thiserror 2.0.12", "tokio", + "tokio-util", "torrust-rest-tracker-api-client", "torrust-tracker-clock", "torrust-tracker-configuration", @@ -705,6 +707,7 @@ dependencies = [ "serde", "thiserror 2.0.12", "tokio", + "tokio-util", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-events", @@ -4565,6 +4568,7 @@ dependencies = [ "serde_bytes", "serde_repr", "tokio", + "tokio-util", "torrust-axum-server", "torrust-server-lib", "torrust-tracker-clock", @@ -4661,6 +4665,7 @@ dependencies = [ "bittorrent-tracker-core", "bittorrent-udp-tracker-core", "tokio", + "tokio-util", "torrust-tracker-configuration", "torrust-tracker-events", "torrust-tracker-metrics", @@ -4704,6 +4709,7 @@ dependencies = [ "serde_json", "thiserror 2.0.12", "tokio", + "tokio-util", "torrust-axum-health-check-api-server", "torrust-axum-http-tracker-server", "torrust-axum-rest-tracker-api-server", @@ -4851,6 +4857,7 @@ dependencies = [ "serde", "thiserror 2.0.12", "tokio", + "tokio-util", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-events", @@ -4909,6 +4916,7 @@ 
dependencies = [ "serde", "thiserror 2.0.12", "tokio", + "tokio-util", "torrust-server-lib", "torrust-tracker-clock", "torrust-tracker-configuration", diff --git a/Cargo.toml b/Cargo.toml index 976176155..dbc39bdf8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,6 +47,7 @@ serde = { version = "1", features = ["derive"] } serde_json = { version = "1", features = ["preserve_order"] } thiserror = "2.0.12" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio-util = "0.7.15" torrust-axum-health-check-api-server = { version = "3.0.0-develop", path = "packages/axum-health-check-api-server" } torrust-axum-http-tracker-server = { version = "3.0.0-develop", path = "packages/axum-http-tracker-server" } torrust-axum-rest-tracker-api-server = { version = "3.0.0-develop", path = "packages/axum-rest-tracker-api-server" } diff --git a/packages/axum-http-tracker-server/Cargo.toml b/packages/axum-http-tracker-server/Cargo.toml index fa195489c..eb2c2cad3 100644 --- a/packages/axum-http-tracker-server/Cargo.toml +++ b/packages/axum-http-tracker-server/Cargo.toml @@ -28,6 +28,7 @@ hyper = "1" reqwest = { version = "0", features = ["json"] } serde = { version = "1", features = ["derive"] } tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio-util = "0.7.15" torrust-axum-server = { version = "3.0.0-develop", path = "../axum-server" } torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } diff --git a/packages/axum-http-tracker-server/src/environment.rs b/packages/axum-http-tracker-server/src/environment.rs index 6e58c2cac..616973a0f 100644 --- a/packages/axum-http-tracker-server/src/environment.rs +++ b/packages/axum-http-tracker-server/src/environment.rs @@ -6,6 +6,7 @@ use bittorrent_primitives::info_hash::InfoHash; use bittorrent_tracker_core::container::TrackerCoreContainer; use 
futures::executor::block_on; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; @@ -21,6 +22,7 @@ pub struct Environment { pub registar: Registar, pub server: HttpServer, pub event_listener_job: Option>, + pub cancellation_token: CancellationToken, } impl Environment { @@ -59,6 +61,7 @@ impl Environment { registar: Registar::default(), server, event_listener_job: None, + cancellation_token: CancellationToken::new(), } } @@ -72,6 +75,7 @@ impl Environment { // Start the event listener let event_listener_job = run_event_listener( self.container.http_tracker_core_container.event_bus.receiver(), + self.cancellation_token.clone(), &self.container.http_tracker_core_container.stats_repository, ); @@ -87,6 +91,7 @@ impl Environment { registar: self.registar.clone(), server, event_listener_job: Some(event_listener_job), + cancellation_token: self.cancellation_token, } } } @@ -117,6 +122,7 @@ impl Environment { registar: Registar::default(), server, event_listener_job: None, + cancellation_token: self.cancellation_token, } } diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index ba0dd8c6e..2b43be0a9 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -256,6 +256,7 @@ mod tests { use bittorrent_http_tracker_core::statistics::event::listener::run_event_listener; use bittorrent_http_tracker_core::statistics::repository::Repository; use bittorrent_tracker_core::container::TrackerCoreContainer; + use tokio_util::sync::CancellationToken; use torrust_axum_server::tsl::make_rust_tls; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration}; @@ -265,6 +266,8 @@ mod tests { use crate::server::{HttpServer, Launcher}; pub fn 
initialize_container(configuration: &Configuration) -> HttpTrackerCoreContainer { + let cancellation_token = CancellationToken::new(); + let core_config = Arc::new(configuration.core.clone()); let http_trackers = configuration @@ -287,7 +290,7 @@ mod tests { let http_stats_event_sender = http_stats_event_bus.sender(); if configuration.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); + let _unused = run_event_listener(http_stats_event_bus.receiver(), cancellation_token, &http_stats_repository); } let swarm_coordination_registry_container = Arc::new(SwarmCoordinationRegistryContainer::initialize( diff --git a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs index e21a485cf..ce718cd30 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/announce.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/announce.rs @@ -123,6 +123,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::Configuration; use torrust_tracker_test_helpers::configuration; @@ -149,6 +150,9 @@ mod tests { } fn initialize_core_tracker_services(config: &Configuration) -> CoreHttpTrackerServices { + let cancellation_token = CancellationToken::new(); + + // Initialize the core tracker services with the provided configuration. 
let core_config = Arc::new(config.core.clone()); let database = initialize_database(&config.core); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); @@ -175,7 +179,7 @@ mod tests { let http_stats_event_sender = http_stats_event_bus.sender(); if config.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); + let _unused = run_event_listener(http_stats_event_bus.receiver(), cancellation_token, &http_stats_repository); } let announce_service = Arc::new(AnnounceService::new( diff --git a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs index b48d6e036..bdd4378f3 100644 --- a/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs +++ b/packages/axum-http-tracker-server/src/v1/handlers/scrape.rs @@ -97,6 +97,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_test_helpers::configuration; @@ -127,6 +128,8 @@ mod tests { } fn initialize_core_tracker_services(config: &Configuration) -> (CoreTrackerServices, CoreHttpTrackerServices) { + let cancellation_token = CancellationToken::new(); + let core_config = Arc::new(config.core.clone()); let in_memory_whitelist = Arc::new(InMemoryWhitelist::default()); let whitelist_authorization = Arc::new(WhitelistAuthorization::new(&config.core, &in_memory_whitelist.clone())); @@ -146,7 +149,7 @@ mod tests { let http_stats_event_sender = http_stats_event_bus.sender(); if config.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); + let _unused = 
run_event_listener(http_stats_event_bus.receiver(), cancellation_token, &http_stats_repository); } ( diff --git a/packages/events/src/shutdown.rs b/packages/events/src/shutdown.rs new file mode 100644 index 000000000..e69de29bb diff --git a/packages/http-tracker-core/Cargo.toml b/packages/http-tracker-core/Cargo.toml index 45af59baa..04a6c96b6 100644 --- a/packages/http-tracker-core/Cargo.toml +++ b/packages/http-tracker-core/Cargo.toml @@ -23,6 +23,7 @@ futures = "0" serde = "1.0.219" thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio-util = "0.7.15" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } diff --git a/packages/http-tracker-core/benches/helpers/util.rs b/packages/http-tracker-core/benches/helpers/util.rs index 414d3b40e..028d7c535 100644 --- a/packages/http-tracker-core/benches/helpers/util.rs +++ b/packages/http-tracker-core/benches/helpers/util.rs @@ -20,6 +20,7 @@ use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; use futures::future::BoxFuture; use mockall::mock; +use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_events::sender::SendError; use torrust_tracker_primitives::peer::Peer; @@ -42,6 +43,8 @@ pub fn initialize_core_tracker_services() -> (CoreTrackerServices, CoreHttpTrack } pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> (CoreTrackerServices, CoreHttpTrackerServices) { + let cancellation_token = CancellationToken::new(); + let core_config = Arc::new(config.core.clone()); let database = initialize_database(&config.core); let in_memory_torrent_repository = 
Arc::new(InMemoryTorrentRepository::default()); @@ -69,7 +72,7 @@ pub fn initialize_core_tracker_services_with_config(config: &Configuration) -> ( let http_stats_event_sender = http_stats_event_bus.sender(); if config.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); + let _unused = run_event_listener(http_stats_event_bus.receiver(), cancellation_token, &http_stats_repository); } ( diff --git a/packages/http-tracker-core/src/services/announce.rs b/packages/http-tracker-core/src/services/announce.rs index 8d12da713..08ac93f68 100644 --- a/packages/http-tracker-core/src/services/announce.rs +++ b/packages/http-tracker-core/src/services/announce.rs @@ -216,6 +216,7 @@ mod tests { use bittorrent_tracker_core::torrent::repository::in_memory::InMemoryTorrentRepository; use bittorrent_tracker_core::whitelist::authorization::WhitelistAuthorization; use bittorrent_tracker_core::whitelist::repository::in_memory::InMemoryWhitelist; + use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::{Configuration, Core}; use torrust_tracker_primitives::peer::Peer; use torrust_tracker_test_helpers::configuration; @@ -236,6 +237,8 @@ mod tests { } fn initialize_core_tracker_services_with_config(config: &Configuration) -> (CoreTrackerServices, CoreHttpTrackerServices) { + let cancellation_token = CancellationToken::new(); + let core_config = Arc::new(config.core.clone()); let database = initialize_database(&config.core); let in_memory_torrent_repository = Arc::new(InMemoryTorrentRepository::default()); @@ -263,7 +266,7 @@ mod tests { let http_stats_event_sender = http_stats_event_bus.sender(); if config.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); + let _unused = run_event_listener(http_stats_event_bus.receiver(), cancellation_token, &http_stats_repository); } ( diff --git 
a/packages/http-tracker-core/src/statistics/event/listener.rs b/packages/http-tracker-core/src/statistics/event/listener.rs index 6730d4c70..ff2937a59 100644 --- a/packages/http-tracker-core/src/statistics/event/listener.rs +++ b/packages/http-tracker-core/src/statistics/event/listener.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_clock::clock::Time; use torrust_tracker_events::receiver::RecvError; @@ -10,29 +11,29 @@ use crate::statistics::repository::Repository; use crate::{CurrentClock, HTTP_TRACKER_LOG_TARGET}; #[must_use] -pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { +pub fn run_event_listener( + receiver: Receiver, + cancellation_token: CancellationToken, + repository: &Arc, +) -> JoinHandle<()> { let stats_repository = repository.clone(); tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Starting HTTP tracker core event listener"); tokio::spawn(async move { - dispatch_events(receiver, stats_repository).await; + dispatch_events(receiver, cancellation_token, stats_repository).await; tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "HTTP tracker core event listener finished"); }) } -async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc) { - let shutdown_signal = tokio::signal::ctrl_c(); - - tokio::pin!(shutdown_signal); - +async fn dispatch_events(mut receiver: Receiver, cancellation_token: CancellationToken, stats_repository: Arc) { loop { tokio::select! 
{ biased; - _ = &mut shutdown_signal => { - tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Received Ctrl+C, shutting down HTTP tracker core event listener."); + () = cancellation_token.cancelled() => { + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Received cancellation request, shutting down HTTP tracker core event listener."); break; } @@ -42,11 +43,11 @@ async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc { match e { RecvError::Closed => { - tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Http core statistics receiver closed."); + tracing::info!(target: HTTP_TRACKER_LOG_TARGET, "Http tracker core statistics receiver closed."); break; } RecvError::Lagged(n) => { - tracing::warn!(target: HTTP_TRACKER_LOG_TARGET, "Http core statistics receiver lagged by {} events.", n); + tracing::warn!(target: HTTP_TRACKER_LOG_TARGET, "Http tracker core statistics receiver lagged by {} events.", n); } } } diff --git a/packages/rest-tracker-api-core/Cargo.toml b/packages/rest-tracker-api-core/Cargo.toml index cc8eda903..be6d493d7 100644 --- a/packages/rest-tracker-api-core/Cargo.toml +++ b/packages/rest-tracker-api-core/Cargo.toml @@ -18,6 +18,7 @@ bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "../http-trac bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "../udp-tracker-core" } tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio-util = "0.7.15" torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index 44c82bfea..a8132d4fd 100644 --- 
a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -210,6 +210,7 @@ mod tests { use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::MAX_CONNECTION_ID_ERRORS_PER_IP; use tokio::sync::RwLock; + use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::Configuration; use torrust_tracker_events::bus::SenderStatus; use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; @@ -224,6 +225,8 @@ mod tests { #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { + let cancellation_token = CancellationToken::new(); + let config = tracker_configuration(); let core_config = Arc::new(config.core.clone()); @@ -244,7 +247,7 @@ mod tests { )); if config.core.tracker_usage_statistics { - let _unused = run_event_listener(http_stats_event_bus.receiver(), &http_stats_repository); + let _unused = run_event_listener(http_stats_event_bus.receiver(), cancellation_token, &http_stats_repository); } // UDP server stats diff --git a/packages/swarm-coordination-registry/Cargo.toml b/packages/swarm-coordination-registry/Cargo.toml index 074562a47..45359ad81 100644 --- a/packages/swarm-coordination-registry/Cargo.toml +++ b/packages/swarm-coordination-registry/Cargo.toml @@ -24,6 +24,7 @@ futures = "0" serde = { version = "1.0.219", features = ["derive"] } thiserror = "2.0.12" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio-util = "0.7.15" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } diff --git a/packages/swarm-coordination-registry/src/statistics/event/listener.rs b/packages/swarm-coordination-registry/src/statistics/event/listener.rs index 
9ff707818..b578d1284 100644 --- a/packages/swarm-coordination-registry/src/statistics/event/listener.rs +++ b/packages/swarm-coordination-registry/src/statistics/event/listener.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_clock::clock::Time; use torrust_tracker_events::receiver::RecvError; @@ -10,29 +11,29 @@ use crate::statistics::repository::Repository; use crate::{CurrentClock, SWARM_COORDINATION_REGISTRY_LOG_TARGET}; #[must_use] -pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { +pub fn run_event_listener( + receiver: Receiver, + cancellation_token: CancellationToken, + repository: &Arc, +) -> JoinHandle<()> { let stats_repository = repository.clone(); - tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Starting torrent repository event listener"); + tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Starting swarm coordination registry event listener"); tokio::spawn(async move { - dispatch_events(receiver, stats_repository).await; + dispatch_events(receiver, cancellation_token, stats_repository).await; - tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Torrent repository listener finished"); + tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Swarm coordination registry listener finished"); }) } -async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc) { - let shutdown_signal = tokio::signal::ctrl_c(); - - tokio::pin!(shutdown_signal); - +async fn dispatch_events(mut receiver: Receiver, cancellation_token: CancellationToken, stats_repository: Arc) { loop { tokio::select! 
{ biased; - _ = &mut shutdown_signal => { - tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Received Ctrl+C, shutting down torrent repository event listener."); + () = cancellation_token.cancelled() => { + tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Received cancellation request, shutting down swarm coordination registry event listener."); break; } @@ -42,11 +43,11 @@ async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc { match e { RecvError::Closed => { - tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Torrent repository event receiver closed."); + tracing::info!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Swarm coordination registry event receiver closed."); break; } RecvError::Lagged(n) => { - tracing::warn!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Torrent repository event receiver lagged by {} events.", n); + tracing::warn!(target: SWARM_COORDINATION_REGISTRY_LOG_TARGET, "Swarm coordination registry event receiver lagged by {} events.", n); } } } diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml index f04a3b89b..dfc83e58e 100644 --- a/packages/tracker-core/Cargo.toml +++ b/packages/tracker-core/Cargo.toml @@ -27,6 +27,7 @@ serde = { version = "1", features = ["derive"] } serde_json = { version = "1", features = ["preserve_order"] } thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio-util = "0.7.15" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } diff --git a/packages/tracker-core/src/statistics/event/listener.rs b/packages/tracker-core/src/statistics/event/listener.rs index d3beaf41f..8d2d74c71 100644 --- a/packages/tracker-core/src/statistics/event/listener.rs +++ 
b/packages/tracker-core/src/statistics/event/listener.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_clock::clock::Time; use torrust_tracker_events::receiver::RecvError; use torrust_tracker_swarm_coordination_registry::event::receiver::Receiver; @@ -13,6 +14,7 @@ use crate::{CurrentClock, TRACKER_CORE_LOG_TARGET}; #[must_use] pub fn run_event_listener( receiver: Receiver, + cancellation_token: CancellationToken, repository: &Arc, db_downloads_metric_repository: &Arc, persistent_torrent_completed_stat: bool, @@ -20,37 +22,35 @@ pub fn run_event_listener( let stats_repository = repository.clone(); let db_downloads_metric_repository: Arc = db_downloads_metric_repository.clone(); - tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Starting torrent repository event listener"); + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Starting tracker core event listener"); tokio::spawn(async move { dispatch_events( receiver, + cancellation_token, stats_repository, db_downloads_metric_repository, persistent_torrent_completed_stat, ) .await; - tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Torrent repository listener finished"); + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Tracker core listener finished"); }) } async fn dispatch_events( mut receiver: Receiver, + cancellation_token: CancellationToken, stats_repository: Arc, db_downloads_metric_repository: Arc, persistent_torrent_completed_stat: bool, ) { - let shutdown_signal = tokio::signal::ctrl_c(); - - tokio::pin!(shutdown_signal); - loop { tokio::select! 
{ biased; - _ = &mut shutdown_signal => { - tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Received Ctrl+C, shutting down torrent repository event listener"); + () = cancellation_token.cancelled() => { + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Received cancellation request, shutting down tracker core event listener."); break; } @@ -65,11 +65,11 @@ async fn dispatch_events( Err(e) => { match e { RecvError::Closed => { - tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Torrent repository event receiver closed"); + tracing::info!(target: TRACKER_CORE_LOG_TARGET, "Tracker core event receiver closed"); break; } RecvError::Lagged(n) => { - tracing::warn!(target: TRACKER_CORE_LOG_TARGET, "Torrent repository event receiver lagged by {} events", n); + tracing::warn!(target: TRACKER_CORE_LOG_TARGET, "Tracker core event receiver lagged by {} events", n); } } } diff --git a/packages/tracker-core/tests/common/test_env.rs b/packages/tracker-core/tests/common/test_env.rs index d3bc9652a..3fe0464fe 100644 --- a/packages/tracker-core/tests/common/test_env.rs +++ b/packages/tracker-core/tests/common/test_env.rs @@ -7,6 +7,7 @@ use bittorrent_tracker_core::announce_handler::PeersWanted; use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_tracker_core::statistics::persisted::load_persisted_metrics; use tokio::task::yield_now; +use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::Core; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; @@ -66,15 +67,19 @@ impl TestEnv { async fn run_jobs(&self) { let mut jobs = vec![]; + let cancellation_token = CancellationToken::new(); let job = torrust_tracker_swarm_coordination_registry::statistics::event::listener::run_event_listener( self.swarm_coordination_registry_container.event_bus.receiver(), + cancellation_token.clone(), &self.swarm_coordination_registry_container.stats_repository, ); + jobs.push(job); let job = 
bittorrent_tracker_core::statistics::event::listener::run_event_listener( self.swarm_coordination_registry_container.event_bus.receiver(), + cancellation_token.clone(), &self.tracker_core_container.stats_repository, &self.tracker_core_container.db_downloads_metric_repository, self.tracker_core_container diff --git a/packages/udp-tracker-core/Cargo.toml b/packages/udp-tracker-core/Cargo.toml index 290c5fbfd..b3007eb80 100644 --- a/packages/udp-tracker-core/Cargo.toml +++ b/packages/udp-tracker-core/Cargo.toml @@ -28,6 +28,7 @@ rand = "0" serde = "1.0.219" thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync", "time"] } +tokio-util = "0.7.15" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } diff --git a/packages/udp-tracker-core/src/statistics/event/listener.rs b/packages/udp-tracker-core/src/statistics/event/listener.rs index 9b6f2e574..b11bcce85 100644 --- a/packages/udp-tracker-core/src/statistics/event/listener.rs +++ b/packages/udp-tracker-core/src/statistics/event/listener.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_clock::clock::Time; use torrust_tracker_events::receiver::RecvError; @@ -10,28 +11,29 @@ use crate::statistics::repository::Repository; use crate::{CurrentClock, UDP_TRACKER_LOG_TARGET}; #[must_use] -pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { +pub fn run_event_listener( + receiver: Receiver, + cancellation_token: CancellationToken, + repository: &Arc, +) -> JoinHandle<()> { let stats_repository = repository.clone(); tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker core event listener"); tokio::spawn(async move { - dispatch_events(receiver, stats_repository).await; + 
dispatch_events(receiver, cancellation_token, stats_repository).await; tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker core event listener finished"); }) } -async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc) { - let shutdown_signal = tokio::signal::ctrl_c(); - tokio::pin!(shutdown_signal); - +async fn dispatch_events(mut receiver: Receiver, cancellation_token: CancellationToken, stats_repository: Arc) { loop { tokio::select! { biased; - _ = &mut shutdown_signal => { - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Received Ctrl+C, shutting down UDP tracker core event listener."); + () = cancellation_token.cancelled() => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Received cancellation request, shutting down UDP tracker core event listener."); break; } @@ -41,11 +43,11 @@ async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc { match e { RecvError::Closed => { - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp core statistics receiver closed."); + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp tracker core statistics receiver closed."); break; } RecvError::Lagged(n) => { - tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp core statistics receiver lagged by {} events.", n); + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp tracker core statistics receiver lagged by {} events.", n); } } } diff --git a/packages/udp-tracker-server/Cargo.toml b/packages/udp-tracker-server/Cargo.toml index c0bc94ce3..160fe58f9 100644 --- a/packages/udp-tracker-server/Cargo.toml +++ b/packages/udp-tracker-server/Cargo.toml @@ -26,6 +26,7 @@ ringbuf = "0" serde = "1.0.219" thiserror = "2" tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio-util = "0.7.15" torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = 
"../configuration" } diff --git a/packages/udp-tracker-server/src/banning/event/listener.rs b/packages/udp-tracker-server/src/banning/event/listener.rs index fee3395fa..0d579f912 100644 --- a/packages/udp-tracker-server/src/banning/event/listener.rs +++ b/packages/udp-tracker-server/src/banning/event/listener.rs @@ -4,6 +4,7 @@ use bittorrent_udp_tracker_core::services::banning::BanService; use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use tokio::sync::RwLock; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_clock::clock::Time; use torrust_tracker_events::receiver::RecvError; @@ -15,6 +16,7 @@ use crate::CurrentClock; #[must_use] pub fn run_event_listener( receiver: Receiver, + cancellation_token: CancellationToken, ban_service: &Arc>, repository: &Arc, ) -> JoinHandle<()> { @@ -24,22 +26,24 @@ pub fn run_event_listener( tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker server event listener (banning)"); tokio::spawn(async move { - dispatch_events(receiver, ban_service_clone, repository_clone).await; + dispatch_events(receiver, cancellation_token, ban_service_clone, repository_clone).await; tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker server event listener (banning) finished"); }) } -async fn dispatch_events(mut receiver: Receiver, ban_service: Arc>, repository: Arc) { - let shutdown_signal = tokio::signal::ctrl_c(); - tokio::pin!(shutdown_signal); - +async fn dispatch_events( + mut receiver: Receiver, + cancellation_token: CancellationToken, + ban_service: Arc>, + repository: Arc, +) { loop { tokio::select! 
{ biased; - _ = &mut shutdown_signal => { - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Received Ctrl+C, shutting down UDP tracker server event listener (banning)"); + () = cancellation_token.cancelled() => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Received cancellation request, shutting down UDP tracker server event listener."); break; } @@ -49,11 +53,11 @@ async fn dispatch_events(mut receiver: Receiver, ban_service: Arc { match e { RecvError::Closed => { - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp server receiver (banning) closed."); + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp tracker server receiver (banning) closed."); break; } RecvError::Lagged(n) => { - tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp server receiver (banning) lagged by {} events.", n); + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp tracker server receiver (banning) lagged by {} events.", n); } } } diff --git a/packages/udp-tracker-server/src/environment.rs b/packages/udp-tracker-server/src/environment.rs index 61b1cba63..13e18ba9b 100644 --- a/packages/udp-tracker-server/src/environment.rs +++ b/packages/udp-tracker-server/src/environment.rs @@ -4,6 +4,7 @@ use std::sync::Arc; use bittorrent_tracker_core::container::TrackerCoreContainer; use bittorrent_udp_tracker_core::container::UdpTrackerCoreContainer; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_server_lib::registar::Registar; use torrust_tracker_configuration::{logging, Configuration, DEFAULT_TIMEOUT}; use torrust_tracker_swarm_coordination_registry::container::SwarmCoordinationRegistryContainer; @@ -25,6 +26,7 @@ where pub udp_core_event_listener_job: Option>, pub udp_server_stats_event_listener_job: Option>, pub udp_server_banning_event_listener_job: Option>, + pub cancellation_token: CancellationToken, } impl Environment { @@ -46,6 +48,7 @@ impl Environment { udp_core_event_listener_job: None, udp_server_stats_event_listener_job: None, 
udp_server_banning_event_listener_job: None, + cancellation_token: CancellationToken::new(), } } @@ -57,21 +60,25 @@ impl Environment { #[allow(dead_code)] pub async fn start(self) -> Environment { let cookie_lifetime = self.container.udp_tracker_core_container.udp_tracker_config.cookie_lifetime; + // Start the UDP tracker core event listener let udp_core_event_listener_job = Some(bittorrent_udp_tracker_core::statistics::event::listener::run_event_listener( self.container.udp_tracker_core_container.event_bus.receiver(), + self.cancellation_token.clone(), &self.container.udp_tracker_core_container.stats_repository, )); // Start the UDP tracker server event listener (statistics) let udp_server_stats_event_listener_job = Some(crate::statistics::event::listener::run_event_listener( self.container.udp_tracker_server_container.event_bus.receiver(), + self.cancellation_token.clone(), &self.container.udp_tracker_server_container.stats_repository, )); // Start the UDP tracker server event listener (banning) let udp_server_banning_event_listener_job = Some(crate::banning::event::listener::run_event_listener( self.container.udp_tracker_server_container.event_bus.receiver(), + self.cancellation_token.clone(), &self.container.udp_tracker_core_container.ban_service, &self.container.udp_tracker_server_container.stats_repository, )); @@ -95,6 +102,7 @@ impl Environment { udp_core_event_listener_job, udp_server_stats_event_listener_job, udp_server_banning_event_listener_job, + cancellation_token: self.cancellation_token, } } } @@ -150,6 +158,7 @@ impl Environment { udp_core_event_listener_job: None, udp_server_stats_event_listener_job: None, udp_server_banning_event_listener_job: None, + cancellation_token: self.cancellation_token, } } diff --git a/packages/udp-tracker-server/src/statistics/event/listener.rs b/packages/udp-tracker-server/src/statistics/event/listener.rs index ae659c15e..caaf5a2bc 100644 --- a/packages/udp-tracker-server/src/statistics/event/listener.rs +++ 
b/packages/udp-tracker-server/src/statistics/event/listener.rs @@ -2,6 +2,7 @@ use std::sync::Arc; use bittorrent_udp_tracker_core::UDP_TRACKER_LOG_TARGET; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_clock::clock::Time; use torrust_tracker_events::receiver::RecvError; @@ -11,28 +12,29 @@ use crate::statistics::repository::Repository; use crate::CurrentClock; #[must_use] -pub fn run_event_listener(receiver: Receiver, repository: &Arc) -> JoinHandle<()> { +pub fn run_event_listener( + receiver: Receiver, + cancellation_token: CancellationToken, + repository: &Arc, +) -> JoinHandle<()> { let repository_clone = repository.clone(); tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting UDP tracker server event listener"); tokio::spawn(async move { - dispatch_events(receiver, repository_clone).await; + dispatch_events(receiver, cancellation_token, repository_clone).await; tracing::info!(target: UDP_TRACKER_LOG_TARGET, "UDP tracker server event listener finished"); }) } -async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc) { - let shutdown_signal = tokio::signal::ctrl_c(); - tokio::pin!(shutdown_signal); - +async fn dispatch_events(mut receiver: Receiver, cancellation_token: CancellationToken, stats_repository: Arc) { loop { tokio::select! 
{ biased; - _ = &mut shutdown_signal => { - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Received Ctrl+C, shutting down UDP tracker server event listener."); + () = cancellation_token.cancelled() => { + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Received cancellation request, shutting down UDP tracker server event listener."); break; } @@ -42,11 +44,11 @@ async fn dispatch_events(mut receiver: Receiver, stats_repository: Arc { match e { RecvError::Closed => { - tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp server statistics receiver closed."); + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Udp tracker server statistics receiver closed."); break; } RecvError::Lagged(n) => { - tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp server statistics receiver lagged by {} events.", n); + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, "Udp tracker server statistics receiver lagged by {} events.", n); } } } diff --git a/src/app.rs b/src/app.rs index 58d758d7f..2149a6d4c 100644 --- a/src/app.rs +++ b/src/app.rs @@ -140,28 +140,28 @@ fn start_swarm_coordination_registry_event_listener( ) { job_manager.push_opt( "swarm_coordination_registry_event_listener", - jobs::torrent_repository::start_event_listener(config, app_container), + jobs::torrent_repository::start_event_listener(config, app_container, job_manager.new_cancellation_token()), ); } fn start_tracker_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { job_manager.push_opt( "tracker_core_event_listener", - jobs::tracker_core::start_event_listener(config, app_container), + jobs::tracker_core::start_event_listener(config, app_container, job_manager.new_cancellation_token()), ); } fn start_http_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { job_manager.push_opt( "http_core_event_listener", - jobs::http_tracker_core::start_event_listener(config, app_container), + 
jobs::http_tracker_core::start_event_listener(config, app_container, job_manager.new_cancellation_token()), ); } fn start_udp_core_event_listener(config: &Configuration, app_container: &Arc, job_manager: &mut JobManager) { job_manager.push_opt( "udp_core_event_listener", - jobs::udp_tracker_core::start_event_listener(config, app_container), + jobs::udp_tracker_core::start_event_listener(config, app_container, job_manager.new_cancellation_token()), ); } @@ -172,14 +172,14 @@ fn start_udp_server_stats_event_listener( ) { job_manager.push_opt( "udp_server_stats_event_listener", - jobs::udp_tracker_server::start_stats_event_listener(config, app_container), + jobs::udp_tracker_server::start_stats_event_listener(config, app_container, job_manager.new_cancellation_token()), ); } fn start_udp_server_banning_event_listener(app_container: &Arc, job_manager: &mut JobManager) { job_manager.push( "udp_server_banning_event_listener", - jobs::udp_tracker_server::start_banning_event_listener(app_container), + jobs::udp_tracker_server::start_banning_event_listener(app_container, job_manager.new_cancellation_token()), ); } diff --git a/src/bootstrap/jobs/http_tracker_core.rs b/src/bootstrap/jobs/http_tracker_core.rs index 952c80b40..ab71b9a0f 100644 --- a/src/bootstrap/jobs/http_tracker_core.rs +++ b/src/bootstrap/jobs/http_tracker_core.rs @@ -1,14 +1,20 @@ use std::sync::Arc; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::Configuration; use crate::container::AppContainer; -pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { +pub fn start_event_listener( + config: &Configuration, + app_container: &Arc, + cancellation_token: CancellationToken, +) -> Option> { if config.core.tracker_usage_statistics { let job = bittorrent_http_tracker_core::statistics::event::listener::run_event_listener( app_container.http_tracker_core_services.event_bus.receiver(), + cancellation_token, 
&app_container.http_tracker_core_services.stats_repository, ); diff --git a/src/bootstrap/jobs/manager.rs b/src/bootstrap/jobs/manager.rs index 53733844b..565cd7b73 100644 --- a/src/bootstrap/jobs/manager.rs +++ b/src/bootstrap/jobs/manager.rs @@ -2,13 +2,14 @@ use std::time::Duration; use tokio::task::JoinHandle; use tokio::time::timeout; +use tokio_util::sync::CancellationToken; use tracing::{info, warn}; /// Represents a named background job. #[derive(Debug)] pub struct Job { - pub name: String, - pub handle: JoinHandle<()>, + name: String, + handle: JoinHandle<()>, } impl Job { @@ -24,12 +25,16 @@ impl Job { #[derive(Debug, Default)] pub struct JobManager { jobs: Vec, + cancellation_token: CancellationToken, } impl JobManager { #[must_use] pub fn new() -> Self { - Self { jobs: Vec::new() } + Self { + jobs: Vec::new(), + cancellation_token: CancellationToken::new(), + } } pub fn push>(&mut self, name: N, handle: JoinHandle<()>) { @@ -42,6 +47,25 @@ impl JobManager { } } + #[must_use] + pub fn new_cancellation_token(&self) -> CancellationToken { + self.cancellation_token.clone() + } + + /// Cancels all jobs using the shared cancellation token. + /// + /// Notice that this does not cancel the jobs immediately, but rather + /// signals them to stop. The jobs themselves must handle the cancellation + /// token appropriately. + /// + /// Notice jobs might be pushed into the manager without a cancellation + /// token, so this method will not cancel those jobs. Some tasks might + /// decide to listen for CTRL+c signal directly, or implement their own + /// cancellation logic. + pub fn cancel(&self) { + self.cancellation_token.cancel(); + } + /// Waits sequentially for all jobs to complete, with a graceful timeout per /// job. 
pub async fn wait_for_all(mut self, grace_period: Duration) { diff --git a/src/bootstrap/jobs/torrent_repository.rs b/src/bootstrap/jobs/torrent_repository.rs index 44ffdf53b..e49323735 100644 --- a/src/bootstrap/jobs/torrent_repository.rs +++ b/src/bootstrap/jobs/torrent_repository.rs @@ -1,14 +1,20 @@ use std::sync::Arc; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::Configuration; use crate::container::AppContainer; -pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { +pub fn start_event_listener( + config: &Configuration, + app_container: &Arc, + cancellation_token: CancellationToken, +) -> Option> { if config.core.tracker_usage_statistics { let job = torrust_tracker_swarm_coordination_registry::statistics::event::listener::run_event_listener( app_container.swarm_coordination_registry_container.event_bus.receiver(), + cancellation_token, &app_container.swarm_coordination_registry_container.stats_repository, ); diff --git a/src/bootstrap/jobs/tracker_core.rs b/src/bootstrap/jobs/tracker_core.rs index f2fc25ef3..d881f4cd2 100644 --- a/src/bootstrap/jobs/tracker_core.rs +++ b/src/bootstrap/jobs/tracker_core.rs @@ -1,14 +1,20 @@ use std::sync::Arc; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::Configuration; use crate::container::AppContainer; -pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { +pub fn start_event_listener( + config: &Configuration, + app_container: &Arc, + cancellation_token: CancellationToken, +) -> Option> { if config.core.tracker_usage_statistics || config.core.tracker_policy.persistent_torrent_completed_stat { let job = bittorrent_tracker_core::statistics::event::listener::run_event_listener( app_container.swarm_coordination_registry_container.event_bus.receiver(), + cancellation_token, &app_container.tracker_core_container.stats_repository, 
&app_container.tracker_core_container.db_downloads_metric_repository, app_container diff --git a/src/bootstrap/jobs/udp_tracker_core.rs b/src/bootstrap/jobs/udp_tracker_core.rs index 689fa8301..dd7e8c165 100644 --- a/src/bootstrap/jobs/udp_tracker_core.rs +++ b/src/bootstrap/jobs/udp_tracker_core.rs @@ -1,14 +1,20 @@ use std::sync::Arc; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::Configuration; use crate::container::AppContainer; -pub fn start_event_listener(config: &Configuration, app_container: &Arc) -> Option> { +pub fn start_event_listener( + config: &Configuration, + app_container: &Arc, + cancellation_token: CancellationToken, +) -> Option> { if config.core.tracker_usage_statistics { let job = bittorrent_udp_tracker_core::statistics::event::listener::run_event_listener( app_container.udp_tracker_core_services.event_bus.receiver(), + cancellation_token, &app_container.udp_tracker_core_services.stats_repository, ); Some(job) diff --git a/src/bootstrap/jobs/udp_tracker_server.rs b/src/bootstrap/jobs/udp_tracker_server.rs index 3e8a7aaa8..fc6df9c16 100644 --- a/src/bootstrap/jobs/udp_tracker_server.rs +++ b/src/bootstrap/jobs/udp_tracker_server.rs @@ -1,14 +1,20 @@ use std::sync::Arc; use tokio::task::JoinHandle; +use tokio_util::sync::CancellationToken; use torrust_tracker_configuration::Configuration; use crate::container::AppContainer; -pub fn start_stats_event_listener(config: &Configuration, app_container: &Arc) -> Option> { +pub fn start_stats_event_listener( + config: &Configuration, + app_container: &Arc, + cancellation_token: CancellationToken, +) -> Option> { if config.core.tracker_usage_statistics { let job = torrust_udp_tracker_server::statistics::event::listener::run_event_listener( app_container.udp_tracker_server_container.event_bus.receiver(), + cancellation_token, &app_container.udp_tracker_server_container.stats_repository, ); Some(job) @@ -19,9 +25,10 @@ pub fn 
start_stats_event_listener(config: &Configuration, app_container: &Arc) -> JoinHandle<()> { +pub fn start_banning_event_listener(app_container: &Arc, cancellation_token: CancellationToken) -> JoinHandle<()> { torrust_udp_tracker_server::banning::event::listener::run_event_listener( app_container.udp_tracker_server_container.event_bus.receiver(), + cancellation_token, &app_container.udp_tracker_core_services.ban_service, &app_container.udp_tracker_server_container.stats_repository, ) diff --git a/src/main.rs b/src/main.rs index a49c3aeba..7012ecaa7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -10,6 +10,8 @@ async fn main() { _ = tokio::signal::ctrl_c() => { tracing::info!("Torrust tracker shutting down ..."); + jobs.cancel(); + jobs.wait_for_all(Duration::from_secs(10)).await; tracing::info!("Torrust tracker successfully shutdown."); From f7ab993e96a050ddbbd1dd8467bb5bd1ef8c411d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 17 Jun 2025 19:41:33 +0100 Subject: [PATCH 734/802] refactor: [#1589] add logs for debugging --- .../src/statistics/repository.rs | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index 1851b78a8..fa85610a0 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -89,6 +89,14 @@ impl Repository { drop(stats_lock); + tracing::debug!( + "Recalculated UDP average connect processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_connections_handled: {})", + new_avg, + previous_avg, + req_processing_time, + udp_connections_handled + ); + new_avg } @@ -109,6 +117,14 @@ impl Repository { drop(stats_lock); + tracing::debug!( + "Recalculated UDP average announce processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_announces_handled: {})", + new_avg, + previous_avg, + req_processing_time, + 
udp_announces_handled + ); + new_avg } @@ -128,6 +144,14 @@ impl Repository { drop(stats_lock); + tracing::debug!( + "Recalculated UDP average scrape processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_scrapes_handled: {})", + new_avg, + previous_avg, + req_processing_time, + udp_scrapes_handled + ); + new_avg } } From 5fc255fa849ad88e977f10e45640176bfd134d26 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 18 Jun 2025 11:07:53 +0100 Subject: [PATCH 735/802] tests(udp-tracker-server): [#1589] add unit tests to statistics::repository::Repository --- cSpell.json | 1 + .../src/statistics/repository.rs | 512 ++++++++++++++++++ 2 files changed, 513 insertions(+) diff --git a/cSpell.json b/cSpell.json index fcbf53f1f..647dd24a2 100644 --- a/cSpell.json +++ b/cSpell.json @@ -34,6 +34,7 @@ "chrono", "ciphertext", "clippy", + "cloneable", "codecov", "codegen", "completei", diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index fa85610a0..eb0951614 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -155,3 +155,515 @@ impl Repository { new_avg } } + +#[cfg(test)] +mod tests { + use core::f64; + use std::time::Duration; + + use torrust_tracker_clock::clock::Time; + use torrust_tracker_metrics::metric_name; + + use super::*; + use crate::statistics::*; + use crate::CurrentClock; + + #[test] + fn it_should_implement_default() { + let repo = Repository::default(); + assert!(!std::ptr::eq(&repo.stats, &Repository::new().stats)); + } + + #[test] + fn it_should_be_cloneable() { + let repo = Repository::new(); + let cloned_repo = repo.clone(); + assert!(!std::ptr::eq(&repo.stats, &cloned_repo.stats)); + } + + #[tokio::test] + async fn it_should_be_initialized_with_described_metrics() { + let repo = Repository::new(); + let stats = repo.get_stats().await; + + // Check that the described metrics 
are present + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL))); + assert!(stats + .metric_collection + .contains_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_CONNECTION_ID_ERRORS_TOTAL))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL))); + assert!(stats + .metric_collection + .contains_gauge(&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS))); + } + + #[tokio::test] + async fn it_should_return_a_read_guard_to_metrics() { + let repo = Repository::new(); + let stats = repo.get_stats().await; + + // Should be able to read metrics through the guard + assert_eq!(stats.udp_requests_aborted(), 0); + assert_eq!(stats.udp_requests_banned(), 0); + } + + #[tokio::test] + async fn it_should_allow_increasing_a_counter_metric_successfully() { + let repo = Repository::new(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + // Increase a counter metric + let result = repo + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now) + .await; + + assert!(result.is_ok()); + + // Verify the counter was incremented + let stats = repo.get_stats().await; + assert_eq!(stats.udp_requests_aborted(), 1); + } + + #[tokio::test] + async fn it_should_allow_increasing_a_counter_multiple_times() { + let repo = Repository::new(); + let now = 
CurrentClock::now(); + let labels = LabelSet::empty(); + + // Increase counter multiple times + for _ in 0..5 { + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now) + .await + .unwrap(); + } + + // Verify the counter was incremented correctly + let stats = repo.get_stats().await; + assert_eq!(stats.udp_requests_aborted(), 5); + } + + #[tokio::test] + async fn it_should_allow_increasing_a_counter_with_different_labels() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + let labels_ipv4 = LabelSet::from([("server_binding_address_ip_family", "inet")]); + let labels_ipv6 = LabelSet::from([("server_binding_address_ip_family", "inet6")]); + + // Increase counters with different labels + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels_ipv4, now) + .await + .unwrap(); + + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels_ipv6, now) + .await + .unwrap(); + + // Verify both labeled metrics + let stats = repo.get_stats().await; + assert_eq!(stats.udp4_requests(), 1); + assert_eq!(stats.udp6_requests(), 1); + } + + #[tokio::test] + async fn it_should_set_a_gauge_metric_successfully() { + let repo = Repository::new(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + // Set a gauge metric + let result = repo + .set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 42.0, now) + .await; + + assert!(result.is_ok()); + + // Verify the gauge was set + let stats = repo.get_stats().await; + assert_eq!(stats.udp_banned_ips_total(), 42); + } + + #[tokio::test] + async fn it_should_overwrite_previous_value_when_setting_a_gauge_with_a_previous_value() { + let repo = Repository::new(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + // Set gauge to initial value + repo.set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 10.0, now) + .await + .unwrap(); + + // Overwrite 
with new value + repo.set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 25.0, now) + .await + .unwrap(); + + // Verify the gauge has the new value + let stats = repo.get_stats().await; + assert_eq!(stats.udp_banned_ips_total(), 25); + } + + #[tokio::test] + async fn it_should_allow_setting_a_gauge_with_different_labels() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + let labels_connect = LabelSet::from([("request_kind", "connect")]); + let labels_announce = LabelSet::from([("request_kind", "announce")]); + + // Set gauges with different labels + repo.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels_connect, + 1000.0, + now, + ) + .await + .unwrap(); + + repo.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels_announce, + 2000.0, + now, + ) + .await + .unwrap(); + + // Verify both labeled metrics + let stats = repo.get_stats().await; + assert_eq!(stats.udp_avg_connect_processing_time_ns(), 1000); + assert_eq!(stats.udp_avg_announce_processing_time_ns(), 2000); + } + + #[tokio::test] + async fn it_should_recalculate_the_udp_average_connect_processing_time_in_nanoseconds_using_moving_average() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Set up initial connections handled + let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "connect")]); + let ipv6_labels = LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")]); + + // Simulate 2 IPv4 and 1 IPv6 connections + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) + .await + .unwrap(); + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) + .await + .unwrap(); + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv6_labels, now) + .await + .unwrap(); + + // 
Set initial average to 1000ns + let connect_labels = LabelSet::from([("request_kind", "connect")]); + repo.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &connect_labels, + 1000.0, + now, + ) + .await + .unwrap(); + + // Calculate new average with processing time of 2000ns + let processing_time = Duration::from_nanos(2000); + let new_avg = repo.recalculate_udp_avg_connect_processing_time_ns(processing_time).await; + + // Moving average: previous_avg + (new_value - previous_avg) / total_connections + // 1000 + (2000 - 1000) / 3 = 1000 + 333.33 = 1333.33 + let expected_avg = 1000.0 + (2000.0 - 1000.0) / 3.0; + assert!( + (new_avg - expected_avg).abs() < 0.01, + "Expected {expected_avg}, got {new_avg}" + ); + } + + #[tokio::test] + async fn it_should_recalculate_the_udp_average_announce_processing_time_in_nanoseconds_using_moving_average() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Set up initial announces handled + let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "announce")]); + let ipv6_labels = LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")]); + + // Simulate 3 IPv4 and 2 IPv6 announces + for _ in 0..3 { + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) + .await + .unwrap(); + } + for _ in 0..2 { + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv6_labels, now) + .await + .unwrap(); + } + + // Set initial average to 500ns + let announce_labels = LabelSet::from([("request_kind", "announce")]); + repo.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &announce_labels, + 500.0, + now, + ) + .await + .unwrap(); + + // Calculate new average with processing time of 1500ns + let processing_time = Duration::from_nanos(1500); + let new_avg = 
repo.recalculate_udp_avg_announce_processing_time_ns(processing_time).await; + + // Moving average: previous_avg + (new_value - previous_avg) / total_announces + // 500 + (1500 - 500) / 5 = 500 + 200 = 700 + let expected_avg = 500.0 + (1500.0 - 500.0) / 5.0; + assert!( + (new_avg - expected_avg).abs() < 0.01, + "Expected {expected_avg}, got {new_avg}" + ); + } + + #[tokio::test] + async fn it_should_recalculate_the_udp_average_scrape_processing_time_in_nanoseconds_using_moving_average() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Set up initial scrapes handled + let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")]); + + // Simulate 4 IPv4 scrapes + for _ in 0..4 { + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) + .await + .unwrap(); + } + + // Set initial average to 800ns + let scrape_labels = LabelSet::from([("request_kind", "scrape")]); + repo.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &scrape_labels, + 800.0, + now, + ) + .await + .unwrap(); + + // Calculate new average with processing time of 1200ns + let processing_time = Duration::from_nanos(1200); + let new_avg = repo.recalculate_udp_avg_scrape_processing_time_ns(processing_time).await; + + // Moving average: previous_avg + (new_value - previous_avg) / total_scrapes + // 800 + (1200 - 800) / 4 = 800 + 100 = 900 + let expected_avg = 800.0 + (1200.0 - 800.0) / 4.0; + assert!( + (new_avg - expected_avg).abs() < 0.01, + "Expected {expected_avg}, got {new_avg}" + ); + } + + #[tokio::test] + async fn recalculate_average_methods_should_handle_zero_connections_gracefully() { + let repo = Repository::new(); + + // Test with zero connections (should not panic, should handle division by zero) + let processing_time = Duration::from_nanos(1000); + + let connect_avg = repo.recalculate_udp_avg_connect_processing_time_ns(processing_time).await; + 
let announce_avg = repo.recalculate_udp_avg_announce_processing_time_ns(processing_time).await; + let scrape_avg = repo.recalculate_udp_avg_scrape_processing_time_ns(processing_time).await; + + // With 0 total connections, the formula becomes 0 + (1000 - 0) / 0 + // This should handle the division by zero case gracefully + assert!(connect_avg.is_infinite() || connect_avg.is_nan()); + assert!(announce_avg.is_infinite() || announce_avg.is_nan()); + assert!(scrape_avg.is_infinite() || scrape_avg.is_nan()); + } + + #[tokio::test] + async fn it_should_handle_concurrent_access() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Spawn multiple concurrent tasks + let mut handles = vec![]; + + for i in 0..10 { + let repo_clone = repo.clone(); + let handle = tokio::spawn(async move { + for _ in 0..5 { + repo_clone + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), + &LabelSet::empty(), + now, + ) + .await + .unwrap(); + } + i + }); + handles.push(handle); + } + + // Wait for all tasks to complete + for handle in handles { + handle.await.unwrap(); + } + + // Verify all increments were properly recorded + let stats = repo.get_stats().await; + assert_eq!(stats.udp_requests_aborted(), 50); // 10 tasks * 5 increments each + } + + #[tokio::test] + async fn it_should_handle_large_processing_times() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Set up a connection + let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "connect")]); + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) + .await + .unwrap(); + + // Test with very large processing time + let large_duration = Duration::from_secs(1); // 1 second = 1,000,000,000 ns + let new_avg = repo.recalculate_udp_avg_connect_processing_time_ns(large_duration).await; + + // Should handle large numbers without overflow + assert!(new_avg > 0.0); + 
assert!(new_avg.is_finite()); + } + + #[tokio::test] + async fn it_should_maintain_consistency_across_operations() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Perform a series of operations + repo.increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), + &LabelSet::empty(), + now, + ) + .await + .unwrap(); + + repo.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), + &LabelSet::empty(), + 10.0, + now, + ) + .await + .unwrap(); + + repo.increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), + &LabelSet::empty(), + now, + ) + .await + .unwrap(); + + // Check final state + let stats = repo.get_stats().await; + assert_eq!(stats.udp_requests_aborted(), 1); + assert_eq!(stats.udp_banned_ips_total(), 10); + assert_eq!(stats.udp_requests_banned(), 1); + } + + #[tokio::test] + async fn it_should_handle_error_cases_gracefully() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Test with invalid metric name (this should still work as metrics are created dynamically) + let result = repo + .increase_counter(&metric_name!("non_existent_metric"), &LabelSet::empty(), now) + .await; + + // Should succeed as metrics are created on demand + assert!(result.is_ok()); + + // Test with NaN value for gauge + let result = repo + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), + &LabelSet::empty(), + f64::NAN, + now, + ) + .await; + + // Should handle NaN values + assert!(result.is_ok()); + } + + #[tokio::test] + async fn it_should_handle_moving_average_calculation_before_any_connections_are_recorded() { + let repo = Repository::new(); + let now = CurrentClock::now(); + + // This test checks the behavior of `recalculate_udp_avg_connect_processing_time_ns`` + // when no connections have been recorded yet. The first call should + // handle division by zero gracefully and return an infinite average, + // which is the current behavior. 
+ + // todo: the first average should be 2000ns, not infinity. + // This is because the first connection is not counted in the average + // calculation if the counter is increased after calculating the average. + // The problem is that we count requests when they are accepted, not + // when they are processed. And we calculate the average when the + // response is sent. + + // First calculation: no connections recorded yet, should result in infinity + let processing_time_1 = Duration::from_nanos(2000); + let avg_1 = repo.recalculate_udp_avg_connect_processing_time_ns(processing_time_1).await; + + // Division by zero: 1000 + (2000 - 1000) / 0 = infinity + assert!( + avg_1.is_infinite(), + "First calculation should be infinite due to division by zero" + ); + + // Now add one connection and try again + let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "connect")]); + repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) + .await + .unwrap(); + + // Second calculation: 1 connection, but previous average is infinity + let processing_time_2 = Duration::from_nanos(3000); + let avg_2 = repo.recalculate_udp_avg_connect_processing_time_ns(processing_time_2).await; + + assert!( + (avg_2 - 3000.0).abs() < f64::EPSILON, + "Second calculation should be 3000ns, but got {avg_2}" + ); + } +} From 7e9d9827f1933d2774cce03eb59b47632214a8d2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 18 Jun 2025 12:15:01 +0100 Subject: [PATCH 736/802] fix(udt-tracker-server): metric description --- packages/udp-tracker-server/src/statistics/mod.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index b42a73f27..768722ba3 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -73,9 +73,7 @@ pub fn describe_metrics() 
-> Metrics { metrics.metric_collection.describe_gauge( &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), Some(Unit::Nanoseconds), - Some(MetricDescription::new( - "Average time to process a UDP connect request in nanoseconds", - )), + Some(MetricDescription::new("Average time to process a UDP request in nanoseconds")), ); metrics From bf9d16a83ec48d2b60074fdc97b93f7c58bb5944 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 18 Jun 2025 12:33:52 +0100 Subject: [PATCH 737/802] tests(udp-tracker-server): [#1589] add unit tests to statistics::metrics::Metrics --- cSpell.json | 1 + .../src/statistics/metrics.rs | 781 ++++++++++++++++++ 2 files changed, 782 insertions(+) diff --git a/cSpell.json b/cSpell.json index 647dd24a2..76939c199 100644 --- a/cSpell.json +++ b/cSpell.json @@ -175,6 +175,7 @@ "trackerid", "Trackon", "typenum", + "udpv", "Unamed", "underflows", "Unsendable", diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index c50966bc6..3c162ff02 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -276,3 +276,784 @@ impl Metrics { .unwrap_or_default() as u64 } } + +#[cfg(test)] +mod tests { + use torrust_tracker_clock::clock::Time; + use torrust_tracker_metrics::metric_name; + + use super::*; + use crate::statistics::{ + UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_IPS_BANNED_TOTAL, + UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL, + UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL, + UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL, UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, + }; + use crate::CurrentClock; + + #[test] + fn it_should_implement_default() { + let metrics = Metrics::default(); + // MetricCollection starts with empty collections + assert_eq!(metrics, Metrics::default()); + } + + #[test] + fn 
it_should_implement_debug() { + let metrics = Metrics::default(); + let debug_string = format!("{metrics:?}"); + assert!(debug_string.contains("Metrics")); + assert!(debug_string.contains("metric_collection")); + } + + #[test] + fn it_should_implement_partial_eq() { + let metrics1 = Metrics::default(); + let metrics2 = Metrics::default(); + assert_eq!(metrics1, metrics2); + } + + #[test] + fn it_should_increase_counter_metric() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + let result = metrics.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now); + + assert!(result.is_ok()); + } + + #[test] + fn it_should_increase_counter_metric_with_labels() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet")]); + + let result = metrics.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels, now); + + assert!(result.is_ok()); + } + + #[test] + fn it_should_set_gauge_metric() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + let result = metrics.set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 42.0, now); + + assert!(result.is_ok()); + } + + #[test] + fn it_should_set_gauge_metric_with_labels() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("request_kind", "connect")]); + + let result = metrics.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels, + 1000.0, + now, + ); + + assert!(result.is_ok()); + } + + mod udp_general_metrics { + use super::*; + + #[test] + fn it_should_return_zero_for_udp_requests_aborted_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_requests_aborted(), 0); + } + + #[test] + fn 
it_should_return_sum_of_udp_requests_aborted() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now) + .unwrap(); + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now) + .unwrap(); + + assert_eq!(metrics.udp_requests_aborted(), 2); + } + + #[test] + fn it_should_return_zero_for_udp_requests_banned_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_requests_banned(), 0); + } + + #[test] + fn it_should_return_sum_of_udp_requests_banned() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + for _ in 0..3 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp_requests_banned(), 3); + } + + #[test] + fn it_should_return_zero_for_udp_banned_ips_total_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_banned_ips_total(), 0); + } + + #[test] + fn it_should_return_gauge_value_for_udp_banned_ips_total() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + metrics + .set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 10.0, now) + .unwrap(); + + assert_eq!(metrics.udp_banned_ips_total(), 10); + } + } + + mod udp_performance_metrics { + use super::*; + + #[test] + fn it_should_return_zero_for_udp_avg_connect_processing_time_ns_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_avg_connect_processing_time_ns(), 0); + } + + #[test] + fn it_should_return_gauge_value_for_udp_avg_connect_processing_time_ns() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("request_kind", "connect")]); + + metrics + .set_gauge( + 
&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels, + 1500.0, + now, + ) + .unwrap(); + + assert_eq!(metrics.udp_avg_connect_processing_time_ns(), 1500); + } + + #[test] + fn it_should_return_zero_for_udp_avg_announce_processing_time_ns_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_avg_announce_processing_time_ns(), 0); + } + + #[test] + fn it_should_return_gauge_value_for_udp_avg_announce_processing_time_ns() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("request_kind", "announce")]); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels, + 2500.0, + now, + ) + .unwrap(); + + assert_eq!(metrics.udp_avg_announce_processing_time_ns(), 2500); + } + + #[test] + fn it_should_return_zero_for_udp_avg_scrape_processing_time_ns_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_avg_scrape_processing_time_ns(), 0); + } + + #[test] + fn it_should_return_gauge_value_for_udp_avg_scrape_processing_time_ns() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("request_kind", "scrape")]); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels, + 3500.0, + now, + ) + .unwrap(); + + assert_eq!(metrics.udp_avg_scrape_processing_time_ns(), 3500); + } + } + + mod udpv4_metrics { + use super::*; + + #[test] + fn it_should_return_zero_for_udp4_requests_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp4_requests(), 0); + } + + #[test] + fn it_should_return_sum_of_udp4_requests() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet")]); + + for _ in 0..5 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels, now) + 
.unwrap(); + } + + assert_eq!(metrics.udp4_requests(), 5); + } + + #[test] + fn it_should_return_zero_for_udp4_connections_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp4_connections_handled(), 0); + } + + #[test] + fn it_should_return_sum_of_udp4_connections_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "connect")]); + + for _ in 0..3 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_connections_handled(), 3); + } + + #[test] + fn it_should_return_zero_for_udp4_announces_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp4_announces_handled(), 0); + } + + #[test] + fn it_should_return_sum_of_udp4_announces_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "announce")]); + + for _ in 0..7 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_announces_handled(), 7); + } + + #[test] + fn it_should_return_zero_for_udp4_scrapes_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp4_scrapes_handled(), 0); + } + + #[test] + fn it_should_return_sum_of_udp4_scrapes_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")]); + + for _ in 0..4 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_scrapes_handled(), 4); + } + + #[test] + fn it_should_return_zero_for_udp4_responses_when_no_data() { + 
let metrics = Metrics::default(); + assert_eq!(metrics.udp4_responses(), 0); + } + + #[test] + fn it_should_return_sum_of_udp4_responses() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet")]); + + for _ in 0..6 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_responses(), 6); + } + + #[test] + fn it_should_return_zero_for_udp4_errors_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp4_errors_handled(), 0); + } + + #[test] + fn it_should_return_sum_of_udp4_errors_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet")]); + + for _ in 0..2 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_errors_handled(), 2); + } + } + + mod udpv6_metrics { + use super::*; + + #[test] + fn it_should_return_zero_for_udp6_requests_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp6_requests(), 0); + } + + #[test] + fn it_should_return_sum_of_udp6_requests() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet6")]); + + for _ in 0..8 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp6_requests(), 8); + } + + #[test] + fn it_should_return_zero_for_udp6_connections_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp6_connections_handled(), 0); + } + + #[test] + fn it_should_return_sum_of_udp6_connections_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = 
LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")]); + + for _ in 0..4 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp6_connections_handled(), 4); + } + + #[test] + fn it_should_return_zero_for_udp6_announces_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp6_announces_handled(), 0); + } + + #[test] + fn it_should_return_sum_of_udp6_announces_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")]); + + for _ in 0..9 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp6_announces_handled(), 9); + } + + #[test] + fn it_should_return_zero_for_udp6_scrapes_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp6_scrapes_handled(), 0); + } + + #[test] + fn it_should_return_sum_of_udp6_scrapes_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "scrape")]); + + for _ in 0..6 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp6_scrapes_handled(), 6); + } + + #[test] + fn it_should_return_zero_for_udp6_responses_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp6_responses(), 0); + } + + #[test] + fn it_should_return_sum_of_udp6_responses() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet6")]); + + for _ in 0..11 { + metrics + 
.increase_counter(&metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp6_responses(), 11); + } + + #[test] + fn it_should_return_zero_for_udp6_errors_handled_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp6_errors_handled(), 0); + } + + #[test] + fn it_should_return_sum_of_udp6_errors_handled() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("server_binding_address_ip_family", "inet6")]); + + for _ in 0..3 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp6_errors_handled(), 3); + } + } + + mod combined_metrics { + use super::*; + + #[test] + fn it_should_distinguish_between_ipv4_and_ipv6_metrics() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + + let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet")]); + let ipv6_labels = LabelSet::from([("server_binding_address_ip_family", "inet6")]); + + // Add different counts for IPv4 and IPv6 + for _ in 0..3 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &ipv4_labels, now) + .unwrap(); + } + + for _ in 0..7 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &ipv6_labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_requests(), 3); + assert_eq!(metrics.udp6_requests(), 7); + } + + #[test] + fn it_should_distinguish_between_different_request_kinds() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + + let connect_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "connect")]); + let announce_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "announce")]); + let scrape_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", 
"scrape")]); + + // Add different counts for different request kinds + for _ in 0..2 { + metrics + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &connect_labels, + now, + ) + .unwrap(); + } + + for _ in 0..5 { + metrics + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &announce_labels, + now, + ) + .unwrap(); + } + + for _ in 0..1 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &scrape_labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp4_connections_handled(), 2); + assert_eq!(metrics.udp4_announces_handled(), 5); + assert_eq!(metrics.udp4_scrapes_handled(), 1); + } + + #[test] + fn it_should_handle_mixed_ipv4_and_ipv6_for_different_request_kinds() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + + let ipv4_connect_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "connect")]); + let ipv6_connect_labels = + LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")]); + let ipv4_announce_labels = + LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "announce")]); + let ipv6_announce_labels = + LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")]); + + // Add mixed IPv4/IPv6 counts + for _ in 0..3 { + metrics + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &ipv4_connect_labels, + now, + ) + .unwrap(); + } + + for _ in 0..2 { + metrics + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &ipv6_connect_labels, + now, + ) + .unwrap(); + } + + for _ in 0..4 { + metrics + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + &ipv4_announce_labels, + now, + ) + .unwrap(); + } + + for _ in 0..6 { + metrics + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), + 
&ipv6_announce_labels, + now, + ) + .unwrap(); + } + + assert_eq!(metrics.udp4_connections_handled(), 3); + assert_eq!(metrics.udp6_connections_handled(), 2); + assert_eq!(metrics.udp4_announces_handled(), 4); + assert_eq!(metrics.udp6_announces_handled(), 6); + } + } + + mod edge_cases { + use super::*; + + #[test] + fn it_should_handle_large_counter_values() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + // Add a large number of increments + for _ in 0..1000 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now) + .unwrap(); + } + + assert_eq!(metrics.udp_requests_aborted(), 1000); + } + + #[test] + fn it_should_handle_large_gauge_values() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + // Set a large gauge value + metrics + .set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 999_999.0, now) + .unwrap(); + + assert_eq!(metrics.udp_banned_ips_total(), 999_999); + } + + #[test] + fn it_should_handle_zero_gauge_values() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + metrics + .set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 0.0, now) + .unwrap(); + + assert_eq!(metrics.udp_banned_ips_total(), 0); + } + + #[test] + fn it_should_handle_fractional_gauge_values_with_truncation() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("request_kind", "connect")]); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels, + 1234.567, + now, + ) + .unwrap(); + + // Should truncate to 1234 + assert_eq!(metrics.udp_avg_connect_processing_time_ns(), 1234); + } + + #[test] + fn it_should_overwrite_gauge_values_when_set_multiple_times() { + let mut metrics = Metrics::default(); + let now = 
CurrentClock::now(); + let labels = LabelSet::empty(); + + // Set initial value + metrics + .set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 50.0, now) + .unwrap(); + + assert_eq!(metrics.udp_banned_ips_total(), 50); + + // Overwrite with new value + metrics + .set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 75.0, now) + .unwrap(); + + assert_eq!(metrics.udp_banned_ips_total(), 75); + } + + #[test] + fn it_should_handle_empty_label_sets() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let empty_labels = LabelSet::empty(); + + let result = metrics.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &empty_labels, now); + + assert!(result.is_ok()); + assert_eq!(metrics.udp_requests_aborted(), 1); + } + + #[test] + fn it_should_handle_multiple_labels_on_same_metric() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + + let labels1 = LabelSet::from([("server_binding_address_ip_family", "inet")]); + let labels2 = LabelSet::from([("server_binding_address_ip_family", "inet6")]); + + // Add to same metric with different labels + for _ in 0..3 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels1, now) + .unwrap(); + } + + for _ in 0..5 { + metrics + .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), &labels2, now) + .unwrap(); + } + + // Should return labeled sums correctly + assert_eq!(metrics.udp4_requests(), 3); + assert_eq!(metrics.udp6_requests(), 5); + } + } + + mod error_handling { + use super::*; + + #[test] + fn it_should_return_ok_result_for_valid_counter_operations() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + let result = metrics.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now); + + assert!(result.is_ok()); + } + + #[test] + fn 
it_should_return_ok_result_for_valid_gauge_operations() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + let result = metrics.set_gauge(&metric_name!(UDP_TRACKER_SERVER_IPS_BANNED_TOTAL), &labels, 42.0, now); + + assert!(result.is_ok()); + } + + #[test] + fn it_should_handle_unknown_metric_names_gracefully() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::empty(); + + // This should still work as metrics are created on demand + let result = metrics.increase_counter(&metric_name!("unknown_metric"), &labels, now); + + assert!(result.is_ok()); + } + } +} From 520fd8b6deb9d7ec5cc943ca622267565af304dd Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 18 Jun 2025 20:04:50 +0100 Subject: [PATCH 738/802] chore: [#1589] add debug logs for avg processing time metric update --- .../src/statistics/event/handler/response_sent.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs index 7e05e483b..e76d67a4e 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -19,6 +19,9 @@ pub async fn handle_event( let new_avg = stats_repository .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) .await; + + tracing::debug!("Updating average processing time metric for connect requests: {} ns", new_avg); + let mut label_set = LabelSet::from(context.clone()); label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); match stats_repository @@ -39,6 +42,12 @@ pub async fn handle_event( let new_avg = stats_repository .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) .await; + + tracing::debug!( + "Updating average processing time metric for 
announce requests: {} ns", + new_avg + ); + let mut label_set = LabelSet::from(context.clone()); label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); match stats_repository @@ -59,6 +68,9 @@ pub async fn handle_event( let new_avg = stats_repository .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) .await; + + tracing::debug!("Updating average processing time metric for scrape requests: {} ns", new_avg); + let mut label_set = LabelSet::from(context.clone()); label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); match stats_repository From e6c05b6886e241dbf6f2472d41b2c0cc47739756 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 19 Jun 2025 10:30:54 +0100 Subject: [PATCH 739/802] refactor(udp-tracker-server): [#1589] move average processing time calculation from Repository to Metrics --- .../src/statistics/metrics.rs | 67 +++++++++++++++++++ .../src/statistics/repository.rs | 58 +--------------- 2 files changed, 70 insertions(+), 55 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index 3c162ff02..e0ca0aaaf 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -1,3 +1,5 @@ +use std::time::Duration; + use serde::Serialize; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; @@ -48,6 +50,71 @@ impl Metrics { } impl Metrics { + #[allow(clippy::cast_precision_loss)] + pub fn recalculate_udp_avg_connect_processing_time_ns(&self, req_processing_time: Duration) -> f64 { + let req_processing_time = req_processing_time.as_nanos() as f64; + let udp_connections_handled = (self.udp4_connections_handled() + self.udp6_connections_handled()) as f64; + + let previous_avg = self.udp_avg_connect_processing_time_ns(); + + // Moving average: https://en.wikipedia.org/wiki/Moving_average + let 
new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_connections_handled; + + tracing::debug!( + "Recalculated UDP average connect processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_connections_handled: {})", + new_avg, + previous_avg, + req_processing_time, + udp_connections_handled + ); + + new_avg + } + + #[allow(clippy::cast_precision_loss)] + pub fn recalculate_udp_avg_announce_processing_time_ns(&self, req_processing_time: Duration) -> f64 { + let req_processing_time = req_processing_time.as_nanos() as f64; + + let udp_announces_handled = (self.udp4_announces_handled() + self.udp6_announces_handled()) as f64; + + let previous_avg = self.udp_avg_announce_processing_time_ns(); + + // Moving average: https://en.wikipedia.org/wiki/Moving_average + let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_announces_handled; + + tracing::debug!( + "Recalculated UDP average announce processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_announces_handled: {})", + new_avg, + previous_avg, + req_processing_time, + udp_announces_handled + ); + + new_avg + } + + #[allow(clippy::cast_precision_loss)] + pub fn recalculate_udp_avg_scrape_processing_time_ns(&self, req_processing_time: Duration) -> f64 { + let req_processing_time = req_processing_time.as_nanos() as f64; + + let udp_scrapes_handled = (self.udp4_scrapes_handled() + self.udp6_scrapes_handled()) as f64; + + let previous_avg = self.udp_avg_scrape_processing_time_ns(); + + // Moving average: https://en.wikipedia.org/wiki/Moving_average + let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_scrapes_handled; + + tracing::debug!( + "Recalculated UDP average scrape processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_scrapes_handled: {})", + new_avg, + previous_avg, + req_processing_time, + udp_scrapes_handled + ); + + new_avg + } + // UDP /// Total number of UDP (UDP 
tracker) requests aborted. #[must_use] diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index eb0951614..2d081767e 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -73,85 +73,33 @@ impl Repository { result } - #[allow(clippy::cast_precision_loss)] - #[allow(clippy::cast_possible_truncation)] - #[allow(clippy::cast_sign_loss)] pub async fn recalculate_udp_avg_connect_processing_time_ns(&self, req_processing_time: Duration) -> f64 { let stats_lock = self.stats.write().await; - let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_connections_handled = (stats_lock.udp4_connections_handled() + stats_lock.udp6_connections_handled()) as f64; - - let previous_avg = stats_lock.udp_avg_connect_processing_time_ns(); - - // Moving average: https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_connections_handled; + let new_avg = stats_lock.recalculate_udp_avg_connect_processing_time_ns(req_processing_time); drop(stats_lock); - tracing::debug!( - "Recalculated UDP average connect processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_connections_handled: {})", - new_avg, - previous_avg, - req_processing_time, - udp_connections_handled - ); - new_avg } - #[allow(clippy::cast_precision_loss)] - #[allow(clippy::cast_possible_truncation)] - #[allow(clippy::cast_sign_loss)] pub async fn recalculate_udp_avg_announce_processing_time_ns(&self, req_processing_time: Duration) -> f64 { let stats_lock = self.stats.write().await; - let req_processing_time = req_processing_time.as_nanos() as f64; - - let udp_announces_handled = (stats_lock.udp4_announces_handled() + stats_lock.udp6_announces_handled()) as f64; - - let previous_avg = stats_lock.udp_avg_announce_processing_time_ns(); - - // Moving average: 
https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_announces_handled; + let new_avg = stats_lock.recalculate_udp_avg_announce_processing_time_ns(req_processing_time); drop(stats_lock); - tracing::debug!( - "Recalculated UDP average announce processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_announces_handled: {})", - new_avg, - previous_avg, - req_processing_time, - udp_announces_handled - ); - new_avg } - #[allow(clippy::cast_precision_loss)] - #[allow(clippy::cast_possible_truncation)] - #[allow(clippy::cast_sign_loss)] pub async fn recalculate_udp_avg_scrape_processing_time_ns(&self, req_processing_time: Duration) -> f64 { let stats_lock = self.stats.write().await; - let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_scrapes_handled = (stats_lock.udp4_scrapes_handled() + stats_lock.udp6_scrapes_handled()) as f64; - - let previous_avg = stats_lock.udp_avg_scrape_processing_time_ns(); - - // Moving average: https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_scrapes_handled; + let new_avg = stats_lock.recalculate_udp_avg_scrape_processing_time_ns(req_processing_time); drop(stats_lock); - tracing::debug!( - "Recalculated UDP average scrape processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_scrapes_handled: {})", - new_avg, - previous_avg, - req_processing_time, - udp_scrapes_handled - ); - new_avg } } From d50948ea1a5a311605adba930d464c3334835df1 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 19 Jun 2025 11:19:57 +0100 Subject: [PATCH 740/802] refactor: [#1598] make recalculate udp avg connect processing time metric and update atomic It also fixes a division by zero bug when the metrics is updated before the counter for number of conenctions has been increased. It only avoid the division by zero. 
I will properly fix it with an independent request counter for the moving average calculation. --- .../statistics/event/handler/response_sent.rs | 23 ++------ .../src/statistics/metrics.rs | 31 +++++++++-- .../src/statistics/repository.rs | 53 ++++++++++++++----- 3 files changed, 73 insertions(+), 34 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs index e76d67a4e..7b271f872 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -16,26 +16,13 @@ pub async fn handle_event( let (result_label_value, kind_label_value) = match kind { UdpResponseKind::Ok { req_kind } => match req_kind { UdpRequestKind::Connect => { - let new_avg = stats_repository - .recalculate_udp_avg_connect_processing_time_ns(req_processing_time) - .await; - - tracing::debug!("Updating average processing time metric for connect requests: {} ns", new_avg); - let mut label_set = LabelSet::from(context.clone()); label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - match stats_repository - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &label_set, - new_avg, - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } + + let _new_avg = stats_repository + .recalculate_udp_avg_connect_processing_time_ns(req_processing_time, &label_set, now) + .await; + (LabelValue::new("ok"), UdpRequestKind::Connect.into()) } UdpRequestKind::Announce { announce_request } => { diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index e0ca0aaaf..61902dbba 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -51,14 +51,23 @@ 
impl Metrics { impl Metrics { #[allow(clippy::cast_precision_loss)] - pub fn recalculate_udp_avg_connect_processing_time_ns(&self, req_processing_time: Duration) -> f64 { + pub fn recalculate_udp_avg_connect_processing_time_ns( + &mut self, + req_processing_time: Duration, + label_set: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> f64 { let req_processing_time = req_processing_time.as_nanos() as f64; let udp_connections_handled = (self.udp4_connections_handled() + self.udp6_connections_handled()) as f64; let previous_avg = self.udp_avg_connect_processing_time_ns(); - // Moving average: https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_connections_handled; + let new_avg = if udp_connections_handled == 0.0 { + req_processing_time + } else { + // Moving average: https://en.wikipedia.org/wiki/Moving_average + previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_connections_handled + }; tracing::debug!( "Recalculated UDP average connect processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_connections_handled: {})", @@ -68,9 +77,25 @@ impl Metrics { udp_connections_handled ); + self.update_udp_avg_connect_processing_time_ns(new_avg, label_set, now); + new_avg } + fn update_udp_avg_connect_processing_time_ns(&mut self, new_avg: f64, label_set: &LabelSet, now: DurationSinceUnixEpoch) { + tracing::debug!("Updating average processing time metric for connect requests: {} ns", new_avg); + + match self.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + label_set, + new_avg, + now, + ) { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } + } + #[allow(clippy::cast_precision_loss)] pub fn recalculate_udp_avg_announce_processing_time_ns(&self, req_processing_time: Duration) -> f64 { let req_processing_time = req_processing_time.as_nanos() as f64; diff --git 
a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index 2d081767e..cb6979a83 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -73,10 +73,15 @@ impl Repository { result } - pub async fn recalculate_udp_avg_connect_processing_time_ns(&self, req_processing_time: Duration) -> f64 { - let stats_lock = self.stats.write().await; + pub async fn recalculate_udp_avg_connect_processing_time_ns( + &self, + req_processing_time: Duration, + label_set: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> f64 { + let mut stats_lock = self.stats.write().await; - let new_avg = stats_lock.recalculate_udp_avg_connect_processing_time_ns(req_processing_time); + let new_avg = stats_lock.recalculate_udp_avg_connect_processing_time_ns(req_processing_time, label_set, now); drop(stats_lock); @@ -338,7 +343,9 @@ mod tests { // Calculate new average with processing time of 2000ns let processing_time = Duration::from_nanos(2000); - let new_avg = repo.recalculate_udp_avg_connect_processing_time_ns(processing_time).await; + let new_avg = repo + .recalculate_udp_avg_connect_processing_time_ns(processing_time, &connect_labels, now) + .await; // Moving average: previous_avg + (new_value - previous_avg) / total_connections // 1000 + (2000 - 1000) / 3 = 1000 + 333.33 = 1333.33 @@ -436,17 +443,25 @@ mod tests { #[tokio::test] async fn recalculate_average_methods_should_handle_zero_connections_gracefully() { let repo = Repository::new(); + let now = CurrentClock::now(); // Test with zero connections (should not panic, should handle division by zero) let processing_time = Duration::from_nanos(1000); - let connect_avg = repo.recalculate_udp_avg_connect_processing_time_ns(processing_time).await; + let connect_labels = LabelSet::from([("request_kind", "connect")]); + let connect_avg = repo + .recalculate_udp_avg_connect_processing_time_ns(processing_time, 
&connect_labels, now) + .await; + + let _announce_labels = LabelSet::from([("request_kind", "announce")]); let announce_avg = repo.recalculate_udp_avg_announce_processing_time_ns(processing_time).await; + + let _scrape_labels = LabelSet::from([("request_kind", "scrape")]); let scrape_avg = repo.recalculate_udp_avg_scrape_processing_time_ns(processing_time).await; // With 0 total connections, the formula becomes 0 + (1000 - 0) / 0 // This should handle the division by zero case gracefully - assert!(connect_avg.is_infinite() || connect_avg.is_nan()); + assert!((connect_avg - 1000.0).abs() < f64::EPSILON); assert!(announce_avg.is_infinite() || announce_avg.is_nan()); assert!(scrape_avg.is_infinite() || scrape_avg.is_nan()); } @@ -500,7 +515,10 @@ mod tests { // Test with very large processing time let large_duration = Duration::from_secs(1); // 1 second = 1,000,000,000 ns - let new_avg = repo.recalculate_udp_avg_connect_processing_time_ns(large_duration).await; + let connect_labels = LabelSet::from([("request_kind", "connect")]); + let new_avg = repo + .recalculate_udp_avg_connect_processing_time_ns(large_duration, &connect_labels, now) + .await; // Should handle large numbers without overflow assert!(new_avg > 0.0); @@ -575,6 +593,7 @@ mod tests { #[tokio::test] async fn it_should_handle_moving_average_calculation_before_any_connections_are_recorded() { let repo = Repository::new(); + let connect_labels = LabelSet::from([("request_kind", "connect")]); let now = CurrentClock::now(); // This test checks the behavior of `recalculate_udp_avg_connect_processing_time_ns`` @@ -591,12 +610,13 @@ mod tests { // First calculation: no connections recorded yet, should result in infinity let processing_time_1 = Duration::from_nanos(2000); - let avg_1 = repo.recalculate_udp_avg_connect_processing_time_ns(processing_time_1).await; + let avg_1 = repo + .recalculate_udp_avg_connect_processing_time_ns(processing_time_1, &connect_labels, now) + .await; - // Division by zero: 1000 + 
(2000 - 1000) / 0 = infinity assert!( - avg_1.is_infinite(), - "First calculation should be infinite due to division by zero" + (avg_1 - 2000.0).abs() < f64::EPSILON, + "First calculation should be 2000, but got {avg_1}" ); // Now add one connection and try again @@ -605,10 +625,17 @@ mod tests { .await .unwrap(); - // Second calculation: 1 connection, but previous average is infinity + // Second calculation: 1 connection let processing_time_2 = Duration::from_nanos(3000); - let avg_2 = repo.recalculate_udp_avg_connect_processing_time_ns(processing_time_2).await; + let connect_labels = LabelSet::from([("request_kind", "connect")]); + let avg_2 = repo + .recalculate_udp_avg_connect_processing_time_ns(processing_time_2, &connect_labels, now) + .await; + // There is one connection, so the average should be: + // 2000 + (3000 - 2000) / 1 = 2000 + 1000 = 3000 + // This is because one connection is not counted yet in the average calculation, + // so the average is simply the processing time of the second connection. assert!( (avg_2 - 3000.0).abs() < f64::EPSILON, "Second calculation should be 3000ns, but got {avg_2}" From 59fbb39974fe731d5b6bc8dc50cee29816058780 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 19 Jun 2025 11:32:50 +0100 Subject: [PATCH 741/802] refactor: [#1598] make recalculate udp avg announce processing time metric and update atomic It also fixes a division by zero bug when the metrics is updated before the counter for number of conenctions has been increased. It only avoid the division by zero. I will propoerly fixed with independent request counter for the moving average calculation. 
--- .../statistics/event/handler/response_sent.rs | 26 ++-------- .../src/statistics/metrics.rs | 51 ++++++++++++------- .../src/statistics/repository.rs | 23 ++++++--- 3 files changed, 54 insertions(+), 46 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs index 7b271f872..3258a7023 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -26,29 +26,13 @@ pub async fn handle_event( (LabelValue::new("ok"), UdpRequestKind::Connect.into()) } UdpRequestKind::Announce { announce_request } => { - let new_avg = stats_repository - .recalculate_udp_avg_announce_processing_time_ns(req_processing_time) - .await; - - tracing::debug!( - "Updating average processing time metric for announce requests: {} ns", - new_avg - ); - let mut label_set = LabelSet::from(context.clone()); label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - match stats_repository - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &label_set, - new_avg, - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } + + let _new_avg = stats_repository + .recalculate_udp_avg_announce_processing_time_ns(req_processing_time, &label_set, now) + .await; + (LabelValue::new("ok"), UdpRequestKind::Announce { announce_request }.into()) } UdpRequestKind::Scrape => { diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index 61902dbba..cef1c2824 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -77,35 +77,30 @@ impl Metrics { udp_connections_handled ); - self.update_udp_avg_connect_processing_time_ns(new_avg, label_set, 
now); + self.update_udp_avg_processing_time_ns(new_avg, label_set, now); new_avg } - fn update_udp_avg_connect_processing_time_ns(&mut self, new_avg: f64, label_set: &LabelSet, now: DurationSinceUnixEpoch) { - tracing::debug!("Updating average processing time metric for connect requests: {} ns", new_avg); - - match self.set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - label_set, - new_avg, - now, - ) { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } - } - #[allow(clippy::cast_precision_loss)] - pub fn recalculate_udp_avg_announce_processing_time_ns(&self, req_processing_time: Duration) -> f64 { + pub fn recalculate_udp_avg_announce_processing_time_ns( + &mut self, + req_processing_time: Duration, + label_set: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> f64 { let req_processing_time = req_processing_time.as_nanos() as f64; let udp_announces_handled = (self.udp4_announces_handled() + self.udp6_announces_handled()) as f64; let previous_avg = self.udp_avg_announce_processing_time_ns(); - // Moving average: https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_announces_handled; + let new_avg = if udp_announces_handled == 0.0 { + req_processing_time + } else { + // Moving average: https://en.wikipedia.org/wiki/Moving_average + previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_announces_handled + }; tracing::debug!( "Recalculated UDP average announce processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_announces_handled: {})", @@ -115,9 +110,29 @@ impl Metrics { udp_announces_handled ); + self.update_udp_avg_processing_time_ns(new_avg, label_set, now); + new_avg } + fn update_udp_avg_processing_time_ns(&mut self, new_avg: f64, label_set: &LabelSet, now: DurationSinceUnixEpoch) { + tracing::debug!( + "Updating average processing time metric to {} ns for label set {}", + new_avg, 
+ label_set, + ); + + match self.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + label_set, + new_avg, + now, + ) { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } + } + #[allow(clippy::cast_precision_loss)] pub fn recalculate_udp_avg_scrape_processing_time_ns(&self, req_processing_time: Duration) -> f64 { let req_processing_time = req_processing_time.as_nanos() as f64; diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index cb6979a83..024ff4535 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -88,10 +88,15 @@ impl Repository { new_avg } - pub async fn recalculate_udp_avg_announce_processing_time_ns(&self, req_processing_time: Duration) -> f64 { - let stats_lock = self.stats.write().await; + pub async fn recalculate_udp_avg_announce_processing_time_ns( + &self, + req_processing_time: Duration, + label_set: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> f64 { + let mut stats_lock = self.stats.write().await; - let new_avg = stats_lock.recalculate_udp_avg_announce_processing_time_ns(req_processing_time); + let new_avg = stats_lock.recalculate_udp_avg_announce_processing_time_ns(req_processing_time, label_set, now); drop(stats_lock); @@ -390,7 +395,9 @@ mod tests { // Calculate new average with processing time of 1500ns let processing_time = Duration::from_nanos(1500); - let new_avg = repo.recalculate_udp_avg_announce_processing_time_ns(processing_time).await; + let new_avg = repo + .recalculate_udp_avg_announce_processing_time_ns(processing_time, &announce_labels, now) + .await; // Moving average: previous_avg + (new_value - previous_avg) / total_announces // 500 + (1500 - 500) / 5 = 500 + 200 = 700 @@ -453,8 +460,10 @@ mod tests { .recalculate_udp_avg_connect_processing_time_ns(processing_time, &connect_labels, now) .await; - let 
_announce_labels = LabelSet::from([("request_kind", "announce")]); - let announce_avg = repo.recalculate_udp_avg_announce_processing_time_ns(processing_time).await; + let announce_labels = LabelSet::from([("request_kind", "announce")]); + let announce_avg = repo + .recalculate_udp_avg_announce_processing_time_ns(processing_time, &announce_labels, now) + .await; let _scrape_labels = LabelSet::from([("request_kind", "scrape")]); let scrape_avg = repo.recalculate_udp_avg_scrape_processing_time_ns(processing_time).await; @@ -462,7 +471,7 @@ mod tests { // With 0 total connections, the formula becomes 0 + (1000 - 0) / 0 // This should handle the division by zero case gracefully assert!((connect_avg - 1000.0).abs() < f64::EPSILON); - assert!(announce_avg.is_infinite() || announce_avg.is_nan()); + assert!((announce_avg - 1000.0).abs() < f64::EPSILON); assert!(scrape_avg.is_infinite() || scrape_avg.is_nan()); } From 47c294987725dba83363460c68222f914efcb698 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 19 Jun 2025 12:05:58 +0100 Subject: [PATCH 742/802] refactor: [#1598] make recalculate udp avg scrape processing time metric and update atomic It also fixes a division by zero bug when the metrics is updated before the counter for number of conenctions has been increased. It only avoid the division by zero. I will propoerly fixed with independent request counter for the moving average calculation. 
--- .../statistics/event/handler/response_sent.rs | 27 +++------- .../src/statistics/metrics.rs | 53 +++++++++++-------- .../src/statistics/repository.rs | 23 +++++--- 3 files changed, 55 insertions(+), 48 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs index 3258a7023..7594d16f2 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -4,7 +4,7 @@ use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::event::{ConnectionContext, UdpRequestKind, UdpResponseKind}; use crate::statistics::repository::Repository; -use crate::statistics::{UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL}; +use crate::statistics::UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL; pub async fn handle_event( context: ConnectionContext, @@ -36,33 +36,20 @@ pub async fn handle_event( (LabelValue::new("ok"), UdpRequestKind::Announce { announce_request }.into()) } UdpRequestKind::Scrape => { - let new_avg = stats_repository - .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time) - .await; - - tracing::debug!("Updating average processing time metric for scrape requests: {} ns", new_avg); - let mut label_set = LabelSet::from(context.clone()); label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); - match stats_repository - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &label_set, - new_avg, - now, - ) - .await - { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } + + let _new_avg = stats_repository + .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time, &label_set, now) + .await; + (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Scrape.to_string())) } }, 
UdpResponseKind::Error { opt_req_kind: _ } => (LabelValue::new("error"), LabelValue::ignore()), }; - // Extendable metrics + // Increase the number of responses sent let mut label_set = LabelSet::from(context); if result_label_value == LabelValue::new("ok") { label_set.upsert(label_name!("request_kind"), kind_label_value); diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index cef1c2824..eedd1a02f 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -115,34 +115,25 @@ impl Metrics { new_avg } - fn update_udp_avg_processing_time_ns(&mut self, new_avg: f64, label_set: &LabelSet, now: DurationSinceUnixEpoch) { - tracing::debug!( - "Updating average processing time metric to {} ns for label set {}", - new_avg, - label_set, - ); - - match self.set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - label_set, - new_avg, - now, - ) { - Ok(()) => {} - Err(err) => tracing::error!("Failed to set gauge: {}", err), - } - } - #[allow(clippy::cast_precision_loss)] - pub fn recalculate_udp_avg_scrape_processing_time_ns(&self, req_processing_time: Duration) -> f64 { + pub fn recalculate_udp_avg_scrape_processing_time_ns( + &mut self, + req_processing_time: Duration, + label_set: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> f64 { let req_processing_time = req_processing_time.as_nanos() as f64; let udp_scrapes_handled = (self.udp4_scrapes_handled() + self.udp6_scrapes_handled()) as f64; let previous_avg = self.udp_avg_scrape_processing_time_ns(); - // Moving average: https://en.wikipedia.org/wiki/Moving_average - let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_scrapes_handled; + let new_avg = if udp_scrapes_handled == 0.0 { + req_processing_time + } else { + // Moving average: https://en.wikipedia.org/wiki/Moving_average + previous_avg as f64 + 
(req_processing_time - previous_avg as f64) / udp_scrapes_handled + }; tracing::debug!( "Recalculated UDP average scrape processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_scrapes_handled: {})", @@ -152,9 +143,29 @@ impl Metrics { udp_scrapes_handled ); + self.update_udp_avg_processing_time_ns(new_avg, label_set, now); + new_avg } + fn update_udp_avg_processing_time_ns(&mut self, new_avg: f64, label_set: &LabelSet, now: DurationSinceUnixEpoch) { + tracing::debug!( + "Updating average processing time metric to {} ns for label set {}", + new_avg, + label_set, + ); + + match self.set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + label_set, + new_avg, + now, + ) { + Ok(()) => {} + Err(err) => tracing::error!("Failed to set gauge: {}", err), + } + } + // UDP /// Total number of UDP (UDP tracker) requests aborted. #[must_use] diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index 024ff4535..c9b3d0548 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -103,10 +103,15 @@ impl Repository { new_avg } - pub async fn recalculate_udp_avg_scrape_processing_time_ns(&self, req_processing_time: Duration) -> f64 { - let stats_lock = self.stats.write().await; + pub async fn recalculate_udp_avg_scrape_processing_time_ns( + &self, + req_processing_time: Duration, + label_set: &LabelSet, + now: DurationSinceUnixEpoch, + ) -> f64 { + let mut stats_lock = self.stats.write().await; - let new_avg = stats_lock.recalculate_udp_avg_scrape_processing_time_ns(req_processing_time); + let new_avg = stats_lock.recalculate_udp_avg_scrape_processing_time_ns(req_processing_time, label_set, now); drop(stats_lock); @@ -436,7 +441,9 @@ mod tests { // Calculate new average with processing time of 1200ns let processing_time = Duration::from_nanos(1200); - let new_avg = 
repo.recalculate_udp_avg_scrape_processing_time_ns(processing_time).await; + let new_avg = repo + .recalculate_udp_avg_scrape_processing_time_ns(processing_time, &scrape_labels, now) + .await; // Moving average: previous_avg + (new_value - previous_avg) / total_scrapes // 800 + (1200 - 800) / 4 = 800 + 100 = 900 @@ -465,14 +472,16 @@ mod tests { .recalculate_udp_avg_announce_processing_time_ns(processing_time, &announce_labels, now) .await; - let _scrape_labels = LabelSet::from([("request_kind", "scrape")]); - let scrape_avg = repo.recalculate_udp_avg_scrape_processing_time_ns(processing_time).await; + let scrape_labels = LabelSet::from([("request_kind", "scrape")]); + let scrape_avg = repo + .recalculate_udp_avg_scrape_processing_time_ns(processing_time, &scrape_labels, now) + .await; // With 0 total connections, the formula becomes 0 + (1000 - 0) / 0 // This should handle the division by zero case gracefully assert!((connect_avg - 1000.0).abs() < f64::EPSILON); assert!((announce_avg - 1000.0).abs() < f64::EPSILON); - assert!(scrape_avg.is_infinite() || scrape_avg.is_nan()); + assert!((scrape_avg - 1000.0).abs() < f64::EPSILON); } #[tokio::test] From 1c13b12c7cf6c4f109cebea8e8c85ccebb1f99c6 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 19 Jun 2025 12:27:16 +0100 Subject: [PATCH 743/802] fix: [#1589] partially. Moving average calculated for each time series We can't count the total number of UDP requests while calculating the moving average but updating it only for a concrete label set (time series). Averages are calculate for each label set. They could be aggregated by caclulating the average for all time series. 
--- .../src/statistics/metrics.rs | 52 +++++++++++++------ 1 file changed, 37 insertions(+), 15 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index eedd1a02f..8e32c1f4c 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -58,15 +58,16 @@ impl Metrics { now: DurationSinceUnixEpoch, ) -> f64 { let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_connections_handled = (self.udp4_connections_handled() + self.udp6_connections_handled()) as f64; - let previous_avg = self.udp_avg_connect_processing_time_ns(); + let request_accepted_total = self.udp_request_accepted(label_set) as f64; - let new_avg = if udp_connections_handled == 0.0 { + let previous_avg = self.udp_avg_processing_time_ns(label_set); + + let new_avg = if request_accepted_total == 0.0 { req_processing_time } else { // Moving average: https://en.wikipedia.org/wiki/Moving_average - previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_connections_handled + previous_avg as f64 + (req_processing_time - previous_avg as f64) / request_accepted_total }; tracing::debug!( @@ -74,7 +75,7 @@ impl Metrics { new_avg, previous_avg, req_processing_time, - udp_connections_handled + request_accepted_total ); self.update_udp_avg_processing_time_ns(new_avg, label_set, now); @@ -91,15 +92,15 @@ impl Metrics { ) -> f64 { let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_announces_handled = (self.udp4_announces_handled() + self.udp6_announces_handled()) as f64; + let request_accepted_total = self.udp_request_accepted(label_set) as f64; - let previous_avg = self.udp_avg_announce_processing_time_ns(); + let previous_avg = self.udp_avg_processing_time_ns(label_set); - let new_avg = if udp_announces_handled == 0.0 { + let new_avg = if request_accepted_total == 0.0 { req_processing_time } else { // Moving 
average: https://en.wikipedia.org/wiki/Moving_average - previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_announces_handled + previous_avg as f64 + (req_processing_time - previous_avg as f64) / request_accepted_total }; tracing::debug!( @@ -107,7 +108,7 @@ impl Metrics { new_avg, previous_avg, req_processing_time, - udp_announces_handled + request_accepted_total ); self.update_udp_avg_processing_time_ns(new_avg, label_set, now); @@ -124,15 +125,15 @@ impl Metrics { ) -> f64 { let req_processing_time = req_processing_time.as_nanos() as f64; - let udp_scrapes_handled = (self.udp4_scrapes_handled() + self.udp6_scrapes_handled()) as f64; + let request_accepted_total = self.udp_request_accepted(label_set) as f64; - let previous_avg = self.udp_avg_scrape_processing_time_ns(); + let previous_avg = self.udp_avg_processing_time_ns(label_set); - let new_avg = if udp_scrapes_handled == 0.0 { + let new_avg = if request_accepted_total == 0.0 { req_processing_time } else { // Moving average: https://en.wikipedia.org/wiki/Moving_average - previous_avg as f64 + (req_processing_time - previous_avg as f64) / udp_scrapes_handled + previous_avg as f64 + (req_processing_time - previous_avg as f64) / request_accepted_total }; tracing::debug!( @@ -140,7 +141,7 @@ impl Metrics { new_avg, previous_avg, req_processing_time, - udp_scrapes_handled + request_accepted_total ); self.update_udp_avg_processing_time_ns(new_avg, label_set, now); @@ -148,6 +149,27 @@ impl Metrics { new_avg } + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_avg_processing_time_ns(&self, label_set: &LabelSet) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + label_set, + ) + .unwrap_or_default() as u64 + } + + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_request_accepted(&self, label_set: &LabelSet) -> u64 { + 
self.metric_collection + .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), label_set) + .unwrap_or_default() as u64 + } + fn update_udp_avg_processing_time_ns(&mut self, new_avg: f64, label_set: &LabelSet, now: DurationSinceUnixEpoch) { tracing::debug!( "Updating average processing time metric to {} ns for label set {}", From 164de924999367b6fb714c2ecea38da7ad99b0fb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 19 Jun 2025 13:59:55 +0100 Subject: [PATCH 744/802] refactor: [#1589] remvoe duplicate code --- .../statistics/event/handler/response_sent.rs | 6 +- .../src/statistics/metrics.rs | 71 +------------------ .../src/statistics/repository.rs | 52 +++----------- 3 files changed, 17 insertions(+), 112 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs index 7594d16f2..34093f511 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -20,7 +20,7 @@ pub async fn handle_event( label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); let _new_avg = stats_repository - .recalculate_udp_avg_connect_processing_time_ns(req_processing_time, &label_set, now) + .recalculate_udp_avg_processing_time_ns(req_processing_time, &label_set, now) .await; (LabelValue::new("ok"), UdpRequestKind::Connect.into()) @@ -30,7 +30,7 @@ pub async fn handle_event( label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); let _new_avg = stats_repository - .recalculate_udp_avg_announce_processing_time_ns(req_processing_time, &label_set, now) + .recalculate_udp_avg_processing_time_ns(req_processing_time, &label_set, now) .await; (LabelValue::new("ok"), UdpRequestKind::Announce { announce_request }.into()) @@ -40,7 +40,7 @@ pub async fn handle_event( 
label_set.upsert(label_name!("request_kind"), LabelValue::new(&req_kind.to_string())); let _new_avg = stats_repository - .recalculate_udp_avg_scrape_processing_time_ns(req_processing_time, &label_set, now) + .recalculate_udp_avg_processing_time_ns(req_processing_time, &label_set, now) .await; (LabelValue::new("ok"), LabelValue::new(&UdpRequestKind::Scrape.to_string())) diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index 8e32c1f4c..bfed16c47 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -51,7 +51,7 @@ impl Metrics { impl Metrics { #[allow(clippy::cast_precision_loss)] - pub fn recalculate_udp_avg_connect_processing_time_ns( + pub fn recalculate_udp_avg_processing_time_ns( &mut self, req_processing_time: Duration, label_set: &LabelSet, @@ -71,73 +71,8 @@ impl Metrics { }; tracing::debug!( - "Recalculated UDP average connect processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_connections_handled: {})", - new_avg, - previous_avg, - req_processing_time, - request_accepted_total - ); - - self.update_udp_avg_processing_time_ns(new_avg, label_set, now); - - new_avg - } - - #[allow(clippy::cast_precision_loss)] - pub fn recalculate_udp_avg_announce_processing_time_ns( - &mut self, - req_processing_time: Duration, - label_set: &LabelSet, - now: DurationSinceUnixEpoch, - ) -> f64 { - let req_processing_time = req_processing_time.as_nanos() as f64; - - let request_accepted_total = self.udp_request_accepted(label_set) as f64; - - let previous_avg = self.udp_avg_processing_time_ns(label_set); - - let new_avg = if request_accepted_total == 0.0 { - req_processing_time - } else { - // Moving average: https://en.wikipedia.org/wiki/Moving_average - previous_avg as f64 + (req_processing_time - previous_avg as f64) / request_accepted_total - }; - - tracing::debug!( - "Recalculated UDP average announce 
processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_announces_handled: {})", - new_avg, - previous_avg, - req_processing_time, - request_accepted_total - ); - - self.update_udp_avg_processing_time_ns(new_avg, label_set, now); - - new_avg - } - - #[allow(clippy::cast_precision_loss)] - pub fn recalculate_udp_avg_scrape_processing_time_ns( - &mut self, - req_processing_time: Duration, - label_set: &LabelSet, - now: DurationSinceUnixEpoch, - ) -> f64 { - let req_processing_time = req_processing_time.as_nanos() as f64; - - let request_accepted_total = self.udp_request_accepted(label_set) as f64; - - let previous_avg = self.udp_avg_processing_time_ns(label_set); - - let new_avg = if request_accepted_total == 0.0 { - req_processing_time - } else { - // Moving average: https://en.wikipedia.org/wiki/Moving_average - previous_avg as f64 + (req_processing_time - previous_avg as f64) / request_accepted_total - }; - - tracing::debug!( - "Recalculated UDP average scrape processing time: {} ns (previous: {} ns, req_processing_time: {} ns, udp_scrapes_handled: {})", + "Recalculated UDP average processing time for labels {}: {} ns (previous: {} ns, req_processing_time: {} ns, request_accepted_total: {})", + label_set, new_avg, previous_avg, req_processing_time, diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index c9b3d0548..6695bbfbc 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -73,7 +73,7 @@ impl Repository { result } - pub async fn recalculate_udp_avg_connect_processing_time_ns( + pub async fn recalculate_udp_avg_processing_time_ns( &self, req_processing_time: Duration, label_set: &LabelSet, @@ -81,37 +81,7 @@ impl Repository { ) -> f64 { let mut stats_lock = self.stats.write().await; - let new_avg = stats_lock.recalculate_udp_avg_connect_processing_time_ns(req_processing_time, label_set, 
now); - - drop(stats_lock); - - new_avg - } - - pub async fn recalculate_udp_avg_announce_processing_time_ns( - &self, - req_processing_time: Duration, - label_set: &LabelSet, - now: DurationSinceUnixEpoch, - ) -> f64 { - let mut stats_lock = self.stats.write().await; - - let new_avg = stats_lock.recalculate_udp_avg_announce_processing_time_ns(req_processing_time, label_set, now); - - drop(stats_lock); - - new_avg - } - - pub async fn recalculate_udp_avg_scrape_processing_time_ns( - &self, - req_processing_time: Duration, - label_set: &LabelSet, - now: DurationSinceUnixEpoch, - ) -> f64 { - let mut stats_lock = self.stats.write().await; - - let new_avg = stats_lock.recalculate_udp_avg_scrape_processing_time_ns(req_processing_time, label_set, now); + let new_avg = stats_lock.recalculate_udp_avg_processing_time_ns(req_processing_time, label_set, now); drop(stats_lock); @@ -354,7 +324,7 @@ mod tests { // Calculate new average with processing time of 2000ns let processing_time = Duration::from_nanos(2000); let new_avg = repo - .recalculate_udp_avg_connect_processing_time_ns(processing_time, &connect_labels, now) + .recalculate_udp_avg_processing_time_ns(processing_time, &connect_labels, now) .await; // Moving average: previous_avg + (new_value - previous_avg) / total_connections @@ -401,7 +371,7 @@ mod tests { // Calculate new average with processing time of 1500ns let processing_time = Duration::from_nanos(1500); let new_avg = repo - .recalculate_udp_avg_announce_processing_time_ns(processing_time, &announce_labels, now) + .recalculate_udp_avg_processing_time_ns(processing_time, &announce_labels, now) .await; // Moving average: previous_avg + (new_value - previous_avg) / total_announces @@ -442,7 +412,7 @@ mod tests { // Calculate new average with processing time of 1200ns let processing_time = Duration::from_nanos(1200); let new_avg = repo - .recalculate_udp_avg_scrape_processing_time_ns(processing_time, &scrape_labels, now) + 
.recalculate_udp_avg_processing_time_ns(processing_time, &scrape_labels, now) .await; // Moving average: previous_avg + (new_value - previous_avg) / total_scrapes @@ -464,17 +434,17 @@ mod tests { let connect_labels = LabelSet::from([("request_kind", "connect")]); let connect_avg = repo - .recalculate_udp_avg_connect_processing_time_ns(processing_time, &connect_labels, now) + .recalculate_udp_avg_processing_time_ns(processing_time, &connect_labels, now) .await; let announce_labels = LabelSet::from([("request_kind", "announce")]); let announce_avg = repo - .recalculate_udp_avg_announce_processing_time_ns(processing_time, &announce_labels, now) + .recalculate_udp_avg_processing_time_ns(processing_time, &announce_labels, now) .await; let scrape_labels = LabelSet::from([("request_kind", "scrape")]); let scrape_avg = repo - .recalculate_udp_avg_scrape_processing_time_ns(processing_time, &scrape_labels, now) + .recalculate_udp_avg_processing_time_ns(processing_time, &scrape_labels, now) .await; // With 0 total connections, the formula becomes 0 + (1000 - 0) / 0 @@ -535,7 +505,7 @@ mod tests { let large_duration = Duration::from_secs(1); // 1 second = 1,000,000,000 ns let connect_labels = LabelSet::from([("request_kind", "connect")]); let new_avg = repo - .recalculate_udp_avg_connect_processing_time_ns(large_duration, &connect_labels, now) + .recalculate_udp_avg_processing_time_ns(large_duration, &connect_labels, now) .await; // Should handle large numbers without overflow @@ -629,7 +599,7 @@ mod tests { // First calculation: no connections recorded yet, should result in infinity let processing_time_1 = Duration::from_nanos(2000); let avg_1 = repo - .recalculate_udp_avg_connect_processing_time_ns(processing_time_1, &connect_labels, now) + .recalculate_udp_avg_processing_time_ns(processing_time_1, &connect_labels, now) .await; assert!( @@ -647,7 +617,7 @@ mod tests { let processing_time_2 = Duration::from_nanos(3000); let connect_labels = LabelSet::from([("request_kind", 
"connect")]); let avg_2 = repo - .recalculate_udp_avg_connect_processing_time_ns(processing_time_2, &connect_labels, now) + .recalculate_udp_avg_processing_time_ns(processing_time_2, &connect_labels, now) .await; // There is one connection, so the average should be: From ed5f1e69de7fc05a87250614425b562fb7db67b9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Thu, 19 Jun 2025 22:28:30 +0100 Subject: [PATCH 745/802] fix: [#1589] add dedicated metric for UDP request processing in moving average calculation Add a new metric `UDP_TRACKER_SERVER_PERFORMANCE_PROCESSED_REQUESTS_TOTAL` to track requests processed specifically for performance metrics, eliminating race conditions in the moving average calculation. **Changes:** - Add new metric constant `UDP_TRACKER_SERVER_PERFORMANCE_PROCESSED_REQUESTS_TOTAL` - Update `recalculate_udp_avg_processing_time_ns()` to use dedicated counter instead of accepted requests total - Add `udp_processed_requests_total()` method to retrieve the new metric value - Add `increment_udp_processed_requests_total()` helper method - Update metric descriptions to include the new counter **Problem Fixed:** Previously, the moving average calculation used the accepted requests counter that could be updated independently, causing race conditions where the same request count was used for multiple calculations. The new implementation increments its own dedicated counter atomically during the calculation, ensuring consistency. **Behavior Change:** The counter now starts at 0 and gets incremented to 1 on the first calculation call, then uses proper moving average formula for subsequent calls. This eliminates division by zero issues and provides more accurate moving averages. **Tests Updated:** Updated repository tests to reflect the new atomic behavior where the processed requests counter is managed specifically for moving average calculations. 
Fixes race conditions in UDP request processing time metrics while maintaining backward compatibility of all public APIs. --- .../src/statistics/metrics.rs | 78 ++++++++++--- .../udp-tracker-server/src/statistics/mod.rs | 10 ++ .../src/statistics/repository.rs | 106 +++++------------- 3 files changed, 103 insertions(+), 91 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index bfed16c47..e7653815f 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -9,7 +9,8 @@ use torrust_tracker_metrics::metric_name; use torrust_tracker_primitives::DurationSinceUnixEpoch; use crate::statistics::{ - UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_IPS_BANNED_TOTAL, UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, + UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_IPS_BANNED_TOTAL, + UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL, UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL, UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, @@ -57,26 +58,22 @@ impl Metrics { label_set: &LabelSet, now: DurationSinceUnixEpoch, ) -> f64 { - let req_processing_time = req_processing_time.as_nanos() as f64; - - let request_accepted_total = self.udp_request_accepted(label_set) as f64; + self.increment_udp_processed_requests_total(label_set, now); + let processed_requests_total = self.udp_processed_requests_total(label_set) as f64; let previous_avg = self.udp_avg_processing_time_ns(label_set); + let req_processing_time = req_processing_time.as_nanos() as f64; - let new_avg = if request_accepted_total == 0.0 { - req_processing_time - } else { - // Moving average: https://en.wikipedia.org/wiki/Moving_average - previous_avg as f64 + 
(req_processing_time - previous_avg as f64) / request_accepted_total - }; + // Moving average: https://en.wikipedia.org/wiki/Moving_average + let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / processed_requests_total; tracing::debug!( - "Recalculated UDP average processing time for labels {}: {} ns (previous: {} ns, req_processing_time: {} ns, request_accepted_total: {})", + "Recalculated UDP average processing time for labels {}: {} ns (previous: {} ns, req_processing_time: {} ns, request_processed_total: {})", label_set, new_avg, previous_avg, req_processing_time, - request_accepted_total + processed_requests_total ); self.update_udp_avg_processing_time_ns(new_avg, label_set, now); @@ -105,6 +102,18 @@ impl Metrics { .unwrap_or_default() as u64 } + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_processed_requests_total(&self, label_set: &LabelSet) -> u64 { + self.metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), + label_set, + ) + .unwrap_or_default() as u64 + } + fn update_udp_avg_processing_time_ns(&mut self, new_avg: f64, label_set: &LabelSet, now: DurationSinceUnixEpoch) { tracing::debug!( "Updating average processing time metric to {} ns for label set {}", @@ -123,6 +132,19 @@ impl Metrics { } } + fn increment_udp_processed_requests_total(&mut self, label_set: &LabelSet, now: DurationSinceUnixEpoch) { + tracing::debug!("Incrementing processed requests total for label set {}", label_set,); + + match self.increase_counter( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), + label_set, + now, + ) { + Ok(()) => {} + Err(err) => tracing::error!("Failed to increment counter: {}", err), + } + } + // UDP /// Total number of UDP (UDP tracker) requests aborted. 
#[must_use] @@ -360,9 +382,10 @@ mod tests { use super::*; use crate::statistics::{ UDP_TRACKER_SERVER_ERRORS_TOTAL, UDP_TRACKER_SERVER_IPS_BANNED_TOTAL, - UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL, - UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL, - UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL, UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, + UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL, UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS, + UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL, + UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL, UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL, + UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL, }; use crate::CurrentClock; @@ -437,6 +460,31 @@ mod tests { assert!(result.is_ok()); } + #[test] + fn it_should_return_zero_for_udp_processed_requests_total_when_no_data() { + let metrics = Metrics::default(); + let labels = LabelSet::from([("request_kind", "connect")]); + assert_eq!(metrics.udp_processed_requests_total(&labels), 0); + } + + #[test] + fn it_should_increment_processed_requests_total() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("request_kind", "connect")]); + + // Directly increment the counter using the public method + metrics + .increase_counter( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), + &labels, + now, + ) + .unwrap(); + + assert_eq!(metrics.udp_processed_requests_total(&labels), 1); + } + mod udp_general_metrics { use super::*; diff --git a/packages/udp-tracker-server/src/statistics/mod.rs b/packages/udp-tracker-server/src/statistics/mod.rs index 768722ba3..6bd35b9a1 100644 --- a/packages/udp-tracker-server/src/statistics/mod.rs +++ b/packages/udp-tracker-server/src/statistics/mod.rs @@ -17,6 +17,8 @@ pub const UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL: &str = "udp_tracker_server pub const 
UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL: &str = "udp_tracker_server_responses_sent_total"; pub const UDP_TRACKER_SERVER_ERRORS_TOTAL: &str = "udp_tracker_server_errors_total"; pub const UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS: &str = "udp_tracker_server_performance_avg_processing_time_ns"; +pub const UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL: &str = + "udp_tracker_server_performance_avg_processed_requests_total"; #[must_use] pub fn describe_metrics() -> Metrics { @@ -76,5 +78,13 @@ pub fn describe_metrics() -> Metrics { Some(MetricDescription::new("Average time to process a UDP request in nanoseconds")), ); + metrics.metric_collection.describe_counter( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), + Some(Unit::Count), + Some(MetricDescription::new( + "Total number of UDP requests processed for the average performance metrics", + )), + ); + metrics } diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index 6695bbfbc..1ab2cc6a7 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -295,21 +295,6 @@ mod tests { let repo = Repository::new(); let now = CurrentClock::now(); - // Set up initial connections handled - let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "connect")]); - let ipv6_labels = LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "connect")]); - - // Simulate 2 IPv4 and 1 IPv6 connections - repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) - .await - .unwrap(); - repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) - .await - .unwrap(); - repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv6_labels, now) - .await - .unwrap(); - // 
Set initial average to 1000ns let connect_labels = LabelSet::from([("request_kind", "connect")]); repo.set_gauge( @@ -322,14 +307,16 @@ mod tests { .unwrap(); // Calculate new average with processing time of 2000ns + // This will increment the processed requests counter from 0 to 1 let processing_time = Duration::from_nanos(2000); let new_avg = repo .recalculate_udp_avg_processing_time_ns(processing_time, &connect_labels, now) .await; - // Moving average: previous_avg + (new_value - previous_avg) / total_connections - // 1000 + (2000 - 1000) / 3 = 1000 + 333.33 = 1333.33 - let expected_avg = 1000.0 + (2000.0 - 1000.0) / 3.0; + // Moving average: previous_avg + (new_value - previous_avg) / processed_requests_total + // With processed_requests_total = 1 (incremented during the call): + // 1000 + (2000 - 1000) / 1 = 1000 + 1000 = 2000 + let expected_avg = 1000.0 + (2000.0 - 1000.0) / 1.0; assert!( (new_avg - expected_avg).abs() < 0.01, "Expected {expected_avg}, got {new_avg}" @@ -341,22 +328,6 @@ mod tests { let repo = Repository::new(); let now = CurrentClock::now(); - // Set up initial announces handled - let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "announce")]); - let ipv6_labels = LabelSet::from([("server_binding_address_ip_family", "inet6"), ("request_kind", "announce")]); - - // Simulate 3 IPv4 and 2 IPv6 announces - for _ in 0..3 { - repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) - .await - .unwrap(); - } - for _ in 0..2 { - repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv6_labels, now) - .await - .unwrap(); - } - // Set initial average to 500ns let announce_labels = LabelSet::from([("request_kind", "announce")]); repo.set_gauge( @@ -369,14 +340,16 @@ mod tests { .unwrap(); // Calculate new average with processing time of 1500ns + // This will increment the processed requests counter from 0 to 1 let processing_time = 
Duration::from_nanos(1500); let new_avg = repo .recalculate_udp_avg_processing_time_ns(processing_time, &announce_labels, now) .await; - // Moving average: previous_avg + (new_value - previous_avg) / total_announces - // 500 + (1500 - 500) / 5 = 500 + 200 = 700 - let expected_avg = 500.0 + (1500.0 - 500.0) / 5.0; + // Moving average: previous_avg + (new_value - previous_avg) / processed_requests_total + // With processed_requests_total = 1 (incremented during the call): + // 500 + (1500 - 500) / 1 = 500 + 1000 = 1500 + let expected_avg = 500.0 + (1500.0 - 500.0) / 1.0; assert!( (new_avg - expected_avg).abs() < 0.01, "Expected {expected_avg}, got {new_avg}" @@ -388,16 +361,6 @@ mod tests { let repo = Repository::new(); let now = CurrentClock::now(); - // Set up initial scrapes handled - let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "scrape")]); - - // Simulate 4 IPv4 scrapes - for _ in 0..4 { - repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) - .await - .unwrap(); - } - // Set initial average to 800ns let scrape_labels = LabelSet::from([("request_kind", "scrape")]); repo.set_gauge( @@ -410,14 +373,16 @@ mod tests { .unwrap(); // Calculate new average with processing time of 1200ns + // This will increment the processed requests counter from 0 to 1 let processing_time = Duration::from_nanos(1200); let new_avg = repo .recalculate_udp_avg_processing_time_ns(processing_time, &scrape_labels, now) .await; - // Moving average: previous_avg + (new_value - previous_avg) / total_scrapes - // 800 + (1200 - 800) / 4 = 800 + 100 = 900 - let expected_avg = 800.0 + (1200.0 - 800.0) / 4.0; + // Moving average: previous_avg + (new_value - previous_avg) / processed_requests_total + // With processed_requests_total = 1 (incremented during the call): + // 800 + (1200 - 800) / 1 = 800 + 400 = 1200 + let expected_avg = 800.0 + (1200.0 - 800.0) / 1.0; assert!( (new_avg - 
expected_avg).abs() < 0.01, "Expected {expected_avg}, got {new_avg}" @@ -584,49 +549,38 @@ mod tests { let connect_labels = LabelSet::from([("request_kind", "connect")]); let now = CurrentClock::now(); - // This test checks the behavior of `recalculate_udp_avg_connect_processing_time_ns`` - // when no connections have been recorded yet. The first call should - // handle division by zero gracefully and return an infinite average, - // which is the current behavior. + // This test checks the behavior of `recalculate_udp_avg_processing_time_ns` + // when no processed requests have been recorded yet. The first call should + // handle division by zero gracefully and set the first average to the + // processing time of the first request. - // todo: the first average should be 2000ns, not infinity. - // This is because the first connection is not counted in the average - // calculation if the counter is increased after calculating the average. - // The problem is that we count requests when they are accepted, not - // when they are processed. And we calculate the average when the - // response is sent. 
- - // First calculation: no connections recorded yet, should result in infinity + // First calculation: no processed requests recorded yet let processing_time_1 = Duration::from_nanos(2000); let avg_1 = repo .recalculate_udp_avg_processing_time_ns(processing_time_1, &connect_labels, now) .await; + // The first average should be the first processing time since processed_requests_total is 0 + // When processed_requests_total == 0.0, new_avg = req_processing_time assert!( (avg_1 - 2000.0).abs() < f64::EPSILON, "First calculation should be 2000, but got {avg_1}" ); - // Now add one connection and try again - let ipv4_labels = LabelSet::from([("server_binding_address_ip_family", "inet"), ("request_kind", "connect")]); - repo.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), &ipv4_labels, now) - .await - .unwrap(); - - // Second calculation: 1 connection + // Second calculation: now we have one processed request (incremented during first call) let processing_time_2 = Duration::from_nanos(3000); - let connect_labels = LabelSet::from([("request_kind", "connect")]); let avg_2 = repo .recalculate_udp_avg_processing_time_ns(processing_time_2, &connect_labels, now) .await; - // There is one connection, so the average should be: - // 2000 + (3000 - 2000) / 1 = 2000 + 1000 = 3000 - // This is because one connection is not counted yet in the average calculation, - // so the average is simply the processing time of the second connection. 
+ // Moving average calculation: previous_avg + (new_value - previous_avg) / processed_requests_total + // After first call: processed_requests_total = 1, avg = 2000 + // During second call: processed_requests_total incremented to 2 + // new_avg = 2000 + (3000 - 2000) / 2 = 2000 + 500 = 2500 + let expected_avg_2 = 2000.0 + (3000.0 - 2000.0) / 2.0; assert!( - (avg_2 - 3000.0).abs() < f64::EPSILON, - "Second calculation should be 3000ns, but got {avg_2}" + (avg_2 - expected_avg_2).abs() < f64::EPSILON, + "Second calculation should be {expected_avg_2}ns, but got {avg_2}" ); } } From 384b887fa2790413cd189c169c047f5ceebcbe4c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jun 2025 07:57:48 +0100 Subject: [PATCH 746/802] feat(metrics): [#1589] add Avg (average) aggregate function Implements a new aggregate function for calculating averages of metric samples that match specific label criteria, complementing the existing Sum aggregation. - **metrics/src/metric/aggregate/avg.rs**: New metric-level average trait and implementations - `Avg` trait with `avg()` method for calculating averages - Implementation for `Metric<Counter>` returning `f64` - Implementation for `Metric<Gauge>` returning `f64` - Comprehensive unit tests with edge cases (empty samples, large values, etc.) 
- **metrics/src/metric_collection/aggregate/avg.rs**: New collection-level average trait - `Avg` trait for `MetricCollection` and `MetricKindCollection` - Delegates to metric-level implementations - Handles mixed counter/gauge collections by trying counters first, then gauges - Returns `None` for non-existent metrics - Comprehensive test suite covering various scenarios - **metrics/src/metric/aggregate/mod.rs**: Export new `avg` module - **metrics/src/metric_collection/aggregate/mod.rs**: Export new `avg` module - **metrics/README.md**: Add example usage of the new `Avg` trait in the aggregation section - **Type Safety**: Returns appropriate types (`f64` for both counters and gauges) - **Label Filtering**: Supports filtering samples by label criteria like existing `Sum` - **Edge Case Handling**: Returns `0.0` for empty sample sets - **Performance**: Uses iterator chains for efficient sample processing - **Comprehensive Testing**: 205 tests pass including new avg functionality ```rust use torrust_tracker_metrics::metric_collection::aggregate::Avg; // Calculate average of all matching samples let avg_value = metrics.avg(&metric_name, &label_criteria); ``` The implementation follows the same patterns as the existing `Sum` aggregate function, ensuring consistency in the codebase and maintaining the same level of type safety and performance characteristics. 
--- packages/metrics/README.md | 10 +- packages/metrics/src/metric/aggregate/avg.rs | 307 ++++++++++++++++++ packages/metrics/src/metric/aggregate/mod.rs | 1 + .../src/metric_collection/aggregate/avg.rs | 214 ++++++++++++ .../src/metric_collection/aggregate/mod.rs | 1 + 5 files changed, 532 insertions(+), 1 deletion(-) create mode 100644 packages/metrics/src/metric/aggregate/avg.rs create mode 100644 packages/metrics/src/metric_collection/aggregate/avg.rs diff --git a/packages/metrics/README.md b/packages/metrics/README.md index 9f3883fba..3d1d94c5f 100644 --- a/packages/metrics/README.md +++ b/packages/metrics/README.md @@ -67,7 +67,7 @@ println!("{}", prometheus_output); ### Metric Aggregation ```rust -use torrust_tracker_metrics::metric_collection::aggregate::Sum; +use torrust_tracker_metrics::metric_collection::aggregate::{Sum, Avg}; // Sum all counter values matching specific labels let total_requests = metrics.sum( @@ -76,6 +76,14 @@ let total_requests = metrics.sum( ); println!("Total requests: {:?}", total_requests); + +// Calculate average of gauge values matching specific labels +let avg_response_time = metrics.avg( + &metric_name!("response_time_seconds"), + &[("endpoint", "/announce")].into(), +); + +println!("Average response time: {:?}", avg_response_time); ``` ## Architecture diff --git a/packages/metrics/src/metric/aggregate/avg.rs b/packages/metrics/src/metric/aggregate/avg.rs new file mode 100644 index 000000000..e1882ea68 --- /dev/null +++ b/packages/metrics/src/metric/aggregate/avg.rs @@ -0,0 +1,307 @@ +use crate::counter::Counter; +use crate::gauge::Gauge; +use crate::label::LabelSet; +use crate::metric::Metric; + +pub trait Avg { + type Output; + fn avg(&self, label_set_criteria: &LabelSet) -> Self::Output; +} + +impl Avg for Metric { + type Output = f64; + + fn avg(&self, label_set_criteria: &LabelSet) -> Self::Output { + let matching_samples: Vec<_> = self + .sample_collection + .iter() + .filter(|(label_set, _measurement)| 
label_set.matches(label_set_criteria)) + .collect(); + + if matching_samples.is_empty() { + return 0.0; + } + + let sum: u64 = matching_samples + .iter() + .map(|(_label_set, measurement)| measurement.value().primitive()) + .sum(); + + #[allow(clippy::cast_precision_loss)] + (sum as f64 / matching_samples.len() as f64) + } +} + +impl Avg for Metric { + type Output = f64; + + fn avg(&self, label_set_criteria: &LabelSet) -> Self::Output { + let matching_samples: Vec<_> = self + .sample_collection + .iter() + .filter(|(label_set, _measurement)| label_set.matches(label_set_criteria)) + .collect(); + + if matching_samples.is_empty() { + return 0.0; + } + + let sum: f64 = matching_samples + .iter() + .map(|(_label_set, measurement)| measurement.value().primitive()) + .sum(); + + #[allow(clippy::cast_precision_loss)] + (sum / matching_samples.len() as f64) + } +} + +#[cfg(test)] +mod tests { + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::counter::Counter; + use crate::gauge::Gauge; + use crate::label::LabelSet; + use crate::metric::aggregate::avg::Avg; + use crate::metric::{Metric, MetricName}; + use crate::metric_name; + use crate::sample::Sample; + use crate::sample_collection::SampleCollection; + + struct MetricBuilder { + sample_time: DurationSinceUnixEpoch, + name: MetricName, + samples: Vec>, + } + + impl Default for MetricBuilder { + fn default() -> Self { + Self { + sample_time: DurationSinceUnixEpoch::from_secs(1_743_552_000), + name: metric_name!("test_metric"), + samples: vec![], + } + } + } + + impl MetricBuilder { + fn with_sample(mut self, value: T, label_set: &LabelSet) -> Self { + let sample = Sample::new(value, self.sample_time, label_set.clone()); + self.samples.push(sample); + self + } + + fn build(self) -> Metric { + Metric::new( + self.name, + None, + None, + SampleCollection::new(self.samples).expect("invalid samples"), + ) + } + } + + fn counter_cases() -> Vec<(Metric, LabelSet, f64)> { + // (metric, label set criteria, 
expected_average_value) + vec![ + // Metric with one sample without label set + ( + MetricBuilder::default().with_sample(1.into(), &LabelSet::empty()).build(), + LabelSet::empty(), + 1.0, + ), + // Metric with one sample with a label set + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0, + ), + // Metric with two samples, different label sets, average all + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value")].into()) + .with_sample(3.into(), &[("l2", "l2_value")].into()) + .build(), + LabelSet::empty(), + 2.0, // (1 + 3) / 2 = 2.0 + ), + // Metric with two samples, different label sets, average one + ( + MetricBuilder::default() + .with_sample(1.into(), &[("l1", "l1_value")].into()) + .with_sample(2.into(), &[("l2", "l2_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0, + ), + // Metric with three samples, same label key, different label values, average by key + ( + MetricBuilder::default() + .with_sample(2.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(4.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .with_sample(6.into(), &[("l1", "l1_value"), ("lc", "lc_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 4.0, // (2 + 4 + 6) / 3 = 4.0 + ), + // Metric with two samples, different label values, average by subkey + ( + MetricBuilder::default() + .with_sample(5.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(7.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .build(), + [("la", "la_value")].into(), + 5.0, + ), + // Edge: Metric with no samples at all + (MetricBuilder::default().build(), LabelSet::empty(), 0.0), + // Edge: Metric with samples but no matching labels + ( + MetricBuilder::default() + .with_sample(5.into(), &[("foo", "bar")].into()) + .build(), + [("not", "present")].into(), + 0.0, + ), + // Edge: Metric with zero value + ( + MetricBuilder::default() 
+ .with_sample(0.into(), &[("l3", "l3_value")].into()) + .build(), + [("l3", "l3_value")].into(), + 0.0, + ), + // Edge: Metric with a very large value + ( + MetricBuilder::default() + .with_sample((u64::MAX / 2).into(), &[("edge", "large1")].into()) + .with_sample((u64::MAX / 2).into(), &[("edge", "large2")].into()) + .build(), + LabelSet::empty(), + #[allow(clippy::cast_precision_loss)] + (u64::MAX as f64 / 2.0), // Average of (max/2) and (max/2) + ), + ] + } + + fn gauge_cases() -> Vec<(Metric, LabelSet, f64)> { + // (metric, label set criteria, expected_average_value) + vec![ + // Metric with one sample without label set + ( + MetricBuilder::default().with_sample(1.0.into(), &LabelSet::empty()).build(), + LabelSet::empty(), + 1.0, + ), + // Metric with one sample with a label set + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0, + ), + // Metric with two samples, different label sets, average all + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value")].into()) + .with_sample(3.0.into(), &[("l2", "l2_value")].into()) + .build(), + LabelSet::empty(), + 2.0, // (1.0 + 3.0) / 2 = 2.0 + ), + // Metric with two samples, different label sets, average one + ( + MetricBuilder::default() + .with_sample(1.0.into(), &[("l1", "l1_value")].into()) + .with_sample(2.0.into(), &[("l2", "l2_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 1.0, + ), + // Metric with three samples, same label key, different label values, average by key + ( + MetricBuilder::default() + .with_sample(2.0.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(4.0.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .with_sample(6.0.into(), &[("l1", "l1_value"), ("lc", "lc_value")].into()) + .build(), + [("l1", "l1_value")].into(), + 4.0, // (2.0 + 4.0 + 6.0) / 3 = 4.0 + ), + // Metric with two samples, different label values, average by subkey + ( + 
MetricBuilder::default() + .with_sample(5.0.into(), &[("l1", "l1_value"), ("la", "la_value")].into()) + .with_sample(7.0.into(), &[("l1", "l1_value"), ("lb", "lb_value")].into()) + .build(), + [("la", "la_value")].into(), + 5.0, + ), + // Edge: Metric with no samples at all + (MetricBuilder::default().build(), LabelSet::empty(), 0.0), + // Edge: Metric with samples but no matching labels + ( + MetricBuilder::default() + .with_sample(5.0.into(), &[("foo", "bar")].into()) + .build(), + [("not", "present")].into(), + 0.0, + ), + // Edge: Metric with zero value + ( + MetricBuilder::default() + .with_sample(0.0.into(), &[("l3", "l3_value")].into()) + .build(), + [("l3", "l3_value")].into(), + 0.0, + ), + // Edge: Metric with negative values + ( + MetricBuilder::default() + .with_sample((-2.0).into(), &[("l4", "l4_value")].into()) + .with_sample(4.0.into(), &[("l5", "l5_value")].into()) + .build(), + LabelSet::empty(), + 1.0, // (-2.0 + 4.0) / 2 = 1.0 + ), + // Edge: Metric with decimal values + ( + MetricBuilder::default() + .with_sample(1.5.into(), &[("l6", "l6_value")].into()) + .with_sample(2.5.into(), &[("l7", "l7_value")].into()) + .build(), + LabelSet::empty(), + 2.0, // (1.5 + 2.5) / 2 = 2.0 + ), + ] + } + + #[test] + fn test_counter_cases() { + for (idx, (metric, criteria, expected_value)) in counter_cases().iter().enumerate() { + let avg = metric.avg(criteria); + + assert!( + (avg - expected_value).abs() <= f64::EPSILON, + "at case {idx}, expected avg to be {expected_value}, got {avg}" + ); + } + } + + #[test] + fn test_gauge_cases() { + for (idx, (metric, criteria, expected_value)) in gauge_cases().iter().enumerate() { + let avg = metric.avg(criteria); + + assert!( + (avg - expected_value).abs() <= f64::EPSILON, + "at case {idx}, expected avg to be {expected_value}, got {avg}" + ); + } + } +} diff --git a/packages/metrics/src/metric/aggregate/mod.rs b/packages/metrics/src/metric/aggregate/mod.rs index dce785d95..1224a1f52 100644 --- 
a/packages/metrics/src/metric/aggregate/mod.rs +++ b/packages/metrics/src/metric/aggregate/mod.rs @@ -1 +1,2 @@ +pub mod avg; pub mod sum; diff --git a/packages/metrics/src/metric_collection/aggregate/avg.rs b/packages/metrics/src/metric_collection/aggregate/avg.rs new file mode 100644 index 000000000..936754fc4 --- /dev/null +++ b/packages/metrics/src/metric_collection/aggregate/avg.rs @@ -0,0 +1,214 @@ +use crate::counter::Counter; +use crate::gauge::Gauge; +use crate::label::LabelSet; +use crate::metric::aggregate::avg::Avg as MetricAvgTrait; +use crate::metric::MetricName; +use crate::metric_collection::{MetricCollection, MetricKindCollection}; + +pub trait Avg { + fn avg(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option; +} + +impl Avg for MetricCollection { + fn avg(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { + if let Some(value) = self.counters.avg(metric_name, label_set_criteria) { + return Some(value); + } + + if let Some(value) = self.gauges.avg(metric_name, label_set_criteria) { + return Some(value); + } + + None + } +} + +impl Avg for MetricKindCollection { + fn avg(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { + self.metrics + .get(metric_name) + .map(|metric| metric.avg(label_set_criteria)) + } +} + +impl Avg for MetricKindCollection { + fn avg(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { + self.metrics.get(metric_name).map(|metric| metric.avg(label_set_criteria)) + } +} + +#[cfg(test)] +mod tests { + + mod it_should_allow_averaging_all_metric_samples_containing_some_given_labels { + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::label::LabelValue; + use crate::label_name; + use crate::metric_collection::aggregate::avg::Avg; + + #[test] + fn type_counter_with_two_samples() { + use crate::label::LabelSet; + use crate::metric_collection::MetricCollection; + use crate::metric_name; + + let metric_name = 
metric_name!("test_counter"); + + let mut collection = MetricCollection::default(); + + collection + .increment_counter( + &metric_name!("test_counter"), + &(label_name!("label_1"), LabelValue::new("value_1")).into(), + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + collection + .increment_counter( + &metric_name!("test_counter"), + &(label_name!("label_2"), LabelValue::new("value_2")).into(), + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + // Two samples with value 1 each, average should be 1.0 + assert_eq!(collection.avg(&metric_name, &LabelSet::empty()), Some(1.0)); + assert_eq!( + collection.avg(&metric_name, &(label_name!("label_1"), LabelValue::new("value_1")).into()), + Some(1.0) + ); + } + + #[test] + fn type_counter_with_different_values() { + use crate::label::LabelSet; + use crate::metric_collection::MetricCollection; + use crate::metric_name; + + let metric_name = metric_name!("test_counter"); + + let mut collection = MetricCollection::default(); + + // First increment: value goes from 0 to 1 + collection + .increment_counter( + &metric_name!("test_counter"), + &(label_name!("label_1"), LabelValue::new("value_1")).into(), + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + // Second increment on the same label: value goes from 1 to 2 + collection + .increment_counter( + &metric_name!("test_counter"), + &(label_name!("label_1"), LabelValue::new("value_1")).into(), + DurationSinceUnixEpoch::from_secs(2), + ) + .unwrap(); + + // Create another counter with a different value + collection + .set_counter( + &metric_name!("test_counter"), + &(label_name!("label_2"), LabelValue::new("value_2")).into(), + 4, + DurationSinceUnixEpoch::from_secs(3), + ) + .unwrap(); + + // Average of 2 and 4 should be 3.0 + assert_eq!(collection.avg(&metric_name, &LabelSet::empty()), Some(3.0)); + assert_eq!( + collection.avg(&metric_name, &(label_name!("label_1"), LabelValue::new("value_1")).into()), + Some(2.0) + ); + assert_eq!( + 
collection.avg(&metric_name, &(label_name!("label_2"), LabelValue::new("value_2")).into()), + Some(4.0) + ); + } + + #[test] + fn type_gauge_with_two_samples() { + use crate::label::LabelSet; + use crate::metric_collection::MetricCollection; + use crate::metric_name; + + let metric_name = metric_name!("test_gauge"); + + let mut collection = MetricCollection::default(); + + collection + .set_gauge( + &metric_name!("test_gauge"), + &(label_name!("label_1"), LabelValue::new("value_1")).into(), + 2.0, + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + collection + .set_gauge( + &metric_name!("test_gauge"), + &(label_name!("label_2"), LabelValue::new("value_2")).into(), + 4.0, + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + // Average of 2.0 and 4.0 should be 3.0 + assert_eq!(collection.avg(&metric_name, &LabelSet::empty()), Some(3.0)); + assert_eq!( + collection.avg(&metric_name, &(label_name!("label_1"), LabelValue::new("value_1")).into()), + Some(2.0) + ); + } + + #[test] + fn type_gauge_with_negative_values() { + use crate::label::LabelSet; + use crate::metric_collection::MetricCollection; + use crate::metric_name; + + let metric_name = metric_name!("test_gauge"); + + let mut collection = MetricCollection::default(); + + collection + .set_gauge( + &metric_name!("test_gauge"), + &(label_name!("label_1"), LabelValue::new("value_1")).into(), + -2.0, + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + collection + .set_gauge( + &metric_name!("test_gauge"), + &(label_name!("label_2"), LabelValue::new("value_2")).into(), + 6.0, + DurationSinceUnixEpoch::from_secs(1), + ) + .unwrap(); + + // Average of -2.0 and 6.0 should be 2.0 + assert_eq!(collection.avg(&metric_name, &LabelSet::empty()), Some(2.0)); + } + + #[test] + fn nonexistent_metric() { + use crate::label::LabelSet; + use crate::metric_collection::MetricCollection; + use crate::metric_name; + + let collection = MetricCollection::default(); + + 
assert_eq!(collection.avg(&metric_name!("nonexistent"), &LabelSet::empty()), None); + } + } +} diff --git a/packages/metrics/src/metric_collection/aggregate/mod.rs b/packages/metrics/src/metric_collection/aggregate/mod.rs index dce785d95..1224a1f52 100644 --- a/packages/metrics/src/metric_collection/aggregate/mod.rs +++ b/packages/metrics/src/metric_collection/aggregate/mod.rs @@ -1 +1,2 @@ +pub mod avg; pub mod sum; From 8fbcf9024a39af498162a522ecbd107d01f239a4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jun 2025 08:30:27 +0100 Subject: [PATCH 747/802] refactor(metrics): extract collect_matching_samples to Metric impl Improve AI-generated code. Moves the collect_matching_samples helper method from individual aggregate implementations to the generic Metric implementation, making it reusable across all aggregate functions. - Add collect_matching_samples method to Metric for filtering samples by label criteria - Remove code duplication between Sum and Avg aggregate implementations - Improve code organization by centralizing sample collection logic - Maintain backward compatibility and all existing functionality This refactoring improves maintainability by providing a single, well-tested implementation of sample filtering that can be used by current and future aggregate functions. 
--- packages/metrics/src/metric/aggregate/avg.rs | 23 +++++--------------- packages/metrics/src/metric/mod.rs | 11 ++++++++++ 2 files changed, 16 insertions(+), 18 deletions(-) diff --git a/packages/metrics/src/metric/aggregate/avg.rs b/packages/metrics/src/metric/aggregate/avg.rs index e1882ea68..95628450b 100644 --- a/packages/metrics/src/metric/aggregate/avg.rs +++ b/packages/metrics/src/metric/aggregate/avg.rs @@ -1,6 +1,7 @@ use crate::counter::Counter; use crate::gauge::Gauge; use crate::label::LabelSet; +use crate::metric::aggregate::sum::Sum; use crate::metric::Metric; pub trait Avg { @@ -12,20 +13,13 @@ impl Avg for Metric { type Output = f64; fn avg(&self, label_set_criteria: &LabelSet) -> Self::Output { - let matching_samples: Vec<_> = self - .sample_collection - .iter() - .filter(|(label_set, _measurement)| label_set.matches(label_set_criteria)) - .collect(); + let matching_samples = self.collect_matching_samples(label_set_criteria); if matching_samples.is_empty() { return 0.0; } - let sum: u64 = matching_samples - .iter() - .map(|(_label_set, measurement)| measurement.value().primitive()) - .sum(); + let sum = self.sum(label_set_criteria); #[allow(clippy::cast_precision_loss)] (sum as f64 / matching_samples.len() as f64) @@ -36,20 +30,13 @@ impl Avg for Metric { type Output = f64; fn avg(&self, label_set_criteria: &LabelSet) -> Self::Output { - let matching_samples: Vec<_> = self - .sample_collection - .iter() - .filter(|(label_set, _measurement)| label_set.matches(label_set_criteria)) - .collect(); + let matching_samples = self.collect_matching_samples(label_set_criteria); if matching_samples.is_empty() { return 0.0; } - let sum: f64 = matching_samples - .iter() - .map(|(_label_set, measurement)| measurement.value().primitive()) - .sum(); + let sum = self.sum(label_set_criteria); #[allow(clippy::cast_precision_loss)] (sum / matching_samples.len() as f64) diff --git a/packages/metrics/src/metric/mod.rs b/packages/metrics/src/metric/mod.rs index 
d1aa01b94..6bc1a6075 100644 --- a/packages/metrics/src/metric/mod.rs +++ b/packages/metrics/src/metric/mod.rs @@ -78,6 +78,17 @@ impl Metric { pub fn is_empty(&self) -> bool { self.sample_collection.is_empty() } + + #[must_use] + pub fn collect_matching_samples( + &self, + label_set_criteria: &LabelSet, + ) -> Vec<(&crate::label::LabelSet, &crate::sample::Measurement)> { + self.sample_collection + .iter() + .filter(|(label_set, _measurement)| label_set.matches(label_set_criteria)) + .collect() + } } impl Metric { From f402b0250b846dfb62c8d8cb48ec5b175693f350 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jun 2025 08:32:43 +0100 Subject: [PATCH 748/802] chore: remove deprecated comment --- .../rest-tracker-api-core/src/statistics/services.rs | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index a8132d4fd..af79c5ce7 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -59,18 +59,6 @@ async fn get_protocol_metrics( let http_stats = http_stats_repository.get_stats().await; let udp_server_stats = udp_server_stats_repository.get_stats().await; - /* - - todo: We have to delete the global metrics from Metric types: - - - bittorrent_http_tracker_core::statistics::metrics::Metrics - - bittorrent_udp_tracker_core::statistics::metrics::Metrics - - torrust_udp_tracker_server::statistics::metrics::Metrics - - Internally only the labeled metrics should be used. - - */ - // TCPv4 let tcp4_announces_handled = http_stats.tcp4_announces_handled(); From caa69ae91356584e193b11485548fb935bb4f2d3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jun 2025 08:33:43 +0100 Subject: [PATCH 749/802] test: [#1589] remove unneeded test Division by zero issue was solved. It can't happen now because we increase the counter at the beginning of the function. 
```rust #[allow(clippy::cast_precision_loss)] pub fn recalculate_udp_avg_processing_time_ns( &mut self, req_processing_time: Duration, label_set: &LabelSet, now: DurationSinceUnixEpoch, ) -> f64 { self.increment_udp_processed_requests_total(label_set, now); let processed_requests_total = self.udp_processed_requests_total(label_set) as f64; let previous_avg = self.udp_avg_processing_time_ns(label_set); let req_processing_time = req_processing_time.as_nanos() as f64; // Moving average: https://en.wikipedia.org/wiki/Moving_average let new_avg = previous_avg as f64 + (req_processing_time - previous_avg as f64) / processed_requests_total; tracing::debug!( "Recalculated UDP average processing time for labels {}: {} ns (previous: {} ns, req_processing_time: {} ns, request_processed_total: {})", label_set, new_avg, previous_avg, req_processing_time, processed_requests_total ); self.update_udp_avg_processing_time_ns(new_avg, label_set, now); new_avg } ``` --- .../src/statistics/repository.rs | 41 ------------------- 1 file changed, 41 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index 1ab2cc6a7..85e3bbe64 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -542,45 +542,4 @@ mod tests { // Should handle NaN values assert!(result.is_ok()); } - - #[tokio::test] - async fn it_should_handle_moving_average_calculation_before_any_connections_are_recorded() { - let repo = Repository::new(); - let connect_labels = LabelSet::from([("request_kind", "connect")]); - let now = CurrentClock::now(); - - // This test checks the behavior of `recalculate_udp_avg_processing_time_ns` - // when no processed requests have been recorded yet. The first call should - // handle division by zero gracefully and set the first average to the - // processing time of the first request. 
- - // First calculation: no processed requests recorded yet - let processing_time_1 = Duration::from_nanos(2000); - let avg_1 = repo - .recalculate_udp_avg_processing_time_ns(processing_time_1, &connect_labels, now) - .await; - - // The first average should be the first processing time since processed_requests_total is 0 - // When processed_requests_total == 0.0, new_avg = req_processing_time - assert!( - (avg_1 - 2000.0).abs() < f64::EPSILON, - "First calculation should be 2000, but got {avg_1}" - ); - - // Second calculation: now we have one processed request (incremented during first call) - let processing_time_2 = Duration::from_nanos(3000); - let avg_2 = repo - .recalculate_udp_avg_processing_time_ns(processing_time_2, &connect_labels, now) - .await; - - // Moving average calculation: previous_avg + (new_value - previous_avg) / processed_requests_total - // After first call: processed_requests_total = 1, avg = 2000 - // During second call: processed_requests_total incremented to 2 - // new_avg = 2000 + (3000 - 2000) / 2 = 2000 + 500 = 2500 - let expected_avg_2 = 2000.0 + (3000.0 - 2000.0) / 2.0; - assert!( - (avg_2 - expected_avg_2).abs() < f64::EPSILON, - "Second calculation should be {expected_avg_2}ns, but got {avg_2}" - ); - } } From ba3d8a914e3dabe7c17c24e2a1258a35fa87199e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jun 2025 10:10:21 +0100 Subject: [PATCH 750/802] fix: format --- packages/metrics/src/metric_collection/aggregate/avg.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/packages/metrics/src/metric_collection/aggregate/avg.rs b/packages/metrics/src/metric_collection/aggregate/avg.rs index 936754fc4..0aef4e325 100644 --- a/packages/metrics/src/metric_collection/aggregate/avg.rs +++ b/packages/metrics/src/metric_collection/aggregate/avg.rs @@ -25,9 +25,7 @@ impl Avg for MetricCollection { impl Avg for MetricKindCollection { fn avg(&self, metric_name: &MetricName, label_set_criteria: &LabelSet) -> Option { - 
self.metrics - .get(metric_name) - .map(|metric| metric.avg(label_set_criteria)) + self.metrics.get(metric_name).map(|metric| metric.avg(label_set_criteria)) } } From cd57f7a78f423d9ae409fd3aa63f7fc7a517375d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jun 2025 10:23:58 +0100 Subject: [PATCH 751/802] fix: [#1589] use average aggregation for UDP processing time metrics When calculating aggregated values for processing time metrics across multiple servers, we need to use the average (.avg()) instead of sum (.sum()) because the metric samples are already averages per server. Using sum() on pre-averaged values would produce incorrect results, as it would add up the averages rather than computing the true average across all servers. Changes: - Add new *_averaged() methods that use .avg() for proper aggregation - Update services.rs to use the corrected averaging methods - Import Avg trait for metric collection averaging functionality Fixes incorrect metric aggregation for: - udp_avg_connect_processing_time_ns - udp_avg_announce_processing_time_ns - udp_avg_scrape_processing_time_ns" --- .../src/statistics/services.rs | 6 +- .../src/statistics/metrics.rs | 267 ++++++++++++++++++ 2 files changed, 270 insertions(+), 3 deletions(-) diff --git a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index af79c5ce7..a1edae46a 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -74,9 +74,9 @@ async fn get_protocol_metrics( let udp_requests_aborted = udp_server_stats.udp_requests_aborted(); let udp_requests_banned = udp_server_stats.udp_requests_banned(); let udp_banned_ips_total = udp_server_stats.udp_banned_ips_total(); - let udp_avg_connect_processing_time_ns = udp_server_stats.udp_avg_connect_processing_time_ns(); - let udp_avg_announce_processing_time_ns = udp_server_stats.udp_avg_announce_processing_time_ns(); - 
let udp_avg_scrape_processing_time_ns = udp_server_stats.udp_avg_scrape_processing_time_ns(); + let udp_avg_connect_processing_time_ns = udp_server_stats.udp_avg_connect_processing_time_ns_averaged(); + let udp_avg_announce_processing_time_ns = udp_server_stats.udp_avg_announce_processing_time_ns_averaged(); + let udp_avg_scrape_processing_time_ns = udp_server_stats.udp_avg_scrape_processing_time_ns_averaged(); // UDPv4 diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index e7653815f..ac9540f8e 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -3,6 +3,7 @@ use std::time::Duration; use serde::Serialize; use torrust_tracker_metrics::label::LabelSet; use torrust_tracker_metrics::metric::MetricName; +use torrust_tracker_metrics::metric_collection::aggregate::avg::Avg; use torrust_tracker_metrics::metric_collection::aggregate::sum::Sum; use torrust_tracker_metrics::metric_collection::{Error, MetricCollection}; use torrust_tracker_metrics::metric_name; @@ -215,6 +216,48 @@ impl Metrics { .unwrap_or_default() as u64 } + /// Average processing time for UDP connect requests across all servers (in nanoseconds). + /// This calculates the average of all gauge samples for connect requests. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_avg_connect_processing_time_ns_averaged(&self) -> u64 { + self.metric_collection + .avg( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "connect")].into(), + ) + .unwrap_or(0.0) as u64 + } + + /// Average processing time for UDP announce requests across all servers (in nanoseconds). + /// This calculates the average of all gauge samples for announce requests. 
+ #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_avg_announce_processing_time_ns_averaged(&self) -> u64 { + self.metric_collection + .avg( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "announce")].into(), + ) + .unwrap_or(0.0) as u64 + } + + /// Average processing time for UDP scrape requests across all servers (in nanoseconds). + /// This calculates the average of all gauge samples for scrape requests. + #[must_use] + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + pub fn udp_avg_scrape_processing_time_ns_averaged(&self) -> u64 { + self.metric_collection + .avg( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "scrape")].into(), + ) + .unwrap_or(0.0) as u64 + } + // UDPv4 /// Total number of UDP (UDP tracker) requests from IPv4 peers. #[must_use] @@ -1179,4 +1222,228 @@ mod tests { assert!(result.is_ok()); } } + + mod averaged_processing_time_metrics { + use super::*; + + #[test] + fn it_should_return_zero_for_udp_avg_connect_processing_time_ns_averaged_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_avg_connect_processing_time_ns_averaged(), 0); + } + + #[test] + fn it_should_return_averaged_value_for_udp_avg_connect_processing_time_ns_averaged() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels1 = LabelSet::from([("request_kind", "connect"), ("server_id", "server1")]); + let labels2 = LabelSet::from([("request_kind", "connect"), ("server_id", "server2")]); + + // Set different gauge values for connect requests from different servers + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels1, + 1000.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels2, + 2000.0, + now, + ) + .unwrap(); + + 
// Should return the average: (1000 + 2000) / 2 = 1500 + assert_eq!(metrics.udp_avg_connect_processing_time_ns_averaged(), 1500); + } + + #[test] + fn it_should_return_zero_for_udp_avg_announce_processing_time_ns_averaged_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_avg_announce_processing_time_ns_averaged(), 0); + } + + #[test] + fn it_should_return_averaged_value_for_udp_avg_announce_processing_time_ns_averaged() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels1 = LabelSet::from([("request_kind", "announce"), ("server_id", "server1")]); + let labels2 = LabelSet::from([("request_kind", "announce"), ("server_id", "server2")]); + let labels3 = LabelSet::from([("request_kind", "announce"), ("server_id", "server3")]); + + // Set different gauge values for announce requests from different servers + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels1, + 1500.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels2, + 2500.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels3, + 3000.0, + now, + ) + .unwrap(); + + // Should return the average: (1500 + 2500 + 3000) / 3 = 2333 (truncated) + assert_eq!(metrics.udp_avg_announce_processing_time_ns_averaged(), 2333); + } + + #[test] + fn it_should_return_zero_for_udp_avg_scrape_processing_time_ns_averaged_when_no_data() { + let metrics = Metrics::default(); + assert_eq!(metrics.udp_avg_scrape_processing_time_ns_averaged(), 0); + } + + #[test] + fn it_should_return_averaged_value_for_udp_avg_scrape_processing_time_ns_averaged() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels1 = LabelSet::from([("request_kind", "scrape"), ("server_id", "server1")]); + let labels2 = LabelSet::from([("request_kind", "scrape"), 
("server_id", "server2")]); + + // Set different gauge values for scrape requests from different servers + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels1, + 500.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels2, + 1500.0, + now, + ) + .unwrap(); + + // Should return the average: (500 + 1500) / 2 = 1000 + assert_eq!(metrics.udp_avg_scrape_processing_time_ns_averaged(), 1000); + } + + #[test] + fn it_should_handle_fractional_averages_with_truncation() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels1 = LabelSet::from([("request_kind", "connect"), ("server_id", "server1")]); + let labels2 = LabelSet::from([("request_kind", "connect"), ("server_id", "server2")]); + let labels3 = LabelSet::from([("request_kind", "connect"), ("server_id", "server3")]); + + // Set values that will result in a fractional average + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels1, + 1000.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels2, + 1001.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels3, + 1001.0, + now, + ) + .unwrap(); + + // Should return the average: (1000 + 1001 + 1001) / 3 = 1000.666... 
→ 1000 (truncated) + assert_eq!(metrics.udp_avg_connect_processing_time_ns_averaged(), 1000); + } + + #[test] + fn it_should_only_average_matching_request_kinds() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + + // Set values for different request kinds with the same server_id + let connect_labels = LabelSet::from([("request_kind", "connect"), ("server_id", "server1")]); + let announce_labels = LabelSet::from([("request_kind", "announce"), ("server_id", "server1")]); + let scrape_labels = LabelSet::from([("request_kind", "scrape"), ("server_id", "server1")]); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &connect_labels, + 1000.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &announce_labels, + 2000.0, + now, + ) + .unwrap(); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &scrape_labels, + 3000.0, + now, + ) + .unwrap(); + + // Each function should only return the value for its specific request kind + assert_eq!(metrics.udp_avg_connect_processing_time_ns_averaged(), 1000); + assert_eq!(metrics.udp_avg_announce_processing_time_ns_averaged(), 2000); + assert_eq!(metrics.udp_avg_scrape_processing_time_ns_averaged(), 3000); + } + + #[test] + fn it_should_handle_single_server_averaged_metrics() { + let mut metrics = Metrics::default(); + let now = CurrentClock::now(); + let labels = LabelSet::from([("request_kind", "connect"), ("server_id", "single_server")]); + + metrics + .set_gauge( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &labels, + 1234.0, + now, + ) + .unwrap(); + + // With only one server, the average should be the same as the single value + assert_eq!(metrics.udp_avg_connect_processing_time_ns_averaged(), 1234); + } + } } From 4c082faefe1ae5932cca4a7f44b0619a14a50a11 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: 
Fri, 20 Jun 2025 10:43:01 +0100 Subject: [PATCH 752/802] refactor: [#1589] make methods private --- packages/udp-tracker-server/src/statistics/metrics.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index ac9540f8e..178855377 100644 --- a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -85,7 +85,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp_avg_processing_time_ns(&self, label_set: &LabelSet) -> u64 { + fn udp_avg_processing_time_ns(&self, label_set: &LabelSet) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), @@ -106,7 +106,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp_processed_requests_total(&self, label_set: &LabelSet) -> u64 { + fn udp_processed_requests_total(&self, label_set: &LabelSet) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), From a9acca5e73d6897c671117eae63c7e28f3e1629b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jun 2025 11:24:54 +0100 Subject: [PATCH 753/802] refactor: [#1589] rename methods and remove unused code --- .../src/statistics/services.rs | 28 +- .../src/statistics/event/handler/error.rs | 2 +- .../event/handler/request_aborted.rs | 4 +- .../event/handler/request_accepted.rs | 12 +- .../event/handler/request_banned.rs | 4 +- .../event/handler/request_received.rs | 2 +- .../statistics/event/handler/response_sent.rs | 4 +- .../src/statistics/metrics.rs | 246 ++++-------------- .../src/statistics/repository.rs | 44 +++- .../tests/server/contract.rs | 4 +- 10 files changed, 119 insertions(+), 231 deletions(-) diff --git 
a/packages/rest-tracker-api-core/src/statistics/services.rs b/packages/rest-tracker-api-core/src/statistics/services.rs index a1edae46a..f87cb8c76 100644 --- a/packages/rest-tracker-api-core/src/statistics/services.rs +++ b/packages/rest-tracker-api-core/src/statistics/services.rs @@ -71,8 +71,8 @@ async fn get_protocol_metrics( // UDP - let udp_requests_aborted = udp_server_stats.udp_requests_aborted(); - let udp_requests_banned = udp_server_stats.udp_requests_banned(); + let udp_requests_aborted = udp_server_stats.udp_requests_aborted_total(); + let udp_requests_banned = udp_server_stats.udp_requests_banned_total(); let udp_banned_ips_total = udp_server_stats.udp_banned_ips_total(); let udp_avg_connect_processing_time_ns = udp_server_stats.udp_avg_connect_processing_time_ns_averaged(); let udp_avg_announce_processing_time_ns = udp_server_stats.udp_avg_announce_processing_time_ns_averaged(); @@ -80,21 +80,21 @@ async fn get_protocol_metrics( // UDPv4 - let udp4_requests = udp_server_stats.udp4_requests(); - let udp4_connections_handled = udp_server_stats.udp4_connections_handled(); - let udp4_announces_handled = udp_server_stats.udp4_announces_handled(); - let udp4_scrapes_handled = udp_server_stats.udp4_scrapes_handled(); - let udp4_responses = udp_server_stats.udp4_responses(); - let udp4_errors_handled = udp_server_stats.udp4_errors_handled(); + let udp4_requests = udp_server_stats.udp4_requests_received_total(); + let udp4_connections_handled = udp_server_stats.udp4_connect_requests_accepted_total(); + let udp4_announces_handled = udp_server_stats.udp4_announce_requests_accepted_total(); + let udp4_scrapes_handled = udp_server_stats.udp4_scrape_requests_accepted_total(); + let udp4_responses = udp_server_stats.udp4_responses_sent_total(); + let udp4_errors_handled = udp_server_stats.udp4_errors_total(); // UDPv6 - let udp6_requests = udp_server_stats.udp6_requests(); - let udp6_connections_handled = udp_server_stats.udp6_connections_handled(); - let 
udp6_announces_handled = udp_server_stats.udp6_announces_handled(); - let udp6_scrapes_handled = udp_server_stats.udp6_scrapes_handled(); - let udp6_responses = udp_server_stats.udp6_responses(); - let udp6_errors_handled = udp_server_stats.udp6_errors_handled(); + let udp6_requests = udp_server_stats.udp6_requests_received_total(); + let udp6_connections_handled = udp_server_stats.udp6_connect_requests_accepted_total(); + let udp6_announces_handled = udp_server_stats.udp6_announce_requests_accepted_total(); + let udp6_scrapes_handled = udp_server_stats.udp6_scrape_requests_accepted_total(); + let udp6_responses = udp_server_stats.udp6_responses_sent_total(); + let udp6_errors_handled = udp_server_stats.udp6_errors_total(); // For backward compatibility we keep the `tcp4_connections_handled` and // `tcp6_connections_handled` metrics. They don't make sense for the HTTP diff --git a/packages/udp-tracker-server/src/statistics/event/handler/error.rs b/packages/udp-tracker-server/src/statistics/event/handler/error.rs index d83a0584d..63e480ca5 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/error.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/error.rs @@ -137,6 +137,6 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_errors_handled(), 1); + assert_eq!(stats.udp4_errors_total(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs index 19e410d5e..f340fe51a 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_aborted.rs @@ -54,7 +54,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_aborted(), 1); + assert_eq!(stats.udp_requests_aborted_total(), 1); } #[tokio::test] @@ -77,6 +77,6 @@ mod tests { ) .await; let stats = 
stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_aborted(), 1); + assert_eq!(stats.udp_requests_aborted_total(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs index af92636df..33971926e 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_accepted.rs @@ -61,7 +61,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_connections_handled(), 1); + assert_eq!(stats.udp4_connect_requests_accepted_total(), 1); } #[tokio::test] @@ -89,7 +89,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_announces_handled(), 1); + assert_eq!(stats.udp4_announce_requests_accepted_total(), 1); } #[tokio::test] @@ -115,7 +115,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_scrapes_handled(), 1); + assert_eq!(stats.udp4_scrape_requests_accepted_total(), 1); } #[tokio::test] @@ -141,7 +141,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_connections_handled(), 1); + assert_eq!(stats.udp6_connect_requests_accepted_total(), 1); } #[tokio::test] @@ -169,7 +169,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_announces_handled(), 1); + assert_eq!(stats.udp6_announce_requests_accepted_total(), 1); } #[tokio::test] @@ -195,6 +195,6 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_scrapes_handled(), 1); + assert_eq!(stats.udp6_scrape_requests_accepted_total(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs index 8badfa137..10f6cad88 100644 --- 
a/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_banned.rs @@ -54,7 +54,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_banned(), 1); + assert_eq!(stats.udp_requests_banned_total(), 1); } #[tokio::test] @@ -77,6 +77,6 @@ mod tests { ) .await; let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp_requests_banned(), 1); + assert_eq!(stats.udp_requests_banned_total(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs b/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs index eced5a215..148b9d8da 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/request_received.rs @@ -54,6 +54,6 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_requests(), 1); + assert_eq!(stats.udp4_requests_received_total(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs index 34093f511..b1a046b5b 100644 --- a/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs +++ b/packages/udp-tracker-server/src/statistics/event/handler/response_sent.rs @@ -105,7 +105,7 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp4_responses(), 1); + assert_eq!(stats.udp4_responses_sent_total(), 1); } #[tokio::test] @@ -136,6 +136,6 @@ mod tests { let stats = stats_repository.get_stats().await; - assert_eq!(stats.udp6_responses(), 1); + assert_eq!(stats.udp6_responses_sent_total(), 1); } } diff --git a/packages/udp-tracker-server/src/statistics/metrics.rs b/packages/udp-tracker-server/src/statistics/metrics.rs index 178855377..e167dc5ae 100644 --- 
a/packages/udp-tracker-server/src/statistics/metrics.rs +++ b/packages/udp-tracker-server/src/statistics/metrics.rs @@ -97,7 +97,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp_request_accepted(&self, label_set: &LabelSet) -> u64 { + pub fn udp_request_accepted_total(&self, label_set: &LabelSet) -> u64 { self.metric_collection .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), label_set) .unwrap_or_default() as u64 @@ -151,7 +151,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp_requests_aborted(&self) -> u64 { + pub fn udp_requests_aborted_total(&self) -> u64 { self.metric_collection .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &LabelSet::empty()) .unwrap_or_default() as u64 @@ -161,7 +161,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp_requests_banned(&self) -> u64 { + pub fn udp_requests_banned_total(&self) -> u64 { self.metric_collection .sum(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_BANNED_TOTAL), &LabelSet::empty()) .unwrap_or_default() as u64 @@ -177,45 +177,6 @@ impl Metrics { .unwrap_or_default() as u64 } - /// Average rounded time spent processing UDP connect requests. - #[must_use] - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - pub fn udp_avg_connect_processing_time_ns(&self) -> u64 { - self.metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &[("request_kind", "connect")].into(), - ) - .unwrap_or_default() as u64 - } - - /// Average rounded time spent processing UDP announce requests. 
- #[must_use] - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - pub fn udp_avg_announce_processing_time_ns(&self) -> u64 { - self.metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &[("request_kind", "announce")].into(), - ) - .unwrap_or_default() as u64 - } - - /// Average rounded time spent processing UDP scrape requests. - #[must_use] - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - pub fn udp_avg_scrape_processing_time_ns(&self) -> u64 { - self.metric_collection - .sum( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &[("request_kind", "scrape")].into(), - ) - .unwrap_or_default() as u64 - } - /// Average processing time for UDP connect requests across all servers (in nanoseconds). /// This calculates the average of all gauge samples for connect requests. #[must_use] @@ -263,7 +224,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp4_requests(&self) -> u64 { + pub fn udp4_requests_received_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), @@ -276,7 +237,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp4_connections_handled(&self) -> u64 { + pub fn udp4_connect_requests_accepted_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), @@ -289,7 +250,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp4_announces_handled(&self) -> u64 { + pub fn udp4_announce_requests_accepted_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), @@ -302,7 +263,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub 
fn udp4_scrapes_handled(&self) -> u64 { + pub fn udp4_scrape_requests_accepted_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), @@ -315,7 +276,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp4_responses(&self) -> u64 { + pub fn udp4_responses_sent_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), @@ -328,7 +289,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp4_errors_handled(&self) -> u64 { + pub fn udp4_errors_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), @@ -342,7 +303,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp6_requests(&self) -> u64 { + pub fn udp6_requests_received_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_RECEIVED_TOTAL), @@ -355,7 +316,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp6_connections_handled(&self) -> u64 { + pub fn udp6_connect_requests_accepted_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), @@ -368,7 +329,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp6_announces_handled(&self) -> u64 { + pub fn udp6_announce_requests_accepted_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), @@ -381,7 +342,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp6_scrapes_handled(&self) -> u64 { + pub fn udp6_scrape_requests_accepted_total(&self) -> u64 { self.metric_collection .sum( 
&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ACCEPTED_TOTAL), @@ -394,7 +355,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp6_responses(&self) -> u64 { + pub fn udp6_responses_sent_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_RESPONSES_SENT_TOTAL), @@ -407,7 +368,7 @@ impl Metrics { #[must_use] #[allow(clippy::cast_sign_loss)] #[allow(clippy::cast_possible_truncation)] - pub fn udp6_errors_handled(&self) -> u64 { + pub fn udp6_errors_total(&self) -> u64 { self.metric_collection .sum( &metric_name!(UDP_TRACKER_SERVER_ERRORS_TOTAL), @@ -534,7 +495,7 @@ mod tests { #[test] fn it_should_return_zero_for_udp_requests_aborted_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp_requests_aborted(), 0); + assert_eq!(metrics.udp_requests_aborted_total(), 0); } #[test] @@ -550,13 +511,13 @@ mod tests { .increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &labels, now) .unwrap(); - assert_eq!(metrics.udp_requests_aborted(), 2); + assert_eq!(metrics.udp_requests_aborted_total(), 2); } #[test] fn it_should_return_zero_for_udp_requests_banned_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp_requests_banned(), 0); + assert_eq!(metrics.udp_requests_banned_total(), 0); } #[test] @@ -571,7 +532,7 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp_requests_banned(), 3); + assert_eq!(metrics.udp_requests_banned_total(), 3); } #[test] @@ -594,89 +555,13 @@ mod tests { } } - mod udp_performance_metrics { - use super::*; - - #[test] - fn it_should_return_zero_for_udp_avg_connect_processing_time_ns_when_no_data() { - let metrics = Metrics::default(); - assert_eq!(metrics.udp_avg_connect_processing_time_ns(), 0); - } - - #[test] - fn it_should_return_gauge_value_for_udp_avg_connect_processing_time_ns() { - let mut metrics = Metrics::default(); - let now = CurrentClock::now(); - let labels = 
LabelSet::from([("request_kind", "connect")]); - - metrics - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &labels, - 1500.0, - now, - ) - .unwrap(); - - assert_eq!(metrics.udp_avg_connect_processing_time_ns(), 1500); - } - - #[test] - fn it_should_return_zero_for_udp_avg_announce_processing_time_ns_when_no_data() { - let metrics = Metrics::default(); - assert_eq!(metrics.udp_avg_announce_processing_time_ns(), 0); - } - - #[test] - fn it_should_return_gauge_value_for_udp_avg_announce_processing_time_ns() { - let mut metrics = Metrics::default(); - let now = CurrentClock::now(); - let labels = LabelSet::from([("request_kind", "announce")]); - - metrics - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &labels, - 2500.0, - now, - ) - .unwrap(); - - assert_eq!(metrics.udp_avg_announce_processing_time_ns(), 2500); - } - - #[test] - fn it_should_return_zero_for_udp_avg_scrape_processing_time_ns_when_no_data() { - let metrics = Metrics::default(); - assert_eq!(metrics.udp_avg_scrape_processing_time_ns(), 0); - } - - #[test] - fn it_should_return_gauge_value_for_udp_avg_scrape_processing_time_ns() { - let mut metrics = Metrics::default(); - let now = CurrentClock::now(); - let labels = LabelSet::from([("request_kind", "scrape")]); - - metrics - .set_gauge( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &labels, - 3500.0, - now, - ) - .unwrap(); - - assert_eq!(metrics.udp_avg_scrape_processing_time_ns(), 3500); - } - } - mod udpv4_metrics { use super::*; #[test] fn it_should_return_zero_for_udp4_requests_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp4_requests(), 0); + assert_eq!(metrics.udp4_requests_received_total(), 0); } #[test] @@ -691,13 +576,13 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp4_requests(), 5); + assert_eq!(metrics.udp4_requests_received_total(), 5); } #[test] fn 
it_should_return_zero_for_udp4_connections_handled_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp4_connections_handled(), 0); + assert_eq!(metrics.udp4_connect_requests_accepted_total(), 0); } #[test] @@ -712,13 +597,13 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp4_connections_handled(), 3); + assert_eq!(metrics.udp4_connect_requests_accepted_total(), 3); } #[test] fn it_should_return_zero_for_udp4_announces_handled_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp4_announces_handled(), 0); + assert_eq!(metrics.udp4_announce_requests_accepted_total(), 0); } #[test] @@ -733,13 +618,13 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp4_announces_handled(), 7); + assert_eq!(metrics.udp4_announce_requests_accepted_total(), 7); } #[test] fn it_should_return_zero_for_udp4_scrapes_handled_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp4_scrapes_handled(), 0); + assert_eq!(metrics.udp4_scrape_requests_accepted_total(), 0); } #[test] @@ -754,13 +639,13 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp4_scrapes_handled(), 4); + assert_eq!(metrics.udp4_scrape_requests_accepted_total(), 4); } #[test] fn it_should_return_zero_for_udp4_responses_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp4_responses(), 0); + assert_eq!(metrics.udp4_responses_sent_total(), 0); } #[test] @@ -775,13 +660,13 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp4_responses(), 6); + assert_eq!(metrics.udp4_responses_sent_total(), 6); } #[test] fn it_should_return_zero_for_udp4_errors_handled_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp4_errors_handled(), 0); + assert_eq!(metrics.udp4_errors_total(), 0); } #[test] @@ -796,7 +681,7 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp4_errors_handled(), 2); + assert_eq!(metrics.udp4_errors_total(), 2); } } @@ -806,7 +691,7 @@ mod tests { #[test] fn 
it_should_return_zero_for_udp6_requests_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp6_requests(), 0); + assert_eq!(metrics.udp6_requests_received_total(), 0); } #[test] @@ -821,13 +706,13 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp6_requests(), 8); + assert_eq!(metrics.udp6_requests_received_total(), 8); } #[test] fn it_should_return_zero_for_udp6_connections_handled_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp6_connections_handled(), 0); + assert_eq!(metrics.udp6_connect_requests_accepted_total(), 0); } #[test] @@ -842,13 +727,13 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp6_connections_handled(), 4); + assert_eq!(metrics.udp6_connect_requests_accepted_total(), 4); } #[test] fn it_should_return_zero_for_udp6_announces_handled_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp6_announces_handled(), 0); + assert_eq!(metrics.udp6_announce_requests_accepted_total(), 0); } #[test] @@ -863,13 +748,13 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp6_announces_handled(), 9); + assert_eq!(metrics.udp6_announce_requests_accepted_total(), 9); } #[test] fn it_should_return_zero_for_udp6_scrapes_handled_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp6_scrapes_handled(), 0); + assert_eq!(metrics.udp6_scrape_requests_accepted_total(), 0); } #[test] @@ -884,13 +769,13 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp6_scrapes_handled(), 6); + assert_eq!(metrics.udp6_scrape_requests_accepted_total(), 6); } #[test] fn it_should_return_zero_for_udp6_responses_when_no_data() { let metrics = Metrics::default(); - assert_eq!(metrics.udp6_responses(), 0); + assert_eq!(metrics.udp6_responses_sent_total(), 0); } #[test] @@ -905,13 +790,13 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp6_responses(), 11); + assert_eq!(metrics.udp6_responses_sent_total(), 11); } #[test] fn it_should_return_zero_for_udp6_errors_handled_when_no_data() { let metrics = 
Metrics::default(); - assert_eq!(metrics.udp6_errors_handled(), 0); + assert_eq!(metrics.udp6_errors_total(), 0); } #[test] @@ -926,7 +811,7 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp6_errors_handled(), 3); + assert_eq!(metrics.udp6_errors_total(), 3); } } @@ -954,8 +839,8 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp4_requests(), 3); - assert_eq!(metrics.udp6_requests(), 7); + assert_eq!(metrics.udp4_requests_received_total(), 3); + assert_eq!(metrics.udp6_requests_received_total(), 7); } #[test] @@ -994,9 +879,9 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp4_connections_handled(), 2); - assert_eq!(metrics.udp4_announces_handled(), 5); - assert_eq!(metrics.udp4_scrapes_handled(), 1); + assert_eq!(metrics.udp4_connect_requests_accepted_total(), 2); + assert_eq!(metrics.udp4_announce_requests_accepted_total(), 5); + assert_eq!(metrics.udp4_scrape_requests_accepted_total(), 1); } #[test] @@ -1053,10 +938,10 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp4_connections_handled(), 3); - assert_eq!(metrics.udp6_connections_handled(), 2); - assert_eq!(metrics.udp4_announces_handled(), 4); - assert_eq!(metrics.udp6_announces_handled(), 6); + assert_eq!(metrics.udp4_connect_requests_accepted_total(), 3); + assert_eq!(metrics.udp6_connect_requests_accepted_total(), 2); + assert_eq!(metrics.udp4_announce_requests_accepted_total(), 4); + assert_eq!(metrics.udp6_announce_requests_accepted_total(), 6); } } @@ -1076,7 +961,7 @@ mod tests { .unwrap(); } - assert_eq!(metrics.udp_requests_aborted(), 1000); + assert_eq!(metrics.udp_requests_aborted_total(), 1000); } #[test] @@ -1106,25 +991,6 @@ mod tests { assert_eq!(metrics.udp_banned_ips_total(), 0); } - #[test] - fn it_should_handle_fractional_gauge_values_with_truncation() { - let mut metrics = Metrics::default(); - let now = CurrentClock::now(); - let labels = LabelSet::from([("request_kind", "connect")]); - - metrics - .set_gauge( - 
&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &labels, - 1234.567, - now, - ) - .unwrap(); - - // Should truncate to 1234 - assert_eq!(metrics.udp_avg_connect_processing_time_ns(), 1234); - } - #[test] fn it_should_overwrite_gauge_values_when_set_multiple_times() { let mut metrics = Metrics::default(); @@ -1155,7 +1021,7 @@ mod tests { let result = metrics.increase_counter(&metric_name!(UDP_TRACKER_SERVER_REQUESTS_ABORTED_TOTAL), &empty_labels, now); assert!(result.is_ok()); - assert_eq!(metrics.udp_requests_aborted(), 1); + assert_eq!(metrics.udp_requests_aborted_total(), 1); } #[test] @@ -1180,8 +1046,8 @@ mod tests { } // Should return labeled sums correctly - assert_eq!(metrics.udp4_requests(), 3); - assert_eq!(metrics.udp6_requests(), 5); + assert_eq!(metrics.udp4_requests_received_total(), 3); + assert_eq!(metrics.udp6_requests_received_total(), 5); } } diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index 85e3bbe64..7a1c5fa4a 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -95,6 +95,7 @@ mod tests { use std::time::Duration; use torrust_tracker_clock::clock::Time; + use torrust_tracker_metrics::metric_collection::aggregate::sum::Sum; use torrust_tracker_metrics::metric_name; use super::*; @@ -155,8 +156,8 @@ mod tests { let stats = repo.get_stats().await; // Should be able to read metrics through the guard - assert_eq!(stats.udp_requests_aborted(), 0); - assert_eq!(stats.udp_requests_banned(), 0); + assert_eq!(stats.udp_requests_aborted_total(), 0); + assert_eq!(stats.udp_requests_banned_total(), 0); } #[tokio::test] @@ -174,7 +175,7 @@ mod tests { // Verify the counter was incremented let stats = repo.get_stats().await; - assert_eq!(stats.udp_requests_aborted(), 1); + assert_eq!(stats.udp_requests_aborted_total(), 1); } #[tokio::test] @@ -192,7 +193,7 @@ mod tests { 
// Verify the counter was incremented correctly let stats = repo.get_stats().await; - assert_eq!(stats.udp_requests_aborted(), 5); + assert_eq!(stats.udp_requests_aborted_total(), 5); } #[tokio::test] @@ -214,8 +215,8 @@ mod tests { // Verify both labeled metrics let stats = repo.get_stats().await; - assert_eq!(stats.udp4_requests(), 1); - assert_eq!(stats.udp6_requests(), 1); + assert_eq!(stats.udp4_requests_received_total(), 1); + assert_eq!(stats.udp6_requests_received_total(), 1); } #[tokio::test] @@ -286,8 +287,29 @@ mod tests { // Verify both labeled metrics let stats = repo.get_stats().await; - assert_eq!(stats.udp_avg_connect_processing_time_ns(), 1000); - assert_eq!(stats.udp_avg_announce_processing_time_ns(), 2000); + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp_avg_connect_processing_time_ns = stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "connect")].into(), + ) + .unwrap_or_default() as u64; + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let udp_avg_announce_processing_time_ns = stats + .metric_collection + .sum( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &[("request_kind", "announce")].into(), + ) + .unwrap_or_default() as u64; + + assert_eq!(udp_avg_connect_processing_time_ns, 1000); + assert_eq!(udp_avg_announce_processing_time_ns, 2000); } #[tokio::test] @@ -452,7 +474,7 @@ mod tests { // Verify all increments were properly recorded let stats = repo.get_stats().await; - assert_eq!(stats.udp_requests_aborted(), 50); // 10 tasks * 5 increments each + assert_eq!(stats.udp_requests_aborted_total(), 50); // 10 tasks * 5 increments each } #[tokio::test] @@ -511,9 +533,9 @@ mod tests { // Check final state let stats = repo.get_stats().await; - assert_eq!(stats.udp_requests_aborted(), 1); + assert_eq!(stats.udp_requests_aborted_total(), 1); 
assert_eq!(stats.udp_banned_ips_total(), 10); - assert_eq!(stats.udp_requests_banned(), 1); + assert_eq!(stats.udp_requests_banned_total(), 1); } #[tokio::test] diff --git a/packages/udp-tracker-server/tests/server/contract.rs b/packages/udp-tracker-server/tests/server/contract.rs index 2745f3407..da08bc177 100644 --- a/packages/udp-tracker-server/tests/server/contract.rs +++ b/packages/udp-tracker-server/tests/server/contract.rs @@ -273,7 +273,7 @@ mod receiving_an_announce_request { .stats_repository .get_stats() .await - .udp_requests_banned(); + .udp_requests_banned_total(); // This should return a timeout error match client.send(announce_request.into()).await { @@ -289,7 +289,7 @@ mod receiving_an_announce_request { .stats_repository .get_stats() .await - .udp_requests_banned(); + .udp_requests_banned_total(); let udp_banned_ips_total_after = ban_service.read().await.get_banned_ips_total(); // UDP counter for banned requests should be increased by 1 From dc8d4a9b9874b03a7724b17d0494e84430d95d45 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jun 2025 16:23:07 +0100 Subject: [PATCH 754/802] test: [#1589] add race condition test for UDP performance metrics MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds a comprehensive unit test to validate thread safety when updating UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS metrics under concurrent load. 
The test: - Spawns 200 concurrent tasks (100 per server) simulating two UDP servers - Server 1: cycles through [1000, 2000, 3000, 4000, 5000] ns processing times - Server 2: cycles through [2000, 3000, 4000, 5000, 6000] ns processing times - Validates request counts, average calculations, and metric relationships - Uses tolerance-based assertions (±50ns) to account for moving average calculation variations in concurrent environments - Ensures thread safety and mathematical correctness of the metrics system This test helps ensure the UDP tracker server's metrics collection remains accurate and thread-safe under high-concurrency scenarios. --- .../src/statistics/repository.rs | 202 ++++++++++++++++++ 1 file changed, 202 insertions(+) diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index 7a1c5fa4a..b80b8ba09 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -564,4 +564,206 @@ mod tests { // Should handle NaN values assert!(result.is_ok()); } + + #[tokio::test] + #[allow(clippy::too_many_lines)] + async fn it_should_handle_race_conditions_when_updating_udp_performance_metrics_in_parallel() { + // Number of concurrent requests per server + const REQUESTS_PER_SERVER: usize = 100; + + let repo = Repository::new(); + let now = CurrentClock::now(); + + // Define labels for two different UDP servers + let server1_labels = LabelSet::from([ + ("request_kind", "connect"), + ("server_binding_address_ip_family", "inet"), + ("server_port", "6868"), + ]); + let server2_labels = LabelSet::from([ + ("request_kind", "connect"), + ("server_binding_address_ip_family", "inet"), + ("server_port", "6969"), + ]); + + let mut handles = vec![]; + + // Spawn tasks for server 1 + for i in 0..REQUESTS_PER_SERVER { + let repo_clone = repo.clone(); + let labels = server1_labels.clone(); + let handle = tokio::spawn(async move { + // 
Simulate varying processing times (1000ns to 5000ns) + let processing_time_ns = 1000 + (i % 5) * 1000; + let processing_time = Duration::from_nanos(processing_time_ns as u64); + + repo_clone + .recalculate_udp_avg_processing_time_ns(processing_time, &labels, now) + .await + }); + handles.push(handle); + } + + // Spawn tasks for server 2 + for i in 0..REQUESTS_PER_SERVER { + let repo_clone = repo.clone(); + let labels = server2_labels.clone(); + let handle = tokio::spawn(async move { + // Simulate different processing times (2000ns to 6000ns) + let processing_time_ns = 2000 + (i % 5) * 1000; + let processing_time = Duration::from_nanos(processing_time_ns as u64); + + repo_clone + .recalculate_udp_avg_processing_time_ns(processing_time, &labels, now) + .await + }); + handles.push(handle); + } + + // Collect all the results + let mut server1_results = Vec::new(); + let mut server2_results = Vec::new(); + + for (i, handle) in handles.into_iter().enumerate() { + let result = handle.await.unwrap(); + if i < REQUESTS_PER_SERVER { + server1_results.push(result); + } else { + server2_results.push(result); + } + } + + // Verify that all tasks completed successfully + assert_eq!(server1_results.len(), REQUESTS_PER_SERVER); + assert_eq!(server2_results.len(), REQUESTS_PER_SERVER); + + // Verify that all results are finite and positive + for result in &server1_results { + assert!(result.is_finite(), "Server 1 result should be finite: {result}"); + assert!(*result > 0.0, "Server 1 result should be positive: {result}"); + } + + for result in &server2_results { + assert!(result.is_finite(), "Server 2 result should be finite: {result}"); + assert!(*result > 0.0, "Server 2 result should be positive: {result}"); + } + + // Get final stats and verify metrics integrity + let stats = repo.get_stats().await; + + // Verify that the processed requests counters are correct for each server + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let server1_processed 
= stats + .metric_collection + .get_counter_value( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), + &server1_labels, + ) + .unwrap() + .value(); + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let server2_processed = stats + .metric_collection + .get_counter_value( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), + &server2_labels, + ) + .unwrap() + .value(); + + assert_eq!( + server1_processed, REQUESTS_PER_SERVER as u64, + "Server 1 should have processed {REQUESTS_PER_SERVER} requests", + ); + assert_eq!( + server2_processed, REQUESTS_PER_SERVER as u64, + "Server 2 should have processed {REQUESTS_PER_SERVER} requests", + ); + + // Verify that the final average processing times are reasonable + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let server1_final_avg = stats + .metric_collection + .get_gauge_value( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &server1_labels, + ) + .unwrap() + .value(); + + #[allow(clippy::cast_sign_loss)] + #[allow(clippy::cast_possible_truncation)] + let server2_final_avg = stats + .metric_collection + .get_gauge_value( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), + &server2_labels, + ) + .unwrap() + .value(); + + // Server 1: 100 requests cycling through [1000, 2000, 3000, 4000, 5000] ns + // Expected average: (20×1000 + 20×2000 + 20×3000 + 20×4000 + 20×5000) / 100 = 3000 ns + // Note: Moving average with concurrent updates may have small deviations due to order dependency + assert!( + (server1_final_avg - 3000.0).abs() < 50.0, + "Server 1 final average should be close to 3000ns (±50ns), got {server1_final_avg}ns" + ); + + // Server 2: 100 requests cycling through [2000, 3000, 4000, 5000, 6000] ns + // Expected average: (20×2000 + 20×3000 + 20×4000 + 20×5000 + 20×6000) / 100 = 4000 ns + // Note: Moving average with concurrent updates may have small 
deviations due to order dependency + assert!( + (server2_final_avg - 4000.0).abs() < 50.0, + "Server 2 final average should be close to 4000ns (±50ns), got {server2_final_avg}ns" + ); + + // Verify that the two servers have different averages (they should since they have different processing time ranges) + assert!( + (server1_final_avg - server2_final_avg).abs() > 950.0, + "Server 1 and Server 2 should have different average processing times" + ); + + // Server 2 should generally have higher averages since its processing times are higher + assert!( + server2_final_avg > server1_final_avg, + "Server 2 average ({server2_final_avg}) should be higher than Server 1 average ({server1_final_avg})" + ); + + // Verify that the moving average calculation maintains consistency + // The last result for each server should match the final stored average + let server1_last_result = server1_results.last().copied().unwrap(); + let server2_last_result = server2_results.last().copied().unwrap(); + + // Note: Due to race conditions, the last result might not exactly match the final stored average + // but it should be in a reasonable range. We'll check that they're in the same ballpark. 
+ let server1_diff = (server1_last_result - server1_final_avg).abs(); + let server2_diff = (server2_last_result - server2_final_avg).abs(); + + assert!( + server1_diff <= 0.0, + "Server 1 last result ({server1_last_result}) should be equal to final average ({server1_final_avg}), diff: {server1_diff}", + ); + + assert!( + server2_diff <= 0.0, + "Server 2 last result ({server2_last_result}) should be equal to final average ({server2_final_avg}), diff: {server2_diff}", + ); + + // Verify that the metric collection contains the expected metrics for both servers + assert!(stats + .metric_collection + .contains_gauge(&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL))); + + println!( + "Race condition test completed successfully:\n Server 1: {server1_processed} requests, final avg: {server1_final_avg}ns\n Server 2: {server2_processed} requests, final avg: {server2_final_avg}ns" + ); + } } From b423bf61ee13562ef642e3b4da01868246dfeec5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Jun 2025 17:50:15 +0100 Subject: [PATCH 755/802] refactor: [#1589] improve readability of UDP performance metrics race condition test Restructures the race condition test to follow clear Arrange-Act-Assert pattern and eliminates code duplication through helper function extraction. The test maintains identical functionality while being more maintainable, readable, and following DRY principles. All 200 concurrent tasks still validate thread safety and mathematical correctness of the metrics system. 
--- .../src/statistics/repository.rs | 346 +++++++++--------- 1 file changed, 176 insertions(+), 170 deletions(-) diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index b80b8ba09..94a86e3ab 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ b/packages/udp-tracker-server/src/statistics/repository.rs @@ -565,205 +565,211 @@ mod tests { assert!(result.is_ok()); } - #[tokio::test] - #[allow(clippy::too_many_lines)] - async fn it_should_handle_race_conditions_when_updating_udp_performance_metrics_in_parallel() { - // Number of concurrent requests per server - const REQUESTS_PER_SERVER: usize = 100; + mod race_conditions { - let repo = Repository::new(); - let now = CurrentClock::now(); + use core::f64; + use std::time::Duration; - // Define labels for two different UDP servers - let server1_labels = LabelSet::from([ - ("request_kind", "connect"), - ("server_binding_address_ip_family", "inet"), - ("server_port", "6868"), - ]); - let server2_labels = LabelSet::from([ - ("request_kind", "connect"), - ("server_binding_address_ip_family", "inet"), - ("server_port", "6969"), - ]); + use tokio::task::JoinHandle; + use torrust_tracker_clock::clock::Time; + use torrust_tracker_metrics::metric_name; - let mut handles = vec![]; + use super::*; + use crate::CurrentClock; - // Spawn tasks for server 1 - for i in 0..REQUESTS_PER_SERVER { - let repo_clone = repo.clone(); - let labels = server1_labels.clone(); - let handle = tokio::spawn(async move { - // Simulate varying processing times (1000ns to 5000ns) - let processing_time_ns = 1000 + (i % 5) * 1000; - let processing_time = Duration::from_nanos(processing_time_ns as u64); + #[tokio::test] + async fn it_should_handle_race_conditions_when_updating_udp_performance_metrics_in_parallel() { + const REQUESTS_PER_SERVER: usize = 100; - repo_clone - .recalculate_udp_avg_processing_time_ns(processing_time, &labels, now) - .await - }); - 
handles.push(handle); - } + // ** Set up test data and environment ** - // Spawn tasks for server 2 - for i in 0..REQUESTS_PER_SERVER { - let repo_clone = repo.clone(); - let labels = server2_labels.clone(); - let handle = tokio::spawn(async move { - // Simulate different processing times (2000ns to 6000ns) - let processing_time_ns = 2000 + (i % 5) * 1000; - let processing_time = Duration::from_nanos(processing_time_ns as u64); + let repo = Repository::new(); + let now = CurrentClock::now(); - repo_clone - .recalculate_udp_avg_processing_time_ns(processing_time, &labels, now) - .await - }); - handles.push(handle); - } + let server1_labels = create_server_metric_labels("6868"); + let server2_labels = create_server_metric_labels("6969"); - // Collect all the results - let mut server1_results = Vec::new(); - let mut server2_results = Vec::new(); + // ** Execute concurrent metric updates ** - for (i, handle) in handles.into_iter().enumerate() { - let result = handle.await.unwrap(); - if i < REQUESTS_PER_SERVER { - server1_results.push(result); - } else { - server2_results.push(result); - } - } + // Spawn concurrent tasks for server 1 with processing times [1000, 2000, 3000, 4000, 5000] ns + let server1_handles = spawn_server_tasks(&repo, &server1_labels, 1000, now, REQUESTS_PER_SERVER); - // Verify that all tasks completed successfully - assert_eq!(server1_results.len(), REQUESTS_PER_SERVER); - assert_eq!(server2_results.len(), REQUESTS_PER_SERVER); + // Spawn concurrent tasks for server 2 with processing times [2000, 3000, 4000, 5000, 6000] ns + let server2_handles = spawn_server_tasks(&repo, &server2_labels, 2000, now, REQUESTS_PER_SERVER); - // Verify that all results are finite and positive - for result in &server1_results { - assert!(result.is_finite(), "Server 1 result should be finite: {result}"); - assert!(*result > 0.0, "Server 1 result should be positive: {result}"); - } + // Wait for both servers' results + let (server1_results, server2_results) = 
tokio::join!( + collect_concurrent_task_results(server1_handles), + collect_concurrent_task_results(server2_handles) + ); + + // ** Verify results and metrics ** + + // Verify correctness of concurrent operations + assert_server_results_are_valid(&server1_results, "Server 1", REQUESTS_PER_SERVER); + assert_server_results_are_valid(&server2_results, "Server 2", REQUESTS_PER_SERVER); + + let stats = repo.get_stats().await; - for result in &server2_results { - assert!(result.is_finite(), "Server 2 result should be finite: {result}"); - assert!(*result > 0.0, "Server 2 result should be positive: {result}"); + // Verify each server's metrics individually + let server1_avg = assert_server_metrics_are_correct(&stats, &server1_labels, "Server 1", REQUESTS_PER_SERVER, 3000.0); + let server2_avg = assert_server_metrics_are_correct(&stats, &server2_labels, "Server 2", REQUESTS_PER_SERVER, 4000.0); + + // Verify relationship between servers + assert_server_metrics_relationship(server1_avg, server2_avg); + + // Verify each server's result consistency individually + assert_server_result_matches_stored_average(&server1_results, &stats, &server1_labels, "Server 1"); + assert_server_result_matches_stored_average(&server2_results, &stats, &server2_labels, "Server 2"); + + // Verify metric collection integrity + assert_metric_collection_integrity(&stats); } - // Get final stats and verify metrics integrity - let stats = repo.get_stats().await; + // Test helper functions to hide implementation details - // Verify that the processed requests counters are correct for each server - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let server1_processed = stats - .metric_collection - .get_counter_value( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), - &server1_labels, - ) - .unwrap() - .value(); + fn create_server_metric_labels(port: &str) -> LabelSet { + LabelSet::from([ + ("request_kind", "connect"), + 
("server_binding_address_ip_family", "inet"), + ("server_port", port), + ]) + } - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let server2_processed = stats - .metric_collection - .get_counter_value( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), - &server2_labels, - ) - .unwrap() - .value(); + fn spawn_server_tasks( + repo: &Repository, + labels: &LabelSet, + base_processing_time_ns: usize, + now: DurationSinceUnixEpoch, + requests_per_server: usize, + ) -> Vec> { + let mut handles = vec![]; + + for i in 0..requests_per_server { + let repo_clone = repo.clone(); + let labels_clone = labels.clone(); + let handle = tokio::spawn(async move { + let processing_time_ns = base_processing_time_ns + (i % 5) * 1000; + let processing_time = Duration::from_nanos(processing_time_ns as u64); + repo_clone + .recalculate_udp_avg_processing_time_ns(processing_time, &labels_clone, now) + .await + }); + handles.push(handle); + } - assert_eq!( - server1_processed, REQUESTS_PER_SERVER as u64, - "Server 1 should have processed {REQUESTS_PER_SERVER} requests", - ); - assert_eq!( - server2_processed, REQUESTS_PER_SERVER as u64, - "Server 2 should have processed {REQUESTS_PER_SERVER} requests", - ); + handles + } - // Verify that the final average processing times are reasonable - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let server1_final_avg = stats - .metric_collection - .get_gauge_value( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &server1_labels, - ) - .unwrap() - .value(); + async fn collect_concurrent_task_results(handles: Vec>) -> Vec { + let mut server_results = Vec::new(); - #[allow(clippy::cast_sign_loss)] - #[allow(clippy::cast_possible_truncation)] - let server2_final_avg = stats - .metric_collection - .get_gauge_value( - &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), - &server2_labels, - ) - .unwrap() - .value(); + for 
handle in handles { + let result = handle.await.unwrap(); + server_results.push(result); + } - // Server 1: 100 requests cycling through [1000, 2000, 3000, 4000, 5000] ns - // Expected average: (20×1000 + 20×2000 + 20×3000 + 20×4000 + 20×5000) / 100 = 3000 ns - // Note: Moving average with concurrent updates may have small deviations due to order dependency - assert!( - (server1_final_avg - 3000.0).abs() < 50.0, - "Server 1 final average should be close to 3000ns (±50ns), got {server1_final_avg}ns" - ); + server_results + } - // Server 2: 100 requests cycling through [2000, 3000, 4000, 5000, 6000] ns - // Expected average: (20×2000 + 20×3000 + 20×4000 + 20×5000 + 20×6000) / 100 = 4000 ns - // Note: Moving average with concurrent updates may have small deviations due to order dependency - assert!( - (server2_final_avg - 4000.0).abs() < 50.0, - "Server 2 final average should be close to 4000ns (±50ns), got {server2_final_avg}ns" - ); + fn assert_server_results_are_valid(results: &[f64], server_name: &str, expected_count: usize) { + // Verify all tasks completed + assert_eq!( + results.len(), + expected_count, + "{server_name} should have {expected_count} results" + ); + + // Verify all results are valid numbers + for result in results { + assert!(result.is_finite(), "{server_name} result should be finite: {result}"); + assert!(*result > 0.0, "{server_name} result should be positive: {result}"); + } + } - // Verify that the two servers have different averages (they should since they have different processing time ranges) - assert!( - (server1_final_avg - server2_final_avg).abs() > 950.0, - "Server 1 and Server 2 should have different average processing times" - ); + fn assert_server_metrics_are_correct( + stats: &Metrics, + labels: &LabelSet, + server_name: &str, + expected_request_count: usize, + expected_avg_ns: f64, + ) -> f64 { + // Verify request count + let processed_requests = get_processed_requests_count(stats, labels); + assert_eq!( + processed_requests, 
expected_request_count as u64, + "{server_name} should have processed {expected_request_count} requests" + ); + + // Verify average processing time is within expected range + let avg_processing_time = get_average_processing_time(stats, labels); + assert!( + (avg_processing_time - expected_avg_ns).abs() < 50.0, + "{server_name} average should be ~{expected_avg_ns}ns (±50ns), got {avg_processing_time}ns" + ); + + avg_processing_time + } - // Server 2 should generally have higher averages since its processing times are higher - assert!( - server2_final_avg > server1_final_avg, - "Server 2 average ({server2_final_avg}) should be higher than Server 1 average ({server1_final_avg})" - ); + fn assert_server_metrics_relationship(server1_avg: f64, server2_avg: f64) { + const MIN_DIFFERENCE_NS: f64 = 950.0; - // Verify that the moving average calculation maintains consistency - // The last result for each server should match the final stored average - let server1_last_result = server1_results.last().copied().unwrap(); - let server2_last_result = server2_results.last().copied().unwrap(); + assert_averages_are_significantly_different(server1_avg, server2_avg, MIN_DIFFERENCE_NS); + assert_server_ordering_is_correct(server1_avg, server2_avg); + } - // Note: Due to race conditions, the last result might not exactly match the final stored average - // but it should be in a reasonable range. We'll check that they're in the same ballpark. 
- let server1_diff = (server1_last_result - server1_final_avg).abs(); - let server2_diff = (server2_last_result - server2_final_avg).abs(); + fn assert_averages_are_significantly_different(avg1: f64, avg2: f64, min_difference: f64) { + let difference = (avg1 - avg2).abs(); + assert!( + difference > min_difference, + "Server averages should differ by more than {min_difference}ns, but difference was {difference}ns" + ); + } - assert!( - server1_diff <= 0.0, - "Server 1 last result ({server1_last_result}) should be equal to final average ({server1_final_avg}), diff: {server1_diff}", + fn assert_server_ordering_is_correct(server1_avg: f64, server2_avg: f64) { + // Server 2 should have higher average since it has higher processing times [2000-6000] vs [1000-5000] + assert!( + server2_avg > server1_avg, + "Server 2 average ({server2_avg}ns) should be higher than Server 1 ({server1_avg}ns) due to higher processing time ranges" ); + } - assert!( - server2_diff <= 0.0, - "Server 2 last result ({server2_last_result}) should be equal to final average ({server2_final_avg}), diff: {server2_diff}", - ); + fn assert_server_result_matches_stored_average(results: &[f64], stats: &Metrics, labels: &LabelSet, server_name: &str) { + let final_avg = get_average_processing_time(stats, labels); + let last_result = results.last().copied().unwrap(); - // Verify that the metric collection contains the expected metrics for both servers - assert!(stats - .metric_collection - .contains_gauge(&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS))); - assert!(stats - .metric_collection - .contains_counter(&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL))); + assert!( + (last_result - final_avg).abs() <= f64::EPSILON, + "{server_name} last result ({last_result}) should match final average ({final_avg}) exactly" + ); + } - println!( - "Race condition test completed successfully:\n Server 1: {server1_processed} requests, final avg: {server1_final_avg}ns\n 
Server 2: {server2_processed} requests, final avg: {server2_final_avg}ns" - ); + fn assert_metric_collection_integrity(stats: &Metrics) { + assert!(stats + .metric_collection + .contains_gauge(&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS))); + assert!(stats + .metric_collection + .contains_counter(&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL))); + } + + fn get_processed_requests_count(stats: &Metrics, labels: &LabelSet) -> u64 { + stats + .metric_collection + .get_counter_value( + &metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSED_REQUESTS_TOTAL), + labels, + ) + .unwrap() + .value() + } + + fn get_average_processing_time(stats: &Metrics, labels: &LabelSet) -> f64 { + stats + .metric_collection + .get_gauge_value(&metric_name!(UDP_TRACKER_SERVER_PERFORMANCE_AVG_PROCESSING_TIME_NS), labels) + .unwrap() + .value() + } } } From 364c6077bd9a4eae3200c3665ac5b7fc472dba9c Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Dec 2025 12:42:58 +0000 Subject: [PATCH 756/802] fix: clippy errors --- .../src/console/clients/checker/checks/udp.rs | 1 + packages/http-protocol/src/v1/query.rs | 2 +- packages/http-tracker-core/src/services/announce.rs | 8 -------- .../swarm-coordination-registry/src/swarm/coordinator.rs | 2 +- .../src/entry/peer_list.rs | 2 +- packages/tracker-core/src/torrent/services.rs | 2 +- packages/udp-tracker-server/src/event.rs | 2 +- packages/udp-tracker-server/tests/server/contract.rs | 2 +- src/console/ci/e2e/docker.rs | 2 +- src/console/ci/e2e/runner.rs | 2 +- 10 files changed, 9 insertions(+), 16 deletions(-) diff --git a/console/tracker-client/src/console/clients/checker/checks/udp.rs b/console/tracker-client/src/console/clients/checker/checks/udp.rs index 20394d55a..611afafc4 100644 --- a/console/tracker-client/src/console/clients/checker/checks/udp.rs +++ b/console/tracker-client/src/console/clients/checker/checks/udp.rs @@ -29,6 +29,7 @@ pub async fn run(udp_trackers: Vec, timeout: 
Duration) -> Vec for ErrorKind { }, UdpScrapeError::TrackerCoreWhitelistError { source } => Self::Whitelist(source.to_string()), }, - Error::Internal { location: _, message } => Self::InternalServer(message.to_string()), + Error::Internal { location: _, message } => Self::InternalServer(message.clone()), Error::AuthRequired { location } => Self::TrackerAuthentication(location.to_string()), } } diff --git a/packages/udp-tracker-server/tests/server/contract.rs b/packages/udp-tracker-server/tests/server/contract.rs index da08bc177..e9691c879 100644 --- a/packages/udp-tracker-server/tests/server/contract.rs +++ b/packages/udp-tracker-server/tests/server/contract.rs @@ -251,7 +251,7 @@ mod receiving_an_announce_request { let transaction_id = tx_id.0.to_string(); assert!( - logs_contains_a_line_with(&["ERROR", "UDP TRACKER", &transaction_id.to_string()]), + logs_contains_a_line_with(&["ERROR", "UDP TRACKER", &transaction_id]), "Expected logs to contain: ERROR ... UDP TRACKER ... transaction_id={transaction_id}" ); } diff --git a/src/console/ci/e2e/docker.rs b/src/console/ci/e2e/docker.rs index ce2b1aa99..89d258d2c 100644 --- a/src/console/ci/e2e/docker.rs +++ b/src/console/ci/e2e/docker.rs @@ -82,7 +82,7 @@ impl Docker { let mut port_args: Vec = vec![]; for port in &options.ports { port_args.push("--publish".to_string()); - port_args.push(port.to_string()); + port_args.push(port.clone()); } let args = [initial_args, env_var_args, port_args, [image.to_string()].to_vec()].concat(); diff --git a/src/console/ci/e2e/runner.rs b/src/console/ci/e2e/runner.rs index 624878c70..6275c144b 100644 --- a/src/console/ci/e2e/runner.rs +++ b/src/console/ci/e2e/runner.rs @@ -77,7 +77,7 @@ pub fn run() -> anyhow::Result<()> { // Besides, if we don't use port 0 we should get the port numbers from the tracker configuration. // We could not use docker, but the intention was to create E2E tests including containerization. 
let options = RunOptions { - env_vars: vec![("TORRUST_TRACKER_CONFIG_TOML".to_string(), tracker_config.to_string())], + env_vars: vec![("TORRUST_TRACKER_CONFIG_TOML".to_string(), tracker_config.clone())], ports: vec![ "6969:6969/udp".to_string(), "7070:7070/tcp".to_string(), From 11721dce92bbf4cc42d389da098a157a1a053922 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 1 Dec 2025 12:43:42 +0000 Subject: [PATCH 757/802] chore(deps): udpate dependencies ``` $ cargo update Updating crates.io index Locking 264 packages to latest compatible versions Updating addr2line v0.24.2 -> v0.25.1 Updating aho-corasick v1.1.3 -> v1.1.4 Adding alloca v0.4.0 Removing android-tzdata v0.1.1 Updating anstream v0.6.19 -> v0.6.21 Updating anstyle v1.0.11 -> v1.0.13 Updating anstyle-query v1.1.3 -> v1.1.5 Updating anstyle-wincon v3.0.9 -> v3.0.11 Updating anyhow v1.0.98 -> v1.0.100 Adding astral-tokio-tar v0.5.6 Updating async-channel v2.3.1 -> v2.5.0 Updating async-compression v0.4.24 -> v0.4.34 Updating async-executor v1.13.2 -> v1.13.3 Updating async-io v2.4.1 -> v2.6.0 Updating async-lock v3.4.0 -> v3.4.1 Updating async-std v1.13.1 -> v1.13.2 Adding async-stream v0.3.6 Adding async-stream-impl v0.3.6 Updating async-trait v0.1.88 -> v0.1.89 Updating atomic v0.6.0 -> v0.6.1 Updating autocfg v1.4.0 -> v1.5.0 Updating axum v0.8.4 -> v0.8.7 Updating axum-core v0.5.2 -> v0.5.5 Updating axum-extra v0.10.1 -> v0.12.2 Updating axum-server v0.7.2 -> v0.7.3 Updating backtrace v0.3.75 -> v0.3.76 Updating bigdecimal v0.4.8 -> v0.4.9 Updating bindgen v0.72.0 -> v0.72.1 Removing bitflags v1.3.2 Removing bitflags v2.9.1 Adding bitflags v2.10.0 Updating blocking v1.6.1 -> v1.6.2 Updating bollard v0.18.1 -> v0.19.4 Adding bollard-buildkit-proto v0.7.0 Updating bollard-stubs v1.47.1-rc.27.3.1 -> v1.49.1-rc.28.4.0 Updating borsh v1.5.7 -> v1.6.0 Updating borsh-derive v1.5.7 -> v1.6.0 Updating brotli v8.0.1 -> v8.0.2 Updating bumpalo v3.18.1 -> v3.19.0 Updating bytemuck v1.23.1 -> v1.24.0 Updating 
bytes v1.10.1 -> v1.11.0 Updating camino v1.1.10 -> v1.1.12 (available: v1.2.1) Updating castaway v0.2.3 -> v0.2.4 Updating cc v1.2.26 -> v1.2.48 Updating cfg-if v1.0.1 -> v1.0.4 Updating chrono v0.4.41 -> v0.4.42 Updating clap v4.5.40 -> v4.5.53 Updating clap_builder v4.5.40 -> v4.5.53 Updating clap_derive v4.5.40 -> v4.5.49 Updating clap_lex v0.7.5 -> v0.7.6 Adding compression-codecs v0.4.33 Adding compression-core v0.4.31 Updating crc32fast v1.4.2 -> v1.5.0 Updating criterion v0.6.0 -> v0.8.0 Adding criterion-plot v0.8.0 Updating crunchy v0.2.3 -> v0.2.4 Updating crypto-common v0.1.6 -> v0.1.7 Adding darling v0.21.3 Adding darling_core v0.21.3 Adding darling_macro v0.21.3 Updating deranged v0.4.0 -> v0.5.5 Adding dyn-clone v1.0.20 Updating errno v0.3.12 -> v0.3.14 Updating etcetera v0.10.0 -> v0.11.0 Updating event-listener v5.4.0 -> v5.4.1 Adding ferroid v0.8.7 Updating filetime v0.2.25 -> v0.2.26 Adding find-msvc-tools v0.1.5 Updating flate2 v1.1.2 -> v1.1.5 Updating form_urlencoded v1.2.1 -> v1.2.2 Updating frunk v0.4.3 -> v0.4.4 Updating frunk_core v0.4.3 -> v0.4.4 Updating frunk_derives v0.4.3 -> v0.4.4 Updating frunk_proc_macro_helpers v0.1.3 -> v0.1.4 Updating frunk_proc_macros v0.1.3 -> v0.1.4 Updating fs-err v3.1.1 -> v3.2.0 Updating futures-lite v2.6.0 -> v2.6.1 Updating getrandom v0.3.3 -> v0.3.4 Updating gimli v0.31.1 -> v0.32.3 Updating glob v0.3.2 -> v0.3.3 Updating h2 v0.4.10 -> v0.4.12 Updating half v2.6.0 -> v2.7.1 Removing hashbrown v0.15.4 Adding hashbrown v0.15.5 Adding hashbrown v0.16.1 Updating hermit-abi v0.5.1 -> v0.5.2 Updating hex-literal v1.0.0 -> v1.1.0 Updating home v0.5.11 -> v0.5.12 Updating http v1.3.1 -> v1.4.0 Updating hyper v1.6.0 -> v1.8.1 Adding hyper-timeout v0.5.2 Updating hyper-util v0.1.14 -> v0.1.18 Updating iana-time-zone v0.1.63 -> v0.1.64 Updating icu_collections v2.0.0 -> v2.1.1 Updating icu_locale_core v2.0.0 -> v2.1.1 Updating icu_normalizer v2.0.0 -> v2.1.1 Updating icu_normalizer_data v2.0.0 -> v2.1.1 Updating 
icu_properties v2.0.1 -> v2.1.1 Updating icu_properties_data v2.0.1 -> v2.1.1 Updating icu_provider v2.0.0 -> v2.1.1 Updating idna v1.0.3 -> v1.1.0 Updating indexmap v2.9.0 -> v2.12.1 Updating iri-string v0.7.8 -> v0.7.9 Updating is-terminal v0.4.16 -> v0.4.17 Updating is_terminal_polyfill v1.70.1 -> v1.70.2 Adding itertools v0.14.0 Updating jobserver v0.1.33 -> v0.1.34 Updating js-sys v0.3.77 -> v0.3.83 Updating libc v0.2.172 -> v0.2.177 Updating libloading v0.8.8 -> v0.8.9 Updating libredox v0.1.3 -> v0.1.10 Updating libsqlite3-sys v0.34.0 -> v0.35.0 Updating libz-sys v1.1.22 -> v1.1.23 Updating linux-raw-sys v0.9.4 -> v0.11.0 Updating litemap v0.8.0 -> v0.8.1 Updating lock_api v0.4.13 -> v0.4.14 Updating log v0.4.27 -> v0.4.28 Updating memchr v2.7.4 -> v2.7.6 Updating mio v1.0.4 -> v1.1.0 Updating mockall v0.13.1 -> v0.14.0 Updating mockall_derive v0.13.1 -> v0.14.0 Updating nu-ansi-term v0.46.0 -> v0.50.3 Adding num v0.4.3 Adding num-complex v0.4.6 Adding num-iter v0.1.45 Adding num-rational v0.4.2 Updating object v0.36.7 -> v0.37.3 Updating once_cell_polyfill v1.70.1 -> v1.70.2 Updating openssl v0.10.73 -> v0.10.75 Updating openssl-sys v0.9.109 -> v0.9.111 Removing overload v0.1.1 Updating owo-colors v4.2.1 -> v4.2.3 Adding page_size v0.6.0 Updating parking_lot v0.12.4 -> v0.12.5 Updating parking_lot_core v0.9.11 -> v0.9.12 Updating pem v3.0.5 -> v3.0.6 Updating percent-encoding v2.3.1 -> v2.3.2 Adding pin-project v1.1.10 Adding pin-project-internal v1.1.10 Updating polling v3.8.0 -> v3.11.0 Updating potential_utf v0.1.2 -> v0.1.4 Updating proc-macro-crate v3.3.0 -> v3.4.0 Updating proc-macro2 v1.0.95 -> v1.0.103 Adding prost v0.14.2 Adding prost-derive v0.14.2 Adding prost-types v0.14.2 Updating quote v1.0.40 -> v1.0.42 Updating r-efi v5.2.0 -> v5.3.0 Updating r2d2_sqlite v0.29.0 -> v0.31.0 Updating rand v0.9.1 -> v0.9.2 Updating rayon v1.10.0 -> v1.11.0 Updating rayon-core v1.12.1 -> v1.13.0 Removing redox_syscall v0.3.5 Removing redox_syscall v0.5.12 Adding 
redox_syscall v0.5.18 Adding ref-cast v1.0.25 Adding ref-cast-impl v1.0.25 Updating regex v1.11.1 -> v1.12.2 Updating regex-automata v0.4.9 -> v0.4.13 Updating regex-syntax v0.8.5 -> v0.8.8 Updating reqwest v0.12.20 -> v0.12.24 Adding rstest v0.26.1 Adding rstest_macros v0.26.1 Updating rusqlite v0.36.0 -> v0.37.0 Updating rust_decimal v1.37.1 -> v1.39.0 Updating rustc-demangle v0.1.25 -> v0.1.26 Updating rustix v1.0.7 -> v1.1.2 Updating rustls v0.23.27 -> v0.23.35 Updating rustls-native-certs v0.8.1 -> v0.8.2 Updating rustls-pki-types v1.12.0 -> v1.13.1 Updating rustls-webpki v0.103.3 -> v0.103.8 Updating rustversion v1.0.21 -> v1.0.22 Updating schannel v0.1.27 -> v0.1.28 Adding schemars v0.9.0 Adding schemars v1.1.0 Updating security-framework v3.2.0 -> v3.5.1 Updating security-framework-sys v2.14.0 -> v2.15.0 Updating semver v1.0.26 -> v1.0.27 Updating serde v1.0.219 -> v1.0.228 Updating serde_bytes v0.11.17 -> v0.11.19 Adding serde_core v1.0.228 Updating serde_derive v1.0.219 -> v1.0.228 Updating serde_html_form v0.2.7 -> v0.2.8 Updating serde_json v1.0.140 -> v1.0.145 Updating serde_path_to_error v0.1.17 -> v0.1.20 Adding serde_spanned v1.0.3 Updating serde_with v3.12.0 -> v3.16.1 Updating serde_with_macros v3.12.0 -> v3.16.1 Updating signal-hook-registry v1.4.5 -> v1.4.7 Adding simd-adler32 v0.3.7 Updating slab v0.4.9 -> v0.4.11 Adding socket2 v0.6.1 Updating stable_deref_trait v1.2.0 -> v1.2.1 Updating syn v2.0.102 -> v2.0.111 Updating tempfile v3.20.0 -> v3.23.0 Updating terminal_size v0.4.2 -> v0.4.3 Updating testcontainers v0.24.0 -> v0.26.0 Updating thiserror v2.0.12 -> v2.0.17 Updating thiserror-impl v2.0.12 -> v2.0.17 Updating thread_local v1.1.8 -> v1.1.9 Updating time v0.3.41 -> v0.3.44 Updating time-core v0.1.4 -> v0.1.6 Updating time-macros v0.2.22 -> v0.2.24 Updating tinystr v0.8.1 -> v0.8.2 Updating tinyvec v1.9.0 -> v1.10.0 Updating tokio v1.45.1 -> v1.48.0 Updating tokio-macros v2.5.0 -> v2.6.0 Updating tokio-rustls v0.26.2 -> v0.26.4 Removing 
tokio-tar v0.3.1 Updating tokio-util v0.7.15 -> v0.7.17 Adding toml v0.9.8 Adding toml_datetime v0.7.3 Adding toml_edit v0.23.7 Adding toml_parser v1.0.4 Adding toml_writer v1.0.4 Adding tonic v0.14.2 Adding tonic-prost v0.14.2 Updating tower-http v0.6.6 -> v0.6.7 Updating tracing v0.1.41 -> v0.1.43 Updating tracing-attributes v0.1.29 -> v0.1.31 Updating tracing-core v0.1.34 -> v0.1.35 Updating tracing-subscriber v0.3.19 -> v0.3.22 Updating typenum v1.18.0 -> v1.19.0 Updating unicode-ident v1.0.18 -> v1.0.22 Updating unicode-width v0.2.1 -> v0.2.2 Adding ureq v3.1.4 Adding ureq-proto v0.5.3 Updating url v2.5.4 -> v2.5.7 Adding utf-8 v0.7.6 Updating uuid v1.17.0 -> v1.18.1 Updating value-bag v1.11.1 -> v1.12.0 Removing wasi v0.14.2+wasi-0.2.4 Adding wasip2 v1.0.1+wasi-0.2.4 Updating wasm-bindgen v0.2.100 -> v0.2.106 Removing wasm-bindgen-backend v0.2.100 Updating wasm-bindgen-futures v0.4.50 -> v0.4.56 Updating wasm-bindgen-macro v0.2.100 -> v0.2.106 Updating wasm-bindgen-macro-support v0.2.100 -> v0.2.106 Updating wasm-bindgen-shared v0.2.100 -> v0.2.106 Updating web-sys v0.3.77 -> v0.3.83 Adding web-time v1.1.0 Adding webpki-roots v1.0.4 Updating winapi-util v0.1.9 -> v0.1.11 Updating windows-core v0.61.2 -> v0.62.2 Updating windows-implement v0.60.0 -> v0.60.2 Updating windows-interface v0.59.1 -> v0.59.3 Updating windows-link v0.1.1 -> v0.2.1 Updating windows-registry v0.5.2 -> v0.6.1 Updating windows-result v0.3.4 -> v0.4.1 Updating windows-strings v0.4.2 -> v0.5.1 Adding windows-sys v0.60.2 Adding windows-sys v0.61.2 Updating windows-targets v0.53.0 -> v0.53.5 Updating windows_aarch64_gnullvm v0.53.0 -> v0.53.1 Updating windows_aarch64_msvc v0.53.0 -> v0.53.1 Updating windows_i686_gnu v0.53.0 -> v0.53.1 Updating windows_i686_gnullvm v0.53.0 -> v0.53.1 Updating windows_i686_msvc v0.53.0 -> v0.53.1 Updating windows_x86_64_gnu v0.53.0 -> v0.53.1 Updating windows_x86_64_gnullvm v0.53.0 -> v0.53.1 Updating windows_x86_64_msvc v0.53.0 -> v0.53.1 Updating winnow 
v0.7.11 -> v0.7.14 Adding wit-bindgen v0.46.0 Removing wit-bindgen-rt v0.39.0 Updating writeable v0.6.1 -> v0.6.2 Updating xattr v1.5.0 -> v1.6.1 Updating yoke v0.8.0 -> v0.8.1 Updating yoke-derive v0.8.0 -> v0.8.1 Updating zerocopy v0.8.25 -> v0.8.31 Updating zerocopy-derive v0.8.25 -> v0.8.31 Updating zeroize v1.8.1 -> v1.8.2 Updating zerotrie v0.2.2 -> v0.2.3 Updating zerovec v0.11.2 -> v0.11.5 Updating zerovec-derive v0.11.1 -> v0.11.2 Updating zstd-sys v2.0.15+zstd.1.5.7 -> v2.0.16+zstd.1.5.7 note: pass `--verbose` to see 6 unchanged dependencies behind latest ``` --- Cargo.lock | 1870 +++++++++++++++++++++++++++++++++------------------- 1 file changed, 1187 insertions(+), 683 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b523c8b60..62d10c72f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.24.2" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +checksum = "1b5d307320b3181d6d7954e663bd7c774a838b8220fe0593c86d9fb09f498b4b" dependencies = [ "gimli", ] @@ -30,9 +30,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" dependencies = [ "memchr", ] @@ -53,16 +53,19 @@ dependencies = [ ] [[package]] -name = "allocator-api2" -version = "0.2.21" +name = "alloca" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" +checksum = "e5a7d05ea6aea7e9e64d25b9156ba2fee3fdd659e34e41063cd2fc7cd020d7f4" +dependencies = [ + "cc", +] [[package]] -name = "android-tzdata" -version = "0.1.1" +name = "allocator-api2" +version = 
"0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "android_system_properties" @@ -81,9 +84,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.19" +version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "301af1932e46185686725e0fad2f8f2aa7da69dd70bf6ecc44d6b703844a3933" +checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" dependencies = [ "anstyle", "anstyle-parse", @@ -96,9 +99,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" +checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" [[package]] name = "anstyle-parse" @@ -111,29 +114,29 @@ dependencies = [ [[package]] name = "anstyle-query" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8bdeb6047d8983be085bab0ba1472e6dc604e7041dbf6fcd5e71523014fae9" +checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "anstyle-wincon" -version = "3.0.9" +version = "3.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "403f75924867bb1033c59fbf0797484329750cfbe3c4325cd33127941fabc882" +checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" dependencies = [ "anstyle", "once_cell_polyfill", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "anyhow" -version = "1.0.98" +version = "1.0.100" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" [[package]] name = "approx" @@ -182,6 +185,22 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +[[package]] +name = "astral-tokio-tar" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec179a06c1769b1e42e1e2cbe74c7dcdb3d6383c838454d063eaac5bbb7ebbe5" +dependencies = [ + "filetime", + "futures-core", + "libc", + "portable-atomic", + "rustc-hash", + "tokio", + "tokio-stream", + "xattr", +] + [[package]] name = "async-attributes" version = "1.1.2" @@ -205,9 +224,9 @@ dependencies = [ [[package]] name = "async-channel" -version = "2.3.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +checksum = "924ed96dd52d1b75e9c1a3e6275715fd320f5f9439fb5a4a11fa51f4221158d2" dependencies = [ "concurrent-queue", "event-listener-strategy", @@ -217,25 +236,22 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.24" +version = "0.4.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d615619615a650c571269c00dca41db04b9210037fa76ed8239f70404ab56985" +checksum = "0e86f6d3dc9dc4352edeea6b8e499e13e3f5dc3b964d7ca5fd411415a3498473" dependencies = [ - "brotli", - "flate2", + "compression-codecs", + "compression-core", "futures-core", - "memchr", "pin-project-lite", "tokio", - "zstd", - "zstd-safe", ] [[package]] name = "async-executor" -version = "1.13.2" +version = "1.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb812ffb58524bdd10860d7d974e2f01cc0950c2438a74ee5ec2e2280c6c4ffa" +checksum = 
"497c00e0fd83a72a79a39fcbd8e3e2f055d6f6c7e025f3b3d91f4f8e76527fb8" dependencies = [ "async-task", "concurrent-queue", @@ -251,7 +267,7 @@ version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" dependencies = [ - "async-channel 2.3.1", + "async-channel 2.5.0", "async-executor", "async-io", "async-lock", @@ -263,11 +279,11 @@ dependencies = [ [[package]] name = "async-io" -version = "2.4.1" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1237c0ae75a0f3765f58910ff9cdd0a12eeb39ab2f4c7de23262f337f0aacbb3" +checksum = "456b8a8feb6f42d237746d4b3e9a178494627745c3c56c6ea55d92ba50d026fc" dependencies = [ - "async-lock", + "autocfg", "cfg-if", "concurrent-queue", "futures-io", @@ -276,26 +292,25 @@ dependencies = [ "polling", "rustix", "slab", - "tracing", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "async-lock" -version = "3.4.0" +version = "3.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" +checksum = "5fd03604047cee9b6ce9de9f70c6cd540a0520c813cbd49bae61f33ab80ed1dc" dependencies = [ - "event-listener 5.4.0", + "event-listener 5.4.1", "event-listener-strategy", "pin-project-lite", ] [[package]] name = "async-std" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "730294c1c08c2e0f85759590518f6333f0d5a0a766a27d519c1b244c3dfd8a24" +checksum = "2c8e079a4ab67ae52b7403632e4618815d6db36d2a010cfe41b02c1b1578f93b" dependencies = [ "async-attributes", "async-channel 1.9.0", @@ -318,6 +333,28 @@ dependencies = [ "wasm-bindgen-futures", ] +[[package]] +name = "async-stream" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" 
+dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + [[package]] name = "async-task" version = "4.7.1" @@ -326,20 +363,20 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.88" +version = "0.1.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "atomic" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d818003e740b63afc82337e3160717f4f63078720a810b7b903e70a5d1d2994" +checksum = "a89cbf775b137e9b968e67227ef7f775587cde3fd31b0d8599dbd0f598a48340" dependencies = [ "bytemuck", ] @@ -352,15 +389,15 @@ checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "autocfg" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "axum" -version = "0.8.4" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "021e862c184ae977658b36c4500f7feac3221ca5da43e3f25bd04ab6c79a29b5" +checksum = "5b098575ebe77cb6d14fc7f32749631a6e44edbef6b796f89b020e99ba20d425" dependencies = [ "axum-core", "axum-macros", @@ -378,8 +415,7 @@ dependencies = [ "mime", "percent-encoding", 
"pin-project-lite", - "rustversion", - "serde", + "serde_core", "serde_json", "serde_path_to_error", "serde_urlencoded", @@ -404,9 +440,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.5.2" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68464cd0412f486726fb3373129ef5d2993f90c34bc2bc1c1e9943b2f4fc7ca6" +checksum = "59446ce19cd142f8833f856eb31f3eb097812d1479ab224f54d72428ca21ea22" dependencies = [ "bytes", "futures-core", @@ -415,7 +451,6 @@ dependencies = [ "http-body-util", "mime", "pin-project-lite", - "rustversion", "sync_wrapper", "tower-layer", "tower-service", @@ -424,27 +459,27 @@ dependencies = [ [[package]] name = "axum-extra" -version = "0.10.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45bf463831f5131b7d3c756525b305d40f1185b688565648a92e1392ca35713d" +checksum = "dbfe9f610fe4e99cf0cfcd03ccf8c63c28c616fe714d80475ef731f3b13dd21b" dependencies = [ "axum", "axum-core", "bytes", "form_urlencoded", + "futures-core", "futures-util", "http", "http-body", "http-body-util", "mime", "pin-project-lite", - "rustversion", - "serde", + "serde_core", "serde_html_form", "serde_path_to_error", - "tower", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -455,14 +490,14 @@ checksum = "604fde5e028fea851ce1d8570bbdc034bec850d157f7569d10f347d06808c05c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "axum-server" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "495c05f60d6df0093e8fb6e74aa5846a0ad06abaf96d76166283720bf740f8ab" +checksum = "c1ab4a3ec9ea8a657c72d99a03a824af695bd0fb5ec639ccbd9cd3543b41a5f9" dependencies = [ "arc-swap", "bytes", @@ -482,9 +517,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.75" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002" +checksum = "bb531853791a215d7c62a30daf0dde835f381ab5de4589cfe7c649d2cbe92bd6" dependencies = [ "addr2line", "cfg-if", @@ -492,7 +527,7 @@ dependencies = [ "miniz_oxide", "object", "rustc-demangle", - "windows-targets 0.52.6", + "windows-link", ] [[package]] @@ -518,9 +553,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "bigdecimal" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a22f228ab7a1b23027ccc6c350b72868017af7ea8356fbdf19f8d991c690013" +checksum = "560f42649de9fa436b73517378a147ec21f6c997a546581df4b4b31677828934" dependencies = [ "autocfg", "libm", @@ -537,11 +572,11 @@ checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] name = "bindgen" -version = "0.72.0" +version = "0.72.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f72209734318d0b619a5e0f5129918b848c416e122a3c4ce054e03cb87b726f" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" dependencies = [ - "bitflags 2.9.1", + "bitflags", "cexpr", "clang-sys", "itertools 0.13.0", @@ -550,7 +585,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -561,15 +596,9 @@ checksum = "02b4ff8b16e6076c3e14220b39fbc1fabb6737522281a388998046859400895f" [[package]] name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitflags" -version = "2.9.1" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" [[package]] name = "bittorrent-http-tracker-core" @@ 
-585,7 +614,7 @@ dependencies = [ "mockall", "serde", "serde_json", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "tokio-util", "torrust-tracker-clock", @@ -610,7 +639,7 @@ dependencies = [ "percent-encoding", "serde", "serde_bencode", - "thiserror 2.0.12", + "thiserror 2.0.17", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-contrib-bencode", @@ -646,7 +675,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_repr", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "torrust-tracker-configuration", "torrust-tracker-located-error", @@ -668,11 +697,11 @@ dependencies = [ "r2d2", "r2d2_mysql", "r2d2_sqlite", - "rand 0.9.1", + "rand 0.9.2", "serde", "serde_json", "testcontainers", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "tokio-util", "torrust-rest-tracker-api-client", @@ -703,9 +732,9 @@ dependencies = [ "futures", "lazy_static", "mockall", - "rand 0.9.1", + "rand 0.9.2", "serde", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "tokio-util", "torrust-tracker-clock", @@ -751,11 +780,11 @@ dependencies = [ [[package]] name = "blocking" -version = "1.6.1" +version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" +checksum = "e83f8d02be6967315521be875afa792a316e28d57b5a2d401897e2a7921b7f21" dependencies = [ - "async-channel 2.3.1", + "async-channel 2.5.0", "async-task", "futures-io", "futures-lite", @@ -783,13 +812,17 @@ dependencies = [ [[package]] name = "bollard" -version = "0.18.1" +version = "0.19.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97ccca1260af6a459d75994ad5acc1651bcabcbdbc41467cc9786519ab854c30" +checksum = "87a52479c9237eb04047ddb94788c41ca0d26eaff8b697ecfbb4c32f7fdc3b1b" dependencies = [ + "async-stream", "base64 0.22.1", + "bitflags", + "bollard-buildkit-proto", "bollard-stubs", "bytes", + "chrono", "futures-core", "futures-util", "hex", @@ -802,7 +835,9 
@@ dependencies = [ "hyper-util", "hyperlocal", "log", + "num", "pin-project-lite", + "rand 0.9.2", "rustls", "rustls-native-certs", "rustls-pemfile", @@ -812,30 +847,51 @@ dependencies = [ "serde_json", "serde_repr", "serde_urlencoded", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", + "tokio-stream", "tokio-util", + "tonic", "tower-service", "url", "winapi", ] +[[package]] +name = "bollard-buildkit-proto" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85a885520bf6249ab931a764ffdb87b0ceef48e6e7d807cfdb21b751e086e1ad" +dependencies = [ + "prost", + "prost-types", + "tonic", + "tonic-prost", + "ureq", +] + [[package]] name = "bollard-stubs" -version = "1.47.1-rc.27.3.1" +version = "1.49.1-rc.28.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f179cfbddb6e77a5472703d4b30436bff32929c0aa8a9008ecf23d1d3cdd0da" +checksum = "5731fe885755e92beff1950774068e0cae67ea6ec7587381536fca84f1779623" dependencies = [ + "base64 0.22.1", + "bollard-buildkit-proto", + "bytes", + "chrono", + "prost", "serde", + "serde_json", "serde_repr", "serde_with", ] [[package]] name = "borsh" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" +checksum = "d1da5ab77c1437701eeff7c88d968729e7766172279eab0676857b3d63af7a6f" dependencies = [ "borsh-derive", "cfg_aliases", @@ -843,22 +899,22 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.5.7" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" +checksum = "0686c856aa6aac0c4498f936d7d6a02df690f614c03e4d906d1018062b5c5e2c" dependencies = [ "once_cell", "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "brotli" -version = "8.0.1" +version = "8.0.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9991eea70ea4f293524138648e41ee89b0b2b12ddef3b255effa43c8056e0e0d" +checksum = "4bd8b9603c7aa97359dbd97ecf258968c95f3adddd6db2f7e7a5bef101c84560" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -892,9 +948,9 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.18.1" +version = "3.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db76d6187cd04dff33004d8e6c9cc4e05cd330500379d2394209271b4aeee" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" [[package]] name = "bytecheck" @@ -920,9 +976,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.23.1" +version = "1.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" +checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" [[package]] name = "byteorder" @@ -932,15 +988,15 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.10.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" +checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" [[package]] name = "camino" -version = "1.1.10" +version = "1.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0da45bc31171d8d6960122e222a67740df867c1dd53b4d51caa297084c185cab" +checksum = "dd0b03af37dad7a14518b7691d81acb0f8222604ad3d1b02f6b4bed5188c0cd5" dependencies = [ "serde", ] @@ -953,19 +1009,20 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "castaway" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "0abae9be0aaf9ea96a3b1b8b1b55c602ca751eba1b1500220cea4ecbafe7c0d5" +checksum = "dec551ab6e7578819132c713a93c022a05d60159dc86e7a7050223577484c55a" dependencies = [ "rustversion", ] [[package]] name = "cc" -version = "1.2.26" +version = "1.2.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956a5e21988b87f372569b66183b78babf23ebc2e744b733e4350a752c4dafac" +checksum = "c481bdbf0ed3b892f6f806287d72acd515b352a4ec27a208489b8c1bc839633a" dependencies = [ + "find-msvc-tools", "jobserver", "libc", "shlex", @@ -982,9 +1039,9 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.1" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "cfg_aliases" @@ -994,11 +1051,10 @@ checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ - "android-tzdata", "iana-time-zone", "num-traits", "serde", @@ -1055,9 +1111,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.40" +version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40b6887a1d8685cebccf115538db5c0efe625ccac9696ad45c409d96566e910f" +checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8" dependencies = [ "clap_builder", "clap_derive", @@ -1065,9 +1121,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.40" +version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e" +checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00" dependencies = [ "anstream", "anstyle", @@ -1077,21 +1133,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.40" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2c7947ae4cc3d851207c1adb5b5e260ff0cca11446b1d6d1423788e442257ce" +checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "clap_lex" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" +checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" [[package]] name = "cmake" @@ -1121,6 +1177,26 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "compression-codecs" +version = "0.4.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "302266479cb963552d11bd042013a58ef1adc56768016c8b82b4199488f2d4ad" +dependencies = [ + "brotli", + "compression-core", + "flate2", + "memchr", + "zstd", + "zstd-safe", +] + +[[package]] +name = "compression-core" +version = "0.4.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75984efb6ed102a0d42db99afb6c1948f0380d1d91808d5529916e6c08b49d8d" + [[package]] name = "concurrent-queue" version = "2.5.0" @@ -1167,9 +1243,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.4.2" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" dependencies = [ "cfg-if", ] @@ -1184,7 +1260,7 @@ dependencies = [ "cast", "ciborium", "clap", - 
"criterion-plot", + "criterion-plot 0.5.0", "futures", "is-terminal", "itertools 0.10.5", @@ -1204,18 +1280,20 @@ dependencies = [ [[package]] name = "criterion" -version = "0.6.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bf7af66b0989381bd0be551bd7cc91912a655a58c6918420c9527b1fd8b4679" +checksum = "a0dfe5e9e71bdcf4e4954f7d14da74d1cdb92a3a07686452d1509652684b1aab" dependencies = [ + "alloca", "anes", "cast", "ciborium", "clap", - "criterion-plot", + "criterion-plot 0.8.0", "itertools 0.13.0", "num-traits", "oorandom", + "page_size", "plotters", "rayon", "regex", @@ -1236,6 +1314,16 @@ dependencies = [ "itertools 0.10.5", ] +[[package]] +name = "criterion-plot" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5de36c2bee19fba779808f92bf5d9b0fa5a40095c277aba10c458a12b35d21d6" +dependencies = [ + "cast", + "itertools 0.13.0", +] + [[package]] name = "crossbeam" version = "0.8.4" @@ -1304,15 +1392,15 @@ checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" [[package]] name = "crypto-common" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" dependencies = [ "generic-array", "typenum", @@ -1324,8 +1412,18 @@ version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.20.11", + 
"darling_macro 0.20.11", +] + +[[package]] +name = "darling" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" +dependencies = [ + "darling_core 0.21.3", + "darling_macro 0.21.3", ] [[package]] @@ -1339,7 +1437,21 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.102", + "syn 2.0.111", +] + +[[package]] +name = "darling_core" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.111", ] [[package]] @@ -1348,9 +1460,20 @@ version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ - "darling_core", + "darling_core 0.20.11", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "darling_macro" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" +dependencies = [ + "darling_core 0.21.3", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -1369,12 +1492,12 @@ dependencies = [ [[package]] name = "deranged" -version = "0.4.0" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" +checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" dependencies = [ "powerfmt", - "serde", + "serde_core", ] [[package]] @@ -1394,7 +1517,7 @@ checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", "unicode-xid", ] @@ -1406,7 +1529,7 @@ checksum = 
"ccfae181bab5ab6c5478b2ccb69e4c68a02f8c3ec72f6616bfec9dbc599d2ee0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -1433,7 +1556,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -1453,6 +1576,12 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" +[[package]] +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" + [[package]] name = "either" version = "1.15.0" @@ -1486,23 +1615,22 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.12" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea14ef9355e3beab063703aa9dab15afd25f0667c341310c1e5274bb1d0da18" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "etcetera" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c7b13d0780cb82722fd59f6f57f925e143427e4a75313a6c77243bf5326ae6" +checksum = "de48cc4d1c1d97a20fd819def54b890cadde72ed3ad0c614822a0a433361be96" dependencies = [ "cfg-if", - "home", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -1513,9 +1641,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "5.4.0" +version = "5.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" +checksum = 
"e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab" dependencies = [ "concurrent-queue", "parking", @@ -1528,7 +1656,7 @@ version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" dependencies = [ - "event-listener 5.4.0", + "event-listener 5.4.1", "pin-project-lite", ] @@ -1550,6 +1678,17 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" +[[package]] +name = "ferroid" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0e9414a6ae93ef993ce40a1e02944f13d4508e2bf6f1ced1580ce6910f08253" +dependencies = [ + "portable-atomic", + "rand 0.9.2", + "web-time", +] + [[package]] name = "figment" version = "0.10.19" @@ -1561,28 +1700,34 @@ dependencies = [ "pear", "serde", "tempfile", - "toml", + "toml 0.8.23", "uncased", "version_check", ] [[package]] name = "filetime" -version = "0.2.25" +version = "0.2.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" +checksum = "bc0505cd1b6fa6580283f6bdf70a73fcf4aba1184038c90902b92b3dd0df63ed" dependencies = [ "cfg-if", "libc", "libredox", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] +[[package]] +name = "find-msvc-tools" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" + [[package]] name = "flate2" -version = "1.1.2" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" +checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" dependencies = [ "crc32fast", "libz-sys", @@ -1618,9 +1763,9 @@ checksum = 
"00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" dependencies = [ "percent-encoding", ] @@ -1653,9 +1798,9 @@ checksum = "28dd6caf6059519a65843af8fe2a3ae298b14b80179855aeb4adc2c1934ee619" [[package]] name = "frunk" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "874b6a17738fc273ec753618bac60ddaeac48cb1d7684c3e7bd472e57a28b817" +checksum = "28aef0f9aa070bce60767c12ba9cb41efeaf1a2bc6427f87b7d83f11239a16d7" dependencies = [ "frunk_core", "frunk_derives", @@ -1665,53 +1810,53 @@ dependencies = [ [[package]] name = "frunk_core" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3529a07095650187788833d585c219761114005d5976185760cf794d265b6a5c" +checksum = "476eeaa382e3462b84da5d6ba3da97b5786823c2d0d3a0d04ef088d073da225c" dependencies = [ "serde", ] [[package]] name = "frunk_derives" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e99b8b3c28ae0e84b604c75f721c21dc77afb3706076af5e8216d15fd1deaae3" +checksum = "a0b4095fc99e1d858e5b8c7125d2638372ec85aa0fe6c807105cf10b0265ca6c" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "frunk_proc_macro_helpers" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05a956ef36c377977e512e227dcad20f68c2786ac7a54dacece3746046fea5ce" +checksum = "1952b802269f2db12ab7c0bd328d0ae8feaabf19f352a7b0af7bb0c5693abfce" dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] 
name = "frunk_proc_macros" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67e86c2c9183662713fea27ea527aad20fb15fee635a71081ff91bf93df4dc51" +checksum = "3462f590fa236005bd7ca4847f81438bd6fe0febd4d04e11968d4c2e96437e78" dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "fs-err" -version = "3.1.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d7be93788013f265201256d58f04936a8079ad5dc898743aa20525f503b683" +checksum = "62d91fd049c123429b018c47887d3f75a265540dd3c30ba9cb7bae9197edb03a" dependencies = [ "autocfg", "tokio", @@ -1773,9 +1918,9 @@ checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" -version = "2.6.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" +checksum = "f78e10609fe0e0b3f4157ffab1876319b5b0db102a2c60dc4626306dc46b44ad" dependencies = [ "fastrand", "futures-core", @@ -1792,7 +1937,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -1849,32 +1994,32 @@ checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "libc", - "wasi 0.11.1+wasi-snapshot-preview1", + "wasi", ] [[package]] name = "getrandom" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", "libc", "r-efi", - "wasi 0.14.2+wasi-0.2.4", + "wasip2", ] [[package]] name = "gimli" -version = "0.31.1" +version = "0.32.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7" [[package]] name = "glob" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" [[package]] name = "gloo-timers" @@ -1890,9 +2035,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9421a676d1b147b16b82c9225157dc629087ef8ec4d5e2960f9437a90dac0a5" +checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" dependencies = [ "atomic-waker", "bytes", @@ -1900,7 +2045,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.9.0", + "indexmap 2.12.1", "slab", "tokio", "tokio-util", @@ -1909,12 +2054,13 @@ dependencies = [ [[package]] name = "half" -version = "2.6.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", + "zerocopy 0.8.31", ] [[package]] @@ -1934,22 +2080,28 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" [[package]] name = "hashbrown" -version = "0.15.4" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ "allocator-api2", "equivalent", "foldhash", ] +[[package]] +name = "hashbrown" +version = "0.16.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" + [[package]] name = "hashlink" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" dependencies = [ - "hashbrown 0.15.4", + "hashbrown 0.15.5", ] [[package]] @@ -1960,9 +2112,9 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hermit-abi" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f154ce46856750ed433c8649605bf7ed2de3bc35fd9d2a9f30cddd873c80cb08" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "hex" @@ -1972,27 +2124,26 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hex-literal" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcaaec4551594c969335c98c903c1397853d4198408ea609190f420500f6be71" +checksum = "e712f64ec3850b98572bffac52e2c6f282b29fe6c5fa6d42334b30be438d95c1" [[package]] name = "home" -version = "0.5.11" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" +checksum = "cc627f471c528ff0c4a49e1d5e60450c8f6461dd6d10ba9dcd3a61d3dff7728d" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "http" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" dependencies = [ "bytes", - "fnv", "itoa", ] @@ -2033,13 +2184,14 @@ checksum = 
"df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "1.6.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" +checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" dependencies = [ + "atomic-waker", "bytes", "futures-channel", - "futures-util", + "futures-core", "h2", "http", "http-body", @@ -2047,6 +2199,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", + "pin-utils", "smallvec", "tokio", "want", @@ -2083,6 +2236,19 @@ dependencies = [ "tower-service", ] +[[package]] +name = "hyper-timeout" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" +dependencies = [ + "hyper", + "hyper-util", + "pin-project-lite", + "tokio", + "tower-service", +] + [[package]] name = "hyper-tls" version = "0.6.0" @@ -2101,9 +2267,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.14" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc2fdfdbff08affe55bb779f33b053aa1fe5dd5b54c257343c17edfa55711bdb" +checksum = "52e9a2a24dc5c6821e71a7030e1e14b7b632acac55c40e9d2e082c621261bb56" dependencies = [ "base64 0.22.1", "bytes", @@ -2117,7 +2283,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2", + "socket2 0.6.1", "system-configuration", "tokio", "tower-service", @@ -2142,9 +2308,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.63" +version = "0.1.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2166,9 +2332,9 @@ 
dependencies = [ [[package]] name = "icu_collections" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" dependencies = [ "displaydoc", "potential_utf", @@ -2179,9 +2345,9 @@ dependencies = [ [[package]] name = "icu_locale_core" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" dependencies = [ "displaydoc", "litemap", @@ -2192,11 +2358,10 @@ dependencies = [ [[package]] name = "icu_normalizer" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" dependencies = [ - "displaydoc", "icu_collections", "icu_normalizer_data", "icu_properties", @@ -2207,42 +2372,38 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" [[package]] name = "icu_properties" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" +checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" dependencies = [ - "displaydoc", "icu_collections", "icu_locale_core", "icu_properties_data", "icu_provider", - "potential_utf", "zerotrie", "zerovec", ] [[package]] name = 
"icu_properties_data" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" +checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" [[package]] name = "icu_provider" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" dependencies = [ "displaydoc", "icu_locale_core", - "stable_deref_trait", - "tinystr", "writeable", "yoke", "zerofrom", @@ -2258,9 +2419,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" dependencies = [ "idna_adapter", "smallvec", @@ -2290,13 +2451,14 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.9.0" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" dependencies = [ "equivalent", - "hashbrown 0.15.4", + "hashbrown 0.16.1", "serde", + "serde_core", ] [[package]] @@ -2331,9 +2493,9 @@ checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "iri-string" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbc5ebe9c3a1a7a5127f920a418f7585e9e758e911d0466ed004f393b0e380b2" +checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397" dependencies = [ "memchr", 
"serde", @@ -2341,13 +2503,13 @@ dependencies = [ [[package]] name = "is-terminal" -version = "0.4.16" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" +checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi", "libc", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -2358,9 +2520,9 @@ checksum = "7655c9839580ee829dfacba1d1278c2b7883e50a277ff7541299489d6bdfdc45" [[package]] name = "is_terminal_polyfill" -version = "1.70.1" +version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" +checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" [[package]] name = "itertools" @@ -2381,26 +2543,35 @@ dependencies = [ ] [[package]] -name = "itoa" -version = "1.0.15" +name = "itertools" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.3.4", "libc", ] [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" dependencies = [ "once_cell", "wasm-bindgen", @@ -2423,18 +2594,18 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.172" +version = "0.2.177" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" +checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" [[package]] name = "libloading" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" dependencies = [ "cfg-if", - "windows-targets 0.53.0", + "windows-link", ] [[package]] @@ -2445,20 +2616,20 @@ checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "libredox" -version = "0.1.3" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" dependencies = [ - "bitflags 2.9.1", + "bitflags", "libc", - "redox_syscall 0.5.12", + "redox_syscall", ] [[package]] name = "libsqlite3-sys" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91632f3b4fb6bd1d72aa3d78f41ffecfcf2b1a6648d8c241dbe7dbfaf4875e15" +checksum = "133c182a6a2c87864fe97778797e46c7e999672690dc9fa3ee8e241aa4a9c13f" dependencies = [ "cc", "pkg-config", @@ -2467,9 +2638,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.22" +version = "1.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" +checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" dependencies = [ "cc", "pkg-config", @@ -2478,15 +2649,15 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.9.4" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "litemap" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "local-ip-address" @@ -2496,25 +2667,24 @@ checksum = "656b3b27f8893f7bbf9485148ff9a65f019e3f33bd5cdc87c83cab16b3fd9ec8" dependencies = [ "libc", "neli", - "thiserror 2.0.12", + "thiserror 2.0.17", "windows-sys 0.59.0", ] [[package]] name = "lock_api" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" dependencies = [ - "autocfg", "scopeguard", ] [[package]] name = "log" -version = "0.4.27" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" dependencies = [ "value-bag", ] @@ -2525,7 +2695,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.4", + "hashbrown 0.15.5", ] [[package]] @@ -2536,9 +2706,9 @@ 
checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "miette" @@ -2567,7 +2737,7 @@ checksum = "db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -2589,24 +2759,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", + "simd-adler32", ] [[package]] name = "mio" -version = "1.0.4" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" +checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873" dependencies = [ "libc", - "wasi 0.11.1+wasi-snapshot-preview1", - "windows-sys 0.59.0", + "wasi", + "windows-sys 0.61.2", ] [[package]] name = "mockall" -version = "0.13.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39a6bfcc6c8c7eed5ee98b9c3e33adc726054389233e201c95dab2d41a3839d2" +checksum = "f58d964098a5f9c6b63d0798e5372fd04708193510a7af313c22e9f29b7b620b" dependencies = [ "cfg-if", "downcast", @@ -2618,14 +2789,14 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.13.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25ca3004c2efe9011bd4e461bd8256445052b9615405b4f7ea43fc8ca5c20898" +checksum = "ca41ce716dda6a9be188b385aa78ee5260fc25cd3802cb2a8afdc6afbe6b6dbf" dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -2657,7 +2828,7 
@@ dependencies = [ "percent-encoding", "serde", "serde_json", - "socket2", + "socket2 0.5.10", "twox-hash", "url", ] @@ -2668,14 +2839,14 @@ version = "0.31.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "63c3512cf11487168e0e9db7157801bf5273be13055a9cc95356dc9e0035e49c" dependencies = [ - "darling", + "darling 0.20.11", "heck", "num-bigint", "proc-macro-crate", "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", "termcolor", "thiserror 1.0.69", ] @@ -2689,7 +2860,7 @@ dependencies = [ "base64 0.21.7", "bigdecimal", "bindgen", - "bitflags 2.9.1", + "bitflags", "bitvec", "btoi", "byteorder", @@ -2788,12 +2959,25 @@ checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" [[package]] name = "nu-ansi-term" -version = "0.46.0" +version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "overload", - "winapi", + "windows-sys 0.61.2", +] + +[[package]] +name = "num" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" +dependencies = [ + "num-bigint", + "num-complex", + "num-integer", + "num-iter", + "num-rational", + "num-traits", ] [[package]] @@ -2806,6 +2990,15 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-complex" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", +] + [[package]] name = "num-conv" version = "0.1.0" @@ -2821,6 +3014,28 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", +] + [[package]] name = "num-traits" version = "0.2.19" @@ -2832,9 +3047,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.7" +version = "0.37.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe" dependencies = [ "memchr", ] @@ -2847,9 +3062,9 @@ checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "once_cell_polyfill" -version = "1.70.1" +version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" +checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" [[package]] name = "oorandom" @@ -2859,11 +3074,11 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "openssl" -version = "0.10.73" +version = "0.10.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" +checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" dependencies = [ - "bitflags 2.9.1", + "bitflags", "cfg-if", "foreign-types", "libc", @@ -2880,7 +3095,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -2891,9 +3106,9 @@ checksum = 
"d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-sys" -version = "0.9.109" +version = "0.9.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" +checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" dependencies = [ "cc", "libc", @@ -2902,16 +3117,20 @@ dependencies = [ ] [[package]] -name = "overload" -version = "0.1.1" +name = "owo-colors" +version = "4.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +checksum = "9c6901729fa79e91a0913333229e9ca5dc725089d1c363b2f4b4760709dc4a52" [[package]] -name = "owo-colors" -version = "4.2.1" +name = "page_size" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26995317201fa17f3656c36716aed4a7c81743a9634ac4c99c0eeda495db0cec" +checksum = "30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da" +dependencies = [ + "libc", + "winapi", +] [[package]] name = "parking" @@ -2921,9 +3140,9 @@ checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" dependencies = [ "lock_api", "parking_lot_core", @@ -2931,15 +3150,15 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.11" +version = "0.9.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.12", + 
"redox_syscall", "smallvec", - "windows-targets 0.52.6", + "windows-link", ] [[package]] @@ -2964,7 +3183,7 @@ dependencies = [ "regex", "regex-syntax", "structmeta", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -2987,24 +3206,24 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "pem" -version = "3.0.5" +version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" +checksum = "1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be" dependencies = [ "base64 0.22.1", - "serde", + "serde_core", ] [[package]] name = "percent-encoding" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "phf" @@ -3044,6 +3263,26 @@ dependencies = [ "siphasher", ] +[[package]] +name = "pin-project" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + [[package]] name = "pin-project-lite" version = "0.2.16" @@ -3103,17 +3342,16 @@ dependencies = [ [[package]] name = "polling" -version = "3.8.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b53a684391ad002dd6a596ceb6c74fd004fdce75f4be2e3f615068abbea5fd50" +checksum = "5d0e4f59085d47d8241c88ead0f274e8a0cb551f3625263c05eb8dd897c34218" dependencies 
= [ "cfg-if", "concurrent-queue", "hermit-abi", "pin-project-lite", "rustix", - "tracing", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -3133,9 +3371,9 @@ dependencies = [ [[package]] name = "potential_utf" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5a7c30837279ca13e7c867e9e40053bc68740f988cb07f7ca6df43cc734b585" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" dependencies = [ "zerovec", ] @@ -3152,7 +3390,7 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.8.25", + "zerocopy 0.8.31", ] [[package]] @@ -3193,11 +3431,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit", + "toml_edit 0.23.7", ] [[package]] @@ -3219,14 +3457,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "proc-macro2" -version = "1.0.95" +version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" dependencies = [ "unicode-ident", ] @@ -3239,11 +3477,43 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", "version_check", "yansi", ] +[[package]] +name = "prost" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"101fec8d036f8d9d4a1e8ebf90d566d1d798f3b1aa379d2576a54a0d9acea5bd" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2d93e596a829ebe00afa41c3a056e6308d6b8a4c7d869edf184e2c91b1ba564" +dependencies = [ + "anyhow", + "itertools 0.14.0", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "prost-types" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5d7b7346e150de32340ae3390b8b3ffa37ad93ec31fb5dad86afe817619e4e7" +dependencies = [ + "prost", +] + [[package]] name = "ptr_meta" version = "0.1.4" @@ -3277,18 +3547,18 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.40" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" dependencies = [ "proc-macro2", ] [[package]] name = "r-efi" -version = "5.2.0" +version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] name = "r2d2" @@ -3313,9 +3583,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35006423374afbd4b270acddcbf1e28e60f6bdaaad10c2888b8fd2fba035213c" +checksum = "63417e83dc891797eea3ad379f52a5986da4bca0d6ef28baf4d14034dd111b0c" dependencies = [ "r2d2", "rusqlite", @@ -3341,9 +3611,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", @@ -3384,14 +3654,14 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.3.4", ] [[package]] name = "rayon" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" dependencies = [ "either", "rayon-core", @@ -3399,9 +3669,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.12.1" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" dependencies = [ "crossbeam-deque", "crossbeam-utils", @@ -3409,27 +3679,38 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.3.5" +version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 1.3.2", + "bitflags", ] [[package]] -name = "redox_syscall" -version = "0.5.12" +name = "ref-cast" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" +checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" dependencies = [ - "bitflags 2.9.1", + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.25" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", ] [[package]] name = "regex" -version = "1.11.1" +version = "1.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" dependencies = [ "aho-corasick", "memchr", @@ -3439,9 +3720,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.9" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" dependencies = [ "aho-corasick", "memchr", @@ -3450,9 +3731,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.5" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "relative-path" @@ -3471,9 +3752,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.20" +version = "0.12.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabf4c97d9130e2bf606614eb937e86edac8292eaa6f422f995d7e8de1eb1813" +checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" dependencies = [ "base64 0.22.1", "bytes", @@ -3571,10 +3852,21 @@ checksum = "6fc39292f8613e913f7df8fa892b8944ceb47c247b78e1b1ae2f09e019be789d" dependencies = [ "futures-timer", "futures-util", - "rstest_macros", + "rstest_macros 0.25.0", "rustc_version", ] +[[package]] +name = "rstest" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "f5a3193c063baaa2a95a33f03035c8a72b83d97a54916055ba22d35ed3839d49" +dependencies = [ + "futures-timer", + "futures-util", + "rstest_macros 0.26.1", +] + [[package]] name = "rstest_macros" version = "0.25.0" @@ -3589,17 +3881,35 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.102", + "syn 2.0.111", + "unicode-ident", +] + +[[package]] +name = "rstest_macros" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c845311f0ff7951c5506121a9ad75aec44d083c31583b2ea5a30bcb0b0abba0" +dependencies = [ + "cfg-if", + "glob", + "proc-macro-crate", + "proc-macro2", + "quote", + "regex", + "relative-path", + "rustc_version", + "syn 2.0.111", "unicode-ident", ] [[package]] name = "rusqlite" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3de23c3319433716cf134eed225fe9986bc24f63bed9be9f20c329029e672dc7" +checksum = "165ca6e57b20e1351573e3729b958bc62f0e48025386970b6e4d29e7a7e71f3f" dependencies = [ - "bitflags 2.9.1", + "bitflags", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -3609,9 +3919,9 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.37.1" +version = "1.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faa7de2ba56ac291bd90c6b9bece784a52ae1411f9506544b3eae36dd2356d50" +checksum = "35affe401787a9bd846712274d97654355d21b2a2c092a3139aabe31e9022282" dependencies = [ "arrayvec", "borsh", @@ -3625,9 +3935,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.25" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" [[package]] name = "rustc-hash" @@ -3646,23 +3956,24 @@ dependencies = [ [[package]] name = "rustix" -version = "1.0.7" +version = "1.1.2" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" dependencies = [ - "bitflags 2.9.1", + "bitflags", "errno", "libc", "linux-raw-sys", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "rustls" -version = "0.23.27" +version = "0.23.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321" +checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" dependencies = [ + "log", "once_cell", "ring", "rustls-pki-types", @@ -3673,14 +3984,14 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923" dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.2.0", + "security-framework 3.5.1", ] [[package]] @@ -3694,18 +4005,18 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.12.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +checksum = "708c0f9d5f54ba0272468c1d306a52c495b31fa155e91bc25371e6df7996908c" dependencies = [ "zeroize", ] [[package]] name = "rustls-webpki" -version = "0.103.3" +version = "0.103.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4a72fe2bcf7a6ac6fd7d0b9e5cb68aeb7d4c0a0271730218b3e92d43b4eb435" +checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" dependencies = [ "ring", "rustls-pki-types", @@ -3714,9 +4025,9 @@ dependencies = [ [[package]] name = "rustversion" -version 
= "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" @@ -3741,11 +4052,11 @@ checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" [[package]] name = "schannel" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -3757,6 +4068,30 @@ dependencies = [ "parking_lot", ] +[[package]] +name = "schemars" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + +[[package]] +name = "schemars" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9558e172d4e8533736ba97870c4b2cd63f84b382a3d6eb063da41b91cce17289" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -3775,7 +4110,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.9.1", + "bitflags", "core-foundation 0.9.4", "core-foundation-sys", "libc", @@ -3784,11 +4119,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "3.2.0" +version = "3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" +checksum = 
"b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" dependencies = [ - "bitflags 2.9.1", + "bitflags", "core-foundation 0.10.1", "core-foundation-sys", "libc", @@ -3797,9 +4132,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.14.0" +version = "2.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" dependencies = [ "core-foundation-sys", "libc", @@ -3807,16 +4142,17 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" [[package]] name = "serde" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ + "serde_core", "serde_derive", ] @@ -3832,58 +4168,70 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.17" +version = "0.11.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8437fd221bde2d4ca316d61b90e337e9e702b3820b87d63caa9ba6c02bd06d96" +checksum = "a5d440709e79d88e51ac01c4b72fc6cb7314017bb7da9eeff678aa94c10e3ea8" dependencies = [ "serde", + "serde_core", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.228" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "serde_html_form" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4" +checksum = "b2f2d7ff8a2140333718bb329f5c40fc5f0865b84c426183ce14c97d2ab8154f" dependencies = [ "form_urlencoded", - "indexmap 2.9.0", + "indexmap 2.12.1", "itoa", "ryu", - "serde", + "serde_core", ] [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.12.1", "itoa", "memchr", "ryu", "serde", + "serde_core", ] [[package]] name = "serde_path_to_error" -version = "0.1.17" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a" +checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457" dependencies = [ "itoa", "serde", + "serde_core", ] [[package]] @@ -3894,7 +4242,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -3906,6 +4254,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e24345aa0fe688594e73770a5f6d1b216508b4f93484c0026d521acd30134392" +dependencies = [ + "serde_core", +] + [[package]] name = 
"serde_urlencoded" version = "0.7.1" @@ -3920,17 +4277,18 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.12.0" +version = "3.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa" +checksum = "4fa237f2807440d238e0364a218270b98f767a00d3dada77b1c53ae88940e2e7" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.9.0", - "serde", - "serde_derive", + "indexmap 2.12.1", + "schemars 0.9.0", + "schemars 1.1.0", + "serde_core", "serde_json", "serde_with_macros", "time", @@ -3938,14 +4296,14 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.12.0" +version = "3.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d00caa5193a3c8362ac2b73be6b9e768aa5a4b2f721d8f4b339600c3cb51f8e" +checksum = "52a8e3ca0ca629121f70ab50f95249e5a6f925cc0f6ffe8256c45b728875706c" dependencies = [ - "darling", + "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -3987,13 +4345,19 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.5" +version = "1.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" +checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad" dependencies = [ "libc", ] +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + [[package]] name = "simdutf8" version = "0.1.5" @@ -4008,12 +4372,9 @@ checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" [[package]] name = "slab" -version = "0.4.9" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" [[package]] name = "smallvec" @@ -4031,11 +4392,21 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "socket2" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + [[package]] name = "stable_deref_trait" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" [[package]] name = "static_assertions" @@ -4058,7 +4429,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -4069,7 +4440,7 @@ checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -4122,9 +4493,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.102" +version = "2.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6397daf94fa90f058bd0fd88429dd9e5738999cca8d701813c80723add80462" +checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" dependencies = [ "proc-macro2", "quote", @@ -4148,7 +4519,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -4157,7 +4528,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.9.1", + "bitflags", "core-foundation 
0.9.4", "system-configuration-sys", ] @@ -4197,15 +4568,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.20.0" +version = "3.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" dependencies = [ "fastrand", - "getrandom 0.3.3", + "getrandom 0.3.4", "once_cell", "rustix", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -4219,12 +4590,12 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed" +checksum = "60b8cb979cb11c32ce1603f8137b22262a9d131aaa5c37b5678025f22b8becd0" dependencies = [ "rustix", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -4235,18 +4606,20 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "testcontainers" -version = "0.24.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23bb7577dca13ad86a78e8271ef5d322f37229ec83b8d98da6d996c588a1ddb1" +checksum = "a347cac4368ba4f1871743adb27dc14829024d26b1763572404726b0b9943eb8" dependencies = [ + "astral-tokio-tar", "async-trait", "bollard", - "bollard-stubs", "bytes", "docker_credential", "either", "etcetera", + "ferroid", "futures", + "itertools 0.14.0", "log", "memchr", "parse-display", @@ -4254,10 +4627,9 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "tokio-stream", - "tokio-tar", "tokio-util", "url", ] @@ -4269,7 +4641,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c13547615a44dc9c452a8a534638acdf07120d4b6847c8178705da06306a3057" dependencies = [ "unicode-linebreak", - "unicode-width 0.2.1", + "unicode-width 
0.2.2", ] [[package]] @@ -4283,11 +4655,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.12" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" dependencies = [ - "thiserror-impl 2.0.12", + "thiserror-impl 2.0.17", ] [[package]] @@ -4298,35 +4670,34 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "thiserror-impl" -version = "2.0.12" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "thread_local" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" dependencies = [ "cfg-if", - "once_cell", ] [[package]] name = "time" -version = "0.3.41" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" dependencies = [ "deranged", "itoa", @@ -4339,15 +4710,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" +checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" 
[[package]] name = "time-macros" -version = "0.2.22" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" +checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" dependencies = [ "num-conv", "time-core", @@ -4355,9 +4726,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" dependencies = [ "displaydoc", "zerovec", @@ -4375,9 +4746,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" dependencies = [ "tinyvec_macros", ] @@ -4390,30 +4761,29 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.45.1" +version = "1.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779" +checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" dependencies = [ - "backtrace", "bytes", "libc", "mio", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.6.1", "tokio-macros", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] name = "tokio-macros" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -4428,9 +4798,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.2" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ "rustls", "tokio", @@ -4447,26 +4817,11 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-tar" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d5714c010ca3e5c27114c1cdeb9d14641ace49874aa5626d7149e47aedace75" -dependencies = [ - "filetime", - "futures-core", - "libc", - "redox_syscall 0.3.5", - "tokio", - "tokio-stream", - "xattr", -] - [[package]] name = "tokio-util" -version = "0.7.15" +version = "0.7.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" +checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" dependencies = [ "bytes", "futures-core", @@ -4482,9 +4837,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", - "serde_spanned", - "toml_datetime", - "toml_edit", + "serde_spanned 0.6.9", + "toml_datetime 0.6.11", + "toml_edit 0.22.27", +] + +[[package]] +name = "toml" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dc8b1fb61449e27716ec0e1bdf0f6b8f3e8f6b05391e8497b8b6d7804ea6d8" +dependencies = [ + "indexmap 2.12.1", + "serde_core", + "serde_spanned 1.0.3", + "toml_datetime 0.7.3", + "toml_parser", + "toml_writer", + "winnow", ] [[package]] @@ -4496,26 +4866,102 @@ dependencies = [ "serde", ] +[[package]] +name = "toml_datetime" +version = "0.7.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +dependencies = [ + "serde_core", +] + [[package]] name = "toml_edit" version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.9.0", + "indexmap 2.12.1", "serde", - "serde_spanned", - "toml_datetime", + "serde_spanned 0.6.9", + "toml_datetime 0.6.11", "toml_write", "winnow", ] +[[package]] +name = "toml_edit" +version = "0.23.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" +dependencies = [ + "indexmap 2.12.1", + "toml_datetime 0.7.3", + "toml_parser", + "winnow", +] + +[[package]] +name = "toml_parser" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +dependencies = [ + "winnow", +] + [[package]] name = "toml_write" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" +[[package]] +name = "toml_writer" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2" + +[[package]] +name = "tonic" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203" +dependencies = [ + "async-trait", + "axum", + "base64 0.22.1", + "bytes", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "socket2 0.6.1", + "sync_wrapper", + "tokio", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + 
"tracing", +] + +[[package]] +name = "tonic-prost" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" +dependencies = [ + "bytes", + "prost", + "tonic", +] + [[package]] name = "torrust-axum-health-check-api-server" version = "3.0.0-develop" @@ -4561,7 +5007,7 @@ dependencies = [ "hyper", "local-ip-address", "percent-encoding", - "rand 0.9.1", + "rand 0.9.2", "reqwest", "serde", "serde_bencode", @@ -4605,7 +5051,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "torrust-axum-server", "torrust-rest-tracker-api-client", @@ -4636,7 +5082,7 @@ dependencies = [ "hyper", "hyper-util", "pin-project-lite", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "torrust-server-lib", "torrust-tracker-configuration", @@ -4652,7 +5098,7 @@ dependencies = [ "hyper", "reqwest", "serde", - "thiserror 2.0.12", + "thiserror 2.0.17", "url", "uuid", ] @@ -4680,7 +5126,7 @@ name = "torrust-server-lib" version = "3.0.0-develop" dependencies = [ "derive_more", - "rstest", + "rstest 0.25.0", "tokio", "torrust-tracker-primitives", "tower-http", @@ -4702,12 +5148,12 @@ dependencies = [ "clap", "local-ip-address", "mockall", - "rand 0.9.1", + "rand 0.9.2", "regex", "reqwest", "serde", "serde_json", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "tokio-util", "torrust-axum-health-check-api-server", @@ -4743,7 +5189,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_json", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "torrust-tracker-configuration", "tracing", @@ -4771,8 +5217,8 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.12", - "toml", + "thiserror 2.0.17", + "toml 0.9.8", "torrust-tracker-located-error", "tracing", "tracing-subscriber", @@ -4784,8 +5230,8 @@ dependencies = [ name = "torrust-tracker-contrib-bencode" version = "3.0.0-develop" dependencies = [ - 
"criterion 0.6.0", - "thiserror 2.0.12", + "criterion 0.8.0", + "thiserror 2.0.17", ] [[package]] @@ -4801,7 +5247,7 @@ dependencies = [ name = "torrust-tracker-located-error" version = "3.0.0-develop" dependencies = [ - "thiserror 2.0.12", + "thiserror 2.0.17", "tracing", ] @@ -4814,10 +5260,10 @@ dependencies = [ "derive_more", "formatjson", "pretty_assertions", - "rstest", + "rstest 0.25.0", "serde", "serde_json", - "thiserror 2.0.12", + "thiserror 2.0.17", "torrust-tracker-primitives", "tracing", ] @@ -4830,11 +5276,11 @@ dependencies = [ "binascii", "bittorrent-primitives", "derive_more", - "rstest", + "rstest 0.25.0", "serde", "tdyne-peer-id", "tdyne-peer-id-registry", - "thiserror 2.0.12", + "thiserror 2.0.17", "torrust-tracker-configuration", "url", "zerocopy 0.7.35", @@ -4848,14 +5294,14 @@ dependencies = [ "async-std", "bittorrent-primitives", "chrono", - "criterion 0.6.0", + "criterion 0.8.0", "crossbeam-skiplist", "futures", "mockall", - "rand 0.9.1", - "rstest", + "rand 0.9.2", + "rstest 0.26.1", "serde", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "tokio-util", "torrust-tracker-clock", @@ -4871,7 +5317,7 @@ dependencies = [ name = "torrust-tracker-test-helpers" version = "3.0.0-develop" dependencies = [ - "rand 0.9.1", + "rand 0.9.2", "torrust-tracker-configuration", "tracing", "tracing-subscriber", @@ -4884,12 +5330,12 @@ dependencies = [ "aquatic_udp_protocol", "async-std", "bittorrent-primitives", - "criterion 0.6.0", + "criterion 0.8.0", "crossbeam-skiplist", "dashmap", "futures", "parking_lot", - "rstest", + "rstest 0.26.1", "tokio", "torrust-tracker-clock", "torrust-tracker-configuration", @@ -4911,10 +5357,10 @@ dependencies = [ "futures-util", "local-ip-address", "mockall", - "rand 0.9.1", + "rand 0.9.2", "ringbuf", "serde", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "tokio-util", "torrust-server-lib", @@ -4939,9 +5385,12 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ 
"futures-core", "futures-util", + "indexmap 2.12.1", "pin-project-lite", + "slab", "sync_wrapper", "tokio", + "tokio-util", "tower-layer", "tower-service", "tracing", @@ -4949,12 +5398,12 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +checksum = "9cf146f99d442e8e68e585f5d798ccd3cad9a7835b917e09728880a862706456" dependencies = [ "async-compression", - "bitflags 2.9.1", + "bitflags", "bytes", "futures-core", "futures-util", @@ -4985,9 +5434,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.41" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647" dependencies = [ "log", "pin-project-lite", @@ -4997,20 +5446,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.29" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1ffbcf9c6f6b99d386e7444eb608ba646ae452a36b39737deb9663b610f662" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "tracing-core" -version = "0.1.34" +version = "0.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c" dependencies = [ "once_cell", "valuable", @@ -5039,9 +5488,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.19" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" dependencies = [ "nu-ansi-term", "serde", @@ -5073,9 +5522,9 @@ dependencies = [ [[package]] name = "typenum" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" [[package]] name = "uncased" @@ -5088,9 +5537,9 @@ dependencies = [ [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" [[package]] name = "unicode-linebreak" @@ -5106,9 +5555,9 @@ checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "unicode-width" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a1a07cc7db3810833284e8d372ccdc6da29741639ecc70c9ec107df0fa6154c" +checksum = "b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254" [[package]] name = "unicode-xid" @@ -5122,11 +5571,39 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" +[[package]] +name = "ureq" +version = "3.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d39cb1dbab692d82a977c0392ffac19e188bd9186a9f32806f0aaa859d75585a" +dependencies = [ + "base64 0.22.1", + "log", + "percent-encoding", + "rustls", + "rustls-pki-types", + "ureq-proto", + "utf-8", + "webpki-roots", +] + +[[package]] +name = "ureq-proto" +version = "0.5.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d81f9efa9df032be5934a46a068815a10a042b494b6a58cb0a1a97bb5467ed6f" +dependencies = [ + "base64 0.22.1", + "http", + "httparse", + "log", +] + [[package]] name = "url" -version = "2.5.4" +version = "2.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" dependencies = [ "form_urlencoded", "idna", @@ -5134,6 +5611,12 @@ dependencies = [ "serde", ] +[[package]] +name = "utf-8" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" + [[package]] name = "utf8_iter" version = "1.0.4" @@ -5148,13 +5631,13 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.17.0" +version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" +checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.3.4", "js-sys", - "rand 0.9.1", + "rand 0.9.2", "wasm-bindgen", ] @@ -5166,9 +5649,9 @@ checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" [[package]] name = "value-bag" -version = "1.11.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "943ce29a8a743eb10d6082545d861b24f9d1b160b7d741e0f2cdf726bec909c5" +checksum = "7ba6f5989077681266825251a52748b8c1d8a4ad098cc37e440103d0ea717fc0" [[package]] name = "vcpkg" @@ -5208,45 +5691,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] -name = "wasi" -version = "0.14.2+wasi-0.2.4" +name = 
"wasip2" +version = "1.0.1+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" dependencies = [ - "wit-bindgen-rt", + "wit-bindgen", ] [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" dependencies = [ "cfg-if", "once_cell", "rustversion", "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" -dependencies = [ - "bumpalo", - "log", - "proc-macro2", - "quote", - "syn 2.0.102", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.50" +version = "0.4.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" dependencies = [ "cfg-if", "js-sys", @@ -5257,9 +5727,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -5267,36 +5737,55 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" dependencies = [ + "bumpalo", "proc-macro2", "quote", - "syn 2.0.102", - "wasm-bindgen-backend", + "syn 2.0.111", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" dependencies = [ "unicode-ident", ] [[package]] name = "web-sys" -version = "0.3.77" +version = "0.3.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" dependencies = [ "js-sys", "wasm-bindgen", ] +[[package]] +name = "webpki-roots" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "winapi" version = "0.3.9" @@ -5315,11 +5804,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.9" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -5330,9 +5819,9 
@@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.61.2" +version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ "windows-implement", "windows-interface", @@ -5343,37 +5832,37 @@ dependencies = [ [[package]] name = "windows-implement" -version = "0.60.0" +version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "windows-interface" -version = "0.59.1" +version = "0.59.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "windows-link" -version = "0.1.1" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] name = "windows-registry" -version = "0.5.2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3bab093bdd303a1240bb99b8aba8ea8a69ee19d34c9e2ef9594e708a4878820" +checksum = "02752bf7fbdcce7f2a27a742f798510f3e5ad88dbe84871e5168e2120c3d5720" dependencies = [ "windows-link", "windows-result", @@ -5382,18 +5871,18 @@ dependencies = [ [[package]] name = "windows-result" -version = "0.3.4" +version = "0.4.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ "windows-link", ] [[package]] name = "windows-strings" -version = "0.4.2" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ "windows-link", ] @@ -5416,6 +5905,24 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-targets" version = "0.52.6" @@ -5434,18 +5941,19 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.53.0" +version = "0.53.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" dependencies = [ - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - "windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 0.53.0", + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 
0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", ] [[package]] @@ -5456,9 +5964,9 @@ checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" [[package]] name = "windows_aarch64_msvc" @@ -5468,9 +5976,9 @@ checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_aarch64_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" [[package]] name = "windows_i686_gnu" @@ -5480,9 +5988,9 @@ checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnu" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" [[package]] name = "windows_i686_gnullvm" @@ -5492,9 +6000,9 @@ checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" [[package]] name = "windows_i686_msvc" @@ -5504,9 +6012,9 @@ checksum = 
"240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_i686_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" [[package]] name = "windows_x86_64_gnu" @@ -5516,9 +6024,9 @@ checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnu" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" [[package]] name = "windows_x86_64_gnullvm" @@ -5528,9 +6036,9 @@ checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" [[package]] name = "windows_x86_64_msvc" @@ -5540,33 +6048,30 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "windows_x86_64_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] name = "winnow" -version = "0.7.11" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd" +checksum = 
"5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" dependencies = [ "memchr", ] [[package]] -name = "wit-bindgen-rt" -version = "0.39.0" +name = "wit-bindgen" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" -dependencies = [ - "bitflags 2.9.1", -] +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" [[package]] name = "writeable" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" [[package]] name = "wyz" @@ -5579,9 +6084,9 @@ dependencies = [ [[package]] name = "xattr" -version = "1.5.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d65cbf2f12c15564212d48f4e3dfb87923d25d611f2aed18f4cb23f0413d89e" +checksum = "32e45ad4206f6d2479085147f02bc2ef834ac85886624a23575ae137c8aa8156" dependencies = [ "libc", "rustix", @@ -5595,11 +6100,10 @@ checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" dependencies = [ - "serde", "stable_deref_trait", "yoke-derive", "zerofrom", @@ -5607,13 +6111,13 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + 
"syn 2.0.111", "synstructure", ] @@ -5629,11 +6133,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.25" +version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" +checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" dependencies = [ - "zerocopy-derive 0.8.25", + "zerocopy-derive 0.8.31", ] [[package]] @@ -5644,18 +6148,18 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] name = "zerocopy-derive" -version = "0.8.25" +version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" +checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -5675,21 +6179,21 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", "synstructure", ] [[package]] name = "zeroize" -version = "1.8.1" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" [[package]] name = "zerotrie" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" dependencies = [ "displaydoc", "yoke", @@ -5698,9 +6202,9 @@ dependencies = [ [[package]] name = "zerovec" -version = "0.11.2" +version = "0.11.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a05eb080e015ba39cc9e23bbe5e7fb04d5fb040350f99f34e338d5fdd294428" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" dependencies = [ "yoke", "zerofrom", @@ -5709,13 +6213,13 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.102", + "syn 2.0.111", ] [[package]] @@ -5738,9 +6242,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.15+zstd.1.5.7" +version = "2.0.16+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" dependencies = [ "cc", "pkg-config", From 00db8233574ccfc07f6de996da3fd9c3c3a19b67 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 2 Dec 2025 09:55:01 +0000 Subject: [PATCH 758/802] chore(deps): bump actions/checkout from 4 to 6 --- .github/workflows/container.yaml | 2 +- .github/workflows/coverage.yaml | 2 +- .github/workflows/deployment.yaml | 4 ++-- .github/workflows/generate_coverage_pr.yaml | 2 +- .github/workflows/labels.yaml | 2 +- .github/workflows/testing.yaml | 10 +++++----- .github/workflows/upload_coverage_pr.yaml | 2 +- 7 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index 9f51f3124..7416df71e 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -46,7 +46,7 @@ jobs: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: compose name: Compose diff --git 
a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index e10c5ac66..2c8d63d6c 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -19,7 +19,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Install LLVM tools run: sudo apt-get update && sudo apt-get install -y llvm diff --git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 4e8fd579b..b544d1da2 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -17,7 +17,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: setup name: Setup Toolchain @@ -42,7 +42,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: setup name: Setup Toolchain diff --git a/.github/workflows/generate_coverage_pr.yaml b/.github/workflows/generate_coverage_pr.yaml index d1b241b9d..8363376b2 100644 --- a/.github/workflows/generate_coverage_pr.yaml +++ b/.github/workflows/generate_coverage_pr.yaml @@ -19,7 +19,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Install LLVM tools run: sudo apt-get update && sudo apt-get install -y llvm diff --git a/.github/workflows/labels.yaml b/.github/workflows/labels.yaml index bb8283f30..a312c335f 100644 --- a/.github/workflows/labels.yaml +++ b/.github/workflows/labels.yaml @@ -25,7 +25,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: sync name: Apply Labels from File diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index 671864fc9..c9328d890 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -15,7 +15,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - 
id: setup name: Setup Toolchain @@ -44,7 +44,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: setup name: Setup Toolchain @@ -96,7 +96,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: setup name: Setup Toolchain @@ -119,7 +119,7 @@ jobs: steps: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: setup name: Setup Toolchain @@ -173,7 +173,7 @@ jobs: - id: checkout name: Checkout Repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - id: test name: Run E2E Tests diff --git a/.github/workflows/upload_coverage_pr.yaml b/.github/workflows/upload_coverage_pr.yaml index 1ed2f7bcc..a2a3c82a6 100644 --- a/.github/workflows/upload_coverage_pr.yaml +++ b/.github/workflows/upload_coverage_pr.yaml @@ -96,7 +96,7 @@ jobs: echo "override_commit=$(> "$GITHUB_OUTPUT" - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: ref: ${{ steps.parse_previous_artifacts.outputs.override_commit || '' }} path: repo_root From 6757705ab631c4418eca9cb75b5bc24db1e84ee5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 2 Dec 2025 09:57:21 +0000 Subject: [PATCH 759/802] chore(deps): bump actions/upload-artifact from 4 to 5 --- .github/workflows/generate_coverage_pr.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/generate_coverage_pr.yaml b/.github/workflows/generate_coverage_pr.yaml index 8363376b2..6942e276f 100644 --- a/.github/workflows/generate_coverage_pr.yaml +++ b/.github/workflows/generate_coverage_pr.yaml @@ -59,13 +59,13 @@ jobs: # Triggered sub-workflow is not able to detect the original commit/PR which is available # in this workflow. 
- name: Store PR number - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: pr_number path: pr_number.txt - name: Store commit SHA - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: commit_sha path: commit_sha.txt @@ -74,7 +74,7 @@ jobs: # is executed by a different workflow `upload_coverage.yml`. The reason for this # split is because `on.pull_request` workflows don't have access to secrets. - name: Store coverage report in artifacts - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: codecov_report path: ./codecov.json From 46b245004abea6bbf16afdadadbf9baa617797a0 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 2 Dec 2025 09:57:29 +0000 Subject: [PATCH 760/802] chore(deps): bump actions/github-script from 7 to 8 --- .github/workflows/upload_coverage_pr.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/upload_coverage_pr.yaml b/.github/workflows/upload_coverage_pr.yaml index a2a3c82a6..8b0006a6d 100644 --- a/.github/workflows/upload_coverage_pr.yaml +++ b/.github/workflows/upload_coverage_pr.yaml @@ -22,7 +22,7 @@ jobs: steps: - name: "Download existing coverage report" id: prepare_report - uses: actions/github-script@v7 + uses: actions/github-script@v8 with: script: | var fs = require('fs'); From 6cb7cdd932385a160c8b2da76842909a2277b04b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 2 Dec 2025 10:03:07 +0000 Subject: [PATCH 761/802] chore(deps): update Cargo dependencies MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - tokio: 1.45.1 → 1.48.0 - reqwest: 0.12.20 → 0.12.24 - clap: 4.5.40 → 4.5.53 - tracing-subscriber: 0.3.19 → 0.3.22 - ringbuf: 0.4.4 → 0.4.8 - uuid: 1.18.1 → 1.19.0 - and other transitive dependencies Related dependabot PRs: - #1629 (tokio) - #1630 (reqwest) - #1623 (clap) - #1614 (tracing-subscriber) - #1604 (ringbuf) --- Cargo.lock | 16 ++++++++-------- 1 file 
changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 62d10c72f..952e1d8a6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3484,9 +3484,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.14.2" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "101fec8d036f8d9d4a1e8ebf90d566d1d798f3b1aa379d2576a54a0d9acea5bd" +checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" dependencies = [ "bytes", "prost-derive", @@ -3494,9 +3494,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.14.2" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2d93e596a829ebe00afa41c3a056e6308d6b8a4c7d869edf184e2c91b1ba564" +checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" dependencies = [ "anyhow", "itertools 0.14.0", @@ -3507,9 +3507,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.14.2" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5d7b7346e150de32340ae3390b8b3ffa37ad93ec31fb5dad86afe817619e4e7" +checksum = "b9b4db3d6da204ed77bb26ba83b6122a73aeb2e87e25fbf7ad2e84c4ccbf8f72" dependencies = [ "prost", ] @@ -5631,9 +5631,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.18.1" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" +checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" dependencies = [ "getrandom 0.3.4", "js-sys", From a2f9657ddb1d773d1067132dc0f7a2207b99c2c9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:26:22 +0000 Subject: [PATCH 762/802] chore(deps): update dependencies ``` cargo update Updating crates.io index Locking 49 packages to latest compatible versions Updating 
async-compression v0.4.34 -> v0.4.36 Updating async-lock v3.4.1 -> v3.4.2 Updating axum v0.8.7 -> v0.8.8 Updating axum-extra v0.12.2 -> v0.12.3 Updating axum-server v0.7.3 -> v0.8.0 Updating bumpalo v3.19.0 -> v3.19.1 Updating cc v1.2.48 -> v1.2.50 Updating cmake v0.1.54 -> v0.1.57 Updating compression-codecs v0.4.33 -> v0.4.35 Adding convert_case v0.10.0 Updating criterion v0.8.0 -> v0.8.1 Updating criterion-plot v0.8.0 -> v0.8.1 Adding derive_builder v0.20.2 Adding derive_builder_core v0.20.2 Adding derive_builder_macro v0.20.2 Updating derive_more v2.0.1 -> v2.1.0 Updating derive_more-impl v2.0.1 -> v2.1.0 Updating ferroid v0.8.7 -> v0.8.8 Updating fs-err v3.2.0 -> v3.2.1 Adding getset v0.1.6 Updating hyper-util v0.1.18 -> v0.1.19 Updating icu_properties v2.1.1 -> v2.1.2 Updating icu_properties_data v2.1.1 -> v2.1.2 Updating itoa v1.0.15 -> v1.0.16 Updating libc v0.2.177 -> v0.2.178 Updating libredox v0.1.10 -> v0.1.11 Updating local-ip-address v0.6.5 -> v0.6.8 Updating log v0.4.28 -> v0.4.29 Updating mio v1.1.0 -> v1.1.1 Updating neli v0.6.5 -> v0.7.3 Updating neli-proc-macros v0.1.4 -> v0.2.2 Updating portable-atomic v1.11.1 -> v1.12.0 Adding redox_syscall v0.6.0 Updating reqwest v0.12.24 -> v0.12.26 Updating rustls-pki-types v1.13.1 -> v1.13.2 Updating ryu v1.0.20 -> v1.0.21 Updating serde_spanned v1.0.3 -> v1.0.4 Updating simd-adler32 v0.3.7 -> v0.3.8 Updating supports-hyperlinks v3.1.0 -> v3.2.0 Updating testcontainers v0.26.0 -> v0.26.2 Updating toml v0.9.8 -> v0.9.10+spec-1.1.0 Updating toml_datetime v0.7.3 -> v0.7.5+spec-1.1.0 Updating toml_edit v0.23.7 -> v0.23.10+spec-1.0.0 Updating toml_parser v1.0.4 -> v1.0.6+spec-1.1.0 Updating toml_writer v1.0.4 -> v1.0.6+spec-1.1.0 Updating tower-http v0.6.7 -> v0.6.8 Updating tracing v0.1.43 -> v0.1.44 Updating tracing-core v0.1.35 -> v0.1.36 Adding unicode-segmentation v1.12.0 Removing windows-sys v0.59.0 note: pass `--verbose` to see 7 unchanged dependencies behind latest ``` --- Cargo.lock | 278 
++++++++++++++++++++++++++++++++--------------------- 1 file changed, 171 insertions(+), 107 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 952e1d8a6..da0910f48 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -236,9 +236,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.34" +version = "0.4.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e86f6d3dc9dc4352edeea6b8e499e13e3f5dc3b964d7ca5fd411415a3498473" +checksum = "98ec5f6c2f8bc326c994cb9e241cc257ddaba9afa8555a43cffbb5dd86efaa37" dependencies = [ "compression-codecs", "compression-core", @@ -297,9 +297,9 @@ dependencies = [ [[package]] name = "async-lock" -version = "3.4.1" +version = "3.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fd03604047cee9b6ce9de9f70c6cd540a0520c813cbd49bae61f33ab80ed1dc" +checksum = "290f7f2596bd5b78a9fec8088ccd89180d7f9f55b94b0576823bbbdc72ee8311" dependencies = [ "event-listener 5.4.1", "event-listener-strategy", @@ -395,9 +395,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "axum" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b098575ebe77cb6d14fc7f32749631a6e44edbef6b796f89b020e99ba20d425" +checksum = "8b52af3cb4058c895d37317bb27508dccc8e5f2d39454016b297bf4a400597b8" dependencies = [ "axum-core", "axum-macros", @@ -459,9 +459,9 @@ dependencies = [ [[package]] name = "axum-extra" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbfe9f610fe4e99cf0cfcd03ccf8c63c28c616fe714d80475ef731f3b13dd21b" +checksum = "6dfbd6109d91702d55fc56df06aae7ed85c465a7a451db6c0e54a4b9ca5983d1" dependencies = [ "axum", "axum-core", @@ -495,12 +495,13 @@ dependencies = [ [[package]] name = "axum-server" -version = "0.7.3" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c1ab4a3ec9ea8a657c72d99a03a824af695bd0fb5ec639ccbd9cd3543b41a5f9" +checksum = "b1df331683d982a0b9492b38127151e6453639cd34926eb9c07d4cd8c6d22bfc" dependencies = [ "arc-swap", "bytes", + "either", "fs-err", "http", "http-body", @@ -508,7 +509,6 @@ dependencies = [ "hyper-util", "pin-project-lite", "rustls", - "rustls-pemfile", "rustls-pki-types", "tokio", "tokio-rustls", @@ -948,9 +948,9 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.19.0" +version = "3.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" +checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" [[package]] name = "bytecheck" @@ -1018,9 +1018,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.48" +version = "1.2.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c481bdbf0ed3b892f6f806287d72acd515b352a4ec27a208489b8c1bc839633a" +checksum = "9f50d563227a1c37cc0a263f64eca3334388c01c5e4c4861a9def205c614383c" dependencies = [ "find-msvc-tools", "jobserver", @@ -1151,9 +1151,9 @@ checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" [[package]] name = "cmake" -version = "0.1.54" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" +checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" dependencies = [ "cc", ] @@ -1179,9 +1179,9 @@ dependencies = [ [[package]] name = "compression-codecs" -version = "0.4.33" +version = "0.4.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "302266479cb963552d11bd042013a58ef1adc56768016c8b82b4199488f2d4ad" +checksum = "b0f7ac3e5b97fdce45e8922fb05cae2c37f7bbd63d30dd94821dacfd8f3f2bf2" dependencies = [ "brotli", "compression-core", @@ -1206,6 +1206,15 @@ 
dependencies = [ "crossbeam-utils", ] +[[package]] +name = "convert_case" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9" +dependencies = [ + "unicode-segmentation", +] + [[package]] name = "core-foundation" version = "0.9.4" @@ -1280,16 +1289,16 @@ dependencies = [ [[package]] name = "criterion" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0dfe5e9e71bdcf4e4954f7d14da74d1cdb92a3a07686452d1509652684b1aab" +checksum = "4d883447757bb0ee46f233e9dc22eb84d93a9508c9b868687b274fc431d886bf" dependencies = [ "alloca", "anes", "cast", "ciborium", "clap", - "criterion-plot 0.8.0", + "criterion-plot 0.8.1", "itertools 0.13.0", "num-traits", "oorandom", @@ -1316,9 +1325,9 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de36c2bee19fba779808f92bf5d9b0fa5a40095c277aba10c458a12b35d21d6" +checksum = "ed943f81ea2faa8dcecbbfa50164acf95d555afec96a27871663b300e387b2e4" dependencies = [ "cast", "itertools 0.13.0", @@ -1500,23 +1509,56 @@ dependencies = [ "serde_core", ] +[[package]] +name = "derive_builder" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" +dependencies = [ + "darling 0.20.11", + "proc-macro2", + "quote", + "syn 2.0.111", +] + +[[package]] +name = "derive_builder_macro" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" +dependencies = [ + "derive_builder_core", + "syn 2.0.111", +] + [[package]] name = "derive_more" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +checksum = "10b768e943bed7bf2cab53df09f4bc34bfd217cdb57d971e769874c9a6710618" dependencies = [ "derive_more-impl", ] [[package]] name = "derive_more-impl" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +checksum = "6d286bfdaf75e988b4a78e013ecd79c581e06399ab53fbacd2d916c2f904f30b" dependencies = [ + "convert_case", "proc-macro2", "quote", + "rustc_version", "syn 2.0.111", "unicode-xid", ] @@ -1680,9 +1722,9 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "ferroid" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0e9414a6ae93ef993ce40a1e02944f13d4508e2bf6f1ced1580ce6910f08253" +checksum = "ce161062fb044bd629c2393590efd47cab8d0241faf15704ffb0d47b7b4e4a35" dependencies = [ "portable-atomic", "rand 0.9.2", @@ -1854,9 +1896,9 @@ dependencies = [ [[package]] name = "fs-err" -version = "3.2.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62d91fd049c123429b018c47887d3f75a265540dd3c30ba9cb7bae9197edb03a" +checksum = "824f08d01d0f496b3eca4f001a13cf17690a6ee930043d20817f547455fd98f8" dependencies = [ "autocfg", "tokio", @@ -2009,6 +2051,18 @@ dependencies = [ "wasip2", ] +[[package]] +name = "getset" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf0fc11e47561d47397154977bc219f4cf809b2974facc3ccb3b89e2436f912" +dependencies = [ + "proc-macro-error2", + "proc-macro2", + "quote", + 
"syn 2.0.111", +] + [[package]] name = "gimli" version = "0.32.3" @@ -2267,9 +2321,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e9a2a24dc5c6821e71a7030e1e14b7b632acac55c40e9d2e082c621261bb56" +checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" dependencies = [ "base64 0.22.1", "bytes", @@ -2378,9 +2432,9 @@ checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" [[package]] name = "icu_properties" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" +checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" dependencies = [ "icu_collections", "icu_locale_core", @@ -2392,9 +2446,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" +checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" [[package]] name = "icu_provider" @@ -2553,9 +2607,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.15" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "7ee5b5339afb4c41626dde77b7a611bd4f2c202b897852b4bcf5d03eddc61010" [[package]] name = "jobserver" @@ -2594,9 +2648,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.177" +version = "0.2.178" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" +checksum = 
"37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" [[package]] name = "libloading" @@ -2616,13 +2670,13 @@ checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "libredox" -version = "0.1.10" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" +checksum = "df15f6eac291ed1cf25865b1ee60399f57e7c227e7f51bdbd4c5270396a9ed50" dependencies = [ "bitflags", "libc", - "redox_syscall", + "redox_syscall 0.6.0", ] [[package]] @@ -2661,14 +2715,14 @@ checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "local-ip-address" -version = "0.6.5" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "656b3b27f8893f7bbf9485148ff9a65f019e3f33bd5cdc87c83cab16b3fd9ec8" +checksum = "0a60bf300a990b2d1ebdde4228e873e8e4da40d834adbf5265f3da1457ede652" dependencies = [ "libc", "neli", "thiserror 2.0.17", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -2682,9 +2736,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" dependencies = [ "value-bag", ] @@ -2764,9 +2818,9 @@ dependencies = [ [[package]] name = "mio" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" dependencies = [ "libc", "wasi", @@ -2918,27 +2972,31 @@ dependencies = [ [[package]] name = "neli" -version = "0.6.5" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "93062a0dce6da2517ea35f301dfc88184ce18d3601ec786a727a87bf535deca9" +checksum = "e23bebbf3e157c402c4d5ee113233e5e0610cc27453b2f07eefce649c7365dcc" dependencies = [ + "bitflags", "byteorder", + "derive_builder", + "getset", "libc", "log", "neli-proc-macros", + "parking_lot", ] [[package]] name = "neli-proc-macros" -version = "0.1.4" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c8034b7fbb6f9455b2a96c19e6edf8dc9fc34c70449938d8ee3b4df363f61fe" +checksum = "05d8d08c6e98f20a62417478ebf7be8e1425ec9acecc6f63e22da633f6b71609" dependencies = [ "either", "proc-macro2", "quote", "serde", - "syn 1.0.109", + "syn 2.0.111", ] [[package]] @@ -3156,7 +3214,7 @@ checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.5.18", "smallvec", "windows-link", ] @@ -3356,9 +3414,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.11.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" +checksum = "f59e70c4aef1e55797c2e8fd94a4f2a973fc972cfde0e0b05f683667b0cd39dd" [[package]] name = "portable-atomic-util" @@ -3435,7 +3493,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit 0.23.7", + "toml_edit 0.23.10+spec-1.0.0", ] [[package]] @@ -3686,6 +3744,15 @@ dependencies = [ "bitflags", ] +[[package]] +name = "redox_syscall" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec96166dafa0886eb81fe1c0a388bece180fbef2135f97c1e2cf8302e74b43b5" +dependencies = [ + "bitflags", +] + [[package]] name = "ref-cast" version = "1.0.25" @@ -3752,9 +3819,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.24" +version = 
"0.12.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" +checksum = "3b4c14b2d9afca6a60277086b0cc6a6ae0b568f6f7916c943a8cdc79f8be240f" dependencies = [ "base64 0.22.1", "bytes", @@ -4005,9 +4072,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "708c0f9d5f54ba0272468c1d306a52c495b31fa155e91bc25371e6df7996908c" +checksum = "21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282" dependencies = [ "zeroize", ] @@ -4031,9 +4098,9 @@ checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +checksum = "62049b2877bf12821e8f9ad256ee38fdc31db7387ec2d3b3f403024de2034aea" [[package]] name = "same-file" @@ -4256,9 +4323,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e24345aa0fe688594e73770a5f6d1b216508b4f93484c0026d521acd30134392" +checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776" dependencies = [ "serde_core", ] @@ -4354,9 +4421,9 @@ dependencies = [ [[package]] name = "simd-adler32" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" +checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" [[package]] name = "simdutf8" @@ -4470,9 +4537,9 @@ dependencies = [ [[package]] name = "supports-hyperlinks" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"804f44ed3c63152de6a9f90acbea1a110441de43006ea51bcce8f436196a288b" +checksum = "e396b6523b11ccb83120b115a0b7366de372751aa6edf19844dfb13a6af97e91" [[package]] name = "supports-unicode" @@ -4606,9 +4673,9 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "testcontainers" -version = "0.26.0" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a347cac4368ba4f1871743adb27dc14829024d26b1763572404726b0b9943eb8" +checksum = "1483605f58b2fff80d786eb56a0b6b4e8b1e5423fbc9ec2e3e562fa2040d6f27" dependencies = [ "astral-tokio-tar", "async-trait", @@ -4844,14 +4911,14 @@ dependencies = [ [[package]] name = "toml" -version = "0.9.8" +version = "0.9.10+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0dc8b1fb61449e27716ec0e1bdf0f6b8f3e8f6b05391e8497b8b6d7804ea6d8" +checksum = "0825052159284a1a8b4d6c0c86cbc801f2da5afd2b225fa548c72f2e74002f48" dependencies = [ "indexmap 2.12.1", "serde_core", - "serde_spanned 1.0.3", - "toml_datetime 0.7.3", + "serde_spanned 1.0.4", + "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "toml_writer", "winnow", @@ -4868,9 +4935,9 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.7.3" +version = "0.7.5+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" dependencies = [ "serde_core", ] @@ -4891,21 +4958,21 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.23.7" +version = "0.23.10+spec-1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" +checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" dependencies = [ "indexmap 2.12.1", - "toml_datetime 0.7.3", + "toml_datetime 0.7.5+spec-1.1.0", 
"toml_parser", "winnow", ] [[package]] name = "toml_parser" -version = "1.0.4" +version = "1.0.6+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" dependencies = [ "winnow", ] @@ -4918,9 +4985,9 @@ checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "toml_writer" -version = "1.0.4" +version = "1.0.6+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2" +checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" [[package]] name = "tonic" @@ -5218,7 +5285,7 @@ dependencies = [ "serde_json", "serde_with", "thiserror 2.0.17", - "toml 0.9.8", + "toml 0.9.10+spec-1.1.0", "torrust-tracker-located-error", "tracing", "tracing-subscriber", @@ -5230,7 +5297,7 @@ dependencies = [ name = "torrust-tracker-contrib-bencode" version = "3.0.0-develop" dependencies = [ - "criterion 0.8.0", + "criterion 0.8.1", "thiserror 2.0.17", ] @@ -5294,7 +5361,7 @@ dependencies = [ "async-std", "bittorrent-primitives", "chrono", - "criterion 0.8.0", + "criterion 0.8.1", "crossbeam-skiplist", "futures", "mockall", @@ -5330,7 +5397,7 @@ dependencies = [ "aquatic_udp_protocol", "async-std", "bittorrent-primitives", - "criterion 0.8.0", + "criterion 0.8.1", "crossbeam-skiplist", "dashmap", "futures", @@ -5398,9 +5465,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.7" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf146f99d442e8e68e585f5d798ccd3cad9a7835b917e09728880a862706456" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ "async-compression", "bitflags", @@ -5434,9 +5501,9 @@ checksum = 
"8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "log", "pin-project-lite", @@ -5457,9 +5524,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.35" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" dependencies = [ "once_cell", "valuable", @@ -5547,6 +5614,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" +[[package]] +name = "unicode-segmentation" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" + [[package]] name = "unicode-width" version = "0.1.14" @@ -5896,15 +5969,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-sys" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" -dependencies = [ - "windows-targets 0.52.6", -] - [[package]] name = "windows-sys" version = "0.60.2" From 4c16227bdaa03c236cc597b93dc49563224e0afe Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:35:13 +0000 Subject: [PATCH 763/802] fix: E0107 - missing generics for struct axum_server::Server in from_tcp_with_timeouts error[E0107]: missing generics for struct `axum_server::Server` --> packages/axum-server/src/custom_axum_server.rs:44:55 | 44 | pub fn 
from_tcp_with_timeouts(socket: TcpListener) -> Server { | ^^^^^^ expected at least 1 generic argument Added SocketAddr generic parameter to Server return type and Address trait bound to add_timeouts function. --- packages/axum-server/src/custom_axum_server.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/axum-server/src/custom_axum_server.rs b/packages/axum-server/src/custom_axum_server.rs index 5705ef24e..fccaf54dc 100644 --- a/packages/axum-server/src/custom_axum_server.rs +++ b/packages/axum-server/src/custom_axum_server.rs @@ -18,7 +18,7 @@ //! If you want to know more about Axum and timeouts see . use std::future::Ready; use std::io::ErrorKind; -use std::net::TcpListener; +use std::net::{SocketAddr, TcpListener}; use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; @@ -41,7 +41,7 @@ const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(5); const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(5); #[must_use] -pub fn from_tcp_with_timeouts(socket: TcpListener) -> Server { +pub fn from_tcp_with_timeouts(socket: TcpListener) -> Server { add_timeouts(axum_server::from_tcp(socket)) } @@ -50,7 +50,7 @@ pub fn from_tcp_rustls_with_timeouts(socket: TcpListener, tls: RustlsConfig) -> add_timeouts(axum_server::from_tcp_rustls(socket, tls)) } -fn add_timeouts(mut server: Server) -> Server { +fn add_timeouts(mut server: Server) -> Server { server.http_builder().http1().timer(TokioTimer::new()); server.http_builder().http2().timer(TokioTimer::new()); From 51452a8b2a7aee04822e90651179c2b0a8bb031f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:38:42 +0000 Subject: [PATCH 764/802] fix: E0277 and E0308 - RustlsAcceptor trait bounds and type mismatch in from_tcp_rustls_with_timeouts error[E0277]: the trait bound `RustlsAcceptor: Address` is not satisfied --> packages/axum-server/src/custom_axum_server.rs:49:81 | 49 | ... 
tls: RustlsConfig) -> Server { | ^^^^^^^^^^^^^^^^^^^^^^ unsatisfied trait bound error[E0308]: mismatched types --> packages/axum-server/src/custom_axum_server.rs:50:18 | 50 | add_timeouts(axum_server::from_tcp_rustls(socket, tls)) | ------------ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `Server`, found `Result, ...>` Changed return type to Result, std::io::Error> and used map to apply add_timeouts to the Result value. --- packages/axum-server/src/custom_axum_server.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/packages/axum-server/src/custom_axum_server.rs b/packages/axum-server/src/custom_axum_server.rs index fccaf54dc..39a2271d6 100644 --- a/packages/axum-server/src/custom_axum_server.rs +++ b/packages/axum-server/src/custom_axum_server.rs @@ -36,6 +36,8 @@ use tokio::sync::mpsc::{self, UnboundedReceiver, UnboundedSender}; use tokio::time::{Instant, Sleep}; use tower::Service; +type RustlsServerResult = Result, std::io::Error>; + const HTTP1_HEADER_READ_TIMEOUT: Duration = Duration::from_secs(5); const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(5); const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(5); @@ -46,8 +48,8 @@ pub fn from_tcp_with_timeouts(socket: TcpListener) -> Server { } #[must_use] -pub fn from_tcp_rustls_with_timeouts(socket: TcpListener, tls: RustlsConfig) -> Server { - add_timeouts(axum_server::from_tcp_rustls(socket, tls)) +pub fn from_tcp_rustls_with_timeouts(socket: TcpListener, tls: RustlsConfig) -> RustlsServerResult { + axum_server::from_tcp_rustls(socket, tls).map(add_timeouts) } fn add_timeouts(mut server: Server) -> Server { From 74d5c8b9f0520077e8ec3baf84423808f771a285 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:39:14 +0000 Subject: [PATCH 765/802] fix: E0308 - mismatched types, from_tcp returns Result in from_tcp_with_timeouts error[E0308]: mismatched types --> packages/axum-server/src/custom_axum_server.rs:47:18 | 47 | 
add_timeouts(axum_server::from_tcp(socket)) | ------------ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ expected `Server`, found `Result, Error>` Changed return type to Result, std::io::Error> and used map to apply add_timeouts to the Result value. --- packages/axum-server/src/custom_axum_server.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/axum-server/src/custom_axum_server.rs b/packages/axum-server/src/custom_axum_server.rs index 39a2271d6..b7f1d664e 100644 --- a/packages/axum-server/src/custom_axum_server.rs +++ b/packages/axum-server/src/custom_axum_server.rs @@ -37,14 +37,15 @@ use tokio::time::{Instant, Sleep}; use tower::Service; type RustlsServerResult = Result, std::io::Error>; +type ServerResult = Result, std::io::Error>; const HTTP1_HEADER_READ_TIMEOUT: Duration = Duration::from_secs(5); const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(5); const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(5); #[must_use] -pub fn from_tcp_with_timeouts(socket: TcpListener) -> Server { - add_timeouts(axum_server::from_tcp(socket)) +pub fn from_tcp_with_timeouts(socket: TcpListener) -> ServerResult { + axum_server::from_tcp(socket).map(add_timeouts) } #[must_use] From cd83cfd9491cc9369921d560467d5cbfec917d02 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:39:45 +0000 Subject: [PATCH 766/802] fix: E0631 - type mismatch in add_timeouts function arguments for acceptor type error[E0631]: type mismatch in function arguments --> packages/axum-server/src/custom_axum_server.rs:53:51 | 53 | axum_server::from_tcp_rustls(socket, tls).map(add_timeouts) | --- ^^^^^^^^^^^^ expected due to this | = note: expected function signature `fn(Server<_, RustlsAcceptor>) -> _` found function signature `fn(Server<_, DefaultAcceptor>) -> _` Made add_timeouts generic over both Address and Acceptor types to work with both DefaultAcceptor and RustlsAcceptor. 
--- packages/axum-server/src/custom_axum_server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/axum-server/src/custom_axum_server.rs b/packages/axum-server/src/custom_axum_server.rs index b7f1d664e..e3567bad4 100644 --- a/packages/axum-server/src/custom_axum_server.rs +++ b/packages/axum-server/src/custom_axum_server.rs @@ -53,7 +53,7 @@ pub fn from_tcp_rustls_with_timeouts(socket: TcpListener, tls: RustlsConfig) -> axum_server::from_tcp_rustls(socket, tls).map(add_timeouts) } -fn add_timeouts(mut server: Server) -> Server { +fn add_timeouts(mut server: Server) -> Server { server.http_builder().http1().timer(TokioTimer::new()); server.http_builder().http2().timer(TokioTimer::new()); From 612f7f1f07e69ce3e0fdeb3c8b264f46917aa06e Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:40:10 +0000 Subject: [PATCH 767/802] fix: E0107 - missing generics for struct axum_server::Handle in signals.rs error[E0107]: missing generics for struct `axum_server::Handle` --> packages/axum-server/src/signals.rs:10:26 | 10 | handle: axum_server::Handle, | ^^^^^^ expected 1 generic argument Added SocketAddr generic parameter to Handle type in graceful_shutdown function signature. 
--- packages/axum-server/src/signals.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/axum-server/src/signals.rs b/packages/axum-server/src/signals.rs index 268ff79fa..360879e32 100644 --- a/packages/axum-server/src/signals.rs +++ b/packages/axum-server/src/signals.rs @@ -7,7 +7,7 @@ use tracing::instrument; #[instrument(skip(handle, rx_halt, message))] pub async fn graceful_shutdown( - handle: axum_server::Handle, + handle: axum_server::Handle, rx_halt: tokio::sync::oneshot::Receiver, message: String, address: SocketAddr, From 37793ce42e19ab922ad70948631d8f26ecad9213 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:41:05 +0000 Subject: [PATCH 768/802] fix: clippy::uninlined_format_args - variables can be used directly in format! string --> console/tracker-client/src/console/clients/udp/app.rs:178:24 | | __________________^ 179 | | ... "invalid address format: \`{}\`. Expected format is host:port", 180 | | ... tracker_socket_addr_str 181 | | ... )); | |_______^ --> console/tracker-client/src/console/clients/udp/app.rs:199:13 | 199 | ...rr(anyhow::anyhow!("DNS resolution failed for \`{}\`", tracker_socket_addr_str)) Changed format strings to use inline variable interpolation instead of positional arguments. 
--- console/tracker-client/src/console/clients/udp/app.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/console/tracker-client/src/console/clients/udp/app.rs b/console/tracker-client/src/console/clients/udp/app.rs index a2736c365..527f46e78 100644 --- a/console/tracker-client/src/console/clients/udp/app.rs +++ b/console/tracker-client/src/console/clients/udp/app.rs @@ -176,8 +176,7 @@ fn parse_socket_addr(tracker_socket_addr_str: &str) -> anyhow::Result anyhow::Result = resolved_addr.to_socket_addrs()?.collect(); if socket_addrs.is_empty() { - Err(anyhow::anyhow!("DNS resolution failed for `{}`", tracker_socket_addr_str)) + Err(anyhow::anyhow!("DNS resolution failed for `{tracker_socket_addr_str}`")) } else { Ok(socket_addrs[0]) } From f0678be9cf46a549ebd811800754f9b24b2dab7b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:41:49 +0000 Subject: [PATCH 769/802] fix: clippy::missing_errors_doc and clippy::double_must_use for from_tcp_with_timeouts error: docs for function returning `Result` missing `# Errors` section --> packages/axum-server/src/custom_axum_server.rs:47:1 | 47 | pub fn from_tcp_with_timeouts(socket: TcpListener) -> ServerResult { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ error: this function has a `#[must_use]` attribute with no message, but returns a type already marked as `#[must_use]` --> packages/axum-server/src/custom_axum_server.rs:47:1 | 47 | pub fn from_tcp_with_timeouts(socket: TcpListener) -> ServerResult { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Added documentation with Errors section and removed #[must_use] attribute since Result type already has it. 
--- packages/axum-server/src/custom_axum_server.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/packages/axum-server/src/custom_axum_server.rs b/packages/axum-server/src/custom_axum_server.rs index e3567bad4..f332c9288 100644 --- a/packages/axum-server/src/custom_axum_server.rs +++ b/packages/axum-server/src/custom_axum_server.rs @@ -43,7 +43,11 @@ const HTTP1_HEADER_READ_TIMEOUT: Duration = Duration::from_secs(5); const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(5); const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(5); -#[must_use] +/// Creates an Axum server from a TCP listener with configured timeouts. +/// +/// # Errors +/// +/// Returns an error if the server cannot be created from the TCP socket. pub fn from_tcp_with_timeouts(socket: TcpListener) -> ServerResult { axum_server::from_tcp(socket).map(add_timeouts) } From ea001980306165b863393cc3b0ef1f28c0b61cf9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:42:12 +0000 Subject: [PATCH 770/802] fix: clippy::missing_errors_doc and clippy::double_must_use for from_tcp_rustls_with_timeouts error: docs for function returning `Result` missing `# Errors` section --> packages/axum-server/src/custom_axum_server.rs:52:1 | 52 | pub fn from_tcp_rustls_with_timeouts(socket: TcpListener, tls: RustlsConfig) -> RustlsServerResult { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ error: this function has a `#[must_use]` attribute with no message, but returns a type already marked as `#[must_use]` --> packages/axum-server/src/custom_axum_server.rs:52:1 | 52 | pub fn from_tcp_rustls_with_timeouts(socket: TcpListener, tls: RustlsConfig) -> RustlsServerResult { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Added documentation with Errors section and removed #[must_use] attribute since Result type already has it. 
--- packages/axum-server/src/custom_axum_server.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/packages/axum-server/src/custom_axum_server.rs b/packages/axum-server/src/custom_axum_server.rs index f332c9288..0328198ec 100644 --- a/packages/axum-server/src/custom_axum_server.rs +++ b/packages/axum-server/src/custom_axum_server.rs @@ -52,7 +52,11 @@ pub fn from_tcp_with_timeouts(socket: TcpListener) -> ServerResult { axum_server::from_tcp(socket).map(add_timeouts) } -#[must_use] +/// Creates an Axum server from a TCP listener with TLS and configured timeouts. +/// +/// # Errors +/// +/// Returns an error if the server cannot be created from the TCP socket or if TLS configuration fails. pub fn from_tcp_rustls_with_timeouts(socket: TcpListener, tls: RustlsConfig) -> RustlsServerResult { axum_server::from_tcp_rustls(socket, tls).map(add_timeouts) } From a217bb924a427e281f8df91b7f0a299627293a78 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:43:28 +0000 Subject: [PATCH 771/802] fix: E0599 - no method named handle found for Result in health-check-api-server error[E0599]: no method named `handle` found for enum `std::result::Result` in the current scope --> packages/axum-health-check-api-server/src/server.rs:120:10 | 119 | let running = axum_server::from_tcp(socket) | ___________________- 120 | | .handle(handle) | |_________-^^^^^^ Added expect() to unwrap Result before calling handle() method since from_tcp now returns Result. 
--- packages/axum-health-check-api-server/src/server.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/axum-health-check-api-server/src/server.rs b/packages/axum-health-check-api-server/src/server.rs index 3eeb1b054..c261f6af8 100644 --- a/packages/axum-health-check-api-server/src/server.rs +++ b/packages/axum-health-check-api-server/src/server.rs @@ -117,6 +117,7 @@ pub fn start( )); let running = axum_server::from_tcp(socket) + .expect("Failed to create server from TCP socket") .handle(handle) .serve(router.into_make_service_with_connect_info::()); From 054843477e3c73d132b7ca71dee208e3bec65dd8 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:44:09 +0000 Subject: [PATCH 772/802] fix: E0599 and E0282 - no method named handle found for Result in axum-http-tracker-server error[E0599]: no method named `handle` found for enum `std::result::Result` in the current scope --> packages/axum-http-tracker-server/src/server.rs:77:22 | 76 | ... Some(tls) => custom_axum_server::from_tcp_rustls_with_timeouts(socket, tls) | ____________________- 77 | | ... .handle(handle) | |___________-^^^^^^ error[E0599]: no method named `handle` found for enum `std::result::Result` in the current scope --> packages/axum-http-tracker-server/src/server.rs:85:22 | 84 | None => custom_axum_server::from_tcp_with_timeouts(socket) | _________________________- 85 | | .handle(handle) | |_____________________-^^^^^^ Added expect() calls to unwrap Result before calling handle() method for both TLS and non-TLS cases. 
--- packages/axum-http-tracker-server/src/server.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 2b43be0a9..4b7c15de8 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -74,6 +74,7 @@ impl Launcher { let running = Box::pin(async { match tls { Some(tls) => custom_axum_server::from_tcp_rustls_with_timeouts(socket, tls) + .expect("Failed to create server from TCP socket with TLS") .handle(handle) // The TimeoutAcceptor is commented because TSL does not work with it. // See: https://github.com/torrust/torrust-index/issues/204#issuecomment-2115529214 @@ -82,6 +83,7 @@ impl Launcher { .await .expect("Axum server crashed."), None => custom_axum_server::from_tcp_with_timeouts(socket) + .expect("Failed to create server from TCP socket") .handle(handle) .acceptor(TimeoutAcceptor) .serve(app.into_make_service_with_connect_info::()) From 02e43394e2d30370ae8214b2d513c947773123b5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:45:06 +0000 Subject: [PATCH 773/802] fix: E0599 and E0282 - no method named handle found for Result in axum-rest-tracker-api-server error[E0599]: no method named `handle` found for enum `std::result::Result` in the current scope --> packages/axum-rest-tracker-api-server/src/server.rs:272:22 | 271 | ... Some(tls) => custom_axum_server::from_tcp_rustls_with_timeouts(socket, tls) | ____________________- 272 | | ... .handle(handle) | |___________-^^^^^^ error[E0599]: no method named `handle` found for enum `std::result::Result` in the current scope --> packages/axum-rest-tracker-api-server/src/server.rs:280:22 | 279 | None => custom_axum_server::from_tcp_with_timeouts(socket) | _________________________- 280 | | .handle(handle) | |_____________________-^^^^^^ Added expect() calls to unwrap Result before calling handle() method for both TLS and non-TLS cases. 
--- packages/axum-rest-tracker-api-server/src/server.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/axum-rest-tracker-api-server/src/server.rs b/packages/axum-rest-tracker-api-server/src/server.rs index b358345fb..a867ecfcf 100644 --- a/packages/axum-rest-tracker-api-server/src/server.rs +++ b/packages/axum-rest-tracker-api-server/src/server.rs @@ -269,6 +269,7 @@ impl Launcher { let running = Box::pin(async { match tls { Some(tls) => custom_axum_server::from_tcp_rustls_with_timeouts(socket, tls) + .expect("Failed to create server from TCP socket with TLS") .handle(handle) // The TimeoutAcceptor is commented because TSL does not work with it. // See: https://github.com/torrust/torrust-index/issues/204#issuecomment-2115529214 @@ -277,6 +278,7 @@ impl Launcher { .await .expect("Axum server for tracker API crashed."), None => custom_axum_server::from_tcp_with_timeouts(socket) + .expect("Failed to create server from TCP socket") .handle(handle) .acceptor(TimeoutAcceptor) .serve(router.into_make_service_with_connect_info::()) From a62eb146fae7de0edb671c65e99d108ca18db2e5 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 08:51:04 +0000 Subject: [PATCH 774/802] fix: runtime panic - Registering a blocking socket with tokio runtime is unsupported thread 'tokio-runtime-worker' panicked at /home/josecelano/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/axum-server-0.8.0/src/server.rs:70:30: Registering a blocking socket with the tokio runtime is unsupported. If you wish to do anyways, please add `--cfg tokio_allow_from_blocking_fd` to your RUSTFLAGS. See github.com/tokio-rs/tokio/issues/7172 for details. Set std::net::TcpListener instances to non-blocking mode using set_nonblocking(true) before passing them to axum-server to avoid runtime panics when registering with tokio runtime. 
This is required since axum-server 0.8.0 and tokio v1.44.0 which added debug assertions to prevent blocking sockets from being registered with the tokio runtime. --- packages/axum-health-check-api-server/src/server.rs | 3 +++ packages/axum-http-tracker-server/src/server.rs | 3 +++ packages/axum-rest-tracker-api-server/src/server.rs | 1 + 3 files changed, 7 insertions(+) diff --git a/packages/axum-health-check-api-server/src/server.rs b/packages/axum-health-check-api-server/src/server.rs index c261f6af8..a371f146e 100644 --- a/packages/axum-health-check-api-server/src/server.rs +++ b/packages/axum-health-check-api-server/src/server.rs @@ -101,6 +101,9 @@ pub fn start( .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)); let socket = std::net::TcpListener::bind(bind_to).expect("Could not bind tcp_listener to address."); + socket + .set_nonblocking(true) + .expect("Failed to set socket to non-blocking mode"); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); let protocol = Protocol::HTTP; // The health check API only supports HTTP directly now. Use a reverse proxy for HTTPS. 
let service_binding = ServiceBinding::new(protocol.clone(), address).expect("Service binding creation failed"); diff --git a/packages/axum-http-tracker-server/src/server.rs b/packages/axum-http-tracker-server/src/server.rs index 4b7c15de8..69f9cb72e 100644 --- a/packages/axum-http-tracker-server/src/server.rs +++ b/packages/axum-http-tracker-server/src/server.rs @@ -52,6 +52,9 @@ impl Launcher { rx_halt: Receiver, ) -> BoxFuture<'static, ()> { let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); + socket + .set_nonblocking(true) + .expect("Failed to set socket to non-blocking mode"); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); let handle = Handle::new(); diff --git a/packages/axum-rest-tracker-api-server/src/server.rs b/packages/axum-rest-tracker-api-server/src/server.rs index a867ecfcf..32c1051e1 100644 --- a/packages/axum-rest-tracker-api-server/src/server.rs +++ b/packages/axum-rest-tracker-api-server/src/server.rs @@ -247,6 +247,7 @@ impl Launcher { rx_halt: Receiver, ) -> BoxFuture<'static, ()> { let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); + socket.set_nonblocking(true).expect("Failed to set socket to non-blocking mode"); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); let router = router(http_api_container, access_tokens, address); From eccab24403fb95be99f3be3bd05875d2e2ac1916 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 09:07:47 +0000 Subject: [PATCH 775/802] style: apply cargo fmt formatting to axum-rest-tracker-api-server Format set_nonblocking call to use multi-line formatting per rustfmt conventions. 
--- packages/axum-rest-tracker-api-server/src/server.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/axum-rest-tracker-api-server/src/server.rs b/packages/axum-rest-tracker-api-server/src/server.rs index 32c1051e1..05adeae8a 100644 --- a/packages/axum-rest-tracker-api-server/src/server.rs +++ b/packages/axum-rest-tracker-api-server/src/server.rs @@ -247,7 +247,9 @@ impl Launcher { rx_halt: Receiver, ) -> BoxFuture<'static, ()> { let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); - socket.set_nonblocking(true).expect("Failed to set socket to non-blocking mode"); + socket + .set_nonblocking(true) + .expect("Failed to set socket to non-blocking mode"); let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); let router = router(http_api_container, access_tokens, address); From 38ed4cbc074c7322ba6b898bf424ba935bb419e3 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 22 Dec 2025 16:38:41 +0000 Subject: [PATCH 776/802] chore(deps): update dependencies ``` cargo update Updating crates.io index Locking 4 packages to latest compatible versions Updating derive_more v2.1.0 -> v2.1.1 Updating derive_more-impl v2.1.0 -> v2.1.1 Updating reqwest v0.12.26 -> v0.12.27 Updating serde_json v1.0.145 -> v1.0.146 note: pass `--verbose` to see 7 unchanged dependencies behind latest ``` --- Cargo.lock | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index da0910f48..3bdf93e00 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1542,18 +1542,18 @@ dependencies = [ [[package]] name = "derive_more" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10b768e943bed7bf2cab53df09f4bc34bfd217cdb57d971e769874c9a6710618" +checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134" dependencies = [ "derive_more-impl", ] [[package]] name = 
"derive_more-impl" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d286bfdaf75e988b4a78e013ecd79c581e06399ab53fbacd2d916c2f904f30b" +checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb" dependencies = [ "convert_case", "proc-macro2", @@ -3819,9 +3819,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.26" +version = "0.12.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b4c14b2d9afca6a60277086b0cc6a6ae0b568f6f7916c943a8cdc79f8be240f" +checksum = "8e893f6bece5953520ddbb3f8f46f3ef36dd1fef4ee9b087c4b4a725fd5d10e4" dependencies = [ "base64 0.22.1", "bytes", @@ -4278,9 +4278,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.145" +version = "1.0.146" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" +checksum = "217ca874ae0207aac254aa02c957ded05585a90892cc8d87f9e5fa49669dadd8" dependencies = [ "indexmap 2.12.1", "itoa", From 767bb5c2ec9e3042ad28d0d36c7a8e1071385889 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 23 Dec 2025 08:58:13 +0000 Subject: [PATCH 777/802] fix: [#1628] upgrade to Debian 13 (Trixie) to resolve security vulnerabilities - Update base images from Debian 12 (bookworm) to Debian 13 (trixie) - Update builder: rust:bookworm -> rust:trixie - Update tester: rust:slim-bookworm -> rust:slim-trixie - Update GCC: gcc:bookworm -> gcc:trixie - Update runtime: gcr.io/distroless/cc-debian12:debug -> gcr.io/distroless/cc-debian13:debug This resolves all 5 security vulnerabilities (1 CRITICAL, 4 HIGH): - CVE-2019-1010022 (CRITICAL): glibc stack guard protection bypass - CVE-2018-20796 (HIGH): glibc uncontrolled recursion - CVE-2019-1010023 (HIGH): glibc ldd malicious ELF code execution - CVE-2019-9192 (HIGH): glibc uncontrolled recursion - CVE-2023-0286 (HIGH): OpenSSL X.400 address type confusion Trivy scan 
results: - Before: Total 5 (CRITICAL: 1, HIGH: 4) - After: Total 0 (CRITICAL: 0, HIGH: 0) Container tested and verified working with health checks passing. --- Containerfile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Containerfile b/Containerfile index 263053390..e926a5202 100644 --- a/Containerfile +++ b/Containerfile @@ -3,13 +3,13 @@ # Torrust Tracker ## Builder Image -FROM docker.io/library/rust:bookworm AS chef +FROM docker.io/library/rust:trixie AS chef WORKDIR /tmp RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash RUN cargo binstall --no-confirm cargo-chef cargo-nextest ## Tester Image -FROM docker.io/library/rust:slim-bookworm AS tester +FROM docker.io/library/rust:slim-trixie AS tester WORKDIR /tmp RUN apt-get update; apt-get install -y curl sqlite3; apt-get autoclean @@ -21,7 +21,7 @@ RUN mkdir -p /app/share/torrust/default/database/; \ sqlite3 /app/share/torrust/default/database/tracker.sqlite3.db "VACUUM;" ## Su Exe Compile -FROM docker.io/library/gcc:bookworm AS gcc +FROM docker.io/library/gcc:trixie AS gcc COPY ./contrib/dev-tools/su-exec/ /usr/local/src/su-exec/ RUN cc -Wall -Werror -g /usr/local/src/su-exec/su-exec.c -o /usr/local/bin/su-exec; chmod +x /usr/local/bin/su-exec @@ -91,7 +91,7 @@ RUN chown -R root:root /app; chmod -R u=rw,go=r,a+X /app; chmod -R a+x /app/bin ## Runtime -FROM gcr.io/distroless/cc-debian12:debug AS runtime +FROM gcr.io/distroless/cc-debian13:debug AS runtime RUN ["/busybox/cp", "-sp", "/busybox/sh","/busybox/cat","/busybox/ls","/busybox/env", "/bin/"] COPY --from=gcc --chmod=0555 /usr/local/bin/su-exec /bin/su-exec From 300be03c24aa769d55b8415d310c2e032cff59ce Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 23 Dec 2025 09:50:17 +0000 Subject: [PATCH 778/802] chore(deps): update dependencies ``` cargo update Updating crates.io index Locking 2 packages to latest compatible versions Updating 
reqwest v0.12.27 -> v0.12.28 Updating rustix v1.1.2 -> v1.1.3 note: pass `--verbose` to see 7 unchanged dependencies behind latest ``` --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3bdf93e00..d0478573b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3819,9 +3819,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.27" +version = "0.12.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e893f6bece5953520ddbb3f8f46f3ef36dd1fef4ee9b087c4b4a725fd5d10e4" +checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" dependencies = [ "base64 0.22.1", "bytes", @@ -4023,9 +4023,9 @@ dependencies = [ [[package]] name = "rustix" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" +checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" dependencies = [ "bitflags", "errno", From c9c027dfe96fbb4b5558f6519cb936d9ff9f5f1d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 23 Dec 2025 09:58:13 +0000 Subject: [PATCH 779/802] chore(deps): bump actions/upload-artifact from 5 to 6 Bumps actions/upload-artifact from 5 to 6. This update includes: - Node.js 24 runtime support - Requires Actions Runner version 2.327.1 or later - Fixes punycode deprecation warnings --- .github/workflows/generate_coverage_pr.yaml | 6 +++--- cSpell.json | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/generate_coverage_pr.yaml b/.github/workflows/generate_coverage_pr.yaml index 6942e276f..f762207cf 100644 --- a/.github/workflows/generate_coverage_pr.yaml +++ b/.github/workflows/generate_coverage_pr.yaml @@ -59,13 +59,13 @@ jobs: # Triggered sub-workflow is not able to detect the original commit/PR which is available # in this workflow. 
- name: Store PR number - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: pr_number path: pr_number.txt - name: Store commit SHA - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: commit_sha path: commit_sha.txt @@ -74,7 +74,7 @@ jobs: # is executed by a different workflow `upload_coverage.yml`. The reason for this # split is because `on.pull_request` workflows don't have access to secrets. - name: Store coverage report in artifacts - uses: actions/upload-artifact@v5 + uses: actions/upload-artifact@v6 with: name: codecov_report path: ./codecov.json diff --git a/cSpell.json b/cSpell.json index 76939c199..81421e050 100644 --- a/cSpell.json +++ b/cSpell.json @@ -32,6 +32,7 @@ "canonicalized", "certbot", "chrono", + "Cinstrument", "ciphertext", "clippy", "cloneable", From 8dde9c3e1b217ebd974670f5d309c590e6dba105 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 26 Jan 2026 12:41:33 +0000 Subject: [PATCH 780/802] chore(deps): update dependencies ``` cargo update Updating crates.io index Locking 109 packages to latest compatible versions Updating arc-swap v1.7.1 -> v1.8.0 Updating async-compression v0.4.36 -> v0.4.37 Adding aws-lc-rs v1.15.4 Adding aws-lc-sys v0.37.0 Updating axum-core v0.5.5 -> v0.5.6 Updating axum-extra v0.12.3 -> v0.12.5 Updating bigdecimal v0.4.9 -> v0.4.10 Updating cc v1.2.50 -> v1.2.54 Adding cesu8 v1.1.0 Updating chrono v0.4.42 -> v0.4.43 Updating clap v4.5.53 -> v4.5.54 Updating clap_builder v4.5.53 -> v4.5.54 Updating clap_lex v0.7.6 -> v0.7.7 Adding combine v4.6.7 Updating compression-codecs v0.4.35 -> v0.4.36 Adding dunce v1.0.5 Updating ferroid v0.8.8 -> v0.8.9 Updating filetime v0.2.26 -> v0.2.27 Updating find-msvc-tools v0.1.5 -> v0.1.8 Updating flate2 v1.1.5 -> v1.1.8 Adding foldhash v0.2.0 Updating fs-err v3.2.1 -> v3.2.2 Adding fs_extra v1.3.0 Updating getrandom v0.2.16 -> v0.2.17 Updating h2 v0.4.12 -> v0.4.13 Updating hashlink v0.10.0 -> v0.11.0 Removing hyper-tls 
v0.6.0 Updating indexmap v2.12.1 -> v2.13.0 Updating iri-string v0.7.9 -> v0.7.10 Updating itoa v1.0.16 -> v1.0.17 Adding jni v0.21.1 Adding jni-sys v0.3.0 Updating js-sys v0.3.83 -> v0.3.85 Updating libc v0.2.178 -> v0.2.180 Updating libm v0.2.15 -> v0.2.16 Updating libredox v0.1.11 -> v0.1.12 Updating libsqlite3-sys v0.35.0 -> v0.36.0 Updating local-ip-address v0.6.8 -> v0.6.9 Adding lru-slab v0.1.2 Updating num-conv v0.1.0 -> v0.2.0 Adding openssl-probe v0.2.1 Updating portable-atomic v1.12.0 -> v1.13.0 Updating proc-macro2 v1.0.103 -> v1.0.106 Updating prost v0.14.1 -> v0.14.3 Updating prost-derive v0.14.1 -> v0.14.3 Updating prost-types v0.14.1 -> v0.14.3 Adding quinn v0.11.9 Adding quinn-proto v0.11.13 Adding quinn-udp v0.5.14 Updating quote v1.0.42 -> v1.0.44 Updating r2d2_sqlite v0.31.0 -> v0.32.0 Updating rand_core v0.9.3 -> v0.9.5 Updating redox_syscall v0.6.0 -> v0.7.0 Updating reqwest v0.12.28 -> v0.13.1 Updating rkyv v0.7.45 -> v0.7.46 Updating rkyv_derive v0.7.45 -> v0.7.46 Adding rsqlite-vfs v0.1.0 Updating rusqlite v0.37.0 -> v0.38.0 Updating rust_decimal v1.39.0 -> v1.40.0 Updating rustc-demangle v0.1.26 -> v0.1.27 Updating rustls v0.23.35 -> v0.23.36 Updating rustls-native-certs v0.8.2 -> v0.8.3 Updating rustls-pki-types v1.13.2 -> v1.14.0 Adding rustls-platform-verifier v0.6.2 Adding rustls-platform-verifier-android v0.1.1 Updating rustls-webpki v0.103.8 -> v0.103.9 Updating ryu v1.0.21 -> v1.0.22 Updating schemars v1.1.0 -> v1.2.0 Updating serde_json v1.0.146 -> v1.0.149 Updating signal-hook-registry v1.4.7 -> v1.4.8 Updating socket2 v0.6.1 -> v0.6.2 Adding sqlite-wasm-rs v0.5.2 Updating subprocess v0.2.9 -> v0.2.13 Updating syn v2.0.111 -> v2.0.114 Updating tempfile v3.23.0 -> v3.24.0 Updating testcontainers v0.26.2 -> v0.26.3 Updating thiserror v2.0.17 -> v2.0.18 Updating thiserror-impl v2.0.17 -> v2.0.18 Updating time v0.3.44 -> v0.3.46 Updating time-core v0.1.6 -> v0.1.8 Updating time-macros v0.2.24 -> v0.2.26 Updating tokio v1.48.0 -> 
v1.49.0 Removing tokio-native-tls v0.3.1 Updating tokio-stream v0.1.17 -> v0.1.18 Updating tokio-util v0.7.17 -> v0.7.18 Updating toml v0.9.10+spec-1.1.0 -> v0.9.11+spec-1.1.0 Updating tower v0.5.2 -> v0.5.3 Updating url v2.5.7 -> v2.5.8 Updating uuid v1.19.0 -> v1.20.0 Updating wasip2 v1.0.1+wasi-0.2.4 -> v1.0.2+wasi-0.2.9 Updating wasm-bindgen v0.2.106 -> v0.2.108 Updating wasm-bindgen-futures v0.4.56 -> v0.4.58 Updating wasm-bindgen-macro v0.2.106 -> v0.2.108 Updating wasm-bindgen-macro-support v0.2.106 -> v0.2.108 Updating wasm-bindgen-shared v0.2.106 -> v0.2.108 Updating web-sys v0.3.83 -> v0.3.85 Adding webpki-root-certs v1.0.5 Updating webpki-roots v1.0.4 -> v1.0.5 Adding windows-sys v0.45.0 Adding windows-targets v0.42.2 Adding windows_aarch64_gnullvm v0.42.2 Adding windows_aarch64_msvc v0.42.2 Adding windows_i686_gnu v0.42.2 Adding windows_i686_msvc v0.42.2 Adding windows_x86_64_gnu v0.42.2 Adding windows_x86_64_gnullvm v0.42.2 Adding windows_x86_64_msvc v0.42.2 Updating wit-bindgen v0.46.0 -> v0.51.0 Updating zerocopy v0.8.31 -> v0.8.34 Updating zerocopy-derive v0.8.31 -> v0.8.34 Adding zmij v1.0.17 note: pass `--verbose` to see 7 unchanged dependencies behind latest ``` --- Cargo.lock | 850 +++++++++++++++++++++++++++++++++++------------------ 1 file changed, 557 insertions(+), 293 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d0478573b..146da3a18 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -23,7 +23,7 @@ version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", "once_cell", "version_check", ] @@ -175,9 +175,12 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" +checksum = 
"51d03449bb8ca2cc2ef70869af31463d1ae5ccc8fa3e334b307203fbf815207e" +dependencies = [ + "rustversion", +] [[package]] name = "arrayvec" @@ -236,13 +239,12 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.36" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98ec5f6c2f8bc326c994cb9e241cc257ddaba9afa8555a43cffbb5dd86efaa37" +checksum = "d10e4f991a553474232bc0a31799f6d24b034a84c0971d80d2e2f78b2e576e40" dependencies = [ "compression-codecs", "compression-core", - "futures-core", "pin-project-lite", "tokio", ] @@ -352,7 +354,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -369,7 +371,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -393,6 +395,28 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +[[package]] +name = "aws-lc-rs" +version = "1.15.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b7b6141e96a8c160799cc2d5adecd5cbbe5054cb8c7c4af53da0f83bb7ad256" +dependencies = [ + "aws-lc-sys", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.37.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c34dda4df7017c8db52132f0f8a2e0f8161649d15723ed63fc00c82d0f2081a" +dependencies = [ + "cc", + "cmake", + "dunce", + "fs_extra", +] + [[package]] name = "axum" version = "0.8.8" @@ -440,9 +464,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59446ce19cd142f8833f856eb31f3eb097812d1479ab224f54d72428ca21ea22" +checksum = 
"08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" dependencies = [ "bytes", "futures-core", @@ -459,9 +483,9 @@ dependencies = [ [[package]] name = "axum-extra" -version = "0.12.3" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dfbd6109d91702d55fc56df06aae7ed85c465a7a451db6c0e54a4b9ca5983d1" +checksum = "fef252edff26ddba56bbcdf2ee3307b8129acb86f5749b68990c168a6fcc9c76" dependencies = [ "axum", "axum-core", @@ -490,7 +514,7 @@ checksum = "604fde5e028fea851ce1d8570bbdc034bec850d157f7569d10f347d06808c05c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -553,9 +577,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "bigdecimal" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "560f42649de9fa436b73517378a147ec21f6c997a546581df4b4b31677828934" +checksum = "4d6867f1565b3aad85681f1015055b087fcfd840d6aeee6eee7f2da317603695" dependencies = [ "autocfg", "libm", @@ -585,7 +609,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -614,7 +638,7 @@ dependencies = [ "mockall", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-util", "torrust-tracker-clock", @@ -639,7 +663,7 @@ dependencies = [ "percent-encoding", "serde", "serde_bencode", - "thiserror 2.0.17", + "thiserror 2.0.18", "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-contrib-bencode", @@ -675,7 +699,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_repr", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "torrust-tracker-configuration", "torrust-tracker-located-error", @@ -701,7 +725,7 @@ dependencies = [ "serde", "serde_json", "testcontainers", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-util", "torrust-rest-tracker-api-client", @@ -734,7 +758,7 @@ 
dependencies = [ "mockall", "rand 0.9.2", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-util", "torrust-tracker-clock", @@ -847,7 +871,7 @@ dependencies = [ "serde_json", "serde_repr", "serde_urlencoded", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tokio-util", @@ -907,7 +931,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1018,9 +1042,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.50" +version = "1.2.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f50d563227a1c37cc0a263f64eca3334388c01c5e4c4861a9def205c614383c" +checksum = "6354c81bbfd62d9cfa9cb3c773c2b7b2a3a482d569de977fd0e961f6e7c00583" dependencies = [ "find-msvc-tools", "jobserver", @@ -1028,6 +1052,12 @@ dependencies = [ "shlex", ] +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + [[package]] name = "cexpr" version = "0.6.0" @@ -1051,9 +1081,9 @@ checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chrono" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" +checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" dependencies = [ "iana-time-zone", "num-traits", @@ -1111,9 +1141,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.53" +version = "4.5.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8" +checksum = "c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394" dependencies = [ "clap_builder", "clap_derive", @@ -1121,9 +1151,9 @@ dependencies = [ [[package]] name = "clap_builder" 
-version = "4.5.53" +version = "4.5.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00" +checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00" dependencies = [ "anstream", "anstyle", @@ -1140,14 +1170,14 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "clap_lex" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" +checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32" [[package]] name = "cmake" @@ -1164,6 +1194,16 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + [[package]] name = "compact_str" version = "0.7.1" @@ -1179,9 +1219,9 @@ dependencies = [ [[package]] name = "compression-codecs" -version = "0.4.35" +version = "0.4.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0f7ac3e5b97fdce45e8922fb05cae2c37f7bbd63d30dd94821dacfd8f3f2bf2" +checksum = "00828ba6fd27b45a448e57dbfe84f1029d4c9f26b368157e9a448a5f49a2ec2a" dependencies = [ "brotli", "compression-core", @@ -1446,7 +1486,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1460,7 +1500,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1471,7 +1511,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 
2.0.111", + "syn 2.0.114", ] [[package]] @@ -1482,7 +1522,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1527,7 +1567,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1537,7 +1577,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1559,7 +1599,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.111", + "syn 2.0.114", "unicode-xid", ] @@ -1571,7 +1611,7 @@ checksum = "ccfae181bab5ab6c5478b2ccb69e4c68a02f8c3ec72f6616bfec9dbc599d2ee0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1598,7 +1638,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1618,6 +1658,12 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + [[package]] name = "dyn-clone" version = "1.0.20" @@ -1722,9 +1768,9 @@ checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" [[package]] name = "ferroid" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce161062fb044bd629c2393590efd47cab8d0241faf15704ffb0d47b7b4e4a35" +checksum = "bb330bbd4cb7a5b9f559427f06f98a4f853a137c8298f3bd3f8ca57663e21986" dependencies = [ "portable-atomic", "rand 0.9.2", 
@@ -1749,27 +1795,26 @@ dependencies = [ [[package]] name = "filetime" -version = "0.2.26" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc0505cd1b6fa6580283f6bdf70a73fcf4aba1184038c90902b92b3dd0df63ed" +checksum = "f98844151eee8917efc50bd9e8318cb963ae8b297431495d3f758616ea5c57db" dependencies = [ "cfg-if", "libc", "libredox", - "windows-sys 0.60.2", ] [[package]] name = "find-msvc-tools" -version = "0.1.5" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" +checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db" [[package]] name = "flate2" -version = "1.1.5" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" +checksum = "b375d6465b98090a5f25b1c7703f3859783755aa9a80433b36e0379a3ec2f369" dependencies = [ "crc32fast", "libz-sys", @@ -1788,6 +1833,12 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" +[[package]] +name = "foldhash" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" + [[package]] name = "foreign-types" version = "0.3.2" @@ -1867,7 +1918,7 @@ checksum = "a0b4095fc99e1d858e5b8c7125d2638372ec85aa0fe6c807105cf10b0265ca6c" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1879,7 +1930,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -1891,19 +1942,25 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "fs-err" -version = "3.2.1" +version = "3.2.2" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824f08d01d0f496b3eca4f001a13cf17690a6ee930043d20817f547455fd98f8" +checksum = "baf68cef89750956493a66a10f512b9e58d9db21f2a573c079c0bdf1207a54a7" dependencies = [ "autocfg", "tokio", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "funty" version = "2.0.0" @@ -1979,7 +2036,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -2030,13 +2087,15 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi", + "wasm-bindgen", ] [[package]] @@ -2046,9 +2105,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", + "js-sys", "libc", "r-efi", "wasip2", + "wasm-bindgen", ] [[package]] @@ -2060,7 +2121,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -2089,9 +2150,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" +checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" dependencies = [ "atomic-waker", "bytes", @@ -2099,7 +2160,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.12.1", + "indexmap 2.13.0", 
"slab", "tokio", "tokio-util", @@ -2114,7 +2175,7 @@ checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", - "zerocopy 0.8.31", + "zerocopy 0.8.34", ] [[package]] @@ -2140,7 +2201,7 @@ checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ "allocator-api2", "equivalent", - "foldhash", + "foldhash 0.1.5", ] [[package]] @@ -2148,14 +2209,17 @@ name = "hashbrown" version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" +dependencies = [ + "foldhash 0.2.0", +] [[package]] name = "hashlink" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" +checksum = "ea0b22561a9c04a7cb1a302c013e0259cd3b4bb619f145b32f72b8b4bcbed230" dependencies = [ - "hashbrown 0.15.5", + "hashbrown 0.16.1", ] [[package]] @@ -2303,22 +2367,6 @@ dependencies = [ "tower-service", ] -[[package]] -name = "hyper-tls" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" -dependencies = [ - "bytes", - "http-body-util", - "hyper", - "hyper-util", - "native-tls", - "tokio", - "tokio-native-tls", - "tower-service", -] - [[package]] name = "hyper-util" version = "0.1.19" @@ -2337,7 +2385,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2 0.6.1", + "socket2 0.6.2", "system-configuration", "tokio", "tower-service", @@ -2505,9 +2553,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.12.1" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" 
dependencies = [ "equivalent", "hashbrown 0.16.1", @@ -2547,9 +2595,9 @@ checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "iri-string" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397" +checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a" dependencies = [ "memchr", "serde", @@ -2607,9 +2655,31 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.16" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" + +[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if", + "combine", + "jni-sys", + "log", + "thiserror 1.0.69", + "walkdir", + "windows-sys 0.45.0", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee5b5339afb4c41626dde77b7a611bd4f2c202b897852b4bcf5d03eddc61010" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" [[package]] name = "jobserver" @@ -2623,9 +2693,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.83" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" +checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" dependencies = [ "once_cell", "wasm-bindgen", @@ -2648,9 +2718,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.178" +version = "0.2.180" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" +checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" [[package]] name = "libloading" @@ -2664,26 +2734,26 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" +checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" [[package]] name = "libredox" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df15f6eac291ed1cf25865b1ee60399f57e7c227e7f51bdbd4c5270396a9ed50" +checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616" dependencies = [ "bitflags", "libc", - "redox_syscall 0.6.0", + "redox_syscall 0.7.0", ] [[package]] name = "libsqlite3-sys" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "133c182a6a2c87864fe97778797e46c7e999672690dc9fa3ee8e241aa4a9c13f" +checksum = "95b4103cffefa72eb8428cb6b47d6627161e51c2739fc5e3b734584157bc642a" dependencies = [ "cc", "pkg-config", @@ -2715,13 +2785,12 @@ checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "local-ip-address" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a60bf300a990b2d1ebdde4228e873e8e4da40d834adbf5265f3da1457ede652" +checksum = "92488bc8a0f99ee9f23577bdd06526d49657df8bd70504c61f812337cdad01ab" dependencies = [ "libc", "neli", - "thiserror 2.0.17", "windows-sys 0.61.2", ] @@ -2752,6 +2821,12 @@ dependencies = [ "hashbrown 0.15.5", ] +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + [[package]] name = "matchit" 
version = "0.8.4" @@ -2791,7 +2866,7 @@ checksum = "db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -2850,7 +2925,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -2900,7 +2975,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", "termcolor", "thiserror 1.0.69", ] @@ -2962,7 +3037,7 @@ dependencies = [ "libc", "log", "openssl", - "openssl-probe", + "openssl-probe 0.1.6", "openssl-sys", "schannel", "security-framework 2.11.1", @@ -2996,7 +3071,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -3059,9 +3134,9 @@ dependencies = [ [[package]] name = "num-conv" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" [[package]] name = "num-integer" @@ -3153,7 +3228,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -3162,6 +3237,12 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +[[package]] +name = "openssl-probe" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" + [[package]] name = "openssl-sys" version = "0.9.111" @@ -3241,7 +3322,7 @@ dependencies = [ "regex", "regex-syntax", "structmeta", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -3264,7 +3345,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.111", + "syn 
2.0.114", ] [[package]] @@ -3338,7 +3419,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -3414,9 +3495,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f59e70c4aef1e55797c2e8fd94a4f2a973fc972cfde0e0b05f683667b0cd39dd" +checksum = "f89776e4d69bb58bc6993e99ffa1d11f228b839984854c7daeb5d37f87cbe950" [[package]] name = "portable-atomic-util" @@ -3448,7 +3529,7 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.8.31", + "zerocopy 0.8.34", ] [[package]] @@ -3515,14 +3596,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "proc-macro2" -version = "1.0.103" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" dependencies = [ "unicode-ident", ] @@ -3535,16 +3616,16 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", "version_check", "yansi", ] [[package]] name = "prost" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" +checksum = "d2ea70524a2f82d518bce41317d0fae74151505651af45faf1ffbd6fd33f0568" dependencies = [ "bytes", "prost-derive", @@ -3552,22 +3633,22 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.14.1" +version = "0.14.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" +checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" dependencies = [ "anyhow", "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "prost-types" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9b4db3d6da204ed77bb26ba83b6122a73aeb2e87e25fbf7ad2e84c4ccbf8f72" +checksum = "8991c4cbdb8bc5b11f0b074ffe286c30e523de90fee5ba8132f1399f23cb3dd7" dependencies = [ "prost", ] @@ -3603,11 +3684,67 @@ dependencies = [ "rand 0.8.5", ] +[[package]] +name = "quinn" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" +dependencies = [ + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2 0.6.2", + "thiserror 2.0.18", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +dependencies = [ + "aws-lc-rs", + "bytes", + "getrandom 0.3.4", + "lru-slab", + "rand 0.9.2", + "ring", + "rustc-hash", + "rustls", + "rustls-pki-types", + "slab", + "thiserror 2.0.18", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.6.2", + "tracing", + "windows-sys 0.60.2", +] + [[package]] name = "quote" -version = "1.0.42" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" dependencies = [ "proc-macro2", ] @@ -3641,9 +3778,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.31.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63417e83dc891797eea3ad379f52a5986da4bca0d6ef28baf4d14034dd111b0c" +checksum = "a2ebd03c29250cdf191da93a35118b4567c2ef0eacab54f65e058d6f4c9965f6" dependencies = [ "r2d2", "rusqlite", @@ -3674,7 +3811,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -3694,7 +3831,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", - "rand_core 0.9.3", + "rand_core 0.9.5", ] [[package]] @@ -3703,14 +3840,14 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", ] [[package]] name = "rand_core" -version = "0.9.3" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" dependencies = [ "getrandom 0.3.4", ] @@ -3746,9 +3883,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec96166dafa0886eb81fe1c0a388bece180fbef2135f97c1e2cf8302e74b43b5" +checksum = "49f3fe0889e69e2ae9e41f4d6c4c0181701d00e4697b356fb1f74173a5e0ee27" 
dependencies = [ "bitflags", ] @@ -3770,7 +3907,7 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -3819,9 +3956,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.28" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" +checksum = "04e9018c9d814e5f30cc16a0f03271aeab3571e609612d9fe78c1aa8d11c2f62" dependencies = [ "base64 0.22.1", "bytes", @@ -3833,21 +3970,21 @@ dependencies = [ "http-body-util", "hyper", "hyper-rustls", - "hyper-tls", "hyper-util", "js-sys", "log", "mime", - "native-tls", "percent-encoding", "pin-project-lite", + "quinn", + "rustls", "rustls-pki-types", + "rustls-platform-verifier", "serde", "serde_json", - "serde_urlencoded", "sync_wrapper", "tokio", - "tokio-native-tls", + "tokio-rustls", "tower", "tower-http", "tower-service", @@ -3865,7 +4002,7 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.16", + "getrandom 0.2.17", "libc", "untrusted", "windows-sys 0.52.0", @@ -3884,9 +4021,9 @@ dependencies = [ [[package]] name = "rkyv" -version = "0.7.45" +version = "0.7.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9008cd6385b9e161d8229e1f6549dd23c3d022f132a2ea37ac3a10ac4935779b" +checksum = "2297bf9c81a3f0dc96bc9521370b88f054168c29826a75e89c55ff196e7ed6a1" dependencies = [ "bitvec", "bytecheck", @@ -3902,15 +4039,25 @@ dependencies = [ [[package]] name = "rkyv_derive" -version = "0.7.45" +version = "0.7.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0" +checksum = "84d7b42d4b8d06048d3ac8db0eb31bcb942cbeb709f0b5f2b2ebde398d3038f5" dependencies = [ "proc-macro2", "quote", "syn 1.0.109", ] 
+[[package]] +name = "rsqlite-vfs" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8a1f2315036ef6b1fbacd1972e8ee7688030b0a2121edfc2a6550febd41574d" +dependencies = [ + "hashbrown 0.16.1", + "thiserror 2.0.18", +] + [[package]] name = "rstest" version = "0.25.0" @@ -3948,7 +4095,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.111", + "syn 2.0.114", "unicode-ident", ] @@ -3966,15 +4113,15 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.111", + "syn 2.0.114", "unicode-ident", ] [[package]] name = "rusqlite" -version = "0.37.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "165ca6e57b20e1351573e3729b958bc62f0e48025386970b6e4d29e7a7e71f3f" +checksum = "f1c93dd1c9683b438c392c492109cb702b8090b2bfc8fed6f6e4eb4523f17af3" dependencies = [ "bitflags", "fallible-iterator", @@ -3982,13 +4129,14 @@ dependencies = [ "hashlink", "libsqlite3-sys", "smallvec", + "sqlite-wasm-rs", ] [[package]] name = "rust_decimal" -version = "1.39.0" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35affe401787a9bd846712274d97654355d21b2a2c092a3139aabe31e9022282" +checksum = "61f703d19852dbf87cbc513643fa81428361eb6940f1ac14fd58155d295a3eb0" dependencies = [ "arrayvec", "borsh", @@ -4002,9 +4150,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" +checksum = "b50b8869d9fc858ce7266cce0194bd74df58b9d0e3f6df3a9fc8eb470d95c09d" [[package]] name = "rustc-hash" @@ -4036,10 +4184,11 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.35" +version = "0.23.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" 
+checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" dependencies = [ + "aws-lc-rs", "log", "once_cell", "ring", @@ -4051,11 +4200,11 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923" +checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ - "openssl-probe", + "openssl-probe 0.2.1", "rustls-pki-types", "schannel", "security-framework 3.5.1", @@ -4072,19 +4221,48 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.13.2" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282" +checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" dependencies = [ + "web-time", "zeroize", ] +[[package]] +name = "rustls-platform-verifier" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d99feebc72bae7ab76ba994bb5e121b8d83d910ca40b36e0921f53becc41784" +dependencies = [ + "core-foundation 0.10.1", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki", + "security-framework 3.5.1", + "security-framework-sys", + "webpki-root-certs", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" + [[package]] name = "rustls-webpki" -version = "0.103.8" +version = "0.103.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" +checksum = 
"d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" dependencies = [ + "aws-lc-rs", "ring", "rustls-pki-types", "untrusted", @@ -4098,9 +4276,9 @@ checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62049b2877bf12821e8f9ad256ee38fdc31db7387ec2d3b3f403024de2034aea" +checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" [[package]] name = "same-file" @@ -4149,9 +4327,9 @@ dependencies = [ [[package]] name = "schemars" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9558e172d4e8533736ba97870c4b2cd63f84b382a3d6eb063da41b91cce17289" +checksum = "54e910108742c57a770f492731f99be216a52fadd361b06c8fb59d74ccc267d2" dependencies = [ "dyn-clone", "ref-cast", @@ -4260,7 +4438,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -4270,7 +4448,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2f2d7ff8a2140333718bb329f5c40fc5f0865b84c426183ce14c97d2ab8154f" dependencies = [ "form_urlencoded", - "indexmap 2.12.1", + "indexmap 2.13.0", "itoa", "ryu", "serde_core", @@ -4278,16 +4456,16 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.146" +version = "1.0.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "217ca874ae0207aac254aa02c957ded05585a90892cc8d87f9e5fa49669dadd8" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "itoa", "memchr", - "ryu", "serde", "serde_core", + "zmij", ] [[package]] @@ -4309,7 +4487,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -4352,9 +4530,9 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.12.1", + "indexmap 2.13.0", "schemars 0.9.0", - "schemars 1.1.0", + "schemars 1.2.0", "serde_core", "serde_json", "serde_with_macros", @@ -4370,7 +4548,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -4412,10 +4590,11 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.7" +version = "1.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" dependencies = [ + "errno", "libc", ] @@ -4461,14 +4640,26 @@ dependencies = [ [[package]] name = "socket2" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +checksum = "86f4aa3ad99f2088c990dfa82d367e19cb29268ed67c574d10d0a4bfe71f07e0" dependencies = [ "libc", "windows-sys 0.60.2", ] +[[package]] +name = "sqlite-wasm-rs" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f4206ed3a67690b9c29b77d728f6acc3ce78f16bf846d83c94f76400320181b" +dependencies = [ + "cc", + "js-sys", + "rsqlite-vfs", + "wasm-bindgen", +] + [[package]] name = "stable_deref_trait" version = "1.2.1" @@ -4496,7 +4687,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -4507,14 +4698,14 @@ checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "subprocess" -version = "0.2.9" +version = "0.2.13" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c2e86926081dda636c546d8c5e641661049d7562a68f5488be4a1f7f66f6086" +checksum = "f75238edb5be30a9ea3035b945eb9c319dde80e879411cdc9a8978e1ac822960" dependencies = [ "libc", "winapi", @@ -4560,9 +4751,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.111" +version = "2.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" +checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" dependencies = [ "proc-macro2", "quote", @@ -4586,7 +4777,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -4635,9 +4826,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.23.0" +version = "3.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" +checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c" dependencies = [ "fastrand", "getrandom 0.3.4", @@ -4673,9 +4864,9 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "testcontainers" -version = "0.26.2" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1483605f58b2fff80d786eb56a0b6b4e8b1e5423fbc9ec2e3e562fa2040d6f27" +checksum = "a81ec0158db5fbb9831e09d1813fe5ea9023a2b5e6e8e0a5fe67e2a820733629" dependencies = [ "astral-tokio-tar", "async-trait", @@ -4694,7 +4885,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-stream", "tokio-util", @@ -4722,11 +4913,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ - "thiserror-impl 2.0.17", + "thiserror-impl 2.0.18", ] [[package]] @@ -4737,18 +4928,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "thiserror-impl" -version = "2.0.17" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -4762,30 +4953,30 @@ dependencies = [ [[package]] name = "time" -version = "0.3.44" +version = "0.3.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" +checksum = "9da98b7d9b7dad93488a84b8248efc35352b0b2657397d4167e7ad67e5d535e5" dependencies = [ "deranged", "itoa", "num-conv", "powerfmt", - "serde", + "serde_core", "time-core", "time-macros", ] [[package]] name = "time-core" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" [[package]] name = "time-macros" -version = "0.2.24" +version = "0.2.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" +checksum = "78cc610bac2dcee56805c99642447d4c5dbde4d01f752ffea0199aee1f601dc4" dependencies = [ "num-conv", "time-core", @@ -4828,16 +5019,16 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = 
"tokio" -version = "1.48.0" +version = "1.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" +checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" dependencies = [ "bytes", "libc", "mio", "pin-project-lite", "signal-hook-registry", - "socket2 0.6.1", + "socket2 0.6.2", "tokio-macros", "windows-sys 0.61.2", ] @@ -4850,17 +5041,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", -] - -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", + "syn 2.0.114", ] [[package]] @@ -4875,9 +5056,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70" dependencies = [ "futures-core", "pin-project-lite", @@ -4886,9 +5067,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.17" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" dependencies = [ "bytes", "futures-core", @@ -4911,11 +5092,11 @@ dependencies = [ [[package]] name = "toml" -version = "0.9.10+spec-1.1.0" +version = "0.9.11+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0825052159284a1a8b4d6c0c86cbc801f2da5afd2b225fa548c72f2e74002f48" +checksum = "f3afc9a848309fe1aaffaed6e1546a7a14de1f935dc9d89d32afd9a44bab7c46" 
dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "serde_core", "serde_spanned 1.0.4", "toml_datetime 0.7.5+spec-1.1.0", @@ -4948,7 +5129,7 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "serde", "serde_spanned 0.6.9", "toml_datetime 0.6.11", @@ -4962,7 +5143,7 @@ version = "0.23.10+spec-1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "winnow", @@ -5008,7 +5189,7 @@ dependencies = [ "hyper-util", "percent-encoding", "pin-project", - "socket2 0.6.1", + "socket2 0.6.2", "sync_wrapper", "tokio", "tokio-stream", @@ -5118,7 +5299,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "torrust-axum-server", "torrust-rest-tracker-api-client", @@ -5149,7 +5330,7 @@ dependencies = [ "hyper", "hyper-util", "pin-project-lite", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "torrust-server-lib", "torrust-tracker-configuration", @@ -5165,7 +5346,7 @@ dependencies = [ "hyper", "reqwest", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", "url", "uuid", ] @@ -5220,7 +5401,7 @@ dependencies = [ "reqwest", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-util", "torrust-axum-health-check-api-server", @@ -5256,7 +5437,7 @@ dependencies = [ "serde_bencode", "serde_bytes", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "torrust-tracker-configuration", "tracing", @@ -5284,8 +5465,8 @@ dependencies = [ "serde", "serde_json", "serde_with", - "thiserror 2.0.17", - "toml 0.9.10+spec-1.1.0", + "thiserror 2.0.18", + "toml 0.9.11+spec-1.1.0", "torrust-tracker-located-error", "tracing", 
"tracing-subscriber", @@ -5298,7 +5479,7 @@ name = "torrust-tracker-contrib-bencode" version = "3.0.0-develop" dependencies = [ "criterion 0.8.1", - "thiserror 2.0.17", + "thiserror 2.0.18", ] [[package]] @@ -5314,7 +5495,7 @@ dependencies = [ name = "torrust-tracker-located-error" version = "3.0.0-develop" dependencies = [ - "thiserror 2.0.17", + "thiserror 2.0.18", "tracing", ] @@ -5330,7 +5511,7 @@ dependencies = [ "rstest 0.25.0", "serde", "serde_json", - "thiserror 2.0.17", + "thiserror 2.0.18", "torrust-tracker-primitives", "tracing", ] @@ -5347,7 +5528,7 @@ dependencies = [ "serde", "tdyne-peer-id", "tdyne-peer-id-registry", - "thiserror 2.0.17", + "thiserror 2.0.18", "torrust-tracker-configuration", "url", "zerocopy 0.7.35", @@ -5368,7 +5549,7 @@ dependencies = [ "rand 0.9.2", "rstest 0.26.1", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-util", "torrust-tracker-clock", @@ -5427,7 +5608,7 @@ dependencies = [ "rand 0.9.2", "ringbuf", "serde", - "thiserror 2.0.17", + "thiserror 2.0.18", "tokio", "tokio-util", "torrust-server-lib", @@ -5446,13 +5627,13 @@ dependencies = [ [[package]] name = "tower" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" dependencies = [ "futures-core", "futures-util", - "indexmap 2.12.1", + "indexmap 2.13.0", "pin-project-lite", "slab", "sync_wrapper", @@ -5519,7 +5700,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -5674,14 +5855,15 @@ dependencies = [ [[package]] name = "url" -version = "2.5.7" +version = "2.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +checksum = 
"ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" dependencies = [ "form_urlencoded", "idna", "percent-encoding", "serde", + "serde_derive", ] [[package]] @@ -5704,9 +5886,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.19.0" +version = "1.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" +checksum = "ee48d38b119b0cd71fe4141b30f5ba9c7c5d9f4e7a3a8b4a674e4b6ef789976f" dependencies = [ "getrandom 0.3.4", "js-sys", @@ -5765,18 +5947,18 @@ checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasip2" -version = "1.0.1+wasi-0.2.4" +version = "1.0.2+wasi-0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5" dependencies = [ "wit-bindgen", ] [[package]] name = "wasm-bindgen" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" +checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" dependencies = [ "cfg-if", "once_cell", @@ -5787,11 +5969,12 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.56" +version = "0.4.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" +checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" dependencies = [ "cfg-if", + "futures-util", "js-sys", "once_cell", "wasm-bindgen", @@ -5800,9 +5983,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.106" +version = "0.2.108" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" +checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -5810,31 +5993,31 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" +checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" +checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" dependencies = [ "unicode-ident", ] [[package]] name = "web-sys" -version = "0.3.83" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" +checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" dependencies = [ "js-sys", "wasm-bindgen", @@ -5850,11 +6033,20 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki-root-certs" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36a29fc0408b113f68cf32637857ab740edfafdf460c326cd2afaa2d84cc05dc" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "webpki-roots" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e" +checksum = 
"12bed680863276c63889429bfd6cab3b99943659923822de1c8a39c49e4d722c" dependencies = [ "rustls-pki-types", ] @@ -5911,7 +6103,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -5922,7 +6114,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -5960,6 +6152,15 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + [[package]] name = "windows-sys" version = "0.52.0" @@ -5987,6 +6188,21 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + [[package]] name = "windows-targets" version = "0.52.6" @@ -6020,6 +6236,12 @@ dependencies = [ "windows_x86_64_msvc 0.53.1", ] +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" @@ -6032,6 +6254,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" +[[package]] +name = "windows_aarch64_msvc" +version 
= "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + [[package]] name = "windows_aarch64_msvc" version = "0.52.6" @@ -6044,6 +6272,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + [[package]] name = "windows_i686_gnu" version = "0.52.6" @@ -6068,6 +6302,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + [[package]] name = "windows_i686_msvc" version = "0.52.6" @@ -6080,6 +6320,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + [[package]] name = "windows_x86_64_gnu" version = "0.52.6" @@ -6092,6 +6338,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + [[package]] name = 
"windows_x86_64_gnullvm" version = "0.52.6" @@ -6104,6 +6356,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + [[package]] name = "windows_x86_64_msvc" version = "0.52.6" @@ -6127,9 +6385,9 @@ dependencies = [ [[package]] name = "wit-bindgen" -version = "0.46.0" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" [[package]] name = "writeable" @@ -6181,7 +6439,7 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", "synstructure", ] @@ -6197,11 +6455,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.31" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" +checksum = "71ddd76bcebeed25db614f82bf31a9f4222d3fbba300e6fb6c00afa26cbd4d9d" dependencies = [ - "zerocopy-derive 0.8.31", + "zerocopy-derive 0.8.34", ] [[package]] @@ -6212,18 +6470,18 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] [[package]] name = "zerocopy-derive" -version = "0.8.31" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" +checksum = "d8187381b52e32220d50b255276aa16a084ec0a9017a0ca2152a1f55c539758d" dependencies = [ "proc-macro2", "quote", 
- "syn 2.0.111", + "syn 2.0.114", ] [[package]] @@ -6243,7 +6501,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", "synstructure", ] @@ -6283,9 +6541,15 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.111", + "syn 2.0.114", ] +[[package]] +name = "zmij" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02aae0f83f69aafc94776e879363e9771d7ecbffe2c7fbb6c14c5e00dfe88439" + [[package]] name = "zstd" version = "0.13.3" From 457a020c704d3ec084c0df222c15ca00e1c83f82 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 26 Jan 2026 13:06:09 +0000 Subject: [PATCH 781/802] fix: enable reqwest query feature for API compatibility reqwest 0.13 made the feature optional and disabled by default. This commit adds the feature to the reqwest dependency in the rest-tracker-api-client package to restore query parameter functionality. 
--- Cargo.lock | 1 + packages/rest-tracker-api-client/Cargo.toml | 2 +- .../rest-tracker-api-client/src/v1/client.rs | 20 +++++++++---------- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 146da3a18..8916a6640 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3982,6 +3982,7 @@ dependencies = [ "rustls-platform-verifier", "serde", "serde_json", + "serde_urlencoded", "sync_wrapper", "tokio", "tokio-rustls", diff --git a/packages/rest-tracker-api-client/Cargo.toml b/packages/rest-tracker-api-client/Cargo.toml index cba580e18..c01b9c05a 100644 --- a/packages/rest-tracker-api-client/Cargo.toml +++ b/packages/rest-tracker-api-client/Cargo.toml @@ -16,7 +16,7 @@ version.workspace = true [dependencies] hyper = "1" -reqwest = { version = "0", features = ["json"] } +reqwest = { version = "0", features = ["json", "query"] } serde = { version = "1", features = ["derive"] } thiserror = "2" url = { version = "2", features = ["serde"] } diff --git a/packages/rest-tracker-api-client/src/v1/client.rs b/packages/rest-tracker-api-client/src/v1/client.rs index 3137b8b41..02a5b0d9c 100644 --- a/packages/rest-tracker-api-client/src/v1/client.rs +++ b/packages/rest-tracker-api-client/src/v1/client.rs @@ -204,22 +204,22 @@ impl Client { /// /// Will panic if the request can't be sent pub async fn get(path: Url, query: Option, headers: Option) -> Response { - let builder = reqwest::Client::builder() + let client = reqwest::Client::builder() .timeout(Duration::from_secs(DEFAULT_REQUEST_TIMEOUT_IN_SECS)) .build() .unwrap(); - let builder = match query { - Some(params) => builder.get(path).query(&ReqwestQuery::from(params)), - None => builder.get(path), - }; + let mut request_builder = client.get(path); - let builder = match headers { - Some(headers) => builder.headers(headers), - None => builder, - }; + if let Some(params) = query { + request_builder = request_builder.query(&ReqwestQuery::from(params)); + } + + if let Some(headers) = headers { + 
request_builder = request_builder.headers(headers); + } - builder.send().await.unwrap() + request_builder.send().await.unwrap() } /// Returns a `HeaderMap` with a request id header. From ac47c1b26a068cf371c35fe40660ccfb564f1de2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Mon, 26 Jan 2026 14:08:13 +0000 Subject: [PATCH 782/802] fix: suppress clippy warnings for large error types in config tests --- packages/configuration/src/v2_0_0/mod.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/packages/configuration/src/v2_0_0/mod.rs b/packages/configuration/src/v2_0_0/mod.rs index 8391ba0e1..b3fbc881e 100644 --- a/packages/configuration/src/v2_0_0/mod.rs +++ b/packages/configuration/src/v2_0_0/mod.rs @@ -521,6 +521,7 @@ mod tests { } #[test] + #[allow(clippy::result_large_err)] fn configuration_should_use_the_default_values_when_only_the_mandatory_options_are_provided_by_the_user_via_toml_file() { figment::Jail::expect_with(|jail| { jail.create_file( @@ -552,6 +553,7 @@ mod tests { } #[test] + #[allow(clippy::result_large_err)] fn configuration_should_use_the_default_values_when_only_the_mandatory_options_are_provided_by_the_user_via_toml_content() { figment::Jail::expect_with(|_jail| { let config_toml = r#" @@ -581,6 +583,7 @@ mod tests { } #[test] + #[allow(clippy::result_large_err)] fn default_configuration_could_be_overwritten_from_a_single_env_var_with_toml_contents() { figment::Jail::expect_with(|_jail| { let config_toml = r#" @@ -613,6 +616,7 @@ mod tests { } #[test] + #[allow(clippy::result_large_err)] fn default_configuration_could_be_overwritten_from_a_toml_config_file() { figment::Jail::expect_with(|jail| { jail.create_file( @@ -646,6 +650,7 @@ mod tests { }); } + #[allow(clippy::result_large_err)] #[test] fn configuration_should_allow_to_overwrite_the_default_tracker_api_token_for_admin_with_an_env_var() { figment::Jail::expect_with(|jail| { From 046d5c982e34f7ace34e5e5355c8b72f540ad583 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Feb 
2026 11:08:46 +0000 Subject: [PATCH 783/802] chore(deps): update dependencies ``` cargo update Updating crates.io index Locking 98 packages to latest compatible versions Updating anyhow v1.0.100 -> v1.0.102 Updating arc-swap v1.8.0 -> v1.8.2 Updating async-compression v0.4.37 -> v0.4.40 Updating async-executor v1.13.3 -> v1.14.0 Updating aws-lc-rs v1.15.4 -> v1.16.0 Updating aws-lc-sys v0.37.0 -> v0.37.1 Updating bitflags v2.10.0 -> v2.11.0 Updating bollard v0.19.4 -> v0.20.1 Updating bollard-stubs v1.49.1-rc.28.4.0 -> v1.52.1-rc.29.1.3 Updating bumpalo v3.19.1 -> v3.20.2 Updating bytemuck v1.24.0 -> v1.25.0 Updating bytes v1.11.0 -> v1.11.1 Updating cc v1.2.54 -> v1.2.56 Adding chacha20 v0.10.0 Adding cipher v0.5.0 Updating clap v4.5.54 -> v4.5.60 Updating clap_builder v4.5.54 -> v4.5.60 Updating clap_derive v4.5.49 -> v4.5.55 Updating clap_lex v0.7.7 -> v1.0.0 Updating compression-codecs v0.4.36 -> v0.4.37 Adding cpufeatures v0.3.0 Updating criterion v0.8.1 -> v0.8.2 Updating criterion-plot v0.8.1 -> v0.8.2 Adding crypto-common v0.2.0 Updating deranged v0.5.5 -> v0.5.6 Adding env_filter v1.0.0 Updating env_logger v0.8.4 -> v0.11.9 Updating find-msvc-tools v0.1.8 -> v0.1.9 Updating flate2 v1.1.8 -> v1.1.9 Updating fs-err v3.2.2 -> v3.3.0 Updating futures v0.3.31 -> v0.3.32 Updating futures-channel v0.3.31 -> v0.3.32 Updating futures-core v0.3.31 -> v0.3.32 Updating futures-executor v0.3.31 -> v0.3.32 Updating futures-io v0.3.31 -> v0.3.32 Updating futures-macro v0.3.31 -> v0.3.32 Updating futures-sink v0.3.31 -> v0.3.32 Updating futures-task v0.3.31 -> v0.3.32 Updating futures-util v0.3.31 -> v0.3.32 Adding getrandom v0.4.1 Adding hybrid-array v0.4.7 Updating hyper-util v0.1.19 -> v0.1.20 Updating iana-time-zone v0.1.64 -> v0.1.65 Adding id-arena v2.3.0 Adding inout v0.2.2 Adding leb128fmt v0.1.0 Updating libc v0.2.180 -> v0.2.182 Updating local-ip-address v0.6.9 -> v0.6.10 Updating memchr v2.7.6 -> v2.8.0 Updating native-tls v0.2.14 -> v0.2.18 Updating neli 
v0.7.3 -> v0.7.4 Removing openssl-probe v0.1.6 Updating portable-atomic v1.13.0 -> v1.13.1 Updating portable-atomic-util v0.2.4 -> v0.2.5 Updating predicates v3.1.3 -> v3.1.4 Updating predicates-core v1.0.9 -> v1.0.10 Updating predicates-tree v1.0.12 -> v1.0.13 Adding prettyplease v0.2.37 Updating quickcheck v1.0.3 -> v1.1.0 Adding rand v0.10.0 Adding rand_core v0.10.0 Updating redox_syscall v0.7.0 -> v0.7.1 Updating regex v1.12.2 -> v1.12.3 Updating regex-automata v0.4.13 -> v0.4.14 Updating regex-syntax v0.8.8 -> v0.8.9 Updating reqwest v0.13.1 -> v0.13.2 Removing rustls-pemfile v2.2.0 Updating ryu v1.0.22 -> v1.0.23 Updating schemars v1.2.0 -> v1.2.1 Removing security-framework v2.11.1 Removing security-framework v3.5.1 Adding security-framework v3.7.0 Updating security-framework-sys v2.15.0 -> v2.17.0 Updating siphasher v1.0.1 -> v1.0.2 Updating slab v0.4.11 -> v0.4.12 Updating subprocess v0.2.13 -> v0.2.15 Updating syn v2.0.114 -> v2.0.117 Updating system-configuration v0.6.1 -> v0.7.0 Updating tempfile v3.24.0 -> v3.25.0 Updating testcontainers v0.26.3 -> v0.27.0 Updating time v0.3.46 -> v0.3.47 Updating time-macros v0.2.26 -> v0.2.27 Updating toml v0.9.11+spec-1.1.0 -> v0.9.12+spec-1.1.0 (available: v1.0.3+spec-1.1.0) Updating toml_parser v1.0.6+spec-1.1.0 -> v1.0.9+spec-1.1.0 Updating tonic v0.14.2 -> v0.14.5 Updating tonic-prost v0.14.2 -> v0.14.5 Updating unicode-ident v1.0.22 -> v1.0.24 Updating ureq v3.1.4 -> v3.2.0 Updating uuid v1.20.0 -> v1.21.0 Adding wasip3 v0.4.0+wasi-0.3.0-rc-2026-01-06 Adding wasm-encoder v0.244.0 Adding wasm-metadata v0.244.0 Adding wasmparser v0.244.0 Updating webpki-root-certs v1.0.5 -> v1.0.6 Removing webpki-roots v1.0.5 Adding wit-bindgen-core v0.51.0 Adding wit-bindgen-rust v0.51.0 Adding wit-bindgen-rust-macro v0.51.0 Adding wit-component v0.244.0 Adding wit-parser v0.244.0 Updating zerocopy v0.8.34 -> v0.8.39 Updating zerocopy-derive v0.8.34 -> v0.8.39 Updating zmij v1.0.17 -> v1.0.21 note: pass `--verbose` to see 7 
unchanged dependencies behind latest ``` --- Cargo.lock | 767 +++++++++++++++++++++++++++++++++-------------------- 1 file changed, 485 insertions(+), 282 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8916a6640..e801b94cb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -134,9 +134,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.100" +version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" +checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" [[package]] name = "approx" @@ -175,9 +175,9 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.8.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d03449bb8ca2cc2ef70869af31463d1ae5ccc8fa3e334b307203fbf815207e" +checksum = "f9f3647c145568cec02c42054e07bdf9a5a698e15b466fb2341bfc393cd24aa5" dependencies = [ "rustversion", ] @@ -239,9 +239,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.37" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d10e4f991a553474232bc0a31799f6d24b034a84c0971d80d2e2f78b2e576e40" +checksum = "7d67d43201f4d20c78bcda740c142ca52482d81da80681533d33bf3f0596c8e2" dependencies = [ "compression-codecs", "compression-core", @@ -251,9 +251,9 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.13.3" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497c00e0fd83a72a79a39fcbd8e3e2f055d6f6c7e025f3b3d91f4f8e76527fb8" +checksum = "c96bf972d85afc50bf5ab8fe2d54d1586b4e0b46c97c50a0c9e71e2f7bcd812a" dependencies = [ "async-task", "concurrent-queue", @@ -354,7 +354,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -371,7 +371,7 @@ checksum = 
"9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -397,9 +397,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-lc-rs" -version = "1.15.4" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b7b6141e96a8c160799cc2d5adecd5cbbe5054cb8c7c4af53da0f83bb7ad256" +checksum = "d9a7b350e3bb1767102698302bc37256cbd48422809984b98d292c40e2579aa9" dependencies = [ "aws-lc-sys", "zeroize", @@ -407,9 +407,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.37.0" +version = "0.37.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c34dda4df7017c8db52132f0f8a2e0f8161649d15723ed63fc00c82d0f2081a" +checksum = "b092fe214090261288111db7a2b2c2118e5a7f30dc2569f1732c4069a6840549" dependencies = [ "cc", "cmake", @@ -514,7 +514,7 @@ checksum = "604fde5e028fea851ce1d8570bbdc034bec850d157f7569d10f347d06808c05c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -609,7 +609,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -620,9 +620,9 @@ checksum = "02b4ff8b16e6076c3e14220b39fbc1fabb6737522281a388998046859400895f" [[package]] name = "bitflags" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" +checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" [[package]] name = "bittorrent-http-tracker-core" @@ -721,7 +721,7 @@ dependencies = [ "r2d2", "r2d2_mysql", "r2d2_sqlite", - "rand 0.9.2", + "rand 0.10.0", "serde", "serde_json", "testcontainers", @@ -751,12 +751,12 @@ dependencies = [ "bittorrent-udp-tracker-protocol", "bloom", "blowfish", - "cipher", + "cipher 0.5.0", "criterion 0.5.1", 
"futures", "lazy_static", "mockall", - "rand 0.9.2", + "rand 0.10.0", "serde", "thiserror 2.0.18", "tokio", @@ -831,14 +831,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e412e2cd0f2b2d93e02543ceae7917b3c70331573df19ee046bcbc35e45e87d7" dependencies = [ "byteorder", - "cipher", + "cipher 0.4.4", ] [[package]] name = "bollard" -version = "0.19.4" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87a52479c9237eb04047ddb94788c41ca0d26eaff8b697ecfbb4c32f7fdc3b1b" +checksum = "227aa051deec8d16bd9c34605e7aaf153f240e35483dd42f6f78903847934738" dependencies = [ "async-stream", "base64 0.22.1", @@ -846,7 +846,6 @@ dependencies = [ "bollard-buildkit-proto", "bollard-stubs", "bytes", - "chrono", "futures-core", "futures-util", "hex", @@ -864,14 +863,13 @@ dependencies = [ "rand 0.9.2", "rustls", "rustls-native-certs", - "rustls-pemfile", "rustls-pki-types", "serde", "serde_derive", "serde_json", - "serde_repr", "serde_urlencoded", "thiserror 2.0.18", + "time", "tokio", "tokio-stream", "tokio-util", @@ -896,19 +894,18 @@ dependencies = [ [[package]] name = "bollard-stubs" -version = "1.49.1-rc.28.4.0" +version = "1.52.1-rc.29.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5731fe885755e92beff1950774068e0cae67ea6ec7587381536fca84f1779623" +checksum = "0f0a8ca8799131c1837d1282c3f81f31e76ceb0ce426e04a7fe1ccee3287c066" dependencies = [ "base64 0.22.1", "bollard-buildkit-proto", "bytes", - "chrono", "prost", "serde", "serde_json", "serde_repr", - "serde_with", + "time", ] [[package]] @@ -931,7 +928,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -972,9 +969,9 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name = "bumpalo" -version = "3.19.1" +version = "3.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" +checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" [[package]] name = "bytecheck" @@ -1000,9 +997,9 @@ dependencies = [ [[package]] name = "bytemuck" -version = "1.24.0" +version = "1.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fbdf580320f38b612e485521afda1ee26d10cc9884efaaa750d383e13e3c5f4" +checksum = "c8efb64bd706a16a1bdde310ae86b351e4d21550d98d056f22f8a7f7a2183fec" [[package]] name = "byteorder" @@ -1012,9 +1009,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" +checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" [[package]] name = "camino" @@ -1042,9 +1039,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.54" +version = "1.2.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6354c81bbfd62d9cfa9cb3c773c2b7b2a3a482d569de977fd0e961f6e7c00583" +checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2" dependencies = [ "find-msvc-tools", "jobserver", @@ -1079,6 +1076,17 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" +[[package]] +name = "chacha20" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f8d983286843e49675a4b7a2d174efe136dc93a18d69130dd18198a6c167601" +dependencies = [ + "cfg-if", + "cpufeatures 0.3.0", + "rand_core 0.10.0", +] + [[package]] name = "chrono" version = "0.4.43" @@ -1124,8 +1132,18 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ - "crypto-common", - "inout", + "crypto-common 0.1.7", + "inout 0.1.4", +] + +[[package]] +name = "cipher" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64727038c8c5e2bb503a15b9f5b9df50a1da9a33e83e1f93067d914f2c6604a5" +dependencies = [ + "crypto-common 0.2.0", + "inout 0.2.2", ] [[package]] @@ -1141,9 +1159,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.54" +version = "4.5.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394" +checksum = "2797f34da339ce31042b27d23607e051786132987f595b02ba4f6a6dffb7030a" dependencies = [ "clap_builder", "clap_derive", @@ -1151,9 +1169,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.54" +version = "4.5.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00" +checksum = "24a241312cea5059b13574bb9b3861cabf758b879c15190b37b6d6fd63ab6876" dependencies = [ "anstream", "anstyle", @@ -1163,21 +1181,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.49" +version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" +checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "clap_lex" -version = "0.7.7" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32" +checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" [[package]] name = "cmake" @@ -1219,9 +1237,9 @@ dependencies = [ [[package]] name = "compression-codecs" 
-version = "0.4.36" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00828ba6fd27b45a448e57dbfe84f1029d4c9f26b368157e9a448a5f49a2ec2a" +checksum = "eb7b51a7d9c967fc26773061ba86150f19c50c0d65c887cb1fbe295fd16619b7" dependencies = [ "brotli", "compression-core", @@ -1290,6 +1308,15 @@ dependencies = [ "libc", ] +[[package]] +name = "cpufeatures" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b2a41393f66f16b0823bb79094d54ac5fbd34ab292ddafb9a0456ac9f87d201" +dependencies = [ + "libc", +] + [[package]] name = "crc32fast" version = "1.5.0" @@ -1329,16 +1356,16 @@ dependencies = [ [[package]] name = "criterion" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d883447757bb0ee46f233e9dc22eb84d93a9508c9b868687b274fc431d886bf" +checksum = "950046b2aa2492f9a536f5f4f9a3de7b9e2476e575e05bd6c333371add4d98f3" dependencies = [ "alloca", "anes", "cast", "ciborium", "clap", - "criterion-plot 0.8.1", + "criterion-plot 0.8.2", "itertools 0.13.0", "num-traits", "oorandom", @@ -1365,9 +1392,9 @@ dependencies = [ [[package]] name = "criterion-plot" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed943f81ea2faa8dcecbbfa50164acf95d555afec96a27871663b300e387b2e4" +checksum = "d8d80a2f4f5b554395e47b5d8305bc3d27813bacb73493eb1001e8f76dae29ea" dependencies = [ "cast", "itertools 0.13.0", @@ -1455,6 +1482,15 @@ dependencies = [ "typenum", ] +[[package]] +name = "crypto-common" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "211f05e03c7d03754740fd9e585de910a095d6b99f8bcfffdef8319fa02a8331" +dependencies = [ + "hybrid-array", +] + [[package]] name = "darling" version = "0.20.11" @@ -1486,7 +1522,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1500,7 +1536,7 
@@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1511,7 +1547,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1522,7 +1558,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1541,9 +1577,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" +checksum = "cc3dc5ad92c2e2d1c193bbbbdf2ea477cb81331de4f3103f267ca18368b988c4" dependencies = [ "powerfmt", "serde_core", @@ -1567,7 +1603,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1577,7 +1613,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1599,7 +1635,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.114", + "syn 2.0.117", "unicode-xid", ] @@ -1611,7 +1647,7 @@ checksum = "ccfae181bab5ab6c5478b2ccb69e4c68a02f8c3ec72f6616bfec9dbc599d2ee0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1627,7 +1663,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", - "crypto-common", + "crypto-common 0.1.7", ] [[package]] @@ -1638,7 +1674,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 
2.0.117", ] [[package]] @@ -1686,15 +1722,25 @@ dependencies = [ ] [[package]] -name = "env_logger" -version = "0.8.4" +name = "env_filter" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" +checksum = "7a1c3cc8e57274ec99de65301228b537f1e4eedc1b8e0f9411c6caac8ae7308f" dependencies = [ "log", "regex", ] +[[package]] +name = "env_logger" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2daee4ea451f429a58296525ddf28b45a3b64f1acf6587e2067437bb11e218d" +dependencies = [ + "env_filter", + "log", +] + [[package]] name = "equivalent" version = "1.0.2" @@ -1806,15 +1852,15 @@ dependencies = [ [[package]] name = "find-msvc-tools" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" [[package]] name = "flate2" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b375d6465b98090a5f25b1c7703f3859783755aa9a80433b36e0379a3ec2f369" +checksum = "843fba2746e448b37e26a819579957415c8cef339bf08564fe8b7ddbd959573c" dependencies = [ "crc32fast", "libz-sys", @@ -1918,7 +1964,7 @@ checksum = "a0b4095fc99e1d858e5b8c7125d2638372ec85aa0fe6c807105cf10b0265ca6c" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1930,7 +1976,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1942,14 +1988,14 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "fs-err" -version = "3.2.2" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"baf68cef89750956493a66a10f512b9e58d9db21f2a573c079c0bdf1207a54a7" +checksum = "73fde052dbfc920003cfd2c8e2c6e6d4cc7c1091538c3a24226cec0665ab08c0" dependencies = [ "autocfg", "tokio", @@ -1969,9 +2015,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" dependencies = [ "futures-channel", "futures-core", @@ -1984,9 +2030,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" dependencies = [ "futures-core", "futures-sink", @@ -1994,15 +2040,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" [[package]] name = "futures-executor" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" dependencies = [ "futures-core", "futures-task", @@ -2011,9 +2057,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +checksum = 
"cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" [[package]] name = "futures-lite" @@ -2030,26 +2076,26 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "futures-sink" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" +checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" [[package]] name = "futures-task" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" [[package]] name = "futures-timer" @@ -2059,9 +2105,9 @@ checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" dependencies = [ "futures-channel", "futures-core", @@ -2071,7 +2117,6 @@ dependencies = [ "futures-task", "memchr", "pin-project-lite", - "pin-utils", "slab", ] @@ -2112,6 +2157,20 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "getrandom" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139ef39800118c7683f2fd3c98c1b23c09ae076556b435f8e9064ae108aaeeec" +dependencies = [ + "cfg-if", + "libc", 
+ "r-efi", + "rand_core 0.10.0", + "wasip2", + "wasip3", +] + [[package]] name = "getset" version = "0.1.6" @@ -2121,7 +2180,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2175,7 +2234,7 @@ checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", - "zerocopy 0.8.34", + "zerocopy 0.8.39", ] [[package]] @@ -2300,6 +2359,15 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" +[[package]] +name = "hybrid-array" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1b229d73f5803b562cc26e4da0396c8610a4ee209f4fac8fa4f8d709166dc45" +dependencies = [ + "typenum", +] + [[package]] name = "hyper" version = "1.8.1" @@ -2369,14 +2437,13 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" +checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" dependencies = [ "base64 0.22.1", "bytes", "futures-channel", - "futures-core", "futures-util", "http", "http-body", @@ -2410,9 +2477,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.64" +version = "0.1.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2513,6 +2580,12 @@ dependencies = [ "zerovec", ] +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + [[package]] name = "ident_case" version = "1.0.1" @@ -2578,6 +2651,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "inout" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4250ce6452e92010fdf7268ccc5d14faa80bb12fc741938534c58f16804e03c7" +dependencies = [ + "hybrid-array", +] + [[package]] name = "io-enum" version = "1.2.0" @@ -2716,11 +2798,17 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + [[package]] name = "libc" -version = "0.2.180" +version = "0.2.182" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" +checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112" [[package]] name = "libloading" @@ -2746,7 +2834,7 @@ checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616" dependencies = [ "bitflags", "libc", - "redox_syscall 0.7.0", + "redox_syscall 0.7.1", ] [[package]] @@ -2785,9 +2873,9 @@ checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "local-ip-address" -version = "0.6.9" +version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92488bc8a0f99ee9f23577bdd06526d49657df8bd70504c61f812337cdad01ab" +checksum = "79ef8c257c92ade496781a32a581d43e3d512cf8ce714ecf04ea80f93ed0ff4a" dependencies = [ "libc", "neli", @@ -2835,9 +2923,9 @@ checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" [[package]] name = "memchr" -version = "2.7.6" +version = "2.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" [[package]] name = "miette" @@ -2866,7 +2954,7 @@ checksum = "db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2925,7 +3013,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2975,7 +3063,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "termcolor", "thiserror 1.0.69", ] @@ -3030,26 +3118,26 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.14" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" +checksum = "465500e14ea162429d264d44189adc38b199b62b1c21eea9f69e4b73cb03bbf2" dependencies = [ "libc", "log", "openssl", - "openssl-probe 0.1.6", + "openssl-probe", "openssl-sys", "schannel", - "security-framework 2.11.1", + "security-framework", "security-framework-sys", "tempfile", ] [[package]] name = "neli" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e23bebbf3e157c402c4d5ee113233e5e0610cc27453b2f07eefce649c7365dcc" +checksum = "22f9786d56d972959e1408b6a93be6af13b9c1392036c5c1fafa08a1b0c6ee87" dependencies = [ "bitflags", "byteorder", @@ -3071,7 +3159,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3228,15 +3316,9 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] -[[package]] -name = "openssl-probe" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" - [[package]] name = "openssl-probe" version = "0.2.1" @@ -3322,7 +3404,7 @@ dependencies = [ "regex", "regex-syntax", "structmeta", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3345,7 +3427,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3419,7 +3501,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3495,15 +3577,15 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f89776e4d69bb58bc6993e99ffa1d11f228b839984854c7daeb5d37f87cbe950" +checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" [[package]] name = "portable-atomic-util" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8a2f0d8d040d7848a709caf78912debcc3f33ee4b3cac47d73d1e1069e83507" +checksum = "7a9db96d7fa8782dd8c15ce32ffe8680bbd1e978a43bf51a34d39483540495f5" dependencies = [ "portable-atomic", ] @@ -3529,14 +3611,14 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.8.34", + "zerocopy 0.8.39", ] [[package]] name = "predicates" -version = "3.1.3" +version = "3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5d19ee57562043d37e82899fade9a22ebab7be9cef5026b07fda9cdd4293573" +checksum = "ada8f2932f28a27ee7b70dd6c1c39ea0675c55a36879ab92f3a715eaa1e63cfe" dependencies = [ "anstyle", "predicates-core", @@ -3544,15 +3626,15 @@ dependencies = [ [[package]] name = "predicates-core" -version = "1.0.9" +version = "1.0.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa" +checksum = "cad38746f3166b4031b1a0d39ad9f954dd291e7854fcc0eed52ee41a0b50d144" [[package]] name = "predicates-tree" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c" +checksum = "d0de1b847b39c8131db0467e9df1ff60e6d0562ab8e9a16e568ad0fdb372e2f2" dependencies = [ "predicates-core", "termtree", @@ -3568,6 +3650,16 @@ dependencies = [ "yansi", ] +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn 2.0.117", +] + [[package]] name = "proc-macro-crate" version = "3.4.0" @@ -3596,7 +3688,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3616,7 +3708,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "version_check", "yansi", ] @@ -3641,7 +3733,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3675,13 +3767,13 @@ dependencies = [ [[package]] name = "quickcheck" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" +checksum = "95c589f335db0f6aaa168a7cd27b1fc6920f5e1470c804f814d9cd6e62a0f70b" dependencies = [ "env_logger", "log", - "rand 0.8.5", + "rand 0.10.0", ] [[package]] @@ -3814,6 +3906,17 @@ dependencies = [ "rand_core 0.9.5", ] +[[package]] +name = "rand" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "bc266eb313df6c5c09c1c7b1fbe2510961e5bcd3add930c1e31f7ed9da0feff8" +dependencies = [ + "chacha20", + "getrandom 0.4.1", + "rand_core 0.10.0", +] + [[package]] name = "rand_chacha" version = "0.3.1" @@ -3852,6 +3955,12 @@ dependencies = [ "getrandom 0.3.4", ] +[[package]] +name = "rand_core" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c8d0fd677905edcbeedbf2edb6494d676f0e98d54d5cf9bda0b061cb8fb8aba" + [[package]] name = "rayon" version = "1.11.0" @@ -3883,9 +3992,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f3fe0889e69e2ae9e41f4d6c4c0181701d00e4697b356fb1f74173a5e0ee27" +checksum = "35985aa610addc02e24fc232012c86fd11f14111180f902b67e2d5331f8ebf2b" dependencies = [ "bitflags", ] @@ -3907,14 +4016,14 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "regex" -version = "1.12.2" +version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" dependencies = [ "aho-corasick", "memchr", @@ -3924,9 +4033,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" dependencies = [ "aho-corasick", "memchr", @@ -3935,9 +4044,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" +checksum = "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" [[package]] name = "relative-path" @@ -3956,9 +4065,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04e9018c9d814e5f30cc16a0f03271aeab3571e609612d9fe78c1aa8d11c2f62" +checksum = "ab3f43e3283ab1488b624b44b0e988d0acea0b3214e694730a055cb6b2efa801" dependencies = [ "base64 0.22.1", "bytes", @@ -4096,7 +4205,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.114", + "syn 2.0.117", "unicode-ident", ] @@ -4114,7 +4223,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.114", + "syn 2.0.117", "unicode-ident", ] @@ -4205,19 +4314,10 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ - "openssl-probe 0.2.1", + "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.5.1", -] - -[[package]] -name = "rustls-pemfile" -version = "2.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" -dependencies = [ - "rustls-pki-types", + "security-framework", ] [[package]] @@ -4245,7 +4345,7 @@ dependencies = [ "rustls-native-certs", "rustls-platform-verifier-android", "rustls-webpki", - "security-framework 3.5.1", + "security-framework", "security-framework-sys", "webpki-root-certs", "windows-sys 0.61.2", @@ -4277,9 +4377,9 @@ checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" +checksum = 
"9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" [[package]] name = "same-file" @@ -4328,9 +4428,9 @@ dependencies = [ [[package]] name = "schemars" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54e910108742c57a770f492731f99be216a52fadd361b06c8fb59d74ccc267d2" +checksum = "a2b42f36aa1cd011945615b92222f6bf73c599a102a300334cd7f8dbeec726cc" dependencies = [ "dyn-clone", "ref-cast", @@ -4352,22 +4452,9 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] name = "security-framework" -version = "2.11.1" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" -dependencies = [ - "bitflags", - "core-foundation 0.9.4", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework" -version = "3.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" +checksum = "b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d" dependencies = [ "bitflags", "core-foundation 0.10.1", @@ -4378,9 +4465,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.15.0" +version = "2.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" +checksum = "6ce2691df843ecc5d231c0b14ece2acc3efb62c0a398c7e1d875f3983ce020e3" dependencies = [ "core-foundation-sys", "libc", @@ -4439,7 +4526,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4488,7 +4575,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.114", + "syn 2.0.117", ] [[package]] @@ -4533,7 +4620,7 @@ dependencies = [ "indexmap 1.9.3", "indexmap 2.13.0", "schemars 0.9.0", - "schemars 1.2.0", + "schemars 1.2.1", "serde_core", "serde_json", "serde_with_macros", @@ -4549,7 +4636,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4559,7 +4646,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", - "cpufeatures", + "cpufeatures 0.2.17", "digest", ] @@ -4570,7 +4657,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ "cfg-if", - "cpufeatures", + "cpufeatures 0.2.17", "digest", ] @@ -4613,15 +4700,15 @@ checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" [[package]] name = "siphasher" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" +checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" [[package]] name = "slab" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" +checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" [[package]] name = "smallvec" @@ -4688,7 +4775,7 @@ dependencies = [ "proc-macro2", "quote", "structmeta-derive", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4699,14 +4786,14 @@ checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "subprocess" -version = "0.2.13" +version = "0.2.15" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f75238edb5be30a9ea3035b945eb9c319dde80e879411cdc9a8978e1ac822960" +checksum = "2c56e8662b206b9892d7a5a3f2ecdbcb455d3d6b259111373b7e08b8055158a8" dependencies = [ "libc", "winapi", @@ -4752,9 +4839,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.114" +version = "2.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" dependencies = [ "proc-macro2", "quote", @@ -4778,14 +4865,14 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "system-configuration" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +checksum = "a13f3d0daba03132c0aa9767f98351b3488edc2c100cda2d2ec2b04f3d8d3c8b" dependencies = [ "bitflags", "core-foundation 0.9.4", @@ -4827,12 +4914,12 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.24.0" +version = "3.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "655da9c7eb6305c55742045d5a8d2037996d61d8de95806335c7c86ce0f82e9c" +checksum = "0136791f7c95b1f6dd99f9cc786b91bb81c3800b639b3478e561ddb7be95e5f1" dependencies = [ "fastrand", - "getrandom 0.3.4", + "getrandom 0.4.1", "once_cell", "rustix", "windows-sys 0.61.2", @@ -4865,9 +4952,9 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "testcontainers" -version = "0.26.3" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a81ec0158db5fbb9831e09d1813fe5ea9023a2b5e6e8e0a5fe67e2a820733629" +checksum = 
"c3fdcea723c64cc08dbc533b3761e345a15bf1222cbe6cb611de09b43f17a168" dependencies = [ "astral-tokio-tar", "async-trait", @@ -4878,6 +4965,7 @@ dependencies = [ "etcetera", "ferroid", "futures", + "http", "itertools 0.14.0", "log", "memchr", @@ -4929,7 +5017,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4940,7 +5028,7 @@ checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4954,9 +5042,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.46" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9da98b7d9b7dad93488a84b8248efc35352b0b2657397d4167e7ad67e5d535e5" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" dependencies = [ "deranged", "itoa", @@ -4975,9 +5063,9 @@ checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" [[package]] name = "time-macros" -version = "0.2.26" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc610bac2dcee56805c99642447d4c5dbde4d01f752ffea0199aee1f601dc4" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" dependencies = [ "num-conv", "time-core", @@ -5042,7 +5130,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -5093,9 +5181,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.9.11+spec-1.1.0" +version = "0.9.12+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3afc9a848309fe1aaffaed6e1546a7a14de1f935dc9d89d32afd9a44bab7c46" +checksum = "cf92845e79fc2e2def6a5d828f0801e29a2f8acc037becc5ab08595c7d5e9863" dependencies = [ "indexmap 2.13.0", 
"serde_core", @@ -5152,9 +5240,9 @@ dependencies = [ [[package]] name = "toml_parser" -version = "1.0.6+spec-1.1.0" +version = "1.0.9+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" +checksum = "702d4415e08923e7e1ef96cd5727c0dfed80b4d2fa25db9647fe5eb6f7c5a4c4" dependencies = [ "winnow", ] @@ -5173,9 +5261,9 @@ checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" [[package]] name = "tonic" -version = "0.14.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203" +checksum = "fec7c61a0695dc1887c1b53952990f3ad2e3a31453e1f49f10e75424943a93ec" dependencies = [ "async-trait", "axum", @@ -5202,9 +5290,9 @@ dependencies = [ [[package]] name = "tonic-prost" -version = "0.14.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" +checksum = "a55376a0bbaa4975a3f10d009ad763d8f4108f067c7c2e74f3001fb49778d309" dependencies = [ "bytes", "prost", @@ -5256,7 +5344,7 @@ dependencies = [ "hyper", "local-ip-address", "percent-encoding", - "rand 0.9.2", + "rand 0.10.0", "reqwest", "serde", "serde_bencode", @@ -5397,7 +5485,7 @@ dependencies = [ "clap", "local-ip-address", "mockall", - "rand 0.9.2", + "rand 0.10.0", "regex", "reqwest", "serde", @@ -5467,7 +5555,7 @@ dependencies = [ "serde_json", "serde_with", "thiserror 2.0.18", - "toml 0.9.11+spec-1.1.0", + "toml 0.9.12+spec-1.1.0", "torrust-tracker-located-error", "tracing", "tracing-subscriber", @@ -5479,7 +5567,7 @@ dependencies = [ name = "torrust-tracker-contrib-bencode" version = "3.0.0-develop" dependencies = [ - "criterion 0.8.1", + "criterion 0.8.2", "thiserror 2.0.18", ] @@ -5543,11 +5631,11 @@ dependencies = [ "async-std", "bittorrent-primitives", "chrono", - "criterion 
0.8.1", + "criterion 0.8.2", "crossbeam-skiplist", "futures", "mockall", - "rand 0.9.2", + "rand 0.10.0", "rstest 0.26.1", "serde", "thiserror 2.0.18", @@ -5566,7 +5654,7 @@ dependencies = [ name = "torrust-tracker-test-helpers" version = "3.0.0-develop" dependencies = [ - "rand 0.9.2", + "rand 0.10.0", "torrust-tracker-configuration", "tracing", "tracing-subscriber", @@ -5579,7 +5667,7 @@ dependencies = [ "aquatic_udp_protocol", "async-std", "bittorrent-primitives", - "criterion 0.8.1", + "criterion 0.8.2", "crossbeam-skiplist", "dashmap", "futures", @@ -5606,7 +5694,7 @@ dependencies = [ "futures-util", "local-ip-address", "mockall", - "rand 0.9.2", + "rand 0.10.0", "ringbuf", "serde", "thiserror 2.0.18", @@ -5701,7 +5789,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -5786,9 +5874,9 @@ dependencies = [ [[package]] name = "unicode-ident" -version = "1.0.22" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" [[package]] name = "unicode-linebreak" @@ -5828,9 +5916,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "3.1.4" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d39cb1dbab692d82a977c0392ffac19e188bd9186a9f32806f0aaa859d75585a" +checksum = "fdc97a28575b85cfedf2a7e7d3cc64b3e11bd8ac766666318003abbacc7a21fc" dependencies = [ "base64 0.22.1", "log", @@ -5839,7 +5927,6 @@ dependencies = [ "rustls-pki-types", "ureq-proto", "utf-8", - "webpki-roots", ] [[package]] @@ -5887,11 +5974,11 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.20.0" +version = "1.21.0" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee48d38b119b0cd71fe4141b30f5ba9c7c5d9f4e7a3a8b4a674e4b6ef789976f" +checksum = "b672338555252d43fd2240c714dc444b8c6fb0a5c5335e65a07bba7742735ddb" dependencies = [ - "getrandom 0.3.4", + "getrandom 0.4.1", "js-sys", "rand 0.9.2", "wasm-bindgen", @@ -5955,6 +6042,15 @@ dependencies = [ "wit-bindgen", ] +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen", +] + [[package]] name = "wasm-bindgen" version = "0.2.108" @@ -6001,7 +6097,7 @@ dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "wasm-bindgen-shared", ] @@ -6014,6 +6110,40 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap 2.13.0", + "wasm-encoder", + "wasmparser", +] + +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags", + "hashbrown 0.15.5", + "indexmap 2.13.0", + "semver", +] + [[package]] name = "web-sys" version = "0.3.85" @@ -6036,18 +6166,9 @@ dependencies = [ [[package]] name = "webpki-root-certs" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"36a29fc0408b113f68cf32637857ab740edfafdf460c326cd2afaa2d84cc05dc" -dependencies = [ - "rustls-pki-types", -] - -[[package]] -name = "webpki-roots" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12bed680863276c63889429bfd6cab3b99943659923822de1c8a39c49e4d722c" +checksum = "804f18a4ac2676ffb4e8b5b5fa9ae38af06df08162314f96a68d2a363e21a8ca" dependencies = [ "rustls-pki-types", ] @@ -6104,7 +6225,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -6115,7 +6236,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -6389,6 +6510,88 @@ name = "wit-bindgen" version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] + +[[package]] +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap 2.13.0", + "prettyplease", + "syn 2.0.117", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + 
"quote", + "syn 2.0.117", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags", + "indexmap 2.13.0", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap 2.13.0", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] [[package]] name = "writeable" @@ -6440,7 +6643,7 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "synstructure", ] @@ -6456,11 +6659,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.34" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71ddd76bcebeed25db614f82bf31a9f4222d3fbba300e6fb6c00afa26cbd4d9d" +checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" dependencies = [ - "zerocopy-derive 0.8.34", + "zerocopy-derive 0.8.39", ] [[package]] @@ -6471,18 +6674,18 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "zerocopy-derive" -version = "0.8.34" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8187381b52e32220d50b255276aa16a084ec0a9017a0ca2152a1f55c539758d" +checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -6502,7 +6705,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "synstructure", ] @@ -6542,14 +6745,14 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "zmij" -version = "1.0.17" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02aae0f83f69aafc94776e879363e9771d7ecbffe2c7fbb6c14c5e00dfe88439" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" [[package]] name = "zstd" From f737ace07747c99ef6d393d7cacdff4d7083532b Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Fri, 20 Feb 2026 11:58:56 +0000 Subject: [PATCH 784/802] fix: resolve compilation errors after dependency updates BREAKING CHANGE: cipher crate pinned to v0.4 for compatibility with blowfish - Replace Rng import with RngExt for sample_iter method in rand 0.10 - Pin cipher crate to v0.4 to match blowfish dependency constraints - Add explicit generic-array dependency to udp-tracker-core - Import GenericArray directly from generic_array crate - Update Keeper trait in crypto/keys.rs to use BlockEncrypt + BlockDecrypt bounds - Add BlockEncrypt and BlockDecrypt trait imports to connection_cookie.rs - Fix imports in: - packages/tracker-core/src/authentication/key/peer_key.rs - packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs - packages/udp-tracker-core/src/crypto/keys.rs - packages/test-helpers/src/random.rs - src/console/ci/e2e/tracker_container.rs --- Cargo.lock | 48 +++---------------- packages/test-helpers/src/random.rs | 2 +- .../src/authentication/key/peer_key.rs | 2 +- packages/tracker-core/src/test_helpers.rs | 2 +- packages/udp-tracker-core/Cargo.toml | 3 +- .../src/crypto/ephemeral_instance_keys.rs | 4 +- 
packages/udp-tracker-core/src/crypto/keys.rs | 4 +- src/console/ci/e2e/tracker_container.rs | 2 +- 8 files changed, 17 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e801b94cb..c6b151951 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -751,9 +751,10 @@ dependencies = [ "bittorrent-udp-tracker-protocol", "bloom", "blowfish", - "cipher 0.5.0", + "cipher", "criterion 0.5.1", "futures", + "generic-array", "lazy_static", "mockall", "rand 0.10.0", @@ -831,7 +832,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e412e2cd0f2b2d93e02543ceae7917b3c70331573df19ee046bcbc35e45e87d7" dependencies = [ "byteorder", - "cipher 0.4.4", + "cipher", ] [[package]] @@ -1132,18 +1133,8 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ - "crypto-common 0.1.7", - "inout 0.1.4", -] - -[[package]] -name = "cipher" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64727038c8c5e2bb503a15b9f5b9df50a1da9a33e83e1f93067d914f2c6604a5" -dependencies = [ - "crypto-common 0.2.0", - "inout 0.2.2", + "crypto-common", + "inout", ] [[package]] @@ -1482,15 +1473,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "crypto-common" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "211f05e03c7d03754740fd9e585de910a095d6b99f8bcfffdef8319fa02a8331" -dependencies = [ - "hybrid-array", -] - [[package]] name = "darling" version = "0.20.11" @@ -1663,7 +1645,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", - "crypto-common 0.1.7", + "crypto-common", ] [[package]] @@ -2359,15 +2341,6 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" -[[package]] -name = "hybrid-array" -version = "0.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1b229d73f5803b562cc26e4da0396c8610a4ee209f4fac8fa4f8d709166dc45" -dependencies = [ - "typenum", -] - [[package]] name = "hyper" version = "1.8.1" @@ -2651,15 +2624,6 @@ dependencies = [ "generic-array", ] -[[package]] -name = "inout" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4250ce6452e92010fdf7268ccc5d14faa80bb12fc741938534c58f16804e03c7" -dependencies = [ - "hybrid-array", -] - [[package]] name = "io-enum" version = "1.2.0" diff --git a/packages/test-helpers/src/random.rs b/packages/test-helpers/src/random.rs index f096d695c..62265dbd7 100644 --- a/packages/test-helpers/src/random.rs +++ b/packages/test-helpers/src/random.rs @@ -1,6 +1,6 @@ //! Random data generators for testing. use rand::distr::Alphanumeric; -use rand::{rng, Rng}; +use rand::{rng, RngExt}; /// Returns a random alphanumeric string of a certain size. 
/// diff --git a/packages/tracker-core/src/authentication/key/peer_key.rs b/packages/tracker-core/src/authentication/key/peer_key.rs index 41aba950b..ba648ad2f 100644 --- a/packages/tracker-core/src/authentication/key/peer_key.rs +++ b/packages/tracker-core/src/authentication/key/peer_key.rs @@ -13,7 +13,7 @@ use std::time::Duration; use derive_more::Display; use rand::distr::Alphanumeric; -use rand::{rng, Rng}; +use rand::{rng, RngExt}; use serde::{Deserialize, Serialize}; use thiserror::Error; use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; diff --git a/packages/tracker-core/src/test_helpers.rs b/packages/tracker-core/src/test_helpers.rs index 62649cd22..bf21e6f94 100644 --- a/packages/tracker-core/src/test_helpers.rs +++ b/packages/tracker-core/src/test_helpers.rs @@ -7,7 +7,7 @@ pub(crate) mod tests { use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes, PeerId}; use bittorrent_primitives::info_hash::InfoHash; - use rand::Rng; + use rand::RngExt; use torrust_tracker_configuration::Configuration; #[cfg(test)] use torrust_tracker_configuration::Core; diff --git a/packages/udp-tracker-core/Cargo.toml b/packages/udp-tracker-core/Cargo.toml index b3007eb80..aa12f898f 100644 --- a/packages/udp-tracker-core/Cargo.toml +++ b/packages/udp-tracker-core/Cargo.toml @@ -20,9 +20,10 @@ bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" bittorrent-udp-tracker-protocol = { version = "3.0.0-develop", path = "../udp-protocol" } bloom = "0.3.2" blowfish = "0" -cipher = "0" +cipher = "0.4" criterion = { version = "0.5.1", features = ["async_tokio"] } futures = "0" +generic-array = "0" lazy_static = "1" rand = "0" serde = "1.0.219" diff --git a/packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs b/packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs index 58ba70562..de40e4b1d 100644 --- a/packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs +++ 
b/packages/udp-tracker-core/src/crypto/ephemeral_instance_keys.rs @@ -4,10 +4,10 @@ //! application starts and are not persisted anywhere. use blowfish::BlowfishLE; -use cipher::generic_array::GenericArray; use cipher::{BlockSizeUser, KeyInit}; +use generic_array::GenericArray; use rand::rngs::ThreadRng; -use rand::Rng; +use rand::RngExt; pub type Seed = [u8; 32]; pub type CipherBlowfish = BlowfishLE; diff --git a/packages/udp-tracker-core/src/crypto/keys.rs b/packages/udp-tracker-core/src/crypto/keys.rs index f9a3e361d..bb813b9dc 100644 --- a/packages/udp-tracker-core/src/crypto/keys.rs +++ b/packages/udp-tracker-core/src/crypto/keys.rs @@ -5,6 +5,8 @@ //! //! It also provides the logic for the cipher for encryption and decryption. +use cipher::{BlockDecrypt, BlockEncrypt}; + use self::detail_cipher::CURRENT_CIPHER; use self::detail_seed::CURRENT_SEED; pub use crate::crypto::ephemeral_instance_keys::CipherArrayBlowfish; @@ -13,7 +15,7 @@ use crate::crypto::ephemeral_instance_keys::{CipherBlowfish, Seed, RANDOM_CIPHER /// This trait is for structures that can keep and provide a seed. pub trait Keeper { type Seed: Sized + Default + AsMut<[u8]>; - type Cipher: cipher::BlockCipher; + type Cipher: BlockEncrypt + BlockDecrypt; /// It returns a reference to the seed that is keeping. 
fn get_seed() -> &'static Self::Seed; diff --git a/src/console/ci/e2e/tracker_container.rs b/src/console/ci/e2e/tracker_container.rs index a3845c103..1a7717a41 100644 --- a/src/console/ci/e2e/tracker_container.rs +++ b/src/console/ci/e2e/tracker_container.rs @@ -1,7 +1,7 @@ use std::time::Duration; use rand::distr::Alphanumeric; -use rand::Rng; +use rand::RngExt; use super::docker::{RunOptions, RunningContainer}; use super::logs_parser::RunningServices; From ed0937bfe450a306de84f90660683c7a7fb16f56 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 3 Mar 2026 07:55:53 +0000 Subject: [PATCH 785/802] chore(deps): update dependencies ``` cargo update Updating crates.io index Locking 35 packages to latest compatible versions Updating async-compression v0.4.40 -> v0.4.41 Updating aws-lc-rs v1.16.0 -> v1.16.1 Updating aws-lc-sys v0.37.1 -> v0.38.0 Updating chrono v0.4.43 -> v0.4.44 Updating deranged v0.5.6 -> v0.5.8 Updating derive_utils v0.15.0 -> v0.15.1 Updating io-enum v1.2.0 -> v1.2.1 Updating ipnet v2.11.0 -> v2.12.0 Updating js-sys v0.3.85 -> v0.3.91 Updating libredox v0.1.12 -> v0.1.14 Updating libz-sys v1.1.23 -> v1.1.24 Updating linux-raw-sys v0.11.0 -> v0.12.1 Updating owo-colors v4.2.3 -> v4.3.0 Updating pin-project v1.1.10 -> v1.1.11 Updating pin-project-internal v1.1.10 -> v1.1.11 Updating pin-project-lite v0.2.16 -> v0.2.17 Updating piper v0.2.4 -> v0.2.5 Adding plain v0.2.3 Updating redox_syscall v0.7.1 -> v0.7.3 Updating regex-syntax v0.8.9 -> v0.8.10 Updating rustix v1.1.3 -> v1.1.4 Updating rustls v0.23.36 -> v0.23.37 Updating serde_with v3.16.1 -> v3.17.0 Updating serde_with_macros v3.16.1 -> v3.17.0 Updating tempfile v3.25.0 -> v3.26.0 Updating testcontainers v0.27.0 -> v0.27.1 Updating tokio-macros v2.6.0 -> v2.6.1 Updating wasm-bindgen v0.2.108 -> v0.2.114 Updating wasm-bindgen-futures v0.4.58 -> v0.4.64 Updating wasm-bindgen-macro v0.2.108 -> v0.2.114 Updating wasm-bindgen-macro-support v0.2.108 -> v0.2.114 Updating wasm-bindgen-shared v0.2.108 
-> v0.2.114 Updating web-sys v0.3.85 -> v0.3.91 Updating zerocopy v0.8.39 -> v0.8.40 Updating zerocopy-derive v0.8.39 -> v0.8.40 note: pass `--verbose` to see 9 unchanged dependencies behind latest ``` --- Cargo.lock | 151 ++++++++++++++++++++++++++++------------------------- 1 file changed, 79 insertions(+), 72 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c6b151951..6894e2bcd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -239,9 +239,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.40" +version = "0.4.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d67d43201f4d20c78bcda740c142ca52482d81da80681533d33bf3f0596c8e2" +checksum = "d0f9ee0f6e02ffd7ad5816e9464499fba7b3effd01123b515c41d1697c43dad1" dependencies = [ "compression-codecs", "compression-core", @@ -397,9 +397,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-lc-rs" -version = "1.16.0" +version = "1.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9a7b350e3bb1767102698302bc37256cbd48422809984b98d292c40e2579aa9" +checksum = "94bffc006df10ac2a68c83692d734a465f8ee6c5b384d8545a636f81d858f4bf" dependencies = [ "aws-lc-sys", "zeroize", @@ -407,9 +407,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.37.1" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b092fe214090261288111db7a2b2c2118e5a7f30dc2569f1732c4069a6840549" +checksum = "4321e568ed89bb5a7d291a7f37997c2c0df89809d7b6d12062c81ddb54aa782e" dependencies = [ "cc", "cmake", @@ -1090,9 +1090,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.43" +version = "0.4.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" +checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0" dependencies = [ "iana-time-zone", 
"num-traits", @@ -1559,9 +1559,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.5.6" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc3dc5ad92c2e2d1c193bbbbdf2ea477cb81331de4f3103f267ca18368b988c4" +checksum = "7cd812cc2bc1d69d4764bd80df88b4317eaef9e773c75226407d9bc0876b211c" dependencies = [ "powerfmt", "serde_core", @@ -1623,9 +1623,9 @@ dependencies = [ [[package]] name = "derive_utils" -version = "0.15.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccfae181bab5ab6c5478b2ccb69e4c68a02f8c3ec72f6616bfec9dbc599d2ee0" +checksum = "362f47930db19fe7735f527e6595e4900316b893ebf6d48ad3d31be928d57dd6" dependencies = [ "proc-macro2", "quote", @@ -2216,7 +2216,7 @@ checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", - "zerocopy 0.8.39", + "zerocopy 0.8.40", ] [[package]] @@ -2626,18 +2626,18 @@ dependencies = [ [[package]] name = "io-enum" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d197db2f7ebf90507296df3aebaf65d69f5dce8559d8dbd82776a6cadab61bbf" +checksum = "7de9008599afe8527a8c9d70423437363b321649161e98473f433de802d76107" dependencies = [ "derive_utils", ] [[package]] name = "ipnet" -version = "2.11.0" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2" [[package]] name = "iri-string" @@ -2739,9 +2739,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.85" +version = "0.3.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" +checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c" dependencies = [ 
"once_cell", "wasm-bindgen", @@ -2792,13 +2792,14 @@ checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" [[package]] name = "libredox" -version = "0.1.12" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616" +checksum = "1744e39d1d6a9948f4f388969627434e31128196de472883b39f148769bfe30a" dependencies = [ "bitflags", "libc", - "redox_syscall 0.7.1", + "plain", + "redox_syscall 0.7.3", ] [[package]] @@ -2814,9 +2815,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.23" +version = "1.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" +checksum = "4735e9cbde5aac84a5ce588f6b23a90b9b0b528f6c5a8db8a4aff300463a0839" dependencies = [ "cc", "pkg-config", @@ -2825,9 +2826,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.11.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" +checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" [[package]] name = "litemap" @@ -3303,9 +3304,9 @@ dependencies = [ [[package]] name = "owo-colors" -version = "4.2.3" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c6901729fa79e91a0913333229e9ca5dc725089d1c363b2f4b4760709dc4a52" +checksum = "d211803b9b6b570f68772237e415a029d5a50c65d382910b879fb19d3271f94d" [[package]] name = "page_size" @@ -3450,18 +3451,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.10" +version = "1.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +checksum = "f1749c7ed4bcaf4c3d0a3efc28538844fb29bcdd7d2b67b2be7e20ba861ff517" dependencies = [ 
"pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.10" +version = "1.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +checksum = "d9b20ed30f105399776b9c883e68e536ef602a16ae6f596d2c473591d6ad64c6" dependencies = [ "proc-macro2", "quote", @@ -3470,9 +3471,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" +checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd" [[package]] name = "pin-utils" @@ -3482,9 +3483,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "piper" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" +checksum = "c835479a4443ded371d6c535cbfd8d31ad92c5d23ae9770a61bc155e4992a3c1" dependencies = [ "atomic-waker", "fastrand", @@ -3497,6 +3498,12 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +[[package]] +name = "plain" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" + [[package]] name = "plotters" version = "0.3.7" @@ -3575,7 +3582,7 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.8.39", + "zerocopy 0.8.40", ] [[package]] @@ -3956,9 +3963,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.7.1" +version = "0.7.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "35985aa610addc02e24fc232012c86fd11f14111180f902b67e2d5331f8ebf2b" +checksum = "6ce70a74e890531977d37e532c34d45e9055d2409ed08ddba14529471ed0be16" dependencies = [ "bitflags", ] @@ -4008,9 +4015,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" +checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" [[package]] name = "relative-path" @@ -4245,9 +4252,9 @@ dependencies = [ [[package]] name = "rustix" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" +checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190" dependencies = [ "bitflags", "errno", @@ -4258,9 +4265,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.36" +version = "0.23.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" +checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4" dependencies = [ "aws-lc-rs", "log", @@ -4574,9 +4581,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.16.1" +version = "3.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fa237f2807440d238e0364a218270b98f767a00d3dada77b1c53ae88940e2e7" +checksum = "381b283ce7bc6b476d903296fb59d0d36633652b633b27f64db4fb46dcbfc3b9" dependencies = [ "base64 0.22.1", "chrono", @@ -4593,9 +4600,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.16.1" +version = "3.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52a8e3ca0ca629121f70ab50f95249e5a6f925cc0f6ffe8256c45b728875706c" +checksum 
= "a6d4e30573c8cb306ed6ab1dca8423eec9a463ea0e155f45399455e0368b27e0" dependencies = [ "darling 0.21.3", "proc-macro2", @@ -4878,9 +4885,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.25.0" +version = "3.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0136791f7c95b1f6dd99f9cc786b91bb81c3800b639b3478e561ddb7be95e5f1" +checksum = "82a72c767771b47409d2345987fda8628641887d5466101319899796367354a0" dependencies = [ "fastrand", "getrandom 0.4.1", @@ -4916,9 +4923,9 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" [[package]] name = "testcontainers" -version = "0.27.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fdcea723c64cc08dbc533b3761e345a15bf1222cbe6cb611de09b43f17a168" +checksum = "c1c0624faaa317c56d6d19136580be889677259caf5c897941c6f446b4655068" dependencies = [ "astral-tokio-tar", "async-trait", @@ -5088,9 +5095,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.6.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" +checksum = "5c55a2eff8b69ce66c84f85e1da1c233edc36ceb85a2058d11b0d6a3c7e7569c" dependencies = [ "proc-macro2", "quote", @@ -6017,9 +6024,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.108" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" +checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e" dependencies = [ "cfg-if", "once_cell", @@ -6030,9 +6037,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.58" +version = "0.4.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" +checksum = 
"e9c5522b3a28661442748e09d40924dfb9ca614b21c00d3fd135720e48b67db8" dependencies = [ "cfg-if", "futures-util", @@ -6044,9 +6051,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.108" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" +checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6054,9 +6061,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.108" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" +checksum = "03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3" dependencies = [ "bumpalo", "proc-macro2", @@ -6067,9 +6074,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.108" +version = "0.2.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" +checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16" dependencies = [ "unicode-ident", ] @@ -6110,9 +6117,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.85" +version = "0.3.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" +checksum = "854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9" dependencies = [ "js-sys", "wasm-bindgen", @@ -6623,11 +6630,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.39" +version = "0.8.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" +checksum = "a789c6e490b576db9f7e6b6d661bcc9799f7c0ac8352f56ea20193b2681532e5" 
dependencies = [ - "zerocopy-derive 0.8.39", + "zerocopy-derive 0.8.40", ] [[package]] @@ -6643,9 +6650,9 @@ dependencies = [ [[package]] name = "zerocopy-derive" -version = "0.8.39" +version = "0.8.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" +checksum = "f65c489a7071a749c849713807783f70672b28094011623e200cb86dcb835953" dependencies = [ "proc-macro2", "quote", From 29edbb6d154f2230d7695d2bd47ca5084fbc53a4 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 3 Mar 2026 08:04:34 +0000 Subject: [PATCH 786/802] fix: collapse nested if into match arm guard in pagination test Resolves clippy::collapsible_match lint by moving the inner if condition into the match arm guard for the Pagination { limit: 1, offset: 1 } case. --- .../tests/repository/mod.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/packages/torrent-repository-benchmarking/tests/repository/mod.rs b/packages/torrent-repository-benchmarking/tests/repository/mod.rs index c3589ce68..ec7e68bae 100644 --- a/packages/torrent-repository-benchmarking/tests/repository/mod.rs +++ b/packages/torrent-repository-benchmarking/tests/repository/mod.rs @@ -364,12 +364,10 @@ async fn it_should_get_paginated( } // it should return the only the second entry if both the limit and the offset are one. - Pagination { limit: 1, offset: 1 } => { - if info_hashes.len() > 1 { - let page = repo.get_paginated(Some(&paginated)).await; - assert_eq!(page.len(), 1); - assert_eq!(page[0].0, info_hashes[1]); - } + Pagination { limit: 1, offset: 1 } if info_hashes.len() > 1 => { + let page = repo.get_paginated(Some(&paginated)).await; + assert_eq!(page.len(), 1); + assert_eq!(page[0].0, info_hashes[1]); } // the other cases are not yet tested. 
_ => {} From 7e322eb7bf766f2a6c5ee376b42deae21b28bcd9 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 3 Mar 2026 08:08:11 +0000 Subject: [PATCH 787/802] ci: upgrade actions/upload-artifact from v6 to v7 in generate_coverage_pr workflow --- .github/workflows/generate_coverage_pr.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/generate_coverage_pr.yaml b/.github/workflows/generate_coverage_pr.yaml index f762207cf..a3f97dbf2 100644 --- a/.github/workflows/generate_coverage_pr.yaml +++ b/.github/workflows/generate_coverage_pr.yaml @@ -59,13 +59,13 @@ jobs: # Triggered sub-workflow is not able to detect the original commit/PR which is available # in this workflow. - name: Store PR number - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: pr_number path: pr_number.txt - name: Store commit SHA - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: commit_sha path: commit_sha.txt @@ -74,7 +74,7 @@ jobs: # is executed by a different workflow `upload_coverage.yml`. The reason for this # split is because `on.pull_request` workflows don't have access to secrets. 
- name: Store coverage report in artifacts - uses: actions/upload-artifact@v6 + uses: actions/upload-artifact@v7 with: name: codecov_report path: ./codecov.json From de471450fb2a555816b111cd59ae03dade2b6fcb Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 3 Mar 2026 18:14:57 +0000 Subject: [PATCH 788/802] fix: add sleep after HTTP server stop in health check test to avoid race condition --- packages/axum-health-check-api-server/tests/server/contract.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/axum-health-check-api-server/tests/server/contract.rs b/packages/axum-health-check-api-server/tests/server/contract.rs index 1d1ba3539..af1c0cff9 100644 --- a/packages/axum-health-check-api-server/tests/server/contract.rs +++ b/packages/axum-health-check-api-server/tests/server/contract.rs @@ -202,6 +202,9 @@ mod http { service.server.stop().await.expect("it should stop udp server"); + // Give the OS a moment to fully release the TCP port after the server stops. + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + { let config = configuration.health_check_api.clone(); let env = Started::new(&config.into(), registar).await; From 1228a2b986e41fa195fd5418a5fecd028add1524 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Tue, 7 Apr 2026 19:07:53 +0100 Subject: [PATCH 789/802] chore(deps): update dependencies ``` Updating crates.io index Locking 104 packages to latest compatible versions Updating anstream v0.6.21 -> v1.0.0 Updating anstyle v1.0.13 -> v1.0.14 Updating anstyle-parse v0.2.7 -> v1.0.0 Updating arc-swap v1.8.2 -> v1.9.1 Updating astral-tokio-tar v0.5.6 -> v0.6.0 Updating aws-lc-rs v1.16.1 -> v1.16.2 Updating aws-lc-sys v0.38.0 -> v0.39.1 Updating bollard v0.20.1 -> v0.20.2 Updating borsh v1.6.0 -> v1.6.1 Updating borsh-derive v1.6.0 -> v1.6.1 Updating cc v1.2.56 -> v1.2.59 Updating clap v4.5.60 -> v4.6.0 Updating clap_builder v4.5.60 -> v4.6.0 Updating clap_derive v4.5.55 -> v4.6.0 Updating clap_lex v1.0.0 -> v1.1.0 Updating 
cmake v0.1.57 -> v0.1.58 Updating colorchoice v1.0.4 -> v1.0.5 Updating darling v0.21.3 -> v0.23.0 Updating darling_core v0.21.3 -> v0.23.0 Updating darling_macro v0.21.3 -> v0.23.0 Updating env_filter v1.0.0 -> v1.0.1 Updating env_logger v0.11.9 -> v0.11.10 Updating fastrand v2.3.0 -> v2.4.1 Updating fragile v2.0.1 -> v2.1.0 Updating getrandom v0.4.1 -> v0.4.2 Updating hyper v1.8.1 -> v1.9.0 Updating icu_collections v2.1.1 -> v2.2.0 Updating icu_locale_core v2.1.1 -> v2.2.0 Updating icu_normalizer v2.1.1 -> v2.2.0 Updating icu_normalizer_data v2.1.1 -> v2.2.0 Updating icu_properties v2.1.2 -> v2.2.0 Updating icu_properties_data v2.1.2 -> v2.2.0 Updating icu_provider v2.1.1 -> v2.2.0 Updating indexmap v2.13.0 -> v2.13.1 Updating iri-string v0.7.10 -> v0.7.12 Updating itoa v1.0.17 -> v1.0.18 Removing jni-sys v0.3.0 Adding jni-sys v0.3.1 Adding jni-sys v0.4.1 Adding jni-sys-macros v0.4.1 Updating js-sys v0.3.91 -> v0.3.94 Updating libc v0.2.182 -> v0.2.184 Updating libredox v0.1.14 -> v0.1.15 Updating libsqlite3-sys v0.36.0 -> v0.37.0 Updating libz-sys v1.1.24 -> v1.1.28 Updating litemap v0.8.1 -> v0.8.2 Updating local-ip-address v0.6.10 -> v0.6.11 Updating mio v1.1.1 -> v1.2.0 Updating num-conv v0.2.0 -> v0.2.1 Updating once_cell v1.21.3 -> v1.21.4 Updating openssl v0.10.75 -> v0.10.76 Updating openssl-sys v0.9.111 -> v0.9.112 Updating portable-atomic-util v0.2.5 -> v0.2.6 Updating potential_utf v0.1.4 -> v0.1.5 Updating proc-macro-crate v3.4.0 -> v3.5.0 Updating quinn-proto v0.11.13 -> v0.11.14 Updating quote v1.0.44 -> v1.0.45 Adding r-efi v6.0.0 Updating r2d2_sqlite v0.32.0 -> v0.33.0 Updating rusqlite v0.38.0 -> v0.39.0 Updating rust_decimal v1.40.0 -> v1.41.0 Updating rustc-hash v2.1.1 -> v2.1.2 Updating rustls-webpki v0.103.9 -> v0.103.10 Updating schannel v0.1.28 -> v0.1.29 Updating semver v1.0.27 -> v1.0.28 Updating serde_spanned v1.0.4 -> v1.1.1 Updating serde_with v3.17.0 -> v3.18.0 Updating serde_with_macros v3.17.0 -> v3.18.0 Updating simd-adler32 v0.3.8 
-> v0.3.9 Updating socket2 v0.6.2 -> v0.6.3 Updating tempfile v3.26.0 -> v3.27.0 Updating terminal_size v0.4.3 -> v0.4.4 Updating testcontainers v0.27.1 -> v0.27.2 Updating tinystr v0.8.2 -> v0.8.3 Updating tinyvec v1.10.0 -> v1.11.0 Updating tokio v1.49.0 -> v1.51.0 Updating tokio-macros v2.6.1 -> v2.7.0 Adding toml_datetime v1.1.1+spec-1.1.0 Updating toml_edit v0.23.10+spec-1.0.0 -> v0.25.10+spec-1.1.0 Updating toml_parser v1.0.9+spec-1.1.0 -> v1.1.2+spec-1.1.0 Updating toml_writer v1.0.6+spec-1.1.0 -> v1.1.1+spec-1.1.0 Updating tracing-subscriber v0.3.22 -> v0.3.23 Updating unicode-segmentation v1.12.0 -> v1.13.2 Updating ureq v3.2.0 -> v3.3.0 Updating ureq-proto v0.5.3 -> v0.6.0 Removing utf-8 v0.7.6 Adding utf8-zero v0.8.1 Updating uuid v1.21.0 -> v1.23.0 Updating wasm-bindgen v0.2.114 -> v0.2.117 Updating wasm-bindgen-futures v0.4.64 -> v0.4.67 Updating wasm-bindgen-macro v0.2.114 -> v0.2.117 Updating wasm-bindgen-macro-support v0.2.114 -> v0.2.117 Updating wasm-bindgen-shared v0.2.114 -> v0.2.117 Updating web-sys v0.3.91 -> v0.3.94 Removing winnow v0.7.14 Adding winnow v0.7.15 Adding winnow v1.0.1 Updating writeable v0.6.2 -> v0.6.3 Updating yoke v0.8.1 -> v0.8.2 Updating yoke-derive v0.8.1 -> v0.8.2 Updating zerocopy v0.8.40 -> v0.8.48 Updating zerocopy-derive v0.8.40 -> v0.8.48 Updating zerofrom v0.1.6 -> v0.1.7 Updating zerofrom-derive v0.1.6 -> v0.1.7 Updating zerotrie v0.2.3 -> v0.2.4 Updating zerovec v0.11.5 -> v0.11.6 Updating zerovec-derive v0.11.2 -> v0.11.3 note: pass `--verbose` to see 9 unchanged dependencies behind latest ``` --- Cargo.lock | 543 +++++++++++++++++++++++++++++------------------------ 1 file changed, 296 insertions(+), 247 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6894e2bcd..9e0911944 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -84,9 +84,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.21" +version = "1.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" +checksum = "824a212faf96e9acacdbd09febd34438f8f711fb84e09a8916013cd7815ca28d" dependencies = [ "anstyle", "anstyle-parse", @@ -99,15 +99,15 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" +checksum = "940b3a0ca603d1eade50a4846a2afffd5ef57a9feac2c0e2ec2e14f9ead76000" [[package]] name = "anstyle-parse" -version = "0.2.7" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" +checksum = "52ce7f38b242319f7cabaa6813055467063ecdc9d355bbb4ce0c68908cd8130e" dependencies = [ "utf8parse", ] @@ -175,9 +175,9 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.8.2" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9f3647c145568cec02c42054e07bdf9a5a698e15b466fb2341bfc393cd24aa5" +checksum = "6a3a1fd6f75306b68087b831f025c712524bcb19aad54e557b1129cfa0a2b207" dependencies = [ "rustversion", ] @@ -190,9 +190,9 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "astral-tokio-tar" -version = "0.5.6" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec179a06c1769b1e42e1e2cbe74c7dcdb3d6383c838454d063eaac5bbb7ebbe5" +checksum = "3c23f3af104b40a3430ccb90ed5f7bd877a8dc5c26fc92fde51a22b40890dcf9" dependencies = [ "filetime", "futures-core", @@ -397,9 +397,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-lc-rs" -version = "1.16.1" +version = "1.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"94bffc006df10ac2a68c83692d734a465f8ee6c5b384d8545a636f81d858f4bf" +checksum = "a054912289d18629dc78375ba2c3726a3afe3ff71b4edba9dedfca0e3446d1fc" dependencies = [ "aws-lc-sys", "zeroize", @@ -407,9 +407,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.38.0" +version = "0.39.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4321e568ed89bb5a7d291a7f37997c2c0df89809d7b6d12062c81ddb54aa782e" +checksum = "83a25cf98105baa966497416dbd42565ce3a8cf8dbfd59803ec9ad46f3126399" dependencies = [ "cc", "cmake", @@ -837,9 +837,9 @@ dependencies = [ [[package]] name = "bollard" -version = "0.20.1" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "227aa051deec8d16bd9c34605e7aaf153f240e35483dd42f6f78903847934738" +checksum = "ee04c4c84f1f811b017f2fbb7dd8815c976e7ca98593de9c1e2afad0f636bff4" dependencies = [ "async-stream", "base64 0.22.1", @@ -911,19 +911,20 @@ dependencies = [ [[package]] name = "borsh" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1da5ab77c1437701eeff7c88d968729e7766172279eab0676857b3d63af7a6f" +checksum = "cfd1e3f8955a5d7de9fab72fc8373fade9fb8a703968cb200ae3dc6cf08e185a" dependencies = [ "borsh-derive", + "bytes", "cfg_aliases", ] [[package]] name = "borsh-derive" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0686c856aa6aac0c4498f936d7d6a02df690f614c03e4d906d1018062b5c5e2c" +checksum = "bfcfdc083699101d5a7965e49925975f2f55060f94f9a05e7187be95d530ca59" dependencies = [ "once_cell", "proc-macro-crate", @@ -1040,9 +1041,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.56" +version = "1.2.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2" +checksum = "b7a4d3ec6524d28a329fc53654bbadc9bdd7b0431f5d65f1a56ffb28a1ee5283" 
dependencies = [ "find-msvc-tools", "jobserver", @@ -1150,9 +1151,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.60" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2797f34da339ce31042b27d23607e051786132987f595b02ba4f6a6dffb7030a" +checksum = "b193af5b67834b676abd72466a96c1024e6a6ad978a1f484bd90b85c94041351" dependencies = [ "clap_builder", "clap_derive", @@ -1160,9 +1161,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.60" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24a241312cea5059b13574bb9b3861cabf758b879c15190b37b6d6fd63ab6876" +checksum = "714a53001bf66416adb0e2ef5ac857140e7dc3a0c48fb28b2f10762fc4b5069f" dependencies = [ "anstream", "anstyle", @@ -1172,9 +1173,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.55" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5" +checksum = "1110bd8a634a1ab8cb04345d8d878267d57c3cf1b38d91b71af6686408bbca6a" dependencies = [ "heck", "proc-macro2", @@ -1184,24 +1185,24 @@ dependencies = [ [[package]] name = "clap_lex" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" +checksum = "c8d4a3bb8b1e0c1050499d1815f5ab16d04f0959b233085fb31653fbfc9d98f9" [[package]] name = "cmake" -version = "0.1.57" +version = "0.1.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" +checksum = "c0f78a02292a74a88ac736019ab962ece0bc380e3f977bf72e376c5d78ff0678" dependencies = [ "cc", ] [[package]] name = "colorchoice" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" +checksum = "1d07550c9036bf2ae0c684c4297d503f838287c83c53686d05370d0e139ae570" [[package]] name = "combine" @@ -1485,12 +1486,12 @@ dependencies = [ [[package]] name = "darling" -version = "0.21.3" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" +checksum = "25ae13da2f202d56bd7f91c25fba009e7717a1e4a1cc98a76d844b65ae912e9d" dependencies = [ - "darling_core 0.21.3", - "darling_macro 0.21.3", + "darling_core 0.23.0", + "darling_macro 0.23.0", ] [[package]] @@ -1509,11 +1510,10 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.21.3" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" +checksum = "9865a50f7c335f53564bb694ef660825eb8610e0a53d3e11bf1b0d3df31e03b0" dependencies = [ - "fnv", "ident_case", "proc-macro2", "quote", @@ -1534,11 +1534,11 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.21.3" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" +checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d" dependencies = [ - "darling_core 0.21.3", + "darling_core 0.23.0", "quote", "syn 2.0.117", ] @@ -1705,9 +1705,9 @@ dependencies = [ [[package]] name = "env_filter" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a1c3cc8e57274ec99de65301228b537f1e4eedc1b8e0f9411c6caac8ae7308f" +checksum = "32e90c2accc4b07a8456ea0debdc2e7587bdd890680d71173a15d4ae604f6eef" dependencies = [ "log", "regex", @@ -1715,9 +1715,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.11.9" +version = "0.11.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2daee4ea451f429a58296525ddf28b45a3b64f1acf6587e2067437bb11e218d" +checksum = "0621c04f2196ac3f488dd583365b9c09be011a4ab8b9f37248ffcc8f6198b56a" dependencies = [ "env_filter", "log", @@ -1790,9 +1790,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "2.3.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" +checksum = "9f1f227452a390804cdb637b74a86990f2a7d7ba4b7d5693aac9b4dd6defd8d6" [[package]] name = "ferroid" @@ -1913,9 +1913,12 @@ dependencies = [ [[package]] name = "fragile" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28dd6caf6059519a65843af8fe2a3ae298b14b80179855aeb4adc2c1934ee619" +checksum = "8878864ba14bb86e818a412bfd6f18f9eabd4ec0f008a28e8f7eb61db532fcf9" +dependencies = [ + "futures-core", +] [[package]] name = "frunk" @@ -2134,20 +2137,20 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "r-efi", + "r-efi 5.3.0", "wasip2", "wasm-bindgen", ] [[package]] name = "getrandom" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "139ef39800118c7683f2fd3c98c1b23c09ae076556b435f8e9064ae108aaeeec" +checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555" dependencies = [ "cfg-if", "libc", - "r-efi", + "r-efi 6.0.0", "rand_core 0.10.0", "wasip2", "wasip3", @@ -2201,7 +2204,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.13.0", + "indexmap 2.13.1", "slab", "tokio", "tokio-util", @@ -2216,7 +2219,7 @@ checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", - "zerocopy 0.8.40", + "zerocopy 0.8.48", ] [[package]] @@ -2343,9 +2346,9 @@ checksum = 
"df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" +checksum = "6299f016b246a94207e63da54dbe807655bf9e00044f73ded42c3ac5305fbcca" dependencies = [ "atomic-waker", "bytes", @@ -2358,7 +2361,6 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "pin-utils", "smallvec", "tokio", "want", @@ -2425,7 +2427,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2 0.6.2", + "socket2 0.6.3", "system-configuration", "tokio", "tower-service", @@ -2474,12 +2476,13 @@ dependencies = [ [[package]] name = "icu_collections" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +checksum = "2984d1cd16c883d7935b9e07e44071dca8d917fd52ecc02c04d5fa0b5a3f191c" dependencies = [ "displaydoc", "potential_utf", + "utf8_iter", "yoke", "zerofrom", "zerovec", @@ -2487,9 +2490,9 @@ dependencies = [ [[package]] name = "icu_locale_core" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" +checksum = "92219b62b3e2b4d88ac5119f8904c10f8f61bf7e95b640d25ba3075e6cac2c29" dependencies = [ "displaydoc", "litemap", @@ -2500,9 +2503,9 @@ dependencies = [ [[package]] name = "icu_normalizer" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +checksum = "c56e5ee99d6e3d33bd91c5d85458b6005a22140021cc324cea84dd0e72cff3b4" dependencies = [ "icu_collections", "icu_normalizer_data", @@ -2514,15 +2517,15 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = 
"2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" +checksum = "da3be0ae77ea334f4da67c12f149704f19f81d1adf7c51cf482943e84a2bad38" [[package]] name = "icu_properties" -version = "2.1.2" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" +checksum = "bee3b67d0ea5c2cca5003417989af8996f8604e34fb9ddf96208a033901e70de" dependencies = [ "icu_collections", "icu_locale_core", @@ -2534,15 +2537,15 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "2.1.2" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" +checksum = "8e2bbb201e0c04f7b4b3e14382af113e17ba4f63e2c9d2ee626b720cbce54a14" [[package]] name = "icu_provider" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +checksum = "139c4cf31c8b5f33d7e199446eff9c1e02decfc2f0eec2c8d71f65befa45b421" dependencies = [ "displaydoc", "icu_locale_core", @@ -2599,9 +2602,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.13.0" +version = "2.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" +checksum = "45a8a2b9cb3e0b0c1803dbb0758ffac5de2f425b23c28f518faabd9d805342ff" dependencies = [ "equivalent", "hashbrown 0.16.1", @@ -2641,9 +2644,9 @@ checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2" [[package]] name = "iri-string" -version = "0.7.10" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a" +checksum 
= "25e659a4bb38e810ebc252e53b5814ff908a8c58c2a9ce2fae1bbec24cbf4e20" dependencies = [ "memchr", "serde", @@ -2701,9 +2704,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" +checksum = "8f42a60cbdf9a97f5d2305f08a87dc4e09308d1276d28c869c684d7777685682" [[package]] name = "jni" @@ -2714,7 +2717,7 @@ dependencies = [ "cesu8", "cfg-if", "combine", - "jni-sys", + "jni-sys 0.3.1", "log", "thiserror 1.0.69", "walkdir", @@ -2723,9 +2726,31 @@ dependencies = [ [[package]] name = "jni-sys" -version = "0.3.0" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41a652e1f9b6e0275df1f15b32661cf0d4b78d4d87ddec5e0c3c20f097433258" +dependencies = [ + "jni-sys 0.4.1", +] + +[[package]] +name = "jni-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6377a88cb3910bee9b0fa88d4f42e1d2da8e79915598f65fb0c7ee14c878af2" +dependencies = [ + "jni-sys-macros", +] + +[[package]] +name = "jni-sys-macros" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" +checksum = "38c0b942f458fe50cdac086d2f946512305e5631e720728f2a61aabcd47a6264" +dependencies = [ + "quote", + "syn 2.0.117", +] [[package]] name = "jobserver" @@ -2739,10 +2764,12 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.91" +version = "0.3.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c" +checksum = "2e04e2ef80ce82e13552136fabeef8a5ed1f985a96805761cbb9a2c34e7664d9" dependencies = [ + "cfg-if", + "futures-util", "once_cell", "wasm-bindgen", ] @@ -2770,9 +2797,9 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" 
[[package]] name = "libc" -version = "0.2.182" +version = "0.2.184" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112" +checksum = "48f5d2a454e16a5ea0f4ced81bd44e4cfc7bd3a507b61887c99fd3538b28e4af" [[package]] name = "libloading" @@ -2792,9 +2819,9 @@ checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" [[package]] name = "libredox" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1744e39d1d6a9948f4f388969627434e31128196de472883b39f148769bfe30a" +checksum = "7ddbf48fd451246b1f8c2610bd3b4ac0cc6e149d89832867093ab69a17194f08" dependencies = [ "bitflags", "libc", @@ -2804,9 +2831,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95b4103cffefa72eb8428cb6b47d6627161e51c2739fc5e3b734584157bc642a" +checksum = "b1f111c8c41e7c61a49cd34e44c7619462967221a6443b0ec299e0ac30cfb9b1" dependencies = [ "cc", "pkg-config", @@ -2815,9 +2842,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.24" +version = "1.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4735e9cbde5aac84a5ce588f6b23a90b9b0b528f6c5a8db8a4aff300463a0839" +checksum = "fc3a226e576f50782b3305c5ccf458698f92798987f551c6a02efe8276721e22" dependencies = [ "cc", "pkg-config", @@ -2832,15 +2859,15 @@ checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" [[package]] name = "litemap" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" +checksum = "92daf443525c4cce67b150400bc2316076100ce0b3686209eb8cf3c31612e6f0" [[package]] name = "local-ip-address" -version = "0.6.10" +version = "0.6.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "79ef8c257c92ade496781a32a581d43e3d512cf8ce714ecf04ea80f93ed0ff4a" +checksum = "d4a59a0cb1c7f84471ad5cd38d768c2a29390d17f1ff2827cdf49bc53e8ac70b" dependencies = [ "libc", "neli", @@ -2946,9 +2973,9 @@ dependencies = [ [[package]] name = "mio" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" +checksum = "50b7e5b27aa02a74bac8c3f23f448f8d87ff11f92d3aac1a6ed369ee08cc56c1" dependencies = [ "libc", "wasi", @@ -3187,9 +3214,9 @@ dependencies = [ [[package]] name = "num-conv" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" +checksum = "c6673768db2d862beb9b39a78fdcb1a69439615d5794a1be50caa9bc92c81967" [[package]] name = "num-integer" @@ -3242,9 +3269,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.21.3" +version = "1.21.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +checksum = "9f7c3e4beb33f85d45ae3e3a1792185706c8e16d043238c593331cc7cd313b50" [[package]] name = "once_cell_polyfill" @@ -3260,9 +3287,9 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" [[package]] name = "openssl" -version = "0.10.75" +version = "0.10.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" +checksum = "951c002c75e16ea2c65b8c7e4d3d51d5530d8dfa7d060b4776828c88cfb18ecf" dependencies = [ "bitflags", "cfg-if", @@ -3292,9 +3319,9 @@ checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" [[package]] name = "openssl-sys" -version = "0.9.111" +version = "0.9.112" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" +checksum = "57d55af3b3e226502be1526dfdba67ab0e9c96fc293004e79576b2b9edb0dbdb" dependencies = [ "cc", "libc", @@ -3554,18 +3581,18 @@ checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" [[package]] name = "portable-atomic-util" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a9db96d7fa8782dd8c15ce32ffe8680bbd1e978a43bf51a34d39483540495f5" +checksum = "091397be61a01d4be58e7841595bd4bfedb15f1cd54977d79b8271e94ed799a3" dependencies = [ "portable-atomic", ] [[package]] name = "potential_utf" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +checksum = "0103b1cef7ec0cf76490e969665504990193874ea05c85ff9bab8b911d0a0564" dependencies = [ "zerovec", ] @@ -3582,7 +3609,7 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.8.40", + "zerocopy 0.8.48", ] [[package]] @@ -3633,11 +3660,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.4.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" +checksum = "e67ba7e9b2b56446f1d419b1d807906278ffa1a658a8a5d8a39dcb1f5a78614f" dependencies = [ - "toml_edit 0.23.10+spec-1.0.0", + "toml_edit 0.25.10+spec-1.1.0", ] [[package]] @@ -3760,7 +3787,7 @@ dependencies = [ "quinn-udp", "rustc-hash", "rustls", - "socket2 0.6.2", + "socket2 0.6.3", "thiserror 2.0.18", "tokio", "tracing", @@ -3769,9 +3796,9 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.13" +version = "0.11.14" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +checksum = "434b42fec591c96ef50e21e886936e66d3cc3f737104fdb9b737c40ffb94c098" dependencies = [ "aws-lc-rs", "bytes", @@ -3798,16 +3825,16 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.6.2", + "socket2 0.6.3", "tracing", "windows-sys 0.60.2", ] [[package]] name = "quote" -version = "1.0.44" +version = "1.0.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" +checksum = "41f2619966050689382d2b44f664f4bc593e129785a36d6ee376ddf37259b924" dependencies = [ "proc-macro2", ] @@ -3818,6 +3845,12 @@ version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" +[[package]] +name = "r-efi" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" + [[package]] name = "r2d2" version = "0.8.10" @@ -3841,9 +3874,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.32.0" +version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2ebd03c29250cdf191da93a35118b4567c2ef0eacab54f65e058d6f4c9965f6" +checksum = "5576df16239e4e422c4835c8ed00be806d4491855c7847dba60b7aa8408b469b" dependencies = [ "r2d2", "rusqlite", @@ -3884,7 +3917,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc266eb313df6c5c09c1c7b1fbe2510961e5bcd3add930c1e31f7ed9da0feff8" dependencies = [ "chacha20", - "getrandom 0.4.1", + "getrandom 0.4.2", "rand_core 0.10.0", ] @@ -4200,9 +4233,9 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.38.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f1c93dd1c9683b438c392c492109cb702b8090b2bfc8fed6f6e4eb4523f17af3" +checksum = "a0d2b0146dd9661bf67bb107c0bb2a55064d556eeb3fc314151b957f313bcd4e" dependencies = [ "bitflags", "fallible-iterator", @@ -4215,9 +4248,9 @@ dependencies = [ [[package]] name = "rust_decimal" -version = "1.40.0" +version = "1.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61f703d19852dbf87cbc513643fa81428361eb6940f1ac14fd58155d295a3eb0" +checksum = "2ce901f9a19d251159075a4c37af514c3b8ef99c22e02dd8c19161cf397ee94a" dependencies = [ "arrayvec", "borsh", @@ -4227,6 +4260,7 @@ dependencies = [ "rkyv", "serde", "serde_json", + "wasm-bindgen", ] [[package]] @@ -4237,9 +4271,9 @@ checksum = "b50b8869d9fc858ce7266cce0194bd74df58b9d0e3f6df3a9fc8eb470d95c09d" [[package]] name = "rustc-hash" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" +checksum = "94300abf3f1ae2e2b8ffb7b58043de3d399c73fa6f4b73826402a5c457614dbe" [[package]] name = "rustc_version" @@ -4330,9 +4364,9 @@ checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" [[package]] name = "rustls-webpki" -version = "0.103.9" +version = "0.103.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" +checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef" dependencies = [ "aws-lc-rs", "ring", @@ -4369,9 +4403,9 @@ checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" [[package]] name = "schannel" -version = "0.1.28" +version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" +checksum = "91c1b7e4904c873ef0710c1f407dde2e6287de2bebc1bbbf7d430bb7cbffd939" dependencies = [ "windows-sys 0.61.2", ] @@ -4446,9 +4480,9 @@ 
dependencies = [ [[package]] name = "semver" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" +checksum = "8a7852d02fc848982e0c167ef163aaff9cd91dc640ba85e263cb1ce46fae51cd" [[package]] name = "serde" @@ -4507,7 +4541,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2f2d7ff8a2140333718bb329f5c40fc5f0865b84c426183ce14c97d2ab8154f" dependencies = [ "form_urlencoded", - "indexmap 2.13.0", + "indexmap 2.13.1", "itoa", "ryu", "serde_core", @@ -4519,7 +4553,7 @@ version = "1.0.149" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ - "indexmap 2.13.0", + "indexmap 2.13.1", "itoa", "memchr", "serde", @@ -4560,9 +4594,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "1.0.4" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776" +checksum = "6662b5879511e06e8999a8a235d848113e942c9124f211511b16466ee2995f26" dependencies = [ "serde_core", ] @@ -4581,15 +4615,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.17.0" +version = "3.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "381b283ce7bc6b476d903296fb59d0d36633652b633b27f64db4fb46dcbfc3b9" +checksum = "dd5414fad8e6907dbdd5bc441a50ae8d6e26151a03b1de04d89a5576de61d01f" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.13.0", + "indexmap 2.13.1", "schemars 0.9.0", "schemars 1.2.1", "serde_core", @@ -4600,11 +4634,11 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.17.0" +version = "3.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a6d4e30573c8cb306ed6ab1dca8423eec9a463ea0e155f45399455e0368b27e0" +checksum = "d3db8978e608f1fe7357e211969fd9abdcae80bac1ba7a3369bb7eb6b404eb65" dependencies = [ - "darling 0.21.3", + "darling 0.23.0", "proc-macro2", "quote", "syn 2.0.117", @@ -4659,9 +4693,9 @@ dependencies = [ [[package]] name = "simd-adler32" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" +checksum = "703d5c7ef118737c72f1af64ad2f6f8c5e1921f818cdcb97b8fe6fc69bf66214" [[package]] name = "simdutf8" @@ -4699,12 +4733,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86f4aa3ad99f2088c990dfa82d367e19cb29268ed67c574d10d0a4bfe71f07e0" +checksum = "3a766e1110788c36f4fa1c2b71b387a7815aa65f88ce0229841826633d93723e" dependencies = [ "libc", - "windows-sys 0.60.2", + "windows-sys 0.61.2", ] [[package]] @@ -4885,12 +4919,12 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.26.0" +version = "3.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82a72c767771b47409d2345987fda8628641887d5466101319899796367354a0" +checksum = "32497e9a4c7b38532efcdebeef879707aa9f794296a4f0244f6f69e9bc8574bd" dependencies = [ "fastrand", - "getrandom 0.4.1", + "getrandom 0.4.2", "once_cell", "rustix", "windows-sys 0.61.2", @@ -4907,12 +4941,12 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b8cb979cb11c32ce1603f8137b22262a9d131aaa5c37b5678025f22b8becd0" +checksum = "230a1b821ccbd75b185820a1f1ff7b14d21da1e442e22c0863ea5f08771a8874" dependencies = [ "rustix", - "windows-sys 0.60.2", + "windows-sys 0.61.2", ] [[package]] @@ -4923,9 +4957,9 @@ checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" 
[[package]] name = "testcontainers" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1c0624faaa317c56d6d19136580be889677259caf5c897941c6f446b4655068" +checksum = "0bd36b06a2a6c0c3c81a83be1ab05fe86460d054d4d51bf513bc56b3e15bdc22" dependencies = [ "astral-tokio-tar", "async-trait", @@ -5044,9 +5078,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +checksum = "c8323304221c2a851516f22236c5722a72eaa19749016521d6dff0824447d96d" dependencies = [ "displaydoc", "zerovec", @@ -5064,9 +5098,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" +checksum = "3e61e67053d25a4e82c844e8424039d9745781b3fc4f32b8d55ed50f5f667ef3" dependencies = [ "tinyvec_macros", ] @@ -5079,25 +5113,25 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.49.0" +version = "1.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" +checksum = "2bd1c4c0fc4a7ab90fc15ef6daaa3ec3b893f004f915f2392557ed23237820cd" dependencies = [ "bytes", "libc", "mio", "pin-project-lite", "signal-hook-registry", - "socket2 0.6.2", + "socket2 0.6.3", "tokio-macros", "windows-sys 0.61.2", ] [[package]] name = "tokio-macros" -version = "2.6.1" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c55a2eff8b69ce66c84f85e1da1c233edc36ceb85a2058d11b0d6a3c7e7569c" +checksum = "385a6cb71ab9ab790c5fe8d67f1645e6c450a7ce006a33de03daa956cf70a496" dependencies = [ "proc-macro2", "quote", @@ -5156,13 
+5190,13 @@ version = "0.9.12+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf92845e79fc2e2def6a5d828f0801e29a2f8acc037becc5ab08595c7d5e9863" dependencies = [ - "indexmap 2.13.0", + "indexmap 2.13.1", "serde_core", - "serde_spanned 1.0.4", + "serde_spanned 1.1.1", "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "toml_writer", - "winnow", + "winnow 0.7.15", ] [[package]] @@ -5183,39 +5217,48 @@ dependencies = [ "serde_core", ] +[[package]] +name = "toml_datetime" +version = "1.1.1+spec-1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3165f65f62e28e0115a00b2ebdd37eb6f3b641855f9d636d3cd4103767159ad7" +dependencies = [ + "serde_core", +] + [[package]] name = "toml_edit" version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.13.0", + "indexmap 2.13.1", "serde", "serde_spanned 0.6.9", "toml_datetime 0.6.11", "toml_write", - "winnow", + "winnow 0.7.15", ] [[package]] name = "toml_edit" -version = "0.23.10+spec-1.0.0" +version = "0.25.10+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" +checksum = "a82418ca169e235e6c399a84e395ab6debeb3bc90edc959bf0f48647c6a32d1b" dependencies = [ - "indexmap 2.13.0", - "toml_datetime 0.7.5+spec-1.1.0", + "indexmap 2.13.1", + "toml_datetime 1.1.1+spec-1.1.0", "toml_parser", - "winnow", + "winnow 1.0.1", ] [[package]] name = "toml_parser" -version = "1.0.9+spec-1.1.0" +version = "1.1.2+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702d4415e08923e7e1ef96cd5727c0dfed80b4d2fa25db9647fe5eb6f7c5a4c4" +checksum = "a2abe9b86193656635d2411dc43050282ca48aa31c2451210f4202550afb7526" dependencies = [ - "winnow", + "winnow 1.0.1", ] [[package]] @@ -5226,9 +5269,9 @@ checksum = 
"5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "toml_writer" -version = "1.0.6+spec-1.1.0" +version = "1.1.1+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" +checksum = "756daf9b1013ebe47a8776667b466417e2d4c5679d441c26230efd9ef78692db" [[package]] name = "tonic" @@ -5249,7 +5292,7 @@ dependencies = [ "hyper-util", "percent-encoding", "pin-project", - "socket2 0.6.2", + "socket2 0.6.3", "sync_wrapper", "tokio", "tokio-stream", @@ -5693,7 +5736,7 @@ checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" dependencies = [ "futures-core", "futures-util", - "indexmap 2.13.0", + "indexmap 2.13.1", "pin-project-lite", "slab", "sync_wrapper", @@ -5796,9 +5839,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.22" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" +checksum = "cb7f578e5945fb242538965c2d0b04418d38ec25c79d160cd279bf0731c8d319" dependencies = [ "nu-ansi-term", "serde", @@ -5857,9 +5900,9 @@ checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" [[package]] name = "unicode-segmentation" -version = "1.12.0" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" +checksum = "9629274872b2bfaf8d66f5f15725007f635594914870f65218920345aa11aa8c" [[package]] name = "unicode-width" @@ -5887,9 +5930,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc97a28575b85cfedf2a7e7d3cc64b3e11bd8ac766666318003abbacc7a21fc" +checksum = 
"dea7109cdcd5864d4eeb1b58a1648dc9bf520360d7af16ec26d0a9354bafcfc0" dependencies = [ "base64 0.22.1", "log", @@ -5897,14 +5940,14 @@ dependencies = [ "rustls", "rustls-pki-types", "ureq-proto", - "utf-8", + "utf8-zero", ] [[package]] name = "ureq-proto" -version = "0.5.3" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d81f9efa9df032be5934a46a068815a10a042b494b6a58cb0a1a97bb5467ed6f" +checksum = "e994ba84b0bd1b1b0cf92878b7ef898a5c1760108fe7b6010327e274917a808c" dependencies = [ "base64 0.22.1", "http", @@ -5926,10 +5969,10 @@ dependencies = [ ] [[package]] -name = "utf-8" -version = "0.7.6" +name = "utf8-zero" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +checksum = "b8c0a043c9540bae7c578c88f91dda8bd82e59ae27c21baca69c8b191aaf5a6e" [[package]] name = "utf8_iter" @@ -5945,13 +5988,13 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.21.0" +version = "1.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b672338555252d43fd2240c714dc444b8c6fb0a5c5335e65a07bba7742735ddb" +checksum = "5ac8b6f42ead25368cf5b098aeb3dc8a1a2c05a3eee8a9a1a68c640edbfc79d9" dependencies = [ - "getrandom 0.4.1", + "getrandom 0.4.2", "js-sys", - "rand 0.9.2", + "rand 0.10.0", "wasm-bindgen", ] @@ -6024,36 +6067,33 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.114" +version = "0.2.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e" +checksum = "0551fc1bb415591e3372d0bc4780db7e587d84e2a7e79da121051c5c4b89d0b0" dependencies = [ "cfg-if", "once_cell", "rustversion", + "serde", "wasm-bindgen-macro", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.64" +version = "0.4.67" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9c5522b3a28661442748e09d40924dfb9ca614b21c00d3fd135720e48b67db8" +checksum = "03623de6905b7206edd0a75f69f747f134b7f0a2323392d664448bf2d3c5d87e" dependencies = [ - "cfg-if", - "futures-util", "js-sys", - "once_cell", "wasm-bindgen", - "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.114" +version = "0.2.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6" +checksum = "7fbdf9a35adf44786aecd5ff89b4563a90325f9da0923236f6104e603c7e86be" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6061,9 +6101,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.114" +version = "0.2.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3" +checksum = "dca9693ef2bab6d4e6707234500350d8dad079eb508dca05530c85dc3a529ff2" dependencies = [ "bumpalo", "proc-macro2", @@ -6074,9 +6114,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.114" +version = "0.2.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16" +checksum = "39129a682a6d2d841b6c429d0c51e5cb0ed1a03829d8b3d1e69a011e62cb3d3b" dependencies = [ "unicode-ident", ] @@ -6098,7 +6138,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" dependencies = [ "anyhow", - "indexmap 2.13.0", + "indexmap 2.13.1", "wasm-encoder", "wasmparser", ] @@ -6111,15 +6151,15 @@ checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" dependencies = [ "bitflags", "hashbrown 0.15.5", - "indexmap 2.13.0", + "indexmap 2.13.1", "semver", ] [[package]] name = "web-sys" -version = "0.3.91" +version = "0.3.94" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9" +checksum = "cd70027e39b12f0849461e08ffc50b9cd7688d942c1c8e3c7b22273236b4dd0a" dependencies = [ "js-sys", "wasm-bindgen", @@ -6469,9 +6509,18 @@ checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] name = "winnow" -version = "0.7.14" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df79d97927682d2fd8adb29682d1140b343be4ac0f08fd68b7765d9c059d3945" +dependencies = [ + "memchr", +] + +[[package]] +name = "winnow" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" +checksum = "09dac053f1cd375980747450bfc7250c264eaae0583872e845c0c7cd578872b5" dependencies = [ "memchr", ] @@ -6504,7 +6553,7 @@ checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" dependencies = [ "anyhow", "heck", - "indexmap 2.13.0", + "indexmap 2.13.1", "prettyplease", "syn 2.0.117", "wasm-metadata", @@ -6535,7 +6584,7 @@ checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" dependencies = [ "anyhow", "bitflags", - "indexmap 2.13.0", + "indexmap 2.13.1", "log", "serde", "serde_derive", @@ -6554,7 +6603,7 @@ checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" dependencies = [ "anyhow", "id-arena", - "indexmap 2.13.0", + "indexmap 2.13.1", "log", "semver", "serde", @@ -6566,9 +6615,9 @@ dependencies = [ [[package]] name = "writeable" -version = "0.6.2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" +checksum = "1ffae5123b2d3fc086436f8834ae3ab053a283cfac8fe0a0b8eaae044768a4c4" [[package]] name = "wyz" @@ -6597,9 +6646,9 @@ checksum = 
"cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yoke" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +checksum = "abe8c5fda708d9ca3df187cae8bfb9ceda00dd96231bed36e445a1a48e66f9ca" dependencies = [ "stable_deref_trait", "yoke-derive", @@ -6608,9 +6657,9 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" +checksum = "de844c262c8848816172cef550288e7dc6c7b7814b4ee56b3e1553f275f1858e" dependencies = [ "proc-macro2", "quote", @@ -6630,11 +6679,11 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.40" +version = "0.8.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a789c6e490b576db9f7e6b6d661bcc9799f7c0ac8352f56ea20193b2681532e5" +checksum = "eed437bf9d6692032087e337407a86f04cd8d6a16a37199ed57949d415bd68e9" dependencies = [ - "zerocopy-derive 0.8.40", + "zerocopy-derive 0.8.48", ] [[package]] @@ -6650,9 +6699,9 @@ dependencies = [ [[package]] name = "zerocopy-derive" -version = "0.8.40" +version = "0.8.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f65c489a7071a749c849713807783f70672b28094011623e200cb86dcb835953" +checksum = "70e3cd084b1788766f53af483dd21f93881ff30d7320490ec3ef7526d203bad4" dependencies = [ "proc-macro2", "quote", @@ -6661,18 +6710,18 @@ dependencies = [ [[package]] name = "zerofrom" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +checksum = "69faa1f2a1ea75661980b013019ed6687ed0e83d069bc1114e2cc74c6c04c4df" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version 
= "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +checksum = "11532158c46691caf0f2593ea8358fed6bbf68a0315e80aae9bd41fbade684a1" dependencies = [ "proc-macro2", "quote", @@ -6688,9 +6737,9 @@ checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" [[package]] name = "zerotrie" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +checksum = "0f9152d31db0792fa83f70fb2f83148effb5c1f5b8c7686c3459e361d9bc20bf" dependencies = [ "displaydoc", "yoke", @@ -6699,9 +6748,9 @@ dependencies = [ [[package]] name = "zerovec" -version = "0.11.5" +version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" +checksum = "90f911cbc359ab6af17377d242225f4d75119aec87ea711a880987b18cd7b239" dependencies = [ "yoke", "zerofrom", @@ -6710,9 +6759,9 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +checksum = "625dc425cab0dca6dc3c3319506e6593dcb08a9f387ea3b284dbd52a92c40555" dependencies = [ "proc-macro2", "quote", From 48e9606219dd27e69d1db8620be3dd754cb7a8c2 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Apr 2026 09:00:06 +0100 Subject: [PATCH 790/802] refactor: extract project dictionary from cSpell.json Move the project-specific word list from the inline 'words' array in cSpell.json into a dedicated project-words.txt dictionary file, following the same pattern used in other Torrust organisation repositories. 
Update packages/metrics/cSpell.json to reference the shared dictionary instead of maintaining its own inline word list. Closes #1484 --- cSpell.json | 217 +++-------------------------------- packages/metrics/cSpell.json | 26 ++--- project-words.txt | 199 ++++++++++++++++++++++++++++++++ 3 files changed, 228 insertions(+), 214 deletions(-) create mode 100644 project-words.txt diff --git a/cSpell.json b/cSpell.json index 81421e050..43eb391d3 100644 --- a/cSpell.json +++ b/cSpell.json @@ -1,208 +1,23 @@ { - "words": [ - "Addrs", - "adduser", - "alekitto", - "appuser", - "Arvid", - "ASMS", - "asyn", - "autoclean", - "AUTOINCREMENT", - "automock", - "Avicora", - "Azureus", - "bdecode", - "bencode", - "bencoded", - "bencoding", - "beps", - "binascii", - "binstall", - "Bitflu", - "bools", - "Bragilevsky", - "bufs", - "buildid", - "Buildx", - "byteorder", - "callgrind", - "camino", - "canonicalize", - "canonicalized", - "certbot", - "chrono", - "Cinstrument", - "ciphertext", - "clippy", - "cloneable", - "codecov", - "codegen", - "completei", - "Condvar", - "connectionless", - "Containerfile", - "conv", - "curr", - "cvar", - "Cyberneering", - "dashmap", - "datagram", - "datetime", - "debuginfo", - "Deque", - "Dijke", - "distroless", - "dockerhub", - "downloadedi", - "dtolnay", - "elif", - "endianness", - "Eray", - "filesd", - "flamegraph", - "formatjson", - "Freebox", - "Frostegård", - "gecos", - "Gibibytes", - "Grcov", - "hasher", - "healthcheck", - "heaptrack", - "hexlify", - "hlocalhost", - "Hydranode", - "hyperthread", - "Icelake", - "iiiiiiiiiiiiiiiiiiiid", - "imdl", - "impls", - "incompletei", - "infohash", - "infohashes", - "infoschema", - "Intermodal", - "intervali", - "Joakim", - "kallsyms", - "Karatay", - "kcachegrind", - "kexec", - "keyout", - "Kibibytes", - "kptr", - "lcov", - "leecher", - "leechers", - "libsqlite", - "libtorrent", - "libz", - "LOGNAME", - "Lphant", - "matchmakes", - "Mebibytes", - "metainfo", - "middlewares", - "misresolved", - "mockall", - 
"multimap", - "myacicontext", - "ñaca", - "Naim", - "nanos", - "newkey", - "nextest", - "nocapture", - "nologin", - "nonroot", - "Norberg", - "numwant", - "nvCFlJCq7fz7Qx6KoKTDiMZvns8l5Kw7", - "oneshot", - "ostr", - "Pando", - "peekable", - "peerlist", - "programatik", - "proot", - "proto", - "Quickstart", - "Radeon", - "Rakshasa", - "Rasterbar", - "realpath", - "reannounce", - "Registar", - "repr", - "reqs", - "reqwest", - "rerequests", - "ringbuf", - "ringsize", - "rngs", - "rosegment", - "routable", - "rstest", - "rusqlite", - "rustc", - "RUSTDOCFLAGS", - "RUSTFLAGS", - "rustfmt", - "Rustls", - "Ryzen", - "Seedable", - "serde", - "Shareaza", - "sharktorrent", - "SHLVL", - "skiplist", - "slowloris", - "socketaddr", - "sqllite", - "subsec", - "Swatinem", - "Swiftbit", - "taiki", - "tdyne", - "Tebibytes", - "tempfile", - "testcontainers", - "thiserror", - "tlsv", - "Torrentstorm", - "torrust", - "torrustracker", - "trackerid", - "Trackon", - "typenum", - "udpv", - "Unamed", - "underflows", - "Unsendable", - "untuple", - "uroot", - "Vagaa", - "valgrind", - "Vitaly", - "vmlinux", - "Vuze", - "Weidendorfer", - "Werror", - "whitespaces", - "Xacrimon", - "XBTT", - "Xdebug", - "Xeon", - "Xtorrent", - "Xunlei", - "xxxxxxxxxxxxxxxxxxxxd", - "yyyyyyyyyyyyyyyyyyyyd", - "zerocopy" + "$schema": "https://raw.githubusercontent.com/streetsidesoftware/cspell/main/cspell.schema.json", + "version": "0.2", + "dictionaryDefinitions": [ + { + "name": "project-words", + "path": "./project-words.txt", + "addWords": true + } + ], + "dictionaries": [ + "project-words" ], "enableFiletypes": [ "dockerfile", "shellscript", "toml" + ], + "ignorePaths": [ + "target", + "/project-words.txt" ] -} +} \ No newline at end of file diff --git a/packages/metrics/cSpell.json b/packages/metrics/cSpell.json index f04cce9e3..8f5002833 100644 --- a/packages/metrics/cSpell.json +++ b/packages/metrics/cSpell.json @@ -1,21 +1,21 @@ { - "words": [ - "cloneable", - "formatjson", - "Gibibytes", - "Kibibytes", - 
"Mebibytes", - "ñaca", - "println", - "rstest", - "serde", - "subsec", - "Tebibytes", - "thiserror" + "$schema": "https://raw.githubusercontent.com/streetsidesoftware/cspell/main/cspell.schema.json", + "version": "0.2", + "dictionaryDefinitions": [ + { + "name": "project-words", + "path": "../../project-words.txt", + "addWords": true + } ], + "dictionaries": ["project-words"], "enableFiletypes": [ "dockerfile", "shellscript", "toml" + ], + "ignorePaths": [ + "target", + "/project-words.txt" ] } diff --git a/project-words.txt b/project-words.txt new file mode 100644 index 000000000..c698eea9c --- /dev/null +++ b/project-words.txt @@ -0,0 +1,199 @@ +Addrs +adduser +alekitto +appuser +Arvid +ASMS +asyn +autoclean +AUTOINCREMENT +automock +Avicora +Azureus +bdecode +bencode +bencoded +bencoding +beps +binascii +binstall +Bitflu +bools +Bragilevsky +bufs +buildid +Buildx +byteorder +callgrind +camino +canonicalize +canonicalized +certbot +chrono +Cinstrument +ciphertext +clippy +cloneable +codecov +codegen +completei +Condvar +connectionless +Containerfile +conv +curr +cvar +Cyberneering +dashmap +datagram +datetime +debuginfo +Deque +Dijke +distroless +dockerhub +downloadedi +dtolnay +elif +endianness +Eray +filesd +flamegraph +formatjson +Freebox +Frostegård +gecos +Gibibytes +Grcov +hasher +healthcheck +heaptrack +hexlify +hlocalhost +Hydranode +hyperthread +Icelake +iiiiiiiiiiiiiiiiiiiid +imdl +impls +incompletei +infohash +infohashes +infoschema +Intermodal +intervali +Joakim +kallsyms +Karatay +kcachegrind +kexec +keyout +Kibibytes +kptr +lcov +leecher +leechers +libsqlite +libtorrent +libz +LOGNAME +Lphant +matchmakes +Mebibytes +metainfo +middlewares +misresolved +mockall +multimap +myacicontext +ñaca +Naim +nanos +newkey +nextest +nocapture +nologin +nonroot +Norberg +numwant +nvCFlJCq7fz7Qx6KoKTDiMZvns8l5Kw7 +oneshot +ostr +Pando +peekable +peerlist +programatik +proot +proto +Quickstart +Radeon +Rakshasa +Rasterbar +realpath +reannounce +Registar +repr +reqs 
+reqwest +rerequests +ringbuf +ringsize +rngs +rosegment +routable +rstest +rusqlite +rustc +RUSTDOCFLAGS +RUSTFLAGS +rustfmt +Rustls +Ryzen +Seedable +serde +Shareaza +sharktorrent +SHLVL +skiplist +slowloris +socketaddr +sqllite +subsec +Swatinem +Swiftbit +taiki +tdyne +Tebibytes +tempfile +testcontainers +thiserror +tlsv +Torrentstorm +torrust +torrustracker +trackerid +Trackon +typenum +udpv +Unamed +underflows +Unsendable +untuple +uroot +Vagaa +valgrind +Vitaly +vmlinux +Vuze +Weidendorfer +Werror +whitespaces +Xacrimon +XBTT +Xdebug +Xeon +Xtorrent +Xunlei +xxxxxxxxxxxxxxxxxxxxd +yyyyyyyyyyyyyyyyyyyyd +zerocopy From c88f66cd42c733fafc5b7b4afeb862b7b3b28329 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Apr 2026 09:19:40 +0000 Subject: [PATCH 791/802] chore(deps): bump docker/login-action from 3 to 4 Bumps [docker/login-action](https://github.com/docker/login-action) from 3 to 4. - [Release notes](https://github.com/docker/login-action/releases) - [Commits](https://github.com/docker/login-action/compare/v3...v4) --- updated-dependencies: - dependency-name: docker/login-action dependency-version: '4' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/container.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index 7416df71e..2f2b0780c 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -117,7 +117,7 @@ jobs: - id: login name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@v4 with: username: ${{ secrets.DOCKER_HUB_USERNAME }} password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} @@ -158,7 +158,7 @@ jobs: - id: login name: Login to Docker Hub - uses: docker/login-action@v3 + uses: docker/login-action@v4 with: username: ${{ secrets.DOCKER_HUB_USERNAME }} password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} From 504135d009b2c16768ed13ce4b44ddf50b7d2f2b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Apr 2026 09:19:47 +0000 Subject: [PATCH 792/802] chore(deps): bump docker/metadata-action from 5 to 6 Bumps [docker/metadata-action](https://github.com/docker/metadata-action) from 5 to 6. - [Release notes](https://github.com/docker/metadata-action/releases) - [Commits](https://github.com/docker/metadata-action/compare/v5...v6) --- updated-dependencies: - dependency-name: docker/metadata-action dependency-version: '6' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/container.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index 2f2b0780c..0615ad6be 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -108,7 +108,7 @@ jobs: steps: - id: meta name: Docker Meta - uses: docker/metadata-action@v5 + uses: docker/metadata-action@v6 with: images: | "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" @@ -146,7 +146,7 @@ jobs: steps: - id: meta name: Docker Meta - uses: docker/metadata-action@v5 + uses: docker/metadata-action@v6 with: images: | "${{ secrets.DOCKER_HUB_USERNAME }}/${{secrets.DOCKER_HUB_REPOSITORY_NAME }}" From 2c78850ab49ede86f36f1d98f93f45b890b98895 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Apr 2026 09:19:32 +0000 Subject: [PATCH 793/802] chore(deps): bump docker/setup-buildx-action from 3 to 4 Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 3 to 4. - [Release notes](https://github.com/docker/setup-buildx-action/releases) - [Commits](https://github.com/docker/setup-buildx-action/compare/v3...v4) --- updated-dependencies: - dependency-name: docker/setup-buildx-action dependency-version: '4' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/container.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index 0615ad6be..f09a94bca 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -26,7 +26,7 @@ jobs: steps: - id: setup name: Setup Toolchain - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@v4 - id: build name: Build @@ -124,7 +124,7 @@ jobs: - id: setup name: Setup Toolchain - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@v4 - name: Build and push uses: docker/build-push-action@v6 @@ -165,7 +165,7 @@ jobs: - id: setup name: Setup Toolchain - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@v4 - name: Build and push uses: docker/build-push-action@v6 From 4f3f1956f4a8d1839f95bd697b851ab04df31fbd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Apr 2026 09:19:27 +0000 Subject: [PATCH 794/802] chore(deps): bump docker/build-push-action from 6 to 7 Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 6 to 7. - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v6...v7) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-version: '7' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/container.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index f09a94bca..e0857e936 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -30,7 +30,7 @@ jobs: - id: build name: Build - uses: docker/build-push-action@v6 + uses: docker/build-push-action@v7 with: file: ./Containerfile push: false @@ -127,7 +127,7 @@ jobs: uses: docker/setup-buildx-action@v4 - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@v7 with: file: ./Containerfile push: true @@ -168,7 +168,7 @@ jobs: uses: docker/setup-buildx-action@v4 - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@v7 with: file: ./Containerfile push: true From f2612dc1fa0aa242503b0354227b644a12fbe47d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Apr 2026 12:34:06 +0100 Subject: [PATCH 795/802] docs(issue-523): add internal linting implementation plan --- docs/issues/523-internal-linting-tool.md | 141 +++++++++++++++++++++++ 1 file changed, 141 insertions(+) create mode 100644 docs/issues/523-internal-linting-tool.md diff --git a/docs/issues/523-internal-linting-tool.md b/docs/issues/523-internal-linting-tool.md new file mode 100644 index 000000000..14593e190 --- /dev/null +++ b/docs/issues/523-internal-linting-tool.md @@ -0,0 +1,141 @@ +# Issue #523 Implementation Plan (Internal Linting Tool) + +## Goal + +Replace the MegaLinter idea with Torrust internal linting tooling and integrate it into CI for this repository. 
+ +## Scope + +- Target issue: https://github.com/torrust/torrust-tracker/issues/523 +- CI workflow to modify: .github/workflows/testing.yaml +- External reference workflow: https://raw.githubusercontent.com/torrust/torrust-tracker-deployer/refs/heads/main/.github/workflows/linting.yml + +## Tasks + +### 0) Create a local branch following GitHub branch naming conventions + +- Approved branch name: `523-internal-linting-tool` +- Commands: + - `git fetch --all --prune` + - `git checkout develop` + - `git pull --ff-only` + - `git checkout -b 523-internal-linting-tool` +- Checkpoint: + - `git branch --show-current` should output `523-internal-linting-tool`. + +### 1) Install and run the linting tool locally; verify it passes in this repo + +- Identify/install internal linting package/tool used by Torrust (likely `torrust-linting` or equivalent wrapper). +- Ensure local runtime dependencies are present (if any). +- Note: linter config files (step 2) must exist in the repo root before a full suite run; it is fine to do a first exploratory run first to discover which linters are active. +- Run the internal linting command against this repository. +- Capture the exact command and output summary for reproducibility. +- Checkpoint: + - Linting command exits with code `0`. + +### 2) Add and adapt linter configuration files + +Some linters require a config file in the repo root. Use the deployer configs as reference and adapt values to this repository. 
+ +| File | Linter | Reference | +| -------------------- | ---------------- | ----------------------------------------------------------------------------------------------------- | +| `.markdownlint.json` | markdownlint | https://raw.githubusercontent.com/torrust/torrust-tracker-deployer/refs/heads/main/.markdownlint.json | +| `.taplo.toml` | taplo (TOML fmt) | https://raw.githubusercontent.com/torrust/torrust-tracker-deployer/refs/heads/main/.taplo.toml | +| `.yamllint-ci.yml` | yamllint | https://raw.githubusercontent.com/torrust/torrust-tracker-deployer/refs/heads/main/.yamllint-ci.yml | + +Key adaptations to make per file: + +- `.markdownlint.json`: review line-length rules and Markdown conventions used in this repo's docs. +- `.taplo.toml`: update `exclude` list to match this repo's generated/runtime folders (e.g. `target/**`, `storage/**`) instead of the deployer-specific ones (`build/**`, `data/**`, `envs/**`). +- `.yamllint-ci.yml`: update `ignore` block to reflect this repo's generated/runtime directories instead of cloud-init and deployer folders. + +Commit message: `ci(lint): add linter config files (.markdownlint.json, .taplo.toml, .yamllint-ci.yml)` + +Checkpoint: + +- Config files are present in the repo root. +- Running each individual linter against the repo with the config produces expected/controlled output. + +### 3) If local linting fails, fix all lint errors; commit fixes independently per linter + +- If the linting suite reports failures: + - Group findings by linter (for example: formatting, clippy, docs, spelling, yaml, etc.). + - Fix only one linter category at a time. + - Create one commit per linter category. +- Commit style proposal: + - `fix(lint/): resolve ` +- Constraints: + - Do not mix workflow/tooling changes with source lint fixes in the same commit. + - Keep each commit minimal and reviewable. +- Checkpoint: + - Re-run linting suite; all checks pass before moving to workflow integration. 
+ +### 4) Review existing workflow example using internal linting + +- Read and analyze: + - https://raw.githubusercontent.com/torrust/torrust-tracker-deployer/refs/heads/main/.github/workflows/linting.yml +- Extract and adapt: + - Trigger strategy. + - Tool setup/install method. + - Cache strategy. + - Invocation command and CI fail behavior. +- Checkpoint: + - Document a short mapping from deployer workflow pattern to this repo’s `testing.yaml` job structure. + +### 5) Modify `.github/workflows/testing.yaml` to use the internal linting tool + +- Update the current `check`/lint-related section to run the internal linting command. +- Replace existing lint/check execution path with the internal linting tool in this migration (no parallel transition mode). +- Ensure matrix/toolchain compatibility is explicit (nightly/stable behavior decided and documented). +- Validate workflow syntax before commit. +- Checkpoint: + - Workflow is valid and executes linting through internal tool. + +### 6) Commit workflow changes + +- Commit only workflow-related changes in a dedicated commit. +- Commit message proposal: + - `ci(lint): switch testing workflow to internal linting tool` +- Checkpoint: + - `git show --name-only --stat HEAD` includes only expected workflow files (and any required supporting CI files if intentionally added). + +### 7) Push to remote `josecelano` and open PR into `develop` + +- Verify remote exists: + - `git remote -v` +- Push branch: + - `git push -u josecelano 523-internal-linting-tool` +- Open PR targeting `torrust/torrust-tracker:develop` with head `josecelano:523-internal-linting-tool`. +- PR content should include: + - Why internal linting over MegaLinter. + - Summary of lint-fix commits by linter. + - Summary of workflow change. + - Evidence (local run + CI status). +- Checkpoint: + - PR is open, linked to issue #523, and ready for review. + +## Execution Notes + +- Keep PR review-friendly by separating commits by concern: + 1. 
Linter config files (step 2) + 2. Per-linter source fixes (step 3, only if needed) + 3. CI workflow migration (step 6) +- Use Conventional Commits for all commits in this implementation. +- If lint checks differ between local and CI, align tool versions and execution flags before merging. +- Avoid broad refactors unrelated to lint failures. + +## Decisions Confirmed + +1. Branch name: `523-internal-linting-tool`. +2. CI strategy: replace existing lint/check path with internal linting. +3. Commit convention: yes, use Conventional Commits. +4. PR target: base `torrust/torrust-tracker:develop`, head `josecelano:523-internal-linting-tool`. + +## Risks and Mitigations + +- Risk: Internal linting wrapper may not be version-pinned and may produce unstable CI behavior. + - Mitigation: Pin tool version in workflow installation step. +- Risk: Internal linting may overlap with existing checks, increasing CI time. + - Mitigation: Remove redundant jobs only after verifying coverage parity. +- Risk: Tool may require secrets or environment assumptions not available in CI. + - Mitigation: Run dry-run in GitHub Actions on branch before requesting review. 
From fa3b491bb70348be86bc51f80431ece411596554 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Apr 2026 16:00:55 +0100 Subject: [PATCH 796/802] ci(lint): add linter config files --- .markdownlint.json | 18 ++++++++++++++++++ .taplo.toml | 31 +++++++++++++++++++++++++++++++ .yamllint-ci.yml | 16 ++++++++++++++++ 3 files changed, 65 insertions(+) create mode 100644 .markdownlint.json create mode 100644 .taplo.toml create mode 100644 .yamllint-ci.yml diff --git a/.markdownlint.json b/.markdownlint.json new file mode 100644 index 000000000..19ec47c2e --- /dev/null +++ b/.markdownlint.json @@ -0,0 +1,18 @@ +{ + "default": true, + "MD013": false, + "MD031": true, + "MD032": true, + "MD040": true, + "MD022": true, + "MD009": true, + "MD007": { + "indent": 2 + }, + "MD026": false, + "MD041": false, + "MD034": false, + "MD024": false, + "MD033": false, + "MD060": false +} diff --git a/.taplo.toml b/.taplo.toml new file mode 100644 index 000000000..d0f755dcd --- /dev/null +++ b/.taplo.toml @@ -0,0 +1,31 @@ +# Taplo configuration file for TOML formatting +# Used by the "Even Better TOML" VS Code extension + +# Exclude generated and runtime folders from linting +exclude = [ + "target/**", + "storage/**", + ".coverage/**", +] + +[formatting] +# Preserve blank lines that exist +allowed_blank_lines = 1 +# Don't reorder keys to maintain structure +reorder_keys = false +# Array formatting +array_trailing_comma = true +array_auto_expand = false +array_auto_collapse = false +# Inline table formatting +inline_table_expand = false +compact_inline_tables = false +compact_arrays = false +# Alignment +align_entries = false +align_comments = true +# Indentation +indent_tables = false +indent_entries = false +# Other +trailing_newline = true diff --git a/.yamllint-ci.yml b/.yamllint-ci.yml new file mode 100644 index 000000000..9380b592a --- /dev/null +++ b/.yamllint-ci.yml @@ -0,0 +1,16 @@ +extends: default + +rules: + line-length: + max: 200 # More reasonable for infrastructure code 
+ comments: + min-spaces-from-content: 1 # Allow single space before comments + document-start: disable # Most project YAML files don't require --- + truthy: + allowed-values: ["true", "false", "yes", "no", "on", "off"] # Allow common GitHub Actions values + +# Ignore generated/runtime directories +ignore: | + target/** + storage/** + .coverage/** From bc1f8cc72c0f8752480321860262a4e04f14305f Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Apr 2026 16:09:21 +0100 Subject: [PATCH 797/802] fix(lint/clippy): resolve pedantic duration and style violations --- packages/axum-rest-tracker-api-server/src/server.rs | 4 ++-- .../benches/http_tracker_core_benchmark.rs | 2 +- .../benches/repository_benchmark.rs | 8 ++++---- .../torrent-repository-benchmarking/tests/entry/mod.rs | 5 +++-- .../tests/repository/mod.rs | 5 +++-- packages/tracker-client/src/udp/client.rs | 2 +- .../benches/udp_tracker_core_benchmark.rs | 2 +- packages/udp-tracker-server/src/server/launcher.rs | 2 +- packages/udp-tracker-server/src/statistics/repository.rs | 4 ++-- packages/udp-tracker-server/tests/server/contract.rs | 2 +- 10 files changed, 19 insertions(+), 17 deletions(-) diff --git a/packages/axum-rest-tracker-api-server/src/server.rs b/packages/axum-rest-tracker-api-server/src/server.rs index 05adeae8a..9eef6b71a 100644 --- a/packages/axum-rest-tracker-api-server/src/server.rs +++ b/packages/axum-rest-tracker-api-server/src/server.rs @@ -220,9 +220,9 @@ pub struct Launcher { impl std::fmt::Display for Launcher { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { if self.tls.is_some() { - write!(f, "(with socket): {}, using TLS", self.bind_to,) + write!(f, "(with socket): {}, using TLS", self.bind_to) } else { - write!(f, "(with socket): {}, without TLS", self.bind_to,) + write!(f, "(with socket): {}, without TLS", self.bind_to) } } } diff --git a/packages/http-tracker-core/benches/http_tracker_core_benchmark.rs 
b/packages/http-tracker-core/benches/http_tracker_core_benchmark.rs index aa50ceeb9..c193c5124 100644 --- a/packages/http-tracker-core/benches/http_tracker_core_benchmark.rs +++ b/packages/http-tracker-core/benches/http_tracker_core_benchmark.rs @@ -12,7 +12,7 @@ fn announce_once(c: &mut Criterion) { let mut group = c.benchmark_group("http_tracker_handle_announce_once"); group.warm_up_time(Duration::from_millis(500)); - group.measurement_time(Duration::from_millis(1000)); + group.measurement_time(Duration::from_secs(1)); group.bench_function("handle_announce_data", |b| { b.iter(|| sync::return_announce_data_once(100)); diff --git a/packages/torrent-repository-benchmarking/benches/repository_benchmark.rs b/packages/torrent-repository-benchmarking/benches/repository_benchmark.rs index a58207492..f5f8e4b28 100644 --- a/packages/torrent-repository-benchmarking/benches/repository_benchmark.rs +++ b/packages/torrent-repository-benchmarking/benches/repository_benchmark.rs @@ -17,7 +17,7 @@ fn add_one_torrent(c: &mut Criterion) { let mut group = c.benchmark_group("add_one_torrent"); group.warm_up_time(Duration::from_millis(500)); - group.measurement_time(Duration::from_millis(1000)); + group.measurement_time(Duration::from_secs(1)); group.bench_function("RwLockStd", |b| { b.iter_custom(sync::add_one_torrent::); @@ -74,7 +74,7 @@ fn add_multiple_torrents_in_parallel(c: &mut Criterion) { //group.sample_size(10); group.warm_up_time(Duration::from_millis(500)); - group.measurement_time(Duration::from_millis(1000)); + group.measurement_time(Duration::from_secs(1)); group.bench_function("RwLockStd", |b| { b.to_async(&rt) @@ -138,7 +138,7 @@ fn update_one_torrent_in_parallel(c: &mut Criterion) { //group.sample_size(10); group.warm_up_time(Duration::from_millis(500)); - group.measurement_time(Duration::from_millis(1000)); + group.measurement_time(Duration::from_secs(1)); group.bench_function("RwLockStd", |b| { b.to_async(&rt) @@ -202,7 +202,7 @@ fn 
update_multiple_torrents_in_parallel(c: &mut Criterion) { //group.sample_size(10); group.warm_up_time(Duration::from_millis(500)); - group.measurement_time(Duration::from_millis(1000)); + group.measurement_time(Duration::from_secs(1)); group.bench_function("RwLockStd", |b| { b.to_async(&rt) diff --git a/packages/torrent-repository-benchmarking/tests/entry/mod.rs b/packages/torrent-repository-benchmarking/tests/entry/mod.rs index 5cbb3b19c..86ca891d4 100644 --- a/packages/torrent-repository-benchmarking/tests/entry/mod.rs +++ b/packages/torrent-repository-benchmarking/tests/entry/mod.rs @@ -1,5 +1,4 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::ops::Sub; use std::time::Duration; use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; @@ -430,7 +429,9 @@ async fn it_should_remove_inactive_peers_beyond_cutoff( let now = clock::Working::now(); clock::Stopped::local_set(&now); - peer.updated = now.sub(EXPIRE); + peer.updated = now + .checked_sub(EXPIRE) + .expect("it_should_remove_inactive_peers_beyond_cutoff: EXPIRE must not exceed now"); torrent.upsert_peer(&peer).await; diff --git a/packages/torrent-repository-benchmarking/tests/repository/mod.rs b/packages/torrent-repository-benchmarking/tests/repository/mod.rs index ec7e68bae..fb0b8fcff 100644 --- a/packages/torrent-repository-benchmarking/tests/repository/mod.rs +++ b/packages/torrent-repository-benchmarking/tests/repository/mod.rs @@ -526,7 +526,6 @@ async fn it_should_remove_inactive_peers( repo: Repo, #[case] entries: Entries, ) { - use std::ops::Sub as _; use std::time::Duration; use torrust_tracker_clock::clock::stopped::Stopped as _; @@ -556,7 +555,9 @@ async fn it_should_remove_inactive_peers( let now = clock::Working::now(); clock::Stopped::local_set(&now); - peer.updated = now.sub(EXPIRE); + peer.updated = now + .checked_sub(EXPIRE) + .expect("it_should_remove_inactive_peers_beyond_cutoff: EXPIRE must not exceed now"); } // Insert the infohash and peer into the repository diff --git 
a/packages/tracker-client/src/udp/client.rs b/packages/tracker-client/src/udp/client.rs index 1c5ffd901..94c882d29 100644 --- a/packages/tracker-client/src/udp/client.rs +++ b/packages/tracker-client/src/udp/client.rs @@ -256,7 +256,7 @@ pub async fn check(service_binding: &ServiceBinding) -> Result { } }; - let sleep = time::sleep(Duration::from_millis(2000)); + let sleep = time::sleep(Duration::from_secs(2)); tokio::pin!(sleep); tokio::select! { diff --git a/packages/udp-tracker-core/benches/udp_tracker_core_benchmark.rs b/packages/udp-tracker-core/benches/udp_tracker_core_benchmark.rs index 5bd0e27c8..90fc721d0 100644 --- a/packages/udp-tracker-core/benches/udp_tracker_core_benchmark.rs +++ b/packages/udp-tracker-core/benches/udp_tracker_core_benchmark.rs @@ -9,7 +9,7 @@ use crate::helpers::sync; fn bench_connect_once(c: &mut Criterion) { let mut group = c.benchmark_group("udp_tracker/connect_once"); group.warm_up_time(Duration::from_millis(500)); - group.measurement_time(Duration::from_millis(1000)); + group.measurement_time(Duration::from_secs(1)); group.bench_function("connect_once", |b| { b.iter(|| sync::connect_once(100)); diff --git a/packages/udp-tracker-server/src/server/launcher.rs b/packages/udp-tracker-server/src/server/launcher.rs index a514921cc..4fd3a95d9 100644 --- a/packages/udp-tracker-server/src/server/launcher.rs +++ b/packages/udp-tracker-server/src/server/launcher.rs @@ -54,7 +54,7 @@ impl Launcher { panic!("it should not use udp if using authentication"); } - let socket = tokio::time::timeout(Duration::from_millis(5000), BoundSocket::new(bind_to)) + let socket = tokio::time::timeout(Duration::from_secs(5), BoundSocket::new(bind_to)) .await .expect("it should bind to the socket within five seconds"); diff --git a/packages/udp-tracker-server/src/statistics/repository.rs b/packages/udp-tracker-server/src/statistics/repository.rs index 94a86e3ab..c4c995b8a 100644 --- a/packages/udp-tracker-server/src/statistics/repository.rs +++ 
b/packages/udp-tracker-server/src/statistics/repository.rs @@ -330,7 +330,7 @@ mod tests { // Calculate new average with processing time of 2000ns // This will increment the processed requests counter from 0 to 1 - let processing_time = Duration::from_nanos(2000); + let processing_time = Duration::from_micros(2); let new_avg = repo .recalculate_udp_avg_processing_time_ns(processing_time, &connect_labels, now) .await; @@ -417,7 +417,7 @@ mod tests { let now = CurrentClock::now(); // Test with zero connections (should not panic, should handle division by zero) - let processing_time = Duration::from_nanos(1000); + let processing_time = Duration::from_micros(1); let connect_labels = LabelSet::from([("request_kind", "connect")]); let connect_avg = repo diff --git a/packages/udp-tracker-server/tests/server/contract.rs b/packages/udp-tracker-server/tests/server/contract.rs index e9691c879..350f3b8eb 100644 --- a/packages/udp-tracker-server/tests/server/contract.rs +++ b/packages/udp-tracker-server/tests/server/contract.rs @@ -32,7 +32,7 @@ async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrac match response { Response::Connect(connect_response) => connect_response.connection_id, - _ => panic!("error connecting to udp server {:?}", response), + _ => panic!("error connecting to udp server {response:?}"), } } From f9b59f0c8e3dfbc0d79c0b43efbb10b95a157a6d Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Apr 2026 16:29:05 +0100 Subject: [PATCH 798/802] fix(lint/cspell): configure ignores and dictionary for repo --- .github/workflows/upload_coverage_pr.yaml | 2 +- cspell.json | 27 ++++++++++++ project-words.txt | 53 +++++++++++++++++++++++ 3 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 cspell.json diff --git a/.github/workflows/upload_coverage_pr.yaml b/.github/workflows/upload_coverage_pr.yaml index 8b0006a6d..55de02c62 100644 --- a/.github/workflows/upload_coverage_pr.yaml +++ b/.github/workflows/upload_coverage_pr.yaml 
@@ -1,7 +1,7 @@ name: Upload Coverage Report (PR) on: - # This workflow is triggered after every successfull execution + # This workflow is triggered after every successful execution # of `Generate Coverage Report` workflow. workflow_run: workflows: ["Generate Coverage Report (PR)"] diff --git a/cspell.json b/cspell.json new file mode 100644 index 000000000..02f29f7f9 --- /dev/null +++ b/cspell.json @@ -0,0 +1,27 @@ +{ + "$schema": "https://raw.githubusercontent.com/streetsidesoftware/cspell/main/cspell.schema.json", + "version": "0.2", + "dictionaryDefinitions": [ + { + "name": "project-words", + "path": "./project-words.txt", + "addWords": true + } + ], + "dictionaries": [ + "project-words" + ], + "enableFiletypes": [ + "dockerfile", + "shellscript", + "toml" + ], + "ignorePaths": [ + "target", + "docs/media/*.svg", + "contrib/bencode/benches/*.bencode", + "contrib/dev-tools/su-exec/**", + ".github/labels.json", + "/project-words.txt" + ] +} \ No newline at end of file diff --git a/project-words.txt b/project-words.txt index c698eea9c..48c9565cc 100644 --- a/project-words.txt +++ b/project-words.txt @@ -197,3 +197,56 @@ Xunlei xxxxxxxxxxxxxxxxxxxxd yyyyyyyyyyyyyyyyyyyyd zerocopy +Aideq +autoremove +CALLSITE +Dihc +Dmqcd +QJSF +Glrg +Irwe +Uninit +Unparker +eventfd +fastrand +fdbased +fdget +fput +iiiiiiiiiiiiiiiippe +iiiiiiiiiiiiiiiipp +iiiiiiiiiiiiiiip +iipp +iiiipp +jdbe +ksys +llist +mmap +mprotect +nonblocking +peersld +pkey +porti +prealloc +println +shellcheck +sockfd +subkey +sysmalloc +sysret +timespec +toki +torru +ttwu +uninit +unparked +unsync +vtable +wakelist +wakeup +actix +iterationsadd +josecelano +mysqladmin +setgroups +taplo +trixie From b654fa5fb18ffa94d167aef3e21becfdac7fbda7 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Apr 2026 16:40:03 +0100 Subject: [PATCH 799/802] fix(lint/markdown): resolve markdownlint violations --- README.md | 18 ++++-------------- contrib/bencode/README.md | 3 ++- contrib/dev-tools/su-exec/README.md | 4 
++-- 3 files changed, 8 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index bb102355b..2fe28db08 100644 --- a/README.md +++ b/README.md @@ -73,7 +73,7 @@ Others: ## Implemented BitTorrent Enhancement Proposals (BEPs) -> + > _[Learn more about BitTorrent Enhancement Proposals][BEP 00]_ - [BEP 03]: The BitTorrent Protocol. @@ -113,8 +113,8 @@ podman run -it docker.io/torrust/tracker:develop ### Development Version -- Please ensure you have the _**[latest stable (or nightly) version of rust][rust]___. -- Please ensure that your computer has enough RAM. _**Recommended 16GB.___ +- Please ensure you have the \_\*\*[latest stable (or nightly) version of rust][rust]\_\_\_. +- Please ensure that your computer has enough RAM. \_\*\*Recommended 16GB.\_\_\_ #### Checkout, Test and Run @@ -217,7 +217,7 @@ This program is free software: you can redistribute it and/or modify it under th This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the [GNU Affero General Public License][AGPL_3_0] for more details. -You should have received a copy of the *GNU Affero General Public License* along with this program. If not, see . +You should have received a copy of the _GNU Affero General Public License_ along with this program. If not, see . Some files include explicit copyright notices and/or license notices. 
@@ -250,18 +250,14 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [deployment_wf_b]: ../../actions/workflows/deployment.yaml/badge.svg [testing_wf]: ../../actions/workflows/testing.yaml [testing_wf_b]: ../../actions/workflows/testing.yaml/badge.svg - [bittorrent]: http://bittorrent.org/ [rust]: https://www.rust-lang.org/ [axum]: https://github.com/tokio-rs/axum [newtrackon]: https://newtrackon.com/ [coverage]: https://app.codecov.io/gh/torrust/torrust-tracker [torrust]: https://torrust.com/ - [dockerhub]: https://hub.docker.com/r/torrust/tracker/tags - [torrent_source_felid]: https://github.com/qbittorrent/qBittorrent/discussions/19406 - [BEP 00]: https://www.bittorrent.org/beps/bep_0000.html [BEP 03]: https://www.bittorrent.org/beps/bep_0003.html [BEP 07]: https://www.bittorrent.org/beps/bep_0007.html @@ -269,24 +265,18 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [BEP 23]: https://www.bittorrent.org/beps/bep_0023.html [BEP 27]: https://www.bittorrent.org/beps/bep_0027.html [BEP 48]: https://www.bittorrent.org/beps/bep_0048.html - [containers.md]: ./docs/containers.md - [docs]: https://docs.rs/torrust-tracker/latest/ [api]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/apis/v1 [http]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/http [udp]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/udp - [good first issues]: https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 [discussions]: https://github.com/torrust/torrust-tracker/discussions - [guide.md]: https://github.com/torrust/.github/blob/main/info/contributing.md [agreement.md]: https://github.com/torrust/.github/blob/main/info/licensing/contributor_agreement_v01.md - [AGPL_3_0]: ./docs/licenses/LICENSE-AGPL_3_0 [MIT_0]: ./docs/licenses/LICENSE-MIT_0 [FSF]: https://www.fsf.org/ - [nautilus]: https://github.com/orgs/Nautilus-Cyberneering/ 
[Dutch Bits]: https://dutchbits.nl [Naim A.]: https://github.com/naim94a/udpt diff --git a/contrib/bencode/README.md b/contrib/bencode/README.md index 7a203082b..81c09f691 100644 --- a/contrib/bencode/README.md +++ b/contrib/bencode/README.md @@ -1,4 +1,5 @@ # Bencode + This library allows for the creation and parsing of bencode encodings. -Bencode is the binary encoding used throughout bittorrent technologies from metainfo files to DHT messages. Bencode types include integers, byte arrays, lists, and dictionaries, of which the last two can hold any bencode type (they could be recursively constructed). \ No newline at end of file +Bencode is the binary encoding used throughout bittorrent technologies from metainfo files to DHT messages. Bencode types include integers, byte arrays, lists, and dictionaries, of which the last two can hold any bencode type (they could be recursively constructed). diff --git a/contrib/dev-tools/su-exec/README.md b/contrib/dev-tools/su-exec/README.md index 2b0517377..1dd4108ac 100644 --- a/contrib/dev-tools/su-exec/README.md +++ b/contrib/dev-tools/su-exec/README.md @@ -1,4 +1,5 @@ # su-exec + switch user and group id, setgroups and exec ## Purpose @@ -21,7 +22,7 @@ name separated with colon (e.g. `nobody:ftp`). Numeric uid/gid values can be used instead of names. Example: ```shell -$ su-exec apache:1000 /usr/sbin/httpd -f /opt/www/httpd.conf +su-exec apache:1000 /usr/sbin/httpd -f /opt/www/httpd.conf ``` ## TTY & parent/child handling @@ -43,4 +44,3 @@ PID USER TIME COMMAND This does more or less exactly the same thing as [gosu](https://github.com/tianon/gosu) but it is only 10kb instead of 1.8MB. 
- From 0e174af960b966e3a6a8d1b69fd306a4fbc07b83 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Apr 2026 16:53:14 +0100 Subject: [PATCH 800/802] fix(lint/yaml): resolve workflow yamllint issues --- .github/workflows/container.yaml | 10 ++++++++-- .github/workflows/coverage.yaml | 4 ++-- .github/workflows/generate_coverage_pr.yaml | 2 +- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index e0857e936..7e8ffa442 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -80,9 +80,15 @@ jobs: echo "continue=true" >> $GITHUB_OUTPUT echo "On \`develop\` Branch, Type: \`development\`" - elif [[ $(echo "${{ github.ref }}" | grep -P '^(refs\/heads\/releases\/)(v)(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$') ]]; then + elif [[ "${{ github.ref }}" =~ ^refs/heads/releases/ ]]; then + semver_regex='^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-((0|[1-9][0-9]*|[0-9]*[A-Za-z-][0-9A-Za-z-]*)(\.(0|[1-9][0-9]*|[0-9]*[A-Za-z-][0-9A-Za-z-]*))*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$' + version=$(echo "${{ github.ref }}" | sed -n -E 's#^refs/heads/releases/##p') + + if [[ ! "$version" =~ $semver_regex ]]; then + echo "Not a valid release branch semver. 
Will Not Continue" + exit 0 + fi - version=$(echo "${{ github.ref }}" | sed -n -E 's/^(refs\/heads\/releases\/)//p') echo "version=$version" >> $GITHUB_OUTPUT echo "type=release" >> $GITHUB_OUTPUT echo "continue=true" >> $GITHUB_OUTPUT diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index 2c8d63d6c..4c49217c2 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -44,7 +44,7 @@ jobs: - id: coverage name: Generate Coverage Report run: | - cargo clean + cargo clean cargo llvm-cov --all-features --workspace --codecov --output-path ./codecov.json - id: upload @@ -54,4 +54,4 @@ jobs: verbose: true token: ${{ secrets.CODECOV_TOKEN }} files: ${{ github.workspace }}/codecov.json - fail_ci_if_error: true \ No newline at end of file + fail_ci_if_error: true diff --git a/.github/workflows/generate_coverage_pr.yaml b/.github/workflows/generate_coverage_pr.yaml index a3f97dbf2..e07a5a755 100644 --- a/.github/workflows/generate_coverage_pr.yaml +++ b/.github/workflows/generate_coverage_pr.yaml @@ -44,7 +44,7 @@ jobs: - id: coverage name: Generate Coverage Report run: | - cargo clean + cargo clean cargo llvm-cov --all-features --workspace --codecov --output-path ./codecov.json - name: Store PR number and commit SHA From 7085250ee5033b6ed62dfdf92e4c2c57256dbb85 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Apr 2026 16:55:12 +0100 Subject: [PATCH 801/802] fix(lint/toml): normalize taplo formatting across workspace --- .cargo/config.toml | 32 +++++++++---------- .taplo.toml | 18 ++++------- Cargo.toml | 24 +++++++------- console/tracker-client/Cargo.toml | 18 +++++------ contrib/bencode/Cargo.toml | 4 +-- .../axum-health-check-api-server/Cargo.toml | 18 +++++------ packages/axum-http-tracker-server/Cargo.toml | 20 ++++++------ .../axum-rest-tracker-api-server/Cargo.toml | 28 ++++++++-------- packages/axum-server/Cargo.toml | 12 +++---- packages/clock/Cargo.toml | 4 +-- packages/configuration/Cargo.toml | 16 
+++++----- packages/events/Cargo.toml | 4 +-- packages/http-protocol/Cargo.toml | 6 ++-- packages/http-tracker-core/Cargo.toml | 6 ++-- packages/located-error/Cargo.toml | 2 +- packages/metrics/Cargo.toml | 8 ++--- packages/primitives/Cargo.toml | 6 ++-- packages/rest-tracker-api-client/Cargo.toml | 10 +++--- packages/rest-tracker-api-core/Cargo.toml | 4 +-- packages/server-lib/Cargo.toml | 8 ++--- .../swarm-coordination-registry/Cargo.toml | 12 +++---- packages/test-helpers/Cargo.toml | 4 +-- .../Cargo.toml | 8 ++--- packages/tracker-client/Cargo.toml | 12 +++---- packages/tracker-core/Cargo.toml | 14 ++++---- packages/udp-protocol/Cargo.toml | 2 +- packages/udp-tracker-core/Cargo.toml | 6 ++-- packages/udp-tracker-server/Cargo.toml | 10 +++--- 28 files changed, 156 insertions(+), 160 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index 28cde74ec..36a0b3d8c 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -7,20 +7,20 @@ time = "build --timings --all-targets" [build] rustflags = [ - "-D", - "warnings", - "-D", - "future-incompatible", - "-D", - "let-underscore", - "-D", - "nonstandard-style", - "-D", - "rust-2018-compatibility", - "-D", - "rust-2018-idioms", - "-D", - "rust-2021-compatibility", - "-D", - "unused", + "-D", + "warnings", + "-D", + "future-incompatible", + "-D", + "let-underscore", + "-D", + "nonstandard-style", + "-D", + "rust-2018-compatibility", + "-D", + "rust-2018-idioms", + "-D", + "rust-2021-compatibility", + "-D", + "unused", ] diff --git a/.taplo.toml b/.taplo.toml index d0f755dcd..0168711e8 100644 --- a/.taplo.toml +++ b/.taplo.toml @@ -2,11 +2,7 @@ # Used by the "Even Better TOML" VS Code extension # Exclude generated and runtime folders from linting -exclude = [ - "target/**", - "storage/**", - ".coverage/**", -] +exclude = [ ".coverage/**", "storage/**", "target/**" ] [formatting] # Preserve blank lines that exist @@ -14,18 +10,18 @@ allowed_blank_lines = 1 # Don't reorder keys to maintain structure 
reorder_keys = false # Array formatting -array_trailing_comma = true -array_auto_expand = false array_auto_collapse = false +array_auto_expand = false +array_trailing_comma = true # Inline table formatting -inline_table_expand = false -compact_inline_tables = false compact_arrays = false +compact_inline_tables = false +inline_table_expand = false # Alignment -align_entries = false align_comments = true +align_entries = false # Indentation -indent_tables = false indent_entries = false +indent_tables = false # Other trailing_newline = true diff --git a/Cargo.toml b/Cargo.toml index dbc39bdf8..1eb5f0d35 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,13 +19,13 @@ version.workspace = true name = "torrust_tracker_lib" [workspace.package] -authors = ["Nautilus Cyberneering , Mick van Dijke "] -categories = ["network-programming", "web-programming"] +authors = [ "Nautilus Cyberneering , Mick van Dijke " ] +categories = [ "network-programming", "web-programming" ] description = "A feature rich BitTorrent tracker." 
documentation = "https://docs.rs/crate/torrust-tracker/" edition = "2021" homepage = "https://torrust.com/" -keywords = ["bittorrent", "file-sharing", "peer-to-peer", "torrent", "tracker"] +keywords = [ "bittorrent", "file-sharing", "peer-to-peer", "torrent", "tracker" ] license = "AGPL-3.0-only" publish = true repository = "https://github.com/torrust/torrust-tracker" @@ -34,19 +34,19 @@ version = "3.0.0-develop" [dependencies] anyhow = "1" -axum-server = { version = "0", features = ["tls-rustls-no-provider"] } +axum-server = { version = "0", features = [ "tls-rustls-no-provider" ] } bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "packages/http-tracker-core" } bittorrent-tracker-core = { version = "3.0.0-develop", path = "packages/tracker-core" } bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "packages/udp-tracker-core" } -chrono = { version = "0", default-features = false, features = ["clock"] } -clap = { version = "4", features = ["derive", "env"] } +chrono = { version = "0", default-features = false, features = [ "clock" ] } +clap = { version = "4", features = [ "derive", "env" ] } rand = "0" regex = "1" -reqwest = { version = "0", features = ["json"] } -serde = { version = "1", features = ["derive"] } -serde_json = { version = "1", features = ["preserve_order"] } +reqwest = { version = "0", features = [ "json" ] } +serde = { version = "1", features = [ "derive" ] } +serde_json = { version = "1", features = [ "preserve_order" ] } thiserror = "2.0.12" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } tokio-util = "0.7.15" torrust-axum-health-check-api-server = { version = "3.0.0-develop", path = "packages/axum-health-check-api-server" } torrust-axum-http-tracker-server = { version = "3.0.0-develop", path = "packages/axum-http-tracker-server" } @@ -59,7 +59,7 @@ 
torrust-tracker-configuration = { version = "3.0.0-develop", path = "packages/co torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "packages/swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "packages/udp-tracker-server" } tracing = "0" -tracing-subscriber = { version = "0", features = ["json"] } +tracing-subscriber = { version = "0", features = [ "json" ] } [dev-dependencies] bittorrent-primitives = "0.1.0" @@ -70,7 +70,7 @@ torrust-rest-tracker-api-client = { version = "3.0.0-develop", path = "packages/ torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "packages/test-helpers" } [workspace] -members = ["console/tracker-client", "packages/torrent-repository-benchmarking"] +members = [ "console/tracker-client", "packages/torrent-repository-benchmarking" ] [profile.dev] debug = 1 diff --git a/console/tracker-client/Cargo.toml b/console/tracker-client/Cargo.toml index d4ab7c9e3..8c12227e9 100644 --- a/console/tracker-client/Cargo.toml +++ b/console/tracker-client/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A collection of console clients to make requests to BitTorrent trackers." 
-keywords = ["bittorrent", "client", "tracker"] +keywords = [ "bittorrent", "client", "tracker" ] license = "LGPL-3.0" name = "torrust-tracker-client" readme = "README.md" @@ -19,21 +19,21 @@ anyhow = "1" aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" bittorrent-tracker-client = { version = "3.0.0-develop", path = "../../packages/tracker-client" } -clap = { version = "4", features = ["derive", "env"] } +clap = { version = "4", features = [ "derive", "env" ] } futures = "0" hex-literal = "1" hyper = "1" -reqwest = { version = "0", features = ["json"] } -serde = { version = "1", features = ["derive"] } +reqwest = { version = "0", features = [ "json" ] } +serde = { version = "1", features = [ "derive" ] } serde_bencode = "0" serde_bytes = "0" -serde_json = { version = "1", features = ["preserve_order"] } +serde_json = { version = "1", features = [ "preserve_order" ] } thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../../packages/configuration" } tracing = "0" -tracing-subscriber = { version = "0", features = ["json"] } -url = { version = "2", features = ["serde"] } +tracing-subscriber = { version = "0", features = [ "json" ] } +url = { version = "2", features = [ "serde" ] } [package.metadata.cargo-machete] -ignored = ["serde_bytes"] +ignored = [ "serde_bytes" ] diff --git a/contrib/bencode/Cargo.toml b/contrib/bencode/Cargo.toml index f6355b6fc..5fab1792d 100644 --- a/contrib/bencode/Cargo.toml +++ b/contrib/bencode/Cargo.toml @@ -1,10 +1,10 @@ [package] description = "(contrib) Efficient decoding and encoding for bencode." 
-keywords = ["bencode", "contrib", "library"] +keywords = [ "bencode", "contrib", "library" ] name = "torrust-tracker-contrib-bencode" readme = "README.md" -authors = ["Nautilus Cyberneering , Andrew "] +authors = [ "Nautilus Cyberneering , Andrew " ] license = "Apache-2.0" repository = "https://github.com/torrust/bittorrent-infrastructure-project" diff --git a/packages/axum-health-check-api-server/Cargo.toml b/packages/axum-health-check-api-server/Cargo.toml index e0504f7df..cf9d8d9a3 100644 --- a/packages/axum-health-check-api-server/Cargo.toml +++ b/packages/axum-health-check-api-server/Cargo.toml @@ -4,7 +4,7 @@ description = "The Torrust Bittorrent HTTP tracker." documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["axum", "bittorrent", "healthcheck", "http", "server", "torrust", "tracker"] +keywords = [ "axum", "bittorrent", "healthcheck", "http", "server", "torrust", "tracker" ] license.workspace = true name = "torrust-axum-health-check-api-server" publish.workspace = true @@ -14,27 +14,27 @@ rust-version.workspace = true version.workspace = true [dependencies] -axum = { version = "0", features = ["macros"] } -axum-server = { version = "0", features = ["tls-rustls-no-provider"] } +axum = { version = "0", features = [ "macros" ] } +axum-server = { version = "0", features = [ "tls-rustls-no-provider" ] } futures = "0" hyper = "1" -serde = { version = "1", features = ["derive"] } -serde_json = { version = "1", features = ["preserve_order"] } -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +serde = { version = "1", features = [ "derive" ] } +serde_json = { version = "1", features = [ "preserve_order" ] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } torrust-axum-server = { version = "3.0.0-develop", path = "../axum-server" } torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } 
torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } +tower-http = { version = "0", features = [ "compression-full", "cors", "propagate-header", "request-id", "trace" ] } tracing = "0" url = "2.5.4" [dev-dependencies] -reqwest = { version = "0", features = ["json"] } +reqwest = { version = "0", features = [ "json" ] } torrust-axum-health-check-api-server = { version = "3.0.0-develop", path = "../axum-health-check-api-server" } torrust-axum-http-tracker-server = { version = "3.0.0-develop", path = "../axum-http-tracker-server" } torrust-axum-rest-tracker-api-server = { version = "3.0.0-develop", path = "../axum-rest-tracker-api-server" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } -tracing-subscriber = { version = "0", features = ["json"] } +tracing-subscriber = { version = "0", features = [ "json" ] } diff --git a/packages/axum-http-tracker-server/Cargo.toml b/packages/axum-http-tracker-server/Cargo.toml index eb2c2cad3..88d073527 100644 --- a/packages/axum-http-tracker-server/Cargo.toml +++ b/packages/axum-http-tracker-server/Cargo.toml @@ -4,7 +4,7 @@ description = "The Torrust Bittorrent HTTP tracker." 
documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["axum", "bittorrent", "http", "server", "torrust", "tracker"] +keywords = [ "axum", "bittorrent", "http", "server", "torrust", "tracker" ] license.workspace = true name = "torrust-axum-http-tracker-server" publish.workspace = true @@ -15,19 +15,19 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" -axum = { version = "0", features = ["macros"] } +axum = { version = "0", features = [ "macros" ] } axum-client-ip = "0" -axum-server = { version = "0", features = ["tls-rustls-no-provider"] } +axum-server = { version = "0", features = [ "tls-rustls-no-provider" ] } bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "../http-tracker-core" } bittorrent-http-tracker-protocol = { version = "3.0.0-develop", path = "../http-protocol" } bittorrent-primitives = "0.1.0" bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = [ "as_ref", "constructor", "from" ] } futures = "0" hyper = "1" -reqwest = { version = "0", features = ["json"] } -serde = { version = "1", features = ["derive"] } -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +reqwest = { version = "0", features = [ "json" ] } +serde = { version = "1", features = [ "derive" ] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } tokio-util = "0.7.15" torrust-axum-server = { version = "3.0.0-develop", path = "../axum-server" } torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } @@ -35,8 +35,8 @@ torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } 
torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } -tower = { version = "0", features = ["timeout"] } -tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } +tower = { version = "0", features = [ "timeout" ] } +tower-http = { version = "0", features = [ "compression-full", "cors", "propagate-header", "request-id", "trace" ] } tracing = "0" [dev-dependencies] @@ -49,5 +49,5 @@ serde_repr = "0" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-events = { version = "3.0.0-develop", path = "../events" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } -uuid = { version = "1", features = ["v4"] } +uuid = { version = "1", features = [ "v4" ] } zerocopy = "0.7" diff --git a/packages/axum-rest-tracker-api-server/Cargo.toml b/packages/axum-rest-tracker-api-server/Cargo.toml index 9493b8693..7353e66e8 100644 --- a/packages/axum-rest-tracker-api-server/Cargo.toml +++ b/packages/axum-rest-tracker-api-server/Cargo.toml @@ -4,7 +4,7 @@ description = "The Torrust Tracker API." 
documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["axum", "bittorrent", "http", "server", "torrust", "tracker"] +keywords = [ "axum", "bittorrent", "http", "server", "torrust", "tracker" ] license.workspace = true name = "torrust-axum-rest-tracker-api-server" publish.workspace = true @@ -15,22 +15,22 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" -axum = { version = "0", features = ["macros"] } -axum-extra = { version = "0", features = ["query"] } -axum-server = { version = "0", features = ["tls-rustls-no-provider"] } +axum = { version = "0", features = [ "macros" ] } +axum-extra = { version = "0", features = [ "query" ] } +axum-server = { version = "0", features = [ "tls-rustls-no-provider" ] } bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "../http-tracker-core" } bittorrent-primitives = "0.1.0" bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "../udp-tracker-core" } -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = [ "as_ref", "constructor", "from" ] } futures = "0" hyper = "1" -reqwest = { version = "0", features = ["json"] } -serde = { version = "1", features = ["derive"] } -serde_json = { version = "1", features = ["preserve_order"] } -serde_with = { version = "3", features = ["json"] } +reqwest = { version = "0", features = [ "json" ] } +serde = { version = "1", features = [ "derive" ] } +serde_json = { version = "1", features = [ "preserve_order" ] } +serde_with = { version = "3", features = [ "json" ] } thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } torrust-axum-server = { version = "3.0.0-develop", path = "../axum-server" } 
torrust-rest-tracker-api-client = { version = "3.0.0-develop", path = "../rest-tracker-api-client" } torrust-rest-tracker-api-core = { version = "3.0.0-develop", path = "../rest-tracker-api-core" } @@ -41,8 +41,8 @@ torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } torrust-udp-tracker-server = { version = "3.0.0-develop", path = "../udp-tracker-server" } -tower = { version = "0", features = ["timeout"] } -tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } +tower = { version = "0", features = [ "timeout" ] } +tower-http = { version = "0", features = [ "compression-full", "cors", "propagate-header", "request-id", "trace" ] } tracing = "0" url = "2" @@ -51,5 +51,5 @@ local-ip-address = "0" mockall = "0" torrust-rest-tracker-api-client = { version = "3.0.0-develop", path = "../rest-tracker-api-client" } torrust-tracker-test-helpers = { version = "3.0.0-develop", path = "../test-helpers" } -url = { version = "2", features = ["serde"] } -uuid = { version = "1", features = ["v4"] } +url = { version = "2", features = [ "serde" ] } +uuid = { version = "1", features = [ "v4" ] } diff --git a/packages/axum-server/Cargo.toml b/packages/axum-server/Cargo.toml index a60bab885..45eddd3b0 100644 --- a/packages/axum-server/Cargo.toml +++ b/packages/axum-server/Cargo.toml @@ -4,7 +4,7 @@ description = "A wrapper for the Axum server for Torrust HTTP servers to add tim documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["axum", "server", "torrust", "wrapper"] +keywords = [ "axum", "server", "torrust", "wrapper" ] license.workspace = true name = "torrust-axum-server" publish.workspace = true @@ -14,19 +14,19 @@ rust-version.workspace = true version.workspace 
= true [dependencies] -axum-server = { version = "0", features = ["tls-rustls-no-provider"] } -camino = { version = "1", features = ["serde", "serde1"] } +axum-server = { version = "0", features = [ "tls-rustls-no-provider" ] } +camino = { version = "1", features = [ "serde", "serde1" ] } futures-util = "0" http-body = "1" hyper = "1" -hyper-util = { version = "0", features = ["http1", "http2", "tokio"] } +hyper-util = { version = "0", features = [ "http1", "http2", "tokio" ] } pin-project-lite = "0" thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } -tower = { version = "0", features = ["timeout"] } +tower = { version = "0", features = [ "timeout" ] } tracing = "0" [dev-dependencies] diff --git a/packages/clock/Cargo.toml b/packages/clock/Cargo.toml index 3bd00d2b0..c0cafff0a 100644 --- a/packages/clock/Cargo.toml +++ b/packages/clock/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library to a clock for the torrust tracker." 
-keywords = ["clock", "library", "torrents"] +keywords = [ "clock", "library", "torrents" ] name = "torrust-tracker-clock" readme = "README.md" @@ -16,7 +16,7 @@ rust-version.workspace = true version.workspace = true [dependencies] -chrono = { version = "0", default-features = false, features = ["clock"] } +chrono = { version = "0", default-features = false, features = [ "clock" ] } lazy_static = "1" tracing = "0" diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index e213f7c0c..1155ba417 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library to provide configuration to the Torrust Tracker." -keywords = ["config", "library", "settings"] +keywords = [ "config", "library", "settings" ] name = "torrust-tracker-configuration" readme = "README.md" @@ -15,18 +15,18 @@ rust-version.workspace = true version.workspace = true [dependencies] -camino = { version = "1", features = ["serde", "serde1"] } -derive_more = { version = "2", features = ["constructor", "display"] } -figment = { version = "0", features = ["env", "test", "toml"] } -serde = { version = "1", features = ["derive"] } -serde_json = { version = "1", features = ["preserve_order"] } +camino = { version = "1", features = [ "serde", "serde1" ] } +derive_more = { version = "2", features = [ "constructor", "display" ] } +figment = { version = "0", features = [ "env", "test", "toml" ] } +serde = { version = "1", features = [ "derive" ] } +serde_json = { version = "1", features = [ "preserve_order" ] } serde_with = "3" thiserror = "2" toml = "0" torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } tracing = "0" -tracing-subscriber = { version = "0", features = ["json"] } +tracing-subscriber = { version = "0", features = [ "json" ] } url = "2" [dev-dependencies] -uuid = { version = "1", features = ["v4"] } +uuid = { version = "1", features = [ "v4" ] } diff --git 
a/packages/events/Cargo.toml b/packages/events/Cargo.toml index 1d183cddb..165ecca68 100644 --- a/packages/events/Cargo.toml +++ b/packages/events/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library with functionality to handle events in Torrust tracker packages." -keywords = ["events", "library", "rust", "torrust", "tracker"] +keywords = [ "events", "library", "rust", "torrust", "tracker" ] name = "torrust-tracker-events" readme = "README.md" @@ -16,7 +16,7 @@ version.workspace = true [dependencies] futures = "0" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync", "time"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync", "time" ] } [dev-dependencies] mockall = "0" diff --git a/packages/http-protocol/Cargo.toml b/packages/http-protocol/Cargo.toml index 7803fe78e..78a037b18 100644 --- a/packages/http-protocol/Cargo.toml +++ b/packages/http-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library with the primitive types and functions for the BitTorrent HTTP tracker protocol." 
-keywords = ["api", "library", "primitives"] +keywords = [ "api", "library", "primitives" ] name = "bittorrent-http-tracker-protocol" readme = "README.md" @@ -18,10 +18,10 @@ version.workspace = true aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = [ "as_ref", "constructor", "from" ] } multimap = "0" percent-encoding = "2" -serde = { version = "1", features = ["derive"] } +serde = { version = "1", features = [ "derive" ] } serde_bencode = "0" thiserror = "2" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } diff --git a/packages/http-tracker-core/Cargo.toml b/packages/http-tracker-core/Cargo.toml index 04a6c96b6..c419052f9 100644 --- a/packages/http-tracker-core/Cargo.toml +++ b/packages/http-tracker-core/Cargo.toml @@ -4,7 +4,7 @@ description = "A library with the core functionality needed to implement a BitTo documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["api", "bittorrent", "core", "library", "tracker"] +keywords = [ "api", "bittorrent", "core", "library", "tracker" ] license.workspace = true name = "bittorrent-http-tracker-core" publish.workspace = true @@ -18,11 +18,11 @@ aquatic_udp_protocol = "0" bittorrent-http-tracker-protocol = { version = "3.0.0-develop", path = "../http-protocol" } bittorrent-primitives = "0.1.0" bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } -criterion = { version = "0.5.1", features = ["async_tokio"] } +criterion = { version = "0.5.1", features = [ "async_tokio" ] } futures = "0" serde = "1.0.219" thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } tokio-util = 
"0.7.15" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index 29b0dfb2c..232a6113f 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library to provide error decorator with the location and the source of the original error." -keywords = ["errors", "helper", "library"] +keywords = [ "errors", "helper", "library" ] name = "torrust-tracker-located-error" readme = "README.md" diff --git a/packages/metrics/Cargo.toml b/packages/metrics/Cargo.toml index 0597785f4..b6d327d70 100644 --- a/packages/metrics/Cargo.toml +++ b/packages/metrics/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library with the primitive types shared by the Torrust tracker packages." -keywords = ["api", "library", "metrics"] +keywords = [ "api", "library", "metrics" ] name = "torrust-tracker-metrics" readme = "README.md" @@ -15,9 +15,9 @@ rust-version.workspace = true version.workspace = true [dependencies] -chrono = { version = "0", default-features = false, features = ["clock"] } -derive_more = { version = "2", features = ["constructor"] } -serde = { version = "1", features = ["derive"] } +chrono = { version = "0", default-features = false, features = [ "clock" ] } +derive_more = { version = "2", features = [ "constructor" ] } +serde = { version = "1", features = [ "derive" ] } serde_json = "1.0.140" thiserror = "2" torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index 21fab09bf..c9ce64177 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library with the primitive types shared by the Torrust tracker packages." 
-keywords = ["api", "library", "primitives"] +keywords = [ "api", "library", "primitives" ] name = "torrust-tracker-primitives" readme = "README.md" @@ -18,8 +18,8 @@ version.workspace = true aquatic_udp_protocol = "0" binascii = "0" bittorrent-primitives = "0.1.0" -derive_more = { version = "2", features = ["constructor"] } -serde = { version = "1", features = ["derive"] } +derive_more = { version = "2", features = [ "constructor" ] } +serde = { version = "1", features = [ "derive" ] } tdyne-peer-id = "1" tdyne-peer-id-registry = "0" thiserror = "2" diff --git a/packages/rest-tracker-api-client/Cargo.toml b/packages/rest-tracker-api-client/Cargo.toml index c01b9c05a..47307df9a 100644 --- a/packages/rest-tracker-api-client/Cargo.toml +++ b/packages/rest-tracker-api-client/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library to interact with the Torrust Tracker REST API." -keywords = ["bittorrent", "client", "tracker"] +keywords = [ "bittorrent", "client", "tracker" ] license = "LGPL-3.0" name = "torrust-rest-tracker-api-client" readme = "README.md" @@ -16,8 +16,8 @@ version.workspace = true [dependencies] hyper = "1" -reqwest = { version = "0", features = ["json", "query"] } -serde = { version = "1", features = ["derive"] } +reqwest = { version = "0", features = [ "json", "query" ] } +serde = { version = "1", features = [ "derive" ] } thiserror = "2" -url = { version = "2", features = ["serde"] } -uuid = { version = "1", features = ["v4"] } +url = { version = "2", features = [ "serde" ] } +uuid = { version = "1", features = [ "v4" ] } diff --git a/packages/rest-tracker-api-core/Cargo.toml b/packages/rest-tracker-api-core/Cargo.toml index be6d493d7..0808c2dd6 100644 --- a/packages/rest-tracker-api-core/Cargo.toml +++ b/packages/rest-tracker-api-core/Cargo.toml @@ -4,7 +4,7 @@ description = "A library with the core functionality needed to implement a BitTo documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["api", 
"bittorrent", "core", "library", "tracker"] +keywords = [ "api", "bittorrent", "core", "library", "tracker" ] license.workspace = true name = "torrust-rest-tracker-api-core" publish.workspace = true @@ -17,7 +17,7 @@ version.workspace = true bittorrent-http-tracker-core = { version = "3.0.0-develop", path = "../http-tracker-core" } bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "../udp-tracker-core" } -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } tokio-util = "0.7.15" torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } diff --git a/packages/server-lib/Cargo.toml b/packages/server-lib/Cargo.toml index 1d30e7fb5..fbd7a7a7f 100644 --- a/packages/server-lib/Cargo.toml +++ b/packages/server-lib/Cargo.toml @@ -4,7 +4,7 @@ description = "Common functionality used in all Torrust HTTP servers." 
documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["lib", "server", "torrust"] +keywords = [ "lib", "server", "torrust" ] license.workspace = true name = "torrust-server-lib" publish.workspace = true @@ -14,10 +14,10 @@ rust-version.workspace = true version.workspace = true [dependencies] -derive_more = { version = "2", features = ["as_ref", "constructor", "display", "from"] } -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +derive_more = { version = "2", features = [ "as_ref", "constructor", "display", "from" ] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } -tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } +tower-http = { version = "0", features = [ "compression-full", "cors", "propagate-header", "request-id", "trace" ] } tracing = "0" [dev-dependencies] diff --git a/packages/swarm-coordination-registry/Cargo.toml b/packages/swarm-coordination-registry/Cargo.toml index 45359ad81..f9513d3c4 100644 --- a/packages/swarm-coordination-registry/Cargo.toml +++ b/packages/swarm-coordination-registry/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library that provides a repository of torrents files and their peers." 
-keywords = ["library", "repository", "torrents"] +keywords = [ "library", "repository", "torrents" ] name = "torrust-tracker-swarm-coordination-registry" readme = "README.md" @@ -18,12 +18,12 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" -chrono = { version = "0", default-features = false, features = ["clock"] } +chrono = { version = "0", default-features = false, features = [ "clock" ] } crossbeam-skiplist = "0" futures = "0" -serde = { version = "1.0.219", features = ["derive"] } +serde = { version = "1.0.219", features = [ "derive" ] } thiserror = "2.0.12" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } tokio-util = "0.7.15" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } @@ -33,8 +33,8 @@ torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" tracing = "0" [dev-dependencies] -async-std = { version = "1", features = ["attributes", "tokio1"] } -criterion = { version = "0", features = ["async_tokio"] } +async-std = { version = "1", features = [ "attributes", "tokio1" ] } +criterion = { version = "0", features = [ "async_tokio" ] } mockall = "0" rand = "0" rstest = "0" diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 3495c314a..fb240730d 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library providing helpers for testing the Torrust tracker." 
-keywords = ["helper", "library", "testing"] +keywords = [ "helper", "library", "testing" ] name = "torrust-tracker-test-helpers" readme = "README.md" @@ -18,4 +18,4 @@ version.workspace = true rand = "0" torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } tracing = "0" -tracing-subscriber = { version = "0", features = ["json"] } +tracing-subscriber = { version = "0", features = [ "json" ] } diff --git a/packages/torrent-repository-benchmarking/Cargo.toml b/packages/torrent-repository-benchmarking/Cargo.toml index 1a93c513c..653ad8102 100644 --- a/packages/torrent-repository-benchmarking/Cargo.toml +++ b/packages/torrent-repository-benchmarking/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library to runt benchmarking for different implementations of a repository of torrents files and their peers." -keywords = ["library", "repository", "torrents"] +keywords = [ "library", "repository", "torrents" ] name = "torrust-tracker-torrent-repository-benchmarking" readme = "README.md" @@ -22,15 +22,15 @@ crossbeam-skiplist = "0" dashmap = "6" futures = "0" parking_lot = "0" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } zerocopy = "0.7" [dev-dependencies] -async-std = { version = "1", features = ["attributes", "tokio1"] } -criterion = { version = "0", features = ["async_tokio"] } +async-std = { version = "1", features = [ "attributes", "tokio1" ] } +criterion = { version = "0", features = [ "async_tokio" ] } rstest = "0" [[bench]] diff --git a/packages/tracker-client/Cargo.toml b/packages/tracker-client/Cargo.toml index ef5cccaa2..0cd419471 100644 
--- a/packages/tracker-client/Cargo.toml +++ b/packages/tracker-client/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library with the generic tracker clients." -keywords = ["bittorrent", "client", "tracker"] +keywords = [ "bittorrent", "client", "tracker" ] license = "LGPL-3.0" name = "bittorrent-tracker-client" readme = "README.md" @@ -17,16 +17,16 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = [ "as_ref", "constructor", "from" ] } hyper = "1" percent-encoding = "2" -reqwest = { version = "0", features = ["json"] } -serde = { version = "1", features = ["derive"] } +reqwest = { version = "0", features = [ "json" ] } +serde = { version = "1", features = [ "derive" ] } serde_bencode = "0" serde_bytes = "0" serde_repr = "0" thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } torrust-tracker-located-error = { version = "3.0.0-develop", path = "../located-error" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } @@ -34,4 +34,4 @@ tracing = "0" zerocopy = "0.7" [package.metadata.cargo-machete] -ignored = ["serde_bytes"] +ignored = [ "serde_bytes" ] diff --git a/packages/tracker-core/Cargo.toml b/packages/tracker-core/Cargo.toml index dfc83e58e..fb864cde7 100644 --- a/packages/tracker-core/Cargo.toml +++ b/packages/tracker-core/Cargo.toml @@ -4,7 +4,7 @@ description = "A library with the core functionality needed to implement a BitTo documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["api", "bittorrent", "core", "library", "tracker"] +keywords = [ "api", "bittorrent", "core", 
"library", "tracker" ] license.workspace = true name = "bittorrent-tracker-core" publish.workspace = true @@ -16,17 +16,17 @@ version.workspace = true [dependencies] aquatic_udp_protocol = "0" bittorrent-primitives = "0.1.0" -chrono = { version = "0", default-features = false, features = ["clock"] } -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } +chrono = { version = "0", default-features = false, features = [ "clock" ] } +derive_more = { version = "2", features = [ "as_ref", "constructor", "from" ] } mockall = "0" r2d2 = "0" r2d2_mysql = "25" -r2d2_sqlite = { version = "0", features = ["bundled"] } +r2d2_sqlite = { version = "0", features = [ "bundled" ] } rand = "0" -serde = { version = "1", features = ["derive"] } -serde_json = { version = "1", features = ["preserve_order"] } +serde = { version = "1", features = [ "derive" ] } +serde_json = { version = "1", features = [ "preserve_order" ] } thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } tokio-util = "0.7.15" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } diff --git a/packages/udp-protocol/Cargo.toml b/packages/udp-protocol/Cargo.toml index 31fd52af8..3bcde9a95 100644 --- a/packages/udp-protocol/Cargo.toml +++ b/packages/udp-protocol/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "A library with the primitive types and functions for the BitTorrent UDP tracker protocol." 
-keywords = ["bittorrent", "library", "primitives", "udp"] +keywords = [ "bittorrent", "library", "primitives", "udp" ] name = "bittorrent-udp-tracker-protocol" readme = "README.md" diff --git a/packages/udp-tracker-core/Cargo.toml b/packages/udp-tracker-core/Cargo.toml index aa12f898f..828b3aff2 100644 --- a/packages/udp-tracker-core/Cargo.toml +++ b/packages/udp-tracker-core/Cargo.toml @@ -4,7 +4,7 @@ description = "A library with the core functionality needed to implement a BitTo documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["api", "bittorrent", "core", "library", "tracker"] +keywords = [ "api", "bittorrent", "core", "library", "tracker" ] license.workspace = true name = "bittorrent-udp-tracker-core" publish.workspace = true @@ -21,14 +21,14 @@ bittorrent-udp-tracker-protocol = { version = "3.0.0-develop", path = "../udp-pr bloom = "0.3.2" blowfish = "0" cipher = "0.4" -criterion = { version = "0.5.1", features = ["async_tokio"] } +criterion = { version = "0.5.1", features = [ "async_tokio" ] } futures = "0" generic-array = "0" lazy_static = "1" rand = "0" serde = "1.0.219" thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync", "time"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync", "time" ] } tokio-util = "0.7.15" torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } torrust-tracker-configuration = { version = "3.0.0-develop", path = "../configuration" } diff --git a/packages/udp-tracker-server/Cargo.toml b/packages/udp-tracker-server/Cargo.toml index 160fe58f9..dc66572d8 100644 --- a/packages/udp-tracker-server/Cargo.toml +++ b/packages/udp-tracker-server/Cargo.toml @@ -4,7 +4,7 @@ description = "The Torrust Bittorrent UDP tracker." 
documentation.workspace = true edition.workspace = true homepage.workspace = true -keywords = ["axum", "bittorrent", "server", "torrust", "tracker", "udp"] +keywords = [ "axum", "bittorrent", "server", "torrust", "tracker", "udp" ] license.workspace = true name = "torrust-udp-tracker-server" publish.workspace = true @@ -19,13 +19,13 @@ bittorrent-primitives = "0.1.0" bittorrent-tracker-client = { version = "3.0.0-develop", path = "../tracker-client" } bittorrent-tracker-core = { version = "3.0.0-develop", path = "../tracker-core" } bittorrent-udp-tracker-core = { version = "3.0.0-develop", path = "../udp-tracker-core" } -derive_more = { version = "2", features = ["as_ref", "constructor", "from"] } +derive_more = { version = "2", features = [ "as_ref", "constructor", "from" ] } futures = "0" futures-util = "0" ringbuf = "0" serde = "1.0.219" thiserror = "2" -tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +tokio = { version = "1", features = [ "macros", "net", "rt-multi-thread", "signal", "sync" ] } tokio-util = "0.7.15" torrust-server-lib = { version = "3.0.0-develop", path = "../server-lib" } torrust-tracker-clock = { version = "3.0.0-develop", path = "../clock" } @@ -35,8 +35,8 @@ torrust-tracker-metrics = { version = "3.0.0-develop", path = "../metrics" } torrust-tracker-primitives = { version = "3.0.0-develop", path = "../primitives" } torrust-tracker-swarm-coordination-registry = { version = "3.0.0-develop", path = "../swarm-coordination-registry" } tracing = "0" -url = { version = "2", features = ["serde"] } -uuid = { version = "1", features = ["v4"] } +url = { version = "2", features = [ "serde" ] } +uuid = { version = "1", features = [ "v4" ] } zerocopy = "0.7" [dev-dependencies] From 1d3ba500e9404c971703f24a3e2132dc62486304 Mon Sep 17 00:00:00 2001 From: Jose Celano Date: Wed, 8 Apr 2026 16:59:16 +0100 Subject: [PATCH 802/802] ci(lint): switch testing workflow to internal linting tool --- 
.github/workflows/testing.yaml | 39 ++++++++++++---------------------- 1 file changed, 13 insertions(+), 26 deletions(-) diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index c9328d890..83a290663 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -33,9 +33,10 @@ jobs: run: cargo fmt --check check: - name: Static Analysis + name: Linting runs-on: ubuntu-latest needs: format + timeout-minutes: 15 strategy: matrix: @@ -51,39 +52,25 @@ jobs: uses: dtolnay/rust-toolchain@stable with: toolchain: ${{ matrix.toolchain }} - components: clippy + components: clippy, rustfmt + + - id: node + name: Setup Node.js + uses: actions/setup-node@v5 + with: + node-version: "20" - id: cache name: Enable Workflow Cache uses: Swatinem/rust-cache@v2 - id: tools - name: Install Tools - uses: taiki-e/install-action@v2 - with: - tool: cargo-machete - - - id: check - name: Run Build Checks - run: cargo check --tests --benches --examples --workspace --all-targets --all-features + name: Install Internal Linter + run: cargo install --locked --git https://github.com/torrust/torrust-linting --bin linter - id: lint - name: Run Lint Checks - run: cargo clippy --tests --benches --examples --workspace --all-targets --all-features - - - id: docs - name: Lint Documentation - env: - RUSTDOCFLAGS: "-D warnings" - run: cargo doc --no-deps --bins --examples --workspace --all-features - - - id: clean - name: Clean Build Directory - run: cargo clean - - - id: deps - name: Check Unused Dependencies - run: cargo machete + name: Run All Linters + run: linter all build: name: Build on ${{ matrix.os }} (${{ matrix.toolchain }})